# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 21:31:02 2017
@author: thuzhang
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
File = 'DataBase/SeasonalPredict.csv'
OriginData = pd.read_csv(File, index_col=False)
# DataFrame.as_matrix() was removed from pandas; use to_numpy() instead.
Data = OriginData.to_numpy().astype(np.float32)
_LengthOfFile = 1095  # expected number of rows (currently unused)
# Split into the target (column 1) and the features (remaining columns)
Y = Data[:, 1]
X = Data[:, 2:]
# Hold out 10% of the samples for testing, then fit a random forest.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
method = RandomForestRegressor()
method.fit(X_train, y_train.ravel())
Y_Predict = method.predict(X_test)
Y_Error=Y_Predict-y_test
_Result=np.c_[Y_Predict,y_test,Y_Error]
df = pd.DataFrame(data=_Result)
df.to_csv('RandomForestRegressorSeasonal.csv')
_Expectation = np.mean(Y_Error)
print('Mean error (bias): %f' % _Expectation)
# 1 - NRMSE (a rough accuracy score), then the normalized RMSE itself
print(1 - np.sqrt(np.mean((Y_Predict - y_test)**2)) / np.mean(y_test))
print(np.sqrt(np.mean((Y_Predict - y_test)**2)) / np.mean(y_test))
plt.figure(figsize=(18,8))
plt.plot(y_test,'green',label='Test')
plt.plot(Y_Predict, 'blue',label='Predict')
plt.plot(Y_Error,'orange',label='Error')
plt.legend(loc='best')
plt.show()
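# The three other regressors imported above are never used below. A minimal
# sketch of how they could be compared on the same split (the default
# hyperparameters here are assumptions, not tuned values):
def nrmse(y_pred, y_true):
    """RMSE normalized by the mean of the true values, as printed above."""
    return np.sqrt(np.mean((y_pred - y_true)**2)) / np.mean(y_true)

for Model in (RandomForestRegressor, GradientBoostingRegressor,
              DecisionTreeRegressor, MLPRegressor):
    reg = Model()
    reg.fit(X_train, y_train.ravel())
    print('%s NRMSE: %.4f' % (Model.__name__, nrmse(reg.predict(X_test), y_test)))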
(* --------------------------------------------------------------------
* Copyright (c) - 2006--2012 - IMDEA Software Institute
* Copyright (c) - 2006--2012 - Inria
* Copyright (c) - 2006--2012 - Microsoft Corporation
*
* Distributed under the terms of the CeCILL-B-V1 license
* -------------------------------------------------------------------- *)
(*************************************************************)
(* This file is distributed under the terms of the *)
(* GNU Lesser General Public License Version 2.1 *)
(*************************************************************)
(* Benjamin.Gregoire@inria.fr Laurent.Thery@inria.fr *)
(*************************************************************)
(**********************************************************************
PGroup.v
Build the group of pairs modulo m needed for the
Lucas-Lehmer theorem
Definition: PGroup
**********************************************************************)
Require Import ZArith.
Require Import ZAux.
Require Import Tactic.
Require Import Wf_nat.
Require Import ListAux.
Require Import UList.
Require Import FGroup.
Require Import EGroup.
Require Import IGroup.
Open Scope Z_scope.
Definition base := 3.
(**************************************
Equality is decidable on pairs
**************************************)
Definition P_dec: forall p q: Z * Z, {p = q} + {p <> q}.
intros p1 q1; case p1; case q1; intros z t x y; case (Z_eq_dec x z); intros H1.
case (Z_eq_dec y t); intros H2.
left; eq_tac; auto.
right; contradict H2; injection H2; auto.
right; contradict H1; injection H1; auto.
Defined.
(**************************************
Addition of two pairs
**************************************)
Definition pplus (p q: Z * Z) := let (x ,y) := p in let (z,t) := q in (x + z, y + t).
(**************************************
Properties of addition
**************************************)
Theorem pplus_assoc: forall p q r, (pplus p (pplus q r)) = (pplus (pplus p q) r).
intros p q r; case p; case q; case r; intros r1 r2 q1 q2 p1 p2; unfold pplus.
eq_tac; ring.
Qed.
Theorem pplus_comm: forall p q, (pplus p q) = (pplus q p).
intros p q; case p; case q; intros q1 q2 p1 p2; unfold pplus.
eq_tac; ring.
Qed.
(**************************************
Multiplication of two pairs
**************************************)
Definition pmult (p q: Z * Z) := let (x ,y) := p in let (z,t) := q in (x * z + base * y * t, x * t + y * z).
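(**************************************
  Reading a pair (x, y) as the number x + y * sqrt base, pmult is
  exactly multiplication in Z[sqrt base]:
    (x + y * sqrt base) * (z + t * sqrt base)
      = (x * z + base * y * t) + (x * t + y * z) * sqrt base.
  The ring laws proved below all follow from this interpretation.
 **************************************)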
(**************************************
Properties of multiplication
**************************************)
Theorem pmult_assoc: forall p q r, (pmult p (pmult q r)) = (pmult (pmult p q) r).
intros p q r; case p; case q; case r; intros r1 r2 q1 q2 p1 p2; unfold pmult.
eq_tac; ring.
Qed.
Theorem pmult_0_l: forall p, (pmult (0, 0) p) = (0, 0).
intros p; case p; intros x y; unfold pmult; eq_tac; ring.
Qed.
Theorem pmult_0_r: forall p, (pmult p (0, 0)) = (0, 0).
intros p; case p; intros x y; unfold pmult; eq_tac; ring.
Qed.
Theorem pmult_1_l: forall p, (pmult (1, 0) p) = p.
intros p; case p; intros x y; unfold pmult; eq_tac; ring.
Qed.
Theorem pmult_1_r: forall p, (pmult p (1, 0)) = p.
intros p; case p; intros x y; unfold pmult; eq_tac; ring.
Qed.
Theorem pmult_comm: forall p q, (pmult p q) = (pmult q p).
intros p q; case p; case q; intros q1 q2 p1 p2; unfold pmult.
eq_tac; ring.
Qed.
Theorem pplus_pmult_dist_l: forall p q r, (pmult p (pplus q r)) = (pplus (pmult p q) (pmult p r)).
intros p q r; case p; case q; case r; intros r1 r2 q1 q2 p1 p2; unfold pplus, pmult.
eq_tac; ring.
Qed.
Theorem pplus_pmult_dist_r: forall p q r, (pmult (pplus q r) p) = (pplus (pmult q p) (pmult r p)).
intros p q r; case p; case q; case r; intros r1 r2 q1 q2 p1 p2; unfold pplus, pmult.
eq_tac; ring.
Qed.
(**************************************
In this section we create the group PGroup of invertible elements of {(p, q) | 0 <= p < m /\ 0 <= q < m}
**************************************)
Section Mod.
Variable m : Z.
Hypothesis m_pos: 1 < m.
(**************************************
mkLine creates the list {(a, p) | 0 <= p <= n}
**************************************)
Fixpoint mkLine (a: Z) (n: nat) {struct n} : list (Z * Z) :=
(a, Z_of_nat n) :: match n with O => nil | (S n1) => mkLine a n1 end.
(**************************************
Some properties of mkLine
**************************************)
Theorem mkLine_length: forall a n, length (mkLine a n) = (n + 1)%nat.
intros a n; elim n; simpl; auto.
Qed.
Theorem mkLine_in: forall a n p, 0 <= p <= Z_of_nat n -> (In (a, p) (mkLine a n)).
intros a n; elim n.
simpl; auto with zarith.
intros p (H1, H2); replace p with 0; auto with zarith.
intros n1 Rec p (H1, H2).
case (Zle_lt_or_eq p (Z_of_nat (S n1))); auto with zarith.
rewrite inj_S in H2; auto with zarith.
rewrite inj_S; auto with zarith.
intros H3; right; apply Rec; auto with zarith.
intros H3; subst; simpl; auto.
Qed.
Theorem in_mkLine: forall a n p, In p (mkLine a n) -> exists q, 0 <= q <= Z_of_nat n /\ p = (a, q).
intros a n p; elim n; clear n.
simpl; intros [H1 | H1]; exists 0; auto with zarith; case H1.
simpl; intros n Rec [H1 | H1]; auto.
exists (Z_of_nat (S n)); auto with zarith.
case Rec; auto; intros q ((H2, H3), H4); exists q; repeat split; auto with zarith.
change (q <= Z_of_nat (S n)).
rewrite inj_S; auto with zarith.
Qed.
Theorem mkLine_ulist: forall a n, ulist (mkLine a n).
intros a n; elim n; simpl; auto.
intros n1 H; apply ulist_cons; auto.
change (~ In (a, Z_of_nat (S n1)) (mkLine a n1)).
rewrite inj_S; intros H1.
case in_mkLine with (1 := H1); auto with zarith.
intros x ((H2, H3), H4); injection H4.
intros H5; subst; auto with zarith.
Qed.
(**************************************
mkRect creates the list {(p, q) | 0 <= p <= n /\ 0 <= q <= m}
**************************************)
Fixpoint mkRect (n m: nat) {struct n} : list (Z * Z) :=
(mkLine (Z_of_nat n) m) ++ match n with O => nil | (S n1) => mkRect n1 m end.
(**************************************
Some properties of mkRect
**************************************)
Theorem mkRect_length: forall n m, length (mkRect n m) = ((n + 1) * (m + 1))%nat.
intros n; elim n; simpl; auto.
intros n1; rewrite <- app_nil_end; rewrite mkLine_length; rewrite plus_0_r; auto.
intros n1 Rec m1; rewrite length_app; rewrite Rec; rewrite mkLine_length; auto.
Qed.
Theorem mkRect_in: forall n m p q, 0 <= p <= Z_of_nat n -> 0 <= q <= Z_of_nat m -> (In (p, q) (mkRect n m)).
intros n m1; elim n; simpl.
intros p q (H1, H2) (H3, H4); replace p with 0; auto with zarith.
rewrite <- app_nil_end; apply mkLine_in; auto.
intros n1 Rec p q (H1, H2) (H3, H4).
case (Zle_lt_or_eq p (Z_of_nat (S n1))); auto with zarith; intros H5.
rewrite inj_S in H5; apply in_or_app; auto with zarith.
apply in_or_app; left; subst; apply mkLine_in; auto with zarith.
Qed.
Theorem in_mkRect: forall n m p, In p (mkRect n m) -> exists p1, exists p2, 0 <= p1 <= Z_of_nat n /\ 0 <= p2 <= Z_of_nat m /\ p = (p1, p2).
intros n m1 p; elim n; clear n; simpl.
rewrite <- app_nil_end; intros H1.
case in_mkLine with (1 := H1).
intros p2 (H2, H3); exists 0; exists p2; auto with zarith.
intros n Rec H1.
case in_app_or with (1 := H1); intros H2.
case in_mkLine with (1 := H2).
intros p2 (H3, H4); exists (Z_of_nat (S n)); exists p2; subst; simpl; auto with zarith.
case Rec with (1 := H2); auto.
intros p1 (p2, (H3, (H4, H5))); exists p1; exists p2; repeat split; auto with zarith.
change (p1 <= Z_of_nat (S n)).
rewrite inj_S; auto with zarith.
Qed.
Theorem mkRect_ulist: forall n m, ulist (mkRect n m).
intros n; elim n; simpl; auto.
intros n1; rewrite <- app_nil_end; apply mkLine_ulist; auto.
intros n1 Rec m1; apply ulist_app; auto.
apply mkLine_ulist.
intros a H1 H2.
case in_mkLine with (1 := H1); intros p1 ((H3, H4), H5).
case in_mkRect with (1 := H2); intros p2 (p3, ((H6, H7), ((H8, H9), H10))).
subst; injection H10; clear H10; intros; subst.
contradict H7.
change (~ Z_of_nat (S n1) <= Z_of_nat n1).
rewrite inj_S; auto with zarith.
Qed.
(**************************************
mL is the list {(p, q) | 0 <= p < m /\ 0 <= q < m}
**************************************)
Definition mL := mkRect (Zabs_nat (m - 1)) (Zabs_nat (m -1)).
(**************************************
Some properties of mL
**************************************)
Theorem mL_length : length mL = Zabs_nat (m * m).
unfold mL; rewrite mkRect_length; simpl; apply inj_eq_inv.
repeat (rewrite inj_mult || rewrite inj_plus || rewrite Z_of_nat_Zabs_nat); simpl; auto with zarith.
eq_tac; auto with zarith.
Qed.
Theorem mL_in: forall p q, 0 <= p < m -> 0 <= q < m -> (In (p, q) mL).
intros p q (H1, H2) (H3, H4); unfold mL; apply mkRect_in; rewrite Z_of_nat_Zabs_nat; auto with zarith.
Qed.
Theorem in_mL: forall p, In p mL-> exists p1, exists p2, 0 <= p1 < m /\ 0 <= p2 < m /\ p = (p1, p2).
unfold mL; intros p H1; case in_mkRect with (1 := H1).
repeat rewrite Z_of_nat_Zabs_nat; auto with zarith.
intros p1 (p2, ((H2, H3), ((H4, H5), H6))); exists p1; exists p2; repeat split; auto with zarith.
Qed.
Theorem mL_ulist: ulist mL.
unfold mL; apply mkRect_ulist; auto.
Qed.
(**************************************
We define zpmult, the multiplication of pairs modulo m
**************************************)
Definition zpmult (p q: Z * Z) := let (x ,y) := pmult p q in (Zmod x m, Zmod y m).
(**************************************
Some properties of zpmult
**************************************)
Theorem zpmult_internal: forall p q, (In (zpmult p q) mL).
intros p q; unfold zpmult; case (pmult p q); intros z y; apply mL_in; auto with zarith.
apply Z_mod_lt; auto with zarith.
apply Z_mod_lt; auto with zarith.
Qed.
Theorem zpmult_assoc: forall p q r, (zpmult p (zpmult q r)) = (zpmult (zpmult p q) r).
assert (U: 0 < m); auto with zarith.
intros p q r; unfold zpmult.
generalize (pmult_assoc p q r).
case (pmult p q); intros x1 x2.
case (pmult q r); intros y1 y2.
case p; case r; unfold pmult.
intros z1 z2 t1 t2 H.
match goal with
H: (?X, ?Y) = (?Z, ?T) |- _ =>
assert (H1: X = Z); assert (H2: Y = T); try (injection H; simpl; auto; fail); clear H
end.
eq_tac.
generalize (f_equal (fun x => x mod m) H1).
repeat rewrite <- Zmult_assoc.
repeat (rewrite (fun x => Zmod_plus (t1 * x))); auto.
repeat (rewrite (fun x => Zmod_plus (x1 * x))); auto.
repeat (rewrite (fun x => Zmod_plus (x1 mod m * x))); auto.
repeat (rewrite (Zmod_mult t1)); auto.
repeat (rewrite (Zmod_mult x1)); auto.
repeat (rewrite (Zmod_mult base)); auto.
repeat (rewrite (Zmod_mult t2)); auto.
repeat (rewrite (Zmod_mult x2)); auto.
repeat (rewrite (Zmod_mult (t2 mod m))); auto.
repeat (rewrite (Zmod_mult (x1 mod m))); auto.
repeat (rewrite (Zmod_mult (x2 mod m))); auto.
repeat (rewrite Zmod_mod); auto.
generalize (f_equal (fun x => x mod m) H2).
repeat (rewrite (fun x => Zmod_plus (t1 * x))); auto.
repeat (rewrite (fun x => Zmod_plus (x1 * x))); auto.
repeat (rewrite (fun x => Zmod_plus (x1 mod m * x))); auto.
repeat (rewrite (Zmod_mult t1)); auto.
repeat (rewrite (Zmod_mult x1)); auto.
repeat (rewrite (Zmod_mult t2)); auto.
repeat (rewrite (Zmod_mult x2)); auto.
repeat (rewrite (Zmod_mult (t2 mod m))); auto.
repeat (rewrite (Zmod_mult (x1 mod m))); auto.
repeat (rewrite (Zmod_mult (x2 mod m))); auto.
repeat (rewrite Zmod_mod); auto.
Qed.
Theorem zpmult_0_l: forall p, (zpmult (0, 0) p) = (0, 0).
intros p; case p; intros x y; unfold zpmult, pmult; simpl.
rewrite Zmod_def_small; auto with zarith.
Qed.
Theorem zpmult_1_l: forall p, In p mL -> zpmult (1, 0) p = p.
intros p H; case in_mL with (1 := H); clear H; intros p1 (p2, ((H1, H2), (H3, H4))); subst.
unfold zpmult; rewrite pmult_1_l.
repeat rewrite Zmod_def_small; auto with zarith.
Qed.
Theorem zpmult_1_r: forall p, In p mL -> zpmult p (1, 0) = p.
intros p H; case in_mL with (1 := H); clear H; intros p1 (p2, ((H1, H2), (H3, H4))); subst.
unfold zpmult; rewrite pmult_1_r.
repeat rewrite Zmod_def_small; auto with zarith.
Qed.
Theorem zpmult_comm: forall p q, zpmult p q = zpmult q p.
intros p q; unfold zpmult; rewrite pmult_comm; auto.
Qed.
(**************************************
We are now ready to build our group
**************************************)
Definition PGroup : (FGroup zpmult).
apply IGroup with (support := mL) (e:= (1, 0)).
exact P_dec.
apply mL_ulist.
apply mL_in; auto with zarith.
intros; apply zpmult_internal.
intros; apply zpmult_assoc.
exact zpmult_1_l.
exact zpmult_1_r.
Defined.
End Mod.
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
class Handler:
def __init__(self):
self.name = __name__
def predict_transform(self, prediction, inp_dim, anchors, num_classes, CUDA=True):
batch_size = prediction.size(0)
stride = inp_dim // prediction.size(2)
grid_size = inp_dim // stride
bbox_attrs = 5 + num_classes
num_anchors = len(anchors)
prediction = prediction.view(batch_size, bbox_attrs * num_anchors, grid_size * grid_size)
prediction = prediction.transpose(1, 2).contiguous()
prediction = prediction.view(batch_size, grid_size * grid_size * num_anchors, bbox_attrs)
anchors = [(a[0] / stride, a[1] / stride) for a in anchors]
        # Sigmoid the centre_X, centre_Y, and object confidence
prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
prediction[:, :, 4] = torch.sigmoid(prediction[:, :, 4])
# Add the center offsets
grid = np.arange(grid_size)
a, b = np.meshgrid(grid, grid)
x_offset = torch.FloatTensor(a).view(-1, 1)
y_offset = torch.FloatTensor(b).view(-1, 1)
if CUDA:
x_offset = x_offset.cuda()
y_offset = y_offset.cuda()
x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1, num_anchors).view(-1, 2).unsqueeze(0)
prediction[:, :, :2] += x_y_offset
# log space transform height and the width
anchors = torch.FloatTensor(anchors)
if CUDA:
anchors = anchors.cuda()
anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)
prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4]) * anchors
prediction[:, :, 5: 5 + num_classes] = torch.sigmoid((prediction[:, :, 5: 5 + num_classes]))
prediction[:, :, :4] *= stride
return prediction
def get_test_input(self):
img = cv2.imread("../example.jpg")
img = cv2.resize(img, (608, 608)) # Resize to the input dimension
img_ = img[:, :, ::-1].transpose((2, 0, 1)) # BGR -> RGB | H X W C -> C X H X W
img_ = img_[np.newaxis, :, :, :] / 255.0 # Add a channel at 0 (for batch) | Normalise
        img_ = torch.from_numpy(img_).float()  # Convert to a float tensor
        return img_
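# A minimal smoke test for predict_transform, assuming torch is available.
# The anchor sizes and class count below are illustrative values (the ones
# commonly used for YOLOv3's coarsest 608x608 head), not taken from this repo.
if __name__ == "__main__":
    handler = Handler()
    anchors = [(116, 90), (156, 198), (373, 326)]
    num_classes = 80
    # Fake raw network output: (batch, anchors * (5 + classes), grid, grid)
    raw = torch.randn(1, len(anchors) * (5 + num_classes), 19, 19)
    out = handler.predict_transform(raw, inp_dim=608, anchors=anchors,
                                    num_classes=num_classes, CUDA=False)
    print(out.shape)  # expected: torch.Size([1, 19*19*3, 85])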
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\file ScanNet.py
\brief Code to train a segmentation network on the ScanNet dataset.
\copyright Copyright (c) 2018 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
\author pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import sys
import math
import time
import argparse
import importlib
import os
import numpy as np
import tensorflow as tf
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from PyUtils import visualize_progress
from ScanNetDataSet import ScanNetDataSet
current_milli_time = lambda: time.time() * 1000.0
def create_loss(logits, labels, labelWeights, weightDecay):
labels = tf.to_int64(labels)
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=tf.reshape(labels, [-1]), logits=logits,
weights=tf.reshape(labelWeights, [-1]), scope='xentropy')
xentropyloss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    regularizer = tf.contrib.layers.l2_regularizer(scale=weightDecay)
regVariables = tf.get_collection('weight_decay_loss')
regTerm = tf.contrib.layers.apply_regularization(regularizer, regVariables)
return xentropyloss, regTerm
def create_accuracy(logits, labels, inAccWeights, scope):
_, logitsIndexs = tf.nn.top_k(logits)
with tf.variable_scope(scope):
return tf.metrics.accuracy(labels, logitsIndexs, weights=inAccWeights)
def create_training(lossGraph, learningRate, maxLearningRate, learningDecayFactor, learningRateDecay, epoch_step):
learningRateExp = tf.train.exponential_decay(learningRate, epoch_step, learningRateDecay, learningDecayFactor, staircase=True)
learningRateExp = tf.maximum(learningRateExp, maxLearningRate)
optimizer = tf.train.AdamOptimizer(learning_rate =learningRateExp)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(lossGraph)
return train_op, learningRateExp
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Script to train MCCNN for segmentation tasks (ScanNet)')
parser.add_argument('--logFolder', default='log', help='Folder of the output models (default: log)')
parser.add_argument('--model', default='MCSegScanNet', help='model (default: MCSegScanNet)')
parser.add_argument('--grow', default=32, type=int, help='Grow rate (default: 32)')
parser.add_argument('--batchSize', default=4, type=int, help='Batch size (default: 4)')
parser.add_argument('--maxEpoch', default=201, type=int, help='Max Epoch (default: 201)')
parser.add_argument('--initLearningRate', default=0.005, type=float, help='Init learning rate (default: 0.005)')
    parser.add_argument('--learningDecayFactor', default=0.5, type=float, help='Learning decay factor (default: 0.5)')
parser.add_argument('--learningDecayRate', default=20, type=int, help='Learning decay rate (default: 20 Epochs)')
    parser.add_argument('--maxLearningRate', default=0.00001, type=float, help='Lower bound for the decayed learning rate (default: 0.00001)')
    parser.add_argument('--useDropOut', action='store_true', help='Use drop out (default: False)')
    parser.add_argument('--dropOutKeepProb', default=0.5, type=float, help='Keep neuron probability for drop out (default: 0.5)')
    parser.add_argument('--useDropOutConv', action='store_true', help='Use drop out in convolution layers (default: False)')
    parser.add_argument('--dropOutKeepProbConv', default=0.8, type=float, help='Keep neuron probability for drop out in convolution layers (default: 0.8)')
parser.add_argument('--weightDecay', default=0.0, type=float, help='Weight decay (default: 0.0)')
parser.add_argument('--maxNumPts', default=600000, type=int, help='Maximum number of points (default: 600000)')
parser.add_argument('--ptDropOut', default=0.85, type=float, help='Point drop out (default: 0.85)')
parser.add_argument('--gpu', default='0', help='GPU (default: 0)')
parser.add_argument('--gpuMem', default=1.0, type=float, help='GPU memory used (default: 1.0)')
parser.add_argument('--augment', action='store_true', help='Augment data (default: False)')
    parser.add_argument('--useColor', action='store_true', help='Use color information (default: False)')
args = parser.parse_args()
#Create log folder.
if not os.path.exists(args.logFolder): os.mkdir(args.logFolder)
os.system('cp ../models/%s.py %s' % (args.model, args.logFolder))
os.system('cp ScanNet.py %s' % (args.logFolder))
logFile = args.logFolder+"/log.txt"
#Write execution info.
with open(logFile, "a") as myFile:
myFile.write("Model: "+args.model+"\n")
myFile.write("Grow: "+str(args.grow)+"\n")
myFile.write("BatchSize: "+str(args.batchSize)+"\n")
myFile.write("MaxEpoch: "+str(args.maxEpoch)+"\n")
myFile.write("InitLearningRate: "+str(args.initLearningRate)+"\n")
myFile.write("LearningDeacyFactor: "+str(args.learningDeacyFactor)+"\n")
myFile.write("LearningDecayRate: "+str(args.learningDecayRate)+"\n")
myFile.write("MaxLearningRate: "+str(args.maxLearningRate)+"\n")
myFile.write("UseDropOut: "+str(args.useDropOut)+"\n")
myFile.write("DropOutKeepProb: "+str(args.dropOutKeepProb)+"\n")
myFile.write("UseDropOutConv: "+str(args.useDropOutConv)+"\n")
myFile.write("DropOutKeepProbConv: "+str(args.dropOutKeepProbConv)+"\n")
myFile.write("WeightDecay: "+str(args.weightDecay)+"\n")
myFile.write("MaxNumPts: "+str(args.maxNumPts)+"\n")
myFile.write("ptDropOut: "+str(args.ptDropOut)+"\n")
myFile.write("Augment: "+str(args.augment)+"\n")
myFile.write("Use color: "+str(args.useColor)+"\n")
print("Model: "+args.model)
print("Grow: "+str(args.grow))
print("BatchSize: "+str(args.batchSize))
print("MaxEpoch: "+str(args.maxEpoch))
print("InitLearningRate: "+str(args.initLearningRate))
print("LearningDeacyFactor: "+str(args.learningDeacyFactor))
print("LearningDecayRate: "+str(args.learningDecayRate))
print("MaxLearningRate: "+str(args.maxLearningRate))
print("UseDropOut: "+str(args.useDropOut))
print("DropOutKeepProb: "+str(args.dropOutKeepProb))
print("UseDropOutConv: "+str(args.useDropOutConv))
print("DropOutKeepProbConv: "+str(args.dropOutKeepProbConv))
print("WeightDecay: "+str(args.weightDecay))
print("MaxNumPts: "+str(args.maxNumPts))
print("ptDropOut: "+str(args.ptDropOut))
print("Augment: "+str(args.augment))
print("Use color: "+str(args.useColor))
#Load the model
model = importlib.import_module(args.model)
#Get train and test files
mTrainDataSet = ScanNetDataSet(0, args.batchSize, args.ptDropOut,
args.maxNumPts, args.augment, args.useColor)
mTestDataSet = ScanNetDataSet(1, 1, 1.0, 0, False, args.useColor)
semLabels = mTrainDataSet.get_labels()
print(semLabels)
numTrainRooms = mTrainDataSet.get_num_models()
numTestRooms = mTestDataSet.get_num_models()
print("Num train rooms: " + str(numTrainRooms))
print("Num test rooms: " + str(numTestRooms))
#Create variable and place holders
epoch_step = tf.Variable(0, name='epoch_step', trainable=False)
inPts = tf.placeholder(tf.float32, [None, 3])
inBatchIds = tf.placeholder(tf.int32, [None, 1])
if args.useColor:
inFeatures = tf.placeholder(tf.float32, [None, 4])
else:
inFeatures = tf.placeholder(tf.float32, [None, 1])
inLabels = tf.placeholder(tf.int32, [None, 1])
inWeights = tf.placeholder(tf.float32, [None, 1])
inAccWeights = tf.placeholder(tf.float32, [None, 1])
isTraining = tf.placeholder(tf.bool)
keepProbConv = tf.placeholder(tf.float32)
keepProbFull = tf.placeholder(tf.float32)
#Accuracy placeholders
iouVal = tf.placeholder(tf.float32)
accuracyVal = tf.placeholder(tf.float32)
voxelAccuracyVal = tf.placeholder(tf.float32)
#Increment epoch step
increment_epoch_step_op = tf.assign(epoch_step, epoch_step+1)
#Create the network
numInputs = 1
if args.useColor:
numInputs = 4
logits = model.create_network(inPts, inBatchIds, inFeatures, numInputs, len(semLabels), args.batchSize,
args.grow, isTraining, keepProbConv, keepProbFull, args.useDropOutConv, args.useDropOut)
#Create predict labels
predictedLabels = tf.argmax(logits, 1)
#Create loss
xentropyLoss, regularizationLoss = create_loss(logits, inLabels, inWeights, args.weightDecay)
loss = xentropyLoss + regularizationLoss
#Create training
    training, learningRateExp = create_training(loss,
        args.initLearningRate, args.maxLearningRate, args.learningDecayFactor,
        args.learningDecayRate, epoch_step)
    learningRateSumm = tf.summary.scalar('learningRate', learningRateExp)
#Create accuracy metric
accuracyVal, accuracyAccumOp = create_accuracy(logits, inLabels, inAccWeights, 'metrics')
metricsVars = tf.contrib.framework.get_variables('metrics', collection=tf.GraphKeys.LOCAL_VARIABLES)
resetMetrics = tf.variables_initializer(metricsVars)
    #Create summaries
lossSummary = tf.summary.scalar('loss', loss)
xEntropyLossSummary = tf.summary.scalar('loss_XEntropy', xentropyLoss)
regularizationLossSummary = tf.summary.scalar('loss_Regularization', regularizationLoss)
trainingSummary = tf.summary.merge([lossSummary, xEntropyLossSummary, regularizationLossSummary, learningRateSumm])
metricsSummary = tf.summary.scalar('accuracy', accuracyVal)
metricsTestSummary = tf.summary.merge([tf.summary.scalar('Test_Accuracy', accuracyVal), tf.summary.scalar('Test_Voxel_Accuracy', voxelAccuracyVal),
tf.summary.scalar('Test_IoU', iouVal)], name='TestMetrics')
#Create init variables
init = tf.global_variables_initializer()
initLocal = tf.local_variables_initializer()
#create the saver
saver = tf.train.Saver(max_to_keep=100)
#Create session
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpuMem, visible_device_list=args.gpu)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
#Create the summary writer
summary_writer = tf.summary.FileWriter(args.logFolder, sess.graph)
summary_writer.add_graph(sess.graph)
#Init variables
sess.run(init)
sess.run(initLocal)
step = 0
epochStep = 0
maxIoU = 0.0
maxAccuracy = 0.0
maxVoxAccuracy = 0.0
maxNoMeantotalAccuracy = 0.0
np.random.seed(int(time.time()))
#Train
for epoch in range(args.maxEpoch):
print("############################## Epoch %3d training" %(epoch))
startEpochTime = current_milli_time()
startTrainTime = current_milli_time()
epochStep = 0
sess.run(resetMetrics)
#Iterate over all the train files
currRoomITer = 0
mTrainDataSet.start_iteration()
while mTrainDataSet.has_more_batches():
modelsxBatch, trainPoints, trainBatchIds, trainFeatures, trainLabels, _, paths = mTrainDataSet.get_next_batch()
trainWeights = mTrainDataSet.get_weights(trainLabels)
trainAccWeights = mTrainDataSet.get_accuracy_masks(trainLabels)
currRoomITer += modelsxBatch
startProcessTime = current_milli_time()
            _, lossRes, xentropyLossRes, regularizationLossRes, trainingSummRes, _ = \
                sess.run([training, loss, xentropyLoss, regularizationLoss, trainingSummary, accuracyAccumOp],
{inPts: trainPoints, inBatchIds: trainBatchIds, inFeatures: trainFeatures, inWeights: trainWeights,
inAccWeights: trainAccWeights, inLabels: trainLabels, isTraining: True, keepProbConv: args.dropOutKeepProbConv,
keepProbFull: args.dropOutKeepProb})
endProcessTime = current_milli_time()
summary_writer.add_summary(trainingSummRes, step)
endTrainTime = current_milli_time()
currAccuracy, metricsSummRes = sess.run([accuracyVal, metricsSummary])
summary_writer.add_summary(metricsSummRes, step)
visualize_progress(currRoomITer-1, numTrainRooms, "Loss: %.6f | Accuracy: %.4f | Time: %.4f [%.4f] | Num points: %d" % (
lossRes, currAccuracy*100.0, (endTrainTime-startTrainTime)/1000.0, (endProcessTime-startProcessTime), len(trainPoints)))
with open(logFile, "a") as myfile:
myfile.write("Step: %6d (%4d) | Loss: %.6f | Accuracy: %.4f | Num points: %d\n" % (
step, epochStep, lossRes, currAccuracy*100.0, len(trainPoints)))
sess.run(resetMetrics)
startTrainTime = current_milli_time()
step += 1
epochStep += 1
endEpochTime = current_milli_time()
print("Epoch %3d train time: %.4f" %(epoch, (endEpochTime-startEpochTime)/1000.0))
with open(logFile, "a") as myfile:
myfile.write("Epoch %3d train time: %.4f \n" %(epoch, (endEpochTime-startEpochTime)/1000.0))
if epoch%10==0:
saver.save(sess, args.logFolder+"/check_model.ckpt", global_step=epoch)
#Test data
print("############################## Epoch %3d evaluation" %(epoch))
it = 0
accumLoss = 0.0
accumTestLoss = 0.0
sess.run(resetMetrics)
accumIntersection = [0.0 for i in range(len(semLabels))]
accumUnion = [0.0 for i in range(len(semLabels))]
accumGt = [0.0 for i in range(len(semLabels))]
accumVox = [0.0 for i in range(len(semLabels))]
accumVoxGt = [0.0 for i in range(len(semLabels))]
mTestDataSet.start_iteration()
while mTestDataSet.has_more_batches():
_, points, batchIds, features, labels, _, _ = mTestDataSet.get_next_batch()
currAccWeights = mTestDataSet.get_accuracy_masks(labels)
lossRes, predictedLabelsRes = sess.run([loss, predictedLabels],
{inPts: points, inBatchIds: batchIds, inFeatures: features, inWeights: currAccWeights,
inAccWeights: currAccWeights, inLabels: labels, isTraining: False, keepProbConv: 1.0, keepProbFull: 1.0})
labels = labels.reshape((-1))
#Compute IoU
for k in range(len(predictedLabelsRes)):
if labels[k] != 0:
if labels[k] == predictedLabelsRes[k]:
accumIntersection[predictedLabelsRes[k]] = accumIntersection[predictedLabelsRes[k]] + 1.0
accumUnion[predictedLabelsRes[k]] = accumUnion[predictedLabelsRes[k]] + 1.0
else:
accumUnion[labels[k]] = accumUnion[labels[k]] + 1.0
accumUnion[predictedLabelsRes[k]] = accumUnion[predictedLabelsRes[k]] + 1.0
accumGt[labels[k]] = accumGt[labels[k]] + 1.0
accumLoss += lossRes
accumTestLoss += lossRes
            #Compute voxel accuracy: bucket the points into a 5cm voxel grid and
            #majority-vote the ground-truth labels and predictions per voxel.
            resolution = 0.05
            coordMax = np.amax(points, axis=0)
            coordMin = np.amin(points, axis=0)
            nVoxels = np.ceil((coordMax-coordMin)/resolution)
            vidx = np.ceil((points-coordMin)/resolution)
            #Flatten the 3D voxel index into a single scalar key per point.
            vidx = vidx[:,0]+vidx[:,1]*nVoxels[0]+vidx[:,2]*nVoxels[0]*nVoxels[1]
            uvidx = np.unique(vidx)
            voxelLabelCount = [np.bincount(labels[vidx==uv].astype(np.int32), minlength=len(semLabels)) for uv in uvidx]
            voxelPredLabelCount = [np.bincount(predictedLabelsRes[vidx==uv].astype(np.int32), minlength=len(semLabels)) for uv in uvidx]
            uvlabel = np.argmax(voxelLabelCount, axis=1)
            uvpredlabel = np.argmax(voxelPredLabelCount, axis=1)
            #A voxel only counts if less than 30% of its points are unlabeled and
            #its majority label is not the ignored class 0.
            validVoxels = [1 if float(voxelLabelCount[k][0])/float(np.sum(voxelLabelCount[k])) < 0.3 and uvlabel[k] > 0 else 0 for k in range(len(uvidx))]
for k in range(len(uvlabel)):
if validVoxels[k] == 1:
if uvlabel[k] == uvpredlabel[k]:
accumVox[uvlabel[k]] = accumVox[uvlabel[k]] + 1.0
accumVoxGt[uvlabel[k]] = accumVoxGt[uvlabel[k]] + 1.0
if (it+1)%10 == 0:
visualize_progress(it, numTestRooms, ("Loss: %.6f") % (accumLoss/10.0))
accumLoss = 0.0
it += 1
#Compute mean IoU
print("############################## Category IoU / Acc / VoxAcc")
meanIoUxCat = 0.0
totalAccuracy = 0.0
totalVoxAccuracy = 0.0
totalIntersection = 0.0
totalGt = 0.0
for i in range(1, len(semLabels)):
currMean = 0.0
if accumUnion[i] <= 0.0:
currMean = 1.0
else:
currMean = accumIntersection[i] / accumUnion[i]
currAccuracy = 0.0
if accumGt[i] <= 0.0:
currAccuracy = 1.0
else:
currAccuracy = accumIntersection[i] / accumGt[i]
currVoxAccuracy = 0.0
if accumVoxGt[i] <= 0.0:
currVoxAccuracy = 1.0
else:
currVoxAccuracy = accumVox[i] / accumVoxGt[i]
totalIntersection = totalIntersection + accumIntersection[i]
totalGt = totalGt + accumGt[i]
print("Mean category "+semLabels[i]+": %.4f | %.4f | %.4f" % (currMean*100.0, currAccuracy*100.0, currVoxAccuracy*100.0))
meanIoUxCat = meanIoUxCat + currMean
totalAccuracy = totalAccuracy + currAccuracy
totalVoxAccuracy = totalVoxAccuracy + currVoxAccuracy
meanIoUxCat = meanIoUxCat / float(len(semLabels)-1)
totalAccuracy = totalAccuracy / float(len(semLabels)-1)
totalVoxAccuracy = totalVoxAccuracy / float(len(semLabels)-1)
accumTestLoss = accumTestLoss/float(numTestRooms)
noMeantotalAccuracy = totalIntersection / totalGt
metricsTestSummRes = sess.run(metricsTestSummary, {iouVal: meanIoUxCat, accuracyVal : totalAccuracy, voxelAccuracyVal: totalVoxAccuracy})
summary_writer.add_summary(metricsTestSummRes, step)
#Print results
print("############################## Global Accuracy and IoU")
print("Loss: %.6f" % (accumTestLoss))
print("Test total accuracy: %.4f [%.4f]" % (noMeantotalAccuracy*100.0, maxNoMeantotalAccuracy*100.0))
print("Test accuracy: %.4f [%.4f]" % (totalAccuracy*100.0, maxAccuracy*100.0))
print("Test voxel accuracy: %.4f [%.4f] " % (totalVoxAccuracy*100.0, maxVoxAccuracy*100.0))
print("Test IoU %.4f [ %.4f ]" % (meanIoUxCat*100.0, maxIoU*100.0))
with open(logFile, "a") as myfile:
myfile.write("Loss: %.6f" % (accumTestLoss))
myfile.write("Test total accuracy: %.4f [%.4f]" % (noMeantotalAccuracy*100.0, maxNoMeantotalAccuracy*100.0))
myfile.write("Test accuracy: %.4f [%.4f]" % (totalAccuracy*100.0, maxAccuracy*100.0))
myfile.write("Test voxel accuracy: %.4f [%.4f] " % (totalVoxAccuracy*100.0, maxVoxAccuracy*100.0))
myfile.write("Test IoU %.4f [ %.4f ]" % (meanIoUxCat*100.0, maxIoU*100.0))
saveModel = False
if meanIoUxCat > maxIoU:
maxIoU = meanIoUxCat
saveModel = True
if totalAccuracy > maxAccuracy:
maxAccuracy = totalAccuracy
saveModel = True
if totalVoxAccuracy > maxVoxAccuracy:
maxVoxAccuracy = totalVoxAccuracy
saveModel = True
if noMeantotalAccuracy > maxNoMeantotalAccuracy:
maxNoMeantotalAccuracy = noMeantotalAccuracy
saveModel = True
if saveModel:
saver.save(sess, args.logFolder+"/model.ckpt", global_step=epoch)
endEpochTime = current_milli_time()
print("Epoch %3d train and test time: %.4f" %(epoch, (endEpochTime-startEpochTime)/1000.0))
with open(logFile, "a") as myfile:
myfile.write("Epoch %3d train and test time: %.4f \n" %(epoch, (endEpochTime-startEpochTime)/1000.0))
#Increment epoch step variable for the learning rate decay
sess.run(increment_epoch_step_op)
FUNCTION MV_EI32 ( n, valin, valout )
C************************************************************************
C* MV_EI32 *
C* *
C* This function converts an array of IEEE 32-bit real numbers to *
C* IBM 32-bit real numbers. The input and output arrays may be the *
C* same. *
C* *
C* INTEGER MV_EI32 ( N, VALIN, VALOUT ) *
C* *
C* Input parameters: *
C* N INTEGER Number of values to convert *
C* VALIN (N) REAL Input data *
C* *
C* Output parameters: *
C* VALOUT (N) REAL Converted data *
C* MV_EI32 INTEGER Return code *
C* 0 = normal return *
C* >0 = # of invalid inputs *
C** *
C* Log: *
C* R. Jones/NMC 06/90 *
C* K. Brill/NMC 02/91 Removed GOTO's; cleaned up *
C* M. desJardins/GSFC 3/91 Added byte swap for VMS *
C* M. Linda/GSC 10/97 Corrected the prologue format *
C************************************************************************
INTEGER valin (*), valout (*)
C*
INTEGER sign
C*
DATA MASKFR / '00FFFFFF'X /
DATA IBIT8 / '00800000'X /
DATA MASKSN / '7FFFFFFF'X /
DATA SIGN / '80000000'X /
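C
C*      IEEE layout: sign (bit 31), 8-bit base-2 exponent (bias 127),
C*      23-bit fraction with a hidden leading one.  IBM layout: sign,
C*      7-bit base-16 exponent (bias 64), 24-bit fraction, no hidden bit.
C*      MASKFR selects the 24-bit fraction field, IBIT8 makes the hidden
C*      leading bit explicit, MASKSN clears the sign bit, and SIGN is
C*      the sign bit itself.
C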
C------------------------------------------------------------------------
MV_EI32 = 0
C
C* Swap bytes to make this code which deals with integers work.
C* Note that this swap and the one at the end must be removed to
C* work on non-VMS systems.
C
ier = MV_SWP4 ( n, valin, valout )
C
C* Loop through the input array.
C
DO i = 1, n
C
C* Set the sign bit off.
C
isign = 0
itemp = valout (i)
C
C* Test the sign bit.
C
IF ( itemp .eq. 0 ) THEN
valout (i) = 0
ELSE
IF ( itemp .lt. 0 ) THEN
C
C* Turn the sign bit on.
C
isign = SIGN
C
C* Set the itemp sign bit off.
C
itemp = IAND ( itemp, MASKSN )
END IF
C*
ibmexp = ISHFT (itemp, -23)
C
C*          Test for an indefinite or NaN number.
C
IF ( ibmexp .eq. 255 ) THEN
C
C* Increment return code for indefinite
C* or NAN number.
C
MV_EI32 = MV_EI32 + 1
ELSE
C
C* Test for zero exponent and fraction
C* indicating underflow.
C
If ( ibmexp .eq. 0 ) THEN
valout (i) = 0
ELSE
ibmexp = ibmexp + 133
ibx7 = IAND ( 3, ibmexp )
ibmexp = IEOR ( ibmexp, ibx7 )
ibx7 = IEOR ( 3, ibx7 )
itemp = IOR ( itemp, IBIT8 )
itemp = IOR ( ISHFT ( ibmexp, 22 ),
+ ISHFT
+ ( IAND ( itemp, MASKFR ),
+ -ibx7) )
valout (i) = IOR ( itemp, isign )
END IF
END IF
END IF
END DO
C
C* Swap bytes back.
C
ier = MV_SWP4 ( n, valout, valout )
C*
RETURN
END
import pytest
import pandas as pd
import numpy as np
#from collections import namedtuple
#from collections import OrderedDict
import pyactms as ms
class TestActMS(object):
def test_getProtocol(self):
pass
#include <boost/static_string/static_string.hpp>
int main()
{
boost::static_string<5> s1("UVXYZ", 3);
return s1 == "UVX" ? 0 : 1;
}
/**
* @file VolumeBrickOctree.cpp
* @author Sebastian Maisch <sebastian.maisch@uni-ulm.de>
* @date 2015.08.17
*
* @brief Implementation of the layered octree.
*/
#include "VolumeBrickOctree.h"
#include "gfx/glrenderer/GLTexture3D.h"
#include "gfx/glrenderer/GLTexture.h"
#include "app/ApplicationBase.h"
#include <boost/assign.hpp>
#include <glm/gtc/matrix_transform.hpp>
namespace cgu {
namespace cguOctreeMath {
inline unsigned int calculateOverlapPixels(unsigned int val) {
return (val / VolumeBrickOctree::MAX_SIZE) << 1;
}
}
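/**
* Constructor initializing the members shared by all node types (used as
* a delegation target by the other constructors).
* @param pos the position in the texture.
* @param size the size of this part of the tree (in voxels).
* @param scale the scale of a voxel in this tree.
* @param lvl the level of this node inside the tree.
* @param minMaxProg the program used to create Min-/Max-Maps.
* @param uniformNames the uniform names of the minMaxProg.
*/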
VolumeBrickOctree::VolumeBrickOctree(const glm::uvec3& pos, const glm::uvec3& size, const glm::vec3& scale,
unsigned int lvl, GPUProgram* minMaxProg, const std::vector<BindingLocation> uniformNames) :
posOffset(pos),
origSize(size),
voxelScale(scale),
level(lvl),
hasAnyData(true),
minTexValue(0.0f),
maxTexValue(1.0f),
maxLevel(0),
streamFile(nullptr),
dataSize(0),
brickTextureDesc(4, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE),
minMaxMapProgram(minMaxProg),
minMaxMapUniformNames(uniformNames)
{
}
/**
* Constructor.
* @param texData the 3D texture resource to create the tree from.
* @param pos the position in the texture.
* @param size the size of this part of the tree (in voxels).
* @param scale the scale of a voxel in this tree.
* @param minMaxProg the program used to create Min-/Max-Maps.
* @param uniformNames the uniform names of the minMaxProg.
* @param app the application object.
*/
VolumeBrickOctree::VolumeBrickOctree(const GLTexture3D* texData, const glm::uvec3& pos, const glm::uvec3& size,
const glm::vec3& scale, GPUProgram* minMaxProg, const std::vector<BindingLocation> uniformNames,
ApplicationBase* app) :
VolumeBrickOctree(pos, size, scale, 0, minMaxProg, uniformNames)
{
if (origSize.x > MAX_SIZE || origSize.y > MAX_SIZE || origSize.z > MAX_SIZE) {
auto ovlp = cguOctreeMath::calculateOverlapPixels(glm::max(origSize.x, glm::max(origSize.y, origSize.z)));
glm::uvec3 sizeOverlap{ ovlp };
auto sizeWithOverlap = origSize + sizeOverlap;
glm::uvec3 sizePowerOfTwo{ cguMath::roundupPow2(origSize.x), cguMath::roundupPow2(origSize.y),
cguMath::roundupPow2(origSize.z) };
if (sizeWithOverlap.x > sizePowerOfTwo.x) {
sizeWithOverlap.x = cguMath::roundupPow2(sizeWithOverlap.x);
sizeOverlap <<= 1;
} else sizeWithOverlap.x = sizePowerOfTwo.x;
if (sizeWithOverlap.y > sizePowerOfTwo.y) {
sizeWithOverlap.y = cguMath::roundupPow2(sizeWithOverlap.y);
sizeOverlap <<= 1;
} else sizeWithOverlap.y = sizePowerOfTwo.y;
if (sizeWithOverlap.z > sizePowerOfTwo.z) {
sizeWithOverlap.z = cguMath::roundupPow2(sizeWithOverlap.z);
sizeOverlap <<= 1;
} else sizeWithOverlap.z = sizePowerOfTwo.z;
glm::uvec3 childSizeBase{ sizeWithOverlap.x >> 1, sizeWithOverlap.y >> 1, sizeWithOverlap.z >> 1 };
CreateNode(childSizeBase, texData, app);
} else {
CreateLeafTexture(texData);
}
}
VolumeBrickOctree::VolumeBrickOctree(const GLTexture3D* texData, const glm::uvec3& pos,
const glm::uvec3& size, unsigned int lvl, const glm::vec3& scale, GPUProgram* minMaxProg,
const std::vector<BindingLocation> uniformNames, ApplicationBase* app) :
VolumeBrickOctree(pos, size, scale, lvl, minMaxProg, uniformNames)
{
if (origSize.x > MAX_SIZE || origSize.y > MAX_SIZE || origSize.z > MAX_SIZE) {
glm::uvec3 sizePowerOfTwo{ cguMath::roundupPow2(origSize.x), cguMath::roundupPow2(origSize.y),
cguMath::roundupPow2(origSize.z) };
glm::uvec3 childSizeBase{ sizePowerOfTwo.x >> 1, sizePowerOfTwo.y >> 1, sizePowerOfTwo.z >> 1 };
CreateNode(childSizeBase, texData, app);
} else {
CreateLeafTexture(texData);
}
}
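/**
* Creates an inner node: recursively builds the eight children (sharing an
* overlap region), combines their bricks into a down-sampled brick for this
* node, streams that brick to a temporary file and releases the children's
* GPU data.
* @param childSizeBase the base size of a child node (in voxels).
* @param texData the 3D texture resource to create the tree from.
* @param app the application object.
*/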
void VolumeBrickOctree::CreateNode(const glm::uvec3& childSizeBase, const GLTexture3D* texData, ApplicationBase* app)
{
glm::uvec3 posOffsets[8];
posOffsets[0] = glm::uvec3(0, 0, 0);
posOffsets[1] = glm::uvec3(0, 0, 1);
posOffsets[2] = glm::uvec3(0, 1, 0);
posOffsets[3] = glm::uvec3(0, 1, 1);
posOffsets[4] = glm::uvec3(1, 0, 0);
posOffsets[5] = glm::uvec3(1, 0, 1);
posOffsets[6] = glm::uvec3(1, 1, 0);
posOffsets[7] = glm::uvec3(1, 1, 1);
auto ovlp = cguOctreeMath::calculateOverlapPixels(glm::max(childSizeBase.x, glm::max(childSizeBase.y, childSizeBase.z)));
for (unsigned int i = 0; i < 8; ++i) {
auto childPosOffset = posOffsets[i] * (childSizeBase - glm::uvec3(ovlp));
auto childPos = posOffset + childPosOffset;
auto childSize = glm::uvec3(glm::max(glm::ivec3(0),
glm::ivec3(glm::min(origSize, childPosOffset + childSizeBase)) - glm::ivec3(childPosOffset)));
if (childSizeBase.x == ovlp && posOffsets[i].x == 1) childSize.x = 0;
if (childSizeBase.y == ovlp && posOffsets[i].y == 1) childSize.y = 0;
if (childSizeBase.z == ovlp && posOffsets[i].z == 1) childSize.z = 0;
children[i].reset(new VolumeBrickOctree(texData, childPos, childSize, level + 1, voxelScale,
minMaxMapProgram, minMaxMapUniformNames, app));
}
texSize.x = (children[0]->texSize.x + children[4]->texSize.x) >> 1;
texSize.y = (children[0]->texSize.y + children[2]->texSize.y) >> 1;
texSize.z = (children[0]->texSize.z + children[1]->texSize.z) >> 1;
for (auto& child : children) {
maxLevel = glm::max(maxLevel, child->maxLevel);
}
CalculateTexBorders(texData);
CombineChildTextures(app);
WriteBrickTextureToTempFile();
for (auto& child : children) child->ResetAllData();
}
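/**
* Creates the brick texture of a leaf node directly from the volume data
* and streams it to a temporary file.
* @param texData the 3D texture resource to create the tree from.
*/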
void VolumeBrickOctree::CreateLeafTexture(const GLTexture3D* texData)
{
texSize = origSize;
maxLevel = level;
if (texSize.x * texSize.y * texSize.z == 0) {
dataSize = 0;
hasAnyData = false;
return;
}
CalculateTexBorders(texData);
brickTexture = texData->CreateMinMaxTexture(posOffset, origSize, brickTextureDesc);
WriteBrickTextureToTempFile();
}
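/**
* Calculates the texture coordinate borders (minTexValue / maxTexValue)
* used to clamp sampling to the brick interior wherever this brick has a
* neighboring brick.
* @param texData the 3D texture resource the tree is created from.
*/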
void VolumeBrickOctree::CalculateTexBorders(const GLTexture3D* texData)
{
if (posOffset.x != 0) minTexValue.x = 1.0f / static_cast<float>(texSize.x);
if (posOffset.y != 0) minTexValue.y = 1.0f / static_cast<float>(texSize.y);
if (posOffset.z != 0) minTexValue.z = 1.0f / static_cast<float>(texSize.z);
if (posOffset.x + origSize.x < texData->GetSize().x) maxTexValue.x = (static_cast<float>(texSize.x) - 1.0f) / static_cast<float>(texSize.x);
if (posOffset.y + origSize.y < texData->GetSize().y) maxTexValue.y = (static_cast<float>(texSize.y) - 1.0f) / static_cast<float>(texSize.y);
if (posOffset.z + origSize.z < texData->GetSize().z) maxTexValue.z = (static_cast<float>(texSize.z) - 1.0f) / static_cast<float>(texSize.z);
}
/**
* Destructor.
*/
VolumeBrickOctree::~VolumeBrickOctree()
{
if (streamFile) std::fclose(streamFile);
streamFile = nullptr;
}
/**
* Reloads all data from this tree node.
*/
void VolumeBrickOctree::ReloadData()
{
if (dataSize == 0) return;
std::fseek(streamFile, 0, SEEK_SET);
std::vector<uint8_t> data;
data.resize(dataSize);
std::fread(data.data(), sizeof(uint8_t), dataSize, streamFile);
brickTexture.reset(new GLTexture(texSize.x, texSize.y, texSize.z, brickTextureDesc, nullptr));
brickTexture->UploadData(data);
brickTexture->GenerateMinMaxMaps(minMaxMapProgram, minMaxMapUniformNames);
brickTexture->SampleLinear();
brickTexture->SampleWrapClamp();
}
/**
* Releases all the data from this tree node.
*/
void VolumeBrickOctree::ResetData()
{
brickTexture.reset(nullptr);
}
/**
* Releases all the data from this tree node and its children.
*/
void VolumeBrickOctree::ResetAllData()
{
if (hasAnyData) {
brickTexture.reset(nullptr);
if (children[0]) for (auto& child : children) child->ResetAllData();
hasAnyData = false;
}
}
/**
* Writes the brick texture to a temporary file.
*/
void VolumeBrickOctree::WriteBrickTextureToTempFile()
{
if (texSize.x * texSize.y * texSize.z == 0) {
dataSize = 0;
hasAnyData = false;
return;
}
std::vector<uint8_t> data;
brickTexture->DownloadData(data);
dataSize = static_cast<unsigned int>(data.size());
streamFile = std::tmpfile();
std::fwrite(data.data(), sizeof(uint8_t), dataSize, streamFile);
ReloadData();
}
/**
* Combines the 8 textures of this nodes children to a new texture.
* @param app the application object.
*/
void VolumeBrickOctree::CombineChildTextures(ApplicationBase* app)
{
if (texSize.x * texSize.y * texSize.z == 0) return;
GPUProgram* combineProg = nullptr;
brickTextureDesc.bytesPP = children[0]->brickTextureDesc.bytesPP;
brickTextureDesc.format = children[0]->brickTextureDesc.format;
brickTextureDesc.type = children[0]->brickTextureDesc.type;
brickTextureDesc.internalFormat = children[0]->brickTextureDesc.internalFormat;
if (brickTextureDesc.type == GL_UNSIGNED_BYTE) combineProg = app->GetGPUProgramManager()->GetResource("combineChildTextures8.cp");
else if (brickTextureDesc.type == GL_UNSIGNED_SHORT) combineProg = app->GetGPUProgramManager()->GetResource("combineChildTextures16.cp");
else if (brickTextureDesc.type == GL_UNSIGNED_INT) combineProg = app->GetGPUProgramManager()->GetResource("combineChildTextures32.cp");
else throw std::runtime_error("Pixel-type not supported.");
auto uniformNames = combineProg->GetUniformLocations({ "childTex", "combineTex", "childShift", "maxChunkSize" });
brickTexture.reset(new GLTexture(texSize.x, texSize.y, texSize.z, brickTextureDesc, nullptr));
//glm::uvec3 chunkSize = glm::uvec3(glm::ceil(glm::vec3(texSize) / 2.0f));
combineProg->UseProgram();
combineProg->SetUniform(uniformNames[0], 0);
combineProg->SetUniform(uniformNames[1], 1);
glm::uvec3 childShifts[8];
childShifts[0] = glm::uvec3(0, 0, 0);
childShifts[1] = glm::uvec3(0, 0, 1);
childShifts[2] = glm::uvec3(0, 1, 0);
childShifts[3] = glm::uvec3(0, 1, 1);
childShifts[4] = glm::uvec3(1, 0, 0);
childShifts[5] = glm::uvec3(1, 0, 1);
childShifts[6] = glm::uvec3(1, 1, 0);
childShifts[7] = glm::uvec3(1, 1, 1);
brickTexture->ActivateImage(1, 0, GL_WRITE_ONLY);
for (unsigned int i = 0; i < 8; ++i) {
if (children[i]->dataSize != 0) {
auto numGroups = glm::ivec3(glm::ceil(glm::vec3(children[i]->texSize) / 8.0f));
combineProg->SetUniform(uniformNames[3], children[i]->texSize);
combineProg->SetUniform(uniformNames[2], childShifts[i]);
children[i]->brickTexture->ActivateImage(0, 0, GL_READ_ONLY);
OGL_CALL(glDispatchCompute, numGroups.x, numGroups.y, numGroups.z);
}
}
OGL_CALL(glMemoryBarrier, GL_ALL_BARRIER_BITS);
OGL_SCALL(glFinish);
}
/**
* Updates the loading state of the tree depending on the view frustum (in object space).
* @param camera the current camera to calculate the frustum for.
* @param world the world matrix of the parent node.
* @return whether there are any children with loaded data in the sub-tree.
*/
bool VolumeBrickOctree::UpdateFrustum(const cgu::CameraView& camera, const glm::mat4& world)
{
if (dataSize == 0) return false;
auto isCorrectLod = false; // TODO: add LOD mechanism here. [8/26/2015 Sebastian Maisch]
// if (level == maxLevel - 1) isCorrectLod = true;
auto localWorld = GetLocalWorld(world);
cguMath::AABB3<float> box{ { { glm::vec3(0.0f), glm::vec3(1.0f) } } };
if (!cguMath::AABBInFrustumTest(camera.GetViewFrustum(localWorld), box)) ResetAllData();
else if (!children[0]) {
if (!IsLoaded()) { ReloadData(); hasAnyData = true; }
} else {
if (isCorrectLod) {
if (IsLoaded()) for (auto& child : children) child->ResetAllData();
else { ReloadData(); hasAnyData = true; }
} else {
if (IsLoaded()) {
ResetData();
}
hasAnyData = false;
for (auto& child : children) hasAnyData = child->UpdateFrustum(camera, world) || hasAnyData;
}
}
return hasAnyData;
}
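/**
* Collects all currently loaded bricks of the sub-tree together with their
* signed distance to the camera.
* @param camera the current camera.
* @param world the world matrix of the parent node.
* @param result the list the bricks and their distances are appended to.
*/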
void VolumeBrickOctree::GetRenderedBricksList(const cgu::CameraView& camera, const glm::mat4& world,
std::vector<std::pair<const VolumeBrickOctree*, float>>& result) const
{
if (!hasAnyData || dataSize == 0) return;
else if (IsLoaded()) {
auto childWorld = GetLocalWorld(world);
result.push_back(std::make_pair(this, camera.GetSignedDistanceToUnitAABB2(childWorld)));
}
else {
for (auto& child : children) child->GetRenderedBricksList(camera, world, result);
}
}
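/**
* Calculates the world matrix of this brick from its voxel offset, its size
* and the voxel scale (restricted to the part inside the texture borders).
* @param world the world matrix of the parent node.
* @return the world matrix of this brick.
*/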
glm::mat4 VolumeBrickOctree::GetLocalWorld(const glm::mat4& world) const
{
// glm::mat4 scaleVoxelMat = glm::scale(glm::mat4(), glm::vec3(origSize));
auto scaleVoxelMat = glm::scale(glm::mat4(), glm::vec3(origSize) * (maxTexValue - minTexValue));
auto scaleToWorldMat = glm::scale(glm::mat4(), voxelScale);
// glm::mat4 translateOffsetMat = glm::translate(glm::mat4(), glm::vec3(posOffset));
auto translateOffsetMat = glm::translate(glm::mat4(), glm::vec3(posOffset) + (minTexValue * glm::vec3(texSize)));
return world * scaleToWorldMat * translateOffsetMat * scaleVoxelMat;
}
}
[STATEMENT]
lemma rank_le_if_sorted_from_list:
assumes "sorted cmp'' ((v1,e1)#ys)" and "is_subtree (Node r0 {|(t0,e0)|}) (dtree_from_list v1 ys)"
shows "rank (rev r0) \<le> rank (rev (root t0))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
obtain e as bs where e_def: "as @ (r0, e) # (root t0, e0) # bs = ((v1,e1)#ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>as e bs. as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, e1) # ys \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using dtree_from_list_sequence[OF assms(2)]
[PROOF STATE]
proof (prove)
using this:
\<exists>e as bs. as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, ?e1.0) # ys
goal (1 subgoal):
1. (\<And>as e bs. as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, e1) # ys \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, e1) # ys
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, e1) # ys
[PROOF STEP]
have "sorted cmp'' (as @ (r0, e) # (root t0, e0) # bs)"
[PROOF STATE]
proof (prove)
using this:
as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, e1) # ys
goal (1 subgoal):
1. Sorting_Algorithms.sorted cmp'' (as @ (r0, e) # (dtree.root t0, e0) # bs)
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
as @ (r0, e) # (dtree.root t0, e0) # bs = (v1, e1) # ys
Sorting_Algorithms.sorted cmp'' ((v1, e1) # ys)
goal (1 subgoal):
1. Sorting_Algorithms.sorted cmp'' (as @ (r0, e) # (dtree.root t0, e0) # bs)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Sorting_Algorithms.sorted cmp'' (as @ (r0, e) # (dtree.root t0, e0) # bs)
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Sorting_Algorithms.sorted cmp'' (as @ (r0, e) # (dtree.root t0, e0) # bs)
[PROOF STEP]
have "sorted cmp'' ((r0, e) # (root t0, e0) # bs)"
[PROOF STATE]
proof (prove)
using this:
Sorting_Algorithms.sorted cmp'' (as @ (r0, e) # (dtree.root t0, e0) # bs)
goal (1 subgoal):
1. Sorting_Algorithms.sorted cmp'' ((r0, e) # (dtree.root t0, e0) # bs)
[PROOF STEP]
using sorted_app_r
[PROOF STATE]
proof (prove)
using this:
Sorting_Algorithms.sorted cmp'' (as @ (r0, e) # (dtree.root t0, e0) # bs)
Sorting_Algorithms.sorted ?cmp (?xs @ ?ys) \<Longrightarrow> Sorting_Algorithms.sorted ?cmp ?ys
goal (1 subgoal):
1. Sorting_Algorithms.sorted cmp'' ((r0, e) # (dtree.root t0, e0) # bs)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Sorting_Algorithms.sorted cmp'' ((r0, e) # (dtree.root t0, e0) # bs)
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Sorting_Algorithms.sorted cmp'' ((r0, e) # (dtree.root t0, e0) # bs)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Sorting_Algorithms.sorted cmp'' ((r0, e) # (dtree.root t0, e0) # bs)
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
using rank_le_if_ngt
[PROOF STATE]
proof (prove)
using this:
Sorting_Algorithms.sorted cmp'' ((r0, e) # (dtree.root t0, e0) # bs)
compare cmp'' (?r, ?e) (?r0.0, ?e0.0) \<noteq> Greater \<Longrightarrow> rank (rev ?r) \<le> rank (rev ?r0.0)
goal (1 subgoal):
1. rank (rev r0) \<le> rank (rev (dtree.root t0))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
rank (rev r0) \<le> rank (rev (dtree.root t0))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1766, "file": "Query_Optimization_IKKBZ", "length": 17}
|
# Autogenerated wrapper script for Tcl_jll for x86_64-linux-gnu
export libtcl
using Zlib_jll
JLLWrappers.@generate_wrapper_header("Tcl")
JLLWrappers.@declare_library_product(libtcl, "libtcl8.6.so")
function __init__()
JLLWrappers.@generate_init_header(Zlib_jll)
JLLWrappers.@init_library_product(
libtcl,
"lib/libtcl8.6.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
{"hexsha": "9086e17175f1b0ef3e86dba6082db841dd075735", "size": 450, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/x86_64-linux-gnu.jl", "max_stars_repo_name": "JuliaBinaryWrappers/Tcl_jll.jl", "max_stars_repo_head_hexsha": "94fce4d69586523974ace7b99a6eea42b17a2f85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/x86_64-linux-gnu.jl", "max_issues_repo_name": "JuliaBinaryWrappers/Tcl_jll.jl", "max_issues_repo_head_hexsha": "94fce4d69586523974ace7b99a6eea42b17a2f85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/x86_64-linux-gnu.jl", "max_forks_repo_name": "JuliaBinaryWrappers/Tcl_jll.jl", "max_forks_repo_head_hexsha": "94fce4d69586523974ace7b99a6eea42b17a2f85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:50:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:50:23.000Z", "avg_line_length": 26.4705882353, "max_line_length": 63, "alphanum_fraction": 0.7422222222, "num_tokens": 135}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import arviz as az
az.style.use('arviz-darkgrid')
μ = 0.
σ = 1.
X = stats.norm(μ, σ)
x = X.rvs(1500)
x[0:50]
x.mean()
mu_params = [-1, 0, 1]
sd_params = [0.5, 1, 1.5]
x = np.linspace(-7, 7, 2000)
_, ax = plt.subplots(len(mu_params), len(sd_params), sharex=True, sharey=True,
figsize=(7, 5), constrained_layout=True)
for i in range(3):
for j in range(3):
mu = mu_params[i]
sd = sd_params[j]
y = stats.norm(mu, sd).pdf(x)
ax[i,j].plot(x, y)
ax[i,j].plot([], label="μ = {:3.2f}\nσ = {:3.2f}".format(mu, sd), alpha=0)
ax[i,j].legend(loc=1)
ax[2,1].set_xlabel('x')
ax[1,0].set_ylabel('p(x)', rotation=0, labelpad=20)
ax[1,0].set_yticks([])
# plt.savefig('B11197_01_01.png', dpi=300);
data = np.genfromtxt('../data/mauna_loa_CO2.csv', delimiter=',')
plt.plot(data[:,0], data[:,1])
plt.xlabel('year')
plt.ylabel('$CO_2$ (ppmv)')
plt.savefig('B11197_01_02.png', dpi=300)
n_params = [1, 2, 4] # Number of trials
p_params = [0.25, 0.5, 0.75] # Probability of success
x = np.arange(0, max(n_params)+1)
f,ax = plt.subplots(len(n_params), len(p_params), sharex=True, sharey=True,
figsize=(8, 7), constrained_layout=True)
for i in range(len(n_params)):
for j in range(len(p_params)):
n = n_params[i]
p = p_params[j]
y = stats.binom(n=n, p=p).pmf(x)
ax[i,j].vlines(x, 0, y, colors='C0', lw=5)
ax[i,j].set_ylim(0, 1)
ax[i,j].plot(0, 0, label="N = {:3.2f}\nθ = {:3.2f}".format(n,p), alpha=0)
ax[i,j].legend()
ax[2,1].set_xlabel('y')
ax[1,0].set_ylabel('p(y | θ, N)')
ax[0,0].set_xticks(x)
plt.savefig('B11197_01_03.png', dpi=300)
params = [0.5, 1, 2, 3]
x = np.linspace(0, 1, 100)
f, ax = plt.subplots(len(params), len(params), sharex=True, sharey=True,
figsize=(8, 7), constrained_layout=True)
for i in range(4):
for j in range(4):
a = params[i]
b = params[j]
y = stats.beta(a, b).pdf(x)
ax[i,j].plot(x, y)
ax[i,j].plot(0, 0, label="α = {:2.1f}\nβ = {:2.1f}".format(a, b), alpha=0)
ax[i,j].legend()
ax[1,0].set_yticks([])
ax[1,0].set_xticks([0, 0.5, 1])
f.text(0.5, 0.05, 'θ', ha='center')
f.text(0.07, 0.5, 'p(θ)', va='center', rotation=0)
plt.savefig('B11197_01_04.png', dpi=300)
plt.figure(figsize=(10, 8))
n_trials = [0, 1, 2, 3, 4, 8, 16, 32, 50, 150]
data = [0, 1, 1, 1, 1, 4, 6, 9, 13, 48]
theta_real = 0.35
beta_params = [(1, 1), (20, 20), (1, 4)]
dist = stats.beta
x = np.linspace(0, 1, 200)
for idx, N in enumerate(n_trials):
if idx == 0:
plt.subplot(4, 3, 2)
plt.xlabel('θ')
else:
plt.subplot(4, 3, idx+3)
plt.xticks([])
y = data[idx]
for (a_prior, b_prior) in beta_params:
        # conjugate Beta-Binomial update: the posterior is Beta(a + y, b + N - y)
        p_theta_given_y = dist.pdf(x, a_prior + y, b_prior + N - y)
plt.fill_between(x, 0, p_theta_given_y, alpha=0.7)
plt.axvline(theta_real, ymax=0.3, color='k')
plt.plot(0, 0, label=f'{N:4d} trials\n{y:4d} heads', alpha=0)
plt.xlim(0, 1)
plt.ylim(0, 12)
plt.legend()
plt.yticks([])
plt.tight_layout()
plt.savefig('B11197_01_05.png', dpi=300)
np.random.seed(1)
az.plot_posterior({'θ':stats.beta.rvs(5, 11, size=1000)})
plt.savefig('B11197_01_07.png', dpi=300)
|
{"hexsha": "18858f828059bf503aa4f18f55ab6b20a0358be5", "size": 3387, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter01/.virtual_documents/01 Thinking Probabilistically.ipynb.py", "max_stars_repo_name": "Vedia-JerezDaniel/Bayesi_py", "max_stars_repo_head_hexsha": "3d598c5b740157eb4761989f3b4e357d1598b7cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter01/.virtual_documents/01 Thinking Probabilistically.ipynb.py", "max_issues_repo_name": "Vedia-JerezDaniel/Bayesi_py", "max_issues_repo_head_hexsha": "3d598c5b740157eb4761989f3b4e357d1598b7cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter01/.virtual_documents/01 Thinking Probabilistically.ipynb.py", "max_forks_repo_name": "Vedia-JerezDaniel/Bayesi_py", "max_forks_repo_head_hexsha": "3d598c5b740157eb4761989f3b4e357d1598b7cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4609375, "max_line_length": 82, "alphanum_fraction": 0.5724830233, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1264}
|
# -*- coding: utf-8 -*-
import os
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
"""
Utility functions for loading, saving,
processing and visualizing hdf5 timeseries files
"""
def path_to_sibling_folders(path):
"""
"""
parent = path.parent
for x in parent.iterdir():
if x.is_dir() and x != path:
yield x
def path_to_h5_file(data_folder):
"""
Input: string
data_folder: name of folder where h5 file is located
Output: string
        full path of the first .h5 file found, or None if there is none
"""
for file in os.listdir(data_folder):
if file.endswith(".h5"):
# print(os.path.join(data_folder, file))
return os.path.join(data_folder, file)
return None
def scale_time_series(data, scaling="Normal"):
"""
Scales data to specific range (default 0 to 1)
Input: numpy array
data: sensor data in time series format (samples, time_steps, features)
Output: numpy array
scaled_data: scaled data in time series format (samples, time_steps, features)
"""
array_scaled_data = []
if scaling == "Normal":
scaler = MinMaxScaler(feature_range=(0, 1))
for sample in data:
scaler = scaler.fit(sample)
scaled_data = scaler.transform(sample)
array_scaled_data.append(scaled_data)
return np.asarray(array_scaled_data)
elif scaling == "Standard":
scaler = StandardScaler()
for sample in data:
scaler = scaler.fit(sample)
scaled_data = scaler.transform(sample)
array_scaled_data.append(scaled_data)
return np.asarray(array_scaled_data)
else:
print("[INFO] no support for selected scaling method (returned original data)")
return data
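def _demo_scale_time_series():
    """Hedged usage sketch (added for illustration, not part of the original API):
    scaling preserves the (samples, time_steps, features) shape."""
    data = np.random.rand(4, 100, 3)
    scaled = scale_time_series(data, scaling="Standard")
    assert scaled.shape == data.shape
    return scaled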
def filter_timeseries_by_columns(data, columns):
"""
Filters features in multi-variate time series data
input (data): 3D numpy array with time series format
input (cols): array with columns (integer encoded) to be chosen
return (data): 3D numpy array with no. of featues = len(columns)
"""
return data[:, :, columns]
# return data[::, col_a: col_b + 1] # both ends
def plot_timeseries_data(data, downsample_data=False, col_name="Spn1ALxb1", single_col=False, col=0):
"""
"""
down_sample_factor = 1
fig = plt.gcf()
fig.set_size_inches(14.5, 8.5)
plt.xlabel("data_points")
plt.grid()
if downsample_data and not single_col:
print("ploting full dataset downsampled " \
"by a factor of %2d..."%(down_sample_factor))
plt.plot(data[::down_sample_factor,:])
elif downsample_data and single_col:
print("ploting column %2d of dataset downsampled " \
"by a factor of %2d..." %(col, down_sample_factor))
plt.plot(data[::down_sample_factor,col],
c=np.random.rand(3,), label=col_name)
plt.legend(loc="upper left")
else:
# TODO: modify this one to plot full single col (not downsampling)
print("ploting full dataset...")
plt.plot(data) # plot y using x as index array 0..N-1
plt.show()
def timeseries_to_pandas(numpy_ice_data):
return pd.DataFrame(data=numpy_ice_data)
class Color:
def __init__(self):
self.PURPLE = '\033[95m'
self.CYAN = '\033[96m'
self.DARKCYAN = '\033[36m'
self.BLUE = '\033[94m'
self.GREEN = '\033[92m'
self.YELLOW = '\033[93m'
self.RED = '\033[91m'
self.BOLD = '\033[1m'
self.UNDERLINE = '\033[4m'
self.END = '\033[0m'
|
{"hexsha": "539f0ec3f512eee32138d060dfe370e64c5d130e", "size": 3727, "ext": "py", "lang": "Python", "max_stars_repo_path": "hdf5/utils_hdf5.py", "max_stars_repo_name": "agrija9/Wind-Turbine-Anomaly-Detection-VRAE", "max_stars_repo_head_hexsha": "91d3c95f2f19cc9a57f5b9ddff5934cabf0cea30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-12-25T06:08:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T15:27:31.000Z", "max_issues_repo_path": "hdf5/utils_hdf5.py", "max_issues_repo_name": "agrija9/Wind-Turbine-Anomaly-Detection-VRAE", "max_issues_repo_head_hexsha": "91d3c95f2f19cc9a57f5b9ddff5934cabf0cea30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hdf5/utils_hdf5.py", "max_forks_repo_name": "agrija9/Wind-Turbine-Anomaly-Detection-VRAE", "max_forks_repo_head_hexsha": "91d3c95f2f19cc9a57f5b9ddff5934cabf0cea30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8914728682, "max_line_length": 101, "alphanum_fraction": 0.6337536893, "include": true, "reason": "import numpy", "num_tokens": 905}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: layers.py
# Author: Qian Ge <geqian1001@gmail.com>
import numpy as np
import tensorflow as tf
def Linear(inputs, out_dim, name='Linear', nl=tf.identity):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
inputs = batch_flatten(inputs)
in_dim = inputs.get_shape().as_list()[1]
weights = tf.get_variable('weights',
shape=[in_dim, out_dim],
initializer=None,
regularizer=None,
trainable=True)
biases = tf.get_variable('biases',
shape=[out_dim],
initializer=None,
regularizer=None,
trainable=True)
act = tf.nn.xw_plus_b(inputs, weights, biases)
return nl(act, name='output')
def batch_flatten(x):
"""
Flatten the tensor except the first dimension.
"""
shape = x.get_shape().as_list()[1:]
if None not in shape:
return tf.reshape(x, [-1, int(np.prod(shape))])
return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))
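if __name__ == '__main__':
    # Minimal sketch (assumes TF 1.x graph mode, as used above): Linear flattens
    # its input via batch_flatten, so a (None, 4, 4, 3) placeholder feeds a
    # 48-dimensional fully connected layer.
    x = tf.placeholder(tf.float32, [None, 4, 4, 3])
    y = Linear(x, out_dim=10, name='fc1', nl=tf.nn.relu)
    print(y.shape)  # (?, 10)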
|
{"hexsha": "bb18b7c77c755f1f4c314948972bca61e0795b86", "size": 1224, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/model/layers.py", "max_stars_repo_name": "kanedo/recurrent-attention-model", "max_stars_repo_head_hexsha": "8da82b7fbeb7c94859a1d4fd428ea112bde9a0c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-04-04T04:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-23T12:02:28.000Z", "max_issues_repo_path": "lib/model/layers.py", "max_issues_repo_name": "kanedo/recurrent-attention-model", "max_issues_repo_head_hexsha": "8da82b7fbeb7c94859a1d4fd428ea112bde9a0c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-04-04T13:33:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-29T13:09:57.000Z", "max_forks_repo_path": "lib/model/layers.py", "max_forks_repo_name": "kanedo/recurrent-attention-model", "max_forks_repo_head_hexsha": "8da82b7fbeb7c94859a1d4fd428ea112bde9a0c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-02-07T14:27:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-08T08:21:18.000Z", "avg_line_length": 34.0, "max_line_length": 59, "alphanum_fraction": 0.5114379085, "include": true, "reason": "import numpy", "num_tokens": 248}
|
import logging
import mlflow
import numpy as np
import optuna
from omegaconf import DictConfig
from optuna.integration.mlflow import MLflowCallback
from optuna.trial import Trial
from sklearn.metrics import accuracy_score, log_loss, roc_auc_score
from sklearn.model_selection import TimeSeriesSplit, train_test_split
from universal_data_catalog.data_catalog import DataCatalog
from src.elo.system import EloSystemFactory
def fit_elo_model(config: DictConfig, catalog: DataCatalog) -> None:
"""Fit a elo model to the data
Args:
config: hydra config
catalog: data catalog
"""
df = catalog.load("race_processed")
# Train test split
df = df.sort_values("race_driven")
df_train, df_test = train_test_split(df, shuffle=False, test_size=0.2)
# Create elo system factory
system_factory = EloSystemFactory(nb_tracks=df["track_id"].max())
# Add mlflow callback for model tracking
mlflc = MLflowCallback(**config.mlflow)
@mlflc.track_in_mlflow()
def _objective(trial: Trial) -> float:
# define hyperparameter search space
params = {
"c": trial.suggest_uniform("c", 0, 0.4),
"eta": trial.suggest_loguniform("eta", 200, 1200),
"M": trial.suggest_int("M", 3, 10),
"s": trial.suggest_loguniform("s", 0.1, 0.8),
"alpha": trial.suggest_uniform("alpha", 0.004, 0.007),
"i": trial.suggest_uniform("i", 0, 10),
"a": trial.suggest_uniform("a", -50, 50),
}
# define cross validation split
cv = TimeSeriesSplit(n_splits=3)
# Fit and evaluate the elo model for every fold in the cv split.
scores = []
for i, (train_idx, test_idx) in enumerate(cv.split(df_train)):
            # Fit on both train and test rows: the Elo system is sequential, so
            # ratings for the test rows come from rolling the system forward in time.
            train_split = df_train.iloc[np.concatenate([train_idx, test_idx])]  # type: ignore
# Create Elo System and fit it to the training data
elo_system = system_factory.get(**params)
results = elo_system.fit(train_split, desc=f"Fit Fold {i}") # type: ignore
# Get Results
y_proba = results["win_probability"].iloc[test_idx]
y_pred = (y_proba >= 0.5).astype(int) # type: ignore
y_true = results["challenger_is_winning"].iloc[test_idx]
# Calculate metrics
auc_score: float = roc_auc_score(y_true, y_proba) # type: ignore
accuracy = accuracy_score(y_true, y_pred)
logit_score = log_loss(y_true, y_proba)
# Log Metrics
mlflow.log_metric("auc_score", auc_score, step=i)
mlflow.log_metric("accuracy_score", accuracy, step=i)
mlflow.log_metric("logit_score", logit_score, step=i)
scores.append(logit_score)
# Return the mean score on all folds
return np.mean(scores)
# optimize hyperparameters
study = optuna.create_study(**config.study)
study.optimize(_objective, **config.optimize, callbacks=[mlflc])
# refit elo model with best params
logging.info("Refit best elo model.")
best_elo_model = system_factory.get(**study.best_params)
results = best_elo_model.fit(df)
results_test = results.iloc[len(df_train) :] # type: ignore
# save results back to catalog
catalog.save("elo_results", results)
catalog.save("elo_results_test", results_test)
catalog.save("elo_system", best_elo_model)
|
{"hexsha": "ca5286a8a92a5c8255a4bc04cf6a94f29293b3b3", "size": 3451, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/elo/step.py", "max_stars_repo_name": "AnH0ang/big-data-challenge", "max_stars_repo_head_hexsha": "aa9ba92e2355bc2838a037219d20d387e2eefdbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/elo/step.py", "max_issues_repo_name": "AnH0ang/big-data-challenge", "max_issues_repo_head_hexsha": "aa9ba92e2355bc2838a037219d20d387e2eefdbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/elo/step.py", "max_forks_repo_name": "AnH0ang/big-data-challenge", "max_forks_repo_head_hexsha": "aa9ba92e2355bc2838a037219d20d387e2eefdbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3263157895, "max_line_length": 94, "alphanum_fraction": 0.6554621849, "include": true, "reason": "import numpy", "num_tokens": 842}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.tests.helper import remote_data
from ... import datasets
@remote_data
def test_load_catalog_atnf():
catalog = datasets.load_catalog_atnf()
assert len(catalog) == 2399
# TODO: activate test when available
@remote_data
def _test_load_catalog_hess_galactic():
catalog = datasets.load_catalog_hess_galactic()
assert len(catalog) == 42
@remote_data
def test_load_catalog_green():
catalog = datasets.load_catalog_green()
assert len(catalog) == 294
# TODO: activate test when available
@remote_data
def _test_load_catalog_snrcat():
catalog = datasets.load_catalog_snrcat()
assert len(catalog) == 338
@remote_data
def test_load_catalog_tevcat():
catalog = datasets.load_catalog_tevcat()
assert len(catalog) == 173
|
{"hexsha": "d4699f9bae5ac73f393daed3019e719918c9c21d", "size": 942, "ext": "py", "lang": "Python", "max_stars_repo_path": "gammapy/datasets/tests/test_catalogs.py", "max_stars_repo_name": "joleroi/gammapy", "max_stars_repo_head_hexsha": "c4e0c4bd74c79d30e0837559d18b7a1a269f70d9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gammapy/datasets/tests/test_catalogs.py", "max_issues_repo_name": "joleroi/gammapy", "max_issues_repo_head_hexsha": "c4e0c4bd74c79d30e0837559d18b7a1a269f70d9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gammapy/datasets/tests/test_catalogs.py", "max_forks_repo_name": "joleroi/gammapy", "max_forks_repo_head_hexsha": "c4e0c4bd74c79d30e0837559d18b7a1a269f70d9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7894736842, "max_line_length": 66, "alphanum_fraction": 0.7452229299, "include": true, "reason": "from astropy", "num_tokens": 226}
|
#pragma once
#include "Concepts.hpp"
#include <boost/hana/tuple.hpp>
#include <span>
namespace codys {
template <typename Tag, typename Unit_>
struct State {
using Unit = Unit_;
using depends_on = boost::hana::tuple<State<Tag, Unit>>;
template <class SystemType, std::size_t N>
constexpr static double evaluate(std::span<const double, N> arr) {
return arr[SystemType::template idx_of<State<Tag, Unit>>()];
}
};
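// Hedged usage sketch (PositionTag/Metre are illustrative types, not part of the library):
//   struct PositionTag {};
//   struct Metre {};
//   using Position = State<PositionTag, Metre>;
//   static_assert(std::is_same_v<Position::Unit, Metre>);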
} // namespace codys
|
{"hexsha": "df4fe66cd2af7f8a07e23890d2135c6bd5cf4851", "size": 467, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/codys/State.hpp", "max_stars_repo_name": "hansepp/codys", "max_stars_repo_head_hexsha": "987af24cca75745916f4dad03f75a3d0a05586fd", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/codys/State.hpp", "max_issues_repo_name": "hansepp/codys", "max_issues_repo_head_hexsha": "987af24cca75745916f4dad03f75a3d0a05586fd", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/codys/State.hpp", "max_forks_repo_name": "hansepp/codys", "max_forks_repo_head_hexsha": "987af24cca75745916f4dad03f75a3d0a05586fd", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.3043478261, "max_line_length": 70, "alphanum_fraction": 0.6809421842, "num_tokens": 112}
|
using Random
using MLJ
using SigmaRidgeRegression
id_design = BlockCovarianceDesign([IdentityCovarianceDesign(), IdentityCovarianceDesign()])
ps = [100; 100]
grp = GroupedFeatures(ps)
design = set_groups(id_design, grp)
αs = [1.0; 1.0]
ntest = 20_000
ntrain = 400
ridge_sim = GroupRidgeSimulationSettings(;
groups = grp,
ntrain = ntrain,
ntest = ntest,
Σ = design,
response_model = RandomLinearResponseModel(; αs = αs, grp = grp),
)
Random.seed!(1)
sim_res = simulate(ridge_sim)
@test length.(sim_res.resampling_idx[1]) == (ntrain, ntest)
X_train = sim_res.X[sim_res.resampling_idx[1][1], :]
Y_train = sim_res.Y[sim_res.resampling_idx[1][1]]
X_test = sim_res.X[sim_res.resampling_idx[1][2], :]
Y_test = sim_res.Y[sim_res.resampling_idx[1][2]]
λs = [1.0; 2.0]
multiridge =
MultiGroupRidgeRegressor(; groups = grp, λs = λs, center = false, scale = false)
mse = mse_ridge(
StatsBase.fit(MultiGroupRidgeRegressor(; groups = grp, λs = λs), X_train, Y_train, grp),
X_test,
Y_test,
)
_mach = machine(multiridge, MLJ.table(sim_res.X), sim_res.Y)
_eval = evaluate!(_mach, resampling = sim_res.resampling_idx, measure = l2)
@test _eval.measurement[1] == mse
|
{"hexsha": "52ddac92d3a6255c0982cabd36c44e9555691566", "size": 1196, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_simulations.jl", "max_stars_repo_name": "nignatiadis/SigmaRidgeRegression.jl", "max_stars_repo_head_hexsha": "f9dc3300250c87200b66ddae17501e174c221f21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-28T21:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-05T05:15:37.000Z", "max_issues_repo_path": "test/test_simulations.jl", "max_issues_repo_name": "nignatiadis/SigmaRidgeRegression.jl", "max_issues_repo_head_hexsha": "f9dc3300250c87200b66ddae17501e174c221f21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-19T01:37:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-19T19:25:57.000Z", "max_forks_repo_path": "test/test_simulations.jl", "max_forks_repo_name": "nignatiadis/SigmaRidgeRegression.jl", "max_forks_repo_head_hexsha": "f9dc3300250c87200b66ddae17501e174c221f21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9166666667, "max_line_length": 92, "alphanum_fraction": 0.7090301003, "num_tokens": 380}
|
#!/usr/bin/env python
import numpy as np
from sys import argv
class Body:
def __init__(self, m, x_init, v_init):
self.m = m # in kg
self.x = x_init # in m
self.v = v_init # in m/s
def p_transfer(u1, u2, m1, m2):
# from conservation of energy and momentum
v1 = ((m1-m2)/(m1+m2))*u1 + ((2*m2)/(m1+m2))*u2
v2 = ((2*m1)/(m1+m2))*u1 + ((m2-m1)/(m1+m2))*u2
return v1, v2
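# Sanity check (illustrative): with equal masses the bodies simply exchange
# velocities, so a unit-speed body striking a resting one comes to rest.
assert p_transfer(1.0, 0.0, 1, 1) == (0.0, 1.0)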
def check_collisions(x1, x2):
collision_type = 0 # no collision
if x1 <= 0: # if collision with wall
collision_type += 1
if x2 <= x1: # if collision between boxes
collision_type += 2
# collision_type = 3 if both collisions are detected
return collision_type
def is_finished(v1, v2):
if v2>v1 and v1>0:
return True
else:
return False
def main(n, v_init):
"""v_init is now used as the measure of precision of the simulation thus reducing arithmetic calculating distance steps.
The simulation now only runs until no more collisions will occur. This check is somewhat demanding so only runs every 1000 iterations.
Error handling is more straightforward reducing number of checks per loop.
Reduced the starting distance between two bodies as this is a waste of time, the distance is now set such that they collide after first interval."""
# init objects
    small = Body(1, 1.-v_init, 0.) # 1 kg, stationary, one step to the left of the large body
    large = Body(10**(2*(n-1)), 1., -v_init) # heavy body at 1 m, moving left by v_init per step
# run simulation
cycles = 0
finished = False
collision_counter = 0
while not finished:
# increment cycles
cycles += 1
# move bodies
small.x += small.v
large.x += large.v
# check for collisions
collision_type = check_collisions(small.x, large.x)
# deal with collisions
if collision_type == 0:
pass # do nothing
elif collision_type == 1:
small.v = - small.v # reverse direction of small body
collision_counter += 1
elif collision_type == 2:
small.v, large.v = p_transfer(small.v, large.v, small.m, large.m) # adjust velocity of both
collision_counter += 1
else:
print("Temporal resolution too low to resolve individual collisions.")
break
        if cycles%1000 == 0: # finished check is more demanding, so only run it every 1000 increments
finished = is_finished(small.v, large.v)
print(f"The simulation counted {collision_counter} collisions after {cycles} increments.")
if __name__ == "__main__":
try:
n = int(argv[1])
v_init = float(argv[2])
except Exception as e:
print(e)
main(n, v_init)
|
{"hexsha": "f8e2ada27e916bf0c101dd88d5703645254deb50", "size": 2759, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python-implementation/collision-sim.py", "max_stars_repo_name": "physicodes/collision-sim", "max_stars_repo_head_hexsha": "aa90d7af9565d95996999b8f2dc4a1fe14d14308", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python-implementation/collision-sim.py", "max_issues_repo_name": "physicodes/collision-sim", "max_issues_repo_head_hexsha": "aa90d7af9565d95996999b8f2dc4a1fe14d14308", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python-implementation/collision-sim.py", "max_forks_repo_name": "physicodes/collision-sim", "max_forks_repo_head_hexsha": "aa90d7af9565d95996999b8f2dc4a1fe14d14308", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9891304348, "max_line_length": 152, "alphanum_fraction": 0.6248640812, "include": true, "reason": "import numpy", "num_tokens": 723}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from arch.api.utils import log_utils
from federatedml.framework.homo.procedure import aggregator
from federatedml.federatedrec.factorization_machine.fm_model_weight import FactorizationMachineWeights
from federatedml.federatedrec.factorization_machine.homo_factorization_machine.homo_fm_base import HomoFMBase
from federatedml.optim import activation
from federatedml.util import consts
LOGGER = log_utils.getLogger()
class HomoFMArbiter(HomoFMBase):
def __init__(self):
super(HomoFMArbiter, self).__init__()
# self.re_encrypt_times = [] # Record the times needed for each host
self.loss_history = []
self.is_converged = False
self.role = consts.ARBITER
self.aggregator = aggregator.Arbiter()
self.model_weights = None
self.host_predict_results = []
def _init_model(self, params):
super()._init_model(params)
def fit(self, data_instances=None, validate_data=None):
validation_strategy = self.init_validation_strategy()
model_shape = -1
embed_size = self.init_param_obj.embed_size
while self.n_iter_ < self.max_iter+1:
suffix = (self.n_iter_,)
if (self.n_iter_ > 0 and self.n_iter_ % self.aggregate_iters == 0) or self.n_iter_ == self.max_iter:
merged_model = self.aggregator.aggregate_and_broadcast(ciphers_dict=None,
suffix=suffix)
                if model_shape == -1:
                    # merged weight vector layout: model_shape linear weights plus
                    # model_shape * embed_size embedding entries (plus 1 if fit_intercept)
                    if self.init_param_obj.fit_intercept:
                        model_shape = int((len(merged_model._weights) - 1) / (embed_size + 1))
                    else:
                        model_shape = int(len(merged_model._weights) / (embed_size + 1))
# Initialize the model
fit_intercept = False
if self.init_param_obj.fit_intercept:
fit_intercept = True
self.init_param_obj.fit_intercept = False
w_ = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
embed_ = self.initializer.init_model([model_shape, self.init_param_obj.embed_size],
init_params=self.init_param_obj)
self.model_weights = \
FactorizationMachineWeights(w_, embed_, fit_intercept=fit_intercept)
total_loss = self.aggregator.aggregate_loss(suffix=suffix)
self.callback_loss(self.n_iter_, total_loss)
self.loss_history.append(total_loss)
if self.use_loss:
converge_var = total_loss
else:
converge_var = np.array(merged_model.unboxed)
self.is_converged = self.aggregator.send_converge_status(self.converge_func.is_converge,
(converge_var,),
suffix=(self.n_iter_,))
LOGGER.info("n_iters: {}, total_loss: {}, converge flag is :{}".format(self.n_iter_,
total_loss,
self.is_converged))
if self.is_converged:
break
# self.model_weights = FactorizationMachineWeights(merged_model.coef_, merged_model.embed_,
# self.model_param.init_param.fit_intercept)
merged_model._weights = np.array(merged_model._weights)
self.model_weights.update(merged_model)
if self.header is None:
self.header = ['x' + str(i) for i in
range(model_shape)]
validation_strategy.validate(self, self.n_iter_)
self.n_iter_ += 1
LOGGER.info("Finish Training task, total iters: {}".format(self.n_iter_))
def predict(self, data_instantces=None):
LOGGER.info(f'Start predict task')
|
{"hexsha": "da0f18e77c94a875f589bac0a6bb49c07b3135cb", "size": 4927, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/federatedml/federatedrec/factorization_machine/homo_factorization_machine/homo_fm_arbiter.py", "max_stars_repo_name": "showKstage/fut-fate", "max_stars_repo_head_hexsha": "77ef6fb84e9f8345aca3dda92ccc63b6a462bc20", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-21T06:06:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T06:06:48.000Z", "max_issues_repo_path": "python/federatedml/federatedrec/factorization_machine/homo_factorization_machine/homo_fm_arbiter.py", "max_issues_repo_name": "showKstage/fut-fate", "max_issues_repo_head_hexsha": "77ef6fb84e9f8345aca3dda92ccc63b6a462bc20", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/federatedml/federatedrec/factorization_machine/homo_factorization_machine/homo_fm_arbiter.py", "max_forks_repo_name": "showKstage/fut-fate", "max_forks_repo_head_hexsha": "77ef6fb84e9f8345aca3dda92ccc63b6a462bc20", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.046728972, "max_line_length": 112, "alphanum_fraction": 0.5812867871, "include": true, "reason": "import numpy", "num_tokens": 942}
|
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include <iostream>
#include <string>
#include <fstream>
#include <errno.h>
#include <boost/program_options.hpp>
using namespace std;
namespace po = boost::program_options;
#include "dmlparser.h"
#include "dml-gram.h"
using namespace dmlpackage;
int main(int argc, char* argv[])
{
string sqlfile;
    int count = 0;
po::options_description desc ("Allowed options");
desc.add_options ()
("help", "produce help message")
("bisond", /* po::value <string>(),*/ "Have bison produce debug output")
("count", po::value <int>(), "number of runs")
("sql", po::value < string > (), "sql file");
po::variables_map vm;
po::store (po::parse_command_line (argc, argv, desc), vm);
po::notify (vm);
if (vm.count ("sql"))
sqlfile = vm["sql"].as <string> ();
if (vm.count("count"))
count = vm["count"].as<int>();
DMLFileParser parser;
if (vm.count ("bisond"))
parser.setDebug(true);
parser.parse(sqlfile);
if (parser.good())
{
const ParseTree& ptree = parser.getParseTree();
cout << "Parser succeeded." << endl;
cout << ptree.fList.size() << " " << "SQL statements" << endl;
cout << ptree.fSqlText << endl;
cout << ptree;
SqlStatement* statementPtr = ptree[0];
if (statementPtr)
cout << statementPtr->getQueryString();
cout << endl;
}
else
{
cout << "Parser failed." << endl;
}
return parser.good() ? 0 : -1;
}
|
{"hexsha": "2df56453e43407262a9afaa11427c953e62d94a0", "size": 2215, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/dbcon/dmlpackage/gramtest.cpp", "max_stars_repo_name": "zettadb/zettalib", "max_stars_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/dbcon/dmlpackage/gramtest.cpp", "max_issues_repo_name": "zettadb/zettalib", "max_issues_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/dbcon/dmlpackage/gramtest.cpp", "max_forks_repo_name": "zettadb/zettalib", "max_forks_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2022-02-27T14:00:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:24:22.000Z", "avg_line_length": 26.369047619, "max_line_length": 76, "alphanum_fraction": 0.6316027088, "num_tokens": 549}
|
import sys
import numpy as np
from matplotlib import patches, collections, ticker, pyplot as plt
from skimage import io, morphology, measure
# width of vacuum opening in mm
ROOMBA_WIDTH = 180
SCALING_FACTOR = 10 # mm/pixel
class Polygon():
def __init__(self, points, center=[0, 0], snap=90, scaling=1):
self.snap_angle = snap
self.center = center
self.points = points
self.scaling = scaling
self.segment_vectors = None
self.segment_lengths = None
self.segment_unit_vectors = None
self.segment_orientation = None
self.segment_idx_maxlen = None
self.update_properties()
self.segment_done = np.zeros(len(self.segment_vectors))
def update_properties(self):
self.segment_vectors = np.diff(self.points, axis=0)
self.segment_lengths = np.linalg.norm(self.segment_vectors, axis=1)
self.segment_unit_vectors = np.divide(self.segment_vectors, np.array([self.segment_lengths, self.segment_lengths]).T)
self.segment_orientation = np.degrees(np.arctan2(self.segment_unit_vectors[:, 0], self.segment_unit_vectors[:, 1]))+180
self.segment_idx_maxlen = np.argmax(self.segment_lengths)
def align_longest_edge(self):
angle = self.segment_orientation[self.segment_idx_maxlen]
snapped = np.round(angle/self.snap_angle)*self.snap_angle
rotation = np.abs(angle)-snapped
# make longest edge the first segment
self.roll(-1*self.segment_idx_maxlen)
# rotate polygon to snapped longest segment
self.points = self._rotate2D(self.points, self.center, np.radians(rotation))
self.update_properties()
return self
def iterate_segments(self):
for n, vector in enumerate(self.segment_vectors):
segment = np.array([self.points[n], self.points[n+1]])
angle = self.segment_orientation[n]
snapped = np.round(angle/self.snap_angle)*self.snap_angle
rotation = (np.abs(angle)-snapped)
segment = self._rotate2D(segment, segment[0]+(segment[1]-segment[0])/2, np.radians(rotation))
self.points[n] = segment[0]
self.points[n+1] = segment[1]
self.update_properties()
# close polygon
self.points[0] = self.points[-1]
self.update_properties()
return self
def get_points(self):
return self.points
def get_avg_deviation(self):
rotation = []
for n, vector in enumerate(self.segment_vectors):
angle = self.segment_orientation[n]
snapped = np.round(angle/self.snap_angle)*self.snap_angle
rotation.append( np.abs((np.abs(angle)-snapped) ) )
return(np.average(rotation))
def force_snap(self):
for n, vector in enumerate(self.segment_vectors):
segment = np.array([self.points[n], self.points[n+1]])
angle = self.segment_orientation[n]
snapped = np.round(angle/self.snap_angle)*self.snap_angle
rotation = (np.abs(angle)-snapped)
segment = self._rotate2D(segment, segment[0], np.radians(rotation))
self.points[n] = segment[0]
self.points[n+1] = segment[1]
self.update_properties()
return self
def roll(self, index=0):
self.points = np.roll(self.points[:-1], index, axis=0)
self.points = np.append(self.points, [self.points[0]], axis=0)
return self
def close_poly(self):
# parallel lines, same orientation
# FIXME Find solution for this case
if np.abs(self.segment_orientation[0] - self.segment_orientation[-1]) < 1e-6:
print("parallel lines, same orientation")
# parallel lines, opposite orientation
# FIXME Find solution for this case
elif np.abs(self.segment_orientation[0] + self.segment_orientation[-1] - 360) < 1e-6:
print("parallel lines, opposite orientation")
else:
intersect = self.get_intersect(self.points[0], self.points[1], self.points[-2], self.points[-1])
# print(intersect)
# if not (np.isinf(intersect[0]) or np.isinf(intersect[1])):
self.points[0] = self.points[-1] = intersect
return self
def get_area(self):
""" return polygon area in squarepixels """
""" soruce: https://stackoverflow.com/questions/22678990/how-can-i-calculate-the-area-within-a-contour-in-python-using-the-matplotlib"""
x=self.points[:,0]
y=self.points[:,1]
area=0.5*np.sum(y[:-1]*np.diff(x) - x[:-1]*np.diff(y))
area=np.abs(area)*self.scaling**2
return(area)
def simplify(self):
""" will remove unnecessary points on segments """
# same orientation in consecutive segments means unnecessary points
duplicates = np.add(np.where( np.abs(np.diff(self.segment_orientation)) < 1e-6), 1)
self.points = np.delete(self.points, duplicates, axis=0)
self.update_properties()
return self
@staticmethod
def _rotate2D(points, center, angle):
return np.dot(points - center, np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]))+center
@staticmethod
def get_intersect(a1, a2, b1, b2):
"""
Returns the point of intersection of the lines passing through a2,a1 and b2,b1.
a1: [x, y] a point on the first line
a2: [x, y] another point on the first line
b1: [x, y] a point on the second line
b2: [x, y] another point on the second line
"""
s = np.vstack([a1,a2,b1,b2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2) # point of intersection
if z == 0: # lines are parallel
return (np.inf, np.inf)
return (x/z, y/z)
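# Worked check (illustrative, not part of the original module): the diagonals
# of the unit square intersect at its centre.
assert Polygon.get_intersect([0, 0], [1, 1], [0, 1], [1, 0]) == (0.5, 0.5)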
def Rotate2D(points, center, angle=np.pi/4):
return np.dot(points - center, np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]))+center
def renderArea(filename):
npzfile = np.load(filename)
points = npzfile["points"][:] * 11.8 # convert to mm
values = npzfile["values"][:]
minx=np.amin(points, axis=0)[0]
maxx=np.amax(points, axis=0)[0]
miny=np.amin(points, axis=0)[1]
maxy=np.amax(points, axis=0)[1]
print(minx, maxx, miny, maxy)
#fig, ax = plt.subplots()
fig = plt.figure(figsize=((maxx-minx+300)/100/SCALING_FACTOR,(maxy-miny+300)/100/SCALING_FACTOR), dpi=100)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# size and fixed aspect ratio
#fig.set_size_inches(10, 8)
ax.set_aspect('equal')
ax.set_xlim(minx-150, maxx+150)
ax.set_ylim(miny-150, maxy+150)
# set background colors
fig.patch.set_facecolor('black')
#ax.set_facecolor('black')
# all off
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
# call this before any transformations. reason is unknown
fig.canvas.draw()
# plot robot path with respect to width of vacuum unit (e.g. 180mm)
# from https://stackoverflow.com/questions/19394505/matplotlib-expand-the-line-with-specified-width-in-data-unit#42972469
# if you want to updated lines (e.g. resize plot) take the full code from that answer
lw = ((ax.transData.transform((1, ROOMBA_WIDTH))-ax.transData.transform((0, 0)))*(72./fig.dpi))[1]
plt.plot(points[:,0], points[:,1], '-', color="white", linewidth=lw, alpha=1.)
#buffer = io.BytesIO()
plt.savefig(sys.argv[1]+".png", format="png", dpi=100, facecolor=fig.get_facecolor(), edgecolor='none') # save as file (800x600)
plt.close('all')
#return buffer
# load test data
if len(sys.argv) > 1:
renderArea(sys.argv[1])
source = io.imread(sys.argv[1]+".png", as_gray=True)
#source = morphology.convex_hull_image(source)
fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(9.3, 6), sharex=True, sharey=True)
fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)
ax = axes.ravel()
for a in ax.ravel():
a.axis('off')
a.set_aspect('equal')
label_img = measure.label(source)
# skimage uses row, column coordinate system where top-left = origin
# matplotlib uses x,y coordinate system where top-left = origin
ax[0].imshow(source, cmap=plt.cm.get_cmap(name="cividis"), vmax=1)
ax[0].set_title("original shape and orientation", fontdict={'fontsize': 11})
# extract polygon from shape
shape = measure.approximate_polygon(measure.find_contours(source, 0.5)[0], tolerance=15)
ax[1].imshow(source*0.4, cmap=plt.cm.get_cmap(name="cividis"), vmax=1)
ax[1].quiver(shape[:,1][:-1], shape[:,0][:-1], shape[:,1][1:]-shape[:,1][:-1], shape[:,0][1:]-shape[:,0][:-1],
range(len(shape)), scale_units='xy', angles='xy', width=.01, scale=1, zorder=99,
cmap=plt.cm.get_cmap(name="spring")
)
ax[1].set_title("polygon approximation and edges", fontdict={'fontsize': 11})
# find global wall orientation
region = measure.regionprops(label_img, coordinates="xy")[0]
wall_orientation = np.array([
[[0, -region.major_axis_length/2],
[0,region.major_axis_length/2]],
[[-region.minor_axis_length/2,0],
[region.minor_axis_length/2, 0]]
])
wall_orientation = Rotate2D(wall_orientation, [0,0], region.orientation)
wall_orientation = wall_orientation + region.centroid
ax[0].plot(wall_orientation[0][:, 1], wall_orientation[0][:, 0], '-r', linewidth=2.5)
ax[0].plot(wall_orientation[1][:, 1], wall_orientation[1][:, 0], '-r', linewidth=2.5)
ax[0].text(region.centroid[1], region.centroid[0], u'{:.2f}°'.format(np.degrees(region.orientation)))
#polygon = Rotate2D(polygon, region.centroid, -region.orientation)
# decomposition into segments (edges)
polygon = Polygon(shape, region.centroid, snap=45, scaling=SCALING_FACTOR)
points = polygon.get_points()
ax[2].plot(points[:, 1], points[:, 0], linewidth=2.5)
ax[2].plot(points[:, 1], points[:, 0], 'or', markersize=3)
ax[2].set_title("original polygon ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
polygon.align_longest_edge()
points = polygon.get_points()
ax[3].plot(points[:, 1], points[:, 0], linewidth=2.5)
ax[3].plot(points[:, 1], points[:, 0], 'or', markersize=3)
ax[3].set_title("align longest edge ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
polygon.iterate_segments()
points = polygon.get_points()
ax[4].plot(points[:, 1], points[:, 0], linewidth=2.5)
ax[4].plot(points[:, 1], points[:, 0], 'or', markersize=3)
ax[4].set_title("1st iteration ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
polygon.iterate_segments()
points = polygon.get_points()
ax[5].plot(points[:, 1], points[:, 0], linewidth=2.5)
ax[5].plot(points[:, 1], points[:, 0], 'or', markersize=3)
ax[5].set_title("2nd iteration ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
for _ in range(3):
polygon.iterate_segments()
points = polygon.get_points()
ax[6].plot(points[:, 1], points[:, 0], linewidth=2.5)
ax[6].plot(points[:, 1], points[:, 0], 'or', markersize=3)
ax[6].set_title("5th iteration ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
for _ in range(5):
polygon.iterate_segments()
points = polygon.get_points()
ax[7].plot(points[:, 1], points[:, 0], linewidth=2.5)
ax[7].plot(points[:, 1], points[:, 0], 'or', markersize=3)
ax[7].set_title("10th iteration ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
points = Polygon(shape, region.centroid, snap=45).align_longest_edge().force_snap().get_points()
ax[8].imshow(source*0.4, cmap=plt.cm.get_cmap(name="cividis"), vmax=1)
ax[8].plot(points[:, 1], points[:, 0], linewidth=2.5, color="red")
ax[8].plot(points[:, 1], points[:, 0], 'o', color="white", markersize=1)
ax[8].set_title("force-snapped only ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
ax[8].quiver(points[:,1][:-1], points[:,0][:-1], points[:,1][1:]-points[:,1][:-1], points[:,0][1:]-points[:,0][:-1],
range(len(points)), scale_units='xy', angles='xy', width=.01, scale=1, zorder=99,
cmap=plt.cm.get_cmap(name="spring")
)
polygon.force_snap()
polygon.close_poly()
points = polygon.get_points()
ax[9].imshow(source, cmap=plt.cm.get_cmap(name="cividis"))
ax[9].plot(points[:, 1], points[:, 0], linewidth=2.5, color="red")
ax[9].plot(points[:, 1], points[:, 0], 'o', color="white", markersize=1)
ax[9].set_title("10th + force-snapped ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
polygon.simplify()
points = polygon.get_points()
ax[10].imshow(source, cmap=plt.cm.get_cmap(name="cividis"))
ax[10].plot(points[:, 1], points[:, 0], linewidth=2.5, color="red")
ax[10].plot(points[:, 1], points[:, 0], 'o', color="white", markersize=1)
ax[10].set_title("simplified ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
points = polygon.get_points()
#fig.patch.set_facecolor('#065da2')
p = collections.PatchCollection([patches.Polygon( np.roll(points, 1, axis=1), True)], edgecolor="cyan", linewidth=3)
ax[11].set_facecolor('#065da2')
ax[11].add_collection(p)
ax[11].set_title("final ({:.2f}°)".format(polygon.get_avg_deviation()), fontdict={'fontsize': 11})
print("Polygon Area={:.2f}m²".format(polygon.get_area()/1e6))
M = measure.moments(source)
print("Original Shape Area={:.2f}m²".format(M[0,0]*(SCALING_FACTOR**2)/1e6))
#print(polygon)
#current_shape = shape.get_shape()
#ax[2].plot(current_shape[:, 1], current_shape[:, 0], linewidth=4)
#plt.tight_layout()
plt.show()
|
{"hexsha": "45e771d560e06b57abb239eae41a117a4f0b4743", "size": 14488, "ext": "py", "lang": "Python", "max_stars_repo_path": "analyze/polygon.py", "max_stars_repo_name": "henrythasler/roomba", "max_stars_repo_head_hexsha": "ecb021b2726f80d269c463f4709b7910e4d1a229", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-28T00:42:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-28T00:42:45.000Z", "max_issues_repo_path": "analyze/polygon.py", "max_issues_repo_name": "henrythasler/roomba", "max_issues_repo_head_hexsha": "ecb021b2726f80d269c463f4709b7910e4d1a229", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analyze/polygon.py", "max_forks_repo_name": "henrythasler/roomba", "max_forks_repo_head_hexsha": "ecb021b2726f80d269c463f4709b7910e4d1a229", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.247761194, "max_line_length": 144, "alphanum_fraction": 0.6269326339, "include": true, "reason": "import numpy", "num_tokens": 3945}
|
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
#!/usr/bin/python
from __future__ import print_function
from progressbar import ProgressBar
import numpy as np
import tensorflow as tf
from xfdnn.rt.xdnn_util import make_list
from xfdnn.rt.xdnn_io import loadImageBlobFromFileScriptBase
########################################################################
## USER EDITABLE:
########################################################################
### Minimum required variables to perform preprocessing
INPUT_NODES = 'data'
LABEL_OFFSET = 0
BATCH_SIZE = 1
### Preprocessing formulas
### Available transformations: ['resize', 'resize2mindim', 'resize2maxdim', 'crop_letterbox',
### 'crop_center', 'plot', 'pxlscale', 'meansub', 'chtranspose', 'chswap']
# for resnet50, inception_v1
CMD_SEQ = [
('meansub', [103.939, 116.779, 123.68]),
('resize2mindim', [224, 224]),
('crop_center', [224, 224]),
]
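# Each CMD_SEQ entry is a (transform_name, parameters) pair; the pairs are
# applied to the image in list order by loadImageBlobFromFileScriptBase.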
# for inception_v4
# CMD_SEQ = [
# ('pxlscale', 1/255.),
# ('meansub', 0.5),
# ('pxlscale', 2),
# ('resize2mindim', [299, 299]),
# ('crop_center', [299, 299]),
# ]
# for squeezenet
# CMD_SEQ = [
# ('resize2mindim', [227, 227]),
# ('crop_center', [227, 227]),
# ('chswap',(2,1,0)),
# ('meansub', [104.006, 116.669, 122.679]),
# ]
########################################################################
# Environment Variables (obtained by running "source overlaybins/setup.sh")
IMAGEDIR = "/home/mluser/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min/"
IMAGELIST = "/home/mluser/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min/val.txt"
LABELSLIST = "/home/mluser/CK-TOOLS/dataset-imagenet-ilsvrc2012-aux/synset_words.txt"
INCLUDE_LABELS = False
def input_fn(iter):
images = []
labels = []
line = open(IMAGELIST).readlines()
for index in range(BATCH_SIZE):
curline = line[iter * BATCH_SIZE + index].strip()
[calib_image_name, calib_label_id] = curline.split(' ')
labels.append(int(calib_label_id) + LABEL_OFFSET)
image, __ = loadImageBlobFromFileScriptBase(IMAGEDIR + calib_image_name, CMD_SEQ)
images.append(image)
labels = np.array(labels)
if INCLUDE_LABELS:
return {INPUT_NODES: images, 'labels': labels}
else:
return {INPUT_NODES: images}
def top5_accuracy(graph, input_nodes, output_nodes, iter_cnt, batch_size, label_offset=0):
global BATCH_SIZE, INPUT_NODES, INCLUDE_LABELS, LABEL_OFFSET
INPUT_NODES = input_nodes
INCLUDE_LABELS = True
LABEL_OFFSET = label_offset
BATCH_SIZE = batch_size
with tf.Session(graph=graph) as sess:
input_tensors = {node: sess.graph.get_operation_by_name(node).outputs[0] for node in make_list(input_nodes)}
output_tensor = sess.graph.get_operation_by_name(output_nodes).outputs[0]
top1_acc = 0
top5_acc = 0
progress = ProgressBar()
line = open(IMAGELIST).readlines()
for iter in progress(range(iter_cnt)):
inputs = input_fn(iter)
correct_labels = inputs['labels']
predictions = sess.run(output_tensor, feed_dict={tensor: inputs[name] for name, tensor in input_tensors.items()})
top1_prediction = np.argmax(predictions, axis=1)
top5_prediction = np.argsort(predictions, axis=1)[:,-5:]
top1_accuracy = sum(top1_prediction == correct_labels)
top5_accuracy = sum([label in top5_prediction for label in correct_labels])
top1_acc += top1_accuracy
top5_acc += top5_accuracy
total_samples = float(iter_cnt*batch_size)
final_top1_acc = top1_acc/total_samples
final_top5_acc = top5_acc/total_samples
print ('top1_acc:{}, top5_acc:{}'.format(final_top1_acc,final_top5_acc))
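if __name__ == '__main__':
    # Minimal smoke test (a sketch, assuming the dataset paths above exist):
    # preprocess one batch and report the resulting blob shape.
    batch = input_fn(0)
    print(np.asarray(batch[INPUT_NODES]).shape)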
|
{"hexsha": "773d891a625d6c912670e01e14e1133b1d922244", "size": 3958, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/tensorflow/utils.py", "max_stars_repo_name": "yarenty/ml-suite", "max_stars_repo_head_hexsha": "570202d904cf5980c2262b4cfc067eb8428ea8d0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 334, "max_stars_repo_stars_event_min_datetime": "2018-06-07T00:42:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T19:48:50.000Z", "max_issues_repo_path": "examples/tensorflow/utils.py", "max_issues_repo_name": "yarenty/ml-suite", "max_issues_repo_head_hexsha": "570202d904cf5980c2262b4cfc067eb8428ea8d0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 99, "max_issues_repo_issues_event_min_datetime": "2018-06-20T14:13:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-03T19:36:50.000Z", "max_forks_repo_path": "examples/tensorflow/utils.py", "max_forks_repo_name": "yarenty/ml-suite", "max_forks_repo_head_hexsha": "570202d904cf5980c2262b4cfc067eb8428ea8d0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 151, "max_forks_repo_forks_event_min_datetime": "2018-06-15T12:10:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T06:22:35.000Z", "avg_line_length": 32.7107438017, "max_line_length": 119, "alphanum_fraction": 0.6141990904, "include": true, "reason": "import numpy", "num_tokens": 998}
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
from imgProcessor import *
from vehDetectFunc import *
import pickle
from scipy.ndimage.measurements import label
VIS_WINDOWS = False
VIS_BBOX = False
VIS_HEAT = False
TEST_ON_IMAGE = True
# Define a function to visualize search window
def get_windows(img, window_size, ystart, ystop, scale, pix_per_cell, cell_per_block):
draw_img = np.copy(img)
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
nblocks_per_window = (window_size // pix_per_cell) - cell_per_block + 1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
# Bounding Boxes
bbox_list = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window_size*scale)
top_left = (xbox_left, ytop_draw+ystart)
bottom_right = (xbox_left+win_draw,ytop_draw+win_draw+ystart)
# Append bbox position to list
bbox_list.append((top_left, bottom_right))
# Return the list of bounding boxes
return bbox_list
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, window_size, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
nfeat_per_block = orient*cell_per_block**2
    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
nblocks_per_window = (window_size // pix_per_cell) - cell_per_block + 1
cells_per_step = 1 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
# Bounding Boxes
bbox_list = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window_size, xleft:xleft+window_size], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
            # Scale features and make a prediction (use the stacked 3-channel HOG vector)
            test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
#test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window_size*scale)
top_left = (xbox_left, ytop_draw+ystart)
bottom_right = (xbox_left+win_draw,ytop_draw+win_draw+ystart)
# Append bbox position to list
bbox_list.append((top_left, bottom_right))
# Return the list of bounding boxes
return bbox_list
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
    return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (255,0,0), 6)
# Return the image
return img
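# A quick numeric sketch of the heat-map pipeline above (synthetic boxes;
# threshold 1): two overlapping detections survive, an isolated one is
# rejected as a likely false positive.
#
#   heat = np.zeros((10, 10), dtype=np.float64)
#   boxes = [((1, 1), (5, 5)), ((2, 2), (6, 6)), ((8, 8), (9, 9))]
#   heat = add_heat(heat, boxes)      # the overlap region accumulates to 2
#   heat = apply_threshold(heat, 1)   # pixels with heat <= 1 are zeroed
#   labels = label(heat)              # labels[1] == 1 surviving blob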
def vehicleDetection_pipeline(image):
# Undistort the image
img = cc.undistortImg(image)
heat = np.zeros_like(img[:,:,0]).astype(np.float)
window_size = 64
bbox_list = []
if(VIS_WINDOWS == True):
ystart = 370
ystop = 470
scale = 0.5
bboxes = get_windows(img, window_size, ystart, ystop, scale, pix_per_cell, cell_per_block)
bbox_list.extend(bboxes)
ystart = 370
ystop = 500
scale = 1
bboxes = get_windows(img, window_size, ystart, ystop, scale, pix_per_cell, cell_per_block)
bbox_list.extend(bboxes)
ystart = 370
ystop = 600
scale = 1.5
bboxes = get_windows(img, window_size, ystart, ystop, scale, pix_per_cell, cell_per_block)
bbox_list.extend(bboxes)
        ystart = img.shape[0]//2  # integer division keeps the slice index an int
ystop = img.shape[0]
scale = 2
bboxes = get_windows(img, window_size, ystart, ystop, scale, pix_per_cell, cell_per_block)
bbox_list.extend(bboxes)
        ystart = img.shape[0]//2
ystop = img.shape[0]
scale = 2.5
bboxes = get_windows(img, window_size, ystart, ystop, scale, pix_per_cell, cell_per_block)
bbox_list.extend(bboxes)
out_img = draw_boxes(img, bbox_list)
return out_img
else:
ystart = 370
ystop = 470
scale = 0.5
bboxes = find_cars(img, window_size, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
bbox_list.extend(bboxes)
ystart = 370
ystop = 500
scale = 1
bboxes = find_cars(img, window_size, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
bbox_list.extend(bboxes)
ystart = 370
ystop = 600
scale = 1.5
bboxes = find_cars(img, window_size, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
bbox_list.extend(bboxes)
        ystart = img.shape[0]//2
ystop = img.shape[0]
scale = 2
bboxes = find_cars(img, window_size, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
bbox_list.extend(bboxes)
        ystart = img.shape[0]//2
ystop = img.shape[0]
scale = 2.5
bboxes = find_cars(img, window_size, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
bbox_list.extend(bboxes)
if(VIS_BBOX == True):
draw_img = draw_boxes(img, bbox_list)
else:
# Add heat to each box in box list
heat = add_heat(heat,bbox_list)
# Apply threshold to help remove false positives
heat = apply_threshold(heat,7)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Find final boxes from heatmap using label function
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(img), labels)
if(VIS_HEAT == True):
fig = plt.figure()
plt.imshow(heatmap, cmap='hot')
plt.title('Heat Map')
plt.show()
return draw_img
if __name__=="__main__":
    # load a pre-trained SVC model from a serialized (pickle) file
dist_pickle = pickle.load( open("trainsvc_pickle.p", "rb" ) )
# get attributes of our svc object
svc = dist_pickle["svc"]
X_scaler = dist_pickle["scaler"]
orient = dist_pickle["orient"]
pix_per_cell = dist_pickle["pix_per_cell"]
cell_per_block = dist_pickle["cell_per_block"]
spatial_size = dist_pickle["spatial_size"]
hist_bins = dist_pickle["hist_bins"]
# Read in an image
image = mpimg.imread('test_images/test5.jpg')
# Create an instance of Camera Calibration Class
cc = camCalibration("camera_cal/cal_data_pickle.p")
if((VIS_HEAT == True) or (TEST_ON_IMAGE == True)):
## Test image and save
detectedVeh = vehicleDetection_pipeline(image)
if(TEST_ON_IMAGE == True):
mpimg.imsave('output_images/test5.jpg', detectedVeh)
fig = plt.figure()
plt.imshow(detectedVeh)
plt.title('Detected Vehicle')
plt.show()
else:
out_clip_filename = "project_video_out.mp4"
clip1 = VideoFileClip("project_video.mp4")
processed_clip = clip1.fl_image(vehicleDetection_pipeline) #NOTE: this function expects color images!!
if((VIS_WINDOWS == True) or (VIS_BBOX == True)):
processed_clip.preview()
else:
processed_clip.write_videofile(out_clip_filename, audio=False)
#processed_clip.preview()
|
{"hexsha": "2232e1f627249e9387b9b7878b0b8c2027289bdb", "size": 11655, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/vehicle_detection_svm/vehicleDetection.py", "max_stars_repo_name": "towardsautonomy/towardsautonomy.github.com", "max_stars_repo_head_hexsha": "54088e8fa87ba265df2c2d660148996767c36668", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-05-03T18:44:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-01T06:28:07.000Z", "max_issues_repo_path": "projects/vehicle_detection_svm/vehicleDetection.py", "max_issues_repo_name": "towardsautonomy/towardsautonomy.github.com", "max_issues_repo_head_hexsha": "54088e8fa87ba265df2c2d660148996767c36668", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-06-14T05:48:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-28T22:37:27.000Z", "max_forks_repo_path": "projects/vehicle_detection_svm/vehicleDetection.py", "max_forks_repo_name": "towardsautonomy/towardsautonomy.github.com", "max_forks_repo_head_hexsha": "54088e8fa87ba265df2c2d660148996767c36668", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-06-14T05:45:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-22T05:09:50.000Z", "avg_line_length": 38.3388157895, "max_line_length": 144, "alphanum_fraction": 0.6338052338, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3080}
|
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <math.h>
#include <gsl/gsl_integration.h>
double f1(double x, void *params) {
(void) (params); /* avoid unused parameter warning */
return pow(x, 2);
}
double fi1(double x) {
return pow(x, 3) / 3;
}
double f2(double x, void *params) {
(void) (params); /* avoid unused parameter warning */
if (0 == x) {
return 0;
}
return 1 / sqrt(x);
}
double fi2(double x) {
return 2 * sqrt(x);
}
double pi(double (*fi)(double), double x1, double x2) {
return fi(x2) - fi(x1);
}
double e(double p1, double p2) {
return fabs(p1 - p2);
}
int main(int argc, char *argv[]) {
double a, b, pg1, e1, pi1, pg2, e2, pi2;
    if (argc < 3) {
errno = EINVAL;
perror("");
return errno;
}
    errno = 0;
    a = strtod(argv[1], NULL);
b = strtod(argv[2], NULL);
if (0 != errno) {
perror("");
return errno;
}
gsl_integration_workspace *w1
= gsl_integration_workspace_alloc(1000);
gsl_function F1;
F1.function = &f1;
gsl_integration_qags(&F1, a, b, 0, 1e-7, 1000,
w1, &pg1, &e1);
pi1 = pi(fi1, a, b);
gsl_integration_workspace *w2
= gsl_integration_workspace_alloc(1000);
gsl_function F2;
F2.function = &f2;
gsl_integration_qags(&F2, a, b, 0, 1e-7, 1000,
w2, &pg2, &e2);
pi2 = pi(fi2, a, b);
printf("%f\t%f\n", pg1, pg2);
printf("%f\t%f\n", pi1, pi2);
printf("%e\t%e\n", e(pg1, pi1), e(pg2, pi2));
printf("%zu\t%zu\n", w1->size, w2->size);
gsl_integration_workspace_free(w1);
gsl_integration_workspace_free(w2);
return EXIT_SUCCESS;
}
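/* Usage sketch (the link flags are the conventional GSL ones, not taken
 * from this repo's build files):
 *   gcc zad4.c -o zad4 -lgsl -lgslcblas -lm
 *   ./zad4 0 2
 * Output rows: the two GSL estimates, the analytic values, the absolute
 * differences, and the subinterval counts used by each adaptive workspace. */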
|
{"hexsha": "34b658de8ad7a7103fa47acc2e6d8d3bdcbeed08", "size": 1714, "ext": "c", "lang": "C", "max_stars_repo_path": "lab4/zad4.c", "max_stars_repo_name": "mistyfiky/agh-mownit", "max_stars_repo_head_hexsha": "d88c21308b863942497c111d044e359ce220d421", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab4/zad4.c", "max_issues_repo_name": "mistyfiky/agh-mownit", "max_issues_repo_head_hexsha": "d88c21308b863942497c111d044e359ce220d421", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab4/zad4.c", "max_forks_repo_name": "mistyfiky/agh-mownit", "max_forks_repo_head_hexsha": "d88c21308b863942497c111d044e359ce220d421", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1408450704, "max_line_length": 57, "alphanum_fraction": 0.557176196, "num_tokens": 550}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
roslaunch my_pkg ur5_bringup.launch
turn on external program on teach pendant!!
roslaunch usb_cam usb_cam-test.launch
roslaunch my_pkg single.launch
rosrun my_pkg ibvs.py
yxj 20210826
'''
from numpy import linalg
import rospy
from rospy.rostime import Duration
from std_msgs.msg import String
import time
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PointStamped
import socket
from ur5_pose_get import *
from vision_pose_get import VisionPosition
import numpy as np
import matplotlib.pyplot as plt
import math
from urdf_parser_py.urdf import URDF
from pykdl_utils.kdl_parser import kdl_tree_from_urdf_model
from pykdl_utils.kdl_kinematics import KDLKinematics
from ur5_kinematics import Kinematic
import os
def change_angle_to_pi(qangle):
    # convert joint angles from degrees to radians
    temp=[]
    for i in range(len(qangle)):
        temp.append(qangle[i]/180.0*math.pi)
    return temp
def moveur(pub,q,ace,vel,t):
# ss="movej(["+str(q[0])+","+str(q[1])+","+str(q[2])+","+str(q[3])+","+str(q[4])+","+str(q[5])+"]," +"a="+str(ace)+","+"v="+str(vel)+","+"t="+str(t)+")"
ss="movej(["+str(q[0])+","+str(q[1])+","+str(q[2])+","+str(q[3])+","+str(q[4])+","+str(q[5])+"]," +"a="+str(ace)+","+"v="+str(vel)+")"
rospy.loginfo(ss)
pub.publish(ss)
def movelur(pub,q,ace,vel,t):
ss="movel(["+str(q[0])+","+str(q[1])+","+str(q[2])+","+str(q[3])+","+str(q[4])+","+str(q[5])+"]," +"a="+str(ace)+","+"v="+str(vel)+","+"t="+str(t)+")"
rospy.loginfo(ss)
pub.publish(ss)
def movecur(pub,q,ace,vel,t):
ss="movec(["+str(q[0])+","+str(q[1])+","+str(q[2])+","+str(q[3])+","+str(q[4])+","+str(q[5])+"]," +"a="+str(ace)+","+"v="+str(vel)+","+"t="+str(t)+")"
rospy.loginfo(ss)
pub.publish(ss)
def urscript_speedj_pub(pub , vel, ace,t): # t is required: it sets the motion duration; the speed drops to 0 when it ends, so t must be longer than the command period
ss = "speedj([" + str(vel[0]) + "," + str(vel[1]) + "," + str(vel[2]) + "," + str(vel[3]) + "," + str(
vel[4]) + "," + str(vel[5]) + "]," + "a=" + str(ace) + "," + "t=" + str(t) + ")"
rospy.loginfo(ss)
pub.publish(ss)
def urscript_speedl_pub(pub , vel, ace,t): # xd, a, t
ss = "speedl([" + str(vel[0]) + "," + str(vel[1]) + "," + str(vel[2]) + "," + str(vel[3]) + "," + str(
vel[4]) + "," + str(vel[5]) + "]," + "a=" + str(ace) + "," + "t=" + str(t) + ")"
rospy.loginfo(ss)
pub.publish(ss)
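# Offline check of the command strings the publishers above build (no ROS
# required; the numbers are illustrative):
#
#   >>> vel = [0.1, 0.0, 0.0, 0.0, 0.0, 0.0]
#   >>> "speedj([" + ",".join(str(v) for v in vel) + "],a=0.5,t=0.1)"
#   'speedj([0.1,0.0,0.0,0.0,0.0,0.0],a=0.5,t=0.1)'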
def get_jacobian_from_joint(urdfname,jointq,flag):
#robot = URDF.from_xml_file("/data/ros/ur_ws/src/universal_robot/ur_description/urdf/ur5.urdf")
robot = URDF.from_xml_file(urdfname)
tree = kdl_tree_from_urdf_model(robot)
# print tree.getNrOfSegments()
chain = tree.getChain("base_link", "wrist_3_link")
# print chain.getNrOfJoints()
# forwawrd kinematics
kdl_kin = KDLKinematics(robot, "base_link", "wrist_3_link")
q=jointq
#q = [0, 0, 1, 0, 1, 0]
pose = kdl_kin.forward(q) # forward kinematics (returns homogeneous 4x4 matrix)
    pose[0,:],pose[1,:] = -pose[0,:],-pose[1,:]# added on 0728 yxj
    # Note: this chain runs from base_link to wrist_3_link, while the extrinsic calibration used base to wrist_3_link; the two differ by a 180-degree rotation about z. After the sign flip above, the pose is effectively expressed in the base frame. 0826 yxj
J = kdl_kin.jacobian(q)
J[0,:],J[1,:],J[3,:],J[4,:] = -J[0,:],-J[1,:],-J[3,:],-J[4,:] # added on 0728 yxj
#print 'J:', J
return J,pose
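# Sanity-check idea for any Jacobian like the one above: for a small joint
# step dq, fk(q + dq) - fk(q) should match J(q) @ dq to first order. A toy
# planar 2-link arm (unit link lengths, not the UR5 chain) illustrates it:
#
#   import numpy as np
#   def fk(q):
#       return np.array([np.cos(q[0]) + np.cos(q[0] + q[1]),
#                        np.sin(q[0]) + np.sin(q[0] + q[1])])
#   def jac(q):
#       return np.array([[-np.sin(q[0]) - np.sin(q[0] + q[1]), -np.sin(q[0] + q[1])],
#                        [ np.cos(q[0]) + np.cos(q[0] + q[1]),  np.cos(q[0] + q[1])]])
#   q, dq = np.array([0.3, 0.5]), 1e-6 * np.array([1.0, -2.0])
#   print(fk(q + dq) - fk(q))   # ~= jac(q) @ dq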
def main():
dir = '/fig_ibvs_Nori/'
current_path = os.path.dirname(__file__)
rospy.init_node("move_ur5_by_urscript")
pub=rospy.Publisher("/ur_hardware_interface/script_command",String,queue_size=10) # the subscriber is in ur5_bringup.launch
ros_freq = 30
rate=rospy.Rate(ros_freq)
vision_reader = VisionPosition()
vision_sub = rospy.Subscriber("/aruco_single/pixel", PointStamped, vision_reader.callback)
ur_reader = Urposition()
ur_sub = rospy.Subscriber("/joint_states", JointState, ur_reader.callback)
pos1_flag = 0
pos2_flag = 0
flag_has_sent = 0
flag_initialized = 0
# initial_state=[0,-1.57,0,0,3.14,0.25]
    # initial_state=[-3.75,-89.27,-88.4,-90,90,1.34]# in degrees
    # initial_state=[147.73,-81.12, -97.04, -79.13, 87.39, -155.48]# in degrees
    initial_state=[139.47,-88.87,-90.59,-90,90.83,-173.23]# in degrees
    initial_state=change_angle_to_pi(initial_state)# convert to radians
# camera intrinsic parameters
projection_matrix = np.reshape(np.array([ 688.84155, 0. , 319.01795, 0. ,\
0. , 688.81415, 232.94001, 0. ,\
0. , 0. , 1. , 0. ]),[3,4] )
fx = projection_matrix[0,0]
fy = projection_matrix[1,1]
u0 = projection_matrix[0,2]
v0 = projection_matrix[1,2]
# prepare for logging data
qdot = np.zeros([6,1],float)
log_x = []
log_q = np.empty([6,0],float)
log_qdot = np.empty([6,0],float)
log_rdot = np.empty([6,0],float)
log_drdot = np.empty([6,0],float)
v = [0,0,0,0,0,0]
a = 50
tt = 1
dx = np.reshape([320,240],[2,1])
dr = np.reshape([0.476,-0.140, 0.451],[3,1])
drdot = np.zeros([3,1])
Kp = 2*np.eye(2)
time.sleep(0.3)# wait for a short time otherwise q_last is empty
q_last=ur_reader.now_ur_pos
x_last = vision_reader.pos
k_last = vision_reader.k_pos
t_start = time.time()
t_last = t_start
t_ready = t_start
print("time begins at: ",t_start)
# ================================================while begins
while not rospy.is_shutdown():
# rospy.loginfo("start while-----")
t = time.time()
q_now=ur_reader.now_ur_pos
qdot = ur_reader.now_joint_vel
if len(q_now)==0:
print("can not get q !!!!!")
continue
x_now = vision_reader.pos
x = x_now
k_now = vision_reader.k_pos
        # compute the manipulator Jacobian J
# urdf = "/home/roboticslab/ur_ws/src/my_pkg/urdf/ur5.urdf"
urdf = current_path+"/../urdf/ur5.urdf"
J,pose = get_jacobian_from_joint(urdf,q_now,0)
J_inv = np.linalg.pinv(J)
J_ori_inv = np.linalg.pinv(J[3:6,:]) # only compute orientation!!
# print(J_inv)
N_ori = np.eye(6)-np.dot(J_ori_inv,J[3:6,:])
        # compute the image (interaction) matrix Js
u = x[0]-u0
v = x[1]-v0
        z = 0.5  # assumed constant feature depth (m)
Js = np.array([ [-fx/z, 0, u/z, u*v/fx, -(fx*fx+u*u)/fx, u] , [0, -fy/z, v/z, (fy*fy+v*v)/fy, -u*v/fy, -v] ])
# print(Js)
R_extrinsic = np.array([[-0.76111026 ,0.64717796,-0.043265 ],\
[-0.58715626,-0.65910701, 0.46992071],\
[ 0.27560606, 0.38306479, 0.8816477 ]])
# print(R_extrinsic)
        R = np.dot(R_extrinsic, pose[0:3,0:3])# rotation of the camera frame w.r.t. the base
# print(R)
RR = np.zeros([6,6])
RR[0:3,0:3] = R
RR[3:6,3:6] = R
# print(RR)
Js = np.dot(Js,RR)
# print(Js)
Js_inv = np.linalg.pinv(Js)
        # compute the end-effector position
r4 = np.dot(pose, [[0],[0],[0],[1]])
r = r4[0:3]
# print('r is:',r)
        # compute the end-effector velocity
rdot = np.dot(J , np.reshape(qdot,[6,1]))
# print(rdot)
        # issue motion commands
        if flag_initialized==0:# first return to the initial pose; movej does not need the t argument, so pass 0
moveur(pub,initial_state,5,0.3,0)
time.sleep(5)
flag_initialized=1
elif flag_initialized==1:
flag_initialized=2
print("initialization is done! the time is:", t-t_start)
t_ready = t
elif flag_initialized==2 and t-t_ready<20:#
x = np.reshape(x,[2,1])
v = -np.dot( J_inv, np.dot( Js_inv , np.dot(Kp, (x-dx) ) ) )
v = np.dot(N_ori,v)
v = np.reshape(np.array(v),[-1,])
# print('v',v.tolist())
log_x.append(x.tolist())
log_q = np.concatenate((log_q,np.reshape(q_now,[6,1])),axis=1)
log_qdot = np.concatenate((log_qdot,np.reshape(qdot,[6,1])),axis=1)
log_drdot = np.concatenate((log_drdot,np.reshape(v,[6,1])),axis=1)
log_rdot = np.concatenate((log_rdot,np.reshape(rdot,[6,1])),axis=1)
            # safety clamp on joint velocities
v[v>0.5]=0.5
v[v<-0.5]=-0.5
if k_now != k_last:
urscript_speedj_pub(pub,v,a,0.1)
else:
urscript_speedj_pub(pub,[0,0,0,0,0,0],a,0.1)
q_last = q_now
x_last = x_now
k_last = k_now
t_last = t
        rate.sleep()# Sleeps for any leftover time in a cycle. Calculated from the last time sleep, reset, or the constructor was called. This keeps the loop at ros_freq Hz.
# ===============================================while ends
print(time.time()-t_start)
print(np.shape(log_qdot))
log_x_array = np.array(log_x)
np.save(current_path+ dir+'log_x.npy',log_x_array)
np.save(current_path+ dir+'log_q.npy',log_q)
np.save(current_path+dir+'log_qdot.npy',log_qdot)
np.save(current_path+dir+'log_rdot.npy',log_rdot)
np.save(current_path+dir+'log_drdot.npy',log_drdot)
plt.figure(figsize=(30,20))
for j in range(6):
ax = plt.subplot(3, 2, j+1)
ax.set_title('task space velocity %d' % (j+1),fontsize=20)
plt.xlabel('time (s)')
plt.ylabel('task space velocity')
plt.plot(np.linspace(0,np.shape(log_rdot)[1]/ros_freq,np.shape(log_rdot)[1]),np.reshape(np.array(log_rdot[j,:]),[-1,]) )
plt.plot(np.linspace(0,np.shape(log_drdot)[1]/ros_freq,np.shape(log_drdot)[1]),log_drdot[j,:].reshape(-1,))
plt.savefig(current_path+dir+'log_r.jpg')
plt.figure()
plt.plot(log_x_array[:,0], log_x_array[:,1],label = 'actual')
plt.scatter(dx[0],dx[1],label = 'target')
plt.legend()
plt.title('vision space trajectory')
plt.xlabel('x (pixel)')
plt.ylabel('y (pixel)')
plt.savefig(current_path+dir+'log_x.jpg')
if __name__=="__main__":
main()
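# The control law used in the loop above, in isolation:
#   qdot = -J^+ Js^+ Kp (x - x*), optionally projected through
#   N_ori = I - J_ori^+ J_ori so the orientation subspace stays idle.
# A synthetic-numbers sketch (random J/Js stand in for the real kinematics
# and interaction matrix):
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   J = rng.standard_normal((6, 6))    # manipulator Jacobian
#   Js = rng.standard_normal((2, 6))   # image Jacobian
#   x, x_des = np.array([[300.], [200.]]), np.array([[320.], [240.]])
#   Kp = 2 * np.eye(2)
#   qdot = -np.linalg.pinv(J) @ np.linalg.pinv(Js) @ Kp @ (x - x_des)
#   N_ori = np.eye(6) - np.linalg.pinv(J[3:6, :]) @ J[3:6, :]
#   qdot = N_ori @ qdot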
|
{"hexsha": "b29f5b9239e594ee912f69bf98e66b0e041d806a", "size": 9791, "ext": "py", "lang": "Python", "max_stars_repo_path": "ws_icra2022/src/my_pkg/ur5_eyeInhand/ibvs_only_task_space_Nori.py", "max_stars_repo_name": "yanseim/Vision-Based-Control", "max_stars_repo_head_hexsha": "4a92103d99703ac2a45d4ad8d01a663e29c0aa7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ws_icra2022/src/my_pkg/ur5_eyeInhand/ibvs_only_task_space_Nori.py", "max_issues_repo_name": "yanseim/Vision-Based-Control", "max_issues_repo_head_hexsha": "4a92103d99703ac2a45d4ad8d01a663e29c0aa7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ws_icra2022/src/my_pkg/ur5_eyeInhand/ibvs_only_task_space_Nori.py", "max_forks_repo_name": "yanseim/Vision-Based-Control", "max_forks_repo_head_hexsha": "4a92103d99703ac2a45d4ad8d01a663e29c0aa7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8082706767, "max_line_length": 156, "alphanum_fraction": 0.5689919314, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3348}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# saov.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Jan 2017
'''
Contains the Schwarzenberg-Czerny Analysis of Variance period-search algorithm
implementation for periodbase.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
from multiprocessing import Pool, cpu_count
from numpy import (
nan as npnan, arange as nparange, array as nparray, isfinite as npisfinite,
argmax as npargmax, digitize as npdigitize, median as npmedian,
std as npstd, argsort as npargsort, unique as npunique, sum as npsum
)
###################
## LOCAL IMPORTS ##
###################
from ..lcmath import phase_magseries, sigclip_magseries
from .utils import get_frequency_grid, independent_freq_count, resort_by_time
############
## CONFIG ##
############
NCPUS = cpu_count()
#####################################################
## ANALYSIS of VARIANCE (Schwarzenberg-Czerny 1989) ##
#####################################################
def aov_theta(times, mags, errs, frequency,
binsize=0.05, minbin=9):
'''Calculates the Schwarzenberg-Czerny AoV statistic at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_aov : float
The value of the AoV statistic at the specified `frequency`.
'''
period = 1.0/frequency
fold_time = times[0]
phased = phase_magseries(times,
mags,
period,
fold_time,
wrap=False,
sort=True)
phases = phased['phase']
pmags = phased['mags']
bins = nparange(0.0, 1.0, binsize)
ndets = phases.size
binnedphaseinds = npdigitize(phases, bins)
bin_s1_tops = []
bin_s2_tops = []
binndets = []
goodbins = 0
all_xbar = npmedian(pmags)
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_mags = pmags[thisbin_inds]
if thisbin_mags.size > minbin:
thisbin_ndet = thisbin_mags.size
thisbin_xbar = npmedian(thisbin_mags)
# get s1
thisbin_s1_top = (
thisbin_ndet *
(thisbin_xbar - all_xbar) *
(thisbin_xbar - all_xbar)
)
# get s2
thisbin_s2_top = npsum((thisbin_mags - all_xbar) *
(thisbin_mags - all_xbar))
bin_s1_tops.append(thisbin_s1_top)
bin_s2_tops.append(thisbin_s2_top)
binndets.append(thisbin_ndet)
goodbins = goodbins + 1
# turn the quantities into arrays
bin_s1_tops = nparray(bin_s1_tops)
bin_s2_tops = nparray(bin_s2_tops)
binndets = nparray(binndets)
# calculate s1 first
s1 = npsum(bin_s1_tops)/(goodbins - 1.0)
# then calculate s2
s2 = npsum(bin_s2_tops)/(ndets - goodbins)
theta_aov = s1/s2
return theta_aov
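# A quick synthetic check of aov_theta, assuming only numpy: for a pure
# sinusoid, theta should be large at the true frequency and much smaller
# away from it.
#
#   import numpy as np
#   t = np.sort(np.random.uniform(0.0, 100.0, 600))
#   m = 0.05 * np.sin(2.0 * np.pi * t / 3.0) + 0.005 * np.random.randn(600)
#   e = np.full_like(m, 0.005)
#   print(aov_theta(t, m, e, 1.0 / 3.0))   # peaks at the true period
#   print(aov_theta(t, m, e, 1.0 / 7.0))   # small off-peak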
def _aov_worker(task):
'''This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_aov : float
The theta value at the specified frequency. nan if the calculation
fails.
'''
times, mags, errs, frequency, binsize, minbin = task
try:
theta = aov_theta(times, mags, errs, frequency,
binsize=binsize, minbin=minbin)
return theta
except Exception:
return npnan
def aov_periodfind(times,
mags,
errs,
magsarefluxes=False,
startp=None,
endp=None,
stepsize=1.0e-4,
autofreq=True,
normalize=True,
phasebinsize=0.05,
mindetperbin=9,
nbestpeaks=5,
periodepsilon=0.1,
sigclip=10.0,
nworkers=None,
verbose=True):
'''This runs a parallelized Analysis-of-Variance (AoV) period search.
NOTE: `normalize = True` here as recommended by Schwarzenberg-Czerny 1996,
i.e. mags will be normalized to zero and rescaled so their variance = 1.0.
Parameters
----------
times,mags,errs : np.array
The mag/flux time-series with associated measurement errors to run the
period-finding on.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float or None
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp`, and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
normalize : bool
This sets if the input time-series is normalized to 0.0 and rescaled
such that its variance = 1.0. This is the recommended procedure by
Schwarzenberg-Czerny 1996.
phasebinsize : float
The bin size in phase to use when calculating the AoV theta statistic at
a test frequency.
mindetperbin : int
The minimum number of elements in a phase bin to consider it valid when
calculating the AoV theta statistic at a test frequency.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'periods': the full array of periods considered,
'method':'aov' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
stimes, smags, serrs = resort_by_time(stimes, smags, serrs)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# get the frequencies to use
if startp:
endf = 1.0/startp
else:
# default start period is 0.1 day
endf = 1.0/0.1
if endp:
startf = 1.0/endp
else:
# default end period is length of time series
startf = 1.0/(stimes.max() - stimes.min())
# if we're not using autofreq, then use the provided frequencies
if not autofreq:
frequencies = nparange(startf, endf, stepsize)
if verbose:
LOGINFO(
'using %s frequency points, start P = %.3f, end P = %.3f' %
(frequencies.size, 1.0/endf, 1.0/startf)
)
else:
# this gets an automatic grid of frequencies to use
frequencies = get_frequency_grid(stimes,
minfreq=startf,
maxfreq=endf)
if verbose:
LOGINFO(
'using autofreq with %s frequency points, '
'start P = %.3f, end P = %.3f' %
(frequencies.size,
1.0/frequencies.max(),
1.0/frequencies.min())
)
# map to parallel workers
if (not nworkers) or (nworkers > NCPUS):
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
pool = Pool(nworkers)
# renormalize the working mags to zero and scale them so that the
# variance = 1 for use with our LSP functions
if normalize:
nmags = (smags - npmedian(smags))/npstd(smags)
else:
nmags = smags
tasks = [(stimes, nmags, serrs, x, phasebinsize, mindetperbin)
for x in frequencies]
lsp = pool.map(_aov_worker, tasks)
pool.close()
pool.join()
del pool
lsp = nparray(lsp)
periods = 1.0/frequencies
# find the nbestpeaks for the periodogram: 1. sort the lsp array by
# highest value first 2. go down the values until we find five
# values that are separated by at least periodepsilon in period
# make sure to filter out non-finite values
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'method':'aov',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'normalize':normalize,
'phasebinsize':phasebinsize,
'mindetperbin':mindetperbin,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# print('prevperiod = %s, thisperiod = %s, '
# 'perioddiff = %s, peakcount = %s' %
# (prevperiod, period, perioddiff, peakcount))
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different peak
# in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period) for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
return {'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'periods':periods,
'method':'aov',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'normalize':normalize,
'phasebinsize':phasebinsize,
'mindetperbin':mindetperbin,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'method':'aov',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'normalize':normalize,
'phasebinsize':phasebinsize,
'mindetperbin':mindetperbin,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
def analytic_false_alarm_probability(lspinfo,
times,
conservative_nfreq_eff=True,
peakvals=None,
inplace=True):
'''This returns the analytic false alarm probabilities for periodogram
peak values.
FIXME: this doesn't actually work. Fix later.
The calculation follows that on page 3 of Zechmeister & Kurster (2009)::
FAP = 1 − [1 − Prob(z > z0)]**M
where::
M is the number of independent frequencies
Prob(z > z0) is the probability of peak with value > z0
z0 is the peak value we're evaluating
For AoV and AoV-harmonic, the Prob(z > z0) is described by the F
distribution, according to:
- Schwarzenberg-Czerny (1997;
https://ui.adsabs.harvard.edu/#abs/1997ApJ...489..941S)
This is given by::
F( (B-1), (N-B); theta_aov )
Where::
N = number of observations
B = number of phase bins
This translates to a scipy.stats call to the F distribution CDF::
x = theta_aov_best
prob_exceeds_val = scipy.stats.f.cdf(x, (B-1.0), (N-B))
Which we can then plug into the false alarm prob eqn above with the
calculation of M.
Parameters
----------
lspinfo : dict
The dict returned by the
:py:func:`~astrobase.periodbase.spdm.aov_periodfind` function.
times : np.array
The times for which the periodogram result in ``lspinfo`` was
calculated.
conservative_nfreq_eff : bool
If True, will follow the prescription given in Schwarzenberg-Czerny
(2003):
http://adsabs.harvard.edu/abs/2003ASPC..292..383S
        and estimate the effective number of independent frequencies M_eff as::
min(N_obs, N_freq, DELTA_f/delta_f)
peakvals : sequence or None
The peak values for which to evaluate the false-alarm probability. If
None, will calculate this for each of the peak values in the
``nbestpeaks`` key of the ``lspinfo`` dict.
inplace : bool
If True, puts the results of the FAP calculation into the ``lspinfo``
dict as a list available as ``lspinfo['falsealarmprob']``.
Returns
-------
list
The calculated false alarm probabilities for each of the peak values in
``peakvals``.
'''
from scipy.stats import f
frequencies = 1.0/lspinfo['periods']
M = independent_freq_count(frequencies,
times,
conservative=conservative_nfreq_eff)
if peakvals is None:
peakvals = lspinfo['nbestlspvals']
nphasebins = nparange(0.0, 1.0, lspinfo['kwargs']['phasebinsize']).size
ndet = times.size
false_alarm_probs = []
for peakval in peakvals:
prob_xval = peakval
prob_exceeds_val = f.cdf(prob_xval,
nphasebins - 1.0,
ndet - nphasebins)
false_alarm_probs.append(1.0 - (1.0 - prob_exceeds_val)**M)
if inplace:
lspinfo['falsealarmprob'] = false_alarm_probs
return false_alarm_probs
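# End-to-end usage sketch on synthetic data (nworkers=1 keeps it cheap):
#
#   import numpy as np
#   t = np.sort(np.random.uniform(0.0, 100.0, 500))
#   m = 0.1 * np.sin(2.0 * np.pi * t / 2.5) + 0.01 * np.random.randn(500)
#   e = np.full_like(m, 0.01)
#   res = aov_periodfind(t, m, e, startp=0.5, endp=10.0, nworkers=1,
#                        verbose=False)
#   print(res['bestperiod'])   # should come out near 2.5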
|
{"hexsha": "19ea7f67666b74a5e4f11aec38378385b5a6197d", "size": 19850, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrobase/periodbase/saov.py", "max_stars_repo_name": "pierfra-ro/astrobase", "max_stars_repo_head_hexsha": "b9f62c59a3ab9cdc1388d409fa281c26f1e6db6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2017-03-09T19:08:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T00:36:28.000Z", "max_issues_repo_path": "astrobase/periodbase/saov.py", "max_issues_repo_name": "pierfra-ro/astrobase", "max_issues_repo_head_hexsha": "b9f62c59a3ab9cdc1388d409fa281c26f1e6db6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 92, "max_issues_repo_issues_event_min_datetime": "2016-12-21T19:01:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T15:28:45.000Z", "max_forks_repo_path": "astrobase/periodbase/saov.py", "max_forks_repo_name": "pierfra-ro/astrobase", "max_forks_repo_head_hexsha": "b9f62c59a3ab9cdc1388d409fa281c26f1e6db6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2016-12-20T23:01:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-07T16:24:15.000Z", "avg_line_length": 32.1717990276, "max_line_length": 80, "alphanum_fraction": 0.5617128463, "include": true, "reason": "from numpy,from scipy", "num_tokens": 4704}
|
""""
# DeepCTR
https://github.com/shenweichen/DeepCTR
https://deepctr-doc.readthedocs.io/en/latest/Examples.html#classification-criteo
DeepCTR is a **Easy-to-use**,**Modular** and **Extendible** package of deep-learning based CTR models
along with lots of core components layers which can be used to easily build custom models.It is compatible with **tensorflow 1.4+ and 2.0+**.You can use any complex model with `model.fit()`and `model.predict()` .
## Models List
| Model | Paper |
| :------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Convolutional Click Prediction Model | [CIKM 2015][A Convolutional Click Prediction Model](http://ir.ia.ac.cn/bitstream/173211/12337/1/A%20Convolutional%20Click%20Prediction%20Model.pdf) |
| Factorization-supported Neural Network | [ECIR 2016][Deep Learning over Multi-field Categorical Data: A Case Study on User Response Prediction](https://arxiv.org/pdf/1601.02376.pdf) |
| Product-based Neural Network | [ICDM 2016][Product-based neural networks for user response prediction](https://arxiv.org/pdf/1611.00144.pdf) |
| Wide & Deep | [DLRS 2016][Wide & Deep Learning for Recommender Systems](https://arxiv.org/pdf/1606.07792.pdf) |
| DeepFM | [IJCAI 2017][DeepFM: A Factorization-Machine based Neural Network for CTR Prediction](http://www.ijcai.org/proceedings/2017/0239.pdf) |
| Piece-wise Linear Model | [arxiv 2017][Learning Piece-wise Linear Models from Large Scale Data for Ad Click Prediction](https://arxiv.org/abs/1704.05194) |
| Deep & Cross Network | [ADKDD 2017][Deep & Cross Network for Ad Click Predictions](https://arxiv.org/abs/1708.05123) |
| Attentional Factorization Machine | [IJCAI 2017][Attentional Factorization Machines: Learning the Weight of Feature Interactions via Attention Networks](http://www.ijcai.org/proceedings/2017/435) |
| Neural Factorization Machine | [SIGIR 2017][Neural Factorization Machines for Sparse Predictive Analytics](https://arxiv.org/pdf/1708.05027.pdf) |
| xDeepFM | [KDD 2018][xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems](https://arxiv.org/pdf/1803.05170.pdf) |
| AutoInt | [arxiv 2018][AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks](https://arxiv.org/abs/1810.11921) |
| Deep Interest Network | [KDD 2018][Deep Interest Network for Click-Through Rate Prediction](https://arxiv.org/pdf/1706.06978.pdf) |
| Deep Interest Evolution Network | [AAAI 2019][Deep Interest Evolution Network for Click-Through Rate Prediction](https://arxiv.org/pdf/1809.03672.pdf) |
| NFFM | [arxiv 2019][Operation-aware Neural Networks for User Response Prediction](https://arxiv.org/pdf/1904.12579.pdf) |
| FGCNN | [WWW 2019][Feature Generation by Convolutional Neural Network for Click-Through Rate Prediction ](https://arxiv.org/pdf/1904.04447) |
| Deep Session Interest Network | [IJCAI 2019][Deep Session Interest Network for Click-Through Rate Prediction ](https://arxiv.org/abs/1905.06482) |
| FiBiNET | [RecSys 2019][FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction](https://arxiv.org/pdf/1905.09433.pdf) |
"""
import os
import numpy as np
import pandas as pd
from deepctr.inputs import SparseFeat, VarLenSparseFeat, DenseFeat, get_feature_names
from deepctr.layers import custom_objects
from deepctr.models import DeepFM
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import log_loss, roc_auc_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.python.keras.models import save_model, load_model
####################################################################################################
# Helper functions
def os_package_root_path(filepath, sublevel=0, path_add=""):
"""
get the module package root folder
"""
from pathlib import Path
path = Path(os.path.realpath(filepath)).parent
for i in range(1, sublevel + 1):
path = path.parent
path = os.path.join(path.absolute(), path_add)
return path
def log(*s, n=0, m=1):
sspace = "#" * n
sjump = "\n" * m
print(sjump, sspace, s, sspace, flush=True)
####################################################################################################
class Model:
def __init__(self, model_pars=None, data_pars=None, compute_pars=None, **kwargs):
# 4.Define Model
if not model_pars.get('model_type'):
raise Exception("Missing model type when init model object!")
else:
_, linear_cols, dnn_cols, _, _, _ = kwargs.get('dataset')
self.model = DeepFM(linear_cols, dnn_cols, task=compute_pars['task'])
self.model.compile(model_pars["optimization"], model_pars["cost"],
metrics=['binary_crossentropy'], )
####################################################################################################
def _preprocess_criteo(df, **kw):
hash_feature = kw.get('hash_feature')
sparse_col = ['C' + str(i) for i in range(1, 27)]
dense_col = ['I' + str(i) for i in range(1, 14)]
df[sparse_col] = df[sparse_col].fillna('-1', )
df[dense_col] = df[dense_col].fillna(0, )
target = ["label"]
# set hashing space for each sparse field,and record dense feature field name
if hash_feature:
# Transformation for dense features
mms = MinMaxScaler(feature_range=(0, 1))
df[dense_col] = mms.fit_transform(df[dense_col])
sparse_col = ['C' + str(i) for i in range(1, 27)]
dense_col = ['I' + str(i) for i in range(1, 14)]
fixlen_cols = [SparseFeat(feat, vocabulary_size=1000, embedding_dim=4, use_hash=True,
dtype='string') # since the input is string
for feat in sparse_col] + [DenseFeat(feat, 1, )
for feat in dense_col]
else:
for feat in sparse_col:
lbe = LabelEncoder()
df[feat] = lbe.fit_transform(df[feat])
mms = MinMaxScaler(feature_range=(0, 1))
df[dense_col] = mms.fit_transform(df[dense_col])
fixlen_cols = [SparseFeat(feat, vocabulary_size=df[feat].nunique(), embedding_dim=4)
for i, feat in enumerate(sparse_col)] + [DenseFeat(feat, 1, )
for feat in dense_col]
linear_cols = fixlen_cols
dnn_cols = fixlen_cols
    train, test = train_test_split(df, test_size=kw['test_size'], random_state=2020)  # fixed seed so fit/predict/metrics see the same split
return df, linear_cols, dnn_cols, train, test, target
def _preprocess_movielens(df, **kw):
multiple_value = kw.get('multiple_value')
sparse_col = ["movie_id", "user_id", "gender", "age", "occupation", "zip"]
target = ['rating']
# 1.Label Encoding for sparse features,and do simple Transformation for dense features
for feat in sparse_col:
lbe = LabelEncoder()
df[feat] = lbe.fit_transform(df[feat])
if not multiple_value:
# 2.count #unique features for each sparse field
fixlen_cols = [SparseFeat(feat, df[feat].nunique(), embedding_dim=4) for feat in sparse_col]
linear_cols = fixlen_cols
dnn_cols = fixlen_cols
        train, test = train_test_split(df, test_size=0.2, random_state=2020)
else:
hash_feature = kw.get('hash_feature', False)
if not hash_feature:
def split(x):
key_ans = x.split('|')
for key in key_ans:
if key not in key2index:
# Notice : input value 0 is a special "padding",so we do not use 0 to encode valid feature for sequence input
key2index[key] = len(key2index) + 1
return list(map(lambda x: key2index[x], key_ans))
# preprocess the sequence feature
key2index = {}
genres_list = list(map(split, df['genres'].values))
genres_length = np.array(list(map(len, genres_list)))
max_len = max(genres_length)
# Notice : padding=`post`
genres_list = pad_sequences(genres_list, maxlen=max_len, padding='post', )
fixlen_cols = [SparseFeat(feat, df[feat].nunique(), embedding_dim=4) for feat in
sparse_col]
use_weighted_sequence = False
if use_weighted_sequence:
varlen_cols = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(
key2index) + 1, embedding_dim=4), maxlen=max_len, combiner='mean',
weight_name='genres_weight')] # Notice : value 0 is for padding for sequence input feature
else:
varlen_cols = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(
key2index) + 1, embedding_dim=4), maxlen=max_len, combiner='mean',
weight_name=None)] # Notice : value 0 is for padding for sequence input feature
linear_cols = fixlen_cols + varlen_cols
dnn_cols = fixlen_cols + varlen_cols
# generate input data for model
model_input = {name: df[name] for name in sparse_col} #
model_input["genres"] = genres_list
model_input["genres_weight"] = np.random.randn(df.shape[0], max_len, 1)
else:
df[sparse_col] = df[sparse_col].astype(str)
# 1.Use hashing encoding on the fly for sparse features,and process sequence features
genres_list = list(map(lambda x: x.split('|'), df['genres'].values))
genres_length = np.array(list(map(len, genres_list)))
max_len = max(genres_length)
# Notice : padding=`post`
genres_list = pad_sequences(genres_list, maxlen=max_len, padding='post', dtype=str,
value=0)
# 2.set hashing space for each sparse field and generate feature config for sequence feature
fixlen_cols = [
SparseFeat(feat, df[feat].nunique() * 5, embedding_dim=4, use_hash=True,
dtype='string')
for feat in sparse_col]
varlen_cols = [
VarLenSparseFeat(
SparseFeat('genres', vocabulary_size=100, embedding_dim=4, use_hash=True,
dtype="string"),
maxlen=max_len, combiner='mean',
)] # Notice : value 0 is for padding for sequence input feature
linear_cols = fixlen_cols + varlen_cols
dnn_cols = fixlen_cols + varlen_cols
feature_names = get_feature_names(linear_cols + dnn_cols)
# 3.generate input data for model
model_input = {name: df[name] for name in feature_names}
model_input['genres'] = genres_list
train, test = model_input, model_input
return df, linear_cols, dnn_cols, train, test, target
def get_dataset(**kw):
##check whether dataset is of kind train or test
data_path = kw['train_data_path']
data_type = kw['dataset_type']
test_size = kw['test_size']
#### read from csv file
if kw.get("uri_type") == "pickle":
df = pd.read_pickle(data_path)
else:
df = pd.read_csv(data_path)
if data_type == "criteo":
df, linear_cols, dnn_cols, train, test, target = _preprocess_criteo(df, **kw)
elif data_type == "movie_len":
df, linear_cols, dnn_cols, train, test, target = _preprocess_movielens(df, **kw)
else: ## Already define
linear_cols = kw['linear_cols']
dnn_cols = kw['dnn_cols']
        train, test = train_test_split(df, test_size=kw['test_size'], random_state=2020)
target = kw['target_col']
return df, linear_cols, dnn_cols, train, test, target
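# A hypothetical data_pars for the pre-defined-columns branch above (keys
# inferred from this function; the file name and columns are illustrative):
#
#   from deepctr.inputs import SparseFeat, DenseFeat
#   cols = [SparseFeat("user_id", vocabulary_size=1000, embedding_dim=4),
#           DenseFeat("price", 1)]
#   data_pars = {"train_data_path": "dataset/my_data.csv",
#                "dataset_type": "custom",  # anything but "criteo"/"movie_len"
#                "test_size": 0.2,
#                "linear_cols": cols, "dnn_cols": cols,
#                "target_col": ["label"]}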
def fit(model, session=None, data_pars=None, model_pars=None, compute_pars=None, out_pars=None,
**kwargs):
##loading dataset
"""
    Model class --> model; model.model contains the sub-model
"""
data, linear_cols, dnn_cols, train, test, target = get_dataset(**data_pars)
multiple_value = data_pars.get('multiple_value', None)
m = compute_pars
if multiple_value is None:
feature_names = get_feature_names(linear_cols + dnn_cols)
train_model_input = {name: train[name] for name in feature_names}
model.model.fit(train_model_input, train[target].values,
batch_size=m['batch_size'], epochs=m['epochs'], verbose=2,
validation_split=m['validation_split'], )
else:
model.model.fit(train, data[target].values,
batch_size=m['batch_size'], epochs=m['epochs'], verbose=2,
validation_split=m['validation_split'], )
return model
# Model predict
def predict(model, data_pars, compute_pars=None, out_pars=None, **kwargs):
## Model is class
## load test dataset
data, linear_cols, dnn_cols, train, test, target = get_dataset(**data_pars)
feature_names = get_feature_names(linear_cols + dnn_cols, )
test_model_input = {name: test[name] for name in feature_names}
multiple_value = data_pars.get('multiple_value', None)
## predict
if multiple_value is None:
pred_ans = model.model.predict(test_model_input, batch_size=256)
else:
pred_ans = None
return pred_ans
def metrics(ypred, data_pars, compute_pars=None, out_pars=None, **kwargs):
## load test dataset
_, linear_cols, dnn_cols, _, test, target = get_dataset(**data_pars)
if compute_pars.get("task") == "binary":
metrics_dict = {"LogLoss": log_loss(test[target].values, ypred),
"AUC": roc_auc_score(test[target].values, ypred)}
elif compute_pars.get("task") == "regression":
multiple_value = data_pars.get('multiple_value', None)
if multiple_value is None:
metrics_dict = {"MSE": mean_squared_error(test[target].values, ypred)}
else:
metrics_dict = {}
return metrics_dict
def reset_model():
pass
########################################################################################################################
class Model_empty(object):
def __init__(self, model_pars=None, compute_pars=None):
        ## Empty model for serialization
self.model = None
def save(model, path):
if not os.path.exists(os.path.dirname(path)):
print("model file path do not exist!")
else:
save_model(model.model, path)
def load(path):
if not os.path.exists(path):
print("model file do not exist!")
return None
else:
model = Model_empty()
model_keras = load_model(path, custom_objects)
model.model = model_keras
#### Add back the model parameters...
return model
########################################################################################################################
def path_setup(out_folder="", sublevel=1, data_path="dataset/"):
data_path = os_package_root_path(__file__, sublevel=sublevel, path_add=data_path)
out_path = os.getcwd() + "/" + out_folder
os.makedirs(out_path, exist_ok=True)
log(data_path, out_path)
return data_path, out_path
def get_params(choice=0, data_path="dataset/", **kw):
if choice == 0:
log("#### Path params ###################################################")
data_path, out_path = path_setup(out_folder="/deepctr_test/", data_path=data_path)
train_data_path = data_path + "criteo_sample.txt"
data_pars = {"train_data_path": train_data_path, "dataset_type": "criteo", "test_size": 0.2}
log("#### Model params #################################################")
model_pars = {"model_type": "DeepFM", "optimization": "adam", "cost": "binary_crossentropy"}
compute_pars = {"task": "binary", "batch_size": 256, "epochs": 10, "validation_split": 0.2}
out_pars = {"path": out_path}
elif choice == 1:
log("#### Path params ##################################################")
data_path, out_path = path_setup(out_folder="/deepctr_test/", data_path=data_path)
train_data_path = data_path + "criteo_sample.txt"
data_pars = {"train_data_path": train_data_path, "hash_feature": True,
"dataset_type": "criteo", "test_size": 0.2}
log("#### Model params #################################################")
model_pars = {"model_type": "DeepFM", "optimization": "adam", "cost": "binary_crossentropy"}
compute_pars = {"task": "binary", "batch_size": 256, "epochs": 10, "validation_split": 0.2}
out_pars = {"path": out_path}
elif choice == 2:
log("#### Path params ################################################")
data_path, out_path = path_setup(out_folder="/deepctr_test/", data_path=data_path)
train_data_path = data_path + "movielens_sample.txt"
data_pars = {"train_data_path": train_data_path, "dataset_type": "movie_len",
"test_size": 0.2}
log("#### Model params ################################################")
model_pars = {"model_type": "DeepFM", "optimization": "adam", "cost": "mse"}
compute_pars = {"task": "regression", "batch_size": 256, "epochs": 10,
"validation_split": 0.2}
out_pars = {"path": out_path}
elif choice == 3:
log("#### Path params ##################################################")
data_path, out_path = path_setup(out_folder="/deepctr_test/", data_path=data_path)
train_data_path = data_path + "movielens_sample.txt"
data_pars = {"train_data_path": train_data_path, "multiple_value": True,
"dataset_type": "movie_len", "test_size": 0.2}
log("#### Model params ################################################")
model_pars = {"model_type": "DeepFM", "optimization": "adam", "cost": "mse"}
compute_pars = {"task": "regression", "batch_size": 256, "epochs": 10,
"validation_split": 0.2}
out_pars = {"path": out_path}
elif choice == 4:
log("#### Path params #################################################")
data_path, out_path = path_setup(out_folder="/deepctr_test/", data_path=data_path)
train_data_path = data_path + "movielens_sample.txt"
data_pars = {"train_data_path": train_data_path, "multiple_value": True,
"hash_feature": True, "dataset_type": "movie_len", "test_size": 0.2}
log("#### Model params ################################################")
model_pars = {"model_type": "DeepFM", "optimization": "adam", "cost": "mse"}
compute_pars = {"task": "regression", "batch_size": 256, "epochs": 10,
"validation_split": 0.2}
out_pars = {"path": out_path}
return model_pars, data_pars, compute_pars, out_pars
########################################################################################################################
########################################################################################################################
def test(data_path="dataset/", pars_choice=0):
    ### Local test
    log("#### Loading params ##############################################")
    model_pars, data_pars, compute_pars, out_pars = get_params(choice=pars_choice,
                                                               data_path=data_path)
    print(model_pars, data_pars, compute_pars, out_pars)

    log("#### Loading dataset #############################################")
    dataset = get_dataset(**data_pars)

    log("#### Model init, fit #############################################")
    model = Model(model_pars=model_pars, compute_pars=compute_pars, dataset=dataset)
    model = fit(model, data_pars=data_pars, model_pars=model_pars, compute_pars=compute_pars)

    log("#### Predict ####################################################")
    ypred = predict(model, data_pars, compute_pars, out_pars)

    log("#### metrics ####################################################")
    metrics_val = metrics(ypred, data_pars, compute_pars, out_pars)
    print(metrics_val)

    log("#### Plot #######################################################")

    log("#### Save/Load ##################################################")
    save(model, out_pars['path'] + f"/model_{pars_choice}.h5")
    model2 = load(out_pars['path'] + f"/model_{pars_choice}.h5")
    print(model2)
if __name__ == '__main__':
    VERBOSE = True
    test(pars_choice=0)
    test(pars_choice=1)
    test(pars_choice=2)
    test(pars_choice=3)
    test(pars_choice=4)
"""
data = pd.read_csv('./criteo_sample.txt')
sparse_col = ['C' + str(i) for i in range(1, 27)]
dense_col = ['I' + str(i) for i in range(1, 14)]
data[sparse_col] = data[sparse_col].fillna('-1', )
data[dense_col] = data[dense_col].fillna(0, )
target = ['label']
# 1.do simple Transformation for dense features
mms = MinMaxScaler(feature_range=(0, 1))
data[dense_col] = mms.fit_transform(data[dense_col])
# 2.set hashing space for each sparse field,and record dense feature field name
fixlen_cols = [SparseFeat(feat, vocabulary_size=1000, embedding_dim=4, use_hash=True, dtype='string')  # use_hash since the input is string
               for feat in sparse_col] + [DenseFeat(feat, 1, ) for feat in dense_col]
linear_cols = fixlen_cols
dnn_cols = fixlen_cols
feature_names = get_feature_names(linear_cols + dnn_cols, )
# 3.generate input data for model
train, test = train_test_split(data, test_size=0.2)
train_model_input = {name:train[name] for name in feature_names}
test_model_input = {name:test[name] for name in feature_names}
# 4.Define Model,train,predict and evaluate
model = DeepFM(linear_cols,dnn_cols, task='binary')
model.compile("adam", "binary_crossentropy",
metrics=['binary_crossentropy'], )
pred_ans = model.predict(test_model_input, batch_size=256)
print("test LogLoss", round(log_loss(test[target].values, pred_ans), 4))
print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))
"""
|
{"hexsha": "770f6b5804b9bc6a897592000b9e23c073268893", "size": 23712, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlmodels/model_keras/01_deepctr.py", "max_stars_repo_name": "whitetiger1002/mlmodels", "max_stars_repo_head_hexsha": "f70f1da7434e8855eed50adc67b49cc169f2ea24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-11T07:57:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T07:57:48.000Z", "max_issues_repo_path": "mlmodels/model_keras/01_deepctr.py", "max_issues_repo_name": "whitetiger1002/mlmodels", "max_issues_repo_head_hexsha": "f70f1da7434e8855eed50adc67b49cc169f2ea24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlmodels/model_keras/01_deepctr.py", "max_forks_repo_name": "whitetiger1002/mlmodels", "max_forks_repo_head_hexsha": "f70f1da7434e8855eed50adc67b49cc169f2ea24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.0476190476, "max_line_length": 212, "alphanum_fraction": 0.5601383266, "include": true, "reason": "import numpy", "num_tokens": 5266}
|
[STATEMENT]
lemma simple_cg_insert_invar_with_conv :
assumes "observable M1" and "observable M2"
shows "convergence_graph_insert_invar M1 M2 simple_cg_lookup_with_conv simple_cg_insert"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. convergence_graph_insert_invar M1 M2 simple_cg_lookup_with_conv simple_cg_insert
[PROOF STEP]
using simple_cg_insert_invar[of M1 M2]
[PROOF STATE]
proof (prove)
using this:
convergence_graph_insert_invar M1 M2 simple_cg_lookup simple_cg_insert
goal (1 subgoal):
1. convergence_graph_insert_invar M1 M2 simple_cg_lookup_with_conv simple_cg_insert
[PROOF STEP]
unfolding convergence_graph_insert_invar_def
[PROOF STATE]
proof (prove)
using this:
\<forall>G \<gamma>. \<gamma> \<in> L M1 \<longrightarrow> \<gamma> \<in> L M2 \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup G \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup (simple_cg_insert G \<gamma>)
goal (1 subgoal):
1. \<forall>G \<gamma>. \<gamma> \<in> L M1 \<longrightarrow> \<gamma> \<in> L M2 \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup_with_conv G \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup_with_conv (simple_cg_insert G \<gamma>)
[PROOF STEP]
unfolding simple_cg_lookup_invar_with_conv_eq[OF assms]
[PROOF STATE]
proof (prove)
using this:
\<forall>G \<gamma>. \<gamma> \<in> L M1 \<longrightarrow> \<gamma> \<in> L M2 \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup G \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup (simple_cg_insert G \<gamma>)
goal (1 subgoal):
1. \<forall>G \<gamma>. \<gamma> \<in> L M1 \<longrightarrow> \<gamma> \<in> L M2 \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup G \<longrightarrow> convergence_graph_lookup_invar M1 M2 simple_cg_lookup (simple_cg_insert G \<gamma>)
[PROOF STEP]
.
|
{"llama_tokens": 694, "file": "FSM_Tests_EquivalenceTesting_Simple_Convergence_Graph", "length": 4}
|
module scalar_projection_routines
use spherepack_precision, only: &
wp, & ! working precision
ip, & ! integer precision
PI, &
MACHINE_EPSILON
use type_SpherepackUtility, only: &
SpherepackUtility
use gaussian_latitudes_and_weights_routines, only: &
compute_gaussian_latitudes_and_weights
! Explicit typing only
implicit none
! Everything is private unless stated otherwise
private
public :: shpe, shpg
public :: shpei, shpgi
public :: truncate, accumulate_inner_products
! Parameters confined to the module
real(wp), parameter :: ZERO = 0.0_wp
real(wp), parameter :: HALF = 0.5_wp
real(wp), parameter :: ONE = 1.0_wp
real(wp), parameter :: TWO = 2.0_wp
real(wp), parameter :: THREE = 3.0_wp
! Declare interfaces for submodule implementation
interface
module subroutine shpe(nlat, nlon, isym, mtrunc, x, y, idxy, &
wshp, lwshp, iwshp, liwshp, work, lwork, ierror)
! Dummy arguments
integer(ip), intent(in) :: nlat
integer(ip), intent(in) :: nlon
integer(ip), intent(in) :: isym
integer(ip), intent(in) :: mtrunc
real(wp), intent(in) :: x(idxy, nlon)
real(wp), intent(out) :: y(idxy, nlon)
integer(ip), intent(in) :: idxy
real(wp), intent(in) :: wshp(lwshp)
integer(ip), intent(in) :: lwshp
integer(ip), intent(in) :: iwshp(liwshp)
integer(ip), intent(in) :: liwshp
real(wp), intent(out) :: work(lwork)
integer(ip), intent(in) :: lwork
integer(ip), intent(out) :: ierror
end subroutine shpe
module subroutine shpei(nlat, nlon, isym, mtrunc, wshp, lwshp, iwshp, &
liwshp, work, lwork, ierror)
! Dummy arguments
integer(ip), intent(in) :: nlat
integer(ip), intent(in) :: nlon
integer(ip), intent(in) :: isym
integer(ip), intent(in) :: mtrunc
real(wp), intent(out) :: wshp(lwshp)
integer(ip), intent(in) :: lwshp
integer(ip), intent(in) :: iwshp(liwshp)
integer(ip), intent(in) :: liwshp
real(wp), intent(out) :: work(lwork)
integer(ip), intent(in) :: lwork
integer(ip), intent(out) :: ierror
end subroutine shpei
module subroutine shpg(nlat, nlon, isym, mtrunc, x, y, idxy, &
wshp, lwshp, iwshp, liwshp, work, lwork, ierror)
! Dummy arguments
integer(ip), intent(in) :: nlat
integer(ip), intent(in) :: nlon
integer(ip), intent(in) :: isym
integer(ip), intent(in) :: mtrunc
real(wp), intent(in) :: x(idxy, nlon)
real(wp), intent(out) :: y(idxy, nlon)
integer(ip), intent(in) :: idxy
real(wp), intent(in) :: wshp(lwshp)
integer(ip), intent(in) :: lwshp
integer(ip), intent(in) :: iwshp(liwshp)
integer(ip), intent(in) :: liwshp
real(wp), intent(out) :: work(lwork)
integer(ip), intent(in) :: lwork
integer(ip), intent(out) :: ierror
end subroutine shpg
module subroutine shpgi(nlat, nlon, isym, mtrunc, wshp, lwshp, iwshp, &
liwshp, work, lwork, ierror)
! Dummy arguments
integer(ip), intent(in) :: nlat
integer(ip), intent(in) :: nlon
integer(ip), intent(in) :: isym
integer(ip), intent(in) :: mtrunc
real(wp), intent(out) :: wshp(lwshp)
integer(ip), intent(in) :: lwshp
integer(ip), intent(in) :: iwshp(liwshp)
integer(ip), intent(in) :: liwshp
real(wp), intent(out) :: work(lwork)
integer(ip), intent(in) :: lwork
integer(ip), intent(out) :: ierror
end subroutine shpgi
end interface
contains
subroutine truncate(irc, n, idp, a, nrc, ijs)
! Dummy arguments
integer(ip), intent(in) :: irc
integer(ip), intent(in) :: n
integer(ip), intent(in) :: idp
real(wp), intent(in) :: a(idp, *)
integer(ip), intent(in) :: nrc
integer(ip), intent(inout) :: ijs(n)
! Local variables
integer(ip), parameter :: COLUMNS = 0
integer(ip), parameter :: ROWS = 1
integer(ip) :: i, j
! irc = 0 for columns, or irc = 1 for rows
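! On return, ijs(k) holds the index of the first entry in column/row k whose
! magnitude exceeds MACHINE_EPSILON (or n if every entry is negligible).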
select case (irc)
case(COLUMNS)
column_loop: do j=1, nrc
do i=1, n
ijs(j) = i
if (abs(a(i, j)) > MACHINE_EPSILON) cycle column_loop
end do
end do column_loop
case(ROWS)
row_loop: do i=1, nrc
do j=1, n
ijs(i) = j
if (abs(a(i, j)) > MACHINE_EPSILON) cycle row_loop
end do
end do row_loop
end select
end subroutine truncate
! Purpose:
!
! Accumulate inner products of x with respect to y.
!
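! Specifically, the update below computes z = z + dot_product(x, y) * y.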
subroutine accumulate_inner_products(n, x, y, z)
! Dummy arguments
integer(ip), intent(in) :: n
real(wp), intent(in) :: x(n)
real(wp), intent(in) :: y(n)
real(wp), intent(inout) :: z(n)
! Let the intrinsic function dot_product take care of optimization.
z = z + dot_product(x, y) * y
end subroutine accumulate_inner_products
end module scalar_projection_routines
|
{"hexsha": "29a366b44f3695b84a092ee9f72a169e5f53b8a1", "size": 5746, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/scalar_projection_routines.f90", "max_stars_repo_name": "jlokimlin/spherepack4.1", "max_stars_repo_head_hexsha": "e80da29462c5987162a0884baaab7434d0215467", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-06-28T14:01:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T21:59:28.000Z", "max_issues_repo_path": "src/scalar_projection_routines.f90", "max_issues_repo_name": "jlokimlin/spherepack4.1", "max_issues_repo_head_hexsha": "e80da29462c5987162a0884baaab7434d0215467", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2016-05-07T23:00:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-22T23:52:30.000Z", "max_forks_repo_path": "src/scalar_projection_routines.f90", "max_forks_repo_name": "jlokimlin/spherepack4.1", "max_forks_repo_head_hexsha": "e80da29462c5987162a0884baaab7434d0215467", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-06-28T14:03:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T12:53:54.000Z", "avg_line_length": 35.2515337423, "max_line_length": 79, "alphanum_fraction": 0.5214061956, "num_tokens": 1560}
|
module DataAndPlotting

using HDF5
using Plots

# Wave parameters
const ki = 3
const kj = 3

# Grid size
const ni = 300
const nj = 200

# i ranges from 1...ni
# i-1 ranges from 0...ni-1
# x ranges from 0...1
function index2coord(i, ni)
    @assert ni > 1
    @assert 1 <= i <= ni
    (i - 1) / (ni - 1)
end
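# e.g. index2coord(1, ni) == 0.0 and index2coord(ni, ni) == 1.0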
# Set up standing wave
export initialize
function initialize()
    arr = zeros(Float64, ni, nj)
    for j in 1:nj, i in 1:ni
        x = index2coord(i, ni)
        y = index2coord(j, nj)
        arr[i,j] = sin(2π*ki*x) * sin(2π*kj*y)
    end
    arr
end

# Write data to file
export output
function output(arr::Array{Float64, 2}, filename::String)
    h5write(filename, "wave", arr)
end

# Read data from file
export input
function input(filename::String)
    arr = h5read(filename, "wave")
end

# Create a beautiful plot
export makeplot
function makeplot(arr::Array{Float64, 2}, filename::String)
    heatmap(arr, clim=(-1.0, +1.0), color=:viridis)
    savefig(filename)
end

end
|
{"hexsha": "8422c14bc5421ac3d8896c6d76b19fd7fe3f6435", "size": 999, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/DataAndPlotting.jl", "max_stars_repo_name": "eschnett/DataAndPlotting.jl", "max_stars_repo_head_hexsha": "19e03405a448d13e908b7e5eddd4e05b6287949b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/DataAndPlotting.jl", "max_issues_repo_name": "eschnett/DataAndPlotting.jl", "max_issues_repo_head_hexsha": "19e03405a448d13e908b7e5eddd4e05b6287949b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/DataAndPlotting.jl", "max_forks_repo_name": "eschnett/DataAndPlotting.jl", "max_forks_repo_head_hexsha": "19e03405a448d13e908b7e5eddd4e05b6287949b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:23:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:23:41.000Z", "avg_line_length": 15.3692307692, "max_line_length": 59, "alphanum_fraction": 0.6406406406, "num_tokens": 331}
|
# Requires: a limb darkening information file (with fits of intensity w.r.t surface inclination)
# Outputs: a plot that compares different gravity interpolation schemes
import math
import numpy as np
import pickle
import pa.lib.star as st
import pa.lib.util as ut
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
iodir = '../../' # location of the input/output directory
## unpickle the limb darkening information
with open(iodir + 'data/limbdark_m01.pkl', 'rb') as f:
ld = pickle.load(f)
# constant star parameters
omega, inclination, n = [0, 0, 100]
# temperatures of the stars in Kelvin
temp = np.array([6000, 9000, 12000])
## put the stars on a fictional main sequence where log g = 4.5,
## there is a star with the Sun's mass and luminosity,
## luminosity relates to temperature via a power law,
## and luminosity is proportional to mass**3.5
g = 10**4.5 # gravity in cgs units
logg = 4.5 # log of same
# power and multiplicative constant in the luminosity-temperature dependence,
# with luminosity in solar luminosities and temperature in Kelvins
m = 28./5
k = ( 4 * math.pi * ut.sigma * ut.G * ut.Msun / (g * ut.Lsun) )**(7./5)
# the stars' luminosities in solar luminosities
L = k * temp**m
# the stars' masses in solar masses
M = L**(2./7)
# radii of the stars in solar radii
R = L**(1./7) * np.sqrt(ut.G * ut.Msun/ g) / ut.Rsun
print('Non-rotating stars')
print('Temperatures in Kelvin ' + str(temp))
print('Luminosities in solar luminosities ' + str(L))
print('Masses in solar masses ' + str(M))
print('Radii in solar radii ' + str(R))
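# Illustrative sanity check (added here; not in the original script): the radii
# above were chosen so that g = G * M / R**2 in cgs units recovers the fixed
# surface gravity, so the following should hold exactly.
assert np.allclose(ut.G * M * ut.Msun / (R * ut.Rsun)**2, g)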
# stars with linear temperature interpolation and full limb darkening information
stars = [st.Star(omega, l, m, r, ut.D10, n, ld=ld) for l, m, r in zip(L, M, R)]
# the light from such stars
light = np.array([s.integrate(inclination) for s in stars])
# remove the limb darkening information at the gravity of the stars
ind_g = np.searchsorted(ld.g, logg)
print('Gravity removed from intensity information: ' + str(ld.g[ind_g]) + \
'. \nThese should be the same as the star gravity.')
print('Gravities between which we are interpolating: ' + str(ld.g[ind_g - 1]) +\
' and ' + str(ld.g[ind_g + 1]) )
ld.fit_params = np.delete(ld.fit_params, ind_g, axis=1)
ld.g = np.delete(ld.g, ind_g)
# stars with missing limb darkening information and log gravity interpolation
stars = [st.Star(omega, l, m, r, ut.D10, n, ld=ld) for l, m, r in zip(L, M, R)]
# the light from such stars
light_log = np.array([s.integrate(inclination) for s in stars])
# stars with missing limb darkening information and linear gravity interpolation
stars = [st.Star(omega, l, m, r, ut.D10, n, ld=ld, g_method='lin') for l, m, r in zip(L, M, R)]
# the light from such stars
light_lin = np.array([s.integrate(inclination) for s in stars])
### calculate difference spectra in erg/s/ster/Hz
## proportional spectra
# keep only the light at wavelengths where the precise spectrum is non-zero in all the stars
mask = np.all(light != 0, axis = 0)
diff_log = np.abs(light_log[:, mask] / light[:, mask] - 1)
diff_lin = np.abs(light_lin[:, mask] / light[:, mask] - 1)
light = light[:, mask]
# wavelength array
lam = ld.lam[mask]
# cutoff wavelength in nm for the plot
wl = 2000
ind_wl = max(np.searchsorted(lam, wl) - 1, 1)
ind_ld_wl = max(np.searchsorted(ld.lam, wl) - 1, 1)
# truncate the spectra and the wavelength array at the cutoff wavelength
lam = lam[:ind_wl]
diff_log = diff_log[:, :ind_wl]
diff_lin = diff_lin[:, :ind_wl]
light = light[:, :ind_wl]
diff = np.concatenate((diff_lin, diff_log), axis=1)
# plot results
plt.rcParams.update({'font.size': 18})
rc('font',**{'family':'serif','serif':['Computer Modern']})
rc('text', usetex=True)
ofile = iodir + 'error_g.pdf'
f, axarr = plt.subplots(3, sharex=True)
T_y = [0.17, 0.7, 0.7]
T_x = [0.57, 0.7, 0.7]
min_x = np.min(lam) * 0.9
max_x = np.max(lam) * 1.1
max_y = 6.3
min_y = 6.3e-7
if max_y < np.max(diff):
    print('maximum y on the plot, ' + str(max_y) + \
        ', should be above the maximum y value in the data, ' + str(np.max(diff)))
# instantiate a second array of axes objects that share the same x-axis
axarr2 = [ axarr[i].twinx() for i in range(3) ]
for i in range(3):
    # spectrum axes
    axarr2[i].plot(lam, light[i][:ind_ld_wl], color='grey', linewidth=2, alpha=0.5)
    axarr2[i].tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    axarr2[i].grid(False)
    axarr2[i].set_zorder(1)

    axarr[i].set_xscale('log')
    axarr[i].set_xlim(left=min_x, right=max_x)
    axarr[i].scatter(lam, diff_lin[i], marker='o', c='b', s=6, alpha=0.5, edgecolors="none")
    axarr[i].scatter(lam, diff_log[i], marker='o', c='g', s=6, alpha=1, edgecolors="none")
    axarr[i].text(T_x[i], T_y[i], 'T = ' + str(temp[i]) + ' K', transform=axarr[i].transAxes)
    axarr[i].tick_params(axis="both")
    axarr[i].set_yscale('log')
    axarr[i].set_ylim(min_y/5, max_y*5)
    axarr[i].set_yticks([1e-6, 1e-3, 1])
    axarr[i].set_zorder(2)
    axarr[i].patch.set_alpha(0)
## add a big axes, hide its frame
ax = f.add_subplot(111, frameon=False)
# hide tick and tick label of the big axes
ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
ax.grid(False)
# display the x and y axis labels on the big axes
ax.set_xlabel(r'$\lambda$ (nm)')
ax.set_ylabel(r'$|\delta \mathcal{F}_\nu \,/\, \mathcal{F}_\nu|$', labelpad=20)
## save figure
f.savefig(ofile, dpi=200, bbox_inches='tight')
|
{"hexsha": "a32eb82f38a39cfd07fa4658f1dbde4f5b279b5e", "size": 5389, "ext": "py", "lang": "Python", "max_stars_repo_path": "pa/usr/05b_gravity_interpolation.py", "max_stars_repo_name": "mlipatov/paint_atmospheres", "max_stars_repo_head_hexsha": "75b494010a728e0682645de739752ff20a47d717", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-07-28T08:06:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-01T04:25:32.000Z", "max_issues_repo_path": "pa/usr/05b_gravity_interpolation.py", "max_issues_repo_name": "mlipatov/paint_atmospheres", "max_issues_repo_head_hexsha": "75b494010a728e0682645de739752ff20a47d717", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pa/usr/05b_gravity_interpolation.py", "max_forks_repo_name": "mlipatov/paint_atmospheres", "max_forks_repo_head_hexsha": "75b494010a728e0682645de739752ff20a47d717", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7697841727, "max_line_length": 96, "alphanum_fraction": 0.6953052514, "include": true, "reason": "import numpy", "num_tokens": 1695}
|
subroutine pl_nupd
!! ~ ~ ~ PURPOSE ~ ~ ~
!! This subroutine calculates plant nitrogen demand
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! bio_n1(:) |none |1st shape parameter for plant N uptake equation
!! bio_n2(:) |none |2nd shape parameter for plant N uptake equation
!! ihru |none |HRU number
!! pltnfr(1,:) |kg N/kg biomass|nitrogen uptake parameter #1: normal fraction
!! |of N in crop biomass at emergence
!! pltnfr(3,:) |kg N/kg biomass|nitrogen uptake parameter #3: normal fraction
!! |of N in crop biomass at maturity
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ OUTGOING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! uno3d |kg N/ha |plant nitrogen deficiency for day in HRU
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use plant_data_module
use hru_module, only : un2, uno3d, ihru, ipl
use plant_module
use organic_mineral_mass_module
implicit none
integer :: icrop !none |land cover code
integer :: j !none |hru number
integer :: l !none |counter (soil layer)
real :: uno3l !kg N/ha |plant nitrogen demand
integer :: ir !none |flag to denote bottom of root zone reached
integer :: idp ! |
real :: gx !mm |lowest depth in layer from which nitrogen may be removed
j = ihru
idp = pcom(j)%plcur(1)%idplt
pcom(j)%plm(ipl)%n_fr = (pldb(idp)%pltnfr1 - pldb(idp)%pltnfr3) * &
(1. -pcom(j)%plcur(ipl)%phuacc / (pcom(j)%plcur(ipl)%phuacc + &
Exp(plcp(idp)%nup1 - plcp(idp)%nup2 * &
pcom(j)%plcur(ipl)%phuacc))) + pldb(idp)%pltnfr3
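! n_fr above declines along an S-shaped curve from pltnfr1 at emergence
! (phuacc = 0) toward pltnfr3 at maturity as heat units accumulate.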
un2(ipl) = pcom(j)%plm(ipl)%n_fr * pl_mass(j)%ab_gr(ipl)%m
if (un2(ipl) < pl_mass(j)%ab_gr(ipl)%n) un2(ipl) = pl_mass(j)%ab_gr(ipl)%n
uno3d(ipl) = un2(ipl) - pl_mass(j)%ab_gr(ipl)%n
return
end subroutine pl_nupd
|
{"hexsha": "c712577ee4d03b56beb08fd8210dd4d5b62aca2f", "size": 2633, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/pl_nupd.f90", "max_stars_repo_name": "mikiec84/delphi", "max_stars_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-03-03T11:57:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T21:19:54.000Z", "max_issues_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/pl_nupd.f90", "max_issues_repo_name": "mikiec84/delphi", "max_issues_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 385, "max_issues_repo_issues_event_min_datetime": "2018-02-21T16:52:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T07:44:56.000Z", "max_forks_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/pl_nupd.f90", "max_forks_repo_name": "mikiec84/delphi", "max_forks_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-03-20T01:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T01:04:49.000Z", "avg_line_length": 44.6271186441, "max_line_length": 97, "alphanum_fraction": 0.4401823016, "num_tokens": 849}
|
# -*- coding: utf-8 -*-
#to get these to print all threads in spyder :
# Run > Configuration per file > Execute in an external system terminal
# And then add a read at the end of the program to prevent the console of closing
"""
Created on Fri Jan 10 16:19:58 2020
This script uses the Amadeus framework on an artificial dataset, with the aim of establishing
the virtue of the creation of averaged models.
@author: Dr. Dr. Danny E. P. Vanpoucke
@web : https://dannyvanpoucke.be
"""
import time
import os
import numpy as np
import pandas as pd
#The source is in various files in the folder subroutines,
# so need to add this to the path first
import sys
sys.path.append("subroutines/datahandling")
from DataLoading import DataFrame2NumpyArrays, SelectTarget
from HPCTools import set_num_threads
from TAmadeusFrameWork import TAmadeusFrameWork
from ArtificialDataSets import Create1DData, CreateLinear3D, Lin1DFunc, LinNDFunc, SinFunc
from MLpaperPostProcess import RunPostProcess_MLpaper
if __name__ == '__main__':
    print("=============Start of Framework==============")
    start = time.perf_counter_ns()

    # Some initialisation
    parprocs = -1  # give me the number of physical CPUs...
    # this is wrong on BReniac, because there are 2 sockets... no solution --> -2
    if len(sys.argv) >= 2:  # unless the user provided the number to use as argument
        parprocs = int(sys.argv[1])
    set_num_threads(1)  # set number of BLAS/OPENMP/MKL-threads that sklearn/numpy is allowed to spawn

    # datasizes = [10, 15, 20, 25, 30, 35, 40, 45, 50, 75, 100, 125, 150, 200, 250, 300, 400, 500, 750, 1000]
    datasizes = [20]
    predsize = 1000
    Gspread = 0.10  # for Sin3x: 0.05, linear 1D: 0.75
    MyArtificialModel = [0.50, 6.0]
    MyArtModelFunc = SinFunc    # SinFunc: function to generate the data
    MyFitModelFunc = LinNDFunc  # LinNDFunc: the ML-fit function, self-coded
    MyPredict = Create1DData(Func1D=MyArtModelFunc, length=predsize, intercept=MyArtificialModel[0],
                             coefficient=MyArtificialModel[1], dist='normal', dwidth=Gspread)
    MyFrame_Base = Create1DData(Func1D=MyArtModelFunc, length=np.amax(datasizes), intercept=MyArtificialModel[0],
                                coefficient=MyArtificialModel[1], dist='normal', dwidth=Gspread)
    NCols = 20  # 11 columns: index of run, etc. for linear fit # Poly6: 16
    # artmodname = "1Dlin"
    artmodname = "1Dsin3x"
    TargetOfChoice = 1
    NFeatures = 1
    NSKFeatures = 10  # number of features in sk-learn model 1: linear fit, 3: poly-3
    NRuns = 1000
    TTsplit = 0.2  # 20% test

    for ds in datasizes:
        start_ds = time.perf_counter_ns()
        InF = "Artificial " + artmodname + " model " + str(ds) + " datapoints."
        print(" 1. Loading data :", InF)
        MyFrame = pd.DataFrame(MyFrame_Base).iloc[0:ds].copy(deep=True)  # no -1 is needed, as Python slicing excludes the last element
        features, targets, headersfeature, headerstarget, codes = DataFrame2NumpyArrays(MyFrame, NFeature=NFeatures, TargetIndex=1)

        # store the data-sets in a file:
        # first the train/test data
        # then the 1000 point prediction data
        basedpf = "datapoints_" + artmodname + "_"
        datapointfile = basedpf + str(ds) + ".dat"
        baserf = "results_" + artmodname + "_"
        resultsfile = baserf + str(ds) + ".dat"
        if os.path.exists(datapointfile):
            os.remove(datapointfile)  # clear the file before we start
        if os.path.exists(resultsfile):
            os.remove(resultsfile)  # clear the file before we start
        dpf = open(datapointfile, "a+")  # open file for appending
        dpf.write("# %d \n" % ds)
        dpf.close()
        MyFrame.to_csv(datapointfile, header=None, index=None, sep=' ', mode='a')
        dpf = open(datapointfile, "a+")  # open file for appending
        dpf.write("\n \n# %d \n" % predsize)
        dpf.close()
        MyPredict.to_csv(datapointfile, header=None, index=None, sep=' ', mode='a')

        print(" 2. Preparation and selection of data")
        # The Set function makes a set of the array...
        # the target needs to be put in [], otherwise
        # we get a set of characters
        ModelFrame = SelectTarget(MyFrame, set(headersfeature.values), set([headerstarget[TargetOfChoice - 1]]))  # -1 as Python starts at zero
        dpf = open(resultsfile, "a+")  # open file for appending
        dpf.write("# %d %d \n" % (NRuns + 1, NCols))
        dpf.close()

        print(" 3.a. Starting model-framework: full dataset")
        Amadeus = TAmadeusFrameWork(dataset=ModelFrame, njobs=parprocs, test_size=0,
                                    maxRuns=1, printFileStatistics=resultsfile, PreStdScaler=True)
        failcnt = Amadeus.RunModelEnsemble()

        print(" 3.b. Starting model-framework: train-test splitting")
        Amadeus = TAmadeusFrameWork(dataset=ModelFrame, njobs=parprocs, test_size=TTsplit,
                                    maxRuns=NRuns, printFileStatistics=resultsfile, PreStdScaler=True)
        failcnt = Amadeus.RunModelEnsemble()

        end_ds = time.perf_counter_ns()
        print("Total time:", (end_ds - start_ds) / 1000000000, "s")
        print("Sanity checks needed : ", failcnt, " reshuffles")

    print("Starting Post-processing")
    RunPostProcess_MLpaper(basedata=basedpf, baseresult=baserf, datasizes=datasizes,
                           predictionData=MyPredict, NumDim=NFeatures, NumSKDim=NSKFeatures,
                           theoryModel=MyArtificialModel,
                           modelFunction=MyArtModelFunc,
                           fitFunction=MyFitModelFunc)

    end = time.perf_counter_ns()
    print("Totals time:", (end - start) / 1000000000, "s")

    if len(sys.argv) < 2:  # not used in another program... so ask the user to finish the terminal
        inp = input("gimme something to read")
|
{"hexsha": "accdf90eb0eedbe1409c74f038e57708fa3b75c4", "size": 5957, "ext": "py", "lang": "Python", "max_stars_repo_path": "Main_averageModelScript.py", "max_stars_repo_name": "DannyVanpoucke/Amadeus", "max_stars_repo_head_hexsha": "6604cbdca43a1e2c8c0df72d251dcfbbd116e11e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Main_averageModelScript.py", "max_issues_repo_name": "DannyVanpoucke/Amadeus", "max_issues_repo_head_hexsha": "6604cbdca43a1e2c8c0df72d251dcfbbd116e11e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Main_averageModelScript.py", "max_forks_repo_name": "DannyVanpoucke/Amadeus", "max_forks_repo_head_hexsha": "6604cbdca43a1e2c8c0df72d251dcfbbd116e11e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1287878788, "max_line_length": 157, "alphanum_fraction": 0.6538526104, "include": true, "reason": "import numpy", "num_tokens": 1608}
|
function u = poisson(varargin)
%POISSON Poisson solver in the unit ball with Dirichlet or Neumann
% boundary conditions.
% U = POISSON(F, BC, M, N, P) is the solution to the Poisson equation in
% the unit ball with right-hand side F and Dirichlet boundary data
% U(1,lambda,theta) = @(lambda,theta) BC(lambda, theta). It uses a
% discretization size of M x N x P.
%
% U = POISSON(F, BC, M) is the same as POISSON(F, BC, M, M, M).
%
% U = POISSON(F, BC, M, N, P, 'neumann') is the solution to the Poisson
% equation with right-hand side F and Neumann boundary data U(1,lambda,theta) =
% @(lambda,theta) BC(lambda, theta). It uses a discretization size of M x N x P.
%
% U = POISSON(F, BC, M, 'neumann') is the same as POISSON(F, BC, M, M, M, 'neumann').
%
% Also see HELMHOLTZ.
% Copyright 2019 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.
% Call Helmholtz command with zero frequency:
u = helmholtz(varargin{1}, 0, varargin{2:end});
end
|
{"author": "chebfun", "repo": "chebfun", "sha": "8c49396a55e46ddd57a1d108c6a8f32e37536d54", "save_path": "github-repos/MATLAB/chebfun-chebfun", "path": "github-repos/MATLAB/chebfun-chebfun/chebfun-8c49396a55e46ddd57a1d108c6a8f32e37536d54/@ballfun/poisson.m"}
|
"""
Module for unit tests that check if estimated embeddings for toy examples satisfy basic intuitions
@author Siddharth Reddy <sgr45@cornell.edu>
"""
import unittest
import logging
import pandas as pd
import numpy as np
from lentil import models
from lentil import est
from lentil import toy
logging.basicConfig()
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
class TestModels(unittest.TestCase):

    def setUp(self):
        # fixes random initializations of parameters
        # before parameter estimation
        np.random.seed(1997)

    def tearDown(self):
        pass
    def test_1d_embedding(self):
        """
        A one-dimensional embedding, where a single latent skill is enough
        to explain the data. The key observation here is that the model
        recovered positive skill gains for L1, and "correctly" arranged
        students and assessments in the latent space. Initially, Carter
        fails both assessments, so his skill level is behind the requirements
        of both assessments. Lee passes A1 but fails A2, so his skill
        level is beyond the requirement for A1, but behind the requirement
        for A2. In an effort to improve their results, Lee and Carter
        complete lesson L1 and retake both assessments. Now Carter passes
        A1, but still fails A2, so his new skill level is ahead of the
        requirements for A1 but behind the requirements for A2. Lee
        passes both assessments, so his new skill level exceeds the requirements
        for A1 and A2. This clear difference in results before completing
        lesson L1 and after completing the lesson implies that L1 had a
        positive effect on Lee and Carter's skill levels, hence the non-zero
        skill gain vector recovered for L1.
        """
        history = toy.get_1d_embedding_history()
        embedding_dimension = 1

        model = models.EmbeddingModel(
            history,
            embedding_dimension,
            using_lessons=True,
            using_prereqs=False,
            using_bias=False,
            learning_update_variance_constant=0.5)

        gradient_descent_kwargs = {
            'using_adagrad' : False,
            'rate' : 0.1,
            'debug_mode_on' : False
        }

        using_scipy_configs = [True, False]
        for using_scipy in using_scipy_configs:
            estimator = est.EmbeddingMAPEstimator(
                regularization_constant=1e-6,
                gradient_descent_kwargs=gradient_descent_kwargs,
                using_scipy=using_scipy,
                verify_gradient=False,
                debug_mode_on=False)

            model.fit(estimator)

            lee = model.student_embeddings[model.history.idx_of_student_id('Lee'), 0, 1:]
            carter = model.student_embeddings[model.history.idx_of_student_id('Carter'), 0, 1:]
            a1 = model.assessment_embeddings[model.history.idx_of_assessment_id('A1'), 0]
            a2 = model.assessment_embeddings[model.history.idx_of_assessment_id('A2'), 0]

            self.assertTrue((carter[0] < a1).all())
            self.assertTrue((a1 < lee[0]).all())
            self.assertTrue((lee[0] < a2).all())

            self.assertTrue((a1 < carter[1]).all())
            self.assertTrue((carter[1] < a2).all())
            self.assertTrue((a2 < lee[1]).all())
    def test_independent_assessments(self):
        """
        A two-dimensional embedding, where an intransitivity in assessment
        results requires more than one latent skill to explain. The key
        observation here is that the assessments are embedded on two different
        axes, meaning they require two completely independent skills. This
        makes sense, since student results on A1 are uncorrelated with
        results on A2. Fogell fails both assessments, so his skill levels
        are behind the requirements for A1 and A2. McLovin passes both
        assessments, so his skill levels are beyond the requirements for A1
        and A2. Evan and Seth are each able to pass one assessment but not
        the other. Since the assessments have independent requirements, this
        implies that Evan and Seth have independent skill sets
        (i.e. Evan has enough of skill 2 to pass A2 but not enough of
        skill 1 to pass A1, and Seth has enough of skill 1 to pass A1
        but not enough of skill 2 to pass A2).
        """
        history = toy.get_independent_assessments_history()
        embedding_dimension = 2

        model = models.EmbeddingModel(
            history,
            embedding_dimension,
            using_prereqs=False,
            using_lessons=False,
            using_bias=False,
            learning_update_variance_constant=0.5)

        gradient_descent_kwargs = {
            'using_adagrad' : False,
            'rate' : 0.1,
            'debug_mode_on' : False
        }

        using_scipy_configs = [True, False]
        for using_scipy in using_scipy_configs:
            estimator = est.EmbeddingMAPEstimator(
                regularization_constant=1e-6,
                gradient_descent_kwargs=gradient_descent_kwargs,
                using_scipy=using_scipy,
                verify_gradient=False,
                debug_mode_on=False)

            model.fit(estimator)

            mclovin = model.student_embeddings[model.history.idx_of_student_id('McLovin'), :, 1]
            fogell = model.student_embeddings[model.history.idx_of_student_id('Fogell'), :, 1]
            seth = model.student_embeddings[model.history.idx_of_student_id('Seth'), :, 1]
            evan = model.student_embeddings[model.history.idx_of_student_id('Evan'), :, 1]
            a1 = model.assessment_embeddings[model.history.idx_of_assessment_id('A1'), :]
            a2 = model.assessment_embeddings[model.history.idx_of_assessment_id('A2'), :]

            self.assertTrue((mclovin > model.assessment_embeddings[:, :]).all())
            self.assertTrue((fogell <= model.assessment_embeddings[:, :]).all())

            eps = 1.0
            self.assertTrue((seth >= a1-eps).all() and (seth > a1).any())
            self.assertTrue((seth < a2).any())
            self.assertTrue((evan >= a2-eps).all() and (evan > a2).any())
            self.assertTrue((evan < a1).any())
    def test_independent_lessons(self):
        """
        We replicate the setting in test_independent_assessments, then add two
        new students Slater and Michaels, and two new lesson modules L1
        and L2. Slater is initially identical to Evan, while Michaels is
        initially identical to Seth. Slater reads lesson L1, then passes
        assessments A1 and A2. Michaels reads lesson L2, then passes
        assessments A1 and A2. The key observation here is that the skill
        gain vectors recovered for the two lesson modules are orthogonal,
        meaning they help students satisfy completely independent skill
        requirements. This makes sense, since initially Slater was lacking
        in Skill 1 while Michaels was lacking in Skill 2, but after completing
        their lessons they passed their assessments, showing that they gained
        from their respective lessons what they were lacking initially.
        """
        history = toy.get_independent_lessons_history()
        embedding_dimension = 2

        model = models.EmbeddingModel(
            history,
            embedding_dimension,
            using_prereqs=False,
            using_lessons=True,
            using_bias=False,
            learning_update_variance_constant=0.5)

        gradient_descent_kwargs = {
            'using_adagrad' : False,
            'rate' : 0.1,
            'debug_mode_on' : False,
            'ftol' : 1e-4
        }

        using_scipy_configs = [True, False]
        for using_scipy in using_scipy_configs:
            estimator = est.EmbeddingMAPEstimator(
                regularization_constant=1e-6,
                gradient_descent_kwargs=gradient_descent_kwargs,
                using_scipy=using_scipy,
                verify_gradient=False,
                debug_mode_on=False)

            model.fit(estimator)

            mclovin = model.student_embeddings[model.history.idx_of_student_id('McLovin'), :, 1]
            fogell = model.student_embeddings[model.history.idx_of_student_id('Fogell'), :, 1]
            seth = model.student_embeddings[model.history.idx_of_student_id('Seth'), :, 1]
            evan = model.student_embeddings[model.history.idx_of_student_id('Evan'), :, 1]
            slater = model.student_embeddings[model.history.idx_of_student_id('Slater'), :, 1:]
            michaels = model.student_embeddings[model.history.idx_of_student_id('Michaels'), :, 1:]
            a1 = model.assessment_embeddings[model.history.idx_of_assessment_id('A1'), :]
            a2 = model.assessment_embeddings[model.history.idx_of_assessment_id('A2'), :]
            l1 = model.lesson_embeddings[model.history.idx_of_lesson_id('L1'), :]
            l2 = model.lesson_embeddings[model.history.idx_of_lesson_id('L2'), :]

            self.assertTrue((mclovin > model.assessment_embeddings[:, :]).all())
            self.assertTrue((fogell <= model.assessment_embeddings[:, :]).all())

            eps = 1.0
            self.assertTrue((seth >= a1-eps).all() and (seth > a1).any())
            self.assertTrue((seth < a2).any())
            self.assertTrue((slater[:, 0] >= a1-eps).all() and (slater[0] > a1).any())
            self.assertTrue((slater[:, 0] < a2).any())
            self.assertTrue((evan >= a2-eps).all() and (evan > a2).any())
            self.assertTrue((evan < a1).any())
            self.assertTrue((michaels[:, 0] >= a2-eps).all() and (michaels[0] > a2).any())
            self.assertTrue((michaels[:, 0] < a1).any())

            self.assertTrue((slater[:, 1] > model.assessment_embeddings[:, :]).all())
            self.assertTrue((michaels[:, 1] > model.assessment_embeddings[:, :]).all())

            # index of the largest component of a vector
            max_ind = lambda s: max(range(len(s)), key=lambda k: s[k])
            self.assertTrue(max_ind(l1) == max_ind(a2))
            self.assertTrue(max_ind(l2) == max_ind(a1))
    def test_lesson_prereqs(self):
        """
        We replicate the setting in test_independent_assessments, then add a new
        assessment module A3 and a new lesson module L1. All students
        initially fail assessment A3, then read lesson L1, after which
        McLovin passes A3 while everyone else still fails A3. The key
        observation here is that McLovin is the only student who initially
        satisfies the prerequisites for L1, so he is the only student who
        realizes significant gains.
        """
        history = toy.get_lesson_prereqs_history()
        embedding_dimension = 2

        model = models.EmbeddingModel(
            history,
            embedding_dimension,
            using_lessons=True,
            using_prereqs=True,
            using_bias=False,
            learning_update_variance_constant=0.5)

        gradient_descent_kwargs = {
            'using_adagrad' : False,
            'rate' : 0.01,
            'ftol' : 1e-4,
            'debug_mode_on' : False
        }

        using_scipy_configs = [True, False]
        for using_scipy in using_scipy_configs:
            estimator = est.EmbeddingMAPEstimator(
                regularization_constant=0.001,
                gradient_descent_kwargs=gradient_descent_kwargs,
                using_scipy=using_scipy,
                verify_gradient=False,
                debug_mode_on=False)

            model.fit(estimator)

            mclovin = model.student_embeddings[model.history.idx_of_student_id('McLovin'), :, 1:]
            fogell = model.student_embeddings[model.history.idx_of_student_id('Fogell'), :, 1:]
            seth = model.student_embeddings[model.history.idx_of_student_id('Seth'), :, 1:]
            evan = model.student_embeddings[model.history.idx_of_student_id('Evan'), :, 1:]
            a1 = model.assessment_embeddings[model.history.idx_of_assessment_id('A1'), :]
            a2 = model.assessment_embeddings[model.history.idx_of_assessment_id('A2'), :]
            a3 = model.assessment_embeddings[model.history.idx_of_assessment_id('A3'), :]
            l1 = model.lesson_embeddings[model.history.idx_of_lesson_id('L1'), :]
            q1 = model.prereq_embeddings[model.history.idx_of_lesson_id('L1'), :]

            self.assertTrue((mclovin[:, 0] > a1).all())
            self.assertTrue((mclovin[:, 0] > a2).all())
            self.assertTrue((mclovin[:, 0] < a3).any())

            for i in xrange(3):
                self.assertTrue((fogell[:, 0] <= model.assessment_embeddings[i, :]).any())
                self.assertTrue((fogell[:, 1] <= model.assessment_embeddings[i, :]).any())

            eps = 0.1
            for i in xrange(2):
                self.assertTrue((seth[:, i] >= a1 - eps).all() and (seth[:, i] > a1 - eps).any())
                self.assertTrue((seth[:, i] < a2).any())
                self.assertTrue((seth[:, i] < a3).any())
            for i in xrange(2):
                self.assertTrue((evan[:, i] >= a2 - eps).all() and (evan[:, i] > a2 - eps).any())
                self.assertTrue((evan[:, i] < a1).any())
                self.assertTrue((evan[:, i] < a3).any())

            self.assertTrue((mclovin[:, 1] > model.assessment_embeddings[:, :]).all())

            # prereq satisfaction term
            prereq_sat = lambda s: model.prereq_weight(s, q1)
            self.assertTrue(prereq_sat(mclovin[0]) > prereq_sat(fogell[0]))
            self.assertTrue(prereq_sat(mclovin[0]) > prereq_sat(seth[0]))
            self.assertTrue(prereq_sat(mclovin[0]) > prereq_sat(evan[0]))
# TODO: add unit tests for tv_luv_model, forgetting_model, and using_graph_prior=True

if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "3ab5bb06409ff9849caf6a7358ec5164b5411aa6", "size": 13865, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_models.py", "max_stars_repo_name": "Knewton/lentil", "max_stars_repo_head_hexsha": "957227de8c48b6c3c5dfd754da30a259b7409084", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2016-03-21T03:00:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-04T13:01:02.000Z", "max_issues_repo_path": "tests/test_models.py", "max_issues_repo_name": "wpmarinho/lentil", "max_issues_repo_head_hexsha": "957227de8c48b6c3c5dfd754da30a259b7409084", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-03-27T09:25:04.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-27T09:25:04.000Z", "max_forks_repo_path": "tests/test_models.py", "max_forks_repo_name": "wpmarinho/lentil", "max_forks_repo_head_hexsha": "957227de8c48b6c3c5dfd754da30a259b7409084", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-09-09T21:54:34.000Z", "max_forks_repo_forks_event_max_datetime": "2017-11-20T20:00:42.000Z", "avg_line_length": 42.2713414634, "max_line_length": 99, "alphanum_fraction": 0.6241615579, "include": true, "reason": "import numpy", "num_tokens": 3167}
|
requiredTypes = [ "AbstractLevel" ]

for reqType in requiredTypes
    if !isdefined( Symbol( uppercase( string( reqType[ 1 ] ) ) * reqType[ 2:end ] ) )
        include( reqType * ".jl" )
    end  # if !isdefined( Symbol( ...
end

export SubJob

type SubJob <: AbstractLevel
    # Name of the SubJob ex: X, A, B
    Name::String
    # Minimum and maximum stay in this SubJob
    MinStay::Int
    MaxStay::Int
    # A list of allowed PS. Just to maintain the same structure (no next level needed)
    NextLevel::Array{String}

    SubJob() = new("", 0, 0, Array{String}())
    SubJob(nm::String;
           min::Int = 0,
           max::Int = 0,
           next::Array{String} = Array{String}()) = new(nm, min, max, next)
end
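# Example usage (illustrative): SubJob( "A", min = 1, max = 3 )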
|
{"hexsha": "5f078212dd5acbaf37cee1333d8794596cc18126", "size": 732, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types/SubJob.jl", "max_stars_repo_name": "Omazaria/CareerPathBeDef", "max_stars_repo_head_hexsha": "da5b1cc1a70382444a348da82725d4cfe670d3c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/types/SubJob.jl", "max_issues_repo_name": "Omazaria/CareerPathBeDef", "max_issues_repo_head_hexsha": "da5b1cc1a70382444a348da82725d4cfe670d3c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types/SubJob.jl", "max_forks_repo_name": "Omazaria/CareerPathBeDef", "max_forks_repo_head_hexsha": "da5b1cc1a70382444a348da82725d4cfe670d3c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.875, "max_line_length": 86, "alphanum_fraction": 0.5956284153, "num_tokens": 222}
|
import time
import numpy as np
import py_aca
import pytest
from test_tde import setup_matrix_test
import cutde.fullspace as FS
import cutde.halfspace as HS
from cutde.aca import call_clu_aca
@pytest.mark.slow
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("F_ordered", [True, False])
@pytest.mark.parametrize("field", ["disp", "strain"])
@pytest.mark.parametrize("module_name", ["HS", "FS"])
def test_aca_slow(dtype, F_ordered, field, module_name):
    # The fast version of this test doesn't test multiple blocks and would miss
    # some potential errors.
    runner(dtype, F_ordered, field, module_name, 10, compare_against_py=True)
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("F_ordered", [True, False])
@pytest.mark.parametrize("field", ["disp", "strain"])
@pytest.mark.parametrize("module_name", ["HS"])
def test_aca_fast(dtype, F_ordered, field, module_name):
    runner(dtype, F_ordered, field, module_name, 1, compare_against_py=False)
def runner(
    dtype,
    F_ordered,
    field,
    module_name,
    n_sets,
    compare_against_py=False,
    benchmark_iters=1,
):
    """
    This checks that the OpenCL/CUDA ACA implementation is producing *exactly*
    the same results as the prototype Python implementation and that both ACA
    implementations are within the expected Frobenius norm tolerance of the
    exact calculation.
    """
    module = HS if module_name == "HS" else FS
    if field == "disp":
        matrix_fnc = module.disp_matrix
        aca_fnc = module.disp_aca
        field_spec = module.DISP_SPEC
        vec_dim = 3
    else:
        matrix_fnc = module.strain_matrix
        aca_fnc = module.strain_aca
        field_spec = module.STRAIN_SPEC
        vec_dim = 6

    pts = []
    tris = []
    set_sizes = np.arange(50, 50 + n_sets)
    set_sizes_edges = np.zeros(set_sizes.shape[0] + 1, dtype=np.int32)
    set_sizes_edges[1:] = np.cumsum(set_sizes)
    for i in range(n_sets):
        S = set_sizes[i]
        this_pts, this_tris, _ = setup_matrix_test(
            dtype, F_ordered, n_obs=S, n_src=S, seed=i
        )
        this_pts[:, 0] -= 150
        pts.append(this_pts)
        tris.append(this_tris)
    pts = np.concatenate(pts)
    tris = np.concatenate(tris)

    obs_starts = []
    obs_ends = []
    src_starts = []
    src_ends = []
    for i in range(n_sets):
        for j in range(n_sets):
            obs_starts.append(set_sizes_edges[i])
            obs_ends.append(set_sizes_edges[i + 1])
            src_starts.append(set_sizes_edges[j])
            src_ends.append(set_sizes_edges[j + 1])

    M1 = matrix_fnc(pts, tris, 0.25)
    M1 = M1.reshape((M1.shape[0] * M1.shape[1], M1.shape[2] * M1.shape[3]))

    times = []
    for i in range(benchmark_iters):
        start = time.time()
        if compare_against_py:
            M2 = call_clu_aca(
                pts,
                tris,
                obs_starts,
                obs_ends,
                src_starts,
                src_ends,
                0.25,
                [1e-4] * len(obs_starts),
                [200] * len(obs_starts),
                field_spec,
                Iref0=np.zeros_like(obs_starts),
                Jref0=np.zeros_like(obs_starts),
            )
        else:
            M2 = aca_fnc(
                pts,
                tris,
                obs_starts,
                obs_ends,
                src_starts,
                src_ends,
                0.25,
                [1e-4] * len(obs_starts),
                [200] * len(obs_starts),
            )
        times.append(time.time() - start)
    print("aca runtime, min=", np.min(times), " median=", np.median(times))

    for block_idx in range(len(obs_starts)):
        os = obs_starts[block_idx]
        oe = obs_ends[block_idx]
        ss = src_starts[block_idx]
        se = src_ends[block_idx]
        block = M1[(os * vec_dim) : (oe * vec_dim), (3 * ss) : (3 * se)]
        U2, V2 = M2[block_idx]
        if compare_against_py:
            U, V = py_aca.ACA_plus(
                block.shape[0],
                block.shape[1],
                lambda Istart, Iend: block[Istart:Iend, :],
                lambda Jstart, Jend: block[:, Jstart:Jend],
                1e-4,
                verbose=False,
                Iref=0,
                Jref=0,
                vec_dim=3 if field == "disp" else 6,
            )
            if pts.dtype.type is np.float64:
                U2, V2 = M2[block_idx]
                np.testing.assert_almost_equal(U, U2, 9)
                np.testing.assert_almost_equal(V, V2, 9)
        diff = block - U2.dot(V2)
        diff_frob = np.sqrt(np.sum(diff ** 2))
        assert diff_frob < 1e-3
if __name__ == "__main__":
    # Note: "HS" is an assumed module_name inserted here so that the call matches
    # runner's signature (module_name precedes n_sets); the original line passed
    # 40 in the module_name position and omitted n_sets.
    runner(np.float32, False, "disp", "HS", 40, compare_against_py=False, benchmark_iters=2)
|
{"hexsha": "66d489f29f76e189686655eea27459943b58cf0b", "size": 4898, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_aca.py", "max_stars_repo_name": "tbenthompson/cutde", "max_stars_repo_head_hexsha": "7b263e2ac16de536d23d8e4d3165705e2ec9a31f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-05-11T01:48:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:41:41.000Z", "max_issues_repo_path": "tests/test_aca.py", "max_issues_repo_name": "tbenthompson/cutde", "max_issues_repo_head_hexsha": "7b263e2ac16de536d23d8e4d3165705e2ec9a31f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-04-05T20:59:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T13:21:10.000Z", "max_forks_repo_path": "tests/test_aca.py", "max_forks_repo_name": "tbenthompson/cutde", "max_forks_repo_head_hexsha": "7b263e2ac16de536d23d8e4d3165705e2ec9a31f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-04-05T19:44:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T22:42:40.000Z", "avg_line_length": 31.6, "max_line_length": 86, "alphanum_fraction": 0.5749285423, "include": true, "reason": "import numpy", "num_tokens": 1248}
|
#version 330 core
in vec2 uvCoords;
in vec3 normalCameraSpace;
in vec3 toEye;
in vec3 lightDirection;
in float distance;
out vec3 color;
uniform sampler2D albedoTex;
uniform sampler2D metallicTex;
uniform sampler2D roughnessTex;
uniform sampler2D iblbrdf;
const vec3 lightColor = vec3(1.0,1.0,1.0);
const float lightPower = 20000;
const vec3 materialSpecular = vec3(1.0,1.0,1.0);
const float ambientStrength = 0.1;
#define PI 3.1415926
// Lambertian diffuse term; 1/PI normalizes the BRDF over the hemisphere.
float phong_diffuse()
{
    return (1.0 / PI);
}
vec3 phong_specular(in vec3 V, in vec3 L, in vec3 N, in vec3 specular, in float roughness)
{
    vec3 R = reflect(-L, N);
    float spec = max(0.0, dot(V, R));

    // Map roughness to a Phong exponent and clamp the normalization term.
    float k = 1.999 / (roughness * roughness);

    return min(1.0, 3.0 * 0.0398 * k) * pow(spec, min(10000.0, k)) * specular;
}
// Schlick-style Fresnel approximation between the base reflectance f0 and 1.
vec3 fresnel_factor(in vec3 f0, in float product)
{
    return mix(f0, vec3(1.0), pow(1.01 - product, 5.0));
}
void main()
{
    float A = 20.0 / dot(lightDirection, lightDirection);
    vec3 nn = normalize(normalCameraSpace);
    vec3 L = normalize(lightDirection);
    vec3 V = normalize(toEye);
    vec3 H = normalize(L + V);
    vec3 N = nn;

    vec3 base = texture2D(albedoTex, uvCoords).xyz;
    float metallic = texture2D(metallicTex, uvCoords).x;
    float roughness = texture2D(roughnessTex, uvCoords).x;

    vec3 specular = mix(vec3(0.04), base, metallic);

    float NdL = max(0.0, dot(N, L));
    float NdV = max(0.001, dot(N, V));
    float NdH = max(0.001, dot(N, H));
    float HdV = max(0.001, dot(H, V));
    float LdV = max(0.001, dot(L, V));

    vec3 specfresnel = fresnel_factor(specular, NdV);
    vec3 specref = phong_specular(V, L, N, specfresnel, roughness);
    specref *= vec3(NdL);

    // diffuse is common for any model
    vec3 diffref = (vec3(1.0) - specfresnel) * phong_diffuse() * NdL;

    // compute lighting
    vec3 reflected_light = vec3(0);
    vec3 diffuse_light = vec3(0); // initial value == constant ambient light

    // point light
    vec3 light_color = vec3(1.0) * A;
    reflected_light += specref * light_color;
    diffuse_light += diffref * light_color;

    // IBL lighting
    vec2 brdf = texture2D(iblbrdf, vec2(roughness, 1.0 - NdV)).xy;
    vec3 iblspec = min(vec3(0.99), fresnel_factor(specular, NdV) * brdf.x + brdf.y);
    reflected_light += iblspec * vec3(0.1);
    diffuse_light += vec3(0.3) * (1.0 / PI);

    vec3 result = diffuse_light * mix(base, vec3(0.0), metallic) + reflected_light;
    color = result;
}
|
{"hexsha": "75da120e96343b367f89c0c59440634eff230040", "size": 2410, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "shaders/pbr.f", "max_stars_repo_name": "Faaux/cbLib", "max_stars_repo_head_hexsha": "968f327cee0f92d06724b493c3745f3f35d64038", "max_stars_repo_licenses": ["BSD-3-Clause", "MIT-0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shaders/pbr.f", "max_issues_repo_name": "Faaux/cbLib", "max_issues_repo_head_hexsha": "968f327cee0f92d06724b493c3745f3f35d64038", "max_issues_repo_licenses": ["BSD-3-Clause", "MIT-0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shaders/pbr.f", "max_forks_repo_name": "Faaux/cbLib", "max_forks_repo_head_hexsha": "968f327cee0f92d06724b493c3745f3f35d64038", "max_forks_repo_licenses": ["BSD-3-Clause", "MIT-0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6382978723, "max_line_length": 90, "alphanum_fraction": 0.6705394191, "num_tokens": 817}
|
"""
Solver for a Quordle puzzle
https://www.quordle.com/#/
"""
import os
import pickle
import numpy as np
import helper
import solver
__author__ = "Z Feng"
max_attempts = 9
def accept_quordle_test_result():
    """
    Let the user input the word guessed and the results as 4 matching patterns
    :return: guess, pattern
    """
    while True:
        usr_input = input('Please enter the word attempted and patterns received\n')
        if usr_input == 'q':
            print('Solver exited!')
            quit()
        splits = usr_input.split()
        if len(splits) == 5:
            exit_flag = all([len(splits[0]) == len(splits[i + 1])
                             and all([letter in ('0', '1', '2') for letter in splits[i + 1]])
                             for i in range(4)])
            if exit_flag:
                break
        print('Unrecognisable input. Please try again.')
    return splits[0], (splits[1], splits[2], splits[3], splits[4])
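
# Example input (illustrative): "crane 01020 00000 20100 00001" records the guess
# "crane" plus one five-digit pattern per puzzle. A 2 marks a letter in the correct
# position (a pattern of all 2s means that puzzle is solved); 0 and 1 are presumably
# absent and misplaced letters, respectively.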
def man_solver():
"""
Manual solver of a Quordle puzzle
"""
legal_guesses = helper.get_guess_dictionary()
potential_answers = [helper.get_answer_dictionary() for i in range(4)]
solved_puzzles = [False for i in range(4)]
for attempt in range(max_attempts):
if attempt == 0:
entropies = initial_entropies
probabilities = initial_probabilities
scores = solver.compute_score_1(entropies, probabilities)
total_entropies = entropies * 4
total_probabilities = probabilities * 4
else:
total_entropies = np.zeros(len(legal_guesses))
total_probabilities = np.zeros(len(legal_guesses))
scores = np.zeros(len(legal_guesses))
for puzzle_num in range(4):
if not solved_puzzles[puzzle_num]:
entropies = solver.compute_entropy(legal_guesses,
potential_answers[puzzle_num], do_print=False)
probabilities = solver.compute_probabilities(legal_guesses,
potential_answers[puzzle_num])
total_entropies += entropies
total_probabilities += probabilities
scores += solver.compute_score_1(entropies, probabilities)
scores /= (4 - sum(solved_puzzles))
solver.print_results(legal_guesses, total_entropies, total_probabilities, scores)
typo = True # in case user has a legal typo
while typo:
guess, patterns = accept_quordle_test_result()
typo = False
for puzzle_num in range(4):
if not solved_puzzles[puzzle_num]:
similarity = helper.pattern_to_similarity(patterns[puzzle_num])
if similarity == 3 ** 5 - 1:
solved_puzzles[puzzle_num] = True
else:
potential_answers[puzzle_num] = solver.refine_potential_answers(
guess, potential_answers[puzzle_num], similarity)
if len(potential_answers[puzzle_num]) == 0:
typo = True
print('Impossible! Please try last input again!')
if all(solved_puzzles):
print('Congratulations!')
break
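# Module-level setup: the expensive lookup tables below are computed once and
# cached on disk, so later runs can load them instead of recomputing.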
# initialise similarity LUT
if os.path.exists('similarity_lut.pkl'):
with open('similarity_lut.pkl', 'rb') as pkl_file:
similarity_lut = pickle.load(pkl_file)
else:
print('Initialising similarity LUT...')
similarity_lut = helper.gen_similarity_LUT()
with open('similarity_lut.pkl', 'wb') as pkl_file:
pickle.dump(similarity_lut, pkl_file)
# initial entropies
if os.path.exists('initial_entropies.npy'):
print('loading initial_entropies.npy...')
with open('initial_entropies.npy', 'rb') as npy_file:
initial_entropies = np.load(npy_file)
else:
initial_entropies = solver.compute_entropy()
print('saving initial_entropies.npy...')
with open('initial_entropies.npy', 'wb') as npy_file:
np.save(npy_file, initial_entropies)
# initial probabilities
if os.path.exists('initial_probabilities.npy'):
print('loading initial_probabilities.npy...')
with open('initial_probabilities.npy', 'rb') as npy_file:
initial_probabilities = np.load(npy_file)
else:
initial_probabilities = solver.compute_probabilities()
print('saving initial_probabilities.npy...')
with open('initial_probabilities.npy', 'wb') as npy_file:
np.save(npy_file, initial_probabilities)
if __name__ == "__main__":
man_solver()
# EOF
|
{"hexsha": "3e1c28479203e1fee8da706ee72b08f40e761f3c", "size": 4692, "ext": "py", "lang": "Python", "max_stars_repo_path": "quordle_solver.py", "max_stars_repo_name": "fxh90/WordleSolver", "max_stars_repo_head_hexsha": "7467093b19f09811596fe53a8dad1beedb4c8d09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "quordle_solver.py", "max_issues_repo_name": "fxh90/WordleSolver", "max_issues_repo_head_hexsha": "7467093b19f09811596fe53a8dad1beedb4c8d09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quordle_solver.py", "max_forks_repo_name": "fxh90/WordleSolver", "max_forks_repo_head_hexsha": "7467093b19f09811596fe53a8dad1beedb4c8d09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1463414634, "max_line_length": 101, "alphanum_fraction": 0.6052855925, "include": true, "reason": "import numpy", "num_tokens": 1027}
|
import pandas as pd
from random import random
flow = (list(range(1,10,1)) + list(range(10,1,-1)))*100
pdata = pd.DataFrame({"a":flow, "b":flow})
pdata.b = pdata.b.shift(9)
data = pdata.iloc[10:] * random() # some noise
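# Column b lags column a by 9 samples, so a network that remembers the recent
# history of a can predict b.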
import numpy as np
def _load_data(data, n_prev = 100):
"""
data should be pd.DataFrame()
"""
docX, docY = [], []
for i in range(len(data)-n_prev):
        docX.append(data.iloc[i:i+n_prev].values)
        docY.append(data.iloc[i+n_prev].values)
alsX = np.array(docX)
alsY = np.array(docY)
return alsX, alsY
def train_test_split(df, test_size=0.1):
"""
This just splits data to training and testing parts
"""
ntrn = int(round(len(df) * (1 - test_size)))
X_train, y_train = _load_data(df.iloc[0:ntrn])
X_test, y_test = _load_data(df.iloc[ntrn:])
return (X_train, y_train), (X_test, y_test)
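# Note: the split is chronological (no shuffling), which avoids leaking future
# samples into the training set for time-series data.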
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM
in_out_neurons = 2
hidden_neurons = 50
model = Sequential()
# n_prev = 100, 2 values per x axis
model.add(LSTM(hidden_neurons, input_shape=(100, 2)))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error",
optimizer="rmsprop",
metrics=['accuracy'])
(X_train, y_train), (X_test, y_test) = train_test_split(data)
model.fit(X_train, y_train, batch_size=700, epochs=50, validation_data=(X_test, y_test), verbose=1)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
predicted = model.predict(X_test, batch_size=700)
# and maybe plot it
pd.DataFrame(predicted).to_csv("predicted.csv")
pd.DataFrame(y_test).to_csv("test_data.csv")
|
{"hexsha": "4bb4a13bb24798984b89603ceaf4ad28e694868a", "size": 1767, "ext": "py", "lang": "Python", "max_stars_repo_path": "Ground-Truth-Skeletons/Uddeshya/LSTM_test1.py", "max_stars_repo_name": "93TEI/3D_Action_Recognition", "max_stars_repo_head_hexsha": "b648f4cd8e479872c0cd9488120ada18bc64e5ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2018-05-22T08:35:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-06T09:56:07.000Z", "max_issues_repo_path": "Ground-Truth-Skeletons/Uddeshya/LSTM_test1.py", "max_issues_repo_name": "93TEI/3D_Action_Recognition", "max_issues_repo_head_hexsha": "b648f4cd8e479872c0cd9488120ada18bc64e5ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-09-19T19:32:19.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-09T02:27:06.000Z", "max_forks_repo_path": "Ground-Truth-Skeletons/Uddeshya/LSTM_test1.py", "max_forks_repo_name": "Naman-ntc/Action-Recognition", "max_forks_repo_head_hexsha": "b648f4cd8e479872c0cd9488120ada18bc64e5ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-05-06T20:48:38.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-01T07:55:09.000Z", "avg_line_length": 27.1846153846, "max_line_length": 101, "alphanum_fraction": 0.6910016978, "include": true, "reason": "import numpy", "num_tokens": 495}
|
type FractalDict
data::Dict{AbstractString,Function}
end
FractalDict()=FractalDict(Dict{AbstractString,Function}())
const frac_dict=FractalDict()
function add_frac!(fracname::AbstractString,f::Function)
frac_dict.data[fracname]=f
end
fractal_names() = keys(frac_dict.data)
function create_fractal(fractal_name::AbstractString)
if !(fractal_name in keys(frac_dict.data))
error("Unknown fractal name: $fractal_name")
end
return frac_dict.data[fractal_name]
end
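# Introspect the number of parameters a function was defined with
# (relies on Julia 0.4-era AST internals).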
@inline num_params(f::Function) = length(Base.uncompressed_ast(f.code).args[2][1])
"""
@fractal name inner_function
@fractal name zinit inner_function
Create a fractal variation and add it to the catalog.
The first macro invocation is used when the complex-valued pixel location is intended to be used
as the initial 'z' value for the fractal iteration (as in Julia sets). In this case,
`inner_function` should not take any parameters.
The second macro invocation is used when the complex-valued pixel location is intended to be used
as an extra parameter to the fractal iteration and an initial 'z' value is needed (as in Mandelbrot
sets). In this case, `zinit` is the initial 'z' value, and `inner_function` should take a
complex-valued parameter representing the pixel location.
"""
macro fractal(fracname,zinit_func...)
frac_name=string(fracname)
closure_name=symbol(frac_name*"_closure")
if length(zinit_func)==1
func=zinit_func[1]
@assert typeof(eval(func))==Function
@assert num_params(eval(func))==0
return quote
@inline function $(esc(closure_name)){T}(param::T)
@fastmath @inline f(z::Complex)=$func()
return f,param
end
add_frac!($fracname,$(esc(closure_name)))
end
elseif length(zinit_func)==2
zinit=zinit_func[1]
func=zinit_func[2]
@assert typeof(eval(func))==Function
@assert num_params(eval(func))==1
return quote
@inline function $(esc(closure_name)){T}(param::T)
@fastmath @inline f(z::Complex)=$func(param)
return f,$zinit
end
add_frac!($fracname,$(esc(closure_name)))
end
end
end
const czero=zero(Complex)
const chalf=Complex(0.5,0.0)
const cone=Complex(1.0,0.0)
const phi=(1.0+sqrt(5.0))/2.0
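# Catalogue of fractal variations: each @fractal line below registers one
# variation in the global dictionary by name.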
@fractal "mandelbrot" czero (c::Complex)->z*z+c
@fractal "mandelbrot_minvc" czero (c::Complex)->z*z-inv(c)
@fractal "mandelbrot_minvc_p25" czero (c::Complex)->z*z-inv(c)+0.25
@fractal "mandelbrot_minvc_m25" czero (c::Complex)->z*z-inv(c)-0.25
@fractal "mandelbrot_minvc_m50" czero (c::Complex)->z*z-inv(c)-0.50
@fractal "mandelbrot_minvc_m75" czero (c::Complex)->z*z-inv(c)-0.75
@fractal "mandelbrot_minvc_mmyreberg" czero (c::Complex)->z*z-inv(c)-1.40115
@fractal "mandelbrot_minvc_m2" czero (c::Complex)->z*z-inv(c)-2.0
@fractal "mandelbrot_lambda" chalf (lambda::Complex)->z*(1.0-z)*lambda
@fractal "mandelbrot_invlambda" chalf (lambda::Complex)->z*(1.0-z)*inv(lambda)
@fractal "mandelbrot_invlambda_m1" chalf (lambda::Complex)->z*(1.0-z)*(inv(lambda)+cone)
@fractal "burningship" czero (c::Complex)->Complex(abs(real(z)),abs(imag(z)))^2-c
@fractal "julia_z2_1mphi" ()->z*z+Complex(1.0-phi,0.0)
@fractal "julia_z2_phim1_phim1" ()->z*z+Complex(phi-2.0,phi-1.0)
@fractal "julia_z2_285" ()->z*z+Complex(0.285,0.0)
@fractal "julia_z2_285_01" ()->z*z+Complex(0.285,0.01)
@fractal "julia_z2_n8_156" ()->z*z+Complex(-0.8,0.156)
@fractal "julia_z2_n7629_1889" ()->z*z+Complex(-0.7269,0.1889)
@fractal "julia_expz_n65" ()->exp(z)+Complex(-0.65,0.0)
@fractal "julia_expz3_n59" ()->exp(z^3)+Complex(-0.59,0.0)
@fractal "julia_expz3_n621" ()->exp(z^3)+Complex(-0.621,0.0)
@fractal "julia_sqrtsinhz2" ()->sqrt(sinh(z^2))
@fractal "julia_sqrtsinhz2_065_122" ()->sqrt(sinh(z^2))+Complex(0.065,0.122)
@fractal "tricorn" czero (c::Complex)->z'*z'+c
|
{"hexsha": "1be98f98e5bcef3a9bcef3c2dd700cc732d2199f", "size": 3699, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/fractal_variations.jl", "max_stars_repo_name": "jblondin/Fractal", "max_stars_repo_head_hexsha": "b2ff056dd747f81400e687d0091b3b120f113b22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-31T14:11:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-31T14:11:55.000Z", "max_issues_repo_path": "src/fractal_variations.jl", "max_issues_repo_name": "jblondin/Fractal", "max_issues_repo_head_hexsha": "b2ff056dd747f81400e687d0091b3b120f113b22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fractal_variations.jl", "max_forks_repo_name": "jblondin/Fractal", "max_forks_repo_head_hexsha": "b2ff056dd747f81400e687d0091b3b120f113b22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6237623762, "max_line_length": 99, "alphanum_fraction": 0.7280346039, "num_tokens": 1272}
|
/*
* test_NDFileHDF5ExtraDimensions.cpp
*
* Created on: 10 Nov 2015
* Author: gnx91527
*/
#include <stdio.h>
#include "boost/test/unit_test.hpp"
// AD dependencies
#include <NDPluginDriver.h>
#include <NDArray.h>
#include <asynDriver.h>
#include <string.h>
#include <stdint.h>
#include <deque>
#include <boost/shared_ptr.hpp>
#include "hdf5.h"
#include "testingutilities.h"
#include "HDF5PluginWrapper.h"
#include "HDF5FileReader.h"
#include "NDFileHDF5Dataset.h"
hid_t dataspace;
NDArray* parr;
NDFileHDF5Dataset *createTestDataset(int rank, int *max_dim_size, asynUser *pasynUser, hid_t groupID, const std::string& dsetname)
{
// Add the test dataset.
hid_t datasetID = -1;
hid_t dset_access_plist = H5Pcreate(H5P_DATASET_ACCESS);
hsize_t nbytes = 1024;
hsize_t nslots = 50001;
hid_t datatype = H5T_NATIVE_INT8;
hsize_t *dims = new hsize_t[rank];
for (int i=0; i < rank-2; i++) dims[i] = 1;
for (int i=rank-2; i < rank; i++) dims[i] = max_dim_size[i];
hsize_t *maxdims = new hsize_t[rank];
for (int i=0; i < rank; i++) maxdims[i] = max_dim_size[i];
hsize_t *chunkdims = new hsize_t[rank];
for (int i=0; i < rank-2; i++) chunkdims[i] = 1;
for (int i=rank-2; i < rank; i++) chunkdims[i] = max_dim_size[i];
  // Store the dataspace in the shared global so the test cases can reuse it.
  dataspace = H5Screate_simple(rank, dims, maxdims);
hid_t cparms = H5Pcreate(H5P_DATASET_CREATE);
H5Pset_chunk(cparms, rank, chunkdims);
void *ptrFillValue = (void*)calloc(8, sizeof(char));
*(char *)ptrFillValue = (char)0;
H5Pset_fill_value(cparms, datatype, ptrFillValue);
H5Pset_chunk_cache(dset_access_plist, (size_t)nslots, (size_t)nbytes, 1.0);
datasetID = H5Dcreate2(groupID, dsetname.c_str(), datatype, dataspace, H5P_DEFAULT, cparms, dset_access_plist);
// Now create a dataset
NDFileHDF5Dataset *dataset = new NDFileHDF5Dataset(pasynUser, dsetname, datasetID);
int extraDims = rank-2;
int *extra_dims = new int[extraDims];
int *chunk_extra_dims = new int[extraDims];
for (int i=0; i < extraDims; i++){
extra_dims[i] = max_dim_size[i];
chunk_extra_dims[i] = 1;
}
int *user_chunking = new int[extraDims];
for (int i=0; i < extraDims; i++) user_chunking[i] = 1;
// Create a test array
NDArrayInfo_t arrinfo;
parr = new NDArray();
parr->dataType = NDInt8;
parr->ndims = 2;
parr->pNDArrayPool = NULL;
parr->getInfo(&arrinfo);
parr->dataSize = arrinfo.bytesPerElement;
for (unsigned int i = 0; i < 2; i++){
unsigned int dim_index = rank-(i+1);
parr->dataSize *= max_dim_size[dim_index];
parr->dims[i].size = max_dim_size[dim_index];
}
parr->pData = calloc(parr->dataSize, sizeof(char));
memset(parr->pData, 0, parr->dataSize);
parr->uniqueId = 0;
dataset->configureDims(parr, true, extraDims, extra_dims, chunk_extra_dims, user_chunking);
  delete [] dims;
  delete [] extra_dims;
  delete [] chunk_extra_dims;
  delete [] user_chunking;
  delete [] maxdims;
  delete [] chunkdims;
  free(ptrFillValue);
return dataset;
}
void testDimensions(NDFileHDF5Dataset *dataset, int ndims, int extradims, int *values)
{
hsize_t val;
int counter = 0;
// Test the maximum dimensions of the dataset
for (int i=0; i < extradims; i++){
//BOOST_TEST_MESSAGE("Verify maxdim[" << i << "] == " << values[counter]);
val = dataset->getMaxDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
for (int i=extradims; i < extradims+ndims; i++){
//BOOST_TEST_MESSAGE("Verify maxdim[" << i << "] == " << values[counter]);
val = dataset->getMaxDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
// Test the current dimension sizes of the dataset
for (int i=0; i < extradims; i++){
//BOOST_TEST_MESSAGE("Verify current dim[" << i << "] == " << values[counter]);
val = dataset->getDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
for (int i=extradims; i < extradims+ndims; i++){
//BOOST_TEST_MESSAGE("Verify current dim[" << i << "] == " << values[counter]);
val = dataset->getDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
// Test the offsets of the dataset
for (int i=0; i < extradims+ndims; i++){
//BOOST_TEST_MESSAGE("Verify offset[" << i << "] == " << values[counter]);
val = dataset->getOffset(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
// Test the virtual dimension sizes of the dataset
for (int i=0; i < extradims; i++){
//BOOST_TEST_MESSAGE("Verify current virtualdim[" << i << "] == " << values[counter]);
val = dataset->getVirtualDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
}
void testOffsets(NDFileHDF5Dataset *dataset, int ndims, int extradims, int *values)
{
hsize_t val;
// Test the offsets of the dataset
for (int i=0; i < extradims+ndims; i++){
//BOOST_TEST_MESSAGE("Verify offset[" << i << "] == " << values[i]);
val = dataset->getOffset(i);
BOOST_REQUIRE_EQUAL(val, values[i]);
}
}
void testDims(NDFileHDF5Dataset *dataset, int ndims, int extradims, int *values)
{
hsize_t val;
int counter = 0;
// Test the current dimension sizes of the dataset
for (int i=0; i < extradims; i++){
//BOOST_TEST_MESSAGE("Verify current dim[" << i << "] == " << values[counter]);
val = dataset->getDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
for (int i=extradims; i < extradims+ndims; i++){
//BOOST_TEST_MESSAGE("Verify current dim[" << i << "] == " << values[counter]);
val = dataset->getDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
}
void testMaxDims(NDFileHDF5Dataset *dataset, int ndims, int extradims, int *values)
{
hsize_t val;
int counter = 0;
// Test the maximum dimensions of the dataset
for (int i=0; i < extradims; i++){
//BOOST_TEST_MESSAGE("Verify maxdim[" << i << "] == " << values[counter]);
val = dataset->getMaxDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
for (int i=extradims; i < extradims+ndims; i++){
//BOOST_TEST_MESSAGE("Verify maxdim[" << i << "] == " << values[counter]);
val = dataset->getMaxDim(i);
BOOST_REQUIRE_EQUAL(val, values[counter]);
counter++;
}
}
void testVirtualDims(NDFileHDF5Dataset *dataset, int ndims, int extradims, int *values)
{
hsize_t val;
// Test the virtual dimension sizes of the dataset
for (int i=0; i < extradims; i++){
//BOOST_TEST_MESSAGE("Verify current virtualdim[" << i << "] == " << values[i]);
val = dataset->getVirtualDim(i);
BOOST_REQUIRE_EQUAL(val, values[i]);
}
}
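// Advance the expected offsets odometer-style: increment the fastest extra
// dimension and carry into the next slower one when it wraps around.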
void updateOffsets(int dim, int *offsets, int *vdimsizes)
{
if (dim >= 0){
offsets[dim]++;
if (offsets[dim] == vdimsizes[dim]){
offsets[dim] = 0;
updateOffsets(dim-1, offsets, vdimsizes);
}
}
}
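// Grow the expected current-dimension sizes as frames accumulate, capping each
// dimension at its virtual (configured) size.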
void updateDimensions(int frame, int dim, int *dims, int *vdimsizes)
{
dims[dim]++;
if (dims[dim] > vdimsizes[dim]){
dims[dim] = vdimsizes[dim];
}
if (dim > 0){
int nextindex = frame / vdimsizes[dim];
if (nextindex >= dims[dim-1]){
updateDimensions(nextindex, dim-1, dims, vdimsizes);
}
}
}
BOOST_AUTO_TEST_CASE(test_ExtraDatasetDimensions)
{
// Create ourselves an asyn user
asynUser *pasynUser = pasynManager->createAsynUser(0, 0);
// Open an HDF5 file for testing
std::string filename = "test_dim1.h5";
hid_t file = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, 0, 0);
BOOST_REQUIRE_GT(file, -1);
// Add a test group.
std::string gname = "group";
hid_t group = H5Gcreate(file, gname.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
BOOST_REQUIRE_GT(group, -1);
// Now create a dataset (2x3x4x10x8)
int rank = 5;
int dims[5] = {2, 3, 4, 10, 8};
NDFileHDF5Dataset *dataset = createTestDataset(rank, dims, pasynUser, group, "test_data");
// Setup the expected dimension sizes
  int maxdims[5]     = {-1, -1, -1, 10, 8}; // Maximum dimension sizes
  int testdims[5]    = { 1,  1,  1, 10, 8}; // Current dimension sizes
  int testoffsets[5] = { 0,  0,  0,  0, 0}; // Current offsets
  int vdimsizes[3]   = { 2,  3,  4};        // Virtual (extra) dimension sizes
// Test the dataset internal dimensions against our expected values
testVirtualDims(dataset, 2, rank-2, vdimsizes);
testMaxDims(dataset, 2, rank-2, maxdims);
testOffsets(dataset, 2, rank-2, testoffsets);
testDims(dataset, 2, rank-2, testdims);
// Now extend the dataset
dataset->extendDataSet(rank-3);
// Re-test the dataset internal dimensions, they should be unchanged
testVirtualDims(dataset, 2, rank-2, vdimsizes);
testMaxDims(dataset, 2, rank-2, maxdims);
testOffsets(dataset, 2, rank-2, testoffsets);
testDims(dataset, 2, rank-2, testdims);
// Setup the frame size
hsize_t framesize[5] = {1, 1, 1, 10, 8};
// Now perform test writes and extensions
for (int writes = 1; writes <= 24; writes++){
//BOOST_TEST_MESSAGE("Write frame " << writes);
// Write a frame
dataset->writeFile(parr, H5T_NATIVE_INT8, dataspace, framesize);
// Extend the dataset
dataset->extendDataSet(rank-3);
// Update the expected offset values
updateOffsets(2, testoffsets, vdimsizes);
// Update the expected dimension values
updateDimensions(writes, 2, testdims, vdimsizes);
// Re-test the dataset internal dimensions, they should be unchanged
testVirtualDims(dataset, 2, rank-2, vdimsizes);
testMaxDims(dataset, 2, rank-2, maxdims);
testOffsets(dataset, 2, rank-2, testoffsets);
testDims(dataset, 2, rank-2, testdims);
}
}
BOOST_AUTO_TEST_CASE(test_TenExtraDimensions)
{
// Create ourselves an asyn user
asynUser *pasynUser = pasynManager->createAsynUser(0, 0);
// Open an HDF5 file for testing
std::string filename = "test_dim2.h5";
hid_t file = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, 0, 0);
BOOST_REQUIRE_GT(file, -1);
// Add a test group.
std::string gname = "group";
hid_t group = H5Gcreate(file, gname.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
BOOST_REQUIRE_GT(group, -1);
// Now create a dataset (2x3x4x2x3x4x2x3x4x2x10x8)
int rank = 12;
int dims[12] = {2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 10, 8};
NDFileHDF5Dataset *dataset = createTestDataset(rank, dims, pasynUser, group, "test_data");
// Setup the expected dimension sizes
  int maxdims[12]     = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 8}; // Maximum dimension sizes
  int testdims[12]    = { 1,  1,  1,  1,  1,  1,  1,  1,  1,  1, 10, 8}; // Current dimension sizes
  int testoffsets[12] = { 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 0}; // Current offsets
  int vdimsizes[10]   = { 2,  3,  4,  2,  3,  4,  2,  3,  4,  2};        // Virtual (extra) dimension sizes
// Test the dataset internal dimensions against our expected values
testVirtualDims(dataset, 2, rank-2, vdimsizes);
testMaxDims(dataset, 2, rank-2, maxdims);
testOffsets(dataset, 2, rank-2, testoffsets);
testDims(dataset, 2, rank-2, testdims);
// Now extend the dataset
dataset->extendDataSet(rank-3);
// Re-test the dataset internal dimensions, they should be unchanged
testVirtualDims(dataset, 2, rank-2, vdimsizes);
testMaxDims(dataset, 2, rank-2, maxdims);
testOffsets(dataset, 2, rank-2, testoffsets);
testDims(dataset, 2, rank-2, testdims);
// Setup the framesize
hsize_t framesize[12] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 8};
// Now perform test writes and extensions
for (int writes = 1; writes <= 27647; writes++){
//BOOST_TEST_MESSAGE("Write frame " << writes);
// Write a frame
dataset->writeFile(parr, H5T_NATIVE_INT8, dataspace, framesize);
// Extend the dataset
dataset->extendDataSet(rank-3);
// Update the expected offset values
updateOffsets(9, testoffsets, vdimsizes);
// Update the expected dimension values
updateDimensions(writes, 9, testdims, vdimsizes);
// Re-test the dataset internal dimensions
testVirtualDims(dataset, 2, rank-2, vdimsizes);
testMaxDims(dataset, 2, rank-2, maxdims);
testOffsets(dataset, 2, rank-2, testoffsets);
testDims(dataset, 2, rank-2, testdims);
}
}
BOOST_AUTO_TEST_CASE(test_PluginExtraDimensions)
{
boost::shared_ptr<asynNDArrayDriver> driver;
boost::shared_ptr<HDF5PluginWrapper> hdf5;
// Asyn manager doesn't like it if we try to reuse the same port name for multiple drivers (even if only one is ever instantiated at once), so
// change it slightly for each test case.
std::string simport("simHDF5test"), testport("HDF5");
uniqueAsynPortName(simport);
uniqueAsynPortName(testport);
// We need some upstream driver for our test plugin so that calls to connectArrayPort don't fail, but we can then ignore it and send
// arrays by calling processCallbacks directly.
driver = boost::shared_ptr<asynNDArrayDriver>(new asynNDArrayDriver(simport.c_str(), 1, 0, 0, asynGenericPointerMask, asynGenericPointerMask, 0, 0, 0, 0));
NDArrayPool *arrayPool = driver->pNDArrayPool;
// This is the plugin under test
hdf5 = boost::shared_ptr<HDF5PluginWrapper>(new HDF5PluginWrapper(testport.c_str(),
50,
1,
simport.c_str(),
0,
0,
2000000));
// Enable the plugin
hdf5->write(NDPluginDriverEnableCallbacksString, 1);
hdf5->write(NDPluginDriverBlockingCallbacksString, 1);
size_t tmpdims[] = {1024,512};
std::vector<size_t>dims(tmpdims, tmpdims + sizeof(tmpdims)/sizeof(tmpdims[0]));
// Create some test arrays
std::vector<NDArray*>arrays(10);
fillNDArraysFromPool(dims, NDUInt32, arrays, arrayPool);
// Test method: NDFileHDF5::calcNumFrames()
int numCapture = 0;
// First try 1 extra dim, (n)4x6
hdf5->write(str_NDFileHDF5_nExtraDims, 1);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 6);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 24);
// Try 2 extra dims, 5x7x9
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 9);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 315);
// Try 3 extra dims, 2x3x4x5
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 120);
// Try 4 extra dims, 2x4x6x8x10
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 8);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 10);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 3840);
// Try 5 extra dims, 2x3x4x5x6x7
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[5], 7);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 5040);
// Try 6 extra dims, 2x3x4x5x6x7x8
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[5], 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[6], 8);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 40320);
// Try 7 extra dims, 2x3x4x5x6x7x8x9
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[5], 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[6], 8);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[7], 9);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 362880);
// Try 8 extra dims, 2x3x4x5x6x7x8x9x10
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 8);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[5], 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[6], 8);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[7], 9);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[8], 10);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 3628800);
// Try 9 extra dims, 2x3x4x5x6x7x8x9x10x11
numCapture = 0;
hdf5->write(str_NDFileHDF5_nExtraDims, 9);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[5], 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[6], 8);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[7], 9);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[8], 10);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[9], 11);
hdf5->testCalcNumFrames();
numCapture = hdf5->readInt(NDFileNumCaptureString);
BOOST_REQUIRE_EQUAL(numCapture, 39916800);
// Test method: NDFileHDF5::configureDims()
// Set 2 extra dims
hdf5->write(str_NDFileHDF5_nExtraDims, 2);
// Set multiframe true
hdf5->testSetMultiFrameFile(true);
// Set extra dim sizes n=2 x=3 y=4
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
// Set nFrameChunks = 1
hdf5->write(str_NDFileHDF5_nFramesChunks, 1);
// Set nRowChunks = 512
hdf5->write(NDFileHDF5::str_NDFileHDF5_chunkSize[1], 512);
// Set nColChunks = 1024
hdf5->write(NDFileHDF5::str_NDFileHDF5_chunkSize[0], 1024);
// Set the file write mode to stream
hdf5->write(NDFileWriteModeString, NDFileModeStream);
// Call the configure dims method
hdf5->testConfigureDims(arrays[0]);
// Verify the dimensions
BOOST_REQUIRE_EQUAL(hdf5->getDim(0), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(1), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(2), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(3), 512);
BOOST_REQUIRE_EQUAL(hdf5->getDim(4), 1024);
// Verify the maximum dimensions
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(0), 4);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(1), 3);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(2), 2);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(3), 512);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(4), 1024);
// Verify the chunk dimensions
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(0), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(1), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(2), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(3), 512);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(4), 1024);
// Verify the offsets
BOOST_REQUIRE_EQUAL(hdf5->getOffset(0), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(1), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(2), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(3), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(4), 0);
// Verify the virtual dims
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(0), 4);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(1), 3);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(2), 2);
// Set 9 extra dims
hdf5->write(str_NDFileHDF5_nExtraDims, 9);
// Set multiframe true
hdf5->testSetMultiFrameFile(true);
// Set extra dim sizes 1st=2 2nd=3 3rd=4 4th=5 5th=6 6th=7 7th=8 8th=9 9th=10 10th=11
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[0], 2);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[1], 3);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[2], 4);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[3], 5);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[4], 6);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[5], 7);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[6], 8);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[7], 9);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[8], 10);
hdf5->write(NDFileHDF5::str_NDFileHDF5_extraDimSize[9], 11);
// Set nFrameChunks = 1
hdf5->write(str_NDFileHDF5_nFramesChunks, 1);
// Set nRowChunks = 512
hdf5->write(NDFileHDF5::str_NDFileHDF5_chunkSize[1], 512);
// Set nColChunks = 1024
hdf5->write(NDFileHDF5::str_NDFileHDF5_chunkSize[0], 1024);
// Set the file write mode to stream
hdf5->write(NDFileWriteModeString, NDFileModeStream);
// Call the configure dims method
hdf5->testConfigureDims(arrays[0]);
// Verify the dimensions
BOOST_REQUIRE_EQUAL(hdf5->getDim(0), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(1), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(2), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(3), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(4), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(5), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(6), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(7), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(8), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(9), 1);
BOOST_REQUIRE_EQUAL(hdf5->getDim(10), 512);
BOOST_REQUIRE_EQUAL(hdf5->getDim(11), 1024);
// Verify the maximum dimensions
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(0), 11);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(1), 10);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(2), 9);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(3), 8);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(4), 7);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(5), 6);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(6), 5);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(7), 4);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(8), 3);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(9), 2);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(10), 512);
BOOST_REQUIRE_EQUAL(hdf5->getMaxDim(11), 1024);
// Verify the chunk dimensions
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(0), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(1), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(2), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(3), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(4), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(5), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(6), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(7), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(8), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(9), 1);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(10), 512);
BOOST_REQUIRE_EQUAL(hdf5->getChunkDim(11), 1024);
// Verify the offsets
BOOST_REQUIRE_EQUAL(hdf5->getOffset(0), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(1), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(2), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(3), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(4), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(5), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(6), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(7), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(8), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(9), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(10), 0);
BOOST_REQUIRE_EQUAL(hdf5->getOffset(11), 0);
// Verify the virtual dims
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(0), 11);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(1), 10);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(2), 9);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(3), 8);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(4), 7);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(5), 6);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(6), 5);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(7), 4);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(8), 3);
BOOST_REQUIRE_EQUAL(hdf5->getVirtualDim(9), 2);
}
|
{"hexsha": "037a2fc434edfb59a6df63af66aadd59767ac52c", "size": 25487, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ADApp/pluginTests/test_NDFileHDF5ExtraDimensions.cpp", "max_stars_repo_name": "gregoryrd/ADCore", "max_stars_repo_head_hexsha": "b4be366dec26efa362a2675521123446d90b961a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20.0, "max_stars_repo_stars_event_min_datetime": "2015-01-07T09:02:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-27T14:35:19.000Z", "max_issues_repo_path": "ADApp/pluginTests/test_NDFileHDF5ExtraDimensions.cpp", "max_issues_repo_name": "gregoryrd/ADCore", "max_issues_repo_head_hexsha": "b4be366dec26efa362a2675521123446d90b961a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 400.0, "max_issues_repo_issues_event_min_datetime": "2015-01-06T14:44:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T17:45:32.000Z", "max_forks_repo_path": "ADApp/pluginTests/test_NDFileHDF5ExtraDimensions.cpp", "max_forks_repo_name": "gregoryrd/ADCore", "max_forks_repo_head_hexsha": "b4be366dec26efa362a2675521123446d90b961a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 72.0, "max_forks_repo_forks_event_min_datetime": "2015-01-23T23:23:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T15:14:35.000Z", "avg_line_length": 37.8707280832, "max_line_length": 157, "alphanum_fraction": 0.6960803547, "num_tokens": 8791}
|
# -*- coding: utf-8 -*-
"""Unit tests for sktime classifier compatability with sklearn interfaces."""
__author__ = ["MatthewMiddlehurst"]
__all__ = [
"test_sklearn_cross_validation",
"test_sklearn_cross_validation_iterators",
"test_sklearn_parameter_tuning",
"test_sklearn_composite_classifiers",
]
import numpy as np
import pytest
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import VotingClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
# StratifiedGroupKFold, removed because it is not available in sklearn 0.24
from sklearn.model_selection import (
GridSearchCV,
GroupKFold,
GroupShuffleSplit,
HalvingGridSearchCV,
HalvingRandomSearchCV,
KFold,
LeaveOneOut,
LeavePGroupsOut,
LeavePOut,
RandomizedSearchCV,
RepeatedKFold,
ShuffleSplit,
StratifiedKFold,
StratifiedShuffleSplit,
TimeSeriesSplit,
cross_val_score,
)
from sklearn.pipeline import Pipeline
from sktime.classification.interval_based import CanonicalIntervalForest
from sktime.transformations.panel.pca import PCATransformer
from sktime.utils._testing.estimator_checks import _make_args
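# Every test below runs on both panel formats: 3D numpy arrays and nested
# pandas DataFrames.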
DATA_ARGS = [
{"return_numpy": True, "n_columns": 2},
{"return_numpy": False, "n_columns": 2},
]
# StratifiedGroupKFold(n_splits=2), , removed, not available in sklearn 0.24
CROSS_VALIDATION_METHODS = [
KFold(n_splits=2),
RepeatedKFold(n_splits=2, n_repeats=2),
LeaveOneOut(),
LeavePOut(p=5),
ShuffleSplit(n_splits=2, test_size=0.25),
StratifiedKFold(n_splits=2),
StratifiedShuffleSplit(n_splits=2, test_size=0.25),
GroupKFold(n_splits=2),
LeavePGroupsOut(n_groups=5),
GroupShuffleSplit(n_splits=2, test_size=0.25),
TimeSeriesSplit(n_splits=2),
]
PARAMETER_TUNING_METHODS = [
GridSearchCV,
RandomizedSearchCV,
HalvingGridSearchCV,
HalvingRandomSearchCV,
]
COMPOSITE_ESTIMATORS = [
Pipeline(
[
("transform", PCATransformer()),
("clf", CanonicalIntervalForest.create_test_instance()),
]
),
VotingClassifier(
estimators=[
("clf1", CanonicalIntervalForest.create_test_instance()),
("clf2", CanonicalIntervalForest.create_test_instance()),
("clf3", CanonicalIntervalForest.create_test_instance()),
]
),
CalibratedClassifierCV(
base_estimator=CanonicalIntervalForest.create_test_instance(),
cv=3,
),
]
@pytest.mark.parametrize("data_args", DATA_ARGS)
def test_sklearn_cross_validation(data_args):
"""Test sklearn cross-validation works with sktime panel data and classifiers."""
clf = CanonicalIntervalForest.create_test_instance()
fit_args = _make_args(clf, "fit", **data_args)
scores = cross_val_score(clf, *fit_args, cv=KFold(n_splits=3))
assert isinstance(scores, np.ndarray)
@pytest.mark.parametrize("data_args", DATA_ARGS)
@pytest.mark.parametrize("cross_validation_method", CROSS_VALIDATION_METHODS)
def test_sklearn_cross_validation_iterators(data_args, cross_validation_method):
"""Test if sklearn cross-validation iterators can handle sktime panel data."""
clf = CanonicalIntervalForest.create_test_instance()
fit_args = _make_args(clf, "fit", **data_args)
groups = [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10]
for train, test in cross_validation_method.split(*fit_args, groups=groups):
assert isinstance(train, np.ndarray) and isinstance(test, np.ndarray)
@pytest.mark.parametrize("data_args", DATA_ARGS)
@pytest.mark.parametrize("parameter_tuning_method", PARAMETER_TUNING_METHODS)
def test_sklearn_parameter_tuning(data_args, parameter_tuning_method):
"""Test if sklearn parameter tuners can handle sktime panel data and classifiers."""
clf = CanonicalIntervalForest.create_test_instance()
param_grid = {"n_intervals": [2, 3], "att_subsample_size": [2, 3]}
fit_args = _make_args(clf, "fit", **data_args)
parameter_tuning_method = parameter_tuning_method(
clf, param_grid, cv=KFold(n_splits=3)
)
parameter_tuning_method.fit(*fit_args)
assert isinstance(parameter_tuning_method.best_estimator_, CanonicalIntervalForest)
@pytest.mark.parametrize("data_args", DATA_ARGS)
@pytest.mark.parametrize("composite_classifier", COMPOSITE_ESTIMATORS)
def test_sklearn_composite_classifiers(data_args, composite_classifier):
"""Test if sklearn composite classifiers can handle sktime data and classifiers."""
base_clf = CanonicalIntervalForest()
fit_args = _make_args(base_clf, "fit", **data_args)
composite_classifier.fit(*fit_args)
predict_args = _make_args(base_clf, "predict", **data_args)
preds = composite_classifier.predict(*predict_args)
assert isinstance(preds, np.ndarray)
|
{"hexsha": "4dd6025db53e41d65b111e71180c935daad483d5", "size": 4816, "ext": "py", "lang": "Python", "max_stars_repo_path": "sktime/classification/tests/test_sklearn_compatability.py", "max_stars_repo_name": "biologioholic/sktime", "max_stars_repo_head_hexsha": "9d0391a04b11d22bd783b452f01aa5b4529b41a2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-22T02:45:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-22T02:45:39.000Z", "max_issues_repo_path": "sktime/classification/tests/test_sklearn_compatability.py", "max_issues_repo_name": "biologioholic/sktime", "max_issues_repo_head_hexsha": "9d0391a04b11d22bd783b452f01aa5b4529b41a2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sktime/classification/tests/test_sklearn_compatability.py", "max_forks_repo_name": "biologioholic/sktime", "max_forks_repo_head_hexsha": "9d0391a04b11d22bd783b452f01aa5b4529b41a2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4117647059, "max_line_length": 88, "alphanum_fraction": 0.7410714286, "include": true, "reason": "import numpy", "num_tokens": 1163}
|
/**
* The MIT License (MIT)
*
* Copyright © 2019 Ruben Van Boxem
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
**/
#ifndef SKUI_CSS_GRAMMAR_POSITION_H
#define SKUI_CSS_GRAMMAR_POSITION_H
#include "css/length.h++"
#include "css/position.h++"
#include "css/grammar/as.h++"
#include "css/grammar/length.h++"
#include <core/debug.h++>
#include <boost/spirit/home/x3.hpp>
#include <boost/fusion/adapted/struct.hpp>
#include <boost/fusion/adapted/struct/adapt_struct_named.hpp>
BOOST_FUSION_ADAPT_STRUCT(skui::css::position,
x,
y)
BOOST_FUSION_ADAPT_STRUCT(skui::css::length_with_offset,
value,
offset)
namespace skui::css::grammar
{
namespace x3 = boost::spirit::x3;
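  // Semantic action used below: when the vertical component is parsed first,
  // swap x and y so the synthesized css::position is always stored as
  // horizontal-then-vertical.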
constexpr auto swap_x_y = [](auto& context)
{
css::position& position = x3::_attr(context);
std::swap(position.x, position.y);
};
struct horizontal_relative_position_table : x3::symbols<css::length>
{
horizontal_relative_position_table();
} const horizontal_relative_position;
struct vertical_relative_position_table : x3::symbols<css::length>
{
vertical_relative_position_table();
} const vertical_relative_position;
const auto horizontal_position = x3::rule<struct horizontal_position, css::length_with_offset>{"horizontal_position"}
= horizontal_relative_position >> -length_percentage
| x3::lit('0') >> x3::attr(css::length{}) >> x3::attr(css::length{})
| length_percentage >> x3::attr(css::length{})
;
const auto vertical_position = x3::rule<struct vertical_position, css::length_with_offset>{"vertical_position"}
= vertical_relative_position >> -length_percentage
| x3::lit('0') >> x3::attr(css::length{}) >> x3::attr(css::length{})
| length_percentage >> x3::attr(css::length{})
;
const auto position = x3::rule<struct position_, css::position>{"position"}
%= horizontal_position >> vertical_position
| (vertical_position >> horizontal_position)[swap_x_y]
| horizontal_position >> x3::attr(css::length_with_offset{{50, unit::percentage}, {}})
| (vertical_position >> x3::attr(css::length_with_offset{{50, unit::percentage}, {}}))[swap_x_y]
;
}
#endif
|
{"hexsha": "9286be102837112c957c8b89fb5642cf1d8e72e7", "size": 3631, "ext": "h++", "lang": "C++", "max_stars_repo_path": "css/grammar/position.h++", "max_stars_repo_name": "rubenvb/skui", "max_stars_repo_head_hexsha": "5bda2d73232eb7a763ba9d788c7603298767a7d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19.0, "max_stars_repo_stars_event_min_datetime": "2016-10-13T22:44:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T20:28:15.000Z", "max_issues_repo_path": "css/grammar/position.h++", "max_issues_repo_name": "rubenvb/skui", "max_issues_repo_head_hexsha": "5bda2d73232eb7a763ba9d788c7603298767a7d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-05-16T15:15:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-16T17:01:26.000Z", "max_forks_repo_path": "css/grammar/position.h++", "max_forks_repo_name": "rubenvb/skui", "max_forks_repo_head_hexsha": "5bda2d73232eb7a763ba9d788c7603298767a7d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2017-03-07T05:37:02.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-05T03:14:48.000Z", "avg_line_length": 41.2613636364, "max_line_length": 119, "alphanum_fraction": 0.645001377, "num_tokens": 762}
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_LOG2_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_LOG2_HPP_INCLUDED
#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{
/*!
@ingroup group-exponential
    Function object implementing log2 capabilities:
    the base-two logarithm function.
@par Semantic:
For every parameter of floating type T
@code
T r = log2(x);
@endcode
is similar to:
@code
      T r = log(x)/log(2);
@endcode
    - log2(x) returns NaN for negative entries (notably Mzero
    for floating-point numbers).
@par Decorators
std_ for floating entries
@see log10, log, log1p, is_negative, Mzero
**/
Value log2(Value const& x);
} }
#endif
#include <boost/simd/function/scalar/log2.hpp>
#include <boost/simd/function/simd/log2.hpp>
#endif
|
{"hexsha": "0b48f3bdaedf4c199280117f601f3a7dc4746970", "size": 1187, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/function/log2.hpp", "max_stars_repo_name": "xmar/pythran", "max_stars_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-02-20T11:21:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-12T13:45:09.000Z", "max_issues_repo_path": "third_party/boost/simd/function/log2.hpp", "max_issues_repo_name": "xmar/pythran", "max_issues_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/function/log2.hpp", "max_forks_repo_name": "xmar/pythran", "max_forks_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-08T15:55:25.000Z", "avg_line_length": 21.1964285714, "max_line_length": 100, "alphanum_fraction": 0.5787700084, "num_tokens": 268}
|
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{Results of Modification of Physical Properties in Novel 2D materials \label{chap:5}}
\ifpdf
\graphicspath{{Chapter5/Figs/Raster/}{Chapter5/Figs/PDF/}{Chapter5/Figs/}{Chapter5/Figs/Vector/}}
\else
\graphicspath{{Chapter5/Figs/Vector/}{Chapter5/Figs/}}
\fi
This is the second part of the results of this thesis. Here we will discuss some of the possible ways to modify the physical properties of 2D materials. As before, each section will focus on a unique way to change the properties of materials, namely through the number of layers, mechanical strain, heterostructuring and defect introduction. Following the theme of the thesis, which is about novel 2D materials, we will continue to introduce other new 2D materials that have been discovered and whose properties will be modified.
\section[Number of layers: Few-layer of Calcium hydroxide]{Number of layers: Few-layer of Calcium hydroxide \footnote{This work is published:\cite{Aierken2015.porlandite}} \label{CaOH2_layers}}
\subsection{Introduction}
We have seen several monolayer systems that were extracted from layered materials such as 2D-BN and 2D-MoS$_2$. In this section, we further explore this process for alkaline earth metal hydroxides (AEMHs), which possess a layered structure in the bulk form and are exfoliated to few-layer form. The experimental synthesis and the theoretical modelling are reported in this section. In contrast to the abundant literature on graphene-like ultra-thin structures, few-layer AEMHs have not been investigated so far. Bulk forms of AEMHs are layered structures belonging to the $P\overline{3}m1$ space group\cite{structure1} and the crystal structure of layered AEMHs comprises stacked sheets of MO$_6$ (M=alkaline earth metals) edge-sharing octahedra, see \autoref{fig:str_caoh2}. At each corner of an octahedron, each O atom binds to one H atom and the latter interacts with three neighbouring hydroxyl groups of the adjacent layer. Early studies on bulk AEMHs revealed that the application of temperature and pressure may result in dramatic changes in their crystal structure and electronic properties\cite{amorphization1,amorphization2,amorphization3,amorphization4,transition1,transition2,transition3,transition4}. Moreover, early theoretical studies showed the reliability of first-principles calculations with a plane-waves basis set in combination with the GGA exchange-correlation functional for the investigation of structural and electronic properties of these materials\cite{Winkler1995,Baranek2001,Azuma2011,DArco1993}.
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\textwidth]{str_caoh2.eps}
\caption{\label{fig:str_caoh2} Atomic structure of bulk Ca(OH)$_2$: (a)
tilted view, (b) top view of one layer. }
\end{figure}
Although the structural and electronic properties of bulk AEMHs have been
investigated before\cite{Azuma2011,Pishtshev,Pishtshev1}, single layers of
these materials have never been studied and their stability is still an open
question. However, advances in experimental techniques have made exfoliation
and growth of such structures possible\cite{new1,new2}. In particular,
Portlandite, Ca(OH)$_2$, which is the main hydration product of Portland
cement, CaO, is one of the most well-known AEMHs. Characteristic properties of
ultra-thin structures of Portlandite have not been reported yet. In this study
we investigate, both experimentally and theoretically, the structural,
electronic, magnetic, vibrational and mechanical characteristics of bulk,
bilayer and monolayer Ca(OH)$_{2}$ and discuss how these properties change
with the number of layers. Notably, the results of the phonon calculations are
presented to confirm the stability of the newly proposed 2D material.
To assess the mechanical strength of the material, in addition to the elastic moduli introduced in \autoref{chap:3}, here we calculate another useful parameter that is closely related to the Young's modulus, called the in-plane stiffness. We
focus on the harmonic range of elastic deformation, where the structure
responds linearly to the strain $\epsilon$. The stretching of Ca(OH)$_{2}$ is
achieved by increasing the equilibrium lattice constant $a_{0}$ by $\Delta a$,
to attain the axial strain $\epsilon = \Delta a/a_{0}$. We optimized the
atomic structure at each increment of the strain, $\Delta\epsilon = 0.01$, and
calculated the total energy under strain, $E_{T}(\epsilon)$. The strain
energy is then given by $E_{S} = E_{T}(\epsilon) - E_{T}(\epsilon=0)$,
namely the total energy at a given strain $\epsilon$ minus the total energy at
zero strain. Using the following formula, one can then calculate the in-plane
stiffness:
\begin{equation}
C = (\dfrac{1}{A_{0}})(\dfrac{d^{2}E_{S}}{d\epsilon^{2}}),
\end{equation}
where A$_{0}$ is the equilibrium area of the supercell.
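In practice, one convenient way to evaluate this second derivative is to fit the computed strain energies in the harmonic range with a parabola, $E_{S}(\epsilon) \approx k\epsilon^{2}$, where $k$ is a fit coefficient; the in-plane stiffness then follows as
\begin{equation}
C = \dfrac{2k}{A_{0}}.
\end{equation}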
As explained in detail in the following sections, unit cells including one Ca,
two O, and two H form the primitive cells of both monolayer and bulk structures,
while it is doubled for a bilayer. The cohesive energy per unit cell,
$E_{coh}$ is presented in \autoref{tab:str2_caoh2} and is calculated according to the formula:
$E_{coh}=E_{tot}-nE_{Ca}-2nE_O-2nE_H$, where $E_{tot}$ is the total energy
of the unit cell of Ca(OH)$_2$, $E_X$ is the single atom total energy of atom $X$
and $n$ is the number of Ca atoms for the corresponding unit cell, i.e.
$n=1$, $n=2$ and $n=1$ for monolayer, bilayer and bulk, respectively.
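For the bilayer, for instance, $n=2$ and the expression reads
$E_{coh}=E_{tot}-2E_{Ca}-4E_O-4E_H$.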
\subsection{Computational details}
\begin{footnotesize}
\begin{description}
\item[Simulation program:] VASP and PHON\cite{alfe}
\item[Energy cut-off:] 500 eV
\item[Pseudopotentials:] PBE-GGA(PAW)
\item[k points (Monkhorst-Pack):] 35$\times$35$\times$1 and 25$\times$25$\times$11 for few-layer and bulk Ca(OH)$_{2}$, respectively
\item[Vacuum:] 25~\AA
\item[Energy and force convergence criterion:] 10$^{-5}$ eV and 10$^{-2}$ eV/\AA, respectively
\item[vdW corrections:] DFT-D2 method of Grimme \cite{Grimme}
\item[Charge analysis:] Bader's charge analysis method\cite{Bader1,Bader2,Bader3}
\end{description}
\end{footnotesize}
\subsection{Experimental measurements}
Before our theoretical investigation of few-layer Ca(OH)$_{2}$, we
present the experimental realization and detailed theoretical analysis of the
characteristics of bulk Ca(OH)$_{2}$ crystals.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.6\textwidth]{exp_caoh2.eps}
\caption{\label{fig:exp_caoh2} (a) Optical image of the crystal structure and (b) Raman
spectrum measured using a 488 nm laser in the low and the high frequency regions.
The fundamental phonon branches are located in the low frequency range (100-400 cm$^{-1}$); the high frequency peak (~3620 cm$^{-1}$) is associated with the OH stretching mode. (c) XRD measurements.}
\end{figure}
Ca(OH)$_{2}$ crystals were grown using the hydrolysis technique by using
Ca$_{3}$SiO$_{5}$ micro-pallets. Ca$_{3}$SiO$_{5}$ was mixed at different water
to solid ratios ranging from 0.2 to 0.9 by molar weight. The mixture was heated
up to 40 $^{\circ}\mathrm{C}$ in a controlled reaction chamber for 3 hours and
controllably cooled down to 5 $^{\circ}\mathrm{C}$ for 24 hours using a
temperature controller. The growth time depends on the total water to solid
ratio as well as the growth temperature. Growth time was around 8 hours for 0.6
water to solid ratio and 40 $^{\circ}\mathrm{C}$ growth temperature.
Longer growth time typically resulted in a dendritic morphology where the
growth was mostly in the c-axis direction. Synthesized crystals displayed a rather sharp Raman feature at 280 cm$^{-1}$ (full width at half maximum around 7 cm$^{-1}$), and our XRD measurements displayed sharp (00l) reflections at 19.1, 39, 56.2 and 77.7 degrees, implying that the crystals have a lamellar nature.
Synthesized crystals were around 0.1-2 mm in size and they
were filtered from the solution. After the filtering process, crystallites were
washed off using 18.2 MOhm.cm DI wafer multiple times and dried under inert Ar
gas. Crystallites were exfoliated using micro-mechanical exfoliation technique
onto thermal silicon oxide / Si substrates. We find that the contrast
was improved for an oxide thickness around 265-285 nm.
Exfoliated flakes displayed rather sharp edges (see \autoref{fig:exp_caoh2}) with
well-defined angles of 120\textdegree~and 60\textdegree, implying that the
materials are highly crystalline. Interestingly, synthesized Ca(OH)$_{2}$ flakes
are layered, in agreement with theoretical calculations, and these flakes can be
easily exfoliated onto different substrates using the Scotch-tape technique.
The exfoliated flakes do not show any signs of structural imperfection or pit
formation, and overall rather flat surfaces can be obtained. In \autoref{fig:exp_caoh2}, the
yellowish looking regions correspond to regions where the thickness is
around 50-100 nm (50-100 layers), while the blue features are only 10-50 nm in
thickness. Considering the ease of exfoliating this material, we predict, based on both
experiment and theory, that it can eventually be isolated down to mono- and
few-layers on various substrates.
In addition, micro-Raman measurements were performed using a 488 nm laser focused onto a
2 micron spot at a relatively high power of 10 mW. We noticed that
few-layers of Ca(OH)$_{2}$ were not subject to local over-heating /
decomposition effects, unlike TMDs (MoS$_2$,
WS$_2$, etc.) which typically decompose at around 100 $\mu$W using a
similar laser excitation spot. We attribute this to the low absorption of the
material, associated with its rather large band gap. Raman measurements
displayed various peaks in the 100-1000 cm$^{-1}$ range. The high-frequency peak
at 3620 cm$^{-1}$ is associated with the O-H stretching mode $A_{1g}$. In
addition, the low-frequency $E_u(T)$ mode is found at 280 cm$^{-1}$.
Here, we note that even though this material is a direct gap semiconductor,
its band gap is well beyond our detector's range, and since such a wide-gap
insulator cannot be excited at this laser wavelength, PL measurements are
virtually impossible.
\subsection{Structural properties}\label{strcuture}
The bulk structure of Portlandite is formed by the stacking of individual Ca(OH)$_2$ monolayers on top of each other, see \autoref{fig:str_caoh2}. As we will examine in detail in the following paragraphs, the AA stacking is the ground-state atomic configuration for bulk and multilayer structures of Ca(OH)$_2$. In \autoref{tab:str_caoh2}, the optimized lattice parameters of the bulk structure are presented together with experiments and another theoretical calculation. Our results are consistent with Ref. \cite{Pishtshev} and both agree well with experiments, which supports the reliability of our calculations.
In the 5-atomic hexagonal primitive unit cell of bulk Ca(OH)$_2$, the Ca atom sits at the geometrical center of the cell, i.e. $\left\lbrace 1/2a, 1/2b, 1/2c \right\rbrace$. Two O and two H atoms form two hydroxyl groups (-OH$^-$) located symmetrically with respect to the Ca atom. In this arrangement, coordinates of H and O only differ by their positions along the \textbf{c} lattice axis and their fractional coordinates can be given as $\left\lbrace1/6a, 1/6b, (1/2c-c_O) \right\rbrace$ and $\left\lbrace 1/6a, 1/6b, (1/2c-c_H) \right\rbrace$ for one hydroxyl, $\left\lbrace 5/6a, 5/6b, (1/2c+c_O) \right\rbrace$ and \\ $\left\lbrace 5/6a, 5/6b, (1/2c+c_H) \right\rbrace$ for the other one, where c$_O$ and c$_H$ are the vertical shifts of the positions of O and H atoms from the Ca plane in units of \AA~, respectively.
\begin{table}[htbp]
\centering
\caption{\label{tab:str_caoh2}
Comparison of the calculated structural parameters of bulk Ca(OH)$_2$ with experimental results and with theoretical results from another reference: lattice constants
$a$ and $c$, volume $V$ and $c/a$ ratio. }
\begin{threeparttable}
\begin{tabularx}{0.92\linewidth}{X|XXXX}
\hline\hline
Structure parameters & Exp. \tnote{a} & Exp. \tnote{b} & PBE-PAW (this work) & PBE-PAW \tnote{c} \\
\hline
$a$ (\AA) & 3.589 & 3.592 & 3.614 & 3.612 \\
$c$ (\AA) & 4.911 & 4.906 & 4.982 & 4.942 \\
$V$ (\AA$^3$) & 54.78 & 54.82 & 56.35 & 55.85 \\
$c/a$ & 1.368 & 1.366 & 1.379 & 1.368 \\
\hline\hline
\end{tabularx}
\begin{tablenotes}
\begin{footnotesize}
\item[a]Ref. \cite{exp.1}
\item[b]Ref. \cite{exp.2}
\item[c]Ref. \cite{Pishtshev}
\end{footnotesize}
\end{tablenotes}
\end{threeparttable}
\end{table}
In the optimized structure, the lattice constants $a$ and $c$ are 3.61 \AA~and 4.98 \AA~in the bulk structure and the parameters c$_O$ and c$_H$ are calculated to be 1.15 \AA~and 2.12 \AA . The bond lengths of Ca-O and O-H are 2.36 \AA~and 0.97 \AA . The interlayer distance, defined as the distance between the uppermost H layer of the lower layer and the lowermost H layer of the upper layer, is found to be 0.49 \AA . Compared to other lamellar bulk crystal structures such as graphite (3.58 \AA) and MoS$_{2}$ (3.42 \AA)\cite{can}, Ca(OH)$_{2}$ layers are more closely stacked on top of each other.
Our calculations revealed that going from bulk to monolayer the in-plane lattice parameter $a$ changes to 3.62 \AA . In our calculations, the $c$ lattice parameter in the hexagonal unit cell of the monolayer is set to 25 \AA~in order to avoid interlayer interaction between the adjacent layers. In the monolayer Ca(OH)$_2$, the parameters c$_O$ and c$_H$ are calculated to be c$_O$=1.14 \AA~and c$_{H}$=2.10 \AA , respectively. The Ca-O and O-H bond distances are 2.38 \AA~and 0.97 \AA~in the monolayer, respectively. We observe only small changes as the system goes from bulk to monolayer, and some of the structural parameters are even left unchanged. From this we conclude that the interlayer interaction in Ca(OH)$_2$ is quite weak. In order to study the interlayer interaction, we further investigate its effect on stacking, and by including the vdW correction in the functional we are able to identify the nature of this interaction.
\begin{table}[htbp]
\centering
\caption{\label{tab:str2_caoh2}
Calculated results for different structures of Ca(OH)$_2$: lattice constant
$a$, vertical shifts of the O and H atoms c$_O$/c$_H$, Ca-O and O-H bond lengths, energy band gap $E_{gap}$, cohesive energy
per atom $E_{coh}$, charge transfer from the Ca atom to the O atoms $\Delta Q$, in-plane Young's modulus
$E_{xx},E_{yy}$, in-plane Poisson's ratio $\nu_{xy}$, in-plane shear
modulus $G_{xy}$ and in-plane stiffness $C$. For comparison, theoretical results for the same quantities for BN are shown in the last row.}
\begin{threeparttable}
\adjustbox{max width=\textwidth}{%
\begin{tabular}{l|llllllllll}
\hline\hline
System & $a$ & c$_O$/c$_H$ & Ca-O/O-H & $E _{gap}$ & $E_{coh}$ & $\Delta Q$ &
$E_{xx},E_{yy}$ & $\nu_{xy}$ & $G_{xy}$ & $C$\\
& (\AA) & (\AA/\AA) & (\AA/\AA) & (eV)& (eV) & (e) & (N/m) & & (N/m) & (J/m$^{2}$)\\
\hline
\textbf{Bulk Ca(OH)$_{2}$} & 3.61 & 1.15/2.12 & 2.36/0.97 & 4.08 & 4.52 & 1.6
& 55.0 & 0.30 & 21.23 & 60.1\\
\textbf{2L Ca(OH)$_{2}$} &3.62& 1.15/2.12 & 2.38/0.97 & 3.70 & 4.48 & 1.6 & 50.7 & 0.32 & 19.16 & 55.6 \\
\textbf{1L Ca(OH)$_{2}$} &3.62& 1.14/2.11 & 2.38/0.97 & 3.67 & 4.39 & 1.6 & 50.7 & 0.33 & 19.08 & 53.2 \\
\textbf{1L BN} &2.51\tnote{a}& - & 1.45\tnote{a}~~\tnote{b} & 4.64\tnote{a} & 8.82\tnote{a} & 0.43\tnote{a}~~\tnote{c} & 278.2\tnote{d} & 0.22\tnote{d} & 113.5\tnote{d} & 267\tnote{e} \\
\hline\hline
\end{tabular}}
\begin{tablenotes}
\begin{footnotesize}
\item[a]Ref. \cite{Topsakal}
\item[b]B-N bond length
\item[c]charge transfer from B to N
\item[d]Ref. \cite{Peng}
\item[e]Ref. \cite{silicene-prb}
\end{footnotesize}
\end{tablenotes}
\end{threeparttable}
\end{table}
Individual layers of layered structures such as graphite, hex-BN and TMDs
are held together mainly by vdW forces to form the bulk layered structure.
Such a weak interaction stems from dynamical correlations between
fluctuating charge distributions in neighboring layers. Here we
investigated the energies of various bilayer configurations. As presented
in \autoref{fig:stack_caoh2} there are six possible types of stacking between two
Ca(OH)$_2$ monolayers. Similar to the stacking nomenclature of bilayer graphene,
we classify the stacking types as either AA or ABn (n=1,2,...,5). The
same type of atoms from different monolayers sit on top of each other in AA
stacking, whereas AB stackings can be reached by shifting one of the layers along
certain lattice-vector directions. One set of AB stackings is realized by shifting
the second layer of the AA stacking towards [$\overline{1}\overline{1}0$],
which gives stacking AB1, or towards the [110] direction, which
gives stacking AB2, see the first row in \autoref{fig:stack_caoh2}. Another set of
bilayers is obtained by first flipping the second layer of the AA stacking upside down,
which gives stacking AB3; AB4 and AB5 can then be constructed by applying the
same shifts to the second layer of AB3 towards the [$\overline{1}\overline{1}0$]
and [110] directions of the first layer, respectively. After relaxation of all
stackings, the variation of the \textbf{a} lattice constant among the different
stacking types is less than 0.01 \AA . The smallest interlayer
distance, as defined previously for the bulk, occurs for AA stacking and equals
0.49 \AA , i.e. the same as in the bulk. For the AB
stackings the interlayer distance is 1$\sim$2 \AA~larger than for AA
stacking. As depicted in \autoref{fig:stack_caoh2}, AA stacking is 96$\sim$137 meV
per formula unit more favorable than all other possible stacking types and hence
corresponds to the lowest-energy configuration.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.8\textwidth]{stack_caoh2.eps}
\caption{\label{fig:stack_caoh2} Differently stacked bilayers (bottom layer is blurred)
and their energy differences with respect to the AA stacking of Ca(OH)$_2$, i.e.
$\Delta$E=E$_{\text{ABX}}$-E$_{\text{AA}}$, (X=1,2,...,5). Energies are given
per formula unit of Ca(OH)$_2$. Blue, green and red circles denote the Ca atoms,
upper hydroxyl groups and lower hydroxyl groups, respectively. For clarity, the
bottom layer is shifted slightly.}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.8\textwidth]{int_caoh2.eps}
\caption{\label{fig:int_caoh2} Interlayer interaction energy per formula unit of
AA-stacked bilayer Ca(OH)$_2$. Blue and red curves are for GGA calculations
without and with vdW correction, respectively.}
\end{figure}
To investigate the nature of the interlayer interaction, we
have calculated the interlayer interaction energy (IE) for the AA stacked
bilayer structure of Ca(OH)$_2$. The IE is the energy difference between the
total energy at a specific interlayer distance and that of a well
separated bilayer. The plot of IE versus interlayer distance is shown in \autoref{fig:int_caoh2}, where the energy of the well separated bilayer is defined as
0 eV. Two sets of calculations were performed: one set considers only the GGA
exchange-correlation functional, while the other includes the vdW correction on top
of GGA. At the optimized interlayer distance for the bilayer structure,
almost 2/3 of the attractive interaction comes from the vdW
interaction, which is consistent with \citet{DArco1993}. They stated that the
interlayer interaction in Brucite, which is isomorphous to Portlandite, is
mainly a dispersion-type interaction. The interlayer interaction in Ca(OH)$_2$ is thus mainly a weak vdW-type interaction. Our GGA+vdW calculations revealed that the interlayer interaction between two layers of Ca(OH)$_2$ (149 meV per formula unit) is much stronger than that between bilayers of MoS$_2$ (76 meV per formula unit).
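As an illustration of how the IE curve in \autoref{fig:int_caoh2} is assembled, the sketch below subtracts the large-separation reference from a set of total energies; the (distance, energy) pairs are hypothetical placeholders, chosen only so that the minimum reproduces the magnitude quoted above.
\begin{verbatim}
# Sketch of the IE construction: IE(d) = E(d) - E(d -> infinity),
# per formula unit.  The (d, E) pairs are hypothetical placeholders.
import numpy as np

d = np.array([0.30, 0.49, 0.80, 1.50, 3.00, 8.00])               # Angstrom
E = np.array([-35.20, -35.40, -35.30, -35.15, -35.11, -35.102])  # eV/cell

n_formula = 2                  # two Ca(OH)2 formula units in the bilayer cell
IE = (E - E[-1]) / n_formula   # well-separated bilayer defines the zero
print(IE * 1000)               # meV per formula unit; minimum -149 at d=0.49
\end{verbatim}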
\subsection{Electronic properties}\label{sec:electronic}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{elec_caoh2.eps}
\caption{\label{fig:elec_caoh2} (a) and (d) are the band structures,
(b) and (e) are the partial DOS, and (c) and (g) are the band- and $k$-point-decomposed charge densities of bulk (c) and monolayer (g) Ca(OH)$_2$, respectively. The charge densities correspond to the band edges indicated in (a) and (d); isovalues are kept constant. (f) Band structure around the $\Gamma$ point with (dashed line) and without (solid line) spin-orbit coupling. }
\end{figure}
Our Bader charge transfer analysis showed that the final (initial) electron
charges on the Ca, O and H atoms after (before) the formation of the crystal are
6.4$e$ (8.0$e$), 7.4$e$ (6.0$e$) and 0.4$e$ (1.0$e$), respectively. Therefore,
in the bulk structure of Ca(OH)$_2$, the Ca-O bonds, which are
mostly ionic in character, are formed through a 0.8$e$ charge transfer
from the Ca atom to each O atom. The charge transfer remains essentially unchanged in the monolayer structure, except that the remaining charge on the H atoms is 0.6$e$ in the monolayer.
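As a simple consistency check on these Bader charges, the sketch below verifies that the per-formula-unit charge transfer balances, i.e. that the charge lost by Ca and H equals the charge gained by O (the numbers are taken directly from the text).
\begin{verbatim}
# Consistency check of the bulk Bader charges per Ca(OH)2 formula unit.
initial = {"Ca": 8.0, "O": 6.0, "H": 1.0}   # valence charges before bonding
final   = {"Ca": 6.4, "O": 7.4, "H": 0.4}   # Bader charges in the crystal
count   = {"Ca": 1,   "O": 2,   "H": 2}     # atoms per formula unit

transfer = {a: count[a] * (final[a] - initial[a]) for a in initial}
print(transfer)                # {'Ca': -1.6, 'O': 2.8, 'H': -1.2}
print(sum(transfer.values()))  # ~0.0 -> overall charge neutrality
\end{verbatim}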
Our calculations of the electronic structure reveal that bulk Ca(OH)$_2$ is
an insulator with a 4.37 eV direct band gap. As shown in \autoref{fig:elec_caoh2}(a), the VBM and the CBM are located at the $\Gamma$ point. The partial density of
states (DOS) shown in \autoref{fig:elec_caoh2}(b) indicates that the major
contribution to the states at the valence and conduction band edges originates
from the O atoms, while deeper in the conduction band, states are mainly composed
of the orbitals of Ca. The orbital character of a state in a particular band can
also be deduced from the band- and $k$-point-decomposed charge density. As seen
from \autoref{fig:elec_caoh2}(c), states at the top of the valence band have O-$p_x$
and O-$p_y$ orbital character, and the hybridization of these states is also
shown in the same figure. The CBM has some $p_z$ orbital
character from the O atoms, but as the energy of the states increases, the $d$
orbitals of the Ca atoms start to contribute, see \circled{8} in the same figure.
Electronic properties of Ca(OH)$_2$ are quite different from similar
2D graphene-like structures. Unlike TMDs (such as MoS$_2$ and
WSe$_2$) that exhibit indirect-to-direct band gap crossover when going
from bulk to a single-layer structure, Ca(OH)$_2$ remains a direct band gap
semiconductor independent of the number of layers. Although the energy band gap
at the $\Gamma$ point decreases from 4.03 to 3.67 eV for a monolayer structure,
the electronic dispersion of the valence band edge remains almost unchanged, see
\autoref{fig:elec_caoh2}(d). As shown in \autoref{fig:elec_caoh2}(e)
the conduction band states mainly originate from Ca atoms, while the valence states
are mainly composed of the orbitals of O atoms.
Our magnetic-state analysis shows that unless a defect is formed in/on the
structure, there is no spin polarization in the ground state of either bulk or monolayer Ca(OH)$_2$. Therefore, Ca(OH)$_2$ is a non-magnetic insulator regardless of its dimensionality.
Moreover, the spin-orbit interaction has no
considerable effect on the bond lengths and the overall electronic dispersion
(except for a 26 meV splitting in the VBM at the
$\Gamma$ point, see \autoref{fig:elec_caoh2}(f)). Due to the inversion symmetry of Ca(OH)$_2$, the
degeneracy of the spin-up and spin-down states remains, as confirmed
by our calculations.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.9\textwidth]{suf_caoh2.eps}
\caption{\label{fig:suf_caoh2} Charge densities of the two lowest conduction bands of
monolayer and bilayer Ca(OH)$_2$ at the $\Gamma$ point.}
\end{figure}
The band- and $k$-point-decomposed charge densities in \autoref{fig:elec_caoh2} are
plotted with the same isosurface level for comparison. However, when we further
reduce the isosurface level at \circled{4} in (d) and (g) of \autoref{fig:elec_caoh2}, which is the lowest conduction band of the
monolayer at the $\Gamma$ point, $E_{c1}^{~\Gamma}$, the charge density forms a planar
state parallel to the layer on both sides, see \autoref{fig:suf_caoh2}(a). This
is also the case for the second lowest non-spin-resolved conduction band at the same
$k$-point, $E_{c2}^{~\Gamma}$, see \autoref{fig:suf_caoh2}(b). These two states are
important due to their unique character and their energies lying right below the ionization energy. Such exceptional states
with free-electron-like dispersion were reported before\cite{Posternak1,Posternak2} for
doped graphite. To study the trend in these states, the same states were plotted
for bilayer Ca(OH)$_2$, see (c) and (d) in \autoref{fig:suf_caoh2}. $E_{c1}^{~\Gamma}$ and $E_{c2}^{~\Gamma}$ have energies below the ionization energy; therefore, electrons remain bound to both sides of the monolayers, as seen from the charge density.
\subsection{Mechanical properties}
We present the quantities that describe the mechanical
properties of Ca(OH)$_2$ in \autoref{tab:str2_caoh2}.
First, the in-plane Young's modulus of the bulk structure is calculated.
Bulk Ca(OH)$_2$ has an in-plane Young's modulus of 55.0 N/m and an in-plane shear
modulus of 21.23 N/m. Both quantities indicate that bulk Ca(OH)$_2$ is flexible
under in-plane tensile and shear deformation. In addition, bulk Ca(OH)$_2$ has
an in-plane Poisson's ratio of 0.30, and its in-plane
stiffness is calculated to be 60.1 J/m$^{2}$.
Going from bulk to bilayer Ca(OH)$_2$, we see a reduction in both the
in-plane Young's modulus and the in-plane shear modulus, which are 50.7 N/m and
19.16 N/m, respectively. The in-plane Poisson's ratio, on the other hand,
increases slightly to 0.32, so the material becomes more spongy-like as opposed to
cork-like\cite{poisson}. In addition, the in-plane stiffness of
bilayer Ca(OH)$_{2}$ is calculated to be 55.6 J$/$m$^{2}$.
We found that monolayer Ca(OH)$_2$ has a quite low in-plane Young's modulus
(50.7 N/m) when compared to BN (278.2 N/m). The in-plane Poisson's ratio (0.33)
and the in-plane shear modulus (19.08 N/m) of the monolayer are similar to
those of the bilayer; for BN they are 0.22 and 113.5 N/m, respectively. The
calculated in-plane stiffness of monolayer Ca(OH)$_{2}$ is 53.2
J$/$m$^{2}$.
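For reference, assuming the standard relations for a 2D hexagonal (elastically isotropic) sheet, the in-plane quantities quoted above follow from the 2D elastic constants as
\begin{equation}
E_{xx}=\frac{C_{11}^{2}-C_{12}^{2}}{C_{11}},\qquad
\nu_{xy}=\frac{C_{12}}{C_{11}},\qquad
G_{xy}=C_{66}=\frac{C_{11}-C_{12}}{2}.
\end{equation}
As a consistency check, the bulk values $E_{xx}=55.0$ N/m and $\nu_{xy}=0.30$ imply $C_{11}\approx 60.4$ N/m and $C_{12}\approx 18.1$ N/m, and hence $G_{xy}\approx 21.2$ N/m, in line with the shear modulus listed in \autoref{tab:str2_caoh2}.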
\subsection{Vibrational properties}\label{stability}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{vib_caoh2.eps}
\caption{\label{fig:vib_caoh2} Phonon dispersion of monolayer Ca(OH)$_2$.}
\end{figure}
Lastly, for the analysis of the vibrational spectrum and further examination of
the dynamical stability of monolayer Ca(OH)$_2$, we calculated the phonon
spectrum using both the first-principles small displacement method
(SDM)\cite{alfe} and density functional perturbation theory
(DFPT)\cite{baroni}. Here the non-quadratic dispersion of the
flexural mode around the zone center is directly related to an insufficient
FFT grid along the vacuum direction. It is seen from \autoref{fig:vib_caoh2} that,
similar to the Raman shift measurements on the bulk crystal
structure, the monolayer material also has high-frequency OH stretching
modes at 3700-3800 cm$^{-1}$.
Further analysis of the phonon
branches shows that the decomposition of the vibrational representation of the optical
modes at the zone center is $\Gamma = 4E_{u} + 2A_{2u} + 4E_{g} + 2A_{1g}$. As
shown in the right panel of \autoref{fig:vib_caoh2}, there are four Raman-active phonon
branches around 240, 350, 390 and 3700-3800 cm$^{-1}$. It is also worth noting
that, differing from TMD structures in the 1T phase, the presence of H atoms
results in the existence of two different $E_{g}$ and $A_{1g}$ modes. Here the
phonon dispersion has real eigenfrequencies in the whole Brillouin zone,
which is another indication of the stability of monolayer Ca(OH)$_2$.
\subsection{Summary}\label{disc}
By performing first-principles calculations on bulk, bilayer and
monolayer Ca(OH)$_2$, and by experimentally confirming the layered structure of the
bulk crystal, we have predicted several important properties of this
material and its stability. We found that: (i) Ca(OH)$_2$ crystals are
environmentally stable and their stable structures can be synthesized by
experimental methods; (ii) experimentally, we also demonstrated that Ca(OH)$_2$
crystals can be grown in layered form and can be exfoliated onto arbitrary
substrates; (iii) the dimensionality of Ca(OH)$_2$ does not change the
electronic, structural and magnetic properties qualitatively, although the intrinsic
mechanical stiffness of the layers increases slightly as
the system goes from monolayer to bilayer; (iv) the interlayer interaction is mainly a
vdW dispersion-type force, and
its strength is larger than in similar layered
materials (e.g.\ MoS$_2$ and graphite); (v) the
conduction states which have a free-electron-like character may be utilized for
high-mobility electron transfer.
We believe that the stable structure and the unique electronic properties of ultra-thin
Ca(OH)$_2$, predicted for the first time here, will trigger interest in this
new class of materials.
\section[Number of layers: Few-layer of pentasilicene]{Number of layers: Few-layer of pentasilicene \footnote{This work is published:\cite{Aierken2016.pentasilicene}} \label{pSi_layers}}
\subsection{Introduction\label{intro}}
Recently, a new 2D structure for carbon was proposed, called penta-graphene\cite{Zhang2015}. This crystal is composed entirely of pentagonal rings of C atoms with mixed sp$^2$/sp$^3$ orbital hybridization. However, the silicon counterpart of this structure, penta-silicene, exhibits a dynamical instability in its monolayer form. A few attempts have been made to stabilize this new Si structure by hydrogenation\cite{Ding2015} and chemical doping\cite{Li2015b}.
In the present work, we construct multilayer structures of penta-silicene. We use density functional theory to explore their stability and physical properties. Two types of stacking for the penta-silicene layers are found to give stable few-layer structures. These different stacking types lead to completely different electronic properties, since one leads to metallic and the other to semiconducting behavior. Somewhat surprisingly, we found that bilayer penta-silicene has a lower formation energy than the most stable hexagonal silicene bilayers. Furthermore, we found that the band gaps of the semiconducting penta-silicene bilayers can be tuned by mechanical strain. We first explore the stability of monolayer penta-silicene and demonstrate its dynamical instability. This forms the motivation to study few-layer systems. Then we investigate different stacking possibilities and the resulting stability. Further, we study the mechanical properties by calculating the elastic constants. We also compare bilayer penta-silicene to the most stable bilayer hexagonal silicene structures. Lastly, the electronic properties of multilayered penta-silicene are discussed.
\subsection{Computational details}
\begin{footnotesize}
\begin{description}
\item[Simulation program:] VASP and Phonopy
\item[Energy cut-off:] 500 eV
\item[Pseudopotentials:] PBE-GGA(PAW)
\item[k points (Monkhorst-Pack):] 17$\times$17$\times$1 and 23$\times$23$\times$1 for insulating and metallic systems, respectively
\item[Vacuum:] 20~\AA
\item[Energy and force convergence criterion:] 10$^{-8}$ eV and 10$^{-7}$ eV/\AA, respectively
\item[phonon calculation:] finite displacement method
\item[Supercell for phonon calculation:] $4\times4\times1$ and $3\times3\times2$ for few-layer and bulk systems, respectively
\item[\textit{Ab initio} molecular dynamics:] Parrinello-Rahman (NpT) dynamics \cite{vasp_npt1,vasp_npt2} and a Langevin thermostat \cite{vasp_Lgv}
\item[\textit{Ab initio} molecular dynamics (Energy cut-off):] 300 eV
\item[\textit{Ab initio} molecular dynamics (time step):] 2 fs
\item[\textit{Ab initio} molecular dynamics (temperature):] 100 K
\item[\textit{Ab initio} molecular dynamics (simulation time):] 6 ps
\end{description}
\end{footnotesize}
\subsection{Monolayer pentasilicene}\label{mono}
\begin{figure}[htb]
\centering
\includegraphics[width=\linewidth]{ps_monolayer.eps}%
\caption{(a) Side view and (b) top view of the atomic structure, (c) phonon spectrum and (d) two vibration modes with imaginary frequency of monolayer penta-silicene. Visualization of vibration modes is done with the V\_Sim package \cite{VSim}. \label{fig:ps_monolayer}}
\end{figure}
The layer group symmetry of monolayer penta-silicene (p-Si) is p$\overline{4}$2$_1$m (58). As shown in \autoref{fig:ps_monolayer}(a) and \autoref{fig:ps_monolayer}(b), the primitive cell contains six silicon atoms, of which two have fourfold coordination (Si4) and four have threefold coordination (Si3). Two of the Si3 atoms reside above the Si4 atoms, denoted as Si3\_u, while the other two are below the Si4 atoms, denoted as Si3\_d. The Si4 atoms are bonded to four Si3 atoms while the Si3 atoms are connected to two Si4 atoms and one neighboring Si3 atom. Note that the two Si4 atoms have equivalent environments which are rotated by approximately 41$^{\circ}$ with respect to each other. Therefore, in analogy to graphene, we can relate these two equivalent Si4 atoms to sublattices which in the following will be referred to as the A and B sublattice.
The dynamical stability of this structure can be studied through its phonon spectrum. As noted before\cite{Ding2015,Li2015b}, the phonon spectrum of monolayer p-Si contains imaginary frequencies, as shown in \autoref{fig:ps_monolayer}(c), which is a clear signature of its instability. The corresponding atomic vibrations of the two imaginary frequencies at the $\Gamma$ point are shown in \autoref{fig:ps_monolayer}(d). These modes correspond mainly to out-of-plane vibrations of the Si3 atoms with respect to the Si4 atoms. As a consequence, the structure is found to fall apart, indicating that there is no stable form of monolayer p-Si. However, the addition of extra layers could reduce these out-of-plane vibrations and stabilize the structure. This is the motivation to study few-layer p-Si.
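In practice, an imaginary phonon frequency appears as a negative eigenvalue $\omega^{2}<0$ of the dynamical matrix. The toy sketch below, with a generic $2\times2$ matrix unrelated to the actual p-Si force constants, shows how such unstable modes are flagged.
\begin{verbatim}
# Toy illustration: imaginary phonon frequencies correspond to
# negative eigenvalues (omega^2 < 0) of the dynamical matrix.
# The 2x2 matrix below is generic, not the p-Si dynamical matrix.
import numpy as np

D = np.array([[ 0.50, -0.10],
              [-0.10, -0.02]])       # one unstable mode built in

for w2 in np.linalg.eigvalsh(D):     # eigenvalues = omega^2
    if w2 < 0.0:
        print(f"imaginary mode: omega = {np.sqrt(-w2):.3f}i  (unstable)")
    else:
        print(f"real mode:      omega = {np.sqrt(w2):.3f}")
\end{verbatim}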
\subsection{Multilayers of pentasilicene structures}\label{fews}
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{ps_stackings.eps}%
\caption{ Schematic illustration of the four stacking types for bilayer p-Si. The colors of the symbols correspond to those of the monolayer in \autoref{fig:ps_monolayer}(a) and \autoref{fig:ps_monolayer}(b). The bottom layer in the bilayer is blurred for clarity. The arrow represents translation and the angle represents the rotation of the top layer with respect to the bottom layer. \label{fig:ps_stackings}}
\end{figure}
\begin{table}[htb]
\centering
\caption{The cohesive energy (E$_{\text{coh}}$), the interlayer binding distance (d$_{\text{inter}}$), the interlayer binding energy (E$_{\text{inter}}$), the number of interlayer bonds (N$_b$) and the energy per bond (E$_{\text{bond}}$) of the four possible stacking types of bilayer p-Si and of the distorted AB$_r^d$ structure. The interlayer binding energy per unit cell is defined as $E_{\text{inter}}=E_{\text{bi}}-2E_{\text{mono}}$. \label{bilayer_table}}
\begin{tabularx}{\textwidth}{@{\extracolsep{\fill}}X|XXXXX}
\hline\hline
\multirow{2}{*}{structure} & E$_{\text{coh}}$ & d$_{\text{inter}}$ & E$_{\text{inter}}$& N$_b$ & E$_{\text{bond}}$ \\
& (eV/atom) & (\AA) & (eV) & - & (eV) \\ \hline
AA & -4.129 & 0.795 & -3.502 & 4 & -0.875 \\
~AA$_r$ & -4.113 & 2.379 & -3.318 & 2 & -1.659 \\
AB & -3.968 & 2.174 & -1.574 & 2 & -0.787 \\
~AB$_r$ & -4.147 & 1.893 & -3.725 & 2 & -1.862 \\
~AB$_r^d$ & -4.185 & 1.896 & -4.174 & 2 & -2.087 \\
\hline\hline
\end{tabularx}
\end{table}
When considering two layers, different stacking configurations are possible. Here we focus on the so-called AA and AB stacking modes of the aforementioned sublattices (see \autoref{fig:ps_stackings}). The stacking in which both layers have the same in-plane orientation and the Si atoms are put right on top of each other is called AA stacking. AB stacking arises by shifting the A sublattice of one layer to the B sublattice of the other. This nomenclature was also used by \citet{Wang2015} for bilayer penta-graphene. Although penta-silicene has a tetragonal lattice symmetry, the highest proper rotational symmetry order is two. Therefore, there are also two different possible orientations of the upper layer with respect to the lower one: one in which the two layers have the same orientation and another in which one layer is rotated over 90$^{\circ}$ with respect to the other one. We denote this last orientation with a subscript $r$ to show that it results from a 90$^{\circ}$ rotation, e.g. AA$_r$. Therefore, there are four possible stacking types for bilayer p-Si. Note that AB$_r$ stacking corresponds to the recently proposed bulk T12 phase for group IVA elements\cite{Zhisheng2012}. However, as discussed in more detail below, perfect AB$_r$ stacking is not stable in the case of multilayer penta-silicene. A considerable distortion of the outer layers is required to stabilize AB$_r$ stacking. The distorted structure, which will be referred to as AB$_r^d$ in the following, is obtained by breaking the symmetry between the two Si3 atoms at each surface side of the AB$_r$ multilayer. In this way, one of the two Si3 atoms acquires sp$^2$ hybridization and loses an electron to the other Si3 atom, which has sp$^3$ hybridization.
\begin{figure}[htb]
\centering
\begin{subfigure}{\linewidth}
\captionsetup{singlelinecheck=true}
\includegraphics[width=\linewidth]{ps_AA_structures.eps}%
\caption{AA stacked structures}
\end{subfigure}
\begin{subfigure}{\linewidth}
\captionsetup{singlelinecheck=true}
\includegraphics[width=\linewidth]{ps_AB_structures.eps}%
\caption{AB$_r^d$ stacked few-layer and AB$_r$ stacked bulk structures}
\end{subfigure}
\caption{Atomic structure of the $2\times2$ supercell of few-layer p-Si. The number of atomic layers in the bulk structure is fixed to four for comparison (i.e.\ $2\times2\times4$ and $2\times2\times2$ supercells for AA and AB$_r$ stacked bulk p-Si). (Visualisation using VESTA \cite{vesta}). \label{fig:ps_structures}}
\end{figure}
\begin{landscape}
\begin{table}[htb]
\centering
\begin{threeparttable}[b]
%\adjustbox{max width=\textwidth}{%
\caption{The layer (space) group for few-layer (bulk) systems. The lattice constant ($a$), the interlayer distance (d$_{\text{inter}}$), the nearest-neighbor bond length range (d$_{\text{min/max}}$), the cohesive energy (E$_{\text{coh}}$), and the band gap (PBE) of few-layer and bulk p-Si.}
\label{few-layer-table}
\begin{footnotesize}
\begin{tabular}{ll|lccccccc}
\hline\hline
stacking & structure & layer/space group & $a$ (\AA) & d$_{\text{inter}}$ (\AA) & d$_{\text{min}}$ (\AA) & d$_{\text{max}}$ (\AA) & E$_{\text{coh}}$ (eV/atom) & band gap (eV) & \\[3pt] \hline
-& monolayer & p$\overline{4}$2$_1$m (58) & 5.587 & - & 2.233 & 2.363 & -3.837 & 0.046 (M$\rightarrow \Sigma$) & \\[3pt] \hline
\multirow{4}{*}{AA}
& bilayer & \multirow{3}{*}{p$\overline{4}$2$_1$m (58)} & 5.907 & 0.795 & 2.363 & 2.468 & -4.129 & metal & \\[3pt]
& trilayer & &5.887 & 1.085 & 2.330 & 2.606 & -4.108 & metal & \\[3pt]
& tetralayer & & 5.980 & 0.996/1.794\tnote{a} & 2.368 & 2.478 & -4.150 & metal & \\[3pt]
& bulk & P$\overline{4}$2$_1$m (113) & 6.234 & 1.769 & 2.398 & 2.463 & -4.204 & metal & \\[3pt] \hline
\multirow{3}{*}{AB$_r^d$}
& bilayer & pb2b (30) pm2a (31) & 5.222 & 1.896 &2.303 & 2.403 & -4.185 & 0.119 (M$\rightarrow \Sigma$) & \\[3pt]
& trilayer & p1 (1) & 5.222 & 1.989 & 2.298 & 2.413 & -4.291 & 0.247 (M$\rightarrow \Sigma$) & \\[3pt]
& tetralayer & pb2b (30) pm2a (31) & 5.221 & 1.997 & 2.298 & 2.413 & -4.345 & 0.232 (M$\rightarrow \Sigma$) & \\[3pt]
AB$_r$ & bulk & P4$_2$/ncm(138) & 5.220 & 1.999 & 2.358 & 2.413 & -4.508 & 1.329 (M$\rightarrow \Delta$) & \\ \hline\hline
\end{tabular}
\end{footnotesize}
\begin{tablenotes}
\item [a]The first and the second number indicate the interlayer distance between two monolayers and two bilayers, respectively.
\end{tablenotes}
\end{threeparttable}
\end{table}
\end{landscape}
In \autoref{bilayer_table}, we compare the energies of the different stacking modes. In all cases the Si4 atoms are not involved in interlayer bonding since their bonds are already saturated. In AA stacking all Si3 atoms bind to Si3 atoms of the other layer, whereas in the other cases only half of the Si3 atoms are bonded to the other layer. In \autoref{bilayer_table}, the interlayer binding energy and the energy per bond are given; their magnitudes indicate strong chemical bonding. The AB$_r^d$ stacking mode clearly forms the most stable structure. For the rest of the section, we will only focus on the most stable AA- and AB-type stackings, i.e. AA and AB$_r^d$.
We also investigated the stability of trilayer, tetralayer and bulk p-Si structures by adding extra layers to the stable bilayers mentioned above; their structures are shown in \autoref{fig:ps_structures}(a) and \autoref{fig:ps_structures}(b). We list their structural and energetic properties in \autoref{few-layer-table}. Extra layers increase the cohesive energy per atom due to a smaller ratio of surface atoms. For AA stacking, adding a fourth layer to a trilayer system results in a double-bilayer system with weaker bonding between the two bilayers. Going to AA bulk, the interlayer interaction appears to be further reduced and the buckled layers become flatter. Adding extra layers to an AB$_r^d$ bilayer results in similar structures in which the Si3 atoms of the surface layers become distorted. For the bulk, the undistorted AB$_r$ structure is found, in which the $\overline{4}$-fold symmetry is restored.
\subsection{Stabilities}\label{stab}
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{ps_phonon_spectrum.eps}%
\caption{Phonon spectra of differently stacked few-layer p-Si. \label{fig:ps_phonon}}
\end{figure}
In this section we investigate the stability of the different multilayer structures discussed above.
Phonon calculations for the AA and AB$_r$ stacking modes reveal that, of these two, only the AA bilayer is dynamically stable at low temperature. The extra bonds of the Si3 atoms in AA-stacked structures effectively reduce the out-of-plane vibrations and stabilize the structure. Although the AA-stacked bulk structure has weaker interlayer bonding, its phonon spectrum contains no imaginary frequencies, indicating its dynamical stability. The AB$_r$-stacked layers, on the other hand, exhibit similar out-of-plane vibrations of the outermost Si atoms as the monolayer. For AB$_r^d$ stacking, the distortion of the outermost layer removes the instabilities from the phonon spectrum, so that these structures are also dynamically stable.
It is also interesting to see whether these structures remain stable at finite temperature. To this end, we performed \textit{ab initio} molecular dynamics calculations at a temperature of 100 K. The evolution of the cohesive energy as a function of simulation time is shown in \autoref{fig:ps_MD}. For comparison, the results for the dynamically unstable monolayer are also shown. The monolayer laterally shrinks and becomes a disordered multilayered system. The AA and AB$_r^d$ bilayer systems, on the other hand, remain stable and retain their crystalline structure.
\begin{figure}[htb]
\centering
\includegraphics[width=0.7\linewidth]{ps_ab_initio_MD_100K.eps}%
\caption{The cohesive energy of monolayer and of AA and AB$_r^d$ stacked bilayer p-Si as a function of time at a temperature of 100 K in the NpT ensemble. \label{fig:ps_MD}}
\end{figure}
As a final stability check, we investigate the mechanical stability of bilayer p-Si, which is determined by the elastic constants of the structures. If the elastic constants satisfy the necessary and sufficient Born criteria generalized by \citet{Mouhat2014}, the structures are mechanically stable. AA bilayer p-Si has the layer group symmetry p$\overline{4}$2$_1$m, which belongs to the tetragonal symmetry groups, and its independent 2D elastic constants are: $C_{11}$= 101.43 N/m, $C_{12}$= 36.36 N/m and $C_{66}$ = 39.53 N/m. AB$_r^d$ bilayer p-Si possesses pb2b or pm2a layer group symmetry, which belongs to the orthorhombic crystal systems, and its independent elastic constants are: $C_{11}$=$C_{22}$= 63.83 N/m, $C_{12}$=26.92 N/m and $C_{66}$ = 50.43 N/m. As discussed in \autoref{chap:3}, the following criteria must be fulfilled for 2D tetragonal systems to be stable:
\begin{equation}
C_{11}>|C_{12}|, \qquad C_{66}>0,
\end{equation}
while 2D orthorhombic systems should satisfy:
\begin{equation}
C_{11}>0, \qquad C_{11}C_{22}>C_{12}^2, \qquad C_{66}>0.
\end{equation}
As one can see, these criteria are satisfied by both AA and AB$_r^d$ bilayer p-Si, which ensures their mechanical stability. Additionally, in \autoref{mechanical}, we list the (2D) Young's modulus, shear modulus and Poisson's ratio of the bilayer p-Si systems. An interesting aspect of the Poisson's ratio of AB$_r^d$ is that it is quite high and close to the theoretical limit of 0.5. This means that this 2D material prefers to change its shape rather than its surface area under strain, similar to the 3D cases of rubber and water.
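These checks are easy to script; the minimal sketch below evaluates the criteria with the elastic constants quoted above.
\begin{verbatim}
# Check of the generalized Born criteria with the constants from
# the text (N/m), for the two stable bilayer stackings.
def tetragonal_stable(C11, C12, C66):
    return C11 > abs(C12) and C66 > 0

def orthorhombic_stable(C11, C22, C12, C66):
    return C11 > 0 and C11 * C22 > C12**2 and C66 > 0

print(tetragonal_stable(101.43, 36.36, 39.53))          # AA:      True
print(orthorhombic_stable(63.83, 63.83, 26.92, 50.43))  # AB_r^d:  True
\end{verbatim}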
\begin{table}[htbp]
\centering
\caption{Mechanical properties of AA and AB$_r^d$ stacked bilayer p-Si}
\label{mechanical}
\begin{tabularx}{0.6\textwidth}{l|XXX}
\hline\hline
Stacking & $E[N/m]$ & $G[N/m]$ & $\nu$ \\ \hline
AA & 88.40 & 39.53 & 0.36 \\
AB$_r^d$ & 52.47 & 50.41 & 0.42 \\ \hline\hline
\end{tabularx}
\end{table}
\subsection{Relative phase stability}\label{hexa}
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{ps_h-silicene.eps}%
\caption{Top and side views of the atomic structures of the 2 $\times$ 2 supercell of the three examined hexagonal silicene bilayers. \label{fig:ps_h-Si}}
\end{figure}
\begin{table}[htb]
\centering
\caption{The interlayer distance d$_{\text{inter}}$, the nearest-neighbor bond length range (d$_{\text{min/max}}$) and the cohesive energy per atom E$_{\text{coh}}$ of the most stable hexagonal bilayer silicene and bilayer p-Si. }
\label{h-Si_table}
\begin{tabularx}{0.9\linewidth}{l|XXXX}
\hline\hline
\multirow{2}{*}{structure} & d$_{\text{inter}}$ & d$_{\text{min}}$ & d$_{\text{max}}$ & E$_{\text{coh}}$ \\
& (\AA) & (\AA) & (\AA) & (eV/atom) \\
\hline
AA bilayer p-Si & 0.795 & 2.363 & 2.468 & -4.129 \\
AB$_r^d$ bilayer p-Si & 1.896 & 2.303 & 2.403 & -4.185 \\
h-Si1 & 2.175 & 2.358 & 2.418 & -4.115 \\
h-Si2 & 1.579 & 2.298 & 2.453 & -4.165 \\
h-Si3 & 1.378 & 2.288 & 2.473 & -4.175 \\
\hline\hline
\end{tabularx}
\end{table}
In this section we compare the cohesive energy of bilayer p-Si to that of the more familiar bilayer hexagonal silicene structures (h-Si). We examined three different stacking types for h-Si bilayers, denoted as h-Si1, h-Si2, and h-Si3. To the best of our knowledge, these are the most stable hexagonal bilayer structures of silicene predicted so far. The h-Si2 structure corresponds to the re-DL-Si structure suggested by \citet{Morishita2011} and h-Si3 is the hex-OR-2$\times$2 structure that was recently proposed by \citet{Sakai2015}. These structures were constructed from the structural information provided by the authors in the supplementary material of the corresponding papers and re-optimized with our computational procedure. The h-Si1 structure is a new stable bilayer h-Si structure that we discovered. It is composed of two planar, non-buckled, compressed hexagonal silicene planes that are shifted along the crystal plane. This structure is interesting because, although its cohesive energy is close to that of the former two cases, it has a non-buckled nature. To the best of our knowledge, it is the most stable non-buckled bilayer silicene discovered so far.
The cohesive energies of all the stable bilayer Si systems are given in \autoref{h-Si_table}. It is seen that the AB$_r^d$ bilayer p-Si system has the lowest energy, about 10 meV/atom less than the most stable hexagonal silicene bilayer h-Si3. This means that the AB$_r^d$ p-Si structure is the most stable bilayer silicon structure predicted so far, which is a very surprising result. The AA-stacked p-Si has slightly higher energy than h-Si2 and h-Si3.
\subsection{Electronic properties}\label{elec}
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{ps_AA_bands.eps}%
\caption{Electronic band structure of AA stacked few-layer and bulk p-Si, and a schematic of the first Brillouin zone. \label{fig:AA_bands}}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.8\linewidth]{ps_AB_bands.eps}%
\caption{Electronic band structures of AB$_r^d$ stacked few-layer and AB$_r$ stacked bulk p-Si. The results for the monolayer are shown for comparison.\label{fig:AB_bands}}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\linewidth]{ps_charge_density.eps}%
\caption{The charge distributions of the CBM and VBM in AB$_r^d$ stacked few-layer and AB$_r$ stacked bulk p-Si. The results for the monolayer are shown for comparison.\label{fig:charge_density}}
\end{figure}
In the last part of this work, we investigate the electronic properties of few-layer and bulk p-Si. These properties are mainly determined by the electronic spectrum. In \autoref{fig:AA_bands} and \autoref{fig:AB_bands}, the electronic band structures of AA and AB$_r^d$ p-Si multilayers, respectively, and of their bulk counterparts are shown. The band structure of the unstable monolayer is also calculated for comparison. Monolayer p-Si is an indirect semiconductor with a band gap of 0.046 eV (PBE). The band-edge states are mainly composed of $p_z$ orbitals of the Si3 atoms. In contrast, all AA-stacked multilayers are metallic. In the case of the AB$_r^d$ structure, the semiconducting properties of monolayer p-Si are preserved, but the band gap changes somewhat with the number of layers. This can be understood from the position of the electron and hole states which correspond to the CBM and the VBM, respectively. As seen in \autoref{fig:AB_bands}, the VBM and CBM states are always localized on the outermost layers. In other words, the electronic properties are mainly determined by the surface region, which is nearly independent of the slab thickness. For AB$_r$-stacked bulk p-Si, there is no surface and the VBM and CBM correspond to bulk states. This explains the much larger band gap (1.33 eV) in the bulk case.
\subsection{Summary}
In this work, we proposed several stable structures for few-layer pentasilicene. The stability of these structures was confirmed via their phonon spectra, finite-temperature molecular dynamics, and their mechanical properties. The stacking mode, AA or AB, of few-layer pentasilicene has a crucial influence on the electronic properties: AA-stacked systems are metallic, while AB$_r^d$-stacked ones are semiconducting. Surprisingly, AB$_r^d$-stacked bilayer pentasilicene has a lower energy than the most stable bilayer hexagonal silicene structures, which makes it the most stable predicted form of bilayer silicon.
\section[Mechanical strain: Carrier mobility enhancement in TiS$_3$ monolayer with strain]{Mechanical strain: Carrier mobility enhancement in TiS$_3$ monolayer with strain \footnote{This work is published:\cite{Aierken2016.mobility}} \label{mob_Tis3}}
\subsection{Introduction}
Recently, monolayers of TiS$_3$\cite{ADOM:ADOM201400043,ADMA:ADMA201405632} were synthesized as a new type of promising 2D material with a high carrier mobility for high-performance electronic device applications. Here, we study how strain can be utilized to modify the mobility of the carriers in this material. Enhancement of the mobility under strain has been reported and measured before for silicon layers on Si$_x$Ge$_{1-x}$ substrates\cite{Vogelsang1993,Welser1994}, where a mobility enhancement of up to about 76\% at room temperature was achieved. First-principles calculations have been frequently used to determine the intrinsic mobility of 2D materials\cite{Kaasbjerg2012,Zhang2014,Yongqing2014}. There are also several works that investigated the mobility of monolayer materials under strain\cite{fei,Henry2015,Sheng2015}, including the strain-controlled anisotropy of the mobility in phosphorene\cite{fei}. The vertical compression of phosphorene bilayers leads to a two-orders-of-magnitude increase of the mobility\cite{Henry2015}, and strain can enhance the mobility of MoS$_2$ by up to a factor of 10\cite{Sheng2015}. In the last two cases, the enhancement of the mobility results from a decrease of the deformation potential constant (DPC), which is consistent with the results discussed below. Inspired by these significant improvements of the transport properties of 2D materials under strain, we investigate the dependence of the carrier mobility of TiS$_3$ monolayers on mechanical strain at 300 K. We find that more than an order of magnitude enhancement of the electron mobility can be achieved by tensile strain, while the hole mobility shows a more moderate enhancement.
\subsection{Computational details}
\begin{footnotesize}
\begin{description}
\item[Simulation program:] VASP and Phonopy
\item[Energy cut-off:] 700 eV
\item[Pseudopotentials:] PBE-GGA(PAW)
\item[k points (Monkhorst-Pack):] 25$\times$25$\times$1
\item[Vacuum:] 20~\AA
\item[Energy and force convergence criterion:] 10$^{-8}$ eV and 10$^{-7}$ eV/\AA, respectively
\item[phonon calculation:] finite displacement method
\item[Supercell for phonon calculation:] $2\times3\times1$
\end{description}
\end{footnotesize}
\vspace{0.5cm}
As discussed before, the band gap is underestimated due to the well-known band gap problem of semilocal functionals. However, for the shifts of the valence and conduction band edges under an applied tensile strain, the semilocal functional provides results and trends consistent with hybrid functionals, and it has been successfully used in previous studies for the determination of the mobility of 2D materials\cite{Meng-Qiu2009,Yongqing2014,fei}.
To determine the mobility of electrons and holes, we use the deformation potential theory together with the effective mass approximation \cite{Bardeen1950}, which has been previously applied to several 2D systems\cite{Xi2012,Qiao2014a,Dai2015,Kang2015}.
The mobility, $\mu$, of a 2D system is given by:
\begin{equation} \label{equ}
\mu=\frac{2e\hbar^3C}{3k_BT|m^*|^2E_d^2},
\end{equation}
where $e$ is the electron charge and $\hbar$ is the reduced Planck constant. $C$ is the elastic modulus along the transport direction, defined as $C=(\partial^2E_{total}/\partial\varepsilon^2)/S_0$, where $E_{total}$ is the calculated total energy of TiS$_3$, $\varepsilon$ is the strain applied along the transport direction and $S_0$ is the equilibrium 2D area of the unit cell. $k_B$ is the Boltzmann constant and $T$ is the temperature, which is equal to 300 K throughout the section unless stated otherwise. $m^*$ is the effective mass of the carrier along the transport direction, calculated from $1/m^*_{e(h)}=(1/\hbar^2)\,\partial^2E_{c(v)}(k)/\partial k^2$, where $E_{c(v)}(k)$ is the energy dispersion near the CBM (VBM). $E_d$ is the DPC along the transport direction, defined as $E_d^{e(h)}=\Delta E_{CBM(VBM)}/(\delta l/l)$, where $\Delta E_{CBM(VBM)}$ is the energy shift of the band edge with respect to the vacuum level under a small dilation $\delta l$ of the lattice constant $l$. We fix $\delta l/l$ to 0.005, as in previous calculations\cite{Dai2015}. In this theory, the dominant scattering process is longitudinal acoustic phonon scattering (AS). However, for a polar material, scattering on optical phonon modes and other scattering sources should be taken into account at high temperatures\cite{Kaasbjerg2012}. Nevertheless, in this work we focus solely on the effect of AS on the mobility and on its dependence on strain, for the following reasons: 1) the dominant role of AS is not suppressed in polar materials, so it remains an important factor determining the mobility; 2) we want to study different mobility-controlling mechanisms separately, especially their strain dependence; and 3) it is computationally less expensive.
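To make the use of \autoref{equ} concrete, the sketch below evaluates the electron mobility along the \textit{b} direction from the unstrained parameters listed later in \autoref{unstrain}; only standard physical constants are added, and the result reproduces the value reported there.
\begin{verbatim}
# Evaluation of the mobility formula for the unstrained TiS3
# electron along b, with the parameters from the table:
# m* = 0.40 m_e, C = 133.76 N/m, E_d = 0.84 eV, T = 300 K.
e    = 1.602176634e-19     # elementary charge (C)
hbar = 1.054571817e-34     # reduced Planck constant (J s)
kB   = 1.380649e-23        # Boltzmann constant (J/K)
me   = 9.1093837015e-31    # electron mass (kg)

T, C  = 300.0, 133.76      # temperature (K), 2D elastic modulus (N/m)
m_eff = 0.40 * me          # effective mass along b
Ed    = 0.84 * e           # deformation potential constant (J)

mu = 2 * e * hbar**3 * C / (3 * kB * T * m_eff**2 * Ed**2)  # m^2/(V s)
print(f"mu = {mu * 1e4:.2e} cm^2/(V s)")   # ~1.7e4, close to the table
\end{verbatim}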
\subsection{Unstrained system}
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{Mob_str.eps}
\caption{(a) Top and (b) tilted views of a 2$\times$2 supercell of monolayer TiS$_3$. Two crystallographic directions (where the strain is applied or the mobility is calculated) are shown with red and green arrows. \label{tis3_structure} }
\end{figure}
The atomic structure of a TiS$_3$ monolayer is shown in \autoref{tis3_structure}. The unit cell has a monoclinic crystal structure. The calculated lattice constants are \textit{a} = 5.03~\AA~and \textit{b} = 3.41~\AA, in good agreement with previous calculations\cite{Kang2015,Jin2015} and close to the experimental values for the bulk structure ($a$=4.958~\AA, $b$=3.401~\AA)\cite{Furuseth1975}. The monolayer is constructed of connected quasi-one-dimensional chains of TiS$_3$ triangular prisms extending along the \textit{b} direction, as shown in \autoref{tis3_structure}. One can spot a significant structural anisotropy between the chain direction (green arrow) and the perpendicular direction (red arrow). Therefore, we make a clear distinction between these two directions throughout the section.
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{Mob_projected_bands.eps}
\caption{(a) The site- and orbital-projected band structure (calculated using the PBE functional) of the unstrained TiS$_3$ monolayer. The decomposed charge densities of the CBM (b) and VBM (c) at the $\Gamma$ point are presented. \label{band} }
\end{figure}
As shown in \autoref{band}, monolayer TiS$_3$ is a direct band gap semiconductor with a band gap of 0.29 eV (1.06 eV \cite{Dai2015}) calculated with the PBE (HSE06) functional at the $\Gamma$-point. The CBM has the largest contribution from the d$_{x^2-y^2}$ and d$_{z^2}$ orbitals of the Ti atoms, while the VBM is mostly dominated by the p$_x$ orbital of the S atoms and the $d_{xz}$ orbitals of the Ti atoms. The $x$ and $y$ directions coincide with the \textit{a} and \textit{b} directions, respectively. The $z$ direction is taken perpendicular to the $xy$ plane. Later, we will discuss in detail how these states change their energies when the material is exposed to mechanical strain.
To gain deeper insight into the bonding nature of the TiS$_3$ monolayer, we calculate the Bader charges\cite{Bader1,Bader2,Bader3,Bader4}. According to the Bader analysis, each Ti atom donates 1.61 $e^-$ to the eight S atoms surrounding it. Each of the surface S atoms, labelled S1 in \autoref{band}, binds only with two Ti atoms and gains 0.34 $e^-$ net charge; two of these S1 atoms form a covalent bond between them. Each of the S atoms between two Ti layers, labelled S2 in \autoref{band}, binds with four neighbouring Ti atoms and has 0.81 $e^-$ net charge accumulated on it. The electronegativity difference between S (2.58) and Ti (1.54) is 1.04, so the Ti-S bond belongs to the polar covalent bond class\cite{David2015}. This value is large compared to that of MoS$_2$, where Mo (2.16) and S (2.58) have an electronegativity difference of 0.41. Therefore, the S-Ti bonds are considered polar covalent bonds.
\begin{table}[ht]
\caption{The electron (hole) effective mass ($m^*$), the elastic modulus ($C$), the DPC ($E_d$), and the electron (hole) mobility ($\mu$) of phosphorene and monolayer TiS$_3$.}
\centering
\begin{footnotesize}
\begin{tabularx}{\textwidth}{l|lXXXXXX}
\hline\hline
Material & Carrier type & Direction & $m^*$($m_e$) & $C$(N/m) & $E_d$(eV) & $\mu$(10$^3$ cm$^2$V$^{-1}$s$^{-1}$)\\ \hline
\multirow{2}{*}{Phosphorene} & electron & armchair & 0.20 & 24.30 & 0.65 & 1.93 \\
& hole & zigzag & 6.89 & 103.14 & 0.11 & 25.36 \\ \hline
\multirow{4}{*}{TiS$_3$ monolayer} & \multirow{2}{*}{electron} & \textit{a} & 1.52 & 82.68 & 0.53 & 1.82 \\
& & \textit{b} & 0.40 & 133.76 & 0.84 & 17.08 \\
& \multirow{2}{*}{hole} & \textit{a} & 0.30 & 82.68 & 2.53 & 2.00 \\
& & \textit{b} & 0.99 & 133.76 & 4.10 & 0.11 \\ \hline\hline
\end{tabularx}
\label{unstrain}
\end{footnotesize}
\end{table}
Before applying strain to our system, we summarize the mobility and related parameters of unstrained TiS$_3$ in \autoref{unstrain}; they are in agreement with previous calculations\cite{Dai2015}. Similar data for phosphorene are presented for comparison; the results for phosphorene were recalculated with the same computational parameters as for TiS$_3$. The electron mobility of a TiS$_3$ monolayer is impressively high, an order of magnitude larger than that of phosphorene. However, owing to its very small DPC, the hole mobility of phosphorene is an order of magnitude larger than that of TiS$_3$. Moreover, the anisotropy of the effective mass along different crystallographic directions is much more pronounced in phosphorene than in TiS$_3$. Overall, phosphorene is softer than TiS$_3$ as far as the elastic modulus is concerned.
\subsection{Dynamical stability under mechanical strain}
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{Mob_stiffness.eps}
\caption{Calculated elastic modulus along the \textit{a} and \textit{b} directions under mechanical strain. \label{stiffness}}
\end{figure}
Applying mechanical strain may have important effects on the stability of a structure. Therefore, it is crucial to verify that the strain applied to the system does not induce any structural instability. To this purpose, we calculate the elastic modulus and the phonon spectrum of the TiS$_3$ monolayer under various strain values in order to confirm its dynamical stability. \autoref{stiffness} shows the elastic modulus along the \textit{a} and \textit{b} directions as a function of applied strain. These values correspond to the elastic constants C$_{11}$ and C$_{22}$. Here, C$_{11}$ (C$_{22}$) reflects the mechanical response of the TiS$_3$ monolayer to a strain applied along the \textit{a} (\textit{b}) direction. The calculated value of C$_{11}$ (C$_{22}$) for unstrained TiS$_3$ is 133.76 (82.68) N/m, in good agreement with previous calculations\cite{Dai2015,kang2015m}. Note that the calculated elastic constants are all positive and fulfill the mechanical stability criteria, i.e. C$_{11}$, C$_{22}$ $>$ 0. These results imply that monolayer TiS$_3$ is less stiff than graphene and single-layer h-BN, but mechanically superior to MoS$_2$ and phosphorene\cite{QHA2}. Furthermore, C$_{11}$ is always larger than C$_{22}$, meaning that the \textit{a} direction is stiffer than the \textit{b} direction.
In order to gain further insight into the stability of the TiS$_3$ monolayer, we calculate the phonon dispersion under different types of strain. As shown in \autoref{phn}, uniaxial strain applied along the \textit{a} and \textit{b} directions induces dynamical instability in the system at strain values of 6\% and 8\%, respectively. Note that the highest optical mode reaches up to $\sim$550 cm$^{-1}$ and does not vary with strain. Consistent with our results for the elastic constants, the optical phonon modes are more sensitive to strain applied along the \textit{b} direction, see \autoref{phn}. Except for the topmost mode, the phonon branches move downward with strain applied along the \textit{b} direction. We observe an average downward shift of 25 cm$^{-1}$ at 8\%, which can easily be detected by Raman spectroscopy. The instability at large strain values manifests itself in imaginary vibrational frequencies, suggesting a structural phase transition. The dynamically stable range of black phosphorus is 15\%, which is much larger than that of TiS$_3$\cite{0953-8984-27-17-175006}; due to its puckered structure, the former is able to sustain large mechanical deformations. The biaxial case is the combination of two uniaxial strains applied along the \textit{a} and \textit{b} directions, and the stable range is determined by the lowest strain value, i.e. 6\%, applied along the \textit{a} direction.
\begin{landscape}
\begin{figure}[htb]
\centering
\includegraphics[width=\linewidth]{Mob_phn.eps}
\caption{Calculated phonon spectra of (a) the pristine monolayer TiS$_3$ and (b-g) of the monolayer under different strains. }
\label{phn}
\end{figure}
\end{landscape}
\subsection{Bond lengths and band gap under mechanical strain}
The foremost consequence of mechanical strain is the change of the structural parameters of the unit cell. Strain is applied by manually changing the lattice constant along the desired direction. After that, the cell parameters are kept fixed whereas the internal atomic positions are allowed to fully relax. The relaxed positions can therefore be considered as the response of the material to the external strain and carry important information about its consequences. The variation of the bond lengths provides information about how the local environment of the atoms in the unit cell changes with strain. Considering this, we plot the bond lengths for the different types of strain in \autoref{bonds}.
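A minimal sketch of this strain protocol, written with ASE and hypothetical file names (the actual workflow used native VASP inputs), is given below; the subsequent relaxation with a fixed cell corresponds to ISIF=2 in VASP terms.
\begin{verbatim}
# Sketch: apply 2% uniaxial tensile strain along the a direction.
# File names are hypothetical; internal coordinates are relaxed
# afterwards with the lattice vectors held fixed.
import numpy as np
from ase.io import read, write

atoms = read("POSCAR")                  # unstrained TiS3 monolayer
strain = 0.02                           # 2% tensile strain along a

cell = np.array(atoms.get_cell())       # 3x3 lattice-vector matrix
cell[0] *= 1.0 + strain                 # stretch the a lattice vector
atoms.set_cell(cell, scale_atoms=True)  # rescale atomic positions

write("POSCAR_a_2pct", atoms, format="vasp")
\end{verbatim}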
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{Mob_bonds.eps}
\caption{Variation of the bond lengths with strain. The colors of the curves match the colors of the bonds in the structure depicted in the inset.\label{bonds}}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{Mob_bandedges.eps}
\caption{Variation of the band edges (referenced to the vacuum level) with strain. \label{bandedges}}
\end{figure}
In the following discussion, we correlate the variation of the CBM and VBM, shown in \autoref{bandedges}, with the change of the bond lengths by investigating their bonding character. Let us first focus on strain applied along the \textit{a} direction. The most significant change occurs for the Ti-S bond between two neighboring prisms: as the strain increases, the local environment inside the prism hardly changes, in contrast to the Ti-S bond lengths between prisms. Moreover, the decomposed charge density of the VBM at the $\Gamma$ point for unstrained TiS$_3$ (see \autoref{band}(c)) shows that the Ti-S distance between two prisms is controlled by the Ti-S bonding state at the VBM. Therefore, we expect the energy of this state to increase (i.e. its binding energy to decrease) with tensile strain, which is indeed seen in \autoref{bandedges}. For strain applied along the \textit{b} direction, one can note from \autoref{band}(c) that the VBM charge distribution along the \textit{b} direction has a node between two prisms. Given that the atoms sit close to each other, this is an anti-bonding state. Therefore, we expect its energy to decrease (i.e. its binding energy to increase by depopulating the anti-bonding state) when strain along the \textit{b} direction increases, and this is indeed the trend of the VBM with strain applied along the \textit{b} direction in \autoref{bandedges}. The CBM has mostly nonbonding character regardless of the direction, see \autoref{band}(b). This results in a much slower variation of the energy of the CBM under tensile strain, although an overall small change is observed.
\autoref{bandedges} also provides information about the variation of the band gap under strain, which agrees well with previous calculations \cite{Biele2015,Li2015}. While the band gap decreases under strain applied along the \textit{a} direction, it increases with strain applied along the \textit{b} direction. The band gap increase in the latter case was confirmed experimentally \cite{Biele2015}. With the help of strain engineering, it is thus possible to tune the optical gap from the far infrared to the near infrared. The band gap remains direct within the considered tensile strain range where TiS$_3$ is dynamically stable. We find that biaxial strain acts as a superposition of the two uniaxial cases, as can be seen from \autoref{bandedges}.
\subsection{Effective mass, DPC and mobility under mechanical strain}
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{Mob_emass.eps}
\caption{Variation of the effective mass of electrons and holes along the \textit{a} and \textit{b} directions at the $\Gamma$-point with strain. \label{emass}}
\end{figure}
Using the computational setup described above, we estimate the acoustic-phonon-limited mobility of monolayer TiS$_3$ under strain via \autoref{equ}. According to this equation, at a given temperature (e.g. 300 K), three physical parameters are subject to change under strain: the elastic modulus $C$, the carrier effective mass $m^*$, and the DPC $E_d$. We already discussed the variation of the elastic modulus with strain, shown in \autoref{stiffness}. We now discuss how the effective mass and DPC change under strain. \autoref{emass} shows the effective masses of holes and electrons along the different directions and for different strain values. All curves change monotonically: since there is no band crossing in the considered strain range, one expects a smooth variation of the band edges, and hence of the effective masses, with strain. The electron mass along the \textit{a} direction increases regardless of the direction of applied strain, and the magnitude of this change is the largest among all carrier effective masses and directions. Because of this, the degree of anisotropy in the effective mass increases as TiS$_3$ is subjected to strain. As for the hole effective mass, the difference between the \textit{a} and \textit{b} directions increases for strain applied along the \textit{a} direction, whereas it decreases for strain applied along the \textit{b} direction. Such anisotropy can be utilized to modify the electronic and thermoelectric properties of TiS$_3$.
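For the reader's convenience we recall the direction-resolved form of the acoustic-phonon-limited 2D mobility commonly used in the literature (our \autoref{equ} is of this type); along the \textit{a} direction it reads
\begin{equation}
\mu_{a}=\frac{e\hbar^{3}C_{a}}{k_{B}T\,m_{a}^{*}\sqrt{m_{a}^{*}m_{b}^{*}}\,E_{d,a}^{2}},
\end{equation}
with an analogous expression along \textit{b}, so that at fixed temperature the entire strain dependence indeed enters through $C$, $m^*$ and $E_d$.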
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{Mob_dePotential.eps}
\caption{Variation of the DPC ($E_d$) with strain. \label{dePotential}}
\end{figure}
Our calculations reveal that one of the most important factors determining the mobility is the DPC. To evaluate the DPC, we apply a small dilation along the direction of interest; the ratio of the CBM (VBM) shift to this dilation gives the DPC for electrons (holes), see \autoref{dePotential}. Mathematically, the DPC enters \autoref{equ} squared in the denominator. Its variation therefore governs the whole expression, and hence the mobility, unless the other quantities change dramatically, which, as discussed above, is not the case here. For example, in the work of \citet{fei}, a 90$^{\circ}$ rotation of the direction of highest mobility was reported, resulting from a change of the direction of smallest effective mass. In our case, we observe no band crossing and the variation of the effective mass with strain is rather small for the highest-mobility directions, see \autoref{emass}.
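A sketch of this finite-difference procedure is given below, with made-up, vacuum-aligned band-edge energies; $E_d$ is simply the slope of the band edge with respect to the dilation.
\begin{verbatim}
import numpy as np

# Hypothetical vacuum-aligned CBM energies (eV) at small dilations along a.
dilation = np.array([-0.010, -0.005, 0.000, 0.005, 0.010])
E_cbm    = np.array([ 4.012,  4.006,  4.000,  3.994,  3.988])

# The DPC is the linear response of the band edge to the dilation.
E_d = np.polyfit(dilation, E_cbm, 1)[0]
print(f"E_d(CBM, a) = {E_d:.2f} eV")      # -1.20 eV for these synthetic data
\end{verbatim}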
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{Mob_mobility.eps}
\caption{Variation of the mobility ($\mu$) with strain.\label{mobility}}
\end{figure}
Next, we explore the mobility variation under strain, see \autoref{mobility}. For uniaxial strain along the \textit{a} direction, the two most apparent changes are the moderate enhancement of the hole mobility along the \textit{b} direction, from 2.00$\times10^3$ to 7.08$\times10^3$ cm$^2$V$^{-1}$s$^{-1}$, and the drop of the electron mobility along the same direction, from 1.82$\times10^3$ to 0.25$\times10^3$ cm$^2$V$^{-1}$s$^{-1}$. Although the electron mobility along the \textit{a} direction of unstrained TiS$_3$ is close to the hole mobility along the same direction, an order of magnitude difference can be realized with the help of tensile strain. Therefore, uniaxial strain applied along the \textit{a} direction can be used to select the carrier type based on the different transport properties. For uniaxial strain along the \textit{b} direction, we find a dramatic enhancement of the electron mobility along the \textit{a} direction, from 1.71$\times10^4$ to 5.53$\times10^5$ cm$^2$V$^{-1}$s$^{-1}$ at 300 K. This corresponds to a mobility enhancement from 5.13$\times10^4$ to 1.66$\times10^6$ cm$^2$V$^{-1}$s$^{-1}$ at 100 K. The subsequent drop at 7\% can be ascribed to the proximity of the edge of the dynamically stable region. Furthermore, the electron mobility along the \textit{b} direction also shows a considerable increase when tensile strain is applied along the \textit{b} direction, with a moderate enhancement already at 6\%. The hole mobility generally tends to decrease in both directions. As stated before, the biaxial case is an effective combination of the two uniaxial strains applied along the \textit{a} and \textit{b} directions; the electron mobility along the \textit{a} direction reaches up to 3.68$\times10^5$ cm$^2$V$^{-1}$s$^{-1}$ at 5\% biaxial strain.
Lastly, we discuss the effect of optical phonon scattering on the mobility. Using a Drude-like expression, $\mu$ can be written as $\sim q\langle\tau\rangle/m^*$, where $\langle\tau\rangle$ is the average scattering time. According to Matthiessen's rule, the total rate is given by the sum over all scattering processes, i.e. $1/\tau=1/\tau_{ph}+1/\tau_{el}+1/\tau_{imp}+\cdots$, where $\tau_{ph}$, $\tau_{el}$ and $\tau_{imp}$ are the scattering times related to electron-phonon, electron-electron and electron-impurity scattering, respectively. In a rough estimate (i.e. using Einstein's model), $\tau_{ph}$ for longitudinal optical phonon scattering is inversely proportional to the frequency of the optical modes. According to our calculations, the frequencies of the phonon modes are redshifted under strain. This means that the contribution of optical phonon scattering decreases with increasing tensile strain. As a result, we can claim that our trends are robust against the contributions of the different scattering processes.
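The combination rule is easy to make concrete: mobilities limited by independent mechanisms add inversely, so the smallest channel always dominates. The numbers below are placeholders, not computed values.
\begin{verbatim}
# Matthiessen's rule: 1/tau = sum_i 1/tau_i, and since mu ~ q<tau>/m*,
# channel-limited mobilities combine in the same inverse way.
mu_acoustic = 5.5e5   # cm^2/(V s), placeholder
mu_optical  = 2.0e6   # cm^2/(V s), placeholder (weakens under tensile strain)

mu_total = 1.0 / (1.0 / mu_acoustic + 1.0 / mu_optical)
print(f"mu_total = {mu_total:.2e} cm^2/(V s)")   # below the smallest channel
\end{verbatim}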
\subsection{Summary}
In this work, we have demonstrated that, with the help of tensile strain, it is possible to enhance the carrier mobility of the TiS$_3$ monolayer by more than an order of magnitude at 300 K and two orders of magnitude at 100 K. Phonon dispersion calculations revealed that TiS$_3$ becomes dynamically unstable for a uniaxial tensile strain larger than 6\% (8\%) applied along the \textit{a} (\textit{b}) direction. The degree of effective mass anisotropy can be controlled with uniaxial strain. The determining role of the DPC in the mobility of this material is confirmed. The variation of the CBM and VBM with strain was explained through the bonding character within the TiS$_3$ monolayer. We also showed that strain engineering is an exciting route to tune the electrical conductivity of TiS$_3$.
\section[Heterostructures: Electrical transport in 1T/2H/1T MoS2 lateral heterostructure]{Heterostructures: Electrical transport in 1T/2H/1T MoS2 lateral heterostructure \footnote{This work is submitted:\cite{Aierken2017.transport}} \label{trans_mx2}}
Another way to enrich the properties of materials is to combine different materials into heterostructures\cite{Geim_Grigorieva_2013,Liu2016b,Pomerantseva2017}. These can be composites of materials with complementary characters that perform better than each material individually, or materials that display new phenomena and promising properties only when acting together. Given the obvious structural anisotropy of 2D materials, there are two types of heterostructures: vertical and lateral\cite{Allain2015}, see for example \autoref{fig:ver-lat-stru}. Vertical heterostructures are composed of stacks of different 2D materials. We have already encountered a vertical heterostructure in the last chapter. The layers are held together by the same force that holds graphite layers together, the vdW force, and this type of construction is therefore referred to as a vdW heterostructure. A lateral heterostructure\cite{Jena2014,Chhowalla2015}, on the other hand, is made by joining the edges of different 2D materials, and the connection is mostly made through strong bonding, e.g. covalent or ionic. In this case, all the new properties originate from the interface. In this section, I will present our work on the transport properties of a lateral heterostructure made of the 1T and 2H phases of MoS$_2$.
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\textwidth]{ver-lat-stru.png}
\caption{(a) and (b) vertical and (c) and (d) lateral WS$_2$/MoS$_2$ heterostructures. \label{fig:ver-lat-stru} Image adapted from Ref. \cite{Gong_Lin2014}.}
\end{figure}
\subsection{Introduction}
Heterostructures are essential components of a wide range of solid-state devices, such as transistors, solar cells, and sensors\cite{singh1993,agostini2011}. They are fabricated by combining different types of materials, e.g. metals, semiconductors, and insulators, so that the physical properties of the combined system are enhanced or become more controllable compared to those of each material individually. These tailored properties are strongly related to the interface of the two materials, where all the interesting new phenomena occur. With the emergence of nanostructured materials, however, dimensionality has become another major factor, along with the interface, affecting the physical properties of materials and devices. Solid-state device fabrication with heterostructures based on low-dimensional nanomaterials has therefore attracted significant attention, initiating a new research area in materials design from which unprecedented results, phenomena and physics are expected\cite{Gan2013,Wu2015c,DiBartolomeo2016}. Indeed, several advantages of 2D phase engineering over its 3D counterpart have already been demonstrated\cite{Duerloo2015}.
As discussed before, in a vertical heterostructure an overlapping portion of two materials is glued together mainly via the interlayer vdW interaction, while in a lateral heterostructure the one-dimensional edges of two materials are connected by covalent bonds without overlapping. The vdW interaction in a vertical heterostructure introduces a potential gap between the two layers through which electrons have to tunnel, resulting in a higher resistance due to the reduced carrier transmission probability. Naturally, this resistance is much lower in a lateral heterostructure owing to the formation of covalent bonds that provide a path for carriers to travel across the interface\cite{Matsuda2010,Kang2014}. Recently, \citet{Eda2012} discovered the coexistence of multiple phases in MoS$_2$, which makes it a promising material for heterostructure device fabrication due to its natural metal-semiconductor-metal structure with a clear lateral heterostructure interface\cite{Eda2012}. Considering the distinct electronic nature of these phases, the physical properties of such heterostructures\cite{Kappera2014,Fan2015} can be tuned by phase engineering, and novel solid-state device architectures can be realized for several future applications.
The same research group has synthesized 2D semiconducting heterostructure devices\cite{Huang2014,Zhang2015} using metal contacts. Based on their experimental analysis, they pointed out that consistency in geometry and electronic nature between the metal contact and the heterostructure is of vital importance for the device performance \cite{Bai2013,Eda2012}. Considering this fact, \citet{Kappera2014} locally induced the metallic 1T phase of MoS$_2$ in the semiconducting 1H phase, and measured an edge resistance lower than that of metal contacts by more than a factor of two. Subsequently, 1T$\mid$1H lateral heterostructures have drawn attention as promising contact structures with a high carrier injection rate. Different arrangements of the interfaces between the 1T and 1H phases were investigated in theoretical calculations\cite{Hu2015,Sivaraman2016}, and the structure formed by connecting the armchair edges of the 1T and 1H phases was determined to be the energetically more favorable configuration. In these calculations, however, the more stable metallic structure (1T$_d$), which arises from a small distortion of the 1T phase, was considered.
The present work investigates the electronic transport properties of MoS$_2$ multi-phase lateral junctions when the more stable metallic phase of MoS$_2$ (i.e. 1T$_d$) acts as the contact, and compares it with the 1T phase. Beyond this, the work mainly focuses on the effect of doping on the electrical transport properties. In the results section, we first construct three junction models and calculate their transmission without external bias. We then calculate the electronic properties for different levels of doping.
\subsection{Computational details}
\begin{footnotesize}
\begin{description}
\item[Simulation program:] VASP
\item[Energy cut-off:] 500 eV
\item[Pseudopotentials:] PBE-GGA(PAW)
\item[k points (Monkhorst-Pack):] 25$\times$25$\times$1 for 2D systems and 9$\times$1$\times$1 for lateral heterostructures
\item[Vacuum:] 15~\AA
\item[Energy and force convergence criterion:] 10$^{-5}$ eV and 10$^{-2}$ eV/\AA, respectively
\end{description}
\end{footnotesize}
\paragraph{Transmission spectrum calculation} Electronic transport across the 1T$_d$/1T-MoS$_2$ $\mid$1H-MoS$_2$ interfaces is calculated using the self-consistent non-equilibrium Green's function (NEGF) technique as implemented in TranSIESTA\cite{transiesta}, which is interfaced with the SIESTA code\cite{siesta}. The transmission amplitude matrix $t$ is expressed as follows:
\begin{equation}
t(E)=(\operatorname{Im}[\Gamma_R(E)])^{1/2}G(E)(\operatorname{Im}[\Gamma_L(E)])^{1/2},
\end{equation}
where $E$ is the energy, $G(E)$ is the Green's function defined as $G(E)=(EI-H-\Sigma_R-\Sigma_L)^{-1}$ with $I$ an identity matrix of the same size as the Hamiltonian $H$, $\Sigma_{R(L)}$ are the self-energies of the electrons in the right (left) side of the junction, which can be calculated from the bulk Green's functions, and $\Gamma_{R(L)}=i[\Sigma_{R(L)}-\Sigma_{R(L)}^\dagger]$. The transmission probability is $T(E)=\Tr[t^\dagger t](E)$. For this calculation, double-zeta (plus polarization) numerical orbital basis sets are used for all atoms. We employ norm-conserving pseudopotentials\cite{tm}, the GGA/PBE functional, and an energy cutoff of 250 Ry for the real-space mesh. In order to obtain accurate transmission spectra, the 2D Brillouin zone normal to the transport direction is sampled by meshes of 100 $k$-points in the periodic direction. Although the SIESTA code uses a localized basis set and norm-conserving pseudopotentials, the calculated lattice parameters for the different phases of MoS$_2$ agree well with those obtained from the VASP code.
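To illustrate the NEGF machinery itself (this is a toy model, not the TranSIESTA implementation), the script below evaluates the transmission of a uniform 1D tight-binding chain, for which the lead self-energy is known analytically and $T(E)$ must equal 1 inside the band; it uses the equivalent Caroli form $T(E)=\Tr[\Gamma_L G \Gamma_R G^{\dagger}]$.
\begin{verbatim}
import numpy as np

def lead_self_energy(E, t=1.0, eta=1e-9):
    # Analytic surface Green's function of a semi-infinite chain with
    # hopping -t and zero on-site energy: Sigma = t^2 * g_surface.
    z = E + 1j * eta
    g = (z - np.sqrt(z**2 - 4 * t**2 + 0j)) / (2 * t**2)
    if g.imag > 0:                 # pick the retarded branch (Im g <= 0)
        g = (z + np.sqrt(z**2 - 4 * t**2 + 0j)) / (2 * t**2)
    return t**2 * g

def transmission(E, n=5, t=1.0):
    H = -t * (np.eye(n, k=1) + np.eye(n, k=-1))     # central Hamiltonian
    SL = np.zeros((n, n), complex); SL[0, 0]   = lead_self_energy(E, t)
    SR = np.zeros((n, n), complex); SR[-1, -1] = lead_self_energy(E, t)
    G  = np.linalg.inv((E + 1j * 1e-9) * np.eye(n) - H - SL - SR)
    GL = 1j * (SL - SL.conj().T)                    # broadening Gamma_L
    GR = 1j * (SR - SR.conj().T)                    # broadening Gamma_R
    return np.trace(GL @ G @ GR @ G.conj().T).real

for E in (-1.0, 0.0, 1.0, 3.0):
    print(f"T({E:+.1f}) = {transmission(E):.3f}")   # ~1 in band, ~0 outside
\end{verbatim}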
\subsection{Structures}
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{structure.eps}%
\caption{\label{structure-1t} Device models in which the 2H phase of MoS$_2$ is sandwiched between metallic MoS$_2$ electrodes: (a) the $\alpha$-device, (b) the $\beta$-device, and (c) the $\gamma$-device. For the $\alpha$ and $\beta$ devices, the interfaces between metallic and semiconducting MoS$_2$ have an armchair termination, whereas they have a zigzag termination in the $\gamma$-device.}
\end{figure}
The use of metallic TMDCs as electrodes is expected to offer a breakthrough in the semiconductor industry, as they have negligible heat dissipation and are therefore energy efficient. Among the metallic TMDCs, the metallic phases of MoS$_2$ (1T- and 1T$_d$-MoS$_2$) have attracted growing interest due to their smooth interface with the semiconducting phase of MoS$_2$ (1H-MoS$_2$).
However, the 1H phase is thermodynamically more stable than both the 1T and 1T$_d$ phases. Therefore, the stabilization of 1T and 1T$_d$ against the 1H phase becomes an essential requirement for the successful experimental realization of configurable device structures such as 1T/1T$_d$-MoS$_{2}\mid$1H-MoS$_2$. Moreover, 1T-MoS$_2$ is metastable and undergoes a Peierls transition to the lower-energy 1T$_d$ (or distorted 1T) phase; metal contacts with the 1T$_d$ structure are thus more stable than those with the 1T phase, although the 1T$_d$ phase still transforms back to the 1H phase at room temperature. As far as the relative stability is concerned, choosing 1T$_d$ as the metal contact therefore further stabilizes the junction.
Therefore, understanding the effect of different physical mechanisms on the stability of the multiple phases (H, T, T$_d$) of this material is of vital importance in order to develop proper control over the phase transitions. To this end, we mainly focus on the effect of doping (either with charge or with atoms) on the stability, electronic and transport properties of 1T/1T$_d$-MoS$_2\mid$1H-MoS$_2$ interfaces. For the 1T and 1H hexagonal unit cells, the optimized in-plane lattice constant is obtained as 3.18 {\AA}. For the tetragonal 1T$_d$ unit cell, the optimized lattice constants are $a$=3.18 {\AA} and $b$=5.72 {\AA}.
These values are in good agreement with previous calculations\cite{C5NR07715J}. The observed coexistence of the 1T$_d$ phase with the other two phases indicates its experimental stability, yet it is also possible to transform the 1T$_d$ phase into the 1T phase using an external source, such as electron beam irradiation\cite{Eda2012}. Experimentally, 1T and 1T$_d$ are difficult to distinguish because the S sublattice is identical in the two cases; only the Mo atoms form clusters, and these cannot be differentiated in STM images.
In this work, we systematically investigate the electronic and transport properties of three different device architectures, called $\alpha$, $\beta$, and $\gamma$, depicted in \autoref{structure-1t}. In all device models, the semiconducting 1H-MoS$_2$ phase is sandwiched between two 1T$_d$ metal electrodes, creating Schottky contacts at the interfaces. In the $\alpha$ structure, the metallic part consists of both the 1T- and 1T$_d$-MoS$_2$ phases. The sizes of the metallic and semiconducting parts are larger than 20 {\AA} along the transport direction. The interface between the 1T$_d$-MoS$_2$ and 1H-MoS$_2$ phases has either an armchair termination, as in the $\alpha$ and $\beta$ structures, or a zigzag termination, as in the $\gamma$ structure, which allows us to investigate the influence of the contact type on the calculated properties. We find that the $\gamma$ structure deviates significantly from a planar geometry after structural relaxation, see \autoref{structure-1t}. To check whether this distortion is a calculation artefact, we started from a completely planar geometry and allowed both the atomic coordinates and the cell parameters to relax to their equilibrium values (i.e. the lowest-energy configuration). The planar structure turned out to be unstable, and structural relaxation brought back the original distorted structure. Indeed, such buckling, or deviation from a planar structure, is mainly restricted to the left interface, in line with a recent work that proposed a new crystal structure model for MoS$_2$\cite{C5NR07715J}. The observed buckling helps to reduce the repulsive interaction between S atoms at the left interface, thereby enhancing the stability of this interface.
\subsection{Transmission spectra and DOS}
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{TRANSMISSION-ALL-STRUCTURES.eps}%
\caption{\label{transport} The zero bias transmission spectra for (a) the $\alpha$ device, (b) the $\beta$ device, and (c) the $\gamma$ device. }
\end{figure}
The transmission spectra of all three device models at zero bias are depicted in \autoref{transport}. In these plots, the Schottky barrier for holes (electrons) is defined as the difference between the Fermi level and the VBM (CBM) of the semiconducting 1H phase of MoS$_2$. The first clear observation is that there is a large barrier at the pristine interfaces: there is no transmission within an energy range of 1.8 eV around the Fermi level, corresponding to the band gap of 1H-MoS$_2$. The Schottky barrier heights of the $\alpha$, $\beta$ and $\gamma$ structures are predicted to be 0.72, 0.80, and 0.63 eV for holes and 1.16, 0.99 and 1.19 eV for electrons, respectively. The estimated size of the scattering region along the transport direction is larger than 23 {\AA}, which is still much smaller than the mean free path of electrons in MoS$_2$\cite{mos2-trans}; the transport properties of these systems can therefore be estimated with ballistic transport calculations. The $\beta$ structure has the largest transmission over the calculated energy range. In the $\beta$ and $\gamma$ structures, the Mo atoms form a zigzag chain perpendicular to the interface (i.e. along the transport direction), which enhances the electrical transport in these systems. However, in the $\gamma$ structure the non-symmetric Mo zigzag chain lying parallel to the transport direction leads to scattering of electrons at the interface and gives rise to a lower transmission as compared to the $\alpha$ and $\beta$ structures. Similar anisotropic electron transport has also been observed for ReS$_2$, where the resistance is lowest along the Re cluster direction\cite{doi:10.1021/acsnano.5b04851}. Comparing the $\alpha$ and $\beta$ devices, the coexistence of the 1T and 1T$_d$ regions in the former contributes to lowering the transmission through additional scattering at the 1T/1T$_d$ interface, whereas the latter only has the 1T$_d$ phase in the electrode region.
\subsection{The effect of doping}
Next, we turn to the electronic properties as a function of doping. The central part of 1H-MoS$_2$ is least affected by the interface formation and determines the band gap and the position of the band edges with respect to the Fermi level. The calculated band gap of 1.75 eV clearly indicates that the 1H part is large enough to reach the monolayer limit and to eliminate the electrode-electrode interaction; indeed, the band gap of a pristine 1H-MoS$_2$ monolayer calculated with the same functional is around 1.7 eV.
In line with the transport calculations, the Fermi level appears within the band gap of the central 1H-MoS$_2$ region. The calculated Schottky barriers are 0.75 eV for holes and 0.99 eV for electrons in the $\beta$ structure. In the following discussion, we mainly focus on the $\beta$ structure because of its better transport properties as compared to the $\alpha$ and $\gamma$ devices; the other device models exhibit similar behavior. Our results contradict the experimental findings in the sense that, in experiments, 1T (or 1T$_d$)$\mid$1H-MoS$_2$ interfaces were shown to exhibit superior performance over 3D metal-MoS$_2$ interfaces, whereas we predict large Schottky barriers, which give rise to a large contact resistance. In order to shed light on this contradiction, we calculate the electronic properties of the $\beta$ structure as a function of electron doping. First of all, electron doping stabilizes the 1T$_d$ phase over 1H-MoS$_2$ and prevents the structural phase transition to the semiconducting 1H-MoS$_2$ phase\cite{doi:10.1021/jp4076355}. In addition, electron doping decreases the Schottky barrier height for electrons at the interface, leading to the formation of an $n$-type Schottky contact. This is attributed to the increase of the electron density in the $d$-orbitals of the metallic 1T$_d$-MoS$_2$ phase. \autoref{sch-elec} and \autoref{sch-chg} show the variation of the partial density of states (pDOS) and of the Schottky barrier as a function of the electron concentration, respectively. We find that the Schottky barrier essentially vanishes for electron concentrations larger than 0.1 electron per 1T$_d$-MoS$_2$ formula unit. The Fermi level rises by about 1 eV when 0.28 electron per formula unit is placed on the 1T$_d$ part, corresponding to 2.05$\times$10$^{14}$ cm$^{-2}$, i.e. the heavily doped regime. The Schottky barrier can already be reduced by half with 0.05 electron per 1T-MoS$_2$ formula unit, i.e. 5.72$\times$10$^{13}$ cm$^{-2}$. Direct electron doping can be achieved experimentally by electron beams or by Li/Na adsorption on the metallic phase\cite{doi:10.1021/acs.nanolett.6b01186,doi:10.1021/jp4076355}. The adsorbed alkali atoms donate electrons to the 1T$_d$ phase and enhance the stability and electronic properties of the metallic part\cite{doi:10.1021/jp4076355}. In addition, adsorption of hydrogen atoms on the 1T part of MoS$_2$ has been shown to reduce the barrier at the 1T-MoS$_2\mid$1H-MoS$_2$ interface\cite{doi:10.1021/acs.nanolett.6b03999,doi:10.1021/acs.chemmater.5b00986}.
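The conversion between per-formula-unit doping and areal density only requires the area per formula unit. For the hexagonal 1T cell with $a$=3.18 {\AA}, the snippet below reproduces (to within rounding) the $\sim$5.7$\times$10$^{13}$ cm$^{-2}$ quoted above.
\begin{verbatim}
import math

a = 3.18e-8                          # 1T-MoS2 lattice constant (cm)
area_fu = math.sqrt(3) / 2 * a**2    # hexagonal cell = one formula unit

for x in (0.05, 0.10):               # electrons per formula unit
    print(f"{x:.2f} e/f.u. -> {x / area_fu:.2e} cm^-2")
\end{verbatim}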
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{dos-charge.eps}
\caption{pDOS of the valence and conduction band of 1H-MoS$_2$ as a function of electron concentration for the $\beta$-device. Here, we only show the pDOS of the central part of 1H-MoS$_2$, where the effect of the interface is minimal. The Fermi level marks the zero energy. Electron concentrations are given per formula unit of the 1T$_d$ phase. \label{sch-elec}}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.7\linewidth]{Schottky-Charge.eps}
\caption{\label{sch-chg}Variation of Schottky barrier for the $\beta$-device as a function of electron concentration (per formula unit of 1T$_d$ part) for both electrons (red) and holes (black).}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{Re1-Ta1-Bare.eps}
\caption{\label{re-ta-bare-dos}pDOS of the valence and conduction band of the central part of 1H-MoS$_2$ for the Re- and Ta-doped devices. In the top panel, the gray region highlights the central part of the 1H phase for which the pDOS is calculated. For comparison, the pDOS of the bare device is also shown.}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{pn-doped.eps}
\caption{\label{pn}pDOS of 1H-MoS$_2$ at different positions in the 1H-MoS$_2$ region. The Fermi level marks the zero energy.}
\end{figure}
Another possible strategy to enhance the stability of the metallic phases and the electrical conduction at the metal-semiconductor MoS$_2$ interface is to dope the metallic phase with transition metal atoms. Most of the well-known TMDCs have either the 1H or the 1T phase as their ground state. Single-layer ReS$_2$, however, has neither the H nor the T phase as its ground state, but is stabilized in the 1T$_d$ structure\cite{res2-cakir}. Therefore, alloying MoS$_2$ with Re may stabilize the 1T$_d$ structure of MoS$_{2}$ and lead to $n$-type doping of the crystal, similar to what \citet{Raffone} proposed for the Sn-doped 1T phase. Meanwhile, we have previously shown that doping ReS$_2$ with Mo results in $p$-type doping of the ReS$_2$ monolayer\cite{res2-cakir}. Therefore, we investigate the effect of substitutional doping of Re at the Mo sites of 1T$_d$-MoS$_2$ on the transport properties. We also consider the group-VB element Ta, since the pristine TaS$_2$ monolayer crystallizes in the 1T phase and Ta doping results in a $p$-type doped 1T$_d$-MoS$_{2}$ structure. Indeed, a recent work showed that the distorted phase of MoS$_2$ becomes energetically stable over the 1H phase when the Re concentration exceeds 50\%\cite{doi:10.1021/acs.jpcc.5b10739}. We did not consider such large dopant concentrations for two reasons. First, the lattice mismatch between the 1H-MoS$_2$ and doped 1T-MoS$_2$ phases can be kept minimal for small dopant concentrations; at large concentrations, the relaxation of the cell parameters leads to an artificial enlargement of the lattice parameters of 1H-MoS$_2$. Second, Re-doped 1T$_d$-MoS$_2$ becomes a semiconductor. To show the effect of doping, we therefore only considered concentrations smaller than 20\%, corresponding to a Re or Ta doping concentration of 2.42$\times$10$^{14}$ cm$^{-2}$. In this work, we assume that doping 1T-MoS$_2$ with Re or Ta may prevent the structural transition to the 1H phase due to, for instance, temperature effects. \autoref{re-ta-bare-dos} shows the pDOS of the central part of 1H-MoS$_2$ for the Re- and Ta-doped $\beta$ structure. In the case of Re doping, the Fermi level approaches the conduction band of 1H-MoS$_2$, accompanied by a significant decrease of the $n$-type Schottky barrier height. Ta doping, on the other hand, reduces the $p$-type Schottky barrier height, as expected. For a concentration of 14\% (per electrode), the $n$-type Schottky barrier becomes 0.85 eV for Re and the $p$-type Schottky barrier becomes 0.58 eV for Ta.
Since Re and Ta doping give rise to different electronic properties, we can design metal-semiconductor junctions with different types of Schottky barriers (i.e. $n$- and $p$-type) in the same device geometry, which opens the way to optical and photovoltaic applications. While a Re-doped junction effectively blocks holes, a Ta-doped junction hampers the passage of electrons across the junction. In such a device geometry we can, for instance, separate photo-generated charge carriers. \autoref{pn} shows the device model and the pDOS as a function of position in 1H-MoS$_2$: the left electrode is doped with Re, while the right electrode is alloyed with Ta. The central part of 1H-MoS$_2$ clearly has a pDOS similar to that of a free-standing 1H-MoS$_2$ monolayer, with a band gap of 1.75 eV. However, the electronic properties differ on the right and left sides of the central region: due to the Re (Ta) doping, the left (right) part has an $n$ ($p$)-type Schottky barrier.
The presence of the 1T$_d$-1H MoS$_2$ interfaces gives rise to mid-gap states that mainly originate from the atoms in the boundary region. The electronic properties gradually change from metallic to semiconducting when moving away from the interfaces. For atoms far from the interface region (i.e. in the central region of 1H-MoS$_2$), we observe a clear band gap close to that of pristine 1H-MoS$_2$. While the mid-gap states appear below the Fermi level at the left interface (the Re-doped side), they are unoccupied and reside above the Fermi level at the right interface (the Ta-doped side). Beyond about 3.2 {\AA} from the interface, the mid-gap states disappear.
\begin{figure}[htb]
\centering
\includegraphics[width=0.6\linewidth]{potential.eps}
\caption{\label{potential}Self-consistent EP profile along the interface of the armchair (a) pristine and (b) doped 1T$_d$-1H-1T$_d$ MoS$_2$ heterostructure. The right (left) 1T$_d$ MoS$_2$ is doped with Re (Ta). The blue curve denotes the plane-averaged potential along the heterostructure. The reference energy is taken at the vacuum level.}
\end{figure}
\autoref{potential} shows the electrostatic potential (EP) along the heterojunction for both the pristine and the doped $\beta$-device. For the undoped heterojunction, the average potential is symmetric at the left and right interfaces. The doped heterojunction, however, has a different EP, especially within 1H-MoS$_2$. Due to its valence configuration, Re (Ta) acts as a donor (an acceptor), which is reflected in the average effective potential shown in \autoref{potential}(b). The average EP does not vary sharply at the 1T$_d$-1H interface but extends over 2-3 atomic rows, because the interface is formed between two different crystal structures of MoS$_2$ (i.e. 1T$_d$ and 1H). The EP converges to the same value at the left and right electrodes.
If one considers a photovoltaic device based on the $\beta$ structure co-doped with Re and Ta, an electron-hole pair is generated after the absorption of a photon in the 1H part. The Re-doped interface has a higher potential than the Ta-doped interface, which produces a driving force for the dissociation of the electron-hole pair: the electron flows down the potential slope (i.e. towards the Ta-doped electrode) and the hole in the opposite direction (i.e. towards the Re-doped electrode). In this way, a photocurrent can be generated through the photovoltaic effect. Thus, by proper control of the doping and the interface roughness, we can control the quantum efficiency of the electron-hole dissociation\cite{doi:10.1021/acs.jpclett.7b00518}.
\subsection{Summary}
In this work, we explored the impact of doping on the electronic and charge transport properties across 1T$_d$-1H MoS$_2$ interfaces by considering various device models. Doping and alloying (with charge, atoms or molecules) appear to be effective methods to tailor and improve the physical-chemical properties and stability of not only the 1T/1T$_d$ phases of MoS$_2$ but also of other 2D materials. The interface structure between the 1T$_d$ and 1H phases is one of the decisive factors in determining the electrical transport across the heterojunction. We found that the Schottky barrier for electrons in pristine heterojunctions can even vanish as a result of electron doping. While charge doping only reduces the Schottky barrier for electrons, co-doping is able to tune the barriers for holes and electrons at the same time.
\section[Defect induction: Faceted blue phosphorene nanotube formed by line defects]{Defect induction: Faceted blue phosphorene nanotube formed by line defects \footnote{This work is published:\cite{Aierken2015.nanotubes}} \label{defect_phos}}
\subsection{Introduction}
Previously, we introduced phosphorene as a new member of the 2D materials family. In contrast to the C atoms in graphene, the P atoms in phosphorene are $sp^3$-hybridized. This is mainly caused by the extra valence electron of phosphorus compared to carbon: if these extra electrons were placed in an $sp^2$-hybridized structure, they would occupy the energetically unfavourable (antibonding) $\pi^*$ band. With $sp^3$-hybridization, on the other hand, a $\sigma$-bond network can be formed with three of the $sp^3$ orbitals, while the fourth $sp^3$ orbital hosts the remaining electron pair. This leads to an essentially tetrahedral coordination of the P atoms and results in the buckled nature of $sp^3$-hybridized sheets. The out-of-plane positions of the atoms in $sp^3$-hybridized sheets give rise to various possible structural phases that are absent in $sp^2$-hybridized systems. In the case of phosphorene, this extra freedom leads to a plethora of structural phases\cite{Guan2014a,Wu2015}, of which black ($\alpha$-phase) and blue ($\beta$-phase) phosphorene (see \autoref{natu_monolayer}) are the most stable ones.
Most 2D crystals, such as graphene and boron nitride, can also be used to create nanotubes by rolling up the sheets. Due to the buckled nature of the phosphorene family, the traditionally rolled-up nanotubes can be modified in various ways. As shown by Guan \textit{et al.}\cite{Guan2014a}, it is possible to join different structural phases of phosphorene to create so-called faceted nanotubes with lower formation energies than simple rolled nanotubes. These faceted tubes are made up of different phases that form well-defined angles when they are joined together. When a suitable combination of such structural phases is used, the structure can be closed to form a tube without inducing bending strain as in rolled nanotubes.
In the present work, we take a different approach to reduce the formation energy of phosphorene nanotubes (PNTs). We start from (single-phase) blue phosphorene sheets and introduce various defect lines that induce kinks with well-defined angles in the system. Combining the defect lines in appropriate ways leads to faceted blue PNTs with low formation energies. The advantage of this approach is that the energetically unfavorable phases are kept to a minimum. We investigate the stability and electronic properties of these faceted tubes and compare them to the traditional rolled tubes and to the faceted multiphase tubes.
This work is organized as follows: First we study rolled blue PNTs. Then we introduce various defect lines and calculate the corresponding formation energies and the angles of the resulting kinks. We use this information to create energetically interesting faceted PNTs in the next section. Finally, we investigate the electronic properties of the obtained structures and summarize our results.
\begin{figure}[htb]
\centering
\includegraphics[width=0.7\linewidth]{Nanotu_monolayer_structures.eps}%
\caption{Monolayer structures of black (first row) and blue (second row) phosphorene. Atoms are colored according to the names of the allotropes, with lighter colors for the bottom layer of the buckled structure. The black boxes indicate the unit cell of each structure. \label{natu_monolayer}}
\end{figure}
\subsection{Computational details}
\begin{footnotesize}
\begin{description}
\item[Simulation program:] VASP
\item[Energy cut-off:] 500 eV
\item[Pseudopotentials:] PBE-GGA(PAW)
\item[k points (Monkhorst-Pack):] 15$\times$15$\times$1 for 2D systems and 1$\times$1$\times$15 for nanotubes
\item[Vacuum:] 15~\AA
\item[Energy and force convergence criterion:] 10$^{-5}$ eV and 10$^{-3}$ eV/\AA, respectively
\end{description}
\end{footnotesize}
\subsection{Rolled PNT}
\begin{figure}[htb]
\centering
\includegraphics[width=0.7\linewidth]{Nanotu_rolled_tubes.eps}%
\caption{Rolled structures of $\alpha$-PNT and $\beta$-PNT seen from the axial direction. The subscripts indicate the directions of the chiral vector, i.e. \textit{ac} for armchair and \textit{zz} for zigzag.\label{natu_rolled}}
\end{figure}
As a reference, we first investigate the rolled $\alpha$-PNTs and $\beta$-PNTs that result from rolling up black and blue phosphorene sheets, respectively. A cross section of some typical examples is shown in \autoref{natu_rolled}. In principle, there is an infinite number of possible tubes, but we restrict our study to tubes rolled up along the armchair and zigzag directions. Such PNTs have been studied previously\cite{Guan2014a,Guo2014}, and our results compare well with those calculations, as shown below. For large tubes, the formation energy can mainly be attributed to the strain energy that results from bending the phosphorene sheet. It has been demonstrated with a simple continuum elastic model that the bending energy follows an $R^{-2}$ dependence, in which $R$ denotes the radius of the nanotube\cite{tib84,rob92}. Deviations from this ideal behavior can be expected for small tubes, where the finite thickness of the phosphorene sheet and the interatomic interactions between non-nearest neighbours become important.
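A minimal statement of this continuum result, valid in the ideal thin-sheet limit, is
\begin{equation}
E_S \simeq \frac{D\,\Omega}{2R^{2}},
\end{equation}
where $D$ is the bending stiffness of the sheet and $\Omega$ the area per atom; the $R^{-2}$ scaling follows directly from the curvature energy density $D/(2R^{2})$ of a cylinder of radius $R$.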
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\linewidth]{Nanotu_energy_tubes.eps}%
\caption{Strain energy versus radius of rolled nanotubes. The curves correspond to $aR^{-b}$ fits, where the fitting parameters are listed in \autoref{tab-RT}. \label{rolled_energy}}
\end{figure}
We calculated the strain energy of armchair and zigzag $\alpha$-PNTs and $\beta$-PNTs as a function of their radius and show the results in \autoref{rolled_energy}, together with the results of Refs. \cite{Guan2014a} and \cite{Guo2014}. The strain energy is defined here as the energy difference per P atom between the rolled PNT and the corresponding phosphorene sheet, i.e.~$E^{x}_S=(E_{\text{PNT}}^{x}-E_{\text{sheet}}^{x})/N$, in which $N$ denotes the number of atoms and $x=\alpha,\beta$. Since the strain energy of ideal nanotubes follows an $R^{-2}$ law, we fit the data with a power law of the form $E_S=aR^{-b}$. In \autoref{tab-RT}, we list the coefficients of this fitting function for all the different nanotubes. The strain energy of the rolled PNTs clearly follows the approximate inverse-square law expected from the bending energy. Note, however, that a substantial deviation from this trend is observed for zigzag nanotubes made of black phosphorene, which can be attributed to the large buckling in the transverse direction that effectively increases the thickness of the bent sheet. Another interesting observation is that the blue phosphorene phase becomes more stable than black phosphorene for small nanotubes: the cohesive energy of 2D black phosphorene sheets is only about 10 meV/atom larger than that of blue phosphorene\cite{Guan2014a}, and this is easily compensated by the reduced bending energy of blue phosphorene.
\begin{table}[htb] \centering
\caption{Fitting coefficients for the strain energy $E_S=aR^{-b}$ of rolled PNTs shown in \autoref{rolled_energy}.\label{tab-RT}}
\begin{tabular}{c|cccc}
\hline\hline
& $\alpha_{zz}$-PNT & $\alpha_{ac}$-PNT & $\beta_{zz}$-PNT & $\beta_{ac}$-PNT \\
\hline
$a$ & 3.11 & 2.70 & 2.63 & 1.70 \\
$b$ & 1.30 & 1.95 & 2.09 & 1.92 \\
\hline\hline
\end{tabular}
\end{table}
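Power-law fits of this kind are straightforward to reproduce; the sketch below uses placeholder data points, not the values plotted in \autoref{rolled_energy}.
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

def power_law(R, a, b):
    return a * R**(-b)               # E_S = a * R^(-b)

# Placeholder (radius in A, strain energy in eV/atom) data points:
R  = np.array([4.0, 6.0, 8.0, 12.0, 16.0])
Es = np.array([0.110, 0.048, 0.027, 0.012, 0.0068])

(a, b), _ = curve_fit(power_law, R, Es, p0=(1.0, 2.0))
print(f"a = {a:.2f}, b = {b:.2f}")   # b ~ 2 signals bending-dominated energy
\end{verbatim}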
The strain energy of all round nanotubes increases rapidly as the tube radius decreases. At some point the strain energy may become so large that it is more favorable to alter the structure in order to release some of the built-up energy. As discussed in the introduction, Guan \textit{et al.}\cite{Guan2014a} showed that specific partial structural phase transitions can lead to substantial energy gains. As a result, the PNTs lose their round character and acquire a faceted appearance. The different facets of the tube consist of nearly flat phosphorene nanoribbons with well-defined structural phases that are glued together at almost no energy cost. The structural phase transitions raise the energy, but this is more than compensated by the reduced strain energy for small PNTs.
\subsection{Defect lines} \label{natu_DL}
\begin{figure}[htb]
\centering
\includegraphics[width=0.7\linewidth]{Nanotu_defects.eps}%
\caption{Different types of defect lines for the (a) armchair and (b) zigzag directions. The dark and light shaded atoms are buckled up and down, respectively. The effective location of the defect is represented by a thick black line. Overlined defect names refer to the same defect but with an opposite opening direction of the angle. (c) and (d) are tilted views of the $C$ and $H$ defect lines incorporated in the $\beta$ phase.\label{defect_ribbon}}
\end{figure}
Here, we take another approach to release the strain energy. In contrast to the faceted tubes of Guan \textit{et al.}\cite{Guan2014a}, which contain considerably large parts with unfavorable structural phases, we try to maximize the amount of the low-energy blue phosphorene ($\beta$ phase) in the nanotubes. This can be done by introducing defect lines that create kinks in the phosphorene sheet. We use the $\beta$ phase for this purpose for two reasons: (i) as discussed above, the $\beta$ phase has the lower energy for small tubes, and (ii) it is easier to introduce defect lines in the $\beta$ phase than in the $\alpha$ phase (black phosphorene). The $\beta$ phase consists of P atoms that are alternately shifted up and down with respect to the crystal plane (i.e. one sublattice (A) buckles up, while the other (B) buckles down). Defect lines are created by breaking this ordered pattern along 1D lines, which can be done in several ways. We restrict our study to lines along the zigzag and armchair directions in order to create armchair and zigzag nanotubes, respectively. Three types of defect lines, $C$, $D$, and $F$, are considered along the zigzag direction and two, $G$ and $H$, along the armchair direction. The structure of these defect lines is depicted in \autoref{defect_ribbon} and their formation energies are given in \autoref{tab-DL}. The defect lines induce kinks in the phosphorene sheets; the angles of these kinks are also given in \autoref{tab-DL}. The formation energy of a defect line is defined as $E_f^l=(E_{\text{defect}}-E_{\text{sheet}})/L$, in which $L$ is the length of the defect line. In practice, this quantity can be calculated in a system that combines two defects of the same kind but with opposite orientation (i.e. angle) in a periodic structure, as illustrated in \autoref{defect_ribbon}; the explicit expression is given below. The following nomenclature is used to distinguish between the different defects: superscripts ($ac$ or $zz$) indicate the type of tube that is produced by the defects (armchair or zigzag), and subscripts ($AA$, $AB$, $BA$, and $BB$) correspond to the buckling on the two sides of the defect line, A (B) meaning sublattice A up and B down (B up and A down).
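Concretely, for a periodic supercell of $N$ atoms hosting such a defect pair, the formation energy per unit length is
\begin{equation}
E_f^l=\frac{E_{\text{supercell}}-N\,\varepsilon_{\text{sheet}}}{2L},
\end{equation}
where $\varepsilon_{\text{sheet}}$ is the energy per atom of pristine blue phosphorene and the factor of two accounts for the two equivalent defect lines in the cell.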
\begin{table}[htb]
\centering
\caption{The formation energy $E_f^l$, the kink angle $\theta$, and the band gap $E_{\text{gap}}$ of the different defect lines illustrated in \autoref{defect_ribbon}. \label{tab-DL}}
\begin{tabularx}{0.8\linewidth}{l|XXXXX}
\hline \hline
& $C^{ac}$ & $D^{ac}$ & $F^{ac}$ & $G^{zz}$ & $H^{zz}$\\
\hline
$E_f^l$ (eV/\AA) & 0.05 & -0.003 & 0.08 & 0.09 & 0.19 \\
$\theta$ ($^{\circ}$) & 139.7 & 112.4 & 89.5 & 118.5 & 126.7 \\
$E_{\text{gap}}$ (eV) & 1.56 & 1.64 & 1.43 & 1.28 & 1.20 \\
\hline \hline
\end{tabularx}
\end{table}
Note that one can distinguish two kinds of defects: (i) defects that leave the $\beta$ phase unaltered on both sides of the defect line and (ii) those defects that invert the buckling orientation on one side with respect to the other. $F_{AA/BB}^{ac}$ and $H_{AA/BB}^{zz}$ belong to the first type, while $C_{AB}^{ac}$, $D_{BA}^{ac}$, and $G_{AB/BA}^{zz}$ belong to the second type.
\subsection{Defect-induced faceted PNTs}
The different defect lines discussed above can now be combined to create defect-induced faceted PNTs (DIF-PNTs).
According to the type, the formation energy, and the preferred angles of the defects, we can make intuitive guesses about which defect combinations are possible and energetically favorable. Some examples of DIF-PNTs are pictured in \autoref{tubes}.
\begin{figure}[htb]
\centering
\includegraphics[width=0.7\linewidth]{Nanotu_tubes.eps}%
\caption{Selected examples of defect-induced faceted PNTs. \label{tubes}}
\end{figure}
Not all defect combinations are possible or favorable and we therefore used the following guidelines to make potentially interesting tubes:
(i) An even number of defects that invert the buckling (i.e. the second type of defects discussed in previous section) must be included to make closed nanotubes. (ii) The total number of defects should be as small as possible because every defect line has a finite formation energy. (iii) For a nanotube with $N$ defects, the sum of the defect angles should be close to $(N-2)\times 180^{\circ}$ in order to avoid straining the angles too much.
To compare the DIF-PNTs with round tubes, we need to define a radius for the faceted tubes. To this end, we use the radius of a rolled tube with the same orientation and number of P atoms as the DIF-PNT. The radius of a DIF-PNT is not only determined by the defect lines present, but also by the size of the defect-free $\beta$-phase regions, which can also be varied. The formation energy of a DIF-PNT is defined as $E_f^t=(E_{\text{DIF-PNT}}-E_{\text{sheet}})/N$, where $N$ is the number of atoms. When the angles are perfectly matched in an ideal faceted tube, the formation energy of the tube is expected to decrease as $R^{-1}$ instead of the $R^{-2}$ dependence of round tubes. This $R^{-1}$ dependence simply follows from the fact that the formation energy of the defects is (nearly) independent of the radius, while the number of atoms increases linearly with the radius of the tube. The different behavior of the formation energies of (ideal) faceted and round nanotubes as a function of radius implies that there is a crossover radius $R_0$ such that faceted tubes with $R<R_0$ are more stable than round ones.
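For the ideal exponents this crossover can be written down explicitly: equating $E_f^t=a_fR^{-1}$ with $E_S=a_rR^{-2}$ gives
\begin{equation}
R_0=\frac{a_r}{a_f},
\end{equation}
so a faceted tube is favored whenever $R<R_0$; for the fitted, non-ideal exponents listed in \autoref{tab-DT} the corresponding expression is $R_0=(a_r/a_f)^{1/(b_r-b_f)}$.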
We will first take a look at armchair PNTs and consider zigzag PNTs in the next section.
\paragraph*{armchair PNTs}
For armchair nanotubes, we need defect lines along the zigzag direction. We discussed three of such defects above and we will combine them to make energetically favorable armchair DIF-PNTs. The first two defects, $C_{AB}^{ac}$ and $D_{BA}^{ac}$, have opposite effects on the buckling of the P atoms and should always appear in pairs in order to match the buckling at the two edges of the nanoribbon. The third defect, $F_{AA/BB}^{ac}$, can be regarded as a combination of the first two and can always be added to an existing tube. In order to distinguish the different defected nanotubes we introduce the notation $A(m,m,n)$ to indicate an armchair ($A$) DIF-PNT with $m$ defects of the first and second kind and $n$ of the third. To keep the strain on the defect angles low, we need at least three defects (triangular tube).
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{Nanotu_AE.eps}%
\caption{The formation energy $E_f^t$ of armchair DIF-PNTs compared with the strain energy $E_S^{\beta}$ of $\beta_{ac}$-PNT (black curve with circle symbols). \label{defect_A}}
\end{figure}
The only possibilities for such tubes are $A(1,1,1)$ and $A(0,0,3)$. The strain on the former is considerable and this kind of tube is therefore not considered. Quadrilateral tubes can be made from the following combinations: $A(2,2,0)$, $A(1,1,2)$, and $A(0,0,4)$. Pentagons and hexagons can be formed with $A(2,2,1)$, $A(1,1,3)$, $A(0,0,5)$, $A(3,3,0)$, $A(2,2,2)$, $A(1,1,4)$, and $A(0,0,6)$. The 3rd and 7th of these tubes are not considered because of the excessive incorporation of the energetically most unfavorable $F^{ac}$ defects. Adding even more defect lines becomes unfavorable because large tubes would be required to incorporate such a number of defects. Therefore, we only investigate the smallest possible nanotubes as typical examples and do not consider their behavior as a function of the tube radius.
In \autoref{defect_A}, the formation energy of the different armchair PNTs is shown as a function of tube radius for both faceted and round tubes. The crossover in the formation energy between the faceted and round tubes is nicely observed for several faceted nanotubes, such as $A(0,0,3)$. The point where the crossover occurs depends on the type and number of defects and ranges from approximately 7 to more than 15 {\AA}. The most favorable armchair DIF-PNTs are the quadrilateral A(2,2,0) and the hexagonal A(3,3,0) nanotubes, with a slight preference for the latter. Due to their larger formation energy, $F^{ac}$ defects are rarely included in the energetically favorable armchair DIF-PNTs.\\
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{Nanotu_ZE.eps}%
\caption{The formation energy $E_f^t$ of zigzag DIF-PNTs compared with the strain energy $E_S^{\beta}$ of $\beta_{zz}$-PNT (black curve with circle symbols). \label{defect_Z}}
\end{figure}
\paragraph*{zigzag PNTs}
For zigzag nanotubes, only two kinds of defect lines are included to make faceted tubes, namely $G^{zz}$ and $H^{zz}$. We use the notation $Z(m,n)$ to indicate a zigzag ($Z$) DIF-PNT with $m$ defects of the first and $n$ of the second kind, see \autoref{tubes} for examples. The total number of $G^{zz}$ defects should be even, while the number of $H^{zz}$ defects is arbitrary. Again, we need at least three defect lines and the only possibility is $Z(0,3)$ in this case. Quadrilateral tubes can be made from $Z(4,0)$, $Z(2,2)$, and $Z(0,4)$. Other tubes that we considered are $Z(4,1)$, $Z(6,0)$, and $Z(8,0)$.
In \autoref{defect_Z}, the formation energy of the different zigzag PNTs is shown as a function of tube radius for both faceted and round tubes. It is seen that the crossover in the formation energy between the faceted and round tubes also occurs for the zigzag nanotube, but at much smaller radii as compared to the armchair nanotubes (from approximately 4 to 6.5 {\AA}). The quadrilateral Z(4,0) tubes are found to be the most stable zigzag DIF-PNTs.
\begin{table}[htb] \centering
\caption{Fitting coefficients for the formation energy $E_f^t=aR^{-b}$ of DIF-PNTs.\label{tab-DT}}
\begin{small}
\begin{tabularx}{\linewidth}{l|lXXXXXX}
\hline\hline
Tube & $\beta_{ac}$-PNT & $A(2,2,0)$ & $A(3,3,0)$ & $A(0,0,3)$ & $A(0,0,4)$ & $A(2,2,1)$ & $A(3,3,1)$ \\
\hline
$a$ & 1.70 & 0.33 & 0.12 & 0.28 & 0.19 & 0.16 & 0.24 \\
$b$ & 1.92 & 1.37 & 0.89 & 1.12 & 0.86 & 0.93 & 1.04 \\
\hline\hline
\end{tabularx}
\end{small}
\vspace{3mm} \\
\begin{tabularx}{0.8\linewidth}{@{\extracolsep{\fill}}l|lXXXXX}
\hline\hline
Tube & $\beta_{zz}$-PNT & $Z(4,0)$ & $Z(6,0)$ & $Z(0,3)$ & $Z(0,4)$ & $Z(4,1)$ \\
\hline
$a$ & 2.63 & 0.61 & 0.30 & 0.87 & 0.34 & 3.70 \\
$b$ & 2.09 & 1.36 & 0.88 & 1.23 & 0.59 & 2.25 \\
\hline\hline
\end{tabularx}
\end{table}
It is interesting to examine the formation energy of the faceted armchair and zigzag PNTs in more detail. We fit the formation energy of the different tubes with the function $E_f^t=aR^{-b}$. This function has the same form as the strain energy ($E_S$) defined in \autoref{natu_DL}, but it now includes both strain and defect energies. A $b$ parameter close to 2 indicates that the strain energy dominates, while $b\approx1$ is expected for unstrained defected tubes. In \autoref{tab-DT}, we give the fitting parameters obtained for some typical types of DIF-PNTs. The round tubes ($\beta_{ac}$-PNT and $\beta_{zz}$-PNT) indeed have parameters close to 2 and the defected tubes have fitting parameters close to 1, although there are some substantial deviations in the case of faceted zigzag nanotubes because of the limited number of data points and the smallness of the tubes.\\
\subsection{Electronic properties}
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{Nanotu_AB.eps}%
\caption{Band gaps of armchair DIF-PNTs compared with the rolled $\beta_{ac}$-PNT (black curve with circle symbols). \label{fig-band-A}}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{Nanotu_band_pdos.eps}%
\caption{Band structure (along the defect line) and pDOS of selected DIF-PNTs: (a) for $C^{ac}$ and (b) for $G^{zz}$. Insets: charge density at the CBM and VBM. \label{natu_bands}}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.9\linewidth]{Nanotu_ZB.eps}%
\caption{Band gaps of zigzag DIF-PNTs compared with the rolled $\beta_{zz}$-PNT (black curve with circle symbols). \label{fig-band-B}}
\end{figure}
In this section, we investigate the electronic properties of the DIF-PNTs with a focus on the electronic band gap. In contrast to the inverse proportionality between band gap and diameter in carbon nanotubes\cite{Odom2000}, the band gap of round PNTs increases with the radius. For multiphase faceted PNTs, it was shown previously that the band gap does not show any clear dependence on the radius but rather spans the range between those of the composing structural phases\cite{Guan2014a}.
In \autoref{fig-band-A} we show the band gaps of the armchair DIF-PNTs together with the results for the rolled $\beta_{ac}$-PNT. A clear dependence of the band gap on the radius can be observed. The band gap increases with radius and converges to two different limits, well below the limit of the round tubes. The origin of this behavior lies in the character of the VBM and CBM states of the DIF-PNTs, which determine the size of the band gap. As illustrated in the insets of \autoref{natu_bands}, the VBM and CBM states are localized on the defect lines. In other words, the defect levels associated with the defect lines fall inside the blue phosphorene gap and determine the band gap of the defect-induced faceted PNTs. This is confirmed by the PDOS in \autoref{natu_bands}, where a clear defect peak in the PDOS can be found inside the band gap of pure (inter-defect) blue phosphorene. Therefore, the electronic band gap will not converge to the blue phosphorene limit, but rather to the gap size of the isolated defects as calculated in \autoref{natu_DL} (\autoref{tab-DL}). The tubes contain various types of defects, but the defect with the smallest gap determines the total band gap of the tube. The decreased band gaps for smaller tubes can be attributed to inter-defect interactions and possibly to some remaining bending stresses. The decrease in the band gap when the distance between the defect lines decreases was also observed for the defected phosphorene sheets, illustrated in \autoref{defect_ribbon}, in which no bending strain was present.
The dependence of the band gap on the radius of the zigzag DIF-PNTs is shown in \autoref{fig-band-B}. As with the armchair DIF-PNTs, the band gaps converge to those of the isolated defects at large radii. Compared to the armchair DIF-PNTs, however, a different behavior can be observed for tubes with small radii: with decreasing radius the band gap first decreases, but then increases again for very small radii. The first decrease can be attributed to the interaction between the different defect states. The increase of the band gap at very small radii might be attributed to the structural interaction between the defects: the defects of the zigzag nanotubes are wider (i.e., they distort the blue phosphorene phase over a wider range) than the armchair ones and become structurally distorted when the distance between them becomes smaller.
\subsection{Comparison to multiphase faceted PNT}
In the last part of this work, we will compare our results of DIF-PNTs to the multiphase faceted PNTs proposed by \citet{Guan2014a}. The latter tubes are made of fixed combinations of $\alpha$, $\beta$, $\gamma$, and $\delta$-phase phosphorene with variable widths, as depicted in Fig.~1 of Ref.~\cite{Guan2014a}. Our most favorable armchair DIF-PNTs, namely the $A(3,3,0)$ tubes, are closely related to some of the tubes of Guan \textit{et al.} and have similar symmetry and formation energies. The (nearly) equally stable quadrilateral $A(2,2,0)$ tubes, on the other hand, fall outside their description due to the different number of facets. The DIF-PNTs are not restricted to the triangular symmetry of the multiphase PNTs and therefore allow for a much larger variety of possible low-energy nanotubes.
For zigzag PNTs the differences are even larger. Our zigzag DIF-PNTs have no analogues in the multiphase description because the multiphase zigzag PNTs are exclusively built from the energetically less favorable $\gamma$ and $\delta$ phases. The multiphase description provides only one structure with lower formation energy than rolled $\beta$-PNT (see Fig. 3(a) of Ref.~\cite{Guan2014a}), in contrast to the multitude of zigzag DIF-PNTs with lower formation energies that we found (see \autoref{defect_Z}). Blue ($\beta$) phosphorene is significantly better suited to make small zigzag PNTs than the other structural phases ($\alpha$, $\gamma$, and $\delta$).
\subsection{Summary}
In this work, we investigated a new class of faceted PNTs using first-principles calculations. We started our study by examining round armchair and zigzag PNTs of black and blue phosphorus and showed that blue phosphorene is better suited to make small nanotubes. Then we proposed five different types of defect lines to create kinks in $\beta$ phosphorene sheets. We investigated the formation energy and kink angle of these defects and used this information to create defect-induced faceted PNTs. After identifying some suitable defect combinations, we calculated the formation energy as a function of the PNT radius and demonstrated the enhanced stability of the DIF-PNTs with respect to round tubes. We showed that the VBM and CBM states of the DIF-PNTs are localized on the defect lines and that these states control the electronic properties of the tubes. The band gap of armchair DIF-PNTs increases with the radius and converges to the gap corresponding to isolated (infinitely separated) defect lines. For zigzag DIF-PNTs, a more complicated behavior was observed which originates from the wider structural distortions associated with the defect lines.
Finally, we compared our defect-induced PNTs with the multiphase faceted PNTs proposed by Guan \textit{et al.}\cite{Guan2014a}. We found similar formation energies for armchair PNTs, but more favorable DIF-PNTs can be created with small radii due to less stringent restrictions regarding the structure (symmetry) of the tubes. For zigzag PNTs, the DIF-PNTs are significantly more stable in comparison to rolled $\beta$-PNTs and previously reported multiphase faceted PNTs. Furthermore, we found a much larger variety of stable zigzag PNTs.
|
{"hexsha": "515236e7022a8a87318093b73aefb17f875a7ebd", "size": 130042, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter5/chapter5.tex", "max_stars_repo_name": "ErpanArkin/phd-thesis", "max_stars_repo_head_hexsha": "155d4f8f8c4e87dea894403cdc58739ba5580aef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter5/chapter5.tex", "max_issues_repo_name": "ErpanArkin/phd-thesis", "max_issues_repo_head_hexsha": "155d4f8f8c4e87dea894403cdc58739ba5580aef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter5/chapter5.tex", "max_forks_repo_name": "ErpanArkin/phd-thesis", "max_forks_repo_head_hexsha": "155d4f8f8c4e87dea894403cdc58739ba5580aef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 114.2724077329, "max_line_length": 2551, "alphanum_fraction": 0.7678903739, "num_tokens": 35187}
|
import logging
import numpy as np
import math
import psutil
import time
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from autogluon.core.constants import REGRESSION
from autogluon.core.utils.exceptions import NotEnoughMemoryError
from .knn_utils import FAISSNeighborsClassifier, FAISSNeighborsRegressor
from ..abstract.model_trial import skip_hpo
from ..abstract.abstract_model import AbstractModel
from ...features.feature_metadata import R_CATEGORY, R_OBJECT, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT
logger = logging.getLogger(__name__)
# TODO: Normalize data!
class KNNModel(AbstractModel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._model_type = self._get_model_type()
def _get_model_type(self):
if self.problem_type == REGRESSION:
return KNeighborsRegressor
else:
return KNeighborsClassifier
def _preprocess(self, X, **kwargs):
X = super()._preprocess(X, **kwargs)
X = X.fillna(0).to_numpy(dtype=np.float32)
return X
def _set_default_params(self):
default_params = {
'weights': 'uniform',
'n_jobs': -1,
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
ignored_type_group_raw=[R_CATEGORY, R_OBJECT], # TODO: Eventually use category features
ignored_type_group_special=[S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT],
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
# TODO: Enable HPO for KNN
def _get_default_searchspace(self):
spaces = {}
return spaces
def _fit(self, X_train, y_train, time_limit=None, **kwargs):
time_start = time.time()
X_train = self.preprocess(X_train)
self._validate_fit_memory_usage(X_train=X_train) # TODO: Can incorporate this into samples, can fit on portion of data to satisfy memory instead of raising exception immediately
num_rows_max = len(X_train)
# FIXME: v0.1 Must store final num rows for refit_full or else will use everything! Worst case refit_full could train far longer than the original model.
if time_limit is None or num_rows_max <= 10000:
self.model = self._model_type(**self.params).fit(X_train, y_train)
else:
self.model = self._fit_with_samples(X_train=X_train, y_train=y_train, time_limit=time_limit - (time.time() - time_start))
def _validate_fit_memory_usage(self, X_train):
max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio']
model_size_bytes = 4 * X_train.shape[0] * X_train.shape[1] # Assuming float32 types
expected_final_model_size_bytes = model_size_bytes * 3.6 # Roughly what can be expected of the final KNN model in memory size
if expected_final_model_size_bytes > 10000000: # Only worth checking if expected model size is >10MB
available_mem = psutil.virtual_memory().available
model_memory_ratio = expected_final_model_size_bytes / available_mem
if model_memory_ratio > (0.15 * max_memory_usage_ratio):
logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory...')
if model_memory_ratio > (0.20 * max_memory_usage_ratio):
raise NotEnoughMemoryError # don't train full model to avoid OOM error
# TODO: Consider making this fully generic and available to all models
def _fit_with_samples(self, X_train, y_train, time_limit):
"""
Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used.
X_train and y_train must already be preprocessed
"""
time_start = time.time()
sample_growth_factor = 2 # Growth factor of each sample in terms of row count
        sample_time_growth_factor = 8  # Assume next sample will take 8x longer than previous (somewhat safe, but there are datasets where it is even >8x)
num_rows_samples = []
num_rows_max = len(X_train)
num_rows_cur = 10000
while True:
num_rows_cur = min(num_rows_cur, num_rows_max)
num_rows_samples.append(num_rows_cur)
if num_rows_cur == num_rows_max:
break
num_rows_cur *= sample_growth_factor
num_rows_cur = math.ceil(num_rows_cur)
if num_rows_cur * 1.5 >= num_rows_max:
num_rows_cur = num_rows_max
def sample_func(chunk, frac):
# Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba)
n = max(math.ceil(len(chunk) * frac), 1)
return chunk.sample(n=n, replace=False, random_state=0)
if self.problem_type != REGRESSION:
y_train_df = y_train.to_frame(name='label').reset_index(drop=True)
else:
y_train_df = None
time_start_sample_loop = time.time()
time_limit_left = time_limit - (time_start_sample_loop - time_start)
for i, samples in enumerate(num_rows_samples):
if samples != num_rows_max:
if self.problem_type == REGRESSION:
idx = np.random.choice(num_rows_max, size=samples, replace=False)
else:
idx = y_train_df.groupby('label', group_keys=False).apply(sample_func, frac=samples/num_rows_max).index
X_train_samp = X_train[idx, :]
y_train_samp = y_train.iloc[idx]
else:
X_train_samp = X_train
y_train_samp = y_train
self.model = self._model_type(**self.params).fit(X_train_samp, y_train_samp)
time_limit_left_prior = time_limit_left
time_fit_end_sample = time.time()
time_limit_left = time_limit - (time_fit_end_sample - time_start)
time_fit_sample = time_limit_left_prior - time_limit_left
time_required_for_next = time_fit_sample * sample_time_growth_factor
logger.log(15, f'\t{round(time_fit_sample, 2)}s \t= Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)')
if time_required_for_next > time_limit_left and i != len(num_rows_samples) - 1:
logger.log(20, f'\tNot enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. (Training KNN model on {num_rows_samples[i+1]} rows is expected to take {round(time_required_for_next, 2)}s)')
break
return self.model
# TODO: Add HPO
def hyperparameter_tune(self, **kwargs):
return skip_hpo(self, **kwargs)
class FAISSModel(KNNModel):
def _get_model_type(self):
if self.problem_type == REGRESSION:
return FAISSNeighborsRegressor
else:
return FAISSNeighborsClassifier
def _set_default_params(self):
default_params = {
'index_factory_string': 'Flat',
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
super()._set_default_params()
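# Illustrative sketch (not part of the original module): the progressive
# sampling schedule used by KNNModel._fit_with_samples. Row counts double each
# round and snap to the full dataset once a sample would land within 1.5x of it.
def _example_sample_schedule(num_rows_max, start=10000, growth=2):
    schedule = []
    num_rows_cur = start
    while True:
        num_rows_cur = min(num_rows_cur, num_rows_max)
        schedule.append(num_rows_cur)
        if num_rows_cur == num_rows_max:
            break
        num_rows_cur = math.ceil(num_rows_cur * growth)
        if num_rows_cur * 1.5 >= num_rows_max:
            num_rows_cur = num_rows_max
    return schedule
# e.g. _example_sample_schedule(100000) -> [10000, 20000, 40000, 100000]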
|
{"hexsha": "7ca7bb1c7c276436dab6cdc0d1172130a9e106c0", "size": 7492, "ext": "py", "lang": "Python", "max_stars_repo_path": "tabular/src/autogluon/tabular/models/knn/knn_model.py", "max_stars_repo_name": "jrzaurin/autogluon", "max_stars_repo_head_hexsha": "b7dbed302c4181f9160b2f8f88f73c1fa37941cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tabular/src/autogluon/tabular/models/knn/knn_model.py", "max_issues_repo_name": "jrzaurin/autogluon", "max_issues_repo_head_hexsha": "b7dbed302c4181f9160b2f8f88f73c1fa37941cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tabular/src/autogluon/tabular/models/knn/knn_model.py", "max_forks_repo_name": "jrzaurin/autogluon", "max_forks_repo_head_hexsha": "b7dbed302c4181f9160b2f8f88f73c1fa37941cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2469135802, "max_line_length": 237, "alphanum_fraction": 0.6749866524, "include": true, "reason": "import numpy", "num_tokens": 1672}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 19:50:39 2019
@author: Brandon
"""
import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt
def walk(N):
rand.seed()
x = [0.0]
y = [0.0]
for n in range(N):
x.append(x[-1] + (rand.random() - 0.5)*2.0)
y.append(y[-1] + (rand.random() - 0.5)*2.0)
return np.array(x), np.array(y)
M, N = 0, 1000 #Setting variables for later
Distance = list() #Creating an empty list to hold the distances we will record
Walker = list() #Creating an empty list of the number of steps each walker takes
while(M<=99):
walker1=walk(N) #Our walkers
Distance.append(np.sqrt((walker1[0][-1]**2)+(walker1[1][-1]**2))) #Appending the Distance list with the distances of each walker
Walker.append(N) # Appending the Walker list with the number of steps the walker has taken
M = M+1 #Increasing our M value to progress the while loop
N = N+45 #Increasing our N value to change our walker's number of steps
plt.plot(Walker, Distance, 'r+')
plt.xlabel("Number of steps")
plt.ylabel("Distance from the origin")
plt.show()
# By looking at the plot we receive, we can fairly well determine that the distance
# from the origin is fairly random, and does not necessarily increase, decrease, or
# stagnate as the N value of the walker changes. This is exactly as I imagined would happen.
# There is no reason why the distance should stay around any particular number. In fact,
# I believe if we plotted the average distance for several iterations of this code, we would find
# a similar plot of seemingly random values.
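# Sketch of the experiment suggested above (not part of the original script):
# average the final distance over several independent walks at a fixed N.
def avg_distance(N, trials=20):
    dists = []
    for _ in range(trials):
        x, y = walk(N)
        dists.append(np.sqrt(x[-1]**2 + y[-1]**2))
    return np.mean(dists)
# e.g. avg_distance(1000) gives one point of the averaged curve for
# comparison with the scatter plotted above.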
|
{"hexsha": "a4adcb45af59385d5bb81ae2c86166c62d4daa1b", "size": 1653, "ext": "py", "lang": "Python", "max_stars_repo_path": "Week 03/Exercise_06_Q4_BSW.py", "max_stars_repo_name": "bswood9321/PHYS-3210", "max_stars_repo_head_hexsha": "d780cac166688338ce91099cba4a4f6628430647", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Week 03/Exercise_06_Q4_BSW.py", "max_issues_repo_name": "bswood9321/PHYS-3210", "max_issues_repo_head_hexsha": "d780cac166688338ce91099cba4a4f6628430647", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Week 03/Exercise_06_Q4_BSW.py", "max_forks_repo_name": "bswood9321/PHYS-3210", "max_forks_repo_head_hexsha": "d780cac166688338ce91099cba4a4f6628430647", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4117647059, "max_line_length": 132, "alphanum_fraction": 0.6836055656, "include": true, "reason": "import numpy", "num_tokens": 439}
|
#!/usr/bin/env python
import cv2
import numpy as np
#listdir:for fetching data from a directory
from os import listdir
from os.path import isfile, join
from flask import render_template
import warnings
data_path = 'C:\\Users\\hp\\PycharmProjects\\faces\\'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path,f))]
Training_Data, Labels = [], []
for i, files in enumerate(onlyfiles):
image_path = data_path + onlyfiles[i]
images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
Training_Data.append(np.asarray(images, dtype=np.uint8))
Labels.append(i)
Labels = np.asarray(Labels, dtype=np.int32)
model = cv2.face.LBPHFaceRecognizer_create()
model.train(np.asarray(Training_Data), np.asarray(Labels))
print("Model Training Complete")
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
self.video = cv2.VideoCapture(0)
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
def __del__(self):
self.video.release()
def get_frame(self):
face_classifier = cv2.CascadeClassifier('C:\\Users\\hp\\PycharmProjects\\haarcascade_frontalface_default.xml')
def face_detector(img, size=0.5):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
            if len(faces) == 0:
return img, []
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi = img[y:y + h, x:x + w]
roi = cv2.resize(roi, (200, 200))
return img, roi
while True:
ret, frame = self.video.read()
image, face = face_detector(frame)
try:
'''faces = face_cascade.detectMultiScale(image, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tobytes()'''
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
result = model.predict(face)
                # model.predict returns (label, distance); a lower distance means
                # a closer match, so map it to a rough confidence percentage
                if result[1] < 500:
                    confidence = int(100 * (1 - result[1] / 300))
                    display_string = str(confidence) + '% Confidence it is user'
#ret, jpeg = cv2.imencode('.jpg', cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (250, 120, 255), 2)
# FACE CHECKING
if confidence > 85:
ret, jpeg = cv2.imencode('.jpg', cv2.putText(image, "Unlocked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2))
return jpeg.tobytes()
#cv2.putText(image, "Unlocked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
#cv2.imshow('Face Cropper', image)
                else:
                    ret, jpeg = cv2.imencode('.jpg', cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2))
                    # render_template("index.html", val=confidence) belongs in a Flask
                    # view function, not in this frame generator; returning it here
                    # made the jpeg return below unreachable, so it was removed
                    return jpeg.tobytes()
#cv2.putText(image, "Locked", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
#cv2.imshow('Face Cropper', image)'''
except:
faces = face_cascade.detectMultiScale(image, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tobytes()
#success, image = self.video.read()
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
#faces = face_cascade.detectMultiScale(image, 1.3, 5)
#for (x, y, w, h) in faces:
# cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
#ret, jpeg = cv2.imencode('.jpg', image)
# return jpeg.tobytes()
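# Minimal usage sketch (an assumption, not part of the original file): stream
# frames from VideoCamera in a Flask route using multipart/x-mixed-replace.
# The app and route names below are hypothetical.
from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # yield an MJPEG stream, one JPEG frame per iteration
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')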
|
{"hexsha": "cd8647c57b58f0129f661414c80083db518ce1a5", "size": 4464, "ext": "py", "lang": "Python", "max_stars_repo_path": "camera.py", "max_stars_repo_name": "akshaya26/face_recognition", "max_stars_repo_head_hexsha": "f81b68f86c63b0364aa1c44056183a3ef82257e2", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "camera.py", "max_issues_repo_name": "akshaya26/face_recognition", "max_issues_repo_head_hexsha": "f81b68f86c63b0364aa1c44056183a3ef82257e2", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "camera.py", "max_forks_repo_name": "akshaya26/face_recognition", "max_forks_repo_head_hexsha": "f81b68f86c63b0364aa1c44056183a3ef82257e2", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0, "max_line_length": 149, "alphanum_fraction": 0.5629480287, "include": true, "reason": "import numpy", "num_tokens": 1191}
|
[STATEMENT]
lemma partition_on_mset_add_single:
assumes "partition_on_mset A P"
shows "partition_on_mset (add_mset a A) (add_mset {#a#} P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. partition_on_mset (add_mset a A) (add_mset {#a#} P)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
partition_on_mset A P
goal (1 subgoal):
1. partition_on_mset (add_mset a A) (add_mset {#a#} P)
[PROOF STEP]
by (auto simp: partition_on_mset_def)
|
{"llama_tokens": 209, "file": "Design_Theory_Multisets_Extras", "length": 2}
|
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import plotnine as pn
from scipy.stats import skew
from pandas.api.types import CategoricalDtype
from sklearn.base import BaseEstimator, TransformerMixin
from minepy import MINE
from scipy import stats
from dcor import distance_correlation
def nested(obj, retloc = False):
'''
----------
Parameters
----------
obj: a list, series, or dataframe
retloc: True or False
Returns
-------
retloc = True
Returns locations of nested objects: For dataframes, it returns tuples
For other objects it returns a list of indicies
retloc = False
Returns True if any nested objects reside in passed object, False otherwise
Example
-------
a = pd.DataFrame({'first' : [1, 2, 3, (1,2,3), 4, 5, 6],
'second': [2, 4, 5, [1,3,4], 6, 7, 8]}
, columns = ['first', 'second'])
    nested(a, retloc = True)
    [(3, 0), (3, 1)]
    nested(a)
    True
----------
'''
# object types
otypes = (list, pd.core.series.Series, pd.core.frame.DataFrame)
# store locations of nested items
locs = list()
if isinstance(obj, otypes): pass
else: return "Function only accepts: List, Series, or Dataframe"
# nested types
ntypes = (list, tuple, set, np.ndarray,
pd.core.indexes.base.Index,
pd.core.series.Series,
pd.core.frame.DataFrame)
# dataframes
if isinstance(obj, (pd.core.frame.DataFrame)):
for row in range(len(obj)):
for col in range(len(obj.columns)):
if isinstance(obj.iloc[row,col], ntypes):
locs.append((row,col))
else: #other types
for i in range(len(obj)):
if isinstance(obj[i], ntypes):
locs.append(i)
if retloc: return locs
else: return len(locs) > 0
def loadboston():
'''
----------
Parameters
----------
None
Returns
-------
Boston corrected data objects:
1. df X and y dataframe
2. X predictors dataframe
3. y target series
-------
'''
source = 'https://raw.githubusercontent.com/bxp151/exploretransform/master/data/boston_corrected.txt'
df = pd.read_table(source, skiprows= 9)
df.columns = map(str.lower, df.columns)
df = df.drop( ['obs.', 'town#', 'medv', 'tract'],axis = 1)
df['chas'] = df['chas'].astype('category')
# Modify rad as ordinal
r = pd.Series(range(df['rad'].min(), df['rad'].max() + 1))
rad_cat = CategoricalDtype(categories=list(r), ordered=True)
df['rad'] = df['rad'].astype(rad_cat)
x = df.drop('cmedv', axis = 1)
y = df['cmedv']
return df, x, y
def explore(X):
'''
----------
Parameters
----------
X: dataframe to analyze
Returns
-------
Dataframe with statistics for each variable:
variable name of column
obs number of observations
q_zer number of zeros
p_zer percent zeros
q_na number of missing
p_na percent missing
    q_inf       quantity of infinity
p_inf percent infinity
dtype Python dtype
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
et.explore(df.iloc[:,0:5])
variable obs q_zer p_zer q_na p_na q_inf p_inf dtype
0 town 506 0 0.00 0 0.0 0 0.0 object
1 lon 506 0 0.00 0 0.0 0 0.0 float64
2 lat 506 0 0.00 0 0.0 0 0.0 float64
3 crim 506 0 0.00 0 0.0 0 0.0 float64
4 zn 506 372 73.52 0 0.0 0 0.0 float64
----------
'''
# Input Checks
if isinstance(X, (pd.core.frame.DataFrame)):
if nested(X):
return "Please collapse any nested values in your dataframe"
else:
pass
else:
return "Function only accetps dataframes"
# counts zeros for numeric dtype and returns zero for others
def cntzero(series):
if is_numeric_dtype(series): return sum(series == 0)
else:return 0
# counts inf values for numeric dtype and returns zero for others
def cntinf(series):
if is_numeric_dtype(series): return sum(np.isinf(series))
else: return 0
df = pd.DataFrame({'variable': X.columns})
df['obs'] = len(X)
df['q_zer'] = X.apply(cntzero, axis = 0).values
df['p_zer'] = round(df['q_zer'] / len(X) * 100, 2)
df['q_na'] = X.isna().sum().values
df['p_na'] = round(df['q_na'] / len(X) * 100, 2)
df['q_inf'] = X.apply(cntinf, axis = 0).values
df['p_inf'] = round(df['q_inf'] / len(X) * 100, 2)
df['dtype'] = X.dtypes.to_frame('dtypes').reset_index()['dtypes']
return df
def peek(X):
'''
----------
Parameters
----------
X: dataframe to peek into
Returns
-------
Columns based on passed dataframe:
variable name of variable
dtype Python dtype
lvls unique values of variable
obs number of observations
head first five observations
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
et.peek(df.iloc[:,0:5])
variable dtype ... obs head
0 town object ... 506 [Nahant, Swampscott, Swampscott, Marblehead, M...
1 lon float64 ... 506 [-70.955, -70.95, -70.936, -70.928, -70.922]
2 lat float64 ... 506 [42.255, 42.2875, 42.283, 42.293, 42.298]
3 crim float64 ... 506 [0.00632, 0.02731, 0.02729, 0.0323699999999999...
4 zn float64 ... 506 [18.0, 0.0, 0.0, 0.0, 0.0]
----------
'''
# Input Checks
if isinstance(X, (pd.core.frame.DataFrame)): pass
else: return "Function only accetps dataframes"
if nested(X): return "Please collapse any nested values in your dataframe"
g = pd.DataFrame({'variable': X.columns,
'dtype': X.dtypes.to_frame('dtypes').reset_index()['dtypes']},
index=(range(0,len(X.columns))))
g['lvls'] = X.nunique().values
g['obs'] = len(X)
g['head'] = ''
# get the first 5 items for each variable
# transpose the data frame and store the values
x = X.apply((pd.DataFrame.head), axis = 0 ).T.values
for i in range(0, len(x)):
g.at[i,'head'] = x[i]
return g
def plotfreq(freqdf):
'''
----------
Parameters
----------
freqdf dataframe generated by freq()
Returns
-------
Bar chart with frequencies & percentages in descending order
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
et.plotfreq(et.freq(X['town']))
Warning
-------
This function will likely not plot more than 100 unique levels properly.
----------
'''
# input checks
if isinstance(freqdf, (pd.core.frame.DataFrame)): pass
else: return print("\nFunction only accetps dataframes\n")
if len(freqdf.columns) == 4: pass
else: return print("\nInput must be a dataframe generated by freq()\n")
if sum(freqdf.columns[1:4] == ['freq', 'perc', 'cump']) == 3: pass
else: return print("\nInput must be a dataframe generated by freq()\n")
if len(freqdf) < 101: pass
else: return print("\nUnable to plot more than 100 items")
# label for plot
lbl = freqdf['freq'].astype(str).str.cat('[ ' + freqdf['perc'].astype(str) + '%' + ' ]'
, sep = ' ')
# create variable to be used in aes
aesx = 'reorder(' + freqdf.columns[0] + ', freq)'
# build plot
plot = (
pn.ggplot(freqdf) +
pn.aes(x = aesx,
y = 'freq',
fill = 'freq',
label = lbl) +
pn.geom_bar(stat = 'identity') +
pn.coord_flip() +
pn.theme(axis_text_y = pn.element_text(size=6, weight = 'bold'),
legend_position = 'none') +
pn.labs(x=freqdf.columns[0], y="Freq") +
pn.scale_fill_gradient2(mid='bisque', high='blue') +
pn.geom_text(size = 6,
nudge_y = .7)
)
return plot
def freq(srs):
'''
----------
Parameters
----------
srs: series to analyze
Returns
-------
Dataframe with the following columns:
<name> The unique values of the series
freq Count of each level
perc Percent each level contributes
cump Cumulative percent
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
et.freq(X['town'])
town freq perc cump
0 Cambridge 30 5.93 5.93
1 Boston Savin Hill 23 4.55 10.47
2 Lynn 22 4.35 14.82
3 Boston Roxbury 19 3.75 18.58
4 Newton 18 3.56 22.13
.. ... ... ... ...
87 Topsfield 1 0.20 99.21
88 Manchester 1 0.20 99.41
89 Dover 1 0.20 99.60
90 Hanover 1 0.20 99.80
91 Lincoln 1 0.20 100.00
----------
'''
# input checks
if isinstance(srs, (pd.core.series.Series)): pass
else: return "Function only accetps series"
# Create frequency dataframe
cnts = srs.value_counts()
perc = round(cnts / sum(cnts.values) * 100, 2)
cump = round(100 * (cnts.cumsum() / cnts.sum()), 2)
freqdf = pd.DataFrame(data = dict(var = cnts.keys(),
freq = cnts,
perc = perc,
cump = cump))
freqdf.rename(columns={'var': srs.name}, inplace=True)
freqdf = freqdf.reset_index(drop = True)
return freqdf
def corrtable(X, y = None, cut = 0.9, methodx = 'spearman', methody = None, full = False):
'''
----------
Parameters
----------
X predictors dataframe
y target (unused in exploretransform v 1.0.0)
cut correlation threshold
full
True Returns the full corrtable with drop column
False (default) Returns without the drop column
methodx used to calculate correlations amount predictors
methody* used to calculate correlations between predictors & target
*(unused in exploretransform v 1.0.0)
pearson standard correlation coefficient
kendall Kendall Tau correlation coefficient
spearman Spearman rank correlation
callable callable with input two 1d ndarrays and returning a float. Note
that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
Returns
-------
This function analyzes the correlation matrix for the dataframe. It
uses the average correlation for the row and column in the matrix and
compares it with the cell value to decide on potential drop candidates.
Columns
v1 varaible 1
v2 variable 2
v1.target metric used to compare v1 and v2 for drop
v2.target metric used to compare v1 and v2 for drop
corr pairwise correlation based on method
    drop        variable flagged for removal when its pairwise correlation exceeds cut
For more information please visit
https://towardsdatascience.com/are-you-dropping-too-many-correlated-features-d1c96654abe6
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
X = X.select_dtypes('number')
et.corrtable(X, cut = 0.7, full = True)
v1 v2 v1.target v2.target corr drop
52 nox dis 0.578860 0.526551 0.880015 nox
25 crim nox 0.562681 0.578860 0.821465 nox
63 age dis 0.525682 0.526551 0.801610 dis
51 nox age 0.578860 0.525682 0.795153 nox
42 indus nox 0.549707 0.578860 0.791189 nox
.. ... ... ... ... ... ...
8 lon tax 0.242329 0.486066 0.050237
22 lat lstat 0.159767 0.522203 0.039065
14 lat indus 0.159767 0.549707 0.021472
18 lat dis 0.159767 0.526551 0.012832
20 lat ptratio 0.159767 0.391352 0.005332
----------
'''
# Get correlation matrix and upper triagle
corr_mtx = X.corr(method = methodx).abs()
avg_corr = corr_mtx.mean(axis = 1)
    up = corr_mtx.where(np.triu(np.ones(corr_mtx.shape), k=1).astype(bool))
ct = pd.DataFrame(columns=(['v1', 'v2', 'v1.target',
'v2.target','corr', 'drop' ]))
for row in range(len(up)-1):
col_idx = row + 1
for col in range (col_idx, len(up)):
drop = ' '
if(corr_mtx.iloc[row, col] > cut):
if(avg_corr.iloc[row] > avg_corr.iloc[col]):
drop = corr_mtx.columns[row]
else:
drop = corr_mtx.columns[col]
# Populate results table
s = pd.Series([ corr_mtx.index[row],
up.columns[col],
avg_corr[row],
avg_corr[col],
up.iloc[row,col],
drop],
index = ct.columns)
ct = ct.append(s, ignore_index = True)
ct.sort_values('corr', ascending = False, inplace=True)
if full: return ct
else: return ct.drop('drop', axis = 1)
def calcdrop(ct):
'''
----------
Parameters
----------
ct: results table from correlation functions
Returns
-------
List of columns to drop
Example
-------
No example - Function is called by correlation functions
----------
'''
# All variables with correlation > cutoff
all_corr_vars = list(set(ct['v1'].tolist() + ct['v2'].tolist()))
# All unique variables in drop column
poss_drop = list(set(ct['drop'].tolist()))
# Keep any variable not in drop column
keep = list(set(all_corr_vars).difference(set(poss_drop)))
# Drop any variables in same row as a keep variable
p = ct[ ct['v1'].isin(keep) | ct['v2'].isin(keep) ][['v1', 'v2']]
q = list(set(p['v1'].tolist() + p['v2'].tolist()))
drop = (list(set(q).difference(set(keep))))
# Remove drop variables from possible drop
poss_drop = list(set(poss_drop).difference(set(drop)))
# subset ct dataframe to include possible drop pairs
m = ct[ ct['v1'].isin(poss_drop) | ct['v2'].isin(poss_drop) ][['v1', 'v2','drop']]
# remove rows that are decided (drop), take set and add to drops
more_drop = set(list(m[~m['v1'].isin(drop) & ~m['v2'].isin(drop)]['drop']))
for item in more_drop:
drop.append(item)
return drop
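# Illustrative usage (a sketch, not in the original module): corrtable() with
# full=True produces the table that calcdrop() reduces to a final drop list.
#
#   ct = corrtable(X, cut=0.7, full=True)
#   cols_to_drop = calcdrop(ct)
#   X_reduced = X.drop(cols_to_drop, axis=1)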
def skewstats(X):
'''
----------
Parameters
----------
X: dataframe to analyze
Returns
-------
Dataframe with the following columns:
index Variable name
dtype Python dtype
skewness Skewness statistic calculated by skew function
magnitude
2-high Skewness less than -1 or greater than 1
1-medium Skewness between -1 and -0.5 or 0.5 and 1
0-approx_symmetric Skewness between -0.5 and 0.5
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
et.skewstats(df)
dtype skewness magnitude
cmedv float64 1.107616 2-high
crim float64 5.207652 2-high
zn float64 2.219063 2-high
dis float64 1.008779 2-high
b float64 -2.881798 2-high
nox float64 0.727144 1-medium
age float64 -0.597186 1-medium
tax int64 0.667968 1-medium
ptratio float64 -0.799945 1-medium
lstat float64 0.903771 1-medium
lon float64 -0.204775 0-approx_symmetric
lat float64 -0.086421 0-approx_symmetric
indus float64 0.294146 0-approx_symmetric
rm float64 0.402415 0-approx_symmetric
----------
'''
# input checks
if isinstance(X, (pd.core.frame.DataFrame)): pass
else: return "Function only accetps dataframes"
d = X.select_dtypes('number')
if len(d.columns) == 0: return "Dataframe has no numeric columns"
def skew_series(srs):
s = skew(srs.array)
a = 'placeholder'
return ([srs.dtype,float(s), a])
result = d.apply(skew_series, axis = 0).transpose()
result.columns = ['dtype', 'skewness', 'magnitude']
result['skewness'] = result['skewness'].astype(float)
def magnitude_skew(x):
        # calculate magnitude of skewness
w = abs(x)
if w > 1:
return '2-high'
if w <= 1 and w > 0.5:
return '1-medium'
else:
return '0-approx_symmetric'
# run apply on analysis
result['magnitude'] = result['skewness'].apply(magnitude_skew)
# sort values
result.sort_values('magnitude', ascending = False, inplace=True)
return result
def ascores(X, y):
'''
----------
Parameters
----------
X: numeric dataframe to compute association measure with y
y: series containing target values
Returns
-------
Dataframe with the following association scores:
pearson: pearson correlation
kendall: kendall correlation
spearman: spearman correlation
mic: maximal information coefficient
dcor: distance correlation
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
X = X.select_dtypes('number')
et.ascores(X, y)
pearson kendall spearman mic dcor
lon 0.322947 0.278908 0.420940 0.379753 0.435849
lat 0.006826 0.013724 0.021420 0.234796 0.167030
crim 0.389582 0.406992 0.562982 0.375832 0.528595
zn 0.360386 0.340738 0.438768 0.290145 0.404253
indus 0.484754 0.420263 0.580004 0.414140 0.543948
nox 0.429300 0.398342 0.565899 0.442515 0.523653
rm 0.696304 0.485182 0.635092 0.461610 0.711034
age 0.377999 0.391067 0.551747 0.414676 0.480248
dis 0.249315 0.313745 0.446392 0.316136 0.382746
tax 0.471979 0.418005 0.566999 0.336899 0.518158
ptratio 0.505655 0.397146 0.554168 0.371628 0.520320
b 0.334861 0.126766 0.186011 0.272469 0.385468
lstat 0.740836 0.671445 0.857447 0.615427 0.781028
----------
'''
# Convert any ints to float for dcor calculation
if len(X.select_dtypes(int).columns) > 0:
for col in X.select_dtypes(int).columns:
X.loc[:, col] = X[col].astype('float')
r = pd.DataFrame()
mine = MINE(alpha=0.6, c=15)
for col in X.columns:
mine.compute_score(X[col], y)
r.loc[col, 'pearson'] = abs(stats.pearsonr(X[col], y)[0])
r.loc[col, 'kendall'] = abs(stats.kendalltau(X[col], y)[0])
r.loc[col, 'spearman'] = abs(stats.spearmanr(X[col], y)[0])
r.loc[col, 'mic'] = mine.mic()
r.loc[col, 'dcor'] = distance_correlation(X[col], y)
return r
class ColumnSelect( BaseEstimator, TransformerMixin ):
'''
----------
Parameters
----------
X dataframe
feature_names list of column names to select
Returns
-------
dataframe X subsetted by column using feature_names
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
colnames = ['lat', 'lon']
et.ColumnSelect(colnames).fit_transform(X)
lat lon
0 42.2550 -70.9550
1 42.2875 -70.9500
2 42.2830 -70.9360
3 42.2930 -70.9280
4 42.2980 -70.9220
.. ... ...
501 42.2312 -70.9860
502 42.2275 -70.9910
503 42.2260 -70.9948
504 42.2240 -70.9875
505 42.2210 -70.9825
----------
'''
def __init__( self, feature_names):
self.feature_names = feature_names
def fit( self, X, y = None ):
return self
def transform( self, X, y = None ):
return X[self.feature_names]
class CategoricalOtherLevel( BaseEstimator, TransformerMixin ):
'''
----------
Parameters
----------
colname name of column to create "other" level
    threshold*      any categories occurring less than this percentage will be in
"other"
*Note: using threshold = 0 will create an "other" category
with no occurances in the training set. In the test set, any
novel categories not seen in train will be assigned "other"
Returns
-------
dataframe X with transformed column "colname"
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
colnames = ['town', 'lat']
cs = et.ColumnSelect(colnames).fit_transform(X)
h = et.CategoricalOtherLevel('town', 0.015).fit_transform(cs)
print(h.head(15))
town lat
0 other 42.2550
1 other 42.2875
2 other 42.2830
3 other 42.2930
4 other 42.2980
5 other 42.3040
6 other 42.2970
7 other 42.3100
8 other 42.3120
9 other 42.3160
10 other 42.3160
11 other 42.3170
12 other 42.3060
13 Lynn 42.2920
14 Lynn 42.2870
----------
'''
def __init__(self, colname, threshold):
self.colname = colname
self.threshold = threshold
        self.notothers = pd.Series(dtype=object)
def fit( self, X, y = None):
# get frequency table
f = freq(X[self.colname])[[self.colname, 'perc']]
# get (not "others") to create lookup table
self.notothers = f[ f['perc'] > (self.threshold * 100) ][self.colname]
return self
def transform( self, X, y = None):
# if srs in o then replace with "other"
for i in X.index:
if sum(self.notothers.str.contains(X[self.colname][i])):
pass
else:
X.at[i, self.colname] = 'other'
return X
class CorrelationFilter( BaseEstimator, TransformerMixin ):
'''
----------
Parameters
----------
cut correlation cutoff
methodx used to calculate correlations amount predictors
methody* used to calculate correlations between predictors & target
*(unused in exploretransform v1.0.0)
pearson standard correlation coefficient
kendall Kendall Tau correlation coefficient
spearman Spearman rank correlation
callable callable with input two 1d ndarrays and returning a float. Note
that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
Returns
-------
Dataframe with columns removed using logic from corrtable() and calcdrop()
Example
-------
import exploretransform as et
df, X, y = et.loadboston()
colnames = X.select_dtypes('number').columns
cs = et.ColumnSelect(colnames).fit_transform(X)
cf = et.CorrelationFilter(cut = 0.5).fit_transform(cs)
print(cf)
lon lat crim zn rm ptratio b
0 -70.9550 42.2550 0.00632 18.0 6.575 15.3 396.90
1 -70.9500 42.2875 0.02731 0.0 6.421 17.8 396.90
2 -70.9360 42.2830 0.02729 0.0 7.185 17.8 392.83
3 -70.9280 42.2930 0.03237 0.0 6.998 18.7 394.63
4 -70.9220 42.2980 0.06905 0.0 7.147 18.7 396.90
.. ... ... ... ... ... ... ...
501 -70.9860 42.2312 0.06263 0.0 6.593 21.0 391.99
502 -70.9910 42.2275 0.04527 0.0 6.120 21.0 396.90
503 -70.9948 42.2260 0.06076 0.0 6.976 21.0 396.90
504 -70.9875 42.2240 0.10959 0.0 6.794 21.0 393.45
505 -70.9825 42.2210 0.04741 0.0 6.030 21.0 396.90
----------
'''
def __init__(self, cut = 0.9, methodx = 'pearson', methody = None):
self.cut = cut
self.methodx = methodx
self.methody = methody
self.ct = pd.DataFrame()
self.names = []
def fit( self, X, y = None ):
self.ct = corrtable(X, y,
cut = self.cut,
methodx = self.methodx,
methody = self.methody,
full = True)
self.names = calcdrop(self.ct)
return self
def transform( self, X, y = None ):
return X.drop(self.names, axis = 1)
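# Sketch (not in the original module): because these transformers follow the
# scikit-learn fit/transform API, they compose in a Pipeline. The column names
# below are illustrative.
#
#   from sklearn.pipeline import Pipeline
#   pipe = Pipeline([
#       ('select', ColumnSelect(['lon', 'lat', 'crim', 'zn', 'rm'])),
#       ('corrfilter', CorrelationFilter(cut=0.7)),
#   ])
#   X_reduced = pipe.fit_transform(X)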
def printcode(df, path):
'''
----------
Parameters
----------
df: Dataframe from explore step
path: File path to write output code
Returns
-------
text file with comments and code
Example
-------
printcode(df, path)
# columns lowercase
df.columns = map(str.lower, df.columns)
# log transform cmedv
df["cmedv"] = np.log(df["cmedv"])
----------
'''
l = list()
# df = df[df["sequence"] == sequence]
for i in range(len(df)):
l.append("# " + df["action"][i])
l.append(df["code"][i])
l.append("\n")
with open(path,'w') as f:
f.write('\n'.join(l))
def to_csv(df, path):
'''
----------
Parameters
----------
df: dataframe to write
path: write destination
Returns
-------
This function take a Pandas DataFrame and writes dtype metadata on the
first rows of the output file. Used in conjunction with read_csv()
Output is a .csv file.
Example
-------
to_csv(df, ".../filename.csv")
----------
'''
def ordlist(df):
# check for ordinal and store name, catDtype and underlying dtype
l = list()
c = df.select_dtypes('category').columns
if len(c) > 0:
for col in c:
if df[col].unique().ordered == True:
l.append([col, df[col].cat.categories,
str(df[col].unique().categories.dtype)])
return l
df2 = df.copy()
dt = df2.dtypes.to_frame('dtypes')
o = ordlist(df)
if len(o) > 0 :
for item in o:
df2.loc[-1] = ' '
df2.iloc[-1, 0] = item[0]
df2.iloc[-1, 1] = list(item[1].values)
df2.iloc[-1, 2] = item[2]
# df2.iloc[-1, 1] = item[1]
df2.index = df2.index + 1
df2.sort_index(inplace=True)
dt.loc[item[0]] = "ordinal" # change to ordinal to flag read_csv
df2.loc[-1] = dt['dtypes']
df2.index = df2.index + 1
df2.sort_index(inplace=True)
df2.to_csv(path, index=False)
def read_csv(path):
'''
----------
Parameters
----------
df: dataframe to write
path: write destination
Returns
-------
This function reads a .csv created by to_csv() and uses the dtype
metadata from the top rows to assign the correct dtypes to the columns.
Example
-------
read_csv(".../filename.csv")
----------
'''
dtypes = {key:value for (key,value) in pd.read_csv(path,
nrows=1).iloc[0].to_dict().items() if 'date' not in value}
parse_dates = [key for (key,value) in pd.read_csv(path,
nrows=1).iloc[0].to_dict().items() if 'date' in value]
o = list(dtypes.values()).count("ordinal")
if o > 0: # if ordinals exist, read and store metadata from csv file
x = pd.read_csv(path, nrows=o, skiprows = 2, usecols=[0,1,2],
names = ["var", "categories", "dtypes"])
x["CategoricalDtype"] = " "
for row in range(o):
y = list(eval(x["categories"][row]))
x["CategoricalDtype"][row] = CategoricalDtype(categories=y, ordered=True)
# Change dtype from ordinal to original data type
for item in x["var"]:
dtypes[item] = x["dtypes"][x["var"] == item].item()
# read file skipping rows used for storing metadata
df = pd.read_csv(path, dtype=dtypes, parse_dates=parse_dates,
skiprows=list(range(1, o+2)), header=0)
# transform ordinal columns
if o > 0:
for item in x["var"]:
cat_dtype = x["CategoricalDtype"][x["var"] == item].item()
df[item] = df[item].astype(cat_dtype)
return df
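# Round-trip sketch (not in the original module): to_csv() stores dtype
# metadata in the leading rows so read_csv() can restore ordered categoricals.
#
#   df, X, y = loadboston()            # 'rad' is an ordered categorical
#   to_csv(df, 'boston_meta.csv')
#   df2 = read_csv('boston_meta.csv')
#   df2['rad'].dtype                   # ordered CategoricalDtype, as before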
|
{"hexsha": "fd9a28676719f9455afe29f5a6fd660bf771126c", "size": 30156, "ext": "py", "lang": "Python", "max_stars_repo_path": "exploretransform/_exploretransform.py", "max_stars_repo_name": "bxp151/exploretransform", "max_stars_repo_head_hexsha": "f67039f22f30319586c8b4861d21b7bf147cecb9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-10-08T13:50:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-24T01:12:01.000Z", "max_issues_repo_path": "exploretransform/_exploretransform.py", "max_issues_repo_name": "bxp151/exploretransform", "max_issues_repo_head_hexsha": "f67039f22f30319586c8b4861d21b7bf147cecb9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exploretransform/_exploretransform.py", "max_forks_repo_name": "bxp151/exploretransform", "max_forks_repo_head_hexsha": "f67039f22f30319586c8b4861d21b7bf147cecb9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4780058651, "max_line_length": 105, "alphanum_fraction": 0.5238426847, "include": true, "reason": "import numpy,from scipy", "num_tokens": 8524}
|
import numpy as np
import nept
task_times = dict()
task_times['on_track'] = nept.Epoch(np.array([2421.1, 4816.9]))
experiment_times = dict()
experiment_times['left_trials'] = nept.Epoch(np.array([[2436.3, 2487.4],
[3398.8, 3423.7],
[3538.1, 3570.7],
[3905.1, 3927.8],
[4209.9, 4225.9],
[4468.7, 4483.6]]))
experiment_times['right_trials'] = nept.Epoch(np.array([[2594.2, 2634.0],
[2771.2, 2818.5],
[2925.0, 2934.7],
[3040.2, 3098.0],
[3172.5, 3197.9],
[3280.0, 3294.4],
[3675.5, 3687.6],
[3779.4, 3789.3],
[4055.2, 4094.5],
[4321.1, 4364.6],
[4600.9, 4636.5]]))
|
{"hexsha": "aa27758ee13e57a04c42f24cfc7c3f4291f58d13", "size": 1400, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/r042d5.py", "max_stars_repo_name": "vandermeerlab/hc_hyperalign", "max_stars_repo_head_hexsha": "f140aca8c9b1c4196ea2c63326fbbbded22bbcc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/r042d5.py", "max_issues_repo_name": "vandermeerlab/hc_hyperalign", "max_issues_repo_head_hexsha": "f140aca8c9b1c4196ea2c63326fbbbded22bbcc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/r042d5.py", "max_forks_repo_name": "vandermeerlab/hc_hyperalign", "max_forks_repo_head_hexsha": "f140aca8c9b1c4196ea2c63326fbbbded22bbcc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.8461538462, "max_line_length": 75, "alphanum_fraction": 0.2571428571, "include": true, "reason": "import numpy", "num_tokens": 294}
|
# In[1]:
import numpy as np
import pandas as pd
# In[2]:
def generate_population():
chromosome = [[i, j, k, l, m, n, o, p]
for i in range(1, 9)
for j in range(1, 9)
for k in range(1, 9)
for l in range(1, 9)
for m in range(1, 9)
for n in range(1, 9)
for o in range(1, 9)
for p in range(1, 9)
if all([i != j, i != k, i != l, i != m, i != n, i != o, i != p,
j != k, j != l, j != m, j != n, j != o, j != p,
k != l, k != m, k != n, k != o, k != p,
l != m, l != n, l != o, l != p,
m != n, m != o, m != p,
n != o, n != p,
o != p])]
chromosome = np.array(chromosome)
chromosome = pd.DataFrame(chromosome)
return chromosome
# In[3]:
initial_population = generate_population()
initial_population
# In[4]:
def fitness(population):
pop_size = population.shape[0]
x = 0
y = 0
b = 0
c = 0
Fit = []
for k in range(pop_size):
for i in range(8):
c = 0
for j in range(8):
if(i != j):
x = abs(i-j)
y = abs(population.iloc[k][i] - population.iloc[k][j])
if(x == y):
c += 1
b = 28-c
Fit.append(b)
Fitness = np.array(Fit)
return Fitness
# In[5]:
Fitness = fitness(initial_population)
Fitness
# In[6]:
data = pd.DataFrame(initial_population)
data['Fit'] = pd.DataFrame(Fitness)
# In[7]:
data_100 = data.sample(n=100)
data_100 = data_100.reset_index(drop = True)
data_100
# In[8]:
def selection(data):
selected_parent = data.sample(n=5)
selected_parent = selected_parent.sort_values("Fit", ascending=False)
selected_parent1 = selected_parent.iloc[0]
selected_parent2 = selected_parent.iloc[1]
return selected_parent1[:8], selected_parent2[:8]
# In[9]:
def crossover(C1, C2):
    # random crossover point between 1 and 6; the original call
    # np.random.randint((1,7), size=1) passed a tuple as the low argument
    point = int(np.random.randint(1, 7))
C1_1 = C1[:point]
C1_2 = C1[point:]
C2_1 = C2[:point]
C2_2 = C2[point:]
C1_tuple = (C1_1, C2_2)
C1 = np.hstack(C1_tuple)
C2_tuple = (C2_1, C1_2)
C2 = np.hstack(C2_tuple)
return C1, C2
# In[10]:
def mutation(ch):
point1 = np.random.randint(8, size=1)
point1 = int(point1)
point2 = np.random.randint(8, size=1)
point2 = int(point2)
first_ele = ch[point1]
second_ele = ch[point2]
ch[point1] = second_ele
ch[point2] = first_ele
return ch
# In[11]:
Parent1 = []
Child_Gen1 = []
for i in range(25):
Pa1, Pa2 = selection(data_100)
Parent1.append(Pa1)
Parent1.append(Pa2)
Child1, Child2 = crossover(Pa1, Pa2)
Child1 = mutation(Child1)
Child2 = mutation(Child2)
Child_Gen1.append(Child1)
Child_Gen1.append(Child2)
Parent1_df = pd.DataFrame(Parent1)
Parent1_df = Parent1_df.reset_index(drop = True)
Child_Gen1 = pd.DataFrame(Child_Gen1)
Child_Gen1 = Child_Gen1.reset_index(drop = True)
# In[12]:
Child_Gen1
# In[13]:
Gen1_Fitness = fitness(Child_Gen1)
data_Gen1 = Child_Gen1
data_Gen1['Fit'] = pd.DataFrame(Gen1_Fitness)
# In[14]:
data_Gen1
# In[15]:
Parent2 = []
Child_Gen2 = []
for i in range(12):
Pa1, Pa2 = selection(data_Gen1)
Parent2.append(Pa1)
Parent2.append(Pa2)
Child1, Child2 = crossover(Pa1, Pa2)
Child1 = mutation(Child1)
Child2 = mutation(Child2)
Child_Gen2.append(Child1)
Child_Gen2.append(Child2)
Parent2_df = pd.DataFrame(Parent2)
Parent2_df = Parent2_df.reset_index(drop = True)
Child_Gen2 = pd.DataFrame(Child_Gen2)
Child_Gen2 = Child_Gen2.reset_index(drop = True)
# In[16]:
Child_Gen2
# In[17]:
Gen2_Fitness = fitness(Child_Gen2)
data_Gen2 = Child_Gen2
data_Gen2['Fit'] = pd.DataFrame(Gen2_Fitness)
# In[18]:
data_Gen2
# In[19]:
Parent3 = []
Child_Gen3 = []
for i in range(6):
Pa1, Pa2 = selection(data_Gen2)
Parent3.append(Pa1)
Parent3.append(Pa2)
Child1, Child2 = crossover(Pa1, Pa2)
Child1 = mutation(Child1)
Child2 = mutation(Child2)
Child_Gen3.append(Child1)
Child_Gen3.append(Child2)
Parent3_df = pd.DataFrame(Parent3)
Parent3_df = Parent3_df.reset_index(drop = True)
Child_Gen3 = pd.DataFrame(Child_Gen3)
Child_Gen3 = Child_Gen3.reset_index(drop = True)
# In[20]:
Child_Gen3
# In[21]:
Gen3_Fitness = fitness(Child_Gen3)
data_Gen3 = Child_Gen3
data_Gen3['Fit'] = pd.DataFrame(Gen3_Fitness)
# In[22]:
data_Gen3
# In[23]:
Parent4 = []
Child_Gen4 = []
for i in range(4):
Pa1, Pa2 = selection(data_Gen3)
Parent4.append(Pa1)
Parent4.append(Pa2)
Child1, Child2 = crossover(Pa1, Pa2)
Child1 = mutation(Child1)
Child2 = mutation(Child2)
Child_Gen4.append(Child1)
Child_Gen4.append(Child2)
Parent4_df = pd.DataFrame(Parent4)
Parent4_df = Parent4_df.reset_index(drop = True)
Child_Gen4 = pd.DataFrame(Child_Gen4)
Child_Gen4 = Child_Gen4.reset_index(drop = True)
# In[24]:
Child_Gen4
# In[25]:
Gen4_Fitness = fitness(Child_Gen4)
data_Gen4 = Child_Gen4
data_Gen4['Fit'] = pd.DataFrame(Gen4_Fitness)
# In[26]:
data_Gen4
# In[27]:
CHILD = data_Gen1.append(data_Gen2)
CHILD = CHILD.append(data_Gen3)
CHILD = CHILD.append(data_Gen4)
# In[28]:
CHILD = CHILD.sort_values("Fit", ascending=False)
CHILD = CHILD.reset_index(drop = True)
CHILD.head(10)
# In[29]:
print("Finished!")
|
{"hexsha": "a81c82f1816f0c4b6bcb3fbbc63e59c1fb9698bc", "size": 5550, "ext": "py", "lang": "Python", "max_stars_repo_path": "8_Queens/8queen.py", "max_stars_repo_name": "aminizahra/Evolutionary-Computing", "max_stars_repo_head_hexsha": "0dab8222fccb9bd1efca9141481e5aebc73de5ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "8_Queens/8queen.py", "max_issues_repo_name": "aminizahra/Evolutionary-Computing", "max_issues_repo_head_hexsha": "0dab8222fccb9bd1efca9141481e5aebc73de5ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "8_Queens/8queen.py", "max_forks_repo_name": "aminizahra/Evolutionary-Computing", "max_forks_repo_head_hexsha": "0dab8222fccb9bd1efca9141481e5aebc73de5ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.3716814159, "max_line_length": 74, "alphanum_fraction": 0.5958558559, "include": true, "reason": "import numpy", "num_tokens": 1766}
|
!======================================================================!
SUBROUTINE Probe1D
!----------------------------------------------------------------------!
! This program finds the coordinate of cell-centers in non-homogeneous
! direction and write them in file name.1D
!----------------------------------------------------------------------!
USE all_mod
!----------------------------------------------------------------------!
IMPLICIT NONE
!-----------------------------[Parameters]-----------------------------!
LOGICAL :: isit
!------------------------------[Calling]-------------------------------!
INTERFACE
LOGICAL FUNCTION Approx(A,B,tol)
REAL :: A,B
REAL, OPTIONAL :: tol
END FUNCTION Approx
END INTERFACE
!-------------------------------[Locals]-------------------------------!
INTEGER :: Nprob, p, c
REAL :: zp(1000)
CHARACTER :: namPro*80
CHARACTER :: answer*80
!--------------------------------[CVS]---------------------------------!
! $Id: Probe1D.f90,v 1.5 2002/10/30 16:29:33 niceno Exp $
! $Source: /home/muhamed/.CVSROOT/T-Rex/Library/Probe1D.f90,v $
!======================================================================!
write(*,*) '========================================'
write(*,*) ' Looking for non-homogeneous directions '
write(*,*) '----------------------------------------'
write(*,*) 'Insert non-homogeneous direction (x,y,z or skip)'
read(*,*) answer
call touppr(answer)
if(answer=='SKIP') return
NProb = 0
zp=0.0
do c=1,NC
!---- try to find the cell among the probes
do p=1,Nprob
if(answer == 'X') then
if( Approx(xc(c), zp(p)) ) go to 1
else if(answer == 'Y') then
if( Approx(yc(c), zp(p)) ) go to 1
else if(answer == 'Z') then
if( Approx(zc(c), zp(p)) ) go to 1
end if
end do
!---- couldn't find a cell among the probes, add a new one
Nprob = Nprob+1
if(answer=='X') zp(Nprob)=xc(c)
if(answer=='Y') zp(Nprob)=yc(c)
if(answer=='Z') zp(Nprob)=zc(c)
if(Nprob == 1000) then
write(*,*) 'Probe 1D: Not a 1D (channel flow) problem.'
isit = .false.
return
end if
1 end do
isit = .true.
!<<<<<<<<<<<<<<<<<<<<<<<<!
! create 1D file !
!<<<<<<<<<<<<<<<<<<<<<<<<!
namPro = name
namPro(len_trim(name)+1:len_trim(name)+4) = '.1Dc'
write(6, *) 'Now creating the file:', namPro
open(9, FILE=namPro)
!---- write the number of probes
write(9,'(I8)') Nprob
!---- write the probe coordinates out
do p=1,Nprob
write(9,'(I8,1PE17.8)') p, zp(p)
end do
close(9)
END SUBROUTINE Probe1D
|
{"hexsha": "7e8f4b1a83fa2926c50c2ac8b92b1aba8da9a8fe", "size": 2634, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Library/Probe1D.f90", "max_stars_repo_name": "palkinev/T-FlowS-old-compressible", "max_stars_repo_head_hexsha": "7aa7219aa5526ac270b39d52ba8c12847e1d71a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Library/Probe1D.f90", "max_issues_repo_name": "palkinev/T-FlowS-old-compressible", "max_issues_repo_head_hexsha": "7aa7219aa5526ac270b39d52ba8c12847e1d71a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Library/Probe1D.f90", "max_forks_repo_name": "palkinev/T-FlowS-old-compressible", "max_forks_repo_head_hexsha": "7aa7219aa5526ac270b39d52ba8c12847e1d71a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.275862069, "max_line_length": 73, "alphanum_fraction": 0.4229309036, "num_tokens": 717}
|
import numpy as np
from src.Map.Map import Map
from src.StateUtils import pad_centered
from src.base.BaseState import BaseState
class DHMultiState(BaseState):
def __init__(self, map_init: Map, num_agents: int):
super().__init__(map_init)
self.device_list = None
self.device_map = None # Floating point sparse matrix showing devices and their data to be collected
        # The single-agent property accessors below return values for the
        # currently active agent
self.active_agent = 0
self.num_agents = num_agents
        # Per-agent state is stored in lists indexed by agent id; a comprehension
        # is used so that each agent gets its own position list
        self.positions = [[0, 0] for _ in range(num_agents)]
self.movement_budgets = [0] * num_agents
self.landeds = [False] * num_agents
self.terminals = [False] * num_agents
self.device_coms = [-1] * num_agents
self.initial_movement_budgets = [0] * num_agents
self.initial_total_data = 0
self.collected = None
@property
def position(self):
return self.positions[self.active_agent]
@property
def movement_budget(self):
return self.movement_budgets[self.active_agent]
@property
def initial_movement_budget(self):
return self.initial_movement_budgets[self.active_agent]
@property
def landed(self):
return self.landeds[self.active_agent]
@property
def terminal(self):
return self.terminals[self.active_agent]
@property
def all_landed(self):
return all(self.landeds)
@property
def all_terminal(self):
return all(self.terminals)
def is_terminal(self):
return self.all_terminal
def set_landed(self, landed):
self.landeds[self.active_agent] = landed
def set_position(self, position):
self.positions[self.active_agent] = position
def decrement_movement_budget(self):
self.movement_budgets[self.active_agent] -= 1
def set_terminal(self, terminal):
self.terminals[self.active_agent] = terminal
def set_device_com(self, device_com):
self.device_coms[self.active_agent] = device_com
def get_active_agent(self):
return self.active_agent
def get_remaining_data(self):
return np.sum(self.device_map)
def get_total_data(self):
return self.initial_total_data
def get_scalars(self):
"""
Return the scalars without position, as it is treated individually
"""
return np.array([self.movement_budget])
def get_num_scalars(self):
return len(self.get_scalars())
def get_boolean_map(self):
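        # Pad the NFZ/obstacle layers with 1 (out of bounds is implicitly no-fly,
        # see is_in_no_fly_zone); pad the landing-zone and agent layers with 0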
padded_red = pad_centered(self, np.concatenate([np.expand_dims(self.no_fly_zone, -1),
np.expand_dims(self.obstacles, -1)], axis=-1), 1)
padded_rest = pad_centered(self,
np.concatenate(
[np.expand_dims(self.landing_zone, -1), self.get_agent_bool_maps()],
axis=-1), 0)
return np.concatenate([padded_red, padded_rest], axis=-1)
def get_boolean_map_shape(self):
return self.get_boolean_map().shape
def get_float_map(self):
return pad_centered(self, np.concatenate([np.expand_dims(self.device_map, -1),
self.get_agent_float_maps()], axis=-1), 0)
def get_float_map_shape(self):
return self.get_float_map().shape
def is_in_landing_zone(self):
return self.landing_zone[self.position[1]][self.position[0]]
def is_in_no_fly_zone(self):
# Out of bounds is implicitly nfz
if 0 <= self.position[1] < self.no_fly_zone.shape[0] and 0 <= self.position[0] < self.no_fly_zone.shape[1]:
# NFZ or occupied
return self.no_fly_zone[self.position[1], self.position[0]] or self.is_occupied()
return True
def is_occupied(self):
for i, pos in enumerate(self.positions):
if self.terminals[i]:
continue
if i == self.active_agent:
continue
if pos == self.position:
return True
return False
def get_collection_ratio(self):
return np.sum(self.collected) / self.initial_total_data
def get_collected_data(self):
return np.sum(self.collected)
def reset_devices(self, device_list):
self.device_map = device_list.get_data_map(self.no_fly_zone.shape)
self.collected = np.zeros(self.no_fly_zone.shape, dtype=float)
self.initial_total_data = device_list.get_total_data()
self.device_list = device_list
def get_agent_bool_maps(self):
agent_map = np.zeros(self.no_fly_zone.shape + (1,), dtype=bool)
for agent in range(self.num_agents):
# agent_map[self.positions[agent][1], self.positions[agent][0]][0] = self.landeds[agent]
agent_map[self.positions[agent][1], self.positions[agent][0]][0] = not self.terminals[agent]
return agent_map
def get_agent_float_maps(self):
agent_map = np.zeros(self.no_fly_zone.shape + (1,), dtype=float)
for agent in range(self.num_agents):
agent_map[self.positions[agent][1], self.positions[agent][0]][0] = self.movement_budgets[agent]
return agent_map
|
{"hexsha": "cf824b9f4008a55c155eecd5d901818337e15de5", "size": 5311, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/DHMulti/State.py", "max_stars_repo_name": "fgwy/uavSim", "max_stars_repo_head_hexsha": "a58dc3149a52fc407c288c281eedb176738bd558", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2020-11-16T10:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T15:57:25.000Z", "max_issues_repo_path": "src/DHMulti/State.py", "max_issues_repo_name": "fgwy/uavSim", "max_issues_repo_head_hexsha": "a58dc3149a52fc407c288c281eedb176738bd558", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-11-17T14:51:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T02:59:39.000Z", "max_forks_repo_path": "src/DHMulti/State.py", "max_forks_repo_name": "fgwy/uavSim", "max_forks_repo_head_hexsha": "a58dc3149a52fc407c288c281eedb176738bd558", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-11-27T08:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T18:30:58.000Z", "avg_line_length": 34.264516129, "max_line_length": 115, "alphanum_fraction": 0.6379212954, "include": true, "reason": "import numpy", "num_tokens": 1176}
|
import skimage
import skimage.data as data
from skimage.viewer import ImageViewer
from numpy.testing import assert_equal, assert_allclose
def setup_line_profile(image):
from skimage.viewer.plugins.lineprofile import LineProfile
viewer = ImageViewer(skimage.img_as_float(image))
plugin = LineProfile()
viewer += plugin
return plugin
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_allclose(scan_data.max(), 0.9139, rtol=1e-3)
assert_allclose(scan_data.mean(), 0.2828, rtol=1e-3)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea())
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 755)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (152, 3))
assert_allclose(scan_data.max(), 0.772, rtol=1e-3)
assert_allclose(scan_data.mean(), 0.4355, rtol=1e-3)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
{"hexsha": "6b0fd48f017fccf3c188620c7d1025674bba3cf3", "size": 1511, "ext": "py", "lang": "Python", "max_stars_repo_path": "skimage/viewer/tests/test_viewer.py", "max_stars_repo_name": "jeysonmc/scikit-image", "max_stars_repo_head_hexsha": "d652a9ce9a71a3f6f13cb23ee7c5f25148457b24", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skimage/viewer/tests/test_viewer.py", "max_issues_repo_name": "jeysonmc/scikit-image", "max_issues_repo_head_hexsha": "d652a9ce9a71a3f6f13cb23ee7c5f25148457b24", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skimage/viewer/tests/test_viewer.py", "max_forks_repo_name": "jeysonmc/scikit-image", "max_forks_repo_head_hexsha": "d652a9ce9a71a3f6f13cb23ee7c5f25148457b24", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5777777778, "max_line_length": 62, "alphanum_fraction": 0.7021839841, "include": true, "reason": "from numpy", "num_tokens": 384}
|
""" Unit tests for fitness.py"""
# Author: Genevieve Hayes
# License: BSD 3 clause
import unittest
import numpy as np
from mlrose import (OneMax, FlipFlop, FourPeaks, SixPeaks, ContinuousPeaks,
Knapsack, TravellingSales, Queens, MaxKColor,
CustomFitness)
from mlrose.fitness import head, tail, max_run
# The above functions are not automatically imported at initialization, so
# must be imported explicitly from fitness.py.
class TestFitness(unittest.TestCase):
"""Tests for fitness.py."""
@staticmethod
def test_onemax():
"""Test OneMax fitness function"""
state = np.array([0, 1, 0, 1, 1, 1, 1])
assert OneMax().evaluate(state) == 5
@staticmethod
def test_flipflop():
"""Test FlipFlop fitness function"""
state = np.array([0, 1, 0, 1, 1, 1, 1])
assert FlipFlop().evaluate(state) == 3
@staticmethod
def test_head():
"""Test head function"""
state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])
assert head(1, state) == 4
@staticmethod
def test_tail():
"""Test tail function"""
state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])
assert tail(1, state) == 2
@staticmethod
def test_max_run_middle():
"""Test max_run function for case where run is in the middle of the
state"""
state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])
assert max_run(1, state) == 5
@staticmethod
def test_max_run_start():
"""Test max_run function for case where run is at the start of the
state"""
state = np.array([1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])
assert max_run(1, state) == 6
@staticmethod
def test_max_run_end():
"""Test max_run function for case where run is at the end of the
state"""
state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert max_run(1, state) == 9
@staticmethod
def test_fourpeaks_r0():
"""Test FourPeaks fitness function for the case where R=0 and max>0"""
state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])
assert FourPeaks(t_pct=0.30).evaluate(state) == 4
@staticmethod
def test_fourpeaks_r_gt0():
"""Test FourPeaks fitness function for the case where R>0 and max>0"""
state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])
assert FourPeaks(t_pct=0.15).evaluate(state) == 16
@staticmethod
def test_fourpeaks_r0_max0():
"""Test FourPeaks fitness function for the case where R=0 and max=0"""
state = np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1])
assert FourPeaks(t_pct=0.30).evaluate(state) == 0
@staticmethod
def test_sixpeaks_r0():
"""Test SixPeaks fitness function for the case where R=0 and max>0"""
state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])
assert SixPeaks(t_pct=0.30).evaluate(state) == 4
@staticmethod
def test_sixpeaks_r_gt0():
"""Test SixPeaks fitness function for the case where R>0 and max>0"""
state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])
assert SixPeaks(t_pct=0.15).evaluate(state) == 16
@staticmethod
def test_sixpeaks_r0_max0():
"""Test SixPeaks fitness function for the case where R=0 and max=0"""
state = np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1])
assert SixPeaks(t_pct=0.30).evaluate(state) == 0
@staticmethod
def test_sixpeaks_r_gt0_max2():
"""Test SixPeaks fitness function for the case where R>0 and max>0
based on the second condition"""
state = np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1])
assert SixPeaks(t_pct=0.15).evaluate(state) == 16
@staticmethod
def test_continuouspeaks_r0():
"""Test ContinuousPeaks fitness function for case when R = 0."""
state = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1])
assert ContinuousPeaks(t_pct=0.30).evaluate(state) == 5
@staticmethod
def test_continuouspeaks_r_gt():
"""Test ContinuousPeaks fitness function for case when R > 0."""
state = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1])
assert ContinuousPeaks(t_pct=0.15).evaluate(state) == 17
@staticmethod
def test_knapsack_weight_lt_max():
"""Test Knapsack fitness function for case where total weight is less
than the maximum"""
weights = [10, 5, 2, 8, 15]
values = [1, 2, 3, 4, 5]
max_weight_pct = 0.6
state = np.array([1, 0, 2, 1, 0])
assert Knapsack(weights, values, max_weight_pct).evaluate(state) == 11
@staticmethod
def test_knapsack_weight_gt_max():
"""Test Knapsack fitness function for case where total weight is
greater than the maximum"""
weights = [10, 5, 2, 8, 15]
values = [1, 2, 3, 4, 5]
max_weight_pct = 0.4
state = np.array([1, 0, 2, 1, 0])
assert Knapsack(weights, values, max_weight_pct).evaluate(state) == 0
@staticmethod
def test_travelling_sales_coords():
"""Test TravellingSales fitness function for case where city nodes
coords are specified."""
coords = [(0, 0), (3, 0), (3, 2), (2, 4), (1, 3)]
state = np.array([0, 1, 4, 3, 2])
assert (round(TravellingSales(coords=coords).evaluate(state), 4)
== 13.8614)
@staticmethod
def test_travelling_sales_dists():
"""Test TravellingSales fitness function for case where distances
between node pairs are specified."""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
state = np.array([0, 1, 4, 3, 2])
assert TravellingSales(distances=dists).evaluate(state) == 29
@staticmethod
def test_travelling_sales_invalid():
"""Test TravellingSales fitness function for invalid tour"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
state = np.array([0, 1, 2, 3, 4])
assert TravellingSales(distances=dists).evaluate(state) == np.inf
@staticmethod
def test_queens():
"""Test Queens fitness function"""
state = np.array([1, 4, 1, 3, 5, 5, 2, 7])
assert Queens().evaluate(state) == 6
@staticmethod
def test_max_k_color():
"""Test MaxKColor fitness function"""
edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]
state = np.array([0, 1, 0, 1, 1])
assert MaxKColor(edges).evaluate(state) == 3
@staticmethod
def test_custom_fitness():
"""Test CustomFitness fitness function"""
        # Define custom fitness function
def cust_fn(state, c):
return c*np.sum(state)
state = np.array([1, 2, 3, 4, 5])
kwargs = {'c': 10}
assert CustomFitness(cust_fn, **kwargs).evaluate(state) == 150
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "f7aeb47ad84db5974f92fb76382c93411aeae5ca", "size": 7150, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_fitness.py", "max_stars_repo_name": "dreadn0ught/mlrose", "max_stars_repo_head_hexsha": "2a9d604ea464cccc48f30b8fe6b81fe5c4337c80", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 199, "max_stars_repo_stars_event_min_datetime": "2018-10-31T18:55:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T02:22:16.000Z", "max_issues_repo_path": "tests/test_fitness.py", "max_issues_repo_name": "dreadn0ught/mlrose", "max_issues_repo_head_hexsha": "2a9d604ea464cccc48f30b8fe6b81fe5c4337c80", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 38, "max_issues_repo_issues_event_min_datetime": "2018-10-31T12:29:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T08:34:31.000Z", "max_forks_repo_path": "tests/test_fitness.py", "max_forks_repo_name": "dreadn0ught/mlrose", "max_forks_repo_head_hexsha": "2a9d604ea464cccc48f30b8fe6b81fe5c4337c80", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 137, "max_forks_repo_forks_event_min_datetime": "2018-10-31T01:37:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T15:22:48.000Z", "avg_line_length": 34.8780487805, "max_line_length": 78, "alphanum_fraction": 0.5692307692, "include": true, "reason": "import numpy", "num_tokens": 2487}
|
#ifndef POG_GENERIC_HPP
#define POG_GENERIC_HPP
#include <cstddef>
#include <iostream>
#include <type_traits>
#include <boost/mpl/range_c.hpp>
#include <boost/fusion/include/mpl.hpp>
#include <boost/fusion/include/at_c.hpp>
#include <boost/fusion/include/value_at.hpp>
#include <boost/fusion/include/for_each.hpp>
#include <boost/optional/optional.hpp>
#include <boost/optional/optional_io.hpp>
#include <boost/program_options.hpp>
namespace Generic {
namespace Meta = boost::mpl;
namespace Fusion = boost::fusion;
namespace Options = boost::program_options;
template <typename T>
using Optional = boost::optional<T>;
    // Implementation detail: walks the member attributes and registers each one with Options.
namespace Detail {
template <typename Description, typename Record>
struct AddAttributes final {
template <typename Index>
        void operator()(Index) const {
            // Index is an MPL integral constant; the template arguments below need
            // its compile-time ::value (a runtime parameter would not compile)
            const auto attrName = Fusion::extension::struct_member_name<Record, Index::value>::call();
            using AttrType = typename Fusion::result_of::value_at<Record, Index>::type;
            auto& attr = Fusion::at_c<Index::value>(record);
description.add_options()(attrName, Options::value<AttrType>(&attr)->required(), attrName);
}
Description& description;
Record& record;
};
template <typename Description, typename Record>
void addAttributes(Description& description, Record& record) {
using Indices = Meta::range_c<std::size_t, 0, Fusion::result_of::size<Record>::value>;
Fusion::for_each(Indices{}, AddAttributes<Description, Record>{description, record});
}
}
    // Provides a simple argument parser for any type Record; every member attribute is required.
// Precondition: Record is a type that Fusion can handle, or is at least Fusion-adapted.
// Postcondition: empty Optional when help was requested, valid Optional<Record> otherwise.
// Throws: Options::error (derived from std::logic_error) when error populating Record is encountered.
template <typename Record>
Optional<Record> get(int argc, char** argv, const char* heading) {
using Description = Options::options_description;
Description description{heading};
description.add_options()("help", "Help on usage");
Record record;
Detail::addAttributes(description, record);
Options::variables_map map;
Options::store(Options::command_line_parser{argc, argv}.options(description).run(), map);
if (map.count("help")) {
std::cerr << description << std::endl;
return {};
}
Options::notify(map);
return record;
}
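    // Usage sketch (hypothetical example; `Config` is an assumed type that must
    // be adapted with BOOST_FUSION_ADAPT_STRUCT before it can be used here):
    //
    //   struct Config { std::string host; int port; };
    //   BOOST_FUSION_ADAPT_STRUCT(Config, host, port)
    //
    //   int main(int argc, char** argv) {
    //       if (const auto config = Generic::get<Config>(argc, argv, "Options"))
    //           std::cout << config->host << ':' << config->port << '\n';
    //   }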
}
#endif
|
{"hexsha": "ee8d6e6cd8953acbd872944559bdf3080cabae0b", "size": 2618, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "generic.hpp", "max_stars_repo_name": "daniel-j-h/argparse-generic", "max_stars_repo_head_hexsha": "5746829953cd10ab20c57bf0596549535f73cac6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2016-03-01T18:02:09.000Z", "max_stars_repo_stars_event_max_datetime": "2016-04-29T11:14:11.000Z", "max_issues_repo_path": "generic.hpp", "max_issues_repo_name": "daniel-j-h/argparse-generic", "max_issues_repo_head_hexsha": "5746829953cd10ab20c57bf0596549535f73cac6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generic.hpp", "max_forks_repo_name": "daniel-j-h/argparse-generic", "max_forks_repo_head_hexsha": "5746829953cd10ab20c57bf0596549535f73cac6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1666666667, "max_line_length": 104, "alphanum_fraction": 0.7139037433, "num_tokens": 577}
|
//F# Compiler for F# 4.1
open System
printfn "Hello, World! How is it going?"
|
{"hexsha": "a846e1195c36a81e775bcf8fb650ab3d47f324b3", "size": 79, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "HelloWorld(F#).f", "max_stars_repo_name": "sharma18526/Hello-World-1", "max_stars_repo_head_hexsha": "d4e59182b71782e5c5731c4d02b371827ed4b090", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HelloWorld(F#).f", "max_issues_repo_name": "sharma18526/Hello-World-1", "max_issues_repo_head_hexsha": "d4e59182b71782e5c5731c4d02b371827ed4b090", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-20T20:02:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-20T20:02:12.000Z", "max_forks_repo_path": "HelloWorld(F#).f", "max_forks_repo_name": "sharma18526/Hello-World-1", "max_forks_repo_head_hexsha": "d4e59182b71782e5c5731c4d02b371827ed4b090", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.8, "max_line_length": 40, "alphanum_fraction": 0.6835443038, "num_tokens": 27}
|
[STATEMENT]
lemma normalize_positive_dst_ports_normalized_n_primitive:
assumes n: "normalized_nnf_match m"
and noneg: "\<not> has_disc_negated is_Dst_Ports False m"
shows "\<forall>m' \<in> set (normalize_positive_dst_ports m). normalized_dst_ports m'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>m'\<in>set (normalize_positive_dst_ports m). normalized_dst_ports m'
[PROOF STEP]
unfolding normalized_dst_ports_def2
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>m'\<in>set (normalize_positive_dst_ports m). normalized_n_primitive (is_Dst_Ports, dst_ports_sel) (case_ipt_l4_ports (\<lambda>x pts. length pts \<le> 1)) m'
[PROOF STEP]
unfolding normalize_positive_dst_ports_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>m'\<in>set (normalize_positive_ports_step (is_Dst_Ports, dst_ports_sel) Dst_Ports m). normalized_n_primitive (is_Dst_Ports, dst_ports_sel) (case_ipt_l4_ports (\<lambda>x pts. length pts \<le> 1)) m'
[PROOF STEP]
using normalize_positive_ports_step_normalized_n_primitive[OF n wf_disc_sel_common_primitive(2) noneg]
[PROOF STATE]
proof (prove)
using this:
\<forall>m'\<in>set (normalize_positive_ports_step (is_Dst_Ports, dst_ports_sel) Dst_Ports m). normalized_n_primitive (is_Dst_Ports, dst_ports_sel) (\<lambda>ps. case ps of L4Ports x pts \<Rightarrow> length pts \<le> 1) m'
goal (1 subgoal):
1. \<forall>m'\<in>set (normalize_positive_ports_step (is_Dst_Ports, dst_ports_sel) Dst_Ports m). normalized_n_primitive (is_Dst_Ports, dst_ports_sel) (case_ipt_l4_ports (\<lambda>x pts. length pts \<le> 1)) m'
[PROOF STEP]
by blast
|
{"llama_tokens": 675, "file": "Iptables_Semantics_Primitive_Matchers_Ports_Normalize", "length": 4}
|
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.topology.metric_space.emetric_space
import Mathlib.PostPort
universes u_1 u_4 u_2
namespace Mathlib
/-!
# `Gδ` sets
In this file we define `Gδ` sets and prove their basic properties.
## Main definitions
* `is_Gδ`: a set `s` is a `Gδ` set if it can be represented as an intersection
of countably many open sets;
* `residual`: the filter of residual sets. A set `s` is called *residual* if it includes a dense
`Gδ` set. In a Baire space (e.g., in a complete (e)metric space), residual sets form a filter.
For technical reasons, we define `residual` in any topological space but the definition agrees
with the description above only in Baire spaces.
## Main results
We prove that finite or countable intersections of Gδ sets is a Gδ set. We also prove that the
continuity set of a function from a topological space to an (e)metric space is a Gδ set.
## Tags
Gδ set, residual set
-/
/-- A Gδ set is a countable intersection of open sets. -/
def is_Gδ {α : Type u_1} [topological_space α] (s : set α) :=
∃ (T : set (set α)), (∀ (t : set α), t ∈ T → is_open t) ∧ set.countable T ∧ s = ⋂₀T
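/- Informal example (not formalized here): in a metric space a singleton `{x}`
is a `Gδ` set, since `{x} = ⋂ n, metric.ball x (1/(n+1))`, a countable
intersection of open balls. -/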
/-- An open set is a Gδ set. -/
theorem is_open.is_Gδ {α : Type u_1} [topological_space α] {s : set α} (h : is_open s) : is_Gδ s :=
sorry
theorem is_Gδ_univ {α : Type u_1} [topological_space α] : is_Gδ set.univ :=
is_open.is_Gδ is_open_univ
theorem is_Gδ_bInter_of_open {α : Type u_1} {ι : Type u_4} [topological_space α] {I : set ι}
(hI : set.countable I) {f : ι → set α} (hf : ∀ (i : ι), i ∈ I → is_open (f i)) :
is_Gδ (set.Inter fun (i : ι) => set.Inter fun (H : i ∈ I) => f i) :=
sorry
theorem is_Gδ_Inter_of_open {α : Type u_1} {ι : Type u_4} [topological_space α] [encodable ι]
{f : ι → set α} (hf : ∀ (i : ι), is_open (f i)) : is_Gδ (set.Inter fun (i : ι) => f i) :=
sorry
/-- A countable intersection of Gδ sets is a Gδ set. -/
theorem is_Gδ_sInter {α : Type u_1} [topological_space α] {S : set (set α)}
(h : ∀ (s : set α), s ∈ S → is_Gδ s) (hS : set.countable S) : is_Gδ (⋂₀S) :=
sorry
theorem is_Gδ_Inter {α : Type u_1} {ι : Type u_4} [topological_space α] [encodable ι]
{s : ι → set α} (hs : ∀ (i : ι), is_Gδ (s i)) : is_Gδ (set.Inter fun (i : ι) => s i) :=
is_Gδ_sInter (iff.mpr set.forall_range_iff hs) (set.countable_range s)
theorem is_Gδ_bInter {α : Type u_1} {ι : Type u_4} [topological_space α] {s : set ι}
(hs : set.countable s) {t : (i : ι) → i ∈ s → set α}
(ht : ∀ (i : ι) (H : i ∈ s), is_Gδ (t i H)) :
is_Gδ (set.Inter fun (i : ι) => set.Inter fun (H : i ∈ s) => t i H) :=
sorry
theorem is_Gδ.inter {α : Type u_1} [topological_space α] {s : set α} {t : set α} (hs : is_Gδ s)
(ht : is_Gδ t) : is_Gδ (s ∩ t) :=
eq.mpr (id (Eq._oldrec (Eq.refl (is_Gδ (s ∩ t))) set.inter_eq_Inter))
(is_Gδ_Inter (iff.mpr bool.forall_bool { left := ht, right := hs }))
/-- The union of two Gδ sets is a Gδ set. -/
theorem is_Gδ.union {α : Type u_1} [topological_space α] {s : set α} {t : set α} (hs : is_Gδ s)
(ht : is_Gδ t) : is_Gδ (s ∪ t) :=
sorry
theorem is_Gδ_set_of_continuous_at_of_countably_generated_uniformity {α : Type u_1} {β : Type u_2}
[topological_space α] [uniform_space β] (hU : filter.is_countably_generated (uniformity β))
(f : α → β) : is_Gδ (set_of fun (x : α) => continuous_at f x) :=
sorry
/-- The set of points where a function is continuous is a Gδ set. -/
theorem is_Gδ_set_of_continuous_at {α : Type u_1} {β : Type u_2} [topological_space α]
[emetric_space β] (f : α → β) : is_Gδ (set_of fun (x : α) => continuous_at f x) :=
is_Gδ_set_of_continuous_at_of_countably_generated_uniformity
emetric.uniformity_has_countable_basis f
/-- A set `s` is called *residual* if it includes a dense `Gδ` set. If `α` is a Baire space
(e.g., a complete metric space), then residual sets form a filter, see `mem_residual`.
For technical reasons we define the filter `residual` in any topological space but in a non-Baire
space it is not useful because it may contain some non-residual sets. -/
def residual (α : Type u_1) [topological_space α] : filter α :=
infi fun (t : set α) => infi fun (ht : is_Gδ t) => infi fun (ht' : dense t) => filter.principal t
end Mathlib
|
{"author": "AurelienSaue", "repo": "Mathlib4_auto", "sha": "590df64109b08190abe22358fabc3eae000943f2", "save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto", "path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/topology/G_delta_auto.lean"}
|
import networkx as nx
import itertools
G=nx.Graph()
import time
start_time=time.time()
steps=0
def countVertexDegree(matrix): #Builds a list with the degree of each vertex of the given graph
global steps
degree=[]
deg=0
for row in matrix:
for bit in row:
if bit==1:
deg+=1
steps+=1
degree.append(deg)
deg=0
return degree
def buildM(h,g): #Builds a matrix M0 by comparing the degrees of each vertex of graphs H and G
m0=[]
for n in range(len(g)):
m0.append([])
for i in range(len(g)):
for j in range(len(h)):
            m0[i].append((h[j]>=g[i]).real) #1 if the degree of vertex Vhj >= the degree of vertex Vgi
return m0
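#Example (hypothetical inputs): with H degrees h=[2,1,1] and G degrees g=[1,2],
#buildM(h,g) returns a matrix equivalent to [[1,1,1],[1,0,0]], since m0[i][j]
#is 1 exactly when the degree of Vhj >= the degree of Vgi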
def prune(m,i,j,ag,ah):
global steps
#print(i,j,m)
#print("\n")
for x in range(len(ag)):
if(ag[i][x]):
for y in range(len(ah)):
steps+=1
if(m[x][y]*ah[y][j]):
return True
def permute(m0,row,h,g): #Generates every possible permutation of the parent matrix M0 and tests each for isomorphism
global steps
if(row==len(m0)):
n=0
for i in range(len(m0[0])):
for j in range(len(m0)):
n+=m0[j][i]
steps+=1
if(n>1):
return
n=0
res=[[sum(a*b for a,b in zip(m0_row,h_col)) for h_col in zip(*h)] for m0_row in m0]
steps+=((2*len(h)-1)*(len(res)*len(res[0])))
resTr=[[res[j][i] for j in range(len(res))] for i in range(len(res[0]))]
arr=[[sum(a*b for a,b in zip(m0_row,resTr_col)) for resTr_col in zip(*resTr)] for m0_row in m0]
steps+=((2*len(resTr)-1)*(len(arr)*len(arr[0])))
        if(g==arr): #use <= instead for induced subgraphs
for ro in m0:
print(ro)
print("\n")
return True
else:
        arr1=m0[row][:] #Save a copy of m0[row]
        arr2=[i for i, x in enumerate(arr1) if x] #List of the indices where arr1 has a 1
        for n in range(len(arr2)): #Set every 1 to 0 except the n-th
for i in arr2:
if i!=arr2[n]:
m0[row][i]=0
if(prune(m0,row,m0[row].index(1),g,h)):
if permute(m0,row+1,h,g):
return True
else:
m0[row]=arr1[:]
else:
m0[row]=arr1[:]
return False
def create(formula):
global steps
mat = getfor(formula)
ah=mat[0]
ag=mat[1]
degh=countVertexDegree(ah)
degg=countVertexDegree(ag)
m0=buildM(degh,degg)
if (permute(m0,0,ah,ag)):
pasos=str(steps)
steps=0
return("YES! "+pasos+" pasos basicos")
else:
pasos=str(steps)
steps=0
return("No "+pasos+" pasos basicos")
def getfor(formula):
retCla=[]
clauses=formula.split("/")
clause1=hacMa(clauses[0][2:-2])
clause2=hacMa(clauses[1][2:-2])
retCla.append(clause1)
retCla.append(clause2)
return retCla
def hacMa(algo):
clauses=algo.split("],[")
tuplas = []
arr=[]
for clause in clauses:
clause = map(int,clause.split(","))
tuplas.append(clause)
length=len(tuplas)
for n in range(length):
arr.append([])
for m in range(length):
arr[n].append(0)
for v in range(len(tuplas)):
for ve in tuplas[v]:
arr[v][ve-1]=1
return arr
'''def main():
    #ah=[[0,1,0,0],[1,0,1,1],[0,1,0,0],[0,1,0,0]] #Adjacency matrix of graph H
    #ag=[[0,0,1],[0,0,1],[1,1,0]] #Adjacency matrix of graph G
#ah=[[0,1,0,1,1],[1,0,1,0,1],[0,1,0,1,1],[1,0,1,0,1],[1,1,1,1,0]]
#ag=[[0,1,0,1],[1,0,1,0],[0,1,0,1],[1,0,1,0]]
#ah=[[0,1,0,1,1],[1,0,1,0,1],[0,1,0,1,1],[1,0,1,0,1],[1,1,1,1,0]]
#ag=[[0,1,1,1],[1,0,1,1],[1,1,0,1],[1,1,1,0]]
#ah=[[0,1,1,0,1],[1,0,1,1,1],[1,1,0,1,1],[0,1,1,0,1],[1,1,1,1,0]]
#ag=[[0,1,1,1],[1,0,1,1],[1,1,0,1],[1,1,1,0]]
ah=[[0,1,0,0,0,0,0,0,0,0,0,0,0],[1,0,1,0,0,0,0,0,0,0,0,1,0],[0,1,0,1,0,0,0,0,0,0,0,0,0],[0,0,1,0,1,0,0,0,0,0,0,0,0],[0,0,0,1,0,1,0,0,0,0,0,0,1],[0,0,0,0,1,0,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,0,0,0,0,0],[0,0,0,0,0,1,1,0,1,0,0,0,0],[0,0,0,0,0,0,0,1,0,1,1,0,0],[0,0,0,0,0,0,0,0,1,0,0,0,0],[0,0,0,0,0,0,0,0,1,0,0,0,0],[0,1,0,0,0,0,0,0,0,0,0,0,1],[0,0,0,0,1,0,0,0,0,0,0,1,0]]
ag=[[0,1,0,0,0,0,0,0],[1,0,0,1,0,0,0,0],[0,0,0,1,0,0,0,0],[0,1,1,0,1,0,0,0],[0,0,0,1,0,1,0,0],[0,0,0,0,1,0,1,1],[0,0,0,0,0,1,0,0],[0,0,0,0,0,1,0,0]]
#Hamiltonian Path
#ah=[[0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0]]
#ag=[[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0]]
#Hamiltonian Circuit
#ah=[[0, 1, 0, 0, 0, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 1, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0], [1, 0, 0, 0, 0, 1, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1, 0, 1], [0, 0, 1, 1, 0, 0, 0, 1, 0]]
#ag=[[0, 1, 0, 0, 0, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1, 0, 1, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0], [1, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0, 1, 0]]
#ah=[[0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0]]
#ag=[[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0]]
degh=countVertexDegree(ah)
degg=countVertexDegree(ag)
m0=buildM(degh,degg)
#for row in m0:
# print(row)
#print("\n")
if (permute(m0,0,ah,ag)):
print("YES!")
else:
print("No")
print("--- %s seconds ---" % (time.time() - start_time))
main()'''
|
{"hexsha": "dd729cfec6a41a9049befec52f290e2732b2f290", "size": 10526, "ext": "py", "lang": "Python", "max_stars_repo_path": "subgraphIsomorphism.py", "max_stars_repo_name": "rJuarez96/subgraph", "max_stars_repo_head_hexsha": "3164e4d6b2dacd670a8f8e158ae1bcace23eff99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "subgraphIsomorphism.py", "max_issues_repo_name": "rJuarez96/subgraph", "max_issues_repo_head_hexsha": "3164e4d6b2dacd670a8f8e158ae1bcace23eff99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "subgraphIsomorphism.py", "max_forks_repo_name": "rJuarez96/subgraph", "max_forks_repo_head_hexsha": "3164e4d6b2dacd670a8f8e158ae1bcace23eff99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.7939393939, "max_line_length": 1248, "alphanum_fraction": 0.4040471214, "include": true, "reason": "import networkx", "num_tokens": 7276}
|
# that's still a very lame function, but might be useful once more sampling
# algorithms are implemented (or not...)
function sample(model::Model, algorithm)
run(algorithm, model)
end
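# Hypothetical usage: `sample(model, MetropolisHastings())` would dispatch to
# `run(::MetropolisHastings, ::Model)`, assuming such an algorithm type and a
# matching `run` method are defined elsewhere.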
|
{"hexsha": "2aaa9d8950752d9925a61934fb07819cb22daa88", "size": 189, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/sampling/sample.jl", "max_stars_repo_name": "matthieubulte/Probabilistic.jl", "max_stars_repo_head_hexsha": "8eb68f7c9372570657efb8362ff5bc7b09bca62b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sampling/sample.jl", "max_issues_repo_name": "matthieubulte/Probabilistic.jl", "max_issues_repo_head_hexsha": "8eb68f7c9372570657efb8362ff5bc7b09bca62b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sampling/sample.jl", "max_forks_repo_name": "matthieubulte/Probabilistic.jl", "max_forks_repo_head_hexsha": "8eb68f7c9372570657efb8362ff5bc7b09bca62b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 75, "alphanum_fraction": 0.746031746, "num_tokens": 43}
|
#!/usr/bin/env python3
import numpy as np
import dask.array as da
from numcodecs import Zlib
from pathlib import Path
import argparse
def tile_zarr(
zarr_pyramid_base, max_level=None, compressor=None, dtype=None,
):
img = da.from_zarr(zarr_pyramid_base)
pyramid_path = Path(zarr_pyramid_base).parent
chunks = img.chunksize
tile_size = int(img.chunksize[-1])
if dtype is None:
dtype = img.dtype
if max_level is None:
# create all levels up to 512 x 512
max_level = (
int(np.ceil(np.log2(np.maximum(img.shape[1], img.shape[2])))) - 9
)
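        # e.g. (assumed shape): for a 16384 x 16384 base image this gives
        # ceil(log2(16384)) - 9 = 5, so levels 1 through 4 are generated in
        # the loop below, with the base image counting as level 0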
if compressor is None:
compressor = Zlib(level=1)
for i in range(1, max_level):
img = da.coarsen(np.mean, img, {1: 2, 2: 2}, trim_excess=True)
# Edge Case: Need to pad smallest thumbnail sometimes.
#
# If a dimension of an array is larger than TILE_SIZE,
        # zarr will respect the chunk size requested and automatically
# pad with zeros in the store. However, if an array dimension
# is smaller than the tile size, `da.to_zarr` will change the
        # chunking and not pad with zeros. We sometimes need to pad
# for the smallest tiles because x and y might not be square.
if img.shape[1] < tile_size:
img = da.pad(
img,
((0, 0), (0, tile_size - img.shape[1]), (0, 0)),
"constant",
)
if img.shape[2] < tile_size:
img = da.pad(
img,
((0, 0), (0, 0), (0, tile_size - img.shape[2])),
"constant",
)
# Define pyramid level path
out_path = str(pyramid_path / str(i))
# Write to zarr store
img.astype(dtype).rechunk(chunks).to_zarr(
out_path, compressor=compressor
)
# Read from last store so dask doesn't need to re-compute
# task graph starting at base.
img = da.from_zarr(out_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create zarr tile pyramid from base resolution zarr"
)
parser.add_argument(
"--zarr_pyramid_base", required=True, help="zarr store with base image"
)
args = parser.parse_args()
tile_zarr(args.zarr_pyramid_base)
|
{"hexsha": "b5ae5e2c60c1c5c802036d130ae4d387c820b9eb", "size": 2341, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tile_zarr_base.py", "max_stars_repo_name": "haniffalab/vitessce-data", "max_stars_repo_head_hexsha": "9f70405bae373783f9e6ca32f88b6b19b7325fea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-09T17:55:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T14:54:01.000Z", "max_issues_repo_path": "python/tile_zarr_base.py", "max_issues_repo_name": "haniffalab/vitessce-data", "max_issues_repo_head_hexsha": "9f70405bae373783f9e6ca32f88b6b19b7325fea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2020-01-09T20:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-18T16:39:41.000Z", "max_forks_repo_path": "python/tile_zarr_base.py", "max_forks_repo_name": "haniffalab/vitessce-data", "max_forks_repo_head_hexsha": "9f70405bae373783f9e6ca32f88b6b19b7325fea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-18T14:50:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T12:49:24.000Z", "avg_line_length": 30.4025974026, "max_line_length": 79, "alphanum_fraction": 0.5958991884, "include": true, "reason": "import numpy", "num_tokens": 590}
|
import csv
import numpy as np
import os
from sklearn.model_selection import train_test_split
import urllib.request
from sklearn.cluster import MiniBatchKMeans
import cv2
def get_filepaths_in_folder(folder):
directory = os.fsencode(folder)
filenames = []
for file in os.listdir(directory):
filename = os.fsdecode(file)
filenames.append(os.path.join(str(directory.decode("utf-8")), str(filename)))
return filenames
def to_greyscale(img_folder, target_folder=None):
print("Grey-Scaling Images")
filepaths = get_filepaths_in_folder(img_folder)
    # create the target folder if it does not exist
if target_folder is not None:
if not os.path.exists(target_folder):
os.makedirs(target_folder)
for i, single_filepath in enumerate(filepaths):
# open the image
cur_image = cv2.imread(single_filepath)
# convert to greyscale
gray = cv2.cvtColor(cur_image, cv2.COLOR_BGR2GRAY)
if target_folder is None:
# overwrite old image
cv2.imwrite(single_filepath, gray)
else:
file_name = single_filepath.split("/")[-1]
target_path = target_folder + "/" + file_name
cv2.imwrite(target_path, gray)
print("Processed image {}/{}\r".format(i+1, len(filepaths)), end="")
def color_quantization(folder, color_count, target_folder=None):
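    # When target_folder is None the quantized images overwrite the source
    # files in place, mirroring the behaviour of to_greyscale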
filepaths = get_filepaths_in_folder(folder)
    # create the target folder if it does not exist
if target_folder is not None:
if not os.path.exists(target_folder):
os.makedirs(target_folder)
print("Quantizing Images")
for i, single_filepath in enumerate(filepaths):
# load the image and grab its width and height
image = cv2.imread(single_filepath)
(h, w) = image.shape[:2]
# convert the image from the RGB color space to the L*a*b*
# color space -- since we will be clustering using k-means
# which is based on the euclidean distance, we'll use the
# L*a*b* color space where the euclidean distance implies
# perceptual meaning
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# reshape the image into a feature vector so that k-means
# can be applied
image = image.reshape((image.shape[0] * image.shape[1], 3))
# apply k-means using the specified number of clusters and
# then create the quantized image based on the predictions
clt = MiniBatchKMeans(n_clusters = color_count)
labels = clt.fit_predict(image)
quant = clt.cluster_centers_.astype("uint8")[labels]
# reshape the feature vectors to images
quant = quant.reshape((h, w, 3))
# convert from L*a*b* to RGB
quant = cv2.cvtColor(quant, cv2.COLOR_LAB2BGR)
if target_folder is None:
# overwrite old image
cv2.imwrite(single_filepath, quant)
else:
file_name = single_filepath.split("/")[-1]
target_path = target_folder + "/" + file_name
cv2.imwrite(target_path, quant)
print("Processed image {}/{}\r".format(i+1, len(filepaths)), end="")
if __name__ == "__main__":
base_source = "datasets/before2after"
# color count for quantization
col_count = 8
color_quantization(base_source + "/A/train", col_count)
color_quantization(base_source + "/A/val", col_count)
color_quantization(base_source + "/A/test", col_count)
color_quantization(base_source + "/B/train", col_count)
color_quantization(base_source + "/B/val", col_count)
color_quantization(base_source + "/B/test", col_count)
to_greyscale(base_source + "/A/train")
to_greyscale(base_source + "/A/val")
to_greyscale(base_source + "/A/test")
to_greyscale(base_source + "/B/train")
to_greyscale(base_source + "/B/val")
to_greyscale(base_source + "/B/test")
|
{"hexsha": "1eaf212ed5b7c3a2a6fc1889866960a7e8d3c46d", "size": 4115, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/before2after/preprocessing.py", "max_stars_repo_name": "b-yogesh/pytorch-CycleGAN-and-pix2pix", "max_stars_repo_head_hexsha": "52ceb9b2918ee8cae81ebddfaf38e0f90e85f333", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/before2after/preprocessing.py", "max_issues_repo_name": "b-yogesh/pytorch-CycleGAN-and-pix2pix", "max_issues_repo_head_hexsha": "52ceb9b2918ee8cae81ebddfaf38e0f90e85f333", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/before2after/preprocessing.py", "max_forks_repo_name": "b-yogesh/pytorch-CycleGAN-and-pix2pix", "max_forks_repo_head_hexsha": "52ceb9b2918ee8cae81ebddfaf38e0f90e85f333", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-13T15:05:37.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-13T15:05:37.000Z", "avg_line_length": 31.1742424242, "max_line_length": 86, "alphanum_fraction": 0.6255164034, "include": true, "reason": "import numpy", "num_tokens": 920}
|
from comsyl.autocorrelation.CompactAFReader import CompactAFReader
import numpy as np
from srxraylib.plot.gol import plot_image, plot
import matplotlib.pylab as plt
#
# auxiliary functions
#
def test_equal(af1,af2):
np.testing.assert_almost_equal(1e-8*np.real(af1.eigenvalue(5)),1e-8*np.real(af2.eigenvalue(5)),4)
np.testing.assert_almost_equal(af1.photon_energy(),af2.photon_energy())
np.testing.assert_almost_equal(1e-21*af1.total_intensity_from_spectral_density(),1e-21*af2.total_intensity_from_spectral_density(),4)
np.testing.assert_almost_equal(1e-21*af1.total_intensity(),1e-21*af2.total_intensity(),4)
np.testing.assert_almost_equal(af1.number_modes(),af2.number_modes())
np.testing.assert_almost_equal(1e-7*af1.eigenvalues(), 1e-7*af2.eigenvalues(),2)
#TODO adjust these ones
# np.testing.assert_almost_equal(af1.x_coordinates(), af2.x_coordinates())
# np.testing.assert_almost_equal(af1.y_coordinates(), af2.y_coordinates())
# np.testing.assert_almost_equal(af1.spectral_density(), af2.spectral_density())
# np.testing.assert_almost_equal(af1.reference_electron_density(), af2.reference_electron_density())
# np.testing.assert_almost_equal(af1.reference_undulator_radiation(), af2.reference_undulator_radiation())
# np.testing.assert_almost_equal(af1.mode(25), af2.mode(25))
# np.testing.assert_almost_equal(af1.shape, af2.shape)
# np.testing.assert_almost_equal(af1.total_intensity_from_modes(),af2.total_intensity_from_modes()) #SLOW
def print_scattered_info(af1,af2=None):
if af2 is None:
af2 = af1
print("File is: ",af1._filename,af2._filename)
print("Eigenvalue 5: ",af1.eigenvalue(5),af2.eigenvalue(5))
print("photon_energy : ",af1.photon_energy(),af2.photon_energy())
print("total_intensity_from_spectral_density : ",af1.total_intensity_from_spectral_density(),af2.total_intensity_from_spectral_density())
print("total_intensity : ",af1.total_intensity(),af2.total_intensity())
print("number_modes : ",af1.number_modes(),af2.number_modes())
print("Eigenvalues shape: ", af1.eigenvalues().shape, af2.eigenvalues().shape)
print("x_coordinates shape: ", af1.x_coordinates().shape, af2.x_coordinates().shape)
print("y_coordinates shape: ", af1.y_coordinates().shape, af2.y_coordinates().shape)
print("spectral_density shape: ", af1.spectral_density().shape, af2.spectral_density().shape)
print("reference_electron_density shape: ", af1.reference_electron_density().shape, af2.reference_electron_density().shape)
print("reference_undulator_radiation shape: ",af1.reference_undulator_radiation().shape, af2.reference_undulator_radiation().shape)
print("mode 25 shape: ", af1.mode(25).shape, af2.mode(25).shape)
print("shape : ", af1.shape, af2.shape)
print("keys : ",af1.keys(),af2.keys())
print("total_intensity_from_modes [SLOW]: ",af1.total_intensity_from_modes(),af2.total_intensity_from_modes())
af1.close_h5_file()
af2.close_h5_file()
def tic():
import time
global startTime
startTime = time.time()
def toc(text=""):
import time
print('')
if 'startTime' in globals():
deltaT = time.time() - startTime
hours, minutes = divmod(deltaT, 3600)
minutes, seconds = divmod(minutes, 60)
print("Elapsed time "+text+" : " + str(int(hours)) + "h " + str(int(minutes)) + "min " + str(seconds) + "s ")
else:
print("Warning: start time not set.")
def test_compare_h5_np(filename_h5,filename_np,do_plot_CSD=False):
af1 = CompactAFReader.initialize_from_file(filename_h5)
#
#
af2 = CompactAFReader.initialize_from_file(filename_np)
test_equal(af1,af2)
tic()
for i in range(af1.number_modes()):
print(i,af1.mode(i).sum())
toc()
print(af1.info())
tic()
for i in range(af2.number_modes()):
print(i,af2.mode(i).sum())
toc()
# Cross spectral density
Wx1x2,Wy1y2 = af2.CSD_in_one_dimension(mode_index_max=None)
if do_plot_CSD:
plot_image(np.abs(Wx1x2),1e6*af2.x_coordinates(),1e6*af2.x_coordinates(),show=False,title="Wx1x2")
plot_image(np.abs(Wy1y2),1e6*af2.y_coordinates(),1e6*af2.y_coordinates(),show=True,title="Wy1y2")
def test_id16(filename_ebs_np,filename_hb_np,plot_spectrum=True,plot_mode=False):
if plot_spectrum:
af_hb = CompactAFReader.initialize_from_file(filename_hb_np)
x_hb = np.arange(af_hb.number_modes())
y_hb = np.cumsum(np.abs(af_hb.occupation_array()))
plt.plot(x_hb,y_hb,label="High Beta")
af_ebs = CompactAFReader.initialize_from_file(filename_ebs_np)
x_ebs = np.arange(af_ebs.number_modes())
y_ebs = np.cumsum(np.abs(af_ebs.occupation_array()))
# y_fit_coeff = np.polyfit(x_ebs, np.log(y_ebs), 1, w=np.sqrt(y_ebs))
# print(y_fit_coeff)
# y_fit = np.exp(y_fit_coeff[1]) * np.exp(y_fit_coeff[0] * x_ebs)
plt.plot(x_ebs,y_ebs,label="EBS")
plt.xlabel("Mode index")
plt.ylabel("Cumulated occupation")
ax = plt.subplot(111)
ax.legend(bbox_to_anchor=None)
plt.show()
if plot_mode:
af_hb = CompactAFReader.initialize_from_file(filename_hb_h5)
plot_image(np.abs(af_hb.mode(10)),title=filename_hb_h5)
if __name__ == "__main__":
filename_h5 = "/users/srio/COMSYLD/comsyl/comsyl/calculations/septest_cm_new_u18_2m_1h_s2.5.h5"
filename_np = "/users/srio/COMSYLD/comsyl/comsyl/calculations/septest_cm_new_u18_2m_1h_s2.5.npz"
test_compare_h5_np(filename_h5,filename_np,do_plot_CSD=True)
# ID16 hb
filename_hb_h5 = "/scisoft/data/srio/COMSYL/ID16/id16s_hb_u18_1400mm_1h_s1.0.h5"
filename_hb_np = "/scisoft/data/srio/COMSYL/ID16/id16s_hb_u18_1400mm_1h_s1.0.npz"
filename_ebs_h5 = "/scisoft/data/srio/COMSYL/ID16/id16s_ebs_u18_1400mm_1h_s1.0.h5"
filename_ebs_np = "/scisoft/data/srio/COMSYL/ID16/id16s_ebs_u18_1400mm_1h_s1.0.npz"
test_id16(filename_ebs_np,filename_hb_np)
# CompactAFReader.convert_to_h5("/scisoft/data/srio/COMSYL/ID16/id16s_ebs_u18_1400mm_1h_new_s1.0.npz")
|
{"hexsha": "aab25081cbd5adf3f3400a5c2fa56ba4775cbcd8", "size": 6373, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/autocorrelation/CompactAFReaderTest.py", "max_stars_repo_name": "s-sajid-ali/comsyl", "max_stars_repo_head_hexsha": "f2a5d984b1e870d203a9152bbeca804c4304850e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-22T06:50:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-10T05:26:59.000Z", "max_issues_repo_path": "tests/autocorrelation/CompactAFReaderTest.py", "max_issues_repo_name": "s-sajid-ali/comsyl", "max_issues_repo_head_hexsha": "f2a5d984b1e870d203a9152bbeca804c4304850e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-07-06T09:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-06T09:55:30.000Z", "max_forks_repo_path": "tests/autocorrelation/CompactAFReaderTest.py", "max_forks_repo_name": "s-sajid-ali/comsyl", "max_forks_repo_head_hexsha": "f2a5d984b1e870d203a9152bbeca804c4304850e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-12-09T18:27:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-10T05:27:03.000Z", "avg_line_length": 41.3831168831, "max_line_length": 145, "alphanum_fraction": 0.6926094461, "include": true, "reason": "import numpy", "num_tokens": 1811}
|
import cv2
import numpy as np
from mabopy.config.load_config import LoadConfig
import cvlib
conf = LoadConfig("config.toml").config
capture = cv2.VideoCapture(conf["app"]["camera_uri"])
i = 0
pt1 = (conf["app"]["crop_start"][0],conf["app"]["crop_start"][1])
w = conf["app"]["corp_width"]
pt2 = (pt1[0]+w,pt1[1]+w)
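# pt1 and pt2 are the top-left and bottom-right corners of the square crop
# region taken from the config file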
while True:
ret, img_read = capture.read()
    # capture.get() requires a property id; as an assumed example for this
    # OpenCV 2.x style code, print the frame width:
    print capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
img = img_read[pt1[1]:pt2[1], pt1[0]:pt2[0]]
#img = cv2.imread('N17.png',0)
img = cv2.medianBlur(img,5)
"""
try:
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
except Exception as ex:
print ex
"""
#gray = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)
#gb_kernel = cv2.getGaborKernel((10, 10),18,10,16,0,0,cv2.CV_32F)
#img = cv2.filter2D(gray, cv2.CV_32F, gb_kernel.transpose())
#img = cv2.filter2D(gray, cv2.CV_8U, gb_kernel.transpose())
"""
circles = cv2.HoughCircles(cimg,cv2.cv.CV_HOUGH_GRADIENT,1,150,
param1=150,param2=20,minRadius=0,maxRadius=100)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
# if i[2] >40:
#cv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
#cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)
pass
"""
cv2.imshow('detected circles',img_read)
#cv2.imwrite("cd34.jpg", cimg)
if cv2.waitKey(10) == 27:
break
cv2.destroyAllWindows()
|
{"hexsha": "64d0d37bc8a24773d73261de809c25454488b9b7", "size": 1536, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/vision/vision25/dc4.py", "max_stars_repo_name": "mabotech/mabo.io", "max_stars_repo_head_hexsha": "7f646db9d5ee3cd0b137866bf8eaf295890f134c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py/vision/vision25/dc4.py", "max_issues_repo_name": "mabotech/mabo.io", "max_issues_repo_head_hexsha": "7f646db9d5ee3cd0b137866bf8eaf295890f134c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/vision/vision25/dc4.py", "max_forks_repo_name": "mabotech/mabo.io", "max_forks_repo_head_hexsha": "7f646db9d5ee3cd0b137866bf8eaf295890f134c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-30T01:47:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-30T01:47:01.000Z", "avg_line_length": 23.2727272727, "max_line_length": 79, "alphanum_fraction": 0.58203125, "include": true, "reason": "import numpy", "num_tokens": 500}
|
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9.
Copyright (c) 2021, Oracle and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl
-}
open import LibraBFT.Base.Types
import LibraBFT.Impl.Consensus.ConsensusTypes.Block as Block
import LibraBFT.Impl.Consensus.ConsensusTypes.TimeoutCertificate as TimeoutCertificate
open import LibraBFT.Impl.OBM.Logging.Logging
open import LibraBFT.ImplShared.Base.Types
open import LibraBFT.ImplShared.Consensus.Types
open import Optics.All
open import Util.Hash
open import Util.Prelude
------------------------------------------------------------------------------
open import Data.String using (String)
module LibraBFT.Impl.Consensus.ConsensusTypes.ProposalMsg where
verifyWellFormed : ProposalMsg → Either ErrLog Unit
verifyWellFormed self = do
lcheck (not (Block.isNilBlock (self ^∙ pmProposal)))
(here' ("Proposal for a NIL block" ∷ []))
withErrCtx' ("Failed to verify ProposalMsg's block" ∷ [])
(Block.verifyWellFormed (self ^∙ pmProposal))
lcheck (self ^∙ pmProposal ∙ bRound >? 0)
         (here' ("Proposal has round <= 0" ∷ []))
lcheck (self ^∙ pmProposal ∙ bEpoch == self ^∙ pmSyncInfo ∙ siEpoch)
(here' ("ProposalMsg has different epoch than SyncInfo" ∷ [])) -- lsSI (self ^∙ pmSyncInfo)
lcheck (self ^∙ pmProposal ∙ bParentId == self ^∙ pmSyncInfo ∙ siHighestQuorumCert ∙ qcCertifiedBlock ∙ biId)
(here' ( "Proposal SyncInfo HQC CertifiedBlock id not eq to block parent id" ∷ []))
-- lsSI (self ^∙ pmSyncInfo)
let previousRound = self ^∙ pmProposal ∙ bRound ∸ 1 -- NOTE: monus usage
let highestCertifiedRound =
max (self ^∙ pmProposal ∙ bQuorumCert ∙ qcCertifiedBlock ∙ biRound)
(maybe 0 (_^∙ tcRound) (self ^∙ pmSyncInfo ∙ siHighestTimeoutCert))
lcheck (previousRound == highestCertifiedRound)
(here' ("Proposal does not have a certified round" ∷ []))
-- lsMTC (self ^∙ pmSyncInfo ∙ siHighestTimeoutCert)
lcheck (is-just (self ^∙ pmProposal ∙ bAuthor))
(here' ("Proposal does not have an author" ∷ []))
-- LBFT-DIFF : this check used to live in EventProcessor ∙ processProposedBlockM
-- TODO: is it needed?
-- Safety invariant: For any valid proposed block
-- , its parent block == the block pointed to by its QC.
lcheck (self ^∙ pmProposal ∙ bParentId == self ^∙ pmProposal ∙ bQuorumCert ∙ qcCertifiedBlock ∙ biId)
(here' ("parent id /= qcCB" ∷ [])) -- show (self ^∙ pmProposal)
where
here' : List String → List String
here' t = "ProposalMsg" ∷ "verifyWellFormed" {-∷ lsPM self-} ∷ t
verify : ProposalMsg → ValidatorVerifier → Either ErrLog Unit
verify self validator = do
Block.validateSignature (self ^∙ pmProposal) validator
TimeoutCertificate.verify' (self ^∙ pmSyncInfo ∙ siHighestTimeoutCert) validator
verifyWellFormed self
|
{"hexsha": "0d7df635d46c8cce0d46316206a62f825ebff5f9", "size": 3051, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/LibraBFT/Impl/Consensus/ConsensusTypes/ProposalMsg.agda", "max_stars_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_stars_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LibraBFT/Impl/Consensus/ConsensusTypes/ProposalMsg.agda", "max_issues_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_issues_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LibraBFT/Impl/Consensus/ConsensusTypes/ProposalMsg.agda", "max_forks_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_forks_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0163934426, "max_line_length": 111, "alphanum_fraction": 0.6624057686, "num_tokens": 845}
|
import tvm, tvm.relay.testing
import tvm.contrib.graph_runtime as runtime
import os, argparse
import numpy as np
from tvm import autotvm, relay, auto_scheduler
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
from tvm.contrib.utils import tempdir
from tvm.contrib.debugger import debug_runtime
from tvm.relay.testing.mobilenet import conv_block, separable_conv_block, get_workload
from utils import DEVICES, get_runner_args
from relay_helper import fuse_preprocess, graph_tuning_preprocess
from pprint import pprint
def test_network(image_shape, layout='NHWC'):
shape = (1, 224, 224, 3) if layout == 'NHWC' else (1, 3, 224, 224)
data = relay.var('data', shape=shape)
body = conv_block(data, "conv_block_1", 32, strides=(2, 2), layout=layout)
body = separable_conv_block(body, 'separable_conv_block_1', 32, 64, layout=layout)
body = separable_conv_block(body, 'separable_conv_block_2', 64, 128, downsample=True, layout=layout)
_, model_params = get_workload(batch_size=1, dtype='float32', image_shape=image_shape, layout=layout)
params = {}
for k, v in model_params.items():
if ("conv_block_1" in k) or ('separable_conv_block_1' in k) or ('separable_conv_block_2' in k):
params[k] = v
return relay.Function(relay.analysis.free_vars(body), body), params
# image_shape and layout are made consistent outside the function.
def get_network(name, batch_size, dtype="float32", image_shape=(3, 224, 224), layout="NCHW"):
    """Get the symbol definition and random weight of a network"""
    assert (layout == "NHWC" or layout == "NCHW")
input_shape = tuple([batch_size] + list(image_shape))
output_shape = (batch_size, 1000)
if "resnet" in name:
n_layer = int(name.split('_')[1])
mod, params = relay.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype, image_shape=image_shape, layout=layout)
elif "vgg" in name:
n_layer = int(name.split('_')[1])
mod, params = relay.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif name == 'mobilenet_v1':
mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype, image_shape=image_shape, version='v1', layout=layout)
elif name == 'mobilenet_v2':
mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype, image_shape=image_shape, version='v2', layout=layout)
elif name == 'mnasnet_a1':
mod, params = relay.testing.mnasnet.get_workload(batch_size=batch_size, dtype=dtype, image_shape=image_shape, version='a1', layout=layout)
elif name == 'mnasnet_b1':
mod, params = relay.testing.mnasnet.get_workload(batch_size=batch_size, dtype=dtype, image_shape=image_shape, version='b1', layout=layout)
elif name == 'squeezenet_v1.1':
mod, params = relay.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1', dtype=dtype)
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'test':
f, params = test_network(image_shape, layout=layout)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
else:
raise ValueError("Unsupported network: " + name)
return mod, params, input_shape, output_shape
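# A minimal usage sketch of get_network (the network name and shapes below are
# ones this script already supports; kept commented so that importing or
# running this module stays side-effect free):
#
# mod, params, in_shape, out_shape = get_network('resnet_18', batch_size=1,
#                                                image_shape=(3, 224, 224),
#                                                layout='NCHW')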
def tune_autotvm_tasks(tasks,
tuning_opt,
log_filename='tuning.log'):
print("Tuning...")
for i, task in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " %(i+1, len(tasks))
print(task.config_space)
print(task.workload)
# AutoTVM setting
device_name = tuning_opt.device
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(**(get_runner_args(device_name)))
)
tuner = autotvm.tuner.XGBTuner(task, feature_type="curve")
# Transfer learning if the training log exists
if tuning_opt.autotvm_transfer_learning and os.path.isfile(log_filename):
tuner.load_history(autotvm.record.load_from_file(log_filename))
task_trial = min(tuning_opt.tuning_trials, len(task.config_space))
tuner.tune(n_trial=task_trial,
early_stopping=tuning_opt.autotvm_early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(task_trial, prefix=prefix),
autotvm.callback.log_to_file(log_filename)
])
# Pick best records to a cache file
autotvm.record.pick_best(log_filename, '{}_best.log'.format(log_filename.split('.')[0]))
# Use graph tuner to achieve graph level optimal schedules
# Set use_DP=False if it takes too long to finish.
def tune_graph(graph, dshape, target_str, records, opt_sch_file, use_DP=True):
target = tvm.target.Target(target_str)
target_op = [relay.op.get("nn.conv2d"), relay.op.get("nn.fused_conv2d")] # Tune fused_conv2d too.
Tuner = DPTuner if use_DP else PBQPTuner
executor = Tuner(graph, {'data': dshape}, records, target_op, target, max_sch_num=100)
executor.benchmark_layout_transform(min_exec_num=5000)
executor.run()
executor.write_opt_sch2record_file(opt_sch_file)
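# A minimal usage sketch of tune_graph (the log file names are placeholders;
# tune_and_evaluate below derives the real paths from the command-line options):
#
# tune_graph(mod['main'], (1, 3, 224, 224), 'llvm -mcpu=core-avx2',
#            'kernels.log', 'graph_opt.log', use_DP=True)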
def tune_auto_scheduler_tasks(tasks, task_weights, tuning_opt, device_name, log_filename):
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=tuning_opt.tuning_trials,
measure_callbacks=[auto_scheduler.RecordToFile(log_filename)],
verbose=2,
builder=auto_scheduler.LocalBuilder(),
runner=auto_scheduler.RPCRunner(**(get_runner_args(device_name)))
)
tuner.tune(tune_option)
def tune_and_evaluate(tuning_opt, dtype='float32'):
device_name = tuning_opt.device
target_str = DEVICES[device_name]["target"]
network = tuning_opt.network
assert target_str in ['cuda', 'llvm -mcpu=core-avx2', 'llvm -mcpu=skylake-avx512']
assert network in ['mobilenet_v1', 'mobilenet_v2', 'mnasnet_a1', 'resnet_18', 'resnet_50', 'test']
if tuning_opt.use_auto_scheduler:
# Extract workloads from relay program
print('Extract tasks...')
if 'llvm' in target_str: # CPU & NCHWC, use NCHW to get the network though
image_shape, layout = (3, 224, 224), 'NCHW'
folder_name = 'logs/auto_scheduler/model/cpu/{}'.format(network)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
log_filename = '{}/nchwc_{}.json'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
else:
folder_name = 'logs/auto_scheduler/model/gpu/{}'.format(network)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
if tuning_opt.use_nchw: # GPU & NCHW
image_shape, layout = (3, 224, 224), 'NCHW'
log_filename = '{}/nchw_{}.json'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
else: # GPU & NHWC
image_shape, layout = (224, 224, 3), "NHWC"
log_filename = '{}/nhwc_{}.json'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
mod, params, input_shape, _ = get_network(network, batch_size=1, dtype=dtype, image_shape=image_shape, layout=layout)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target_str)
print('Tuning...')
if not tuning_opt.skip_tuning:
            tune_auto_scheduler_tasks(tasks, task_weights, tuning_opt, device_name, log_filename)
print("############### Compile... ###############")
with auto_scheduler.ApplyHistoryBest(log_filename):
with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
graph_factory = relay.build(mod, target=target_str, params=params)
graph, lib, params = graph_factory.graph_json, graph_factory.lib, graph_factory.params
else:
if 'llvm' in target_str: # CPU & NCHWC, use NCHW to get the network though
image_shape, layout = (3, 224, 224), 'NCHW'
folder_name = 'logs/autotvm/model/cpu/{}'.format(network)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
log_filename = '{}/nchwc_{}.log'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
graph_opt_sch_file = '{}/graph_opt_{}.log'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
else:
folder_name = 'logs/autotvm/model/gpu/{}'.format(network)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
if tuning_opt.use_nchw: # GPU & NCHW
image_shape, layout = (3, 224, 224), 'NCHW'
log_filename = '{}/nchw_{}.log'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
else: # GPU & NHWC
image_shape, layout = (224, 224, 3), "NHWC"
log_filename = '{}/nhwc_{}.log'.format(folder_name, 'unfused' if tuning_opt.no_fusion else 'fused')
mod, params, input_shape, _ = get_network(network, batch_size=1, dtype=dtype, image_shape=image_shape, layout=layout)
# Extract workloads from relay program
if not tuning_opt.skip_tuning:
print('Extract tasks...')
if tuning_opt.no_fusion:
tasks = autotvm.task.extract_from_program(mod['main'], target=target_str, params=params, ops=(relay.op.get('nn.conv2d'), relay.op.get('nn.dense')))
else:
tmp_f = graph_tuning_preprocess(mod["main"], model_name=network, layout=layout)
tasks = autotvm.task.extract_from_program(tmp_f, target=target_str, params=params, ops=(relay.op.get('nn.conv2d'), relay.op.get('nn.fused_conv2d'), relay.op.get('nn.dense')))
# pprint(tasks)
tune_autotvm_tasks(tasks, tuning_opt, log_filename=log_filename)
# Tune graph for CPU
if not tuning_opt.autotvm_skip_graph_tuning and ('llvm' in target_str):
tmp_f = mod['main']
if not tuning_opt.no_fusion:
tmp_f = graph_tuning_preprocess(tmp_f, model_name=network, layout=layout)
tune_graph(tmp_f, input_shape, target_str, log_filename, graph_opt_sch_file)
# Compile kernels with history best records
print("############### Compile... ###############")
with autotvm.apply_history_best(log_filename) if 'cuda' in target_str else autotvm.apply_graph_best(graph_opt_sch_file):
if not tuning_opt.no_fusion:
mod = fuse_preprocess(mod['main'], params, target_str, model_name=network, layout=layout)
with tvm.transform.PassContext(opt_level=3):
# """
# build = optimize + generate_code
# build / generate_code: return mod
# optimize: return mod and params
# """
# # Merged
# graph_factory = relay.build_module.build(mod, target=target_str, params=params)
                # Split: optimize, then generate code
mod, params = relay.build_module.optimize(mod, target=target_str, params=params) # This step finish processing the relay graph
graph_factory = relay.build_module.generate_code(mod, target=target_str, params=params)
graph, lib, params = graph_factory.graph_json, graph_factory.lib, graph_factory.params
# Export library
tmp = tempdir()
filename = '{}.tar'.format(network)
lib.export_library(tmp.relpath(filename))
# Load parameters
ctx = tvm.context(target_str, 0)
if tuning_opt.enable_debugger:
module = debug_runtime.create(graph, lib, ctx, dump_root='/tmp/tvmdbg')
else:
module = runtime.create(graph, lib, ctx)
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input('data', data_tvm)
module.set_input(**params)
module.run()
# Evaluate
print('Evaluate inference time cost...')
ftimer = module.module.time_evaluator('run', ctx, number=1, repeat=600)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print('Mean inference time (std dev): %.2f ms (%.2f ms)' % (np.mean(prof_res), np.std(prof_res)))
if __name__ == '__main__':
def get_options():
parser = argparse.ArgumentParser(description="Parses command.")
parser.add_argument("-c", "--use_nchw", action="store_true", help="Use NCHW as the layout for baseline.")
parser.add_argument("-d", "--enable_debugger", action="store_true", help="Enable debugger.")
parser.add_argument("-e", "--autotvm_early_stopping", type=int, default=800, help="Number of AutoTVM early stopping trials.")
parser.add_argument("-k", "--skip_tuning", action="store_true", help="Run AutoTVM/AutoScheduler tuned kernel.")
parser.add_argument("-l", "--autotvm_transfer_learning", action="store_true", help="Load existing kernel tuning log.")
parser.add_argument("-p", "--autotvm_skip_graph_tuning", action="store_true", help="Load existing graph tuning log.")
parser.add_argument("-n", "--no_fusion", action="store_true", help="No fusion.")
parser.add_argument("-r", "--use_auto_scheduler", action="store_true", help="Use auto scheduler.")
parser.add_argument("-t", "--tuning_trials", type=int, default=2000, help="Number of AutoTVM trials.")
parser.add_argument("-v", "--device", type=str, default="i7_7700K", help="Device name.")
parser.add_argument("-w", "--network", type=str, default="mobilenet_v1", help="Network type.")
options = parser.parse_args()
return options
options = get_options()
tune_and_evaluate(options)
|
{"hexsha": "b2f3102de96aa66e396cbeeaae13479636474299", "size": 14025, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_test.py", "max_stars_repo_name": "moderato/LayerFusion", "max_stars_repo_head_hexsha": "3acdbe18e9575eee6e4ab3b60293393a40eedac5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model_test.py", "max_issues_repo_name": "moderato/LayerFusion", "max_issues_repo_head_hexsha": "3acdbe18e9575eee6e4ab3b60293393a40eedac5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_test.py", "max_forks_repo_name": "moderato/LayerFusion", "max_forks_repo_head_hexsha": "3acdbe18e9575eee6e4ab3b60293393a40eedac5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.5305343511, "max_line_length": 190, "alphanum_fraction": 0.6661675579, "include": true, "reason": "import numpy", "num_tokens": 3438}
|
# import the necessary packages
import os
import cv2
import imutils
import numpy as np
def align_images(image, template, maxFeatures=700, keepPercent=0.2,
debug=False, output_dir=''):
'''
Aligns image to template via the steps:
1. ORB keypoint detection
2. Brute force hamming distance keypoint matching
3. RANSAC homography estimation
Input:
image - the image to be aligned
template - the template the image should be aligned to
[Optional] maxFeatures - the maximum number of keypoints to detect
    [Optional] keepPercent - the percentage of keypoint matches to actually use during the homography estimation
    [Optional] debug - if True, write keypoint-match visualizations to output_dir
    [Optional] output_dir - the directory the debug images are written to
Output: (aligned, RANSAC_inliers)
    aligned - the aligned image
RANSAC_inliers - the number of keypoint matches which are determined as inliers for the RANSAC homography estimation
'''
# convert both the input image and template to grayscale
imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
templateGray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
# use ORB to detect keypoints and extract (binary) local
# invariant features
orb = cv2.ORB_create(maxFeatures)
(kpsA, descsA) = orb.detectAndCompute(imageGray, None)
(kpsB, descsB) = orb.detectAndCompute(templateGray, None)
# match the features
method = cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING
matcher = cv2.DescriptorMatcher_create(method)
matches = matcher.match(descsA, descsB, None)
# sort the matches by their distance (the smaller the distance,
# the "more similar" the features are)
matches = sorted(matches, key=lambda x: x.distance)
# keep only the top matches
keep = int(len(matches) * keepPercent)
matches = matches[:keep]
# allocate memory for the keypoints (x,y-coordinates) from the
# top matches -- we'll use these coordinates to compute our
# homography matrix
ptsA = np.zeros((len(matches), 2), dtype="float")
ptsB = np.zeros((len(matches), 2), dtype="float")
# loop over the top matches
for (i, m) in enumerate(matches):
# indicate that the two keypoints in the respective images
# map to each other
ptsA[i] = kpsA[m.queryIdx].pt
ptsB[i] = kpsB[m.trainIdx].pt
    # compute the homography matrix between the two sets of matched points;
    # cv2.findHomography raises cv2.error when fewer than 4 keypoint matches
    # are available, and we let that error propagate to the caller
    (H, mask) = cv2.findHomography(ptsA, ptsB, method=cv2.RANSAC, maxIters=4000, ransacReprojThreshold=15)
    mask_inliers = np.array(mask).ravel()
    mask_outliers = np.logical_not(mask_inliers).astype(int)
# print(f'matches: {len(matches)}, output mask: {mask.shape}, mask_inliers.count_nonzero(): {np.count_nonzero(mask_inliers)}, mask_outliers.count_nonzero(): {np.count_nonzero(mask_outliers)}')
# check to see if we should visualize the matched keypoints
if debug:
# draw matches (all)
matchedVisAll = cv2.drawMatches(image, kpsA, template, kpsB,
matches, None)
# draw matches (RANSAC inliers)
matchedVis = cv2.drawMatches(image, kpsA, template, kpsB,
matches, None, matchColor=(0, 255, 0), matchesMask=mask_inliers)
# draw matches (RANSAC outliers)
matchedVis = cv2.drawMatches(image, kpsA, template, kpsB,
matches, matchedVis, matchColor=(0, 0, 255), matchesMask=mask_outliers, flags=cv2.DrawMatchesFlags_DRAW_OVER_OUTIMG)
matchedVisAll = imutils.resize(matchedVisAll, width=1000)
matchedVis = imutils.resize(matchedVis, width=1000)
cv2.imwrite(os.path.join(output_dir, 'matched_keypoints.jpg'), matchedVisAll)
cv2.imwrite(os.path.join(output_dir, 'matched_keypoints_inliers.jpg'), matchedVis)
# use the homography matrix to align the images
(h, w) = template.shape[:2]
aligned = cv2.warpPerspective(image, H, (w, h))
# return the aligned image
return aligned, np.count_nonzero(mask_inliers)
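# A minimal usage sketch; 'scan.jpg' and 'template.jpg' are placeholder file
# names, not assets shipped with this module.
if __name__ == '__main__':
    image = cv2.imread('scan.jpg')
    template = cv2.imread('template.jpg')
    if image is None or template is None:
        raise SystemExit('place scan.jpg and template.jpg next to this script')
    aligned, n_inliers = align_images(image, template, debug=True, output_dir='.')
    print('RANSAC inliers: %d' % n_inliers)
    cv2.imwrite('aligned.jpg', aligned)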
|
{"hexsha": "0fae1c16eba77ad9b589a00f13b358d84884ac64", "size": 4129, "ext": "py", "lang": "Python", "max_stars_repo_path": "vcv/pyimagesearch/alignment/align_images.py", "max_stars_repo_name": "mhudnell/vaccination-card-verification", "max_stars_repo_head_hexsha": "f2db657e80ac77b2845192cc606b0d1e9a66e8a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-12T03:03:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:50:42.000Z", "max_issues_repo_path": "vcv/pyimagesearch/alignment/align_images.py", "max_issues_repo_name": "mhudnell/vaccination-card-verification", "max_issues_repo_head_hexsha": "f2db657e80ac77b2845192cc606b0d1e9a66e8a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-12T17:45:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-11T14:52:51.000Z", "max_forks_repo_path": "vcv/pyimagesearch/alignment/align_images.py", "max_forks_repo_name": "mhudnell/vaccination-card-verification", "max_forks_repo_head_hexsha": "f2db657e80ac77b2845192cc606b0d1e9a66e8a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-12T17:42:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-28T08:17:45.000Z", "avg_line_length": 41.7070707071, "max_line_length": 196, "alphanum_fraction": 0.6807943812, "include": true, "reason": "import numpy", "num_tokens": 1045}
|
# -*- coding: utf-8 -*-
# ## Grafting models together
# Much of scientific thought and scholarship involves reading papers and textbooks with different models and synthesizing a novel model from components found in existing, well studied models.
#
# We refer to this as model grafting, where a component is taken from one model and grafted onto another model. SemanticModels supports automating much of the low-level code detail of this task, freeing scientists to think about which features to combine instead of the mechanical aspects of changing the code by hand.
#
# This notebook is an example based on the SEIR model and the ScalingModel examples in the epirecipes cookbook.
using DifferentialEquations
using SemanticModels.Parsers
using SemanticModels.ModelTools
using SemanticModels.ModelTools.ExpODEModels
# ## Loading the original model
# We use parsefile to load the model into an expression. The original model is an SEIR model, which has four states: susceptible, exposed, infected, and recovered. It has parameters $\beta, \gamma, \mu, \sigma$.
expr1 = parsefile("../examples/epicookbook/src/SEIRmodel.jl")
model1 = model(ExpODEModel, expr1)
module1 = eval(model1.expr)
# ## Running our baseline model
#
# The code that defines the baseline model creates a module for that model to run in. This ensures that the code will not have unintended side effects when run in a julia process with other models. The entrypoint to this module is called `main`, which is a function that has no arguments and does the setup and execution of the model.
module1.main()
# seir_ode is the name of the function we want to modify
# an ODEProblem is defined by the right hand side of the equation.
# $du/dt = f(u, t)$
# The ScalingModel provides a population growth component that we want to graft onto the SEIR model to create an SEIR model with population dynamics. We load that model from its source file. You can inspect this file to see the definition of $dS/dt = r * (1 - S / K) * S - \beta * S * I$ which includes a population growth rate parameter $r$.
expr2 = parsefile("../examples/epicookbook/src/ScalingModel.jl")
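# For reference, a plain-Julia sketch of that growth flux (r, K, β are the
# parameter names used in the text; this is illustrative only, since the graft
# below extracts the real expression from the parsed AST rather than
# redefining it):
#
# dS(S, I, r, K, β) = r * (1 - S / K) * S - β * S * I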
# Once the ASTs are processed into a structured representation we can manipulate with regular julia code, we are able to write manipulations of the models that operate on a higher level than textual changes to the code.
model2 = model(ExpODEModel, expr2)
fluxes(x::ExpODEModel) = x.variables[1].flux
# Find the expression we want to graft
# vital dynamics S rate expression
fluxvar = fluxes(model2)[1].args[2].args[1]
popgrowth = replacevar(findassign(model2.funcs[1], fluxvar)[1], :K, :N).args[2].args[2]
ex = model1.variables[1].flux[1]
ex.args[2] = :($(popgrowth)+$(ex.args[2]))
# define N as the sum of the entries of Y, i.e. S+E+I
@assert argslist(:(function foo(x, y); return x+y; end)) == [:foo, :x, :y]
pushfirst!(bodyblock(model1.funcs[1]), :(N = sum(Y)))
# we need to add a new parameter to the function we are going to define
# this signature doesn't match the old signature so we are going to
# do some surgery on the main function to add that parameter
# this parameter could instead be added to the vector of parameters.
pusharg!(model1.funcs[1], :r)
# gensym gives us a unique name for the new function
g_func = gensym(argslist(model1.funcs[1])[1])
argslist(model1.funcs[1])[1] = g_func
# ## Model Augmentations often require new parameters
#
# When we add the population growth term to the SEIR model, we introduce a new parameter $r$
# that needs to be supplied to the model. One problem with approaches that require scientists
# to modify source code is the fact that adding the new features necessitates changes to the
# APIs provided by the original author. SemanticModels.ModelTools provides a higher level API
# for making these changes that assists in propagating the necessary changes to the API.
#
# For example, in this code we need to add an argument to the entrypoint function `main` and
# provide an anonymous function that conforms to the API that `DifferentialEquations` expects
# from its inputs.
mainx = findfunc(model1.expr, :main)[end]
pusharg!(mainx, :λ)
# An `ODEProblem` expects the user to provide a function $f(du, u, p, t)$ which takes the current fluxes, current system state, parameters, and current time as its arguments and updates the value of `du`. Since our new function `g_func` does not satisfy this interface, we need to introduce a wrapper function that does.
#
# Here is an instance where having a smart compiler helps julia. In many dynamic languages where this kind of metaprogramming would be easy, the runtime is not smart enough to inline these anonymous functions, which means that there is additional runtime performance overhead to metaprogramming like this. Julia's compiler (and LLVM) can inline these functions, which drastically reduces that overhead.
setarg!(model1.calls[end], :seir_ode, :((du,u,p,t)->$g_func(du,u,p,t,λ)))
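# The expression spliced in above is equivalent to hand-writing a closure
# over λ:
#
# wrapped = (du, u, p, t) -> g_func(du, u, p, t, λ)
#
# which satisfies the f(du, u, p, t) signature that DifferentialEquations
# expects while still threading through the new growth-rate argument.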
@show model1.expr
NewModule = eval(model1.expr)
# ## Modeling configuration
#
# The following code sets up our modeling configuration with initial conditions and parameters. It represents the entry point to solving the model.
findfunc(model1.expr, :main)[end]
# ## Solving the new model
#
# Once we have changed the function `seir_ode` and adapted the API of `main` to suit we can do a parameter sweep over our new parameter by solving the problem with different values of $\lambda$.
newsol = NewModule.main(1)
# ## Parameter Estimation
# Adding a capability to a model usually introduces additional parameters that must be chosen. Analyzing a model requires developing a procedure for estimating those parameters or characterizing the effect of that parameter on the behavior of the modeled system.
# Here we sweep over population growth rates to show what happens when the population growth rate changes.
scalegrowth(λ=1.0) = NewModule.main(λ)
println("S\tI\tR")
for λ in [1.0,1.1,1.2,1.3,1.4,1.5]
S,I,R = scalegrowth(λ)(365)
println("$S\t$I\t$R")
end
# ## It Works!
#
# This simulation allows an epidemiologist to examine the effects of population growth on an SEIR disease outbreak. A brief analysis of this simulation shows that as you increase the population growth rate, you increase the final population of infected people. More sophisticated analysis could be employed to show something more interesting about this model.
#
# We have shown how you can use SemanticModels.jl to combine features of various ODE systems and solve them with a state of the art solver to increase the capabilities of a code that implements a scientific model. We call this combination process grafting and believe that it supports a frequent use case of scientific programming.
|
{"hexsha": "782e2cf845d35994fca66b3d1c7acaf49ee40951", "size": 6710, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/odegraft.jl", "max_stars_repo_name": "mikiec84/SemanticModels.jl", "max_stars_repo_head_hexsha": "f81baf0789cc547375f300429d0fd49c866d5339", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/odegraft.jl", "max_issues_repo_name": "mikiec84/SemanticModels.jl", "max_issues_repo_head_hexsha": "f81baf0789cc547375f300429d0fd49c866d5339", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/odegraft.jl", "max_forks_repo_name": "mikiec84/SemanticModels.jl", "max_forks_repo_head_hexsha": "f81baf0789cc547375f300429d0fd49c866d5339", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.0, "max_line_length": 401, "alphanum_fraction": 0.7691505216, "num_tokens": 1597}
|
// Copyright (C) 2015-2019 Internet Systems Consortium, Inc. ("ISC")
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include <config.h>
#include <asiolink/io_address.h>
#include <dhcp/duid.h>
#include <dhcp_ddns/ncr_msg.h>
#include <dhcpsrv/ncr_generator.h>
#include <dhcpsrv/cfgmgr.h>
#include <dhcpsrv/d2_client_mgr.h>
#include <dhcpsrv/lease.h>
#include <ctime>
#include <boost/bind.hpp>
#include <gtest/gtest.h>
#include <stdint.h>
#include <string>
using namespace isc;
using namespace isc::asiolink;
using namespace isc::dhcp;
using namespace isc::dhcp_ddns;
namespace {
/// @brief Base test fixture class for testing generation of the name
/// change requests from leases.
///
/// @tparam LeasePtrType One of the @c Lease4Ptr or @c Lease6Ptr.
template<typename LeasePtrType>
class NCRGeneratorTest : public ::testing::Test {
public:
/// @brief Reference to the D2 client manager.
D2ClientMgr& d2_mgr_;
/// @brief Pointer to the lease object used by the tests.
LeasePtrType lease_;
/// @brief Constructor.
NCRGeneratorTest()
: d2_mgr_(CfgMgr::instance().getD2ClientMgr()), lease_() {
}
/// @brief Initializes the lease pointer used by the tests and starts D2.
///
/// This method initializes the pointer to the lease which will be used
/// throughout the tests. Because the lease may be either a v4 or v6 lease
/// it calls a virtual function @c initLease, which must be implemented
/// in the derived classes as appropriate. Note that lease object can't
/// be initialized in the constructor, because it is not allowed to
/// call virtual functions in the constructors. Hence, the @c SetUp
/// function is needed.
virtual void SetUp() {
// Base class SetUp.
::testing::Test::SetUp();
// Initialize lease_ object.
initLease();
// Start D2 by default.
enableD2();
}
/// @brief Stops D2.
virtual void TearDown() {
// Stop D2 if running.
disableD2();
// Base class TearDown.
::testing::Test::TearDown();
}
/// @brief Enables DHCP-DDNS updates.
///
/// Replaces the current D2ClientConfiguration with a configuration
/// which has updates enabled and the control options set based upon
/// the bit mask of options.
void enableD2() {
D2ClientConfigPtr cfg(new D2ClientConfig());
ASSERT_NO_THROW(cfg->enableUpdates(true));
ASSERT_NO_THROW(CfgMgr::instance().setD2ClientConfig(cfg));
d2_mgr_.startSender(boost::bind(&NCRGeneratorTest::d2ErrorHandler, this,
_1, _2));
}
/// @brief Disables DHCP-DDNS updates.
void disableD2() {
d2_mgr_.stopSender();
// Default constructor creates a config with DHCP-DDNS updates
// disabled.
D2ClientConfigPtr cfg(new D2ClientConfig());
CfgMgr::instance().setD2ClientConfig(cfg);
}
/// @brief No-op error handler for D2.
void d2ErrorHandler(const NameChangeSender::Result, NameChangeRequestPtr&) {
// no-op
}
/// @brief Abstract method to initialize @c lease_ object.
virtual void initLease() = 0;
/// @brief Verify that NameChangeRequest holds valid values.
///
/// This function picks first NameChangeRequest from the internal server's
/// queue and checks that it holds valid parameters. The NameChangeRequest
/// is removed from the queue.
///
/// @param type An expected type of the NameChangeRequest (Add or Remove).
/// @param reverse An expected setting of the reverse update flag.
/// @param forward An expected setting of the forward update flag.
    /// @param addr A string representation of the IP address held in the
/// NameChangeRequest.
/// @param dhcid An expected DHCID value.
/// @note This value is the value that is produced by
/// dhcp_ddns::D2Dhcid::createDigest() with the appropriate arguments. This
/// method uses encryption tools to produce the value which cannot be
/// easily duplicated by hand. It is more or less necessary to generate
/// these values programmatically and place them here. Should the
/// underlying implementation of createDigest() change these test values
/// will likely need to be updated as well.
/// @param expires A timestamp when the lease associated with the
/// NameChangeRequest expires.
/// @param len A valid lifetime of the lease associated with the
/// NameChangeRequest.
/// @param fqdn The expected string value of the FQDN, if blank the
/// check is skipped
void verifyNameChangeRequest(const isc::dhcp_ddns::NameChangeType type,
const bool reverse, const bool forward,
const std::string& addr,
const std::string& dhcid,
const uint64_t expires,
const uint16_t len,
const std::string& fqdn="") {
NameChangeRequestPtr ncr;
ASSERT_NO_THROW(ncr = CfgMgr::instance().getD2ClientMgr().peekAt(0));
ASSERT_TRUE(ncr);
EXPECT_EQ(type, ncr->getChangeType());
EXPECT_EQ(forward, ncr->isForwardChange());
EXPECT_EQ(reverse, ncr->isReverseChange());
EXPECT_EQ(addr, ncr->getIpAddress());
EXPECT_EQ(dhcid, ncr->getDhcid().toStr());
EXPECT_EQ(expires, ncr->getLeaseExpiresOn());
EXPECT_EQ(len, ncr->getLeaseLength());
EXPECT_EQ(isc::dhcp_ddns::ST_NEW, ncr->getStatus());
if (!fqdn.empty()) {
EXPECT_EQ(fqdn, ncr->getFqdn());
}
// Process the message off the queue
ASSERT_NO_THROW(CfgMgr::instance().getD2ClientMgr().runReadyIO());
}
/// @brief Sets the FQDN information for a lease and queues an NCR.
///
/// @param fwd Perform forward update.
/// @param rev Perform reverse update.
/// @param fqdn Hostname.
void queueRemovalNCR(const bool fwd, const bool rev, const std::string& fqdn) {
lease_->fqdn_fwd_ = fwd;
lease_->fqdn_rev_ = rev;
lease_->hostname_ = fqdn;
/// Send NCR to D2.
ASSERT_NO_THROW(queueNCR(CHG_REMOVE, lease_));
}
/// @brief Sets the FQDN information for a lease and queues an NCR.
///
/// @param chg_type Name change type.
/// @param fwd Perform forward update.
/// @param rev Perform reverse update.
/// @param fqdn Hostname.
void sendNCR(const NameChangeType chg_type, const bool fwd, const bool rev,
const std::string& fqdn) {
lease_->fqdn_fwd_ = fwd;
lease_->fqdn_rev_ = rev;
lease_->hostname_ = fqdn;
/// Send NCR to D2.
ASSERT_NO_THROW(queueNCR(chg_type, lease_));
}
/// @brief Test that for the given values the NCR is not generated.
///
/// @param chg_type Name change type.
/// @param fwd Perform forward update.
/// @param rev Perform reverse update.
/// @param fqdn Hostname.
void testNoUpdate(const NameChangeType chg_type, const bool fwd, const bool rev,
const std::string& fqdn) {
ASSERT_NO_FATAL_FAILURE(sendNCR(chg_type, fwd, rev, fqdn));
ASSERT_EQ(0, d2_mgr_.getQueueSize());
}
    /// @brief Test that sending an NCR while DNS updates are disabled does not throw.
///
/// @param chg_type Name change type.
void testD2Disabled(const NameChangeType chg_type) {
// Disable DDNS updates.
disableD2();
ASSERT_NO_FATAL_FAILURE(sendNCR(chg_type, true, true, "MYHOST.example.com."));
}
/// @brief Test that NCR is generated as expected.
///
/// @param chg_type Name change type.
/// @param fwd Perform forward update.
/// @param rev Perform reverse update.
/// @param fqdn Hostname.
/// @param exp_dhcid Expected DHCID.
void testNCR(const NameChangeType chg_type, const bool fwd, const bool rev,
const std::string& fqdn, const std::string exp_dhcid) {
// Queue NCR.
ASSERT_NO_FATAL_FAILURE(sendNCR(chg_type, fwd, rev, fqdn));
// Expecting one NCR be generated.
ASSERT_EQ(1, d2_mgr_.getQueueSize());
// Check the details of the NCR.
verifyNameChangeRequest(chg_type, rev, fwd, lease_->addr_.toText(), exp_dhcid,
lease_->cltt_ + lease_->valid_lft_,
lease_->valid_lft_);
}
/// @brief Test that calling queueNCR for NULL lease doesn't cause
/// an exception.
///
/// @param chg_type Name change type.
void testNullLease(const NameChangeType chg_type) {
lease_.reset();
ASSERT_NO_FATAL_FAILURE(queueNCR(chg_type, lease_));
EXPECT_EQ(0, d2_mgr_.getQueueSize());
}
};
/// @brief Test fixture class implementation for DHCPv6.
class NCRGenerator6Test : public NCRGeneratorTest<Lease6Ptr> {
public:
/// @brief Pointer to the DUID used in the tests.
DuidPtr duid_;
/// @brief Constructor.
///
/// Initializes DUID.
NCRGenerator6Test()
: duid_() {
duid_.reset(new DUID(DUID::fromText("01:02:03:04:05:06:07:08:09")));
}
/// @brief Implementation of the method creating DHCPv6 lease instance.
virtual void initLease() {
lease_.reset(new Lease6(Lease::TYPE_NA, IOAddress("2001:db8:1::1"),
duid_, 1234, 501, 502, 1, HWAddrPtr(), 0));
}
};
// Test creation of the NameChangeRequest for both forward and reverse
// mapping for the given lease.
TEST_F(NCRGenerator6Test, fwdRev) {
// Part of the domain name is in upper case, to test that it gets converted
// to lower case before DHCID is computed. So, we should get the same DHCID
// as if we typed domain-name in lower case.
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, true, "MYHOST.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
// Now try the same test with all lower case.
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, true, "myhost.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, true, "MYHOST.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, true, "myhost.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
}
// Checks that NameChangeRequests are not created when ddns updates are disabled.
TEST_F(NCRGenerator6Test, d2Disabled) {
{
SCOPED_TRACE("case CHG_REMOVE");
testD2Disabled(CHG_REMOVE);
}
{
SCOPED_TRACE("case CHG_ADD");
testD2Disabled(CHG_ADD);
}
}
// Test creation of the NameChangeRequest for reverse mapping in the
// given lease.
TEST_F(NCRGenerator6Test, revOnly) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, false, true, "myhost.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, false, true, "myhost.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
}
// Test creation of the NameChangeRequest for forward mapping in the
// given lease.
TEST_F(NCRGenerator6Test, fwdOnly) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, false, "myhost.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, false, "myhost.example.com.",
"000201BE0D7A66F8AB6C4082E7F8B81E2656667A102E3"
"D0ECCEA5E0DD71730F392119A");
}
}
// Test that NameChangeRequest is not generated when neither forward
// nor reverse DNS update has been performed for a lease.
TEST_F(NCRGenerator6Test, noFwdRevUpdate) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, false, false, "myhost.example.com.");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, false, false, "myhost.example.com.");
}
}
// Test that NameChangeRequest is not generated if the hostname hasn't been
// specified for a lease for which forward and reverse mapping has been set.
TEST_F(NCRGenerator6Test, noHostname) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, false, false, "");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, false, false, "");
}
}
// Test that NameChangeRequest is not generated if an invalid hostname has
// been specified for a lease for which forward and reverse mapping has been
// set.
TEST_F(NCRGenerator6Test, wrongHostname) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, false, false, "myhost...example.com.");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, false, false, "myhost...example.com.");
}
}
// Test that NameChangeRequest is not generated if the lease is not an
// address lease, i.e. is a prefix.
TEST_F(NCRGenerator6Test, wrongLeaseType) {
// Change lease type to delegated prefix.
lease_->type_ = Lease::TYPE_PD;
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, true, true, "myhost.example.org.");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, true, true, "myhost.example.org.");
}
}
// Test that NameChangeRequest is not generated if the lease is NULL,
// and that the call to queueNCR doesn't cause an exception or
// assertion.
TEST_F(NCRGenerator6Test, nullLease) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNullLease(CHG_REMOVE);
}
{
SCOPED_TRACE("case CHG_ADD");
testNullLease(CHG_ADD);
}
}
/// @brief Test fixture class implementation for DHCPv4.
class NCRGenerator4Test : public NCRGeneratorTest<Lease4Ptr> {
public:
/// @brief Pointer to HW address used by the tests.
HWAddrPtr hwaddr_;
/// @brief Constructor.
///
/// Initializes HW address.
NCRGenerator4Test()
: hwaddr_(new HWAddr(HWAddr::fromText("01:02:03:04:05:06"))) {
}
/// @brief Implementation of the method creating DHCPv4 lease instance.
virtual void initLease() {
lease_.reset(new Lease4(IOAddress("192.0.2.1"), hwaddr_, ClientIdPtr(),
100, time(NULL), 1));
}
};
// Test creation of the NameChangeRequest for both forward and reverse
// mapping for the given lease.
TEST_F(NCRGenerator4Test, fwdRev) {
// Part of the domain name is in upper case, to test that it gets converted
// to lower case before DHCID is computed. So, we should get the same DHCID
// as if we typed domain-name in lower case.
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, true, "MYHOST.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3"
"B03AB370BFF46BFA309AE7BFD");
}
// Now try the same with all lower case.
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, true, "myhost.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3"
"B03AB370BFF46BFA309AE7BFD");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, true, "MYHOST.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3"
"B03AB370BFF46BFA309AE7BFD");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, true, "myhost.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3"
"B03AB370BFF46BFA309AE7BFD");
}
}
// Checks that NameChangeRequests are not created when ddns updates are disabled.
TEST_F(NCRGenerator4Test, d2Disabled) {
{
SCOPED_TRACE("case CHG_REMOVE");
testD2Disabled(CHG_REMOVE);
}
{
SCOPED_TRACE("case CHG_ADD");
testD2Disabled(CHG_ADD);
}
}
// Test creation of the NameChangeRequest for reverse mapping in the
// given lease.
TEST_F(NCRGenerator4Test, revOnly) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, false, true, "myhost.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3B"
"03AB370BFF46BFA309AE7BFD");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, false, true, "myhost.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3B"
"03AB370BFF46BFA309AE7BFD");
}
}
// Test creation of the NameChangeRequest for forward mapping in the
// given lease.
TEST_F(NCRGenerator4Test, fwdOnly) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, false, "myhost.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3B"
"03AB370BFF46BFA309AE7BFD");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, false, "myhost.example.com.",
"000001E356D43E5F0A496D65BCA24D982D646140813E3B"
"03AB370BFF46BFA309AE7BFD");
}
}
// Test that NameChangeRequest is not generated when neither forward
// nor reverse DNS update has been performed for a lease.
TEST_F(NCRGenerator4Test, noFwdRevUpdate) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, false, false, "myhost.example.com.");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, false, false, "myhost.example.com.");
}
}
// Test that NameChangeRequest is not generated if the hostname hasn't been
// specified for a lease for which forward and reverse mapping has been set.
TEST_F(NCRGenerator4Test, noHostname) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, false, false, "");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, false, false, "");
}
}
// Test that NameChangeRequest is not generated if an invalid hostname has
// been specified for a lease for which forward and reverse mapping has been
// set.
TEST_F(NCRGenerator4Test, wrongHostname) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNoUpdate(CHG_REMOVE, false, false, "myhost...example.org.");
}
{
SCOPED_TRACE("case CHG_ADD");
testNoUpdate(CHG_ADD, false, false, "myhost...example.org.");
}
}
// Test that the correct NameChangeRequest is generated when the lease
// includes client identifier.
TEST_F(NCRGenerator4Test, useClientId) {
lease_->client_id_ = ClientId::fromText("01:01:01:01");
ASSERT_NO_FATAL_FAILURE(queueRemovalNCR(true, true, "myhost.example.com."));
ASSERT_EQ(1, d2_mgr_.getQueueSize());
verifyNameChangeRequest(isc::dhcp_ddns::CHG_REMOVE, true, true,
"192.0.2.1",
"000101C7AA5420483BDA99C437636EA7DA2FE18"
"31C9679FEB031C360CA571298F3D1FA",
lease_->cltt_ + lease_->valid_lft_, 100);
{
SCOPED_TRACE("case CHG_REMOVE");
testNCR(CHG_REMOVE, true, true, "myhost.example.com.",
"000101C7AA5420483BDA99C437636EA7DA2FE1831C9679"
"FEB031C360CA571298F3D1FA");
}
{
SCOPED_TRACE("case CHG_ADD");
testNCR(CHG_ADD, true, true, "myhost.example.com.",
"000101C7AA5420483BDA99C437636EA7DA2FE1831C9679"
"FEB031C360CA571298F3D1FA");
}
}
// Test that NameChangeRequest is not generated if the lease is NULL,
// and that the call to queueNCR doesn't cause an exception or
// assertion.
TEST_F(NCRGenerator4Test, nullLease) {
{
SCOPED_TRACE("case CHG_REMOVE");
testNullLease(CHG_REMOVE);
}
{
SCOPED_TRACE("case CHG_ADD");
testNullLease(CHG_ADD);
}
}
} // end of anonymous namespace
|
{"hexsha": "b44aba1a305dfd1f8506a42956a52c2c01ad4205", "size": 20638, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/lib/dhcpsrv/tests/ncr_generator_unittest.cc", "max_stars_repo_name": "kphf1995cm/kea", "max_stars_repo_head_hexsha": "2f6940ef5ed697f3f683035ed7a16046253add4d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib/dhcpsrv/tests/ncr_generator_unittest.cc", "max_issues_repo_name": "kphf1995cm/kea", "max_issues_repo_head_hexsha": "2f6940ef5ed697f3f683035ed7a16046253add4d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/dhcpsrv/tests/ncr_generator_unittest.cc", "max_forks_repo_name": "kphf1995cm/kea", "max_forks_repo_head_hexsha": "2f6940ef5ed697f3f683035ed7a16046253add4d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2255389718, "max_line_length": 86, "alphanum_fraction": 0.6402752205, "num_tokens": 5600}
|
import numpy as np
import matplotlib.pyplot as plt
def plot_confusion(conf, norm_conf, true_classes,
pred_classes, cmap=plt.cm.jet):
"""
A function to plot the confusion matrix for a given dataset.
The number of points given the [i, j] label is the value in each
box, and the normalized confusion score is shown by the color
of each box.
**Positional Arguments:**
- conf:
- the confusion matrix.
- norm_conf:
- the normalized confusion matrix.
    - true_classes:
        - the unique true class labels (used as y tick labels).
    - pred_classes:
        - the unique predicted cluster labels (used as x tick labels).
"""
fig = plt.figure()
ax = fig.add_subplot(111)
res = ax.imshow(norm_conf, cmap=cmap, interpolation='nearest',
aspect='auto')
ntrue, npred = conf.shape
    for i in range(ntrue):
        for j in range(npred):
            ax.annotate(str(int(conf[i, j])), xy=(j, i),
horizontalalignment='center',
verticalalignment='center')
plt.yticks(range(ntrue), true_classes)
plt.xticks(range(npred), pred_classes)
ax.set_xlabel('Predicted Label')
ax.set_ylabel('True Label')
ax.set_title('Confusion Matrix')
cb = fig.colorbar(res)
fig.tight_layout()
return fig
def confusion_matrix(M, D):
"""
A utility to compute the confusion matrix for a clustering algorithm.
Returns the raw confusion numbers, and also a normalized matrix
where the value is the confusion normalized by the total number
of points for a given cluster.
**Positional Arguments:**
- M:
- the true labels.
- D:
- the predicted labels.
"""
M = np.array(M)
D = np.array(D)
true_classes = np.unique(M)
pred_classes = np.unique(D)
    # cmtx[i, j] counts points with true label i assigned to predicted cluster j
    cmtx = np.zeros((len(true_classes), len(pred_classes)))
    # pmtx is cmtx normalized by the size of each predicted cluster
    pmtx = np.zeros((len(true_classes), len(pred_classes)))
    for j, pred in enumerate(pred_classes):
        # restrict to the true labels of the points assigned to this cluster
        Mcol = M[D == pred]
        n = len(Mcol)
        for i, true in enumerate(true_classes):
            cmtx[i, j] = np.sum(Mcol == true)
            pmtx[i, j] = cmtx[i, j]/float(n)
    fig_conf = plot_confusion(cmtx, pmtx, true_classes, pred_classes)
    return (cmtx, pmtx, fig_conf)
def purity(M, D):
"""
A utility to compute the purity of a clustering algorithm.
**Positional Arguments:**
- M:
- the true labels.
- D:
- the predicted labels.
"""
(cmtx, pmtx, fig_conf) = confusion_matrix(M, D)
    # for each predicted cluster (a column of cmtx), take the dominant true-label count
    purity = np.sum(cmtx.max(axis=0))/float(np.sum(cmtx))
return (purity, cmtx, pmtx, fig_conf)
def plot_laplacian(L, cmap=plt.cm.jet):
"""
A function to plot the graph laplacian for use with spectral
clustering visualizations.
**Positional Arguments:**
- L:
- the graph laplacian.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
minval = np.percentile(L, 0)
# since the diagonal will be very significant, this
# will eliminate it from our plots
maxval = np.percentile(L, 95)
res = ax.imshow(L, cmap=cmap, interpolation='nearest', vmin=minval,
vmax=maxval)
ax.set_ylabel('Training Example')
ax.set_xlabel('Training Example')
ax.set_title('Graph Laplacian')
cb = fig.colorbar(res)
return fig
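# A minimal usage sketch with toy labels; the arrays below are made-up data,
# not drawn from any dataset in this repo.
if __name__ == '__main__':
    M = [0, 0, 1, 1, 2, 2]   # true labels
    D = [0, 0, 1, 2, 2, 2]   # predicted cluster assignments
    score, cmtx, pmtx, fig = purity(M, D)
    print('purity: %.3f' % score)
    fig.savefig('confusion.png')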
|
{"hexsha": "696b9f6d92bc33f922a18d64a8146e5bf135f35e", "size": 3492, "ext": "py", "lang": "Python", "max_stars_repo_path": "clustering/utils.py", "max_stars_repo_name": "ebridge2/ams_446", "max_stars_repo_head_hexsha": "2c7d8db96c5875ee5b4c0e2d44dad80812ac4140", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clustering/utils.py", "max_issues_repo_name": "ebridge2/ams_446", "max_issues_repo_head_hexsha": "2c7d8db96c5875ee5b4c0e2d44dad80812ac4140", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clustering/utils.py", "max_forks_repo_name": "ebridge2/ams_446", "max_forks_repo_head_hexsha": "2c7d8db96c5875ee5b4c0e2d44dad80812ac4140", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7454545455, "max_line_length": 73, "alphanum_fraction": 0.6028064147, "include": true, "reason": "import numpy", "num_tokens": 846}
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
⊢ ∀ (a : α ⊕ β), ¬LiftRel r s a a
[PROOFSTEP]
rintro _ (⟨h⟩ | ⟨h⟩)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
a✝ : α
h : r a✝ a✝
⊢ False
[PROOFSTEP]
exact irrefl _ h
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
b✝ : β
h : s b✝ b✝
⊢ False
[PROOFSTEP]
exact irrefl _ h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsAntisymm α r
inst✝ : IsAntisymm β s
⊢ ∀ (a b : α ⊕ β), LiftRel r s a b → LiftRel r s b a → a = b
[PROOFSTEP]
rintro _ _ (⟨hab⟩ | ⟨hab⟩) (⟨hba⟩ | ⟨hba⟩)
[GOAL]
case inl.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsAntisymm α r
inst✝ : IsAntisymm β s
a✝ c✝ : α
hab : r a✝ c✝
hba : r c✝ a✝
⊢ inl a✝ = inl c✝
[PROOFSTEP]
rw [antisymm hab hba]
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsAntisymm α r
inst✝ : IsAntisymm β s
b✝ d✝ : β
hab : s b✝ d✝
hba : s d✝ b✝
⊢ inr b✝ = inr d✝
[PROOFSTEP]
rw [antisymm hab hba]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsRefl α r
inst✝ : IsRefl β s
⊢ ∀ (a : α ⊕ β), Lex r s a a
[PROOFSTEP]
rintro (a | a)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsRefl α r
inst✝ : IsRefl β s
a : α
⊢ Lex r s (inl a) (inl a)
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsRefl α r
inst✝ : IsRefl β s
a : β
⊢ Lex r s (inr a) (inr a)
[PROOFSTEP]
exacts [Lex.inl (refl _), Lex.inr (refl _)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
⊢ ∀ (a : α ⊕ β), ¬Lex r s a a
[PROOFSTEP]
rintro _ (⟨h⟩ | ⟨h⟩)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
a₁✝ : α
h : r a₁✝ a₁✝
⊢ False
[PROOFSTEP]
exact irrefl _ h
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsIrrefl α r
inst✝ : IsIrrefl β s
b₁✝ : β
h : s b₁✝ b₁✝
⊢ False
[PROOFSTEP]
exact irrefl _ h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrans α r
inst✝ : IsTrans β s
⊢ ∀ (a b c : α ⊕ β), Lex r s a b → Lex r s b c → Lex r s a c
[PROOFSTEP]
rintro _ _ _ (⟨hab⟩ | ⟨hab⟩) (⟨hbc⟩ | ⟨hbc⟩)
[GOAL]
case inl.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrans α r
inst✝ : IsTrans β s
a₁✝ a₂✝¹ : α
hab : r a₁✝ a₂✝¹
a₂✝ : α
hbc : r a₂✝¹ a₂✝
⊢ Lex r s (inl a₁✝) (inl a₂✝)
case inl.sep
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrans α r
inst✝ : IsTrans β s
a₁✝ a₂✝ : α
hab : r a₁✝ a₂✝
b✝ : β
⊢ Lex r s (inl a₁✝) (inr b✝)
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrans α r
inst✝ : IsTrans β s
b₁✝ b₂✝¹ : β
hab : s b₁✝ b₂✝¹
b₂✝ : β
hbc : s b₂✝¹ b₂✝
⊢ Lex r s (inr b₁✝) (inr b₂✝)
case sep.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsTrans α r
inst✝ : IsTrans β s
a✝ : α
b✝ b₂✝ : β
hbc : s b✝ b₂✝
⊢ Lex r s (inl a✝) (inr b₂✝)
[PROOFSTEP]
exacts [.inl (_root_.trans hab hbc), .sep _ _, .inr (_root_.trans hab hbc), .sep _ _]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsAntisymm α r
inst✝ : IsAntisymm β s
⊢ ∀ (a b : α ⊕ β), Lex r s a b → Lex r s b a → a = b
[PROOFSTEP]
rintro _ _ (⟨hab⟩ | ⟨hab⟩) (⟨hba⟩ | ⟨hba⟩)
[GOAL]
case inl.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsAntisymm α r
inst✝ : IsAntisymm β s
a₁✝ a₂✝ : α
hab : r a₁✝ a₂✝
hba : r a₂✝ a₁✝
⊢ inl a₁✝ = inl a₂✝
[PROOFSTEP]
rw [antisymm hab hba]
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
r : α → α → Prop
s : β → β → Prop
inst✝¹ : IsAntisymm α r
inst✝ : IsAntisymm β s
b₁✝ b₂✝ : β
hab : s b₁✝ b₂✝
hba : s b₂✝ b₁✝
⊢ inr b₁✝ = inr b₂✝
[PROOFSTEP]
rw [antisymm hab hba]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
a b : α ⊕ β
⊢ a < b ↔ a ≤ b ∧ ¬b ≤ a
[PROOFSTEP]
refine' ⟨fun hab => ⟨hab.mono (fun _ _ => le_of_lt) fun _ _ => le_of_lt, _⟩, _⟩
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
a b : α ⊕ β
hab : a < b
⊢ ¬b ≤ a
[PROOFSTEP]
rintro (⟨hba⟩ | ⟨hba⟩)
[GOAL]
case refine'_1.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
a✝ c✝ : α
hba : a✝ ≤ c✝
hab : inl c✝ < inl a✝
⊢ False
[PROOFSTEP]
exact hba.not_lt (inl_lt_inl_iff.1 hab)
[GOAL]
case refine'_1.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
b✝ d✝ : β
hba : b✝ ≤ d✝
hab : inr d✝ < inr b✝
⊢ False
[PROOFSTEP]
exact hba.not_lt (inr_lt_inr_iff.1 hab)
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
a b : α ⊕ β
⊢ a ≤ b ∧ ¬b ≤ a → a < b
[PROOFSTEP]
rintro ⟨⟨hab⟩ | ⟨hab⟩, hba⟩
[GOAL]
case refine'_2.intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
a✝ c✝ : α
hab : a✝ ≤ c✝
hba : ¬inl c✝ ≤ inl a✝
⊢ inl a✝ < inl c✝
[PROOFSTEP]
exact LiftRel.inl (hab.lt_of_not_le fun h => hba <| LiftRel.inl h)
[GOAL]
case refine'_2.intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (α ⊕ β) := instLESum
src✝ : LT (α ⊕ β) := instLTSum
b✝ d✝ : β
hab : b✝ ≤ d✝
hba : ¬inr d✝ ≤ inr b✝
⊢ inr b✝ < inr d✝
[PROOFSTEP]
exact LiftRel.inr (hab.lt_of_not_le fun h => hba <| LiftRel.inr h)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMinOrder (α ⊕ β)
a : α
⊢ ∃ b, b < a
[PROOFSTEP]
obtain ⟨b | b, h⟩ := exists_lt (inl a : Sum α β)
[GOAL]
case intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMinOrder (α ⊕ β)
a b : α
h : inl b < inl a
⊢ ∃ b, b < a
[PROOFSTEP]
exact ⟨b, inl_lt_inl_iff.1 h⟩
[GOAL]
case intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMinOrder (α ⊕ β)
a : α
b : β
h : inr b < inl a
⊢ ∃ b, b < a
[PROOFSTEP]
exact (not_inr_lt_inl h).elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMinOrder (α ⊕ β)
a : β
⊢ ∃ b, b < a
[PROOFSTEP]
obtain ⟨b | b, h⟩ := exists_lt (inr a : Sum α β)
[GOAL]
case intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMinOrder (α ⊕ β)
a : β
b : α
h : inl b < inr a
⊢ ∃ b, b < a
[PROOFSTEP]
exact (not_inl_lt_inr h).elim
[GOAL]
case intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMinOrder (α ⊕ β)
a b : β
h : inr b < inr a
⊢ ∃ b, b < a
[PROOFSTEP]
exact ⟨b, inr_lt_inr_iff.1 h⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMaxOrder (α ⊕ β)
a : α
⊢ ∃ b, a < b
[PROOFSTEP]
obtain ⟨b | b, h⟩ := exists_gt (inl a : Sum α β)
[GOAL]
case intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMaxOrder (α ⊕ β)
a b : α
h : inl a < inl b
⊢ ∃ b, a < b
[PROOFSTEP]
exact ⟨b, inl_lt_inl_iff.1 h⟩
[GOAL]
case intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMaxOrder (α ⊕ β)
a : α
b : β
h : inl a < inr b
⊢ ∃ b, a < b
[PROOFSTEP]
exact (not_inl_lt_inr h).elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMaxOrder (α ⊕ β)
a : β
⊢ ∃ b, a < b
[PROOFSTEP]
obtain ⟨b | b, h⟩ := exists_gt (inr a : Sum α β)
[GOAL]
case intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMaxOrder (α ⊕ β)
a : β
b : α
h : inr a < inl b
⊢ ∃ b, a < b
[PROOFSTEP]
exact (not_inr_lt_inl h).elim
[GOAL]
case intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : NoMaxOrder (α ⊕ β)
a b : β
h : inr a < inr b
⊢ ∃ b, a < b
[PROOFSTEP]
exact ⟨b, inr_lt_inr_iff.1 h⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : DenselyOrdered (α ⊕ β)
a b : α
h : a < b
⊢ ∃ a_1, a < a_1 ∧ a_1 < b
[PROOFSTEP]
obtain ⟨c | c, ha, hb⟩ := @exists_between (Sum α β) _ _ _ _ (inl_lt_inl_iff.2 h)
[GOAL]
case intro.inl.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : DenselyOrdered (α ⊕ β)
a b : α
h : a < b
c : α
ha : inl a < inl c
hb : inl c < inl b
⊢ ∃ a_1, a < a_1 ∧ a_1 < b
[PROOFSTEP]
exact ⟨c, inl_lt_inl_iff.1 ha, inl_lt_inl_iff.1 hb⟩
[GOAL]
case intro.inr.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : DenselyOrdered (α ⊕ β)
a b : α
h : a < b
c : β
ha : inl a < inr c
hb : inr c < inl b
⊢ ∃ a_1, a < a_1 ∧ a_1 < b
[PROOFSTEP]
exact (not_inl_lt_inr ha).elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : DenselyOrdered (α ⊕ β)
a b : β
h : a < b
⊢ ∃ a_1, a < a_1 ∧ a_1 < b
[PROOFSTEP]
obtain ⟨c | c, ha, hb⟩ := @exists_between (Sum α β) _ _ _ _ (inr_lt_inr_iff.2 h)
[GOAL]
case intro.inl.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : DenselyOrdered (α ⊕ β)
a b : β
h : a < b
c : α
ha : inr a < inl c
hb : inl c < inr b
⊢ ∃ a_1, a < a_1 ∧ a_1 < b
[PROOFSTEP]
exact (not_inl_lt_inr hb).elim
[GOAL]
case intro.inr.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : LT α
inst✝ : LT β
x✝ : DenselyOrdered (α ⊕ β)
a b : β
h : a < b
c : β
ha : inr a < inr c
hb : inr c < inr b
⊢ ∃ a_1, a < a_1 ∧ a_1 < b
[PROOFSTEP]
exact ⟨c, inr_lt_inr_iff.1 ha, inr_lt_inr_iff.1 hb⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
a b : _root_.Lex (α ⊕ β)
⊢ a < b ↔ a ≤ b ∧ ¬b ≤ a
[PROOFSTEP]
refine' ⟨fun hab => ⟨hab.mono (fun _ _ => le_of_lt) fun _ _ => le_of_lt, _⟩, _⟩
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
a b : _root_.Lex (α ⊕ β)
hab : a < b
⊢ ¬b ≤ a
[PROOFSTEP]
rintro (⟨hba⟩ | ⟨hba⟩ | ⟨b, a⟩)
[GOAL]
case refine'_1.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
a₁✝ a₂✝ : α
hba : a₁✝ ≤ a₂✝
hab : inl a₂✝ < inl a₁✝
⊢ False
[PROOFSTEP]
exact hba.not_lt (inl_lt_inl_iff.1 hab)
[GOAL]
case refine'_1.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
b₁✝ b₂✝ : β
hba : b₁✝ ≤ b₂✝
hab : inr b₂✝ < inr b₁✝
⊢ False
[PROOFSTEP]
exact hba.not_lt (inr_lt_inr_iff.1 hab)
[GOAL]
case refine'_1.sep
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
b : α
a : β
hab : inr a < inl b
⊢ False
[PROOFSTEP]
exact not_inr_lt_inl hab
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
a b : _root_.Lex (α ⊕ β)
⊢ a ≤ b ∧ ¬b ≤ a → a < b
[PROOFSTEP]
rintro ⟨⟨hab⟩ | ⟨hab⟩ | ⟨a, b⟩, hba⟩
[GOAL]
case refine'_2.intro.inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
a₁✝ a₂✝ : α
hab : a₁✝ ≤ a₂✝
hba : ¬inl a₂✝ ≤ inl a₁✝
⊢ inl a₁✝ < inl a₂✝
[PROOFSTEP]
exact Lex.inl (hab.lt_of_not_le fun h => hba <| Lex.inl h)
[GOAL]
case refine'_2.intro.inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
b₁✝ b₂✝ : β
hab : b₁✝ ≤ b₂✝
hba : ¬inr b₂✝ ≤ inr b₁✝
⊢ inr b₁✝ < inr b₂✝
[PROOFSTEP]
exact Lex.inr (hab.lt_of_not_le fun h => hba <| Lex.inr h)
[GOAL]
case refine'_2.intro.sep
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : Preorder α
inst✝ : Preorder β
src✝¹ : LE (_root_.Lex (α ⊕ β)) := Lex.LE
src✝ : LT (_root_.Lex (α ⊕ β)) := Lex.LT
a : α
b : β
hba : ¬inr b ≤ inl a
⊢ inl a < inr b
[PROOFSTEP]
exact Lex.sep _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : LE α
inst✝¹ : OrderBot α
inst✝ : LE β
⊢ ∀ (a : _root_.Lex (α ⊕ β)), ⊥ ≤ a
[PROOFSTEP]
rintro (a | b)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : LE α
inst✝¹ : OrderBot α
inst✝ : LE β
a : α
⊢ ⊥ ≤ inl a
[PROOFSTEP]
exact Lex.inl bot_le
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : LE α
inst✝¹ : OrderBot α
inst✝ : LE β
b : β
⊢ ⊥ ≤ inr b
[PROOFSTEP]
exact Lex.sep _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : LE α
inst✝¹ : LE β
inst✝ : OrderTop β
⊢ ∀ (a : _root_.Lex (α ⊕ β)), a ≤ ⊤
[PROOFSTEP]
rintro (a | b)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : LE α
inst✝¹ : LE β
inst✝ : OrderTop β
a : α
⊢ inl a ≤ ⊤
[PROOFSTEP]
exact Lex.sep _ _
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : LE α
inst✝¹ : LE β
inst✝ : OrderTop β
b : β
⊢ inr b ≤ ⊤
[PROOFSTEP]
exact Lex.inr le_top
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a✝ : α✝
b✝ : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
a b : (α ⊕ β) ⊕ γ
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
a ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
a ≤ b
[PROOFSTEP]
rcases a with ((_ | _) | _)
[GOAL]
case inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b✝ : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
b : (α ⊕ β) ⊕ γ
val✝ : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
inl (inl val✝) ≤ b
[PROOFSTEP]
rcases b with ((_ | _) | _)
[GOAL]
case inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b✝ : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
b : (α ⊕ β) ⊕ γ
val✝ : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
inl (inr val✝) ≤ b
[PROOFSTEP]
rcases b with ((_ | _) | _)
[GOAL]
case inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b✝ : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
b : (α ⊕ β) ⊕ γ
val✝ : γ
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
inr val✝ ≤ b
[PROOFSTEP]
rcases b with ((_ | _) | _)
[GOAL]
case inl.inl.inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ val✝ : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝¹)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝)) ↔
inl (inl val✝¹) ≤ inl (inl val✝)
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inl.inl.inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ : α
val✝ : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝¹)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝)) ↔
inl (inl val✝¹) ≤ inl (inr val✝)
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inl.inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ : α
val✝ : γ
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝¹)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝) ↔
inl (inl val✝¹) ≤ inr val✝
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inl.inr.inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ : β
val✝ : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝¹)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝)) ↔
inl (inr val✝¹) ≤ inl (inl val✝)
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inl.inr.inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ val✝ : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝¹)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝)) ↔
inl (inr val✝¹) ≤ inl (inr val✝)
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inl.inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ : β
val✝ : γ
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝¹)) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝) ↔
inl (inr val✝¹) ≤ inr val✝
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inr.inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ : γ
val✝ : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝¹) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inl val✝)) ↔
inr val✝¹ ≤ inl (inl val✝)
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inr.inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ : γ
val✝ : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝¹) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl (inr val✝)) ↔
inr val✝¹ ≤ inl (inr val✝)
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
case inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝⁵ : LE α✝
inst✝⁴ : LE β✝
inst✝³ : LE γ✝
a : α✝
b : β✝
c : γ✝
α : Type u_5
β : Type u_6
γ : Type u_7
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
src✝ : (α ⊕ β) ⊕ γ ≃ α ⊕ β ⊕ γ := Equiv.sumAssoc α β γ
val✝¹ val✝ : γ
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝¹) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr val✝) ↔
inr val✝¹ ≤ inr val✝
[PROOFSTEP]
simp [Equiv.sumAssoc]
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a : α✝
b : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
⊢ ∀ {a b : (α ⊕ β)ᵒᵈ},
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
a ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
a ≤ b
[PROOFSTEP]
rintro (a | a) (b | b)
[GOAL]
case inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
a b : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl b) ↔
inl a ≤ inl b
[PROOFSTEP]
change inl (toDual a) ≤ inl (toDual b) ↔ toDual (inl a) ≤ toDual (inl b)
[GOAL]
case inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
a b : α
⊢ inl (↑toDual a) ≤ inl (↑toDual b) ↔ ↑toDual (inl a) ≤ ↑toDual (inl b)
[PROOFSTEP]
simp [toDual_le_toDual, inl_le_inl_iff]
[GOAL]
case inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
a : α
b : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr b) ↔
inl a ≤ inr b
[PROOFSTEP]
exact iff_of_false (@not_inl_le_inr (OrderDual β) (OrderDual α) _ _ _ _) not_inr_le_inl
[GOAL]
case inr.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
a : β
b : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl b) ↔
inr a ≤ inl b
[PROOFSTEP]
exact iff_of_false (@not_inr_le_inl (OrderDual α) (OrderDual β) _ _ _ _) not_inl_le_inr
[GOAL]
case inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
a b : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr b) ↔
inr a ≤ inr b
[PROOFSTEP]
change inr (toDual a) ≤ inr (toDual b) ↔ toDual (inr a) ≤ toDual (inr b)
[GOAL]
case inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : (α ⊕ β)ᵒᵈ ≃ (α ⊕ β)ᵒᵈ := Equiv.refl (α ⊕ β)ᵒᵈ
a b : β
⊢ inr (↑toDual a) ≤ inr (↑toDual b) ↔ ↑toDual (inr a) ≤ ↑toDual (inr b)
[PROOFSTEP]
simp [toDual_le_toDual, inr_le_inr_iff]
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : (_root_.Lex (α ⊕ β))ᵒᵈ
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
a ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
a ≤ b
[PROOFSTEP]
rcases a with (a | a)
[GOAL]
case inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
b : (_root_.Lex (α ⊕ β))ᵒᵈ
a : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
inl a ≤ b
[PROOFSTEP]
rcases b with (b | b)
[GOAL]
case inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
b : (_root_.Lex (α ⊕ β))ᵒᵈ
a : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
b ↔
inr a ≤ b
[PROOFSTEP]
rcases b with (b | b)
[GOAL]
case inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl b) ↔
inl a ≤ inl b
case inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a : α
b : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr b) ↔
inl a ≤ inr b
case inr.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a : β
b : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl b) ↔
inr a ≤ inl b
case inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr b) ↔
inr a ≤ inr b
[PROOFSTEP]
simp
[GOAL]
case inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : α
⊢ inr a ≤ inr b ↔ inl a ≤ inl b
[PROOFSTEP]
change toLex (inr <| toDual a) ≤ toLex (inr <| toDual b) ↔ toDual (toLex <| inl a) ≤ toDual (toLex <| inl b)
[GOAL]
case inl.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : α
⊢ ↑toLex (inr (↑toDual a)) ≤ ↑toLex (inr (↑toDual b)) ↔ ↑toDual (↑toLex (inl a)) ≤ ↑toDual (↑toLex (inl b))
[PROOFSTEP]
simp [toDual_le_toDual, Lex.inl_le_inl_iff, Lex.inr_le_inr_iff]
[GOAL]
case inl.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a : α
b : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr b) ↔
inl a ≤ inr b
[PROOFSTEP]
exact iff_of_false (@Lex.not_inr_le_inl (OrderDual β) (OrderDual α) _ _ _ _) Lex.not_inr_le_inl
[GOAL]
case inr.inl
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a : β
b : α
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inl b) ↔
inr a ≤ inl b
[PROOFSTEP]
exact iff_of_true (@Lex.inl_le_inr (OrderDual β) (OrderDual α) _ _ _ _) (Lex.inl_le_inr _ _)
[GOAL]
case inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : β
⊢ ↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr a) ≤
↑{ toFun := src✝.toFun, invFun := src✝.invFun, left_inv := (_ : Function.LeftInverse src✝.invFun src✝.toFun),
right_inv := (_ : Function.RightInverse src✝.invFun src✝.toFun) }
(inr b) ↔
inr a ≤ inr b
[PROOFSTEP]
change toLex (inl <| toDual a) ≤ toLex (inl <| toDual b) ↔ toDual (toLex <| inr a) ≤ toDual (toLex <| inr b)
[GOAL]
case inr.inr
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : LE α✝
inst✝³ : LE β✝
inst✝² : LE γ
a✝ : α✝
b✝ : β✝
c : γ
α : Type u_5
β : Type u_6
inst✝¹ : LE α
inst✝ : LE β
src✝ : α ⊕ β ≃ β ⊕ α := Equiv.sumComm α β
a b : β
⊢ ↑toLex (inl (↑toDual a)) ≤ ↑toLex (inl (↑toDual b)) ↔ ↑toDual (↑toLex (inr a)) ≤ ↑toDual (↑toLex (inr b))
[PROOFSTEP]
simp [toDual_le_toDual, Lex.inl_le_inl_iff, Lex.inr_le_inr_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
a b : WithBot α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) a ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) b ↔
a ≤ b
[PROOFSTEP]
rcases a with (a | _)
[GOAL]
case none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
b : WithBot α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) none ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) b ↔
none ≤ b
[PROOFSTEP]
rcases b with (b | _)
[GOAL]
case some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
b : WithBot α
val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) (Option.some val✝) ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) b ↔
Option.some val✝ ≤ b
[PROOFSTEP]
rcases b with (b | _)
[GOAL]
case none.none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) none ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) none ↔
none ≤ none
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case none.some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) none ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) (Option.some val✝) ↔
none ≤ Option.some val✝
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case some.none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) (Option.some val✝) ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) none ↔
Option.some val✝ ≤ none
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case some.some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝¹ val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) (Option.some val✝¹) ≤
↑((Equiv.optionEquivSumPUnit α).trans ((Equiv.sumComm α PUnit).trans toLex)) (Option.some val✝) ↔
Option.some val✝¹ ≤ Option.some val✝
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case some.none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝ : α
⊢ ¬Option.some val✝ ≤ none
[PROOFSTEP]
exact not_coe_le_bot _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
a b : WithTop α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) a ≤ ↑((Equiv.optionEquivSumPUnit α).trans toLex) b ↔ a ≤ b
[PROOFSTEP]
rcases a with (a | _)
[GOAL]
case none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
b : WithTop α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) none ≤ ↑((Equiv.optionEquivSumPUnit α).trans toLex) b ↔ none ≤ b
[PROOFSTEP]
rcases b with (b | _)
[GOAL]
case some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
b : WithTop α
val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) (Option.some val✝) ≤ ↑((Equiv.optionEquivSumPUnit α).trans toLex) b ↔
Option.some val✝ ≤ b
[PROOFSTEP]
rcases b with (b | _)
[GOAL]
case none.none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) none ≤ ↑((Equiv.optionEquivSumPUnit α).trans toLex) none ↔ none ≤ none
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case none.some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) none ≤ ↑((Equiv.optionEquivSumPUnit α).trans toLex) (Option.some val✝) ↔
none ≤ Option.some val✝
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case some.none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) (Option.some val✝) ≤ ↑((Equiv.optionEquivSumPUnit α).trans toLex) none ↔
Option.some val✝ ≤ none
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case some.some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝¹ val✝ : α
⊢ ↑((Equiv.optionEquivSumPUnit α).trans toLex) (Option.some val✝¹) ≤
↑((Equiv.optionEquivSumPUnit α).trans toLex) (Option.some val✝) ↔
Option.some val✝¹ ≤ Option.some val✝
[PROOFSTEP]
simp [swap, Equiv.optionEquivSumPUnit]
[GOAL]
case none.some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝ : LE α
val✝ : α
⊢ ¬none ≤ Option.some val✝
[PROOFSTEP]
exact not_top_le_coe _
|
{"mathlib_filename": "Mathlib.Data.Sum.Order", "llama_tokens": 24017}
|
SUBROUTINE MB03OD( JOBQR, M, N, A, LDA, JPVT, RCOND, SVLMAX, TAU,
$ RANK, SVAL, DWORK, LDWORK, INFO )
C
C SLICOT RELEASE 5.7.
C
C Copyright (c) 2002-2020 NICONET e.V.
C
C PURPOSE
C
C To compute (optionally) a rank-revealing QR factorization of a
C real general M-by-N matrix A, which may be rank-deficient,
C and estimate its effective rank using incremental condition
C estimation.
C
C The routine uses a QR factorization with column pivoting:
C A * P = Q * R, where R = [ R11 R12 ],
C [ 0 R22 ]
C with R11 defined as the largest leading submatrix whose estimated
C condition number is less than 1/RCOND. The order of R11, RANK,
C is the effective rank of A.
C
C MB03OD does not perform any scaling of the matrix A.
C
C ARGUMENTS
C
C Mode Parameters
C
C JOBQR CHARACTER*1
C = 'Q': Perform a QR factorization with column pivoting;
C = 'N': Do not perform the QR factorization (but assume
C that it has been done outside).
C
C Input/Output Parameters
C
C M (input) INTEGER
C The number of rows of the matrix A. M >= 0.
C
C N (input) INTEGER
C The number of columns of the matrix A. N >= 0.
C
C A (input/output) DOUBLE PRECISION array, dimension
C ( LDA, N )
C On entry with JOBQR = 'Q', the leading M by N part of this
C array must contain the given matrix A.
C On exit with JOBQR = 'Q', the leading min(M,N) by N upper
C triangular part of A contains the triangular factor R,
C and the elements below the diagonal, with the array TAU,
C represent the orthogonal matrix Q as a product of
C min(M,N) elementary reflectors.
C On entry and on exit with JOBQR = 'N', the leading
C min(M,N) by N upper triangular part of A contains the
C triangular factor R, as determined by the QR factorization
C with pivoting. The elements below the diagonal of A are
C not referenced.
C
C LDA INTEGER
C The leading dimension of the array A. LDA >= max(1,M).
C
C JPVT (input/output) INTEGER array, dimension ( N )
C On entry with JOBQR = 'Q', if JPVT(i) <> 0, the i-th
C column of A is an initial column, otherwise it is a free
C column. Before the QR factorization of A, all initial
C columns are permuted to the leading positions; only the
C remaining free columns are moved as a result of column
C pivoting during the factorization. For rank determination
C it is preferable that all columns be free.
C On exit with JOBQR = 'Q', if JPVT(i) = k, then the i-th
C column of A*P was the k-th column of A.
C Array JPVT is not referenced when JOBQR = 'N'.
C
C RCOND (input) DOUBLE PRECISION
C RCOND is used to determine the effective rank of A, which
C is defined as the order of the largest leading triangular
C submatrix R11 in the QR factorization with pivoting of A,
C whose estimated condition number is less than 1/RCOND.
C RCOND >= 0.
C NOTE that when SVLMAX > 0, the estimated rank could be
C less than that defined above (see SVLMAX).
C
C SVLMAX (input) DOUBLE PRECISION
C If A is a submatrix of another matrix B, and the rank
C decision should be related to that matrix, then SVLMAX
C should be an estimate of the largest singular value of B
C (for instance, the Frobenius norm of B). If this is not
C the case, the input value SVLMAX = 0 should work.
C SVLMAX >= 0.
C
C TAU (output) DOUBLE PRECISION array, dimension ( MIN( M, N ) )
C On exit with JOBQR = 'Q', the leading min(M,N) elements of
C TAU contain the scalar factors of the elementary
C reflectors.
C Array TAU is not referenced when JOBQR = 'N'.
C
C RANK (output) INTEGER
C The effective (estimated) rank of A, i.e. the order of
C the submatrix R11.
C
C SVAL (output) DOUBLE PRECISION array, dimension ( 3 )
C The estimates of some of the singular values of the
C triangular factor R:
C SVAL(1): largest singular value of R(1:RANK,1:RANK);
C SVAL(2): smallest singular value of R(1:RANK,1:RANK);
C SVAL(3): smallest singular value of R(1:RANK+1,1:RANK+1),
C if RANK < MIN( M, N ), or of R(1:RANK,1:RANK),
C otherwise.
C If the triangular factorization is a rank-revealing one
C (which will be the case if the leading columns were well-
C conditioned), then SVAL(1) will also be an estimate for
C the largest singular value of A, and SVAL(2) and SVAL(3)
C will be estimates for the RANK-th and (RANK+1)-st singular
C values of A, respectively.
C By examining these values, one can confirm that the rank
C is well defined with respect to the chosen value of RCOND.
C The ratio SVAL(1)/SVAL(2) is an estimate of the condition
C number of R(1:RANK,1:RANK).
C
C Workspace
C
C DWORK DOUBLE PRECISION array, dimension ( LDWORK )
C On exit, if INFO = 0, DWORK(1) returns the optimal value
C of LDWORK.
C
C LDWORK INTEGER
C The length of the array DWORK.
C LDWORK >= 3*N + 1, if JOBQR = 'Q';
C LDWORK >= max( 1, 2*min( M, N ) ), if JOBQR = 'N'.
C For good performance when JOBQR = 'Q', LDWORK should be
C larger. Specifically, LDWORK >= 2*N + ( N + 1 )*NB, where
C NB is the optimal block size for the LAPACK Library
C routine DGEQP3.
C
C If LDWORK = -1, then a workspace query is assumed;
C the routine only calculates the optimal size of the
C DWORK array, returns this value as the first entry of
C the DWORK array, and no error message related to LDWORK
C is issued by XERBLA.
C
C Error Indicator
C
C INFO INTEGER
C = 0: successful exit
C < 0: if INFO = -i, the i-th argument had an illegal
C value.
C
C METHOD
C
C The routine computes or uses a QR factorization with column
C pivoting of A, A * P = Q * R, with R defined above, and then
C finds the largest leading submatrix whose estimated condition
C number is less than 1/RCOND, taking the possible positive value of
C SVLMAX into account. This is performed using the LAPACK
C incremental condition estimation scheme and a slightly modified
C rank decision test.
C
C CONTRIBUTOR
C
C V. Sima, Katholieke Univ. Leuven, Belgium, Nov. 1996.
C
C REVISIONS
C
C V. Sima, Research Institute for Informatics, Bucharest, Mar. 2005,
C Aug. 2011.
C
C ******************************************************************
C
C .. Parameters ..
INTEGER IMAX, IMIN
PARAMETER ( IMAX = 1, IMIN = 2 )
DOUBLE PRECISION ZERO, ONE
PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 )
C .. Scalar Arguments ..
CHARACTER JOBQR
INTEGER INFO, LDA, LDWORK, M, N, RANK
DOUBLE PRECISION RCOND, SVLMAX
C .. Array Arguments ..
INTEGER JPVT( * )
DOUBLE PRECISION A( LDA, * ), SVAL( 3 ), TAU( * ), DWORK( * )
C .. Local Scalars ..
LOGICAL LJOBQR, LQUERY
INTEGER I, ISMAX, ISMIN, MAXWRK, MINWRK, MN
DOUBLE PRECISION C1, C2, S1, S2, SMAX, SMAXPR, SMIN, SMINPR
C ..
C .. External Functions ..
LOGICAL LSAME
EXTERNAL LSAME
C .. External Subroutines ..
EXTERNAL DGEQP3, DLAIC1, XERBLA
C .. Intrinsic Functions ..
INTRINSIC ABS, INT, MAX, MIN
C ..
C .. Executable Statements ..
C
LJOBQR = LSAME( JOBQR, 'Q' )
MN = MIN( M, N )
IF( LJOBQR ) THEN
MINWRK = 3*N + 1
ELSE
MINWRK = MAX( 1, 2*MN )
END IF
MAXWRK = MINWRK
C
C Test the input scalar arguments.
C
INFO = 0
IF( .NOT.LJOBQR .AND. .NOT.LSAME( JOBQR, 'N' ) ) THEN
INFO = -1
ELSE IF( M.LT.0 ) THEN
INFO = -2
ELSE IF( N.LT.0 ) THEN
INFO = -3
ELSE IF( LDA.LT.MAX( 1, M ) ) THEN
INFO = -5
ELSE IF( RCOND.LT.ZERO ) THEN
INFO = -7
ELSE IF( SVLMAX.LT.ZERO ) THEN
INFO = -8
ELSE
LQUERY = LDWORK.EQ.-1
IF ( LJOBQR ) THEN
CALL DGEQP3( M, N, A, LDA, JPVT, TAU, DWORK, -1, INFO )
MAXWRK = MAX( MAXWRK, INT( DWORK(1) ) )
END IF
IF( LDWORK.LT.MINWRK .AND. .NOT.LQUERY )
$ INFO = -13
END IF
C
IF( INFO.NE.0 ) THEN
CALL XERBLA( 'MB03OD', -INFO )
RETURN
ELSE IF( LQUERY ) THEN
DWORK( 1 ) = MAXWRK
RETURN
END IF
C
C Quick return if possible
C
IF( MN.EQ.0 ) THEN
RANK = 0
SVAL( 1 ) = ZERO
SVAL( 2 ) = ZERO
SVAL( 3 ) = ZERO
DWORK( 1 ) = ONE
RETURN
END IF
C
IF ( LJOBQR ) THEN
C
C Compute QR factorization with column pivoting of A:
C A * P = Q * R
C Workspace need 3*N + 1;
C prefer 2*N + (N+1)*NB.
C        Details of the Householder reflectors are stored in TAU.
C
CALL DGEQP3( M, N, A, LDA, JPVT, TAU, DWORK, LDWORK, INFO )
END IF
C
C Determine RANK using incremental condition estimation
C
ISMIN = 1
ISMAX = MN + 1
DWORK( ISMIN ) = ONE
DWORK( ISMAX ) = ONE
SMAX = ABS( A( 1, 1 ) )
SMIN = SMAX
IF( SMAX.EQ.ZERO .OR. SVLMAX*RCOND.GT.SMAX ) THEN
RANK = 0
SVAL( 1 ) = SMAX
SVAL( 2 ) = ZERO
SVAL( 3 ) = ZERO
ELSE
RANK = 1
SMINPR = SMIN
C
10 CONTINUE
IF( RANK.LT.MN ) THEN
I = RANK + 1
CALL DLAIC1( IMIN, RANK, DWORK( ISMIN ), SMIN, A( 1, I ),
$ A( I, I ), SMINPR, S1, C1 )
CALL DLAIC1( IMAX, RANK, DWORK( ISMAX ), SMAX, A( 1, I ),
$ A( I, I ), SMAXPR, S2, C2 )
C
IF( SVLMAX*RCOND.LE.SMAXPR ) THEN
IF( SVLMAX*RCOND.LE.SMINPR ) THEN
IF( SMAXPR*RCOND.LE.SMINPR ) THEN
DO 20 I = 1, RANK
DWORK( ISMIN+I-1 ) = S1*DWORK( ISMIN+I-1 )
DWORK( ISMAX+I-1 ) = S2*DWORK( ISMAX+I-1 )
20 CONTINUE
DWORK( ISMIN+RANK ) = C1
DWORK( ISMAX+RANK ) = C2
SMIN = SMINPR
SMAX = SMAXPR
RANK = RANK + 1
GO TO 10
END IF
END IF
END IF
END IF
SVAL( 1 ) = SMAX
SVAL( 2 ) = SMIN
SVAL( 3 ) = SMINPR
END IF
C
DWORK( 1 ) = MAXWRK
RETURN
C *** Last line of MB03OD ***
END
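For readers who want a quick reference implementation, here is a minimal Python sketch of the same rank-revealing idea (an illustration, not part of SLICOT): it uses SciPy's pivoted QR and an exact SVD of the leading triangular blocks in place of LAPACK's incremental condition estimator DLAIC1, so on borderline matrices the returned rank may differ slightly from MB03OD.

import numpy as np
from scipy.linalg import qr

def estimate_rank(A, rcond, svlmax=0.0):
    # QR factorization with column pivoting: A @ P = Q @ R.
    _, R, _ = qr(A, mode="economic", pivoting=True)
    rank = 0
    for k in range(1, min(A.shape) + 1):
        # Singular values of the leading k-by-k triangular block R11.
        s = np.linalg.svd(R[:k, :k], compute_uv=False)
        smax, smin = s[0], s[-1]
        # Accept R11 only while its condition number stays below 1/RCOND and
        # its smallest singular value is not negligible w.r.t. SVLMAX
        # (mirroring the rank decision test of MB03OD above).
        if smin <= rcond * smax or smin <= rcond * svlmax:
            break
        rank = k
    return rank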
|
{"hexsha": "70f6f0d85b6592c0d139a8ee11b30b7211e5bbc2", "size": 11412, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/MB03OD.f", "max_stars_repo_name": "bnavigator/SLICOT-Reference", "max_stars_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-10T23:47:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:34:43.000Z", "max_issues_repo_path": "src/MB03OD.f", "max_issues_repo_name": "RJHKnight/slicotr", "max_issues_repo_head_hexsha": "a7332d459aa0867d3bc51f2a5dd70bd75ab67ec0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-07T22:26:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:01:07.000Z", "max_forks_repo_path": "src/MB03OD.f", "max_forks_repo_name": "RJHKnight/slicotr", "max_forks_repo_head_hexsha": "a7332d459aa0867d3bc51f2a5dd70bd75ab67ec0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-11-26T11:06:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T00:37:21.000Z", "avg_line_length": 37.0519480519, "max_line_length": 72, "alphanum_fraction": 0.542849632, "num_tokens": 3334}
|
import sys
sys.path.append("..")
import utils
from utils import *
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
def augment_feature_vector(X):
"""
Adds the x[i][0] = 1 feature for each data point x[i].
Args:
X - a NumPy matrix of n data points, each with d - 1 features
Returns: X_augment, an (n, d) NumPy array with the added feature for each datapoint
"""
column_of_ones = np.zeros([len(X), 1]) + 1
return np.hstack((column_of_ones, X))
def compute_probabilities(X, theta, temp_parameter):
"""
Computes, for each datapoint X[i], the probability that X[i] is labeled as j
for j = 0, 1, ..., k-1
Args:
X - (n, d) NumPy array (n datapoints each with d features)
theta - (k, d) NumPy array, where row j represents the parameters of our model for label j
temp_parameter - the temperature parameter of softmax function (scalar)
Returns:
H - (k, n) NumPy array, where each entry H[j][i] is the probability that X[i] is labeled as j
"""
# Compute the matrix of theta*X' (each row is a category, column an example)
R = (theta.dot(X.T)) / temp_parameter
# Compute fixed deduction factor for numerical stability (c is a vector: 1xn)
c = np.max(R, axis=0)
# Compute H matrix
H = np.exp(R - c)
# Divide H by the normalizing term
H = H / np.sum(H, axis=0)
return H
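# A small sanity check, added as a hypothetical helper (not part of the
# original module): with an all-zero theta the tempered softmax is uniform,
# and every column of H must sum to 1 since each column is a distribution
# over the k labels.
def _demo_compute_probabilities():
    X_demo = np.arange(6.0).reshape(2, 3)  # 2 datapoints with 3 features each
    theta_demo = np.zeros((4, 3))          # k = 4 labels, all-zero parameters
    H = compute_probabilities(X_demo, theta_demo, temp_parameter=1.0)
    assert np.allclose(H.sum(axis=0), 1.0)  # columns are distributions
    assert np.allclose(H, 0.25)             # zero theta gives the uniform 1/k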
def compute_cost_function(X, Y, theta, lambda_factor, temp_parameter):
"""
Computes the total cost over every datapoint.
Args:
X - (n, d) NumPy array (n datapoints each with d features)
Y - (n, ) NumPy array containing the labels (a number from 0-9) for each
data point
theta - (k, d) NumPy array, where row j represents the parameters of our
model for label j
lambda_factor - the regularization constant (scalar)
temp_parameter - the temperature parameter of softmax function (scalar)
Returns
c - the cost value (scalar)
"""
# Get number of labels
k = theta.shape[0]
# Get number of examples
n = X.shape[0]
# avg error term
# Clip prob matrix to avoid NaN instances
clip_prob_matrix = np.clip(compute_probabilities(X, theta, temp_parameter), 1e-15, 1 - 1e-15)
# Take the log of the matrix of probabilities
log_clip_matrix = np.log(clip_prob_matrix)
# Create a sparse matrix of [[y(i) == j]]
M = sparse.coo_matrix(([1] * n, (Y, range(n))), shape=(k, n)).toarray()
# Only add terms of log(matrix of prob) where M == 1
error_term = (-1 / n) * np.sum(log_clip_matrix[M == 1])
# Regularization term
reg_term = (lambda_factor / 2) * np.linalg.norm(theta) ** 2
return error_term + reg_term
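# For reference, the quantity computed by compute_cost_function is
#     J(theta) = -(1/n) * sum_i log p(y_i | x_i; theta) + (lambda/2) * ||theta||_F^2
# where p(. | x; theta) is the tempered softmax from compute_probabilities and
# ||.||_F is the Frobenius norm (np.linalg.norm of the theta matrix).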
def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):
"""
Runs one step of batch gradient descent
Args:
X - (n, d) NumPy array (n datapoints each with d features)
Y - (n, ) NumPy array containing the labels (a number from 0-9) for each
data point
theta - (k, d) NumPy array, where row j represents the parameters of our
model for label j
alpha - the learning rate (scalar)
lambda_factor - the regularization constant (scalar)
temp_parameter - the temperature parameter of softmax function (scalar)
Returns:
theta - (k, d) NumPy array that is the final value of parameters theta
"""
# Get number of labels
k = theta.shape[0]
# Get number of examples
n = X.shape[0]
# Create spare matrix of [[y(i) == j]]
M = sparse.coo_matrix(([1] * n, (Y, range(n))), shape=(k, n)).toarray()
# Matrix of Probabilities
P = compute_probabilities(X, theta, temp_parameter)
# Gradient matrix of theta
grad_theta = (-1 / (temp_parameter * n)) * ((M - P) @ X) + lambda_factor * theta
# Gradient descent update of theta matrix
theta = theta - alpha * grad_theta
return theta
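# Hypothetical sanity-check helper (an addition; it assumes the learning rate
# is small enough): a single batch gradient step starting from theta = 0
# should not increase the regularized cost.
def _demo_gradient_step():
    rng = np.random.RandomState(0)
    X_demo = augment_feature_vector(rng.rand(10, 4))  # add the bias feature
    Y_demo = rng.randint(0, 3, size=10)               # k = 3 labels
    theta0 = np.zeros((3, X_demo.shape[1]))
    cost0 = compute_cost_function(X_demo, Y_demo, theta0, 1e-4, 1.0)
    theta1 = run_gradient_descent_iteration(X_demo, Y_demo, theta0, 0.1, 1e-4, 1.0)
    cost1 = compute_cost_function(X_demo, Y_demo, theta1, 1e-4, 1.0)
    assert cost1 <= cost0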
def update_y(train_y, test_y):
"""
Changes the old digit labels for the training and test set for the new (mod 3)
labels.
Args:
train_y - (n, ) NumPy array containing the labels (a number between 0-9)
for each datapoint in the training set
test_y - (n, ) NumPy array containing the labels (a number between 0-9)
for each datapoint in the test set
Returns:
train_y_mod3 - (n, ) NumPy array containing the new labels (a number between 0-2)
for each datapoint in the training set
test_y_mod3 - (n, ) NumPy array containing the new labels (a number between 0-2)
for each datapoint in the test set
"""
train_y_mod3 = np.mod(train_y, 3)
test_y_mod3 = np.mod(test_y, 3)
return train_y_mod3, test_y_mod3
def compute_test_error_mod3(X, Y, theta, temp_parameter):
"""
    Returns the error rate obtained when the classifier's digit predictions are reduced mod 3 and
    compared against the new (mod 3) labels.
Args:
X - (n, d - 1) NumPy array (n datapoints each with d - 1 features)
Y - (n, ) NumPy array containing the labels (a number from 0-2) for each
data point
theta - (k, d) NumPy array, where row j represents the parameters of our
model for label j
temp_parameter - the temperature parameter of softmax function (scalar)
Returns:
test_error - the error rate of the classifier (scalar)
"""
y_pred = get_classification(X, theta, temp_parameter)
return 1 - (np.mod(y_pred, 3) == Y).mean()
def softmax_regression(X, Y, temp_parameter, alpha, lambda_factor, k, num_iterations):
"""
Runs batch gradient descent for a specified number of iterations on a dataset
with theta initialized to the all-zeros array. Here, theta is a k by d NumPy array
where row j represents the parameters of our model for label j for
j = 0, 1, ..., k-1
Args:
X - (n, d - 1) NumPy array (n data points, each with d-1 features)
Y - (n, ) NumPy array containing the labels (a number from 0-9) for each
data point
temp_parameter - the temperature parameter of softmax function (scalar)
alpha - the learning rate (scalar)
lambda_factor - the regularization constant (scalar)
k - the number of labels (scalar)
num_iterations - the number of iterations to run gradient descent (scalar)
Returns:
theta - (k, d) NumPy array that is the final value of parameters theta
cost_function_progression - a Python list containing the cost calculated at each step of gradient descent
"""
X = augment_feature_vector(X)
theta = np.zeros([k, X.shape[1]])
cost_function_progression = []
for i in range(num_iterations):
cost_function_progression.append(compute_cost_function(X, Y, theta, lambda_factor, temp_parameter))
theta = run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter)
return theta, cost_function_progression
def get_classification(X, theta, temp_parameter):
"""
Makes predictions by classifying a given dataset
Args:
X - (n, d - 1) NumPy array (n data points, each with d - 1 features)
theta - (k, d) NumPy array where row j represents the parameters of our model for
label j
temp_parameter - the temperature parameter of softmax function (scalar)
Returns:
Y - (n, ) NumPy array, containing the predicted label (a number between 0-9) for
each data point
"""
X = augment_feature_vector(X)
probabilities = compute_probabilities(X, theta, temp_parameter)
return np.argmax(probabilities, axis=0)
def plot_cost_function_over_time(cost_function_history):
plt.plot(range(len(cost_function_history)), cost_function_history)
plt.ylabel('Cost Function')
plt.xlabel('Iteration number')
plt.show()
def compute_test_error(X, Y, theta, temp_parameter):
    """Computes the error rate of the classifier predictions on (X, Y)."""
    assigned_labels = get_classification(X, theta, temp_parameter)
    return 1 - np.mean(assigned_labels == Y)
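# Hypothetical end-to-end usage sketch (not in the original module). It runs
# on tiny synthetic data so that the example stays self-contained and fast;
# substitute your own dataset loading for a real experiment.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_train = rng.rand(200, 8)              # 200 examples with 8 features
    Y_train = rng.randint(0, 10, size=200)  # labels 0-9, as for MNIST digits
    theta, cost_history = softmax_regression(X_train, Y_train, temp_parameter=1.0,
                                             alpha=0.3, lambda_factor=1.0e-4,
                                             k=10, num_iterations=50)
    print("final cost:", cost_history[-1])
    print("train error:", compute_test_error(X_train, Y_train, theta, 1.0))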
|
{"hexsha": "907dc1d12c4e4f966c28bfeddc871d89e9b3bf0c", "size": 8158, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ml_starter/mnist/softmax.py", "max_stars_repo_name": "pi-projects/ml-starter", "max_stars_repo_head_hexsha": "b3de630d68d9fe879cef626189fc07dee8568851", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ml_starter/mnist/softmax.py", "max_issues_repo_name": "pi-projects/ml-starter", "max_issues_repo_head_hexsha": "b3de630d68d9fe879cef626189fc07dee8568851", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ml_starter/mnist/softmax.py", "max_forks_repo_name": "pi-projects/ml-starter", "max_forks_repo_head_hexsha": "b3de630d68d9fe879cef626189fc07dee8568851", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8632478632, "max_line_length": 113, "alphanum_fraction": 0.6529786712, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2057}
|
import torch
import librosa
import numpy as np
import mir_eval
import separation_model as md
import data_set as dts
from shutil import copyfile
import os
class AudioSeparator:
r"""Implements a framework for using a SeparationModel to produce separated source for all files in the
validation set and measure the separation performances in terme of signal to distortion ratio (SDR),
signal to interference ratio (SIR) and signal to artifact ratio (SAR).
"""
@classmethod
def default_config(cls):
r"""Get the required parameters for instantiating a AudioSeparator
The configuration parameters for the model and the AudioDataSet are saved in the model checkpoint. All we
need for instantiation is the path to the check point.
The path to the folder to use for saving the separated audio tracks is also exposed.
Returns:
dict containing the required parameters
"""
config = {
"checkpoint_path": "", # path to model checkpoint
"separated_audio_folder": "" # path to folder where to save the separated audio tracks.
}
return config
def __init__(self, data_set, model, config):
r"""Constructor. Receives the AudioDataSet and the Model and stores them as class members.
Note: The received data_set features should not be scaled or centered.
Args:
data_set (AudioDataSet): The data set with the mixtures to separate
model (SeparationModel): The separation model for performing separation
config (dict): Configuration dictionary with parameters for the model, dataset and self.
"""
self.config = config
self.data_set = data_set
# Normalize or standardize the features, to have them ready to use as model input
self.data_set.shift_and_scale(self.config["shift"], self.config["scaling"])
self.model = model
self.model.eval()
self.device = torch.device("cpu") if not self.config["use_gpu"] \
else torch.device("cuda:" + str(self.config["gpu_no"]))
@classmethod
def from_checkpoint(cls, config, which_data_set="test"):
r"""Instantiate an AudioSeparator from a model checkpoint.
Loads the model from its checkpoint.
The checkpoint also contains the configuration dictionary required to create the validation set related
to the set used to train the model.
Args:
            config (dict): Configuration dictionary with the parameters defined in 'default_config()'
            which_data_set (str): Identifier of the set type for the 'split' method of the AudioDataSet: 'train',
                                  'test' or 'val'
Returns:
AudioSeparator using the model loaded from the checkpoint path in 'config'
"""
# Load the checkpoint
filename = config["checkpoint_path"]
if not os.path.isfile(filename):
raise ValueError("File " + filename + " is not a valid file.")
print("Loading model ...'{}'".format(filename))
state = torch.load(filename, 'cpu')
        # Get the configuration parameters used during the training of the model.
train_config = state["config"]
# Update those parameters with the AudioSeparator parameters.
train_config.update(config)
# Build the data set containing the audio to separate.
val_set = dts.find_data_set_class(train_config["data_set_type"]).split(train_config, which_data_set)
# Build the SeparationModel and load its parameters
model = md.SeparationModel(train_config, val_set.features_shape(), val_set.n_classes())
model.load_state_dict(state["model_state_dict"])
# Build the AudioSeparator
return cls(val_set, model, train_config)
def separate_spectrogram(self, masks, features, features_idx):
r"""Apply masks to models input features to generate a spectrogram for each audio source.
There are many ways to use separation masks to produce spectrograms for each sources in the input features.
This function does the following:
- Rescale the masks to the shape of the SeparationModel input
(this is only useful if the MaskModel in the SeparationModel does not preserve the shape of its input
with padding)
- Shift the features to [0, +inf[, apply the mask and shift back.
(This is because the features can have negative values, and we want a value of 0 in the mask to
correspond to the lowest possible energy)
- The previous step provides us with 'masked features': these features should correspond to separated
sources. The last step is to convert back these features (scaled and centered log-Mel-spectrogram,
PCEN, ...) to a 'spectrogram' representation that can be converted back to audio with Inverse STFT.
        Note: It has been found experimentally that applying the masks at the 'features' level gives worse results
              than converting the masks to the 'spectrogram' representation and applying them directly to the
              mixture spectrogram, because converting the features back to the spectrogram scale often implies
              taking the exponential of the features, which greatly amplifies the noise.
The other processing is performed by 'separate_spectrogram_in_lin_scale()'.
Args:
masks (torch.Tensor): Shape: [n_class, ~freq, ~time]. The masks produced by the separation model.
features (torch.Tensor): Shape [channel, freq, time]. The input features to the separation model.
features_idx (int): index of the features in data_set.features
Returns:
Spectrogram of the sources separated by the masks. shape: [n_sources, channel=1, Frequency, Time]
"""
        # Resize the masks to the size of the features (shape: [n_masks, channel, freq, time]).
        # This does something only if the masks have a different shape than the features (i.e. if MaskModel does not preserve shape).
masks = torch.nn.functional.interpolate(masks.unsqueeze(1),
size=(features.shape[1], features.shape[2]),
mode='bilinear',
align_corners=False)
        # Multiply each mask with the features (shape: [n_masks, channel, features.shape[0], features.shape[1]])
shift = features.abs().max()
spectrograms = masks * (features + shift) - shift
# Undo the feature scaling and centering
self.data_set.rescale_to_initial(spectrograms, self.config["shift"], self.config["scaling"])
# From Log Mel spectrogram or PCEN to STFT magnitude (energy spectrogram)
return self.data_set.features_to_stft_magnitudes(spectrograms.cpu().numpy(), features_idx)
def separate_spectrogram_in_lin_scale(self, masks, features_shape, mixture_spectrogram):
r"""Apply masks to the mixture spectrogram to generate spectrograms for each separated sources.
The masks received in argument have the shape of the output of the MaskModel. In this function,
these masks will first be converted to the shape of the mixture energy spectrogram (inverse Mel scaling)
and then be directly applied to the mixture spectrogram.
Args:
masks (torch.tensor): Shape: [n_class, ~freq, ~time] The masks produced by the separation model
features_shape (torch.tensor.shape): Shape of the input features to the separation model.
mixture_spectrogram (np.ndarray): shape: [Frequency, Time] Mixture spectrogram.
Returns:
Spectrogram of the sources separated by the masks. shape: [n_sources, channel=1, Frequency, Time]
"""
        # Resize the masks to the size of the features (shape: [n_masks, channel, freq, time]).
        # This does something only if the masks have a different shape than the features (i.e. if MaskModel does not preserve shape).
masks = torch.nn.functional.interpolate(masks.unsqueeze(1),
size=(features_shape[1], features_shape[2]),
mode='bilinear',
align_corners=False)
# If Mel spectrogram were used as features: reverse Mel-scaling
# Here we use the same inverse processing as in the implementation of
# Qiuqiang Kong et al. "A joint-separation-classification model for sound event detection of weakly-labelled
# data"; In: CoRR abs/1711.03037 (2017). axXiv: 1711.03037 URL: http://arxiv.org/abs/1711.03037
if self.config['feature_type'] != 'spectrogram':
masks = np.asarray([np.transpose(
self.data_set.mel_filterbank / (np.sum(self.data_set.mel_filterbank, axis=0) + 1e-8)) @ mask.numpy()
for mask in masks.squeeze()])
# Apply the masks to the mixture spectrogram. Mask.shape: [n_sources, channel=1, Frequency, Time]
# mixture_spectrogram.shape: [Frequency, Time]
# output.shape: [n_sources, channel=1, Frequency, Time]
return masks * mixture_spectrogram
def spectrogram_to_audio(self, spectrogram, phase):
r"""Compute waveform from spectrogram using inverse short-time Fourier transform.
Wrapper to call the istft function from the AudioDataSet class that performs the ISTFT with the
parameters corresponding to the STFT.
Args:
spectrogram (np.ndarray): shape: [Frequency, Time]. Magnitude of STFT result
phase (np.ndarray): shape: [Frequency, Time]. Phase of STFT result
Returns:
audio waveform. (1D np.ndarray)
"""
return self.data_set.istft(spectrogram * phase)
def save_separated_audio(self, audios, filename):
r"""Save the audios tracks in audios, in a subfolder of self.config['separated_audio_folder'].
'audios' should be the sources separated by the SeparationModel for the audio mixture saved in 'filename'.
        The separated tracks are saved in a folder with the same name as their corresponding mixture.
The mixture is also copied inside the folder.
Args:
audios (np.ndarray): shape: [n_sources, time]. Audio waveforms of the separated sources
filename (str): Name of the file containing the audio mixture.
"""
# Create folder with mixture name
folder_path = os.path.join(self.config["separated_audio_folder"], os.path.splitext(filename)[0])
os.makedirs(folder_path)
# Save each separated source
for class_idx, audio in enumerate(audios):
librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',
audio.T,
sr=self.data_set.config["sampling_rate"])
# Also copy the mixture in the folder
copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, "original_mix.wav"))
def separate(self, separation_method='in_lin'):
r"""Run separation with self.model for all the files in self.data_set and save the separated sources.
Args:
            separation_method (str): Identifier to choose how the masks are applied: 'in_log' applies them at the
                                     feature level (separate_spectrogram) and 'in_lin' applies them at the energy
                                     spectrogram level (separate_spectrogram_in_lin_scale).
                                     Advised: 'in_lin'
"""
# Check if the output folder exists, if not creates it, otherwise inform user and stop execution
if not os.path.exists(self.config["separated_audio_folder"]):
os.makedirs(self.config["separated_audio_folder"])
else:
if os.listdir(self.config["separated_audio_folder"]): # if folder is not empty
raise ValueError('Output folders already exist !')
self.model.to(self.device)
self.model.eval()
self.data_set.to(self.device)
# Loop over all the files in the dataset.
for idx in range(self.data_set.__len__()):
# Get the features
features = self.data_set.get_features(idx)
# Get the separation masks
_, masks = self.model(features.unsqueeze(0)) # (add batch dimension)
            masks = masks.detach().squeeze()  # drop the batch dimension so the "mask" dim comes first
# Apply the masks
if separation_method == 'in_log':
spectrograms = self.separate_spectrogram(masks, features, idx)
elif separation_method == 'in_lin':
spectrograms = self.separate_spectrogram_in_lin_scale(masks, features.shape,
self.data_set.get_magnitude(idx))
else:
raise ValueError('Separation method ' + separation_method + ' is not available.')
# Get the separated audio and save
audios = [self.spectrogram_to_audio(spectrogram, self.data_set.get_phase(idx)) for spectrogram in spectrograms]
self.save_separated_audio(audios, self.data_set.filenames[idx])
def evaluate_separation(self, indices=None):
r"""Compute separation metrics using the separated sources in self.config['separated_audio_folder']
Assuming 'separate()' has been previously called: the separated sources for all the audio files in
self.data_set are stored in self.config['separated_audio_folder'].
This function loads the separated sources and the ground-truth sources to compute separation metrics.
Separation metrics used here are:
- Signal to Distortion ratio (SDR)
- Signal to Interference ratio (SIR)
- Signal to Artifact ratio (SAR)
Those are computed using the mir_eval library.
        Note: These estimators are not very reliable measures of separation quality. Unfortunately, they are the
        most commonly used in the literature. Here we use the 'bss_eval_images' function, which does use a filtered
        version of the ground-truth sources, but does not allow for scale changes.
For discussions of the measurements quality, see:
Jonathan Le Roux et al. (2018). "SDR - half-baked or well done?". CoRR, abs/1811.02508.
Args:
            indices (np.ndarray): If passed, compute separation metrics only for the files at the given indices.
                                  Otherwise, process the entire data set.
Returns:
sdr, sir, sar: np.ndarray of shape [n_files, n_sources]
"""
        # If indices is passed, evaluate separation for the files at the given indices; otherwise do the entire data set
if indices is None:
indices = np.arange(self.data_set.__len__())
sdr = np.zeros((indices.shape[0], len(self.data_set.classes)))
sir = np.zeros((indices.shape[0], len(self.data_set.classes)))
sar = np.zeros((indices.shape[0], len(self.data_set.classes)))
for idx in indices:
# Load separated sources
# Take care of sorting the sources here and in data_set class in the same way to have consistent labels
# Take care not to load the 'original_mix' file which is the mixture file.
separated_sources = np.asarray([self.data_set.load_audio(os.path.join(self.config["separated_audio_folder"],
os.path.splitext(
self.data_set.filenames[idx])[0],
filename))
                                        for filename in sorted( # sort in the same order as in the data_set class
os.listdir(os.path.join(self.config["separated_audio_folder"],
os.path.splitext(self.data_set.filenames[idx])[0])))
if 'mix' not in filename]) # original mix is copied over with sep. sources
# Get the ground-truth sources from self.data_set
reference_sources = self.data_set.load_audio_source_files(idx)
# Crop to length of reconstructed signal (because last non-complete frame of the stft is dropped)
# Add small offset to avoid having sources always 0 (mir_eval does not like that)
reference_sources = reference_sources[:, :separated_sources.shape[1]] + 1e-15
sdr[idx], _, sir[idx], sar[idx], _ = mir_eval.separation.bss_eval_images(reference_sources,
separated_sources,
compute_permutation=False)
return sdr, sir, sar
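# Hedged usage sketch (illustrative; not part of the original module). The
# checkpoint path and output folder below are placeholders for real paths.
if __name__ == '__main__':
    separator = AudioSeparator.from_checkpoint(
        {"checkpoint_path": "saves/model.ckpt",            # placeholder
         "separated_audio_folder": "separated/test_run"},  # placeholder
        which_data_set="test")
    separator.separate(separation_method='in_lin')  # advised method (see docstring)
    sdr, sir, sar = separator.evaluate_separation()
    print("mean SDR {:.2f}, SIR {:.2f}, SAR {:.2f}".format(sdr.mean(), sir.mean(), sar.mean()))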
|
{"hexsha": "6f7b5287bdaa69ad3c7f2b006fbfc4ad8b0d9db0", "size": 17449, "ext": "py", "lang": "Python", "max_stars_repo_path": "separator.py", "max_stars_repo_name": "4p0pt0Z/Audio_blind_source_separation", "max_stars_repo_head_hexsha": "a91851172680c86f3faea0a7ad31eedb023e172f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2019-01-15T12:55:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T01:13:44.000Z", "max_issues_repo_path": "separator.py", "max_issues_repo_name": "4p0pt0Z/Audio_blind_source_separation", "max_issues_repo_head_hexsha": "a91851172680c86f3faea0a7ad31eedb023e172f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-02-28T15:32:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-24T08:46:48.000Z", "max_forks_repo_path": "separator.py", "max_forks_repo_name": "4p0pt0Z/Audio_blind_source_separation", "max_forks_repo_head_hexsha": "a91851172680c86f3faea0a7ad31eedb023e172f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-02-23T05:57:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-08T04:29:55.000Z", "avg_line_length": 55.9262820513, "max_line_length": 123, "alphanum_fraction": 0.6278296751, "include": true, "reason": "import numpy", "num_tokens": 3409}
|
#!/usr/bin/env python3
# Copyright <2019> <Chen Wang [https://chenwang.site], Carnegie Mellon University>
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
import copy
import tqdm
import torch
import os.path
import argparse
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torch.utils.data as Data
from torch.autograd import Variable
from torchvision.models.vgg import VGG
import torchvision.transforms as transforms
from torchvision.datasets import CocoDetection
from torch.optim.lr_scheduler import ReduceLROnPlateau
from dataset import ImageData, Dronefilm, DroneFilming, SubT, SubTF, PersonalVideo
from interestingness import AE, VAE, Interestingness
from torchutil import EarlyStopScheduler, count_parameters, show_batch, RandomMotionBlur, CosineLoss, PearsonLoss
def performance(loader, net):
test_loss = 0
with torch.no_grad():
for batch_idx, inputs in enumerate(loader):
if torch.cuda.is_available():
inputs = inputs.cuda()
inputs = Variable(inputs).view(-1,inputs.size(-3),inputs.size(-2),inputs.size(-1))
outputs = net(inputs)
loss = criterion(outputs, inputs)
test_loss += loss.item()
show_batch(torch.cat([inputs,outputs], dim=0), name='train')
return test_loss/(batch_idx+1)
def test(loader, net):
test_loss = 0
with torch.no_grad():
for batch_idx, inputs in enumerate(loader):
if torch.cuda.is_available():
inputs = inputs.cuda()
inputs = Variable(inputs).view(-1,inputs.size(-3),inputs.size(-2),inputs.size(-1))
outputs = net.listen(inputs)
loss = criterion(outputs, inputs)
test_loss += loss.item()
show_batch(torch.cat([inputs,outputs], dim=0), name='test')
return test_loss/(batch_idx+1)
if __name__ == "__main__":
    # Arguments
parser = argparse.ArgumentParser(description='Train Interestingness Networks')
parser.add_argument("--data-root", type=str, default='/data/datasets', help="dataset root folder")
parser.add_argument("--model-save", type=str, default='saves/ae.pt', help="learning rate")
parser.add_argument('--save-flag', type=str, default='n1000', help='save name flag')
parser.add_argument("--memory-size", type=int, default=1000, help="number of training epochs")
parser.add_argument("--lr", type=float, default=1e-1, help="learning rate")
parser.add_argument("--factor", type=float, default=0.1, help="ReduceLROnPlateau factor")
parser.add_argument("--min-lr", type=float, default=1e-1, help="minimum lr for ReduceLROnPlateau")
parser.add_argument("--patience", type=int, default=10, help="patience of epochs for ReduceLROnPlateau")
parser.add_argument("--epochs", type=int, default=20, help="number of training epochs")
parser.add_argument("--batch-size", type=int, default=1, help="number of minibatch size")
parser.add_argument("--momentum", type=float, default=0, help="momentum of the optimizer")
parser.add_argument("--alpha", type=float, default=0.1, help="weight of TVLoss")
parser.add_argument("--w-decay", type=float, default=1e-2, help="weight decay of the optimizer")
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--loss', type=str, default='mse', help='loss criterion')
parser.add_argument("--crop-size", type=int, default=320, help='loss compute by grid')
parser.add_argument("--rr", type=float, default=5, help="reading rate")
parser.add_argument("--wr", type=float, default=5, help="writing rate")
    parser.add_argument('--dataset', type=str, default='SubTF', help='dataset type (SubTF or DroneFilming)')
args = parser.parse_args(); print(args)
torch.manual_seed(args.seed)
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
if args.dataset == 'DroneFilming':
train_data = DroneFilming(root=args.data_root, train=True, transform=transform)
elif args.dataset == 'SubTF':
train_data = SubTF(root=args.data_root, train=True, transform=transform)
train_loader = Data.DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True)
net,_ = torch.load(args.model_save)
net = Interestingness(net, args.memory_size, 512, 10, 10, 10, 10)
net.memory.set_learning_rate(rr=args.rr, wr=args.wr)
net.set_train(True)
if torch.cuda.is_available():
net = net.cuda()
if args.loss == 'l1':
criterion = nn.L1Loss()
elif args.loss == 'mse':
criterion = nn.MSELoss()
elif args.loss == 'cos':
criterion = CosineLoss()
elif args.loss == 'pearson':
criterion = PearsonLoss()
optimizer = optim.RMSprop(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
scheduler = EarlyStopScheduler(optimizer, factor=args.factor, verbose=True, min_lr=args.min_lr, patience=args.patience)
print('number of parameters:', count_parameters(net))
best_loss = float('Inf')
for epoch in range(args.epochs):
train_loss = performance(train_loader, net)
val_loss = test(train_loader, net)
print('epoch:{} train:{} val:{}'.format(epoch, train_loss, val_loss))
if val_loss < best_loss:
print("New best Model, saving...")
torch.save(net, args.model_save+'.'+args.dataset+'.'+args.save_flag+'.'+args.loss)
best_loss = val_loss
no_decrease = 0
if scheduler.step(val_loss, epoch):
print("Early Stopping!")
break
print('test_loss, %.4f'%(best_loss))
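# Hedged usage example (illustrative; the flag values are placeholders). The
# script expects a pretrained autoencoder checkpoint at --model-save, which is
# loaded with torch.load before being wrapped in Interestingness:
#   python train_interest.py --data-root /data/datasets --dataset SubTF \
#       --model-save saves/ae.pt --memory-size 1000 --lr 0.1 --epochs 20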
|
{"hexsha": "3187a5cef52f91a0cc4eaafdac9db81a130090f9", "size": 7325, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_interest.py", "max_stars_repo_name": "castacks/interestingness", "max_stars_repo_head_hexsha": "b614818ab11dcc15c5fe6b55fe993882add3e8e6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-20T14:58:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T14:58:36.000Z", "max_issues_repo_path": "train_interest.py", "max_issues_repo_name": "castacks/interestingness", "max_issues_repo_head_hexsha": "b614818ab11dcc15c5fe6b55fe993882add3e8e6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_interest.py", "max_forks_repo_name": "castacks/interestingness", "max_forks_repo_head_hexsha": "b614818ab11dcc15c5fe6b55fe993882add3e8e6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-17T08:25:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T08:25:05.000Z", "avg_line_length": 46.0691823899, "max_line_length": 123, "alphanum_fraction": 0.697883959, "include": true, "reason": "import numpy", "num_tokens": 1666}
|
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from collections import Counter
import os
import pkg_resources
import matplotlib.pyplot as plt
import numpy as np
import pandas
qDiptab_file = pkg_resources.resource_filename('modality', 'data/qDiptab.csv')
if not os.path.exists(qDiptab_file):
qDiptab_df = None
else:
qDiptab_df = pandas.read_csv(qDiptab_file, index_col=0)
class DataError(Exception):
pass
def hartigan_diptest(data):
'''
P-value according to Hartigan's dip test for unimodality.
The dip is computed using the function
dip_and_closest_unimodal_from_cdf. From this the p-value is
interpolated using a table imported from the R package diptest.
References:
Hartigan and Hartigan (1985): The dip test of unimodality.
The Annals of Statistics. 13(1).
Input:
data - one-dimensional data set.
Value:
p-value for the test.
'''
return pval_hartigan(data)
def pval_hartigan(data):
xF, yF = cum_distr(data)
dip = dip_from_cdf(xF, yF)
return dip_pval_tabinterpol(dip, len(data))
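# Hedged usage sketch (illustrative; not part of the original module). It
# assumes the tabulated p-values (qDiptab_file above) are available: the
# p-value should be near 0 for clearly bimodal data and large for unimodal data.
def _demo_hartigan_diptest():
    np.random.seed(0)
    bimodal = np.hstack([np.random.randn(200), np.random.randn(200) + 5])
    unimodal = np.random.randn(400)
    print(hartigan_diptest(bimodal))   # close to 0
    print(hartigan_diptest(unimodal))  # typically well above 0.05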
def dip_resampled_from_unimod(unimod, N):
data = sample_from_unimod(unimod, N)
xF, yF = cum_distr(data)
return dip_from_cdf(xF, yF)
def sample_from_unimod(unimod, N):
xU, yU = unimod
#print "zip(xU, yU) = {}".format(zip(xU, yU))
dxU = np.diff(xU)
t = np.random.rand(N)
bins = np.searchsorted(yU, t)-1
bin_cnt = Counter(bins).most_common()
data = np.zeros((N,))
i = 0
for bin, cnt in bin_cnt:
data[i:i+cnt] = np.random.rand(cnt)*dxU[bin]+xU[bin]
i += cnt
return data
def dip_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
dip, _ = dip_and_closest_unimodal_from_cdf(xF, yF, plotting, verbose, eps)
return dip
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
'''
Dip computed as distance between empirical distribution function (EDF) and
cumulative distribution function for the unimodal distribution with
smallest such distance. The optimal unimodal distribution is found by
the algorithm presented in
Hartigan (1985): Computation of the dip statistic to test for
        unimodality. Applied Statistics, vol. 34, no. 3.
If the plotting option is enabled the optimal unimodal distribution
function is plotted along with (xF, yF-dip) and (xF, yF+dip)
xF - x-coordinates for EDF
yF - y-coordinates for EDF
'''
## TODO! Preprocess xF and yF so that yF increasing and xF does
## not have more than two copies of each x-value.
if (xF[1:]-xF[:-1] < -eps).any():
raise ValueError('Need sorted x-values to compute dip')
if (yF[1:]-yF[:-1] < -eps).any():
raise ValueError('Need sorted y-values to compute dip')
if plotting:
Nplot = 5
bfig = plt.figure(figsize=(12, 3))
i = 1 # plot index
D = 0 # lower bound for dip*2
# [L, U] is interval where we still need to find unimodal function,
# the modal interval
L = 0
U = len(xF) - 1
# iGfin are the indices of xF where the optimal unimodal distribution is greatest
# convex minorant to (xF, yF+dip)
# iHfin are the indices of xF where the optimal unimodal distribution is least
# concave majorant to (xF, yF-dip)
iGfin = L
iHfin = U
while 1:
iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
iG = np.arange(L, U+1)[iGG]
iH = np.arange(L, U+1)[iHH]
        # Interpolate. The first and last points are in both sets and do not need
        # interpolation. They might cause trouble if included, due to the possibility
        # of an infinite slope at the beginning or end of the interval.
if iG[0] != iH[0] or iG[-1] != iH[-1]:
raise ValueError('Convex minorant and concave majorant should start and end at same points.')
hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH])
gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG])
hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]])
gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]])
#hipl = lin_interpol_sorted(xF[iG], xF[iH], yF[iH])
#gipl = lin_interpol_sorted(xF[iH], xF[iG], yF[iG])
# Find largest difference between GCM and LCM.
gdiff = hipl - yF[iG]
hdiff = yF[iH] - gipl
imaxdiffg = np.argmax(gdiff)
imaxdiffh = np.argmax(hdiff)
d = max(gdiff[imaxdiffg], hdiff[imaxdiffh])
# Plot current GCM and LCM.
if plotting:
if i > Nplot:
bfig = plt.figure(figsize=(12, 3))
i = 1
bax = bfig.add_subplot(1, Nplot, i)
bax.plot(xF, yF, color='red')
bax.plot(xF, yF-d/2, color='black')
bax.plot(xF, yF+d/2, color='black')
bax.plot(xF[iG], yF[iG]+d/2, color='blue')
bax.plot(xF[iH], yF[iH]-d/2, color='blue')
if d <= D:
if verbose:
print("Difference in modal interval smaller than current dip")
break
# Find new modal interval so that largest difference is at endpoint
# and set d to largest distance between current GCM and LCM.
if gdiff[imaxdiffg] > hdiff[imaxdiffh]:
L0 = iG[imaxdiffg]
U0 = iH[iH >= L0][0]
else:
U0 = iH[imaxdiffh]
L0 = iG[iG <= U0][-1]
# Add points outside the modal interval to the final GCM and LCM.
iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]])
iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin])
# Plot new modal interval
if plotting:
ymin, ymax = bax.get_ylim()
bax.axvline(xF[L0], ymin, ymax, color='orange')
bax.axvline(xF[U0], ymin, ymax, color='red')
bax.set_xlim(xF[L]-.1*(xF[U]-xF[L]), xF[U]+.1*(xF[U]-xF[L]))
# Compute new lower bound for dip*2
# i.e. largest difference outside modal interval
gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG])
D = max(D, np.amax(yF[L:(L0+1)] - gipl))
hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH])
D = max(D, np.amax(hipl - yF[U0:(U+1)]))
if xF[U0]-xF[L0] < eps:
if verbose:
print("Modal interval zero length")
break
if plotting:
mxpt = np.argmax(yF[L:(L0+1)] - gipl)
bax.plot([xF[L:][mxpt], xF[L:][mxpt]], [yF[L:][mxpt]+d/2, gipl[mxpt]+d/2], '+', color='red')
mxpt = np.argmax(hipl - yF[U0:(U+1)])
bax.plot([xF[U0:][mxpt], xF[U0:][mxpt]], [yF[U0:][mxpt]-d/2, hipl[mxpt]-d/2], '+', color='red')
i += 1
# Change modal interval
L = L0
U = U0
if d <= D:
if verbose:
print("Difference in modal interval smaller than new dip")
break
if plotting:
# Add modal interval to figure
bax.axvline(xF[L0], ymin, ymax, color='green', linestyle='dashed')
bax.axvline(xF[U0], ymin, ymax, color='green', linestyle='dashed')
## Plot unimodal function (not distribution function)
bfig = plt.figure()
bax = bfig.add_subplot(1, 1, 1)
bax.plot(xF, yF, color='red')
bax.plot(xF, yF-D/2, color='black')
bax.plot(xF, yF+D/2, color='black')
# Find string position in modal interval
iM = np.arange(iGfin[-1], iHfin[0]+1)
yM_lower = yF[iM]-D/2
yM_lower[0] = yF[iM[0]]+D/2
iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
iM_concave = iM[iMM_concave]
#bax.plot(xF[iM], yM_lower, color='orange')
#bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red')
lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
try:
mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
#bax.axvline(xF[mode], color='green', linestyle='dashed')
except IndexError:
iM_convex = np.zeros(0, dtype='i')
else:
after_mode = iM_concave > mode
iM_concave = iM_concave[after_mode]
iMM_concave = iMM_concave[after_mode]
iM = iM[iM <= mode]
iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
if plotting:
bax.plot(xF[np.hstack([iGfin, iM_convex, iM_concave, iHfin])],
np.hstack([yF[iGfin] + D/2, yF[iM_convex] + D/2,
yM_lower[iMM_concave], yF[iHfin] - D/2]), color='blue')
#bax.plot(xF[iM], yM_lower, color='orange')
## Plot unimodal distribution function
bfig = plt.figure()
bax = bfig.add_subplot(1, 1, 1)
bax.plot(xF, yF, color='red')
bax.plot(xF, yF-D/2, color='black')
bax.plot(xF, yF+D/2, color='black')
# Find string position in modal interval
iM = np.arange(iGfin[-1], iHfin[0]+1)
yM_lower = yF[iM]-D/2
yM_lower[0] = yF[iM[0]]+D/2
iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
iM_concave = iM[iMM_concave]
#bax.plot(xF[iM], yM_lower, color='orange')
#bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red')
lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
try:
mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
#bax.axvline(xF[mode], color='green', linestyle='dashed')
except IndexError:
iM_convex = np.zeros(0, dtype='i')
else:
after_mode = iM_concave > mode
iM_concave = iM_concave[after_mode]
iMM_concave = iMM_concave[after_mode]
iM = iM[iM <= mode]
iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
# Closest unimodal curve
xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])]
yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2,
yM_lower[iMM_concave], yF[iHfin[1:]] - D/2])
# Add points so unimodal curve goes from 0 to 1
k_start = (yU[1]-yU[0])/(xU[1]-xU[0])
xU_start = xU[0] - yU[0]/k_start
k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2])
xU_end = xU[-1] + (1-yU[-1])/k_end
xU = np.hstack([xU_start, xU, xU_end])
yU = np.hstack([0, yU, 1])
if plotting:
bax.plot(xU, yU, color='blue')
#bax.plot(xF[iM], yM_lower, color='orange')
plt.show()
return D/2, (xU, yU)
def dip_pval_tabinterpol(dip, N):
'''
dip - dip value computed from dip_from_cdf
N - number of observations
'''
if qDiptab_df is None:
raise DataError("Tabulated p-values not available, {} missing. "
"See installation instructions.".format(qDiptab_file))
if np.isnan(N) or N < 10:
return np.nan
diptable = np.array(qDiptab_df)
ps = np.array(qDiptab_df.columns).astype(float)
Ns = np.array(qDiptab_df.index)
if N >= Ns[-1]:
dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1]-0.1)
N = Ns[-1]-0.1
iNlow = np.nonzero(Ns < N)[0][-1]
qN = (N-Ns[iNlow])/(Ns[iNlow+1]-Ns[iNlow])
dip_sqrtN = np.sqrt(N)*dip
dip_interpol_sqrtN = (
np.sqrt(Ns[iNlow])*diptable[iNlow, :] + qN*(
np.sqrt(Ns[iNlow+1])*diptable[iNlow+1, :]-np.sqrt(Ns[iNlow])*diptable[iNlow, :]))
if not (dip_interpol_sqrtN < dip_sqrtN).any():
return 1
iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
if iplow == len(dip_interpol_sqrtN) - 1:
return 0
qp = (dip_sqrtN-dip_interpol_sqrtN[iplow])/(dip_interpol_sqrtN[iplow+1]-dip_interpol_sqrtN[iplow])
p_interpol = ps[iplow] + qp*(ps[iplow+1]-ps[iplow])
return 1 - p_interpol
def transform_dip_to_other_nbr_pts(dip_n, n, m):
dip_m = np.sqrt(n/m)*dip_n
return dip_m
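# Worked example of the sqrt(n/m) scaling above (added for clarity): a dip of
# 0.01 observed on n = 2000 points maps to sqrt(2000/5000) * 0.01 ~= 0.0063 on
# m = 5000 points, matching the (0.01, 2000, 5000) case exercised in __main__.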
def cum_distr(data, w=None):
if w is None:
w = np.ones(len(data))*1./len(data)
eps = 1e-10
data_ord = np.argsort(data)
data_sort = data[data_ord]
w_sort = w[data_ord]
data_sort, indices = unique(data_sort, return_index=True, eps=eps, is_sorted=True)
if len(indices) < len(data_ord):
w_unique = np.zeros(len(indices))
for i in range(len(indices)-1):
w_unique[i] = np.sum(w_sort[indices[i]:indices[i+1]])
w_unique[-1] = np.sum(w_sort[indices[-1]:])
w_sort = w_unique
wcum = np.cumsum(w_sort)
wcum /= wcum[-1]
N = len(data_sort)
x = np.empty(2*N)
x[2*np.arange(N)] = data_sort
x[2*np.arange(N)+1] = data_sort
y = np.empty(2*N)
y[0] = 0
y[2*np.arange(N)+1] = wcum
y[2*np.arange(N-1)+2] = wcum[:-1]
return x, y
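# Added note: cum_distr encodes the EDF as a right-continuous step function by
# doubling each x-value. For example (equal weights assumed):
#   cum_distr(np.array([1., 2.]))  ->  x = [1., 1., 2., 2.],  y = [0., .5, .5, 1.]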
# def lin_interpol(xquery, x, y):
# xq_ord = np.argsort(xquery)
# xord = np.argsort(x)
# values = lin_interpol_sorted(xquery[xq_ord], x[xord], y[xord])
# return values[np.argsort(xq_ord)]
# def lin_interpol_sorted(xquery, x, y, eps=1e-10):
# x, i = unique(x, return_index=True, eps=eps, is_sorted=True)
# y = y[i]
# if len(x) == 1:
# if np.abs(x - xquery).all() < eps:
# return y*np.ones(len(xquery))
# else:
# raise ValueError('interpolation points outside interval')
# i = 0
# j = 1
# if xquery[0] < x[0]-eps:
# raise ValueError('interpolation points outside interval: xquery[0] = {}, x[0] = {}'.format(xquery[0], x[0]))
# values = np.zeros(len(xquery))
# indices = np.zeros(len(xquery))
# while i < len(xquery) and j < len(x):
# if xquery[i] <= x[j]+eps:
# q = (y[j]-y[j-1])/(x[j]-x[j-1])
# values[i] = y[j-1] + q*(xquery[i]-x[j-1])
# indices[i] = x[j-1]
# i += 1
# else:
# j += 1
# if i < len(xquery) - 1:
# raise ValueError('interpolation points outside interval: xquery[-1] = {}, x[-1] = {}'.format(xquery[-1], x[-1]))
# return values
def unique(data, return_index, eps, is_sorted=True):
if not is_sorted:
ord = np.argsort(data)
rank = np.argsort(ord)
data_sort = data[ord]
else:
data_sort = data
isunique_sort = np.ones(len(data_sort), dtype='bool')
j = 0
for i in range(1, len(data_sort)):
if data_sort[i] - data_sort[j] < eps:
isunique_sort[i] = False
else:
j = i
if not is_sorted:
isunique = isunique_sort[rank]
data_unique = data[isunique]
else:
data_unique = data[isunique_sort]
if not return_index:
return data_unique
if not is_sorted:
ind_unique = np.nonzero(isunique)[0]
else:
ind_unique = np.nonzero(isunique_sort)[0]
return data_unique, ind_unique
def greatest_convex_minorant(x, y):
i, xnew, negy = least_concave_majorant(x, -y)
return i, xnew, -negy
def greatest_convex_minorant_sorted(x, y):
i = least_concave_majorant_sorted(x, -y)
return i
def least_concave_majorant(x, y, eps=1e-12):
if (x[1:]-x[:-1] < -eps).any():
raise ValueError('need sorted x-values to find least concave majorant')
ind = least_concave_majorant_sorted(x, y, eps)
ind = np.sort(ind)
return ind, x[ind], y[ind]
def least_concave_majorant_sorted(x, y, eps=1e-12):
i = [0]
icurr = 0
while icurr < len(x) - 1:
if np.abs(x[icurr+1]-x[icurr]) > eps:
q = (y[(icurr+1):]-y[icurr])/(x[(icurr+1):]-x[icurr])
icurr += 1 + np.argmax(q)
i.append(icurr)
elif y[icurr+1] > y[icurr] or icurr == len(x)-2:
icurr += 1
i.append(icurr)
elif np.abs(x[icurr+2]-x[icurr]) > eps:
q = (y[(icurr+2):]-y[icurr])/(x[(icurr+2):]-x[icurr])
icurr += 2 + np.argmax(q)
i.append(icurr)
else:
print("x[icurr] = {}, x[icurr+1] = {}, x[icurr+2] = {}".format(x[icurr], x[icurr+1], x[icurr+2]))
raise ValueError('Maximum two copies of each x-value allowed')
return np.array(i)
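# Added example: for x = [0, 1, 2, 3] and y = [0, 1, 3, 3.5], the point (1, 1)
# lies below the chord from (0, 0) to (2, 3), so the least concave majorant
# skips it and the function returns the indices [0, 2, 3].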
if __name__ == '__main__':
#seed = np.random.randint(1000)
for seed in [None, 403, 796]:
if seed is None:
dat = np.hstack([np.arange(0, 1, .1), np.arange(2, 3, 0.1)])
else:
print("seed = {}".format(seed))
np.random.seed(seed)
dat = np.hstack([np.random.randn(10), np.random.randn(10)+2])
xcum, ycum = cum_distr(dat, np.ones(len(dat))*1./len(dat))
dip = dip_from_cdf(xcum, ycum, verbose=True, plotting=True)
print("dip = {}".format(dip))
for (dip, N, M) in [(0.005, 20000, 50000), (0.01, 2000, 5000), (0.001, 70000, 10000), (0.0005, 1000000, 10000)]:
print("dip_pval_tabinterpol(dip, N) = {}".format(dip_pval_tabinterpol(dip, N)))
print("dip_pval_tabinterpol(transform_dip_to_other_nbr_pts(dip, N, M), M) = {}".format(dip_pval_tabinterpol(transform_dip_to_other_nbr_pts(dip, N, M), M)))
|
{"hexsha": "1b8d8407b206fc4def1a410fbb57a825089d510c", "size": 16853, "ext": "py", "lang": "Python", "max_stars_repo_path": "modality/diptest.py", "max_stars_repo_name": "tompollard/modality", "max_stars_repo_head_hexsha": "dbe6d407a44fa6b1b93fba1433d62cb0b2c4a132", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2016-08-24T12:39:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-24T08:50:24.000Z", "max_issues_repo_path": "modality/diptest.py", "max_issues_repo_name": "tompollard/modality", "max_issues_repo_head_hexsha": "dbe6d407a44fa6b1b93fba1433d62cb0b2c4a132", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-03-19T04:45:40.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-05T08:59:43.000Z", "max_forks_repo_path": "modality/diptest.py", "max_forks_repo_name": "tompollard/modality", "max_forks_repo_head_hexsha": "dbe6d407a44fa6b1b93fba1433d62cb0b2c4a132", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-03-18T19:40:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T07:54:00.000Z", "avg_line_length": 33.9778225806, "max_line_length": 163, "alphanum_fraction": 0.5842876639, "include": true, "reason": "import numpy", "num_tokens": 5566}
|
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(Project_Test_Suite)
BOOST_AUTO_TEST_SUITE(Module_Test_Suite)
BOOST_AUTO_TEST_CASE(DummyTest_Passes) {
BOOST_CHECK_EQUAL(1,1);
BOOST_CHECK(true);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "b98dbe53a5dfb6b50c522a13fbd2fcfa7d9e048e", "size": 313, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/module/module_test.cpp", "max_stars_repo_name": "przestaw/Cpp-CMake-template", "max_stars_repo_head_hexsha": "ce71db7b9635bd57e51ec812e0bfe04dac2355be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/module/module_test.cpp", "max_issues_repo_name": "przestaw/Cpp-CMake-template", "max_issues_repo_head_hexsha": "ce71db7b9635bd57e51ec812e0bfe04dac2355be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/module/module_test.cpp", "max_forks_repo_name": "przestaw/Cpp-CMake-template", "max_forks_repo_head_hexsha": "ce71db7b9635bd57e51ec812e0bfe04dac2355be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3571428571, "max_line_length": 48, "alphanum_fraction": 0.7188498403, "num_tokens": 68}
|
# Helper function returning a customized URDF of the Franka Emika Panda manipulator
import numpy as np
def generate_custom_urdf(joint_ang_bias: tuple = (0, 0, 0, 0, 0, 0, 0),
joint_pos_bias: tuple = ((0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0),
(0,0,0),(0,0,0),(0,0,0),(0,0,0))) -> str:
robot_urdf = \
f"""<?xml version="1.0" ?>
<!-- =================================================================================== -->
<!-- | This document was autogenerated by xacro from panda_arm_hand.urdf.xacro | -->
<!-- | EDITING THIS FILE BY HAND IS NOT RECOMMENDED | -->
<!-- =================================================================================== -->
<robot name="panda" xmlns:xacro="http://www.ros.org/wiki/xacro">
<link name="panda_link0">
<inertial>
<origin rpy="0 0 0" xyz="0 0 0.05"/>
<mass value="2.9"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<geometry>
<mesh filename="package://meshes/collision/link0.obj"/>
</geometry>
<material name="panda_white">
<color rgba="1. 1. 1. 1."/>
</material>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link0.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint1" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/>
<origin rpy="0 0 0" xyz="{joint_pos_bias[0][0]} {joint_pos_bias[0][1]} {0.333 + joint_pos_bias[0][2]}"/>
<parent link="panda_link0"/>
<child link="panda_link1"/>
<axis xyz="0 0 1"/>
<limit effort="87" lower="-2.9671" upper="2.9671" velocity="2.1750"/>
</joint>
<link name="panda_link1">
<inertial>
<origin rpy="0 0 0" xyz="0 -0.04 -0.05"/>
<mass value="2.7"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[0]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/visual/link1.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link1.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint2" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-1.7628" soft_upper_limit="1.7628"/>
<origin rpy="-1.57079632679 0 {joint_ang_bias[0]}" xyz="{joint_pos_bias[1][0]} {joint_pos_bias[1][1]} {joint_pos_bias[1][2]}"/>
<parent link="panda_link1"/>
<child link="panda_link2"/>
<axis xyz="0 0 1"/>
<limit effort="87" lower="-1.8326" upper="1.8326" velocity="2.1750"/>
</joint>
<link name="panda_link2">
<inertial>
<origin rpy="0 0 0" xyz="0 -0.04 0.06"/>
<mass value="2.73"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[1]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/visual/link2.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link2.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint3" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/>
<origin rpy="1.57079632679 0 {joint_ang_bias[1]}" xyz="{0.316 * np.sin(joint_ang_bias[1]) + joint_pos_bias[2][0]} {-0.316 * np.cos(joint_ang_bias[1]) + joint_pos_bias[2][1]} {joint_pos_bias[2][2]}"/>
<parent link="panda_link2"/>
<child link="panda_link3"/>
<axis xyz="0 0 1"/>
<limit effort="87" lower="-2.9671" upper="2.9671" velocity="2.1750"/>
</joint>
<link name="panda_link3">
<inertial>
<origin rpy="0 0 0" xyz="0.01 0.01 -0.05"/>
<mass value="2.04"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[2]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/visual/link3.obj"/>
</geometry>
<material name="panda_red">
<color rgba="1. 1. 1. 1."/>
</material>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link3.obj"/>
</geometry>
</collision>
</link>
<joint name="panda_joint4" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-3.0718" soft_upper_limit="-0.0698"/>
<origin rpy="1.57079632679 0 {joint_ang_bias[2]}" xyz="{0.0825*np.cos(joint_ang_bias[2]) + joint_pos_bias[3][0]} {0.0825*np.sin(joint_ang_bias[2]) + joint_pos_bias[3][1]} {joint_pos_bias[3][2]}"/>
<parent link="panda_link3"/>
<child link="panda_link4"/>
<axis xyz="0 0 1"/>
<limit effort="87" lower="-3.1416" upper="0.0" velocity="2.1750"/>
</joint>
<link name="panda_link4">
<inertial>
<origin rpy="0 0 0" xyz="-0.03 0.03 0.02"/>
<mass value="2.08"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[3]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/visual/link4.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link4.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint5" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/>
<origin rpy="-1.57079632679 0 {joint_ang_bias[3]}" xyz="{-0.0825*np.cos(joint_ang_bias[3])-0.384*np.sin(joint_ang_bias[3]) + joint_pos_bias[4][0]} {-0.0825*np.sin(joint_ang_bias[3])+0.384*np.cos(joint_ang_bias[3]) + joint_pos_bias[4][1]} {joint_pos_bias[4][2]}"/>
<parent link="panda_link4"/>
<child link="panda_link5"/>
<axis xyz="0 0 1"/>
<limit effort="12" lower="-2.9671" upper="2.9671" velocity="2.6100"/>
</joint>
<link name="panda_link5">
<inertial>
<origin rpy="0 0 0" xyz="0 0.04 -0.12"/>
<mass value="3"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[4]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/visual/link5.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link5.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint6" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-0.0175" soft_upper_limit="3.7525"/>
<origin rpy="1.57079632679 0 {joint_ang_bias[4]}" xyz="{joint_pos_bias[5][0]} {joint_pos_bias[5][1]} {joint_pos_bias[5][2]}"/>
<parent link="panda_link5"/>
<child link="panda_link6"/>
<axis xyz="0 0 1"/>
<limit effort="12" lower="-0.0873" upper="3.8223" velocity="2.6100"/>
</joint>
<link name="panda_link6">
<inertial>
<origin rpy="0 0 0" xyz="0.04 0 0"/>
<mass value="1.3"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[5]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/visual/link6.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link6.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint7" type="revolute">
<safety_controller k_position="100.0" k_velocity="40.0" soft_lower_limit="-2.8973" soft_upper_limit="2.8973"/>
<origin rpy="1.57079632679 0 {joint_ang_bias[5]}" xyz="{0.088*np.cos(joint_ang_bias[5]) + joint_pos_bias[6][0]} {0.088*np.sin(joint_ang_bias[5]) + joint_pos_bias[6][1]} {joint_pos_bias[6][2]}"/>
<parent link="panda_link6"/>
<child link="panda_link7"/>
<axis xyz="0 0 1"/>
<limit effort="12" lower="-2.9671" upper="2.9671" velocity="2.6100"/>
</joint>
<link name="panda_link7">
<inertial>
<origin rpy="0 0 0" xyz="0 0 0.08"/>
<mass value=".2"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 {joint_ang_bias[6]}" xyz = "0 0 0" />
<geometry>
<mesh filename="package://meshes/collision/link7.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/link7.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_joint8" type="fixed">
<origin rpy="0 0 {joint_ang_bias[6]}" xyz="{joint_pos_bias[7][0]} {joint_pos_bias[7][1]} {0.107 + joint_pos_bias[7][2]}"/>
<parent link="panda_link7"/>
<child link="panda_link8"/>
<axis xyz="0 0 0"/>
</joint>
<link name="panda_link8">
<inertial>
<origin rpy="0 0 0" xyz="0 0 0"/>
<mass value="0.0"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
</link>
<joint name="panda_hand_joint" type="fixed">
<parent link="panda_link8"/>
<child link="panda_hand"/>
<origin rpy="0 0 -0.785398163397" xyz="{joint_pos_bias[8][0]} {joint_pos_bias[8][1]} {joint_pos_bias[8][2]}"/>
</joint>
<link name="panda_hand">
<inertial>
<origin rpy="0 0 0" xyz="0 0 0.04"/>
<mass value=".81"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<geometry>
<mesh filename="package://meshes/visual/hand.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/hand.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<link name="panda_leftfinger">
<contact>
<friction_anchor/>
<stiffness value="30000.0"/>
<damping value="1000.0"/>
<spinning_friction value="0.1"/>
<lateral_friction value="1.0"/>
</contact>
<inertial>
<origin rpy="0 0 0" xyz="0 0.01 0.02"/>
<mass value="0.1"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<geometry>
<mesh filename="package://meshes/visual/finger.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<geometry>
<mesh filename="package://meshes/collision/finger.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<link name="panda_rightfinger">
<contact>
<friction_anchor/>
<stiffness value="30000.0"/>
<damping value="1000.0"/>
<spinning_friction value="0.1"/>
<lateral_friction value="1.0"/>
</contact>
<inertial>
<origin rpy="0 0 0" xyz="0 -0.01 0.02"/>
<mass value="0.1"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
<visual>
<origin rpy="0 0 3.14159265359" xyz="0 0 0"/>
<geometry>
<mesh filename="package://meshes/visual/finger.obj"/>
</geometry>
<material name="panda_white"/>
</visual>
<collision>
<origin rpy="0 0 3.14159265359" xyz="0 0 0"/>
<geometry>
<mesh filename="package://meshes/collision/finger.obj"/>
</geometry>
<material name="panda_white"/>
</collision>
</link>
<joint name="panda_finger_joint1" type="prismatic">
<parent link="panda_hand"/>
<child link="panda_leftfinger"/>
<origin rpy="0 0 0" xyz="0 0 0.0584"/>
<axis xyz="0 1 0"/>
<limit effort="20" lower="0.0" upper="0.04" velocity="0.2"/>
</joint>
<joint name="panda_finger_joint2" type="prismatic">
<parent link="panda_hand"/>
<child link="panda_rightfinger"/>
<origin rpy="0 0 0" xyz="0 0 0.0584"/>
<axis xyz="0 -1 0"/>
<limit effort="20" lower="0.0" upper="0.04" velocity="0.2"/>
<mimic joint="panda_finger_joint1"/>
</joint>
<link name="panda_grasptarget">
<inertial>
<origin rpy="0 0 0" xyz="0 0 0"/>
<mass value="0.0"/>
<inertia ixx="0.1" ixy="0" ixz="0" iyy="0.1" iyz="0" izz="0.1"/>
</inertial>
</link>
<joint name="panda_grasptarget_hand" type="fixed">
<parent link="panda_hand"/>
<child link="panda_grasptarget"/>
<origin rpy="0 0 0" xyz="0 0 0.105"/>
</joint>
</robot>
"""
return robot_urdf
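# Hedged usage sketch (illustrative; not part of the original module). Writes
# a URDF with a small calibration bias on the first joint angle; the output
# file name is a placeholder.
if __name__ == '__main__':
    urdf = generate_custom_urdf(joint_ang_bias=(0.01, 0, 0, 0, 0, 0, 0))
    with open('panda_randomized.urdf', 'w') as f:
        f.write(urdf)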
|
{"hexsha": "0c7ca66879a2bfc22af1393dd38dcef86cb11e50", "size": 13159, "ext": "py", "lang": "Python", "max_stars_repo_path": "crisp/robots/panda_urdf_randomizer.py", "max_stars_repo_name": "traversaro/CRiSP-for-Misspecified-Robot-Model", "max_stars_repo_head_hexsha": "6b5e0c28dc52463ac3b1c72c61fea0c5f7ac21f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-21T16:09:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T10:51:40.000Z", "max_issues_repo_path": "crisp/robots/panda_urdf_randomizer.py", "max_issues_repo_name": "traversaro/CRiSP-for-Misspecified-Robot-Model", "max_issues_repo_head_hexsha": "6b5e0c28dc52463ac3b1c72c61fea0c5f7ac21f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crisp/robots/panda_urdf_randomizer.py", "max_forks_repo_name": "traversaro/CRiSP-for-Misspecified-Robot-Model", "max_forks_repo_head_hexsha": "6b5e0c28dc52463ac3b1c72c61fea0c5f7ac21f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-02T16:50:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-04T15:02:19.000Z", "avg_line_length": 37.0676056338, "max_line_length": 267, "alphanum_fraction": 0.5720039517, "include": true, "reason": "import numpy", "num_tokens": 4495}
|
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LeakyReLU
##
import pandas as pd
import pdb
import numpy as np
import random
import math
import argparse
import matplotlib.pyplot as plt
from keras import losses
from keras import models
from keras import regularizers
from keras.layers.core import Dense
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.recurrent import LSTM
from keras.layers.recurrent import GRU
from keras.layers.convolutional import Conv1D
from keras.callbacks import TensorBoard
from tensorflow.keras.optimizers import Adam
from keras.layers.advanced_activations import LeakyReLU
from shutil import copyfile
##
# Neural network
def train_network(params_dict, train_X, train_Y):
'''
    inputs: parameters dictionary containing layer sizes, mapped by indices
    outputs: trained Keras model
'''
# model = Sequential()
# model.add(Dense(1, input_dim=input_dim))
    # # model.add(Dense(16, input_dim=20, activation='relu'))
    # model.add(Dense(12, activation='relu'))
    # model.add(Dense(4, activation='softmax'))
num_inputs = params_dict["num_inputs"]
layer_sizes = params_dict["layer_sizes"]
model = Sequential()
model.add(Dense(num_inputs)) #, input_shape=(input_dim,))) # params_dict["num_inputs"]
model.add(LeakyReLU(alpha=0.03))
for size in layer_sizes:
model.add(Dense(size))
model.add(LeakyReLU(alpha=0.03))
model.add(Dense(1))
model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=['mse'])
if("epochs" in params_dict):
model.fit(train_X, train_Y, epochs=params_dict["epochs"])
else:
model.fit(train_X, train_Y, epochs=300)
return model
# For users that don't want to specify network-specific params
def mlp_network(params_dict, train_X, train_Y):
num_inputs = params_dict["num_inputs"]
return train_network({"num_inputs" : num_inputs, "layer_sizes": [10, 10, 6], "epochs": 100}, train_X, train_Y)
# train_X = [1, 2, 3]
# train_Y = [4, 5, 6]
def test_network(test_X, model):
return model.predict(test_X)
# test sine
def test():
# lindata = pd.read_csv('../traindata/lindata.csv')
# lindata = np.genfromtxt('../traindata/lindata.csv')
sine_train_data = np.genfromtxt('../traindata/sine_train_data.csv')
sine_test_data = np.genfromtxt('../traindata/sine_test_data.csv')
# train_X = lindata[:, 0]
# train_Y = lindata[:, 1]
train_X = sine_train_data[:, 0]
train_Y = sine_train_data[:, 1]
# model = train_network({"num_inputs": 1, "layer_sizes": [100], "epochs": 200}, train_X, train_Y)
model = mlp_network({"num_inputs": 1}, train_X, train_Y)
# model = train_network({}, train_X, train_Y)
test_Y = test_network(train_X, model)
print(test_Y)
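# Hedged usage sketch (illustrative; not part of the original module). Trains
# on a synthetic sine curve instead of the CSV files that test() expects.
def _demo_train_network():
    xs = np.linspace(0, 2 * np.pi, 200).reshape(-1, 1)
    ys = np.sin(xs)
    model = train_network({"num_inputs": 1, "layer_sizes": [16, 16], "epochs": 50}, xs, ys)
    print(test_network(xs, model)[:5])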
|
{"hexsha": "23adce8124adb328422444d56f065feccfa5d939", "size": 2920, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nn.py", "max_stars_repo_name": "plug-ml/engine", "max_stars_repo_head_hexsha": "d713a0f4c10690667f68906b6fc6241c77d172a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/nn.py", "max_issues_repo_name": "plug-ml/engine", "max_issues_repo_head_hexsha": "d713a0f4c10690667f68906b6fc6241c77d172a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/nn.py", "max_forks_repo_name": "plug-ml/engine", "max_forks_repo_head_hexsha": "d713a0f4c10690667f68906b6fc6241c77d172a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4166666667, "max_line_length": 114, "alphanum_fraction": 0.7133561644, "include": true, "reason": "import numpy", "num_tokens": 751}
|
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import scipy as sp
import scipy.stats
import matplotlib as mpl
import matplotlib.pyplot as plt
def jitter(data, bins=100):
data = np.asarray(data)
(hist,edges) = np.histogram(data,bins=bins)
hist = np.float_(hist) / max(hist)
idxs = np.searchsorted(edges[:-2],data)
return hist[idxs]
def jitter_x(x,y,width=None,bins=100):
x = np.asarray(x)
y = np.asarray(y)
x_argsort = np.argsort(x)
x_uniq = sorted(list(set(x)))
# find smallest interval between any two x-values
    if width is None:
if len(x_uniq) == 1:
width = 1.
else:
interval = min([x[x_argsort[i+1]]-x[x_argsort[i]] for i in xrange(len(x)-1)])
width = interval / 3.
x_jit = []
y_jit = []
for val in x_uniq:
idx = (x==val)
scaling_factors = jitter(y[idx],bins=bins)
for (x_val,y_val,scaling) in zip(x[idx],y[idx],scaling_factors):
x_jit.append( x_val + width * scaling * random.choice([-1,1]) * np.random.uniform(0,1))
y_jit.append( y_val )
return (x_jit,y_jit)
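# Illustrative usage sketch (hypothetical data): jitter_x spreads overlapping
# points horizontally, scaled by the local density of y-values at each x.
#
#   x = [1] * 50 + [2] * 50
#   y = list(np.random.normal(0, 1, 50)) + list(np.random.normal(3, 1, 50))
#   x_jit, y_jit = jitter_x(x, y, width=0.1)
#   plt.scatter(x_jit, y_jit, s=5)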
# def jitter_x(x,y,width=None):
# x = np.asarray(x)
# y = np.asarray(y)
#
# x_argsort = np.argsort(x)
# x_uniq = sorted(list(set(x)))
#
# # find smallest interval between any two x-values
# if width == None:
# interval = min([x[x_argsort[i+1]]-x[x_argsort[i]] for i in xrange(len(x)-1)])
# width = interval / 3.
#
# x_jit = []
# y_jit = []
# for val in x_uniq:
# idx = (x==val)
# kernel = sp.stats.kde.gaussian_kde(y[idx])
# kernel_max = max([kernel(v) for v in set(y[idx])])
# for (x_val,y_val) in zip(x[idx],y[idx]):
# x_jit.append( x_val + np.random.uniform(-1,1) * width * kernel(y_val) / kernel_max)
# y_jit.append( y_val )
#
# return (x_jit,y_jit)
class ConstWidthRectangle(mpl.patches.Patch):
def __init__(self, x, y1, y2, w, **kwargs):
self.x = x
self.y1 = y1
self.y2 = y2
self.w = w
mpl.patches.Patch.__init__(self,**kwargs)
def get_path(self):
return mpl.path.Path.unit_rectangle()
def get_transform(self):
box = np.array([[self.x,self.y1],
[self.x,self.y2]])
box = self.axes.transData.transform(box)
w = self.w * self.axes.bbox.width / 2.0
box[0,0] -= w
box[1,0] += w
return mpl.transforms.BboxTransformTo(mpl.transforms.Bbox(box))
class ConstWidthLine(mpl.lines.Line2D):
def __init__(self,x,y,w,**kwargs):
self.x = x
self.y = y
self.w = w
mpl.lines.Line2D.__init__(self,[0,1],[0,0],**kwargs) # init to unit line
def get_transform(self):
# define transform that takes unit horiz line seg
# and places it in correct position using display
# coords
box = np.array([[self.x,self.y],
[self.x,self.y+1]])
box = self.axes.transData.transform(box)
w = self.w * self.axes.bbox.width / 2.0
box[0,0] -= w
box[1,0] += w
#xdisp,ydisp = self.axes.transData.transform_point([self.x,self.y])
#xdisp -= w
#xleft = xdisp - w
#xright = xdisp + w
return mpl.transforms.BboxTransformTo(mpl.transforms.Bbox(box))
#return mpl.transforms.Affine2D().scale(w,1).translate(xdisp,ydisp)
def draw(self,renderer):
# the ONLY purpose of redefining this function is to force the Line2D
# object to execute recache(). Otherwise, certain changes in the scale
# do not invalidate the Line2D object, and the transform will not be
# recomputed (and so the Axes coords computed earlier will be obsolete)
self.recache()
return mpl.lines.Line2D.draw(self,renderer)
class ConstHeightRectangle(mpl.patches.Patch):
def __init__(self, x1, x2, y, h, **kwargs):
self.x1 = x1
self.x2 = x2
self.y = y
self.h = h
mpl.patches.Patch.__init__(self,**kwargs)
def get_path(self):
return mpl.path.Path.unit_rectangle()
def get_transform(self):
box = np.array([[self.x1,self.y],
[self.x2,self.y]])
box = self.axes.transData.transform(box)
h = self.h * self.axes.bbox.height / 2.0
box[0,1] -= h
box[1,1] += h
return mpl.transforms.BboxTransformTo(mpl.transforms.Bbox(box))
class ConstHeightLine(mpl.lines.Line2D):
def __init__(self,x,y,h,**kwargs):
self.x = x
self.y = y
self.h = h
mpl.lines.Line2D.__init__(self,[0,0],[0,1],**kwargs) # init to unit line
# self.x = x
# self.y = y
# self.w = w
# mpl.lines.Line2D.__init__(self,[0,1],[0,0],**kwargs) # init to unit line
def get_transform(self):
        # define transform that takes unit vert line seg
        # and places it in correct position using display
        # coords
box = np.array([[self.x,self.y],
[self.x+1,self.y]])
box = self.axes.transData.transform(box)
h = self.h * self.axes.bbox.height / 2.0
box[0,1] -= h
box[1,1] += h
#xdisp,ydisp = self.axes.transData.transform_point([self.x,self.y])
#xdisp -= w
#xleft = xdisp - w
#xright = xdisp + w
return mpl.transforms.BboxTransformTo(mpl.transforms.Bbox(box))
#return mpl.transforms.Affine2D().scale(w,1).translate(xdisp,ydisp)
def draw(self,renderer):
# the ONLY purpose of redefining this function is to force the Line2D
# object to execute recache(). Otherwise, certain changes in the scale
# do not invalidate the Line2D object, and the transform will not be
# recomputed (and so the Axes coords computed earlier will be obsolete)
self.recache()
return mpl.lines.Line2D.draw(self,renderer)
def boxplot(ax, x, positions=None, widths=None, vert=1):
# adapted from matplotlib
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError, "input x can have no more than 2 dimensions"
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
widths = min(0.3/len(positions),0.05)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
if row==0:
# no data, skip this position
continue
# get distrib info
q1, med, q3 = mpl.mlab.prctile(d,[25,50,75])
dmax = np.max(d)
dmin = np.min(d)
line_color = '#074687'
face_color = '#96B7EC'
if vert == 1:
medline = ConstWidthLine(pos,med,widths[i],color=line_color,zorder=3)
box = ConstWidthRectangle(pos,q1,q3,widths[i],facecolor=face_color,edgecolor=line_color,zorder=2)
vertline = mpl.lines.Line2D([pos,pos],[dmin,dmax],color=line_color,zorder=1)
else:
medline = ConstHeightLine(med,pos,widths[i],color=line_color,zorder=3)
box = ConstHeightRectangle(q1,q3,pos,widths[i],facecolor=face_color,edgecolor=line_color,zorder=2)
vertline = mpl.lines.Line2D([dmin,dmax],[pos,pos],color=line_color,zorder=1)
ax.add_line(vertline)
ax.add_patch(box)
ax.add_line(medline)
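# Illustrative usage sketch for the custom boxplot (hypothetical data; set the
# axis limits explicitly, since the constant-width artists use display-space
# transforms and do not feed the autoscaler):
#
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   boxplot(ax, [np.random.normal(0, 1, 100), np.random.normal(2, 1, 100)])
#   ax.set_xlim(0, 3); ax.set_ylim(-4, 6)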
# define colormap for -1 to 1 (green-black-red) like gene expression
_redgreencdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 0.0)],
'green':[(0.0, 0.0, 1.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
redgreen = mpl.colors.LinearSegmentedColormap('redgreen',_redgreencdict,256)
redgreen.set_bad(color='w')
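# Illustrative sketch: render expression-like data in [-1, 1] with the
# colormap above (masked entries draw white via set_bad):
#
#   data = np.random.uniform(-1, 1, (10, 10))
#   plt.imshow(data, cmap=redgreen, vmin=-1, vmax=1, interpolation='nearest')
#   plt.colorbar()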
def compute_log_view_lim(data):
lo_lim = 10**np.floor(np.log10(np.min(data)))
hi_lim = 10**np.ceil(np.log10(np.max(data)))
return (lo_lim, hi_lim)
def generate_counthist(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):
"""Generate count size histogram.
counts -- dictionary of (key,count) pairs
label -- for the legend
"""
max_size = max(counts.values())
num_chains = sum(counts.values())
sizes = np.arange(1,max_size+1)
freqs = np.float_(sizes) / num_chains
(hist,garbage) = np.histogram(counts.values(),bins=sizes)
idxs = hist > 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax2 = ax.twiny()
ax.spines['top'].set_position(('outward',5))
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('outward',5))
ax.spines['left'].set_position(('outward',5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.plot(freqs[idxs],hist[idxs],marker='o',linestyle='None',color='#e31a1c',markeredgewidth=0,markersize=4,clip_on=False,label=label)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(view_lim[:2])
ax.set_ylim(view_lim[2:])
ax2.spines['top'].set_position(('outward',5))
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.xaxis.set_ticks_position('top')
ax2.yaxis.set_ticks_position('none')
ax2.set_xscale('log')
ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])
ax.set_xlabel('junction frequency (bottom) or count (top)')
ax.set_ylabel('number of junctions')
leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))
leg.get_frame().set_visible(False)
return fig
def generate_counthistline(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):
"""Generate count size histogram.
counts -- dictionary of (key,count) pairs
label -- for the legend
"""
max_size = max(counts.values())
num_chains = sum(counts.values())
bins = np.logspace(0,np.log10(max_size),21)
bins_freqs = np.float_(bins) / num_chains
(hist,garbage) = np.histogram(counts.values(),bins=bins)
fig = plt.figure()
ax = fig.add_subplot(111)
ax2 = ax.twiny()
ax.spines['top'].set_position(('outward',5))
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('outward',5))
ax.spines['left'].set_position(('outward',5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.plot(bins_freqs,list(hist)+[hist[-1]],color='#e31a1c',drawstyle='steps-post',clip_on=False,label=label)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(view_lim[:2])
ax.set_ylim(view_lim[2:])
ax2.spines['top'].set_position(('outward',5))
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.xaxis.set_ticks_position('top')
ax2.yaxis.set_ticks_position('none')
ax2.set_xscale('log')
ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])
ax.set_xlabel('junction frequency (bottom) or count (top)')
ax.set_ylabel('number of junctions')
leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))
leg.get_frame().set_visible(False)
return fig
def generate_rankaccum(counts,label,view_lim=[1e0,1e5,1e-6,1e0]):
"""Generate rankaccum curve.
counts -- dictionary of (key,count) pairs
label -- for the legend
"""
num_chains = sum(counts.values())
freqs = np.float_(counts.values()) / num_chains
fig = plt.figure()
ax = fig.add_subplot(111)
ax2 = ax.twinx()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_position(('outward',5))
ax.spines['bottom'].set_position(('outward',5))
ax.spines['left'].set_position(('outward',5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.plot(range(1,len(counts.values())+1),sorted(freqs,reverse=True),marker='o',linestyle='None',color='#377db8',markeredgewidth=0,markersize=4,clip_on=False,label=label)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(view_lim[:2])
ax.set_ylim(view_lim[2:])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_position(('outward',5))
ax2.spines['bottom'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.xaxis.set_ticks_position('none')
ax2.yaxis.set_ticks_position('right')
ax2.set_yscale('log')
ax2.set_ylim([view_lim[2]*num_chains,view_lim[3]*num_chains])
ax.set_xlabel('rank')
ax.set_ylabel('junction frequency (left) or count (right)')
leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))
leg.get_frame().set_visible(False)
return fig
|
{"hexsha": "2b2238427295a70caf4a34b16d537f29e39b4f36", "size": 14490, "ext": "py", "lang": "Python", "max_stars_repo_path": "ulutil/mplextensions.py", "max_stars_repo_name": "churchlab/ulutil", "max_stars_repo_head_hexsha": "7f9a427274acd99ae4d2dfe35123feb7c2dc9625", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-04T13:50:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-04T13:50:01.000Z", "max_issues_repo_path": "ulutil/mplextensions.py", "max_issues_repo_name": "churchlab/ulutil", "max_issues_repo_head_hexsha": "7f9a427274acd99ae4d2dfe35123feb7c2dc9625", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ulutil/mplextensions.py", "max_forks_repo_name": "churchlab/ulutil", "max_forks_repo_head_hexsha": "7f9a427274acd99ae4d2dfe35123feb7c2dc9625", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5416666667, "max_line_length": 172, "alphanum_fraction": 0.5895100069, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4041}
|
# coding: utf-8
"""
Name: low_pressure_detecter.py
detecting low pressure.
Usage: python3 low_pressure_detecter.py
Author: Ryosuke Tomita
Date: 2021/12/19
"""
import argparse
from os.path import abspath, dirname, join
import sys
import numpy as np
from scipy import ndimage
import japanmap
sys.path.append(join(abspath(dirname(__file__)), "../cyclonetrack"))
import readnc
import fetchtime
def parse_args() -> dict:
"""parse_args.
set file path.
Args:
Returns:
dict:
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="set ncfile.", type=str)
parser.add_argument("-t", "--type", help="GPV or jra55", type=str)
p = parser.parse_args()
args = {"file": p.file, "type": p.type}
return args
def d_from_filterd_min(prmsl: np.ndarray, lat: np.ndarray, lon: np.ndarray) -> np.ndarray:
"""d_from_filterd_min.
Args:
prmsl (np.ndarray): prmsl
lat (np.ndarray): lat
lon (np.ndarray): lon
Returns:
np.ndarray:
"""
# minimum value filter
filterd_prmsl = np.where(
ndimage.filters.minimum_filter(
prmsl, size=(9, 9), mode=('nearest', 'wrap')
) == prmsl
)
    # spherical trigonometry
dx_s = np.array([
np.deg2rad(lon[filterd_prmsl[1]] - lo)
for lo in lon
])
y0_s = np.deg2rad(lat)
y1_s = np.deg2rad(lat[filterd_prmsl[0]])
cos_d_part1 = np.array([
np.sin(y0) * np.sin(y1_s)
for y0 in y0_s
])
cos_d_part2_ = np.array([
np.cos(y0) * np.cos(y1_s)
for y0 in y0_s
])
cos_d_part2 = np.array([
cos_d_part2_[i] * np.cos(dx)
for i in range(len(cos_d_part2_))
for dx in dx_s
]).reshape(len(cos_d_part2_), len(dx_s), len(filterd_prmsl[0]))
cos_d = np.array([
cos_d_part1[i] + cos_d_part2[i][j]
for i in range(len(cos_d_part1))
for j in range(len(dx_s))
]).T.reshape(len(filterd_prmsl[0]), len(cos_d_part2_), len(dx_s))
cos_d[cos_d > 1.0] = 1.0
d_from_min = np.arccos(cos_d) * 6400
return d_from_min
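# Illustrative sketch: a scalar reference for the spherical law of cosines
# vectorized above, using the same Earth radius of 6400 km.
def _great_circle_km(lat0: float, lon0: float, lat1: float, lon1: float) -> float:
    """Great-circle distance via cos d = sin(y0)sin(y1) + cos(y0)cos(y1)cos(dx)."""
    y0, y1 = np.deg2rad(lat0), np.deg2rad(lat1)
    dx = np.deg2rad(lon1 - lon0)
    cos_d = min(1.0, np.sin(y0) * np.sin(y1) + np.cos(y0) * np.cos(y1) * np.cos(dx))
    return float(np.arccos(cos_d) * 6400)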
def _around_mean(prmsl, i: int, j: int):
    """_around_mean.
    Mean pressure of the eight grid points surrounding (i, j).
    Args:
        prmsl: sea-level pressure grid
        i: latitude index of the centre point
        j: longitude index of the centre point
    """
    sum_data = 0
    for di in range(-1, 2):
        for dj in range(-1, 2):
            if di == 0 and dj == 0:
                continue
            sum_data += prmsl[i + di][j + dj]
    return sum_data / 8
def define_low_prmsl(prmsl: np.ndarray, d: np.ndarray) -> np.ndarray:
"""define_low_prmsl.
Args:
prmsl (np.ndarray): prmsl
d (np.ndarray): d
Returns:
np.ndarray:
"""
min_around = np.where(d <= 300)
n = np.argmin(prmsl[min_around])
min_lat_index = min_around[0][n]
min_lon_index = min_around[1][n]
prmsl_min_around_mean = _around_mean(prmsl, min_lat_index, min_lon_index)
prmsl_min = prmsl[min_around].min()
if prmsl_min_around_mean - prmsl_min >= 0.5:
return np.where(d <= 300, True, np.nan)
def output_name(ncfile: str) -> str:
"""output_name.
Args:
ncfile (str): ncfile
Returns:
str:
"""
date_time = fetchtime.fetch_time(ncfile)
if "_06" in ncfile or "_18" in ncfile:
date_time = fetchtime.fix_datetime(date_time)
outname = (date_time + "low_pressure")
return outname
def main():
"""main.
"""
args = parse_args()
ncfile = args["file"]
calc_phys = readnc.CalcPhysics(ncfile, args["type"])
jp_lat, jp_lon = calc_phys.get_lat_lon()
if args["type"] == "GPV":
prmsl = calc_phys.get_parameter("prmsl")
else:
prmsl = calc_phys.get_parameter("msl") / 100
d_from_min = d_from_filterd_min(prmsl, jp_lat, jp_lon)
jp_map = japanmap.JpMap()
jp_map.contour_plot(jp_lon, jp_lat, prmsl, contour_type="pressure")
for d in d_from_min:
min_around = define_low_prmsl(prmsl, d)
if min_around is not None:
jp_map.hatch_plot(jp_lon, jp_lat, min_around)
outname = output_name(ncfile)
jp_map.save_fig(outname, None)
if __name__ == "__main__":
main()
|
{"hexsha": "215fe4b8ffaf81e1a7ce57bf0f89846d0dca3d59", "size": 4159, "ext": "py", "lang": "Python", "max_stars_repo_path": "analyze_tool/low_pressure_detecter.py", "max_stars_repo_name": "RyosukeDTomita/cyclone_track", "max_stars_repo_head_hexsha": "91238a7a8af2e23b3ae697c35f3b6c40516dc80a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analyze_tool/low_pressure_detecter.py", "max_issues_repo_name": "RyosukeDTomita/cyclone_track", "max_issues_repo_head_hexsha": "91238a7a8af2e23b3ae697c35f3b6c40516dc80a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analyze_tool/low_pressure_detecter.py", "max_forks_repo_name": "RyosukeDTomita/cyclone_track", "max_forks_repo_head_hexsha": "91238a7a8af2e23b3ae697c35f3b6c40516dc80a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-31T09:08:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-31T09:08:53.000Z", "avg_line_length": 24.1802325581, "max_line_length": 90, "alphanum_fraction": 0.5991824958, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1262}
|
import numpy as np
from model.filters.dotproduct import DotProduct
class Sepia(DotProduct):
def __init__(self):
pass
def apply(self, matrix):
kernel = np.array([[0.393, 0.769, 0.189],
[0.349, 0.686, 0.168],
[0.272, 0.534, 0.131]])
return self.fast_dotproduct(matrix, kernel)
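# Illustrative usage sketch (assumes `image` is an H x W x 3 RGB numpy array;
# fast_dotproduct, inherited from DotProduct, is assumed to apply the kernel
# to each pixel's RGB vector). The kernel above is the standard sepia matrix.
#
#   sepia = Sepia()
#   toned = sepia.apply(image)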
|
{"hexsha": "e5dc2923bb78cadd0fb74b89659a5f12627ef110", "size": 366, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/filters/sepia.py", "max_stars_repo_name": "DennisPing/image-processor-mvc", "max_stars_repo_head_hexsha": "b687185500404a84f21e16b6c56937be4afc5a1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/filters/sepia.py", "max_issues_repo_name": "DennisPing/image-processor-mvc", "max_issues_repo_head_hexsha": "b687185500404a84f21e16b6c56937be4afc5a1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/filters/sepia.py", "max_forks_repo_name": "DennisPing/image-processor-mvc", "max_forks_repo_head_hexsha": "b687185500404a84f21e16b6c56937be4afc5a1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1428571429, "max_line_length": 51, "alphanum_fraction": 0.5464480874, "include": true, "reason": "import numpy", "num_tokens": 100}
|
from __future__ import division
import os, sys, shutil, time, random, math
import argparse
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch
from torch.optim import SGD
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.utils.data.distributed
from scalablebdl.mean_field import PsiSGD, to_bayesian, to_deterministic
from scalablebdl.bnn_utils import freeze, unfreeze, disable_dropout
from utils import AverageMeter, RecorderMeter, time_string, \
convert_secs2time, _ECELoss, plot_mi, plot_ens, ent, accuracy, \
reduce_tensor, dist_collect, print_log, save_checkpoint, verify
from dataset.face import load_dataset_ft
import models.mobilenet as models
parser = argparse.ArgumentParser(description='Training script for Face', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Data / Model
parser.add_argument('--data_path', metavar='DPATH', type=str,
default='/data/xiaoyang/data/faces_emore/')
parser.add_argument('--data_path_fake', metavar='DPATH', type=str,
default='/data/zhijie/autobayes/deepfake_samples/face/')
parser.add_argument('--dataset', metavar='DSET', type=str, default='face')
parser.add_argument('--arch', metavar='ARCH', default='mobilenet_v2')
# Optimization
parser.add_argument('--epochs', metavar='N', type=int, default=16)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--learning_rate', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--schedule', type=int, nargs='+', default=[4, 8, 12],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1, 0.1],
help='LR for psi is multiplied by gamma on schedule')
#Regularization
parser.add_argument('--decay', type=float, default=5e-4,
help='Weight decay')
# Checkpoints
parser.add_argument('--save_path', type=str, default='/data/zhijie/snapshots_ba/',
help='Folder to save checkpoints and log.')
parser.add_argument('--job-id', type=str, default='bayesadapter-face')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Path to latest checkpoint (default: none)')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N')
parser.add_argument('--evaluate', dest='evaluate', action='store_true',
help='Evaluate model on test set')
# Acceleration
parser.add_argument('--workers', type=int, default=4,
help='number of data loading workers (default: 4)')
# Random seed
parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
# Bayesian
parser.add_argument('--psi_init_range', type=float, nargs='+', default=[-6, -5])
parser.add_argument('--num_fake', type=int, default=1000)
parser.add_argument('--uncertainty_threshold', type=float, default=0.75)
# Fake generated data augmentation
parser.add_argument('--blur_prob', type=float, default=0.5)
parser.add_argument('--blur_sig', type=float, nargs='+', default=[0., 3.])
parser.add_argument('--jpg_prob', type=float, default=0.5)
parser.add_argument('--jpg_method', type=str, nargs='+', default=['cv2', 'pil'])
parser.add_argument('--jpg_qual', type=int, nargs='+', default=[30, 100])
# Attack settings
parser.add_argument('--epsilon', default=16./255., type=float,
help='perturbation')
parser.add_argument('--num-steps', default=20, type=int,
help='perturb number of steps')
parser.add_argument('--step-size', default=1./255., type=float,
help='perturb step size')
parser.add_argument('--random', default=True,
help='random initialization for PGD')
# Dist
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-port', default='1234', type=str,
help='port used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc = 0
def main():
args = parser.parse_args()
if not os.path.isdir(args.data_path): os.makedirs(args.data_path)
job_id = args.job_id
args.save_path = args.save_path + job_id
if not os.path.isdir(args.save_path): os.makedirs(args.save_path)
args.use_cuda = torch.cuda.is_available()
if args.manualSeed is None: args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
np.random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if args.use_cuda: torch.cuda.manual_seed_all(args.manualSeed)
cudnn.deterministic = True
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
else:
args.multiprocessing_distributed = True
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
args.world_size = ngpus_per_node * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc
args.gpu = gpu
assert args.gpu is not None
print("Use GPU: {} for training".format(args.gpu))
log = open(os.path.join(args.save_path, 'log_seed{}{}.txt'.format(
args.manualSeed, '_eval' if args.evaluate else '')), 'w')
log = (log, args.gpu)
net = models.__dict__[args.arch](pretrained=True, num_classes=10341)
disable_dropout(net)
net = to_bayesian(net, args.psi_init_range)
unfreeze(net)
print_log("Python version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("PyTorch version : {}".format(torch.__version__), log)
print_log("CuDNN version : {}".format(torch.backends.cudnn.version()), log)
print_log("Number of parameters: {}".format(sum([p.numel() for p in net.parameters()])), log)
print_log(str(args), log)
if args.distributed:
if args.multiprocessing_distributed:
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url+":"+args.dist_port,
world_size=args.world_size, rank=args.rank)
torch.cuda.set_device(args.gpu)
net.cuda(args.gpu)
args.batch_size = int(args.batch_size / ngpus_per_node)
net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[args.gpu])
else:
torch.cuda.set_device(args.gpu)
net = net.cuda(args.gpu)
criterion = torch.nn.CrossEntropyLoss().cuda(args.gpu)
mus, psis = [], []
for name, param in net.named_parameters():
if 'psi' in name: psis.append(param)
else: mus.append(param)
mu_optimizer = SGD(mus, args.learning_rate, args.momentum,
weight_decay=args.decay)
psi_optimizer = PsiSGD(psis, args.learning_rate, args.momentum,
weight_decay=args.decay)
recorder = RecorderMeter(args.epochs)
if args.resume:
if args.resume == 'auto':
args.resume = os.path.join(args.save_path, 'checkpoint.pth.tar')
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume, map_location='cuda:{}'.format(args.gpu))
recorder = checkpoint['recorder']
recorder.refresh(args.epochs)
args.start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'] if args.distributed
else {k.replace('module.', ''): v for k,v in checkpoint['state_dict'].items()})
mu_optimizer.load_state_dict(checkpoint['mu_optimizer'])
psi_optimizer.load_state_dict(checkpoint['psi_optimizer'])
best_acc = recorder.max_accuracy(False)
print_log("=> loaded checkpoint '{}' accuracy={} (epoch {})".format(
args.resume, best_acc, checkpoint['epoch']), log)
else:
print_log("=> no checkpoint found at '{}'".format(args.resume), log)
else:
print_log("=> do not use any checkpoint for the model", log)
cudnn.benchmark = True
train_loader, ood_train_loader, val_loaders, fake_loader = load_dataset_ft(args)
psi_optimizer.num_data = len(train_loader.dataset)
if args.evaluate:
evaluate(val_loaders, fake_loader, net, criterion, args, log, 20, 100)
return
start_time = time.time()
epoch_time = AverageMeter()
train_los = -1
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_loader.sampler.set_epoch(epoch)
ood_train_loader.sampler.set_epoch(epoch)
cur_lr, cur_slr = adjust_learning_rate(mu_optimizer, psi_optimizer, epoch, args)
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs-epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print_log('\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f} {:6.4f}]'.format(
time_string(), epoch, args.epochs, need_time, cur_lr, cur_slr) \
+ ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False), 100-recorder.max_accuracy(False)), log)
train_acc, train_los = train(train_loader, ood_train_loader, net,
criterion, mu_optimizer, psi_optimizer,
epoch, args, log)
val_acc, val_los = 0, 0
recorder.update(epoch, train_los, train_acc, val_acc, val_los)
is_best = False
if val_acc > best_acc:
is_best = True
best_acc = val_acc
if args.gpu == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': net.state_dict(),
'recorder': recorder,
'mu_optimizer' : mu_optimizer.state_dict(),
'psi_optimizer' : psi_optimizer.state_dict(),
}, False, args.save_path, 'checkpoint.pth.tar')
epoch_time.update(time.time() - start_time)
start_time = time.time()
recorder.plot_curve(os.path.join(args.save_path, 'log.png'))
evaluate(val_loaders, fake_loader, net, criterion, args, log, 20, 100)
log[0].close()
def train(train_loader, ood_train_loader, model, criterion,
mu_optimizer, psi_optimizer, epoch, args, log):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
ur_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
ood_train_loader_iter = iter(ood_train_loader)
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
input1 = next(ood_train_loader_iter)
input1 = input1.cuda(args.gpu, non_blocking=True)
bs = input.shape[0]
bs1 = input1.shape[0]
output = model(torch.cat([input, input1.repeat(2, 1, 1, 1)]))
loss = criterion(output[:bs], target)
out1_0 = output[bs:bs+bs1].softmax(-1)
out1_1 = output[bs+bs1:].softmax(-1)
mi1 = ent((out1_0 + out1_1)/2.) - (ent(out1_0) + ent(out1_1))/2.
ur_loss = torch.nn.functional.relu(args.uncertainty_threshold - mi1).mean()
prec1, prec5 = accuracy(output[:bs], target, topk=(1, 5))
losses.update(loss.detach().item(), bs)
ur_losses.update(ur_loss.detach().item(), bs1)
top1.update(prec1.item(), bs)
top5.update(prec5.item(), bs)
mu_optimizer.zero_grad()
psi_optimizer.zero_grad()
(loss+ur_loss).backward()
mu_optimizer.step()
psi_optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i == len(train_loader) - 1:
print_log(' Epoch: [{:03d}][{:03d}/{:03d}] '
'Time {batch_time.avg:.3f} '
'Data {data_time.avg:.3f} '
'Loss {loss.avg:.4f} '
'UR Loss {ur_loss.avg:.4f} '
'Prec@1 {top1.avg:.3f} '
'Prec@5 {top5.avg:.3f} '.format(
epoch, i, len(train_loader), batch_time=batch_time,
ur_loss=ur_losses, data_time=data_time, loss=losses,
top1=top1, top5=top5) + time_string(), log)
return top1.avg, losses.avg
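# The uncertainty regularizer above estimates the mutual information
# I(y; w) = H(E_w[p(y|x,w)]) - E_w[H(p(y|x,w))] from the two stochastic
# forward copies of input1. A minimal standalone sketch (assumes `ent`,
# imported from utils, returns the entropy along the last dimension):
#
#   def mutual_information(probs_list):
#       mean_p = sum(probs_list) / len(probs_list)
#       return ent(mean_p) - sum(ent(p) for p in probs_list) / len(probs_list)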
def evaluate(val_loaders, fake_loader, net,
criterion, args, log, num_mc_samples, num_mc_samples2):
freeze(net)
if args.gpu == 0:
print("-----------------deterministic-----------------")
deter_rets = ens_validate(val_loaders, net, criterion, args, log, 1)
unfreeze(net)
if args.gpu == 0:
print("-----------------ensemble {} times-----------------".format(num_mc_samples2))
rets = ens_validate(val_loaders, net, criterion, args, log, num_mc_samples2)
ens_attack(val_loaders, net, criterion, args, log, num_mc_samples, min(num_mc_samples, 8))
if args.gpu == 0:
for k in val_loaders:
print_log('{} vs. adversarial: AP {}'.format(k[0],
plot_mi(args.save_path, 'adv_'+k[0], k[0])), log)
ens_validate(fake_loader, net, criterion, args, log, num_mc_samples, suffix='fake')
if args.gpu == 0:
for k in val_loaders:
print_log('{} vs. DeepFake: AP {}'.format(k[0],
plot_mi(args.save_path, 'fake', k[0])), log)
def ens_validate(val_loaders, model, criterion, args, log, num_ens=100, suffix=''):
model.eval()
if isinstance(val_loaders, list):
name, val_loader, issame = val_loaders[args.gpu % len(val_loaders)]
else:
name, val_loader, issame = suffix, val_loaders, None
with torch.no_grad():
with model.no_sync():
embeddings = []
mis = [0 for _ in range(len(val_loader))]
preds = [0 for _ in range(len(val_loader))]
for i, input in enumerate(val_loader):
if isinstance(input, tuple) or isinstance(input, list): input = input[0]
input = input.cuda(args.gpu, non_blocking=True)
embedding_b = 0
for ens in range(num_ens):
output, output_logits = model(input, return_both=True)
embedding_b += output/num_ens
mis[i] = (mis[i] * ens + (-output_logits.softmax(-1)
* output_logits.log_softmax(-1)).sum(1)) / (ens + 1)
preds[i] = (preds[i] * ens + output_logits.softmax(-1)) / (ens + 1)
norm = torch.norm(embedding_b, 2, 1, True)
embedding = torch.div(embedding_b, norm)
embeddings.append(embedding)
embeddings = torch.cat(embeddings).data.cpu().numpy()
preds = torch.cat(preds, 0)
mis = (- preds * preds.log()).sum(1) - (0 if num_ens == 1 else torch.cat(mis, 0))
if (isinstance(val_loaders, list) and args.gpu < len(val_loaders)) or \
((not isinstance(val_loaders, list)) and args.gpu == 0):
np.save(os.path.join(args.save_path, 'mis_{}.npy'.format(name)), mis.data.cpu().numpy())
if issame is not None:
tpr, fpr, accuracy, best_thresholds = verify(embeddings, issame, 10)
print_log(' **Test** {}: {:.3f}'.format(name, accuracy.mean()), log, True)
torch.distributed.barrier()
def ens_attack(val_loaders, model, criterion, args, log, num_ens=20, num_ens_a=8):
def _grad(X, y, mean, std):
with model.no_sync():
with torch.enable_grad():
X.requires_grad_()
output = model(X.sub(mean).div(std).repeat(num_ens_a, 1, 1, 1), True)
output = output.reshape(num_ens_a, X.size(0)//2, 2, output.size(-1))
loss = ((output[:, :, 0, :].mean(0) - y[:, 1, :].detach())**2).sum(1) \
+ ((output[:, :, 1, :].mean(0) - y[:, 0, :].detach())**2).sum(1)
grad_ = torch.autograd.grad(
[loss], [X], grad_outputs=torch.ones_like(loss), retain_graph=False)[0].detach()
return grad_
def _pgd_whitebox(X, mean, std):
freeze(model)
y = model(X.sub(mean).div(std), True).reshape(X.size(0)//2, 2, -1)
unfreeze(model)
X_pgd = X.clone()
if args.random:
X_pgd += torch.cuda.FloatTensor(*X_pgd.shape).uniform_(-args.epsilon, args.epsilon)
for _ in range(args.num_steps):
grad_ = _grad(X_pgd, y, mean, std)
X_pgd += args.step_size * grad_.sign()
eta = torch.clamp(X_pgd - X, -args.epsilon, args.epsilon)
X_pgd = torch.clamp(X + eta, 0, 1.0)
mis = 0
preds = 0
embedding_b = 0
for ens in range(num_ens):
output, output_logits = model(X_pgd.sub(mean).div(std), return_both=True)
embedding_b += output/num_ens
mis = (mis * ens + (-output_logits.softmax(-1) * (output_logits).log_softmax(-1)).sum(1)) / (ens + 1)
preds = (preds * ens + output_logits.softmax(-1)) / (ens + 1)
norm = torch.norm(embedding_b, 2, 1, True)
embedding = torch.div(embedding_b, norm)
mis = (- preds * (preds+1e-8).log()).sum(1) - (0 if num_ens == 1 else mis)
return embedding, mis
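    # PGD recap (matches the loop above): start from X plus uniform noise in
    # [-eps, eps] when args.random, then iterate
    #   X <- clip_[0,1]( X_orig + clip_[-eps,eps]( X + step * sign(grad) - X_orig ) ),
    # ascending a loss that pushes each image's ensemble embedding away from
    # its genuine pair's clean embedding.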
mean = torch.from_numpy(np.array([0.5, 0.5, 0.5])).view(1,3,1,1).cuda(args.gpu).float()
std = torch.from_numpy(np.array([0.5, 0.5, 0.5])).view(1,3,1,1).cuda(args.gpu).float()
model.eval()
name, val_loader, issame = val_loaders[args.gpu % len(val_loaders)]
with torch.no_grad():
with model.no_sync():
mis = []
embeddings = []
for i, input in enumerate(val_loader):
is_pair = issame[i*args.batch_size//2:min(len(issame), i*args.batch_size//2+args.batch_size//2)]
                if not np.any(is_pair): continue
input = input.cuda(args.gpu, non_blocking=True).mul_(std).add_(mean)
input = input.reshape(args.batch_size//2, 2, 3, 112, 112)
assert len(is_pair) == input.shape[0], (len(is_pair), input.shape[0])
mask = torch.from_numpy(is_pair).cuda(args.gpu, non_blocking=True) == True
input = input[mask, :, :, :, :].view(-1, 3, 112, 112)
embedding, mis_ = _pgd_whitebox(input, mean, std)
mis.append(mis_)
embeddings.append(embedding)
mis = torch.cat(mis, 0)
torch.distributed.barrier()
if args.gpu < len(val_loaders): np.save(os.path.join(args.save_path, 'mis_adv_{}.npy'.format(name)), mis.data.cpu().numpy())
def adjust_learning_rate(mu_optimizer, psi_optimizer, epoch, args):
lr = args.learning_rate
slr = args.learning_rate
assert len(args.gammas) == len(args.schedule), \
"length of gammas and schedule should be equal"
for (gamma, step) in zip(args.gammas, args.schedule):
if (epoch >= step): slr = slr * gamma
else: break
lr = lr * np.prod(args.gammas)
for param_group in mu_optimizer.param_groups: param_group['lr'] = lr
for param_group in psi_optimizer.param_groups: param_group['lr'] = slr
return lr, slr
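# Note: with the defaults (gammas = [0.1, 0.1, 0.1]), the mu learning rate is
# held fixed at learning_rate * prod(gammas) = 1e-4 throughout training, while
# the psi learning rate starts at learning_rate and is cut by each gamma at
# the scheduled epochs.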
if __name__ == '__main__': main()
|
{"hexsha": "08eed8391cd5f53e911b94467b6e0b7c752448ed", "size": 20648, "ext": "py", "lang": "Python", "max_stars_repo_path": "reproduction/finetune_face.py", "max_stars_repo_name": "thudzj/BayesAdapter.github.io", "max_stars_repo_head_hexsha": "243b8b8686e2c9f1ea0bcda5ede317ab98405845", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2020-10-06T05:24:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:33:57.000Z", "max_issues_repo_path": "reproduction/finetune_face.py", "max_issues_repo_name": "thudzj/BayesAdapter.github.io", "max_issues_repo_head_hexsha": "243b8b8686e2c9f1ea0bcda5ede317ab98405845", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reproduction/finetune_face.py", "max_forks_repo_name": "thudzj/BayesAdapter.github.io", "max_forks_repo_head_hexsha": "243b8b8686e2c9f1ea0bcda5ede317ab98405845", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-09T12:41:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T03:08:18.000Z", "avg_line_length": 43.838641189, "max_line_length": 140, "alphanum_fraction": 0.6157497094, "include": true, "reason": "import numpy", "num_tokens": 5074}
|
#!/usr/bin/env python
# Software License Agreement (MIT License)
#
# Copyright (c) 2020, tri_star
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Meiying Qin, Jake Brawer
import numpy as np
import rospy
from std_msgs.msg import String
from tri_star.msg import TargetPosition
from tri_star import perception_util
from tri_star import robot_util
from tri_star import transformation_util
from tri_star import constants
def get_Tx(T, theta):
theta = np.deg2rad(theta)
T_x = np.array([[1., 0., 0., 0.],
[0., np.cos(theta), -np.sin(theta), 0.],
[0., np.sin(theta), np.cos(theta), 0.],
[0., 0., 0., 1.]])
return np.matmul(T_x, T)
def get_Ty(T, theta):
theta = np.deg2rad(theta)
T_y = np.array([[np.cos(theta), 0., np.sin(theta), 0.],
[0., 1., 0., 0.],
[-np.sin(theta), 0., np.cos(theta), 0.],
[0., 0., 0., 1.]])
return np.matmul(T_y, T)
def get_Tz(T, theta):
theta = np.deg2rad(theta)
T_z = np.array([[np.cos(theta), -np.sin(theta), 0., 0.],
[np.sin(theta), np.cos(theta), 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return np.matmul(T_z, T)
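# Note (illustrative): each helper above pre-multiplies T by an elemental
# rotation, so composed calls apply right-to-left, e.g.
#   get_Ty(get_Tz(T, 90), 25) == Ry(25 deg) . Rz(90 deg) . T
# which is the ordering the paint_scraper grasp below relies on.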
class SimulatorInteraction(object):
def __init__(self):
self.tool_name = TargetPosition.NAME_TOOL
self.goal_name = TargetPosition.NAME_GOAL
self.task_name = None
self.tool_type = None
#self.task_name = "push"
#self.tool_type = "plunger"
#self.goal_type = "blue_puck"
#self.task_name = "knock"
#self.tool_type = "gavel"
#self.goal_type = "gavel_target"
#self.task_name = "knock" #
#self.tool_type = "xylo_stick"
#self.goal_type = "xylophone"
#self.task_name = "knock"
#self.tool_type = "percusive_stick"
#self.goal_type = "drum"
#self.task_name = "cut" # TODO
#self.tool_type = "butcher_knife"
#self.goal_type = "square_playdough"
#self.task_name = "scoop" # TODO
#self.tool_type = "blue_scooper"
#self.goal_type = "duck"
#self.task_name = "stir"
#self.tool_type = "small_blue_spatula"
#self.goal_type = "large_bowl"
#self.task_name = "draw"
#self.tool_type = "writing_brush"
#self.goal_type = "buddha_board"
#self.task_name = "screw"
#self.tool_type = "paint_scraper"
#self.goal_type = "screw"
self.task_name = "stir"
self.tool_type = "small_plastic_spoon"
self.goal_type = "ramen_bowl"
self.robot = robot_util.Robot()
rospy.Subscriber("tri_star/task", String, self.get_task_name)
rospy.Subscriber("tri_star/tool", String, self.get_tool_type)
rospy.Subscriber("tri_star/goal", String, self.get_goal_type)
def get_tool_type(self, data):
if len(data.data) != 0:
self.tool_type = data.data
else:
self.tool_type = None
def get_goal_type(self, data):
if len(data.data) != 0:
self.goal_type = data.data
else:
self.goal_type = None
def get_task_name(self, data):
if len(data.data) != 0:
self.task_name = data.data
else:
self.task_name = None
def get_pose(self):
        pose = self.robot.get_robot_pose()
        x, y, z, ax, ay, az = transformation_util.get_euler_angle_from_transformation_matrix(pose)
        print "x: {}\ny: {}\nz: {}\nrx: {}\nry: {}\nrz: {}\n".format(x, y, z, transformation_util.radian_to_degrees(ax), transformation_util.radian_to_degrees(ay), transformation_util.radian_to_degrees(az))
def get_angle(self):
print self.robot.get_robot_angle()
def add_goal(self):
self.remove_goal()
print "add goal: {}".format(self.goal_type)
desk_top = -0.023 + 0.045 / 2
print "get the pose of the {} of the goal object. Unit is meter".format(constants.get_perception_method())
x = float(raw_input("x: "))
y = float(raw_input("y: "))
robot_platform = constants.get_robot_platform()
if robot_platform == constants.ROBOT_PLATFORM_UR5E:
z = desk_top + 0.045
else:
z = float(raw_input("z: "))
rx = transformation_util.degrees_to_radian(float(raw_input("rx: ")))
ry = transformation_util.degrees_to_radian(float(raw_input("ry: ")))
rz = transformation_util.degrees_to_radian(float(raw_input("rz: ")))
Tworld_goal = transformation_util.get_transformation_matrix_from_euler_angle(x, y, z, rx, ry, rz)
perception_util.add_goal(self.goal_type, Tworld_goal)
def remove_goal(self):
perception_util.remove_goal()
def attach_goal(self):
pose = perception_util.get_Tworld_goal()
perception_util.attach_goal(pose, self.task_name)
def detach_goal(self):
perception_util.detach_goal()
def toggle_goal_collision(self, enable_collision):
if enable_collision:
perception_util.enable_goal_collision()
else:
perception_util.disable_goal_collision()
def attach_tool(self):
#Tee_tool = np.identity(4)
Tee_tool = {}
robot_platform = constants.get_robot_platform()
print "robot platform is: ", robot_platform
if robot_platform == constants.ROBOT_PLATFORM_UR5E:
if constants.get_perception_method() == constants.PERCEPTION_METHOD_ARUCO:
Tee_tool["plunger_normalized"] = np.array([[0, 1, 0, 0.08],
[1, 0, 0, 0],
[0, 0, -1, 0.04],
[0, 0, 0, 1]])
elif constants.get_perception_method() == constants.PERCEPTION_METHOD_POINTCLOUD:
Tee_tool["plunger_normalized"] = np.array([[-1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., -1., 0.],
[0., 0., 0., 1.]])
Tee_tool["plunger"] = np.array([[-1., 0., 0., 0.],
[0., -1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
#Tee_tool["xylo_stick"] = np.array([[0., 0., 1., 0.],
#[0., 1., 0., 0.],
#[-1., 0., 0., 0.05],
#[0., 0., 0., 1.]])
Tee_tool["xylo_stick"] = np.array([[-0.74096603, 0.65540231, 0.146346, 0.00451146],
[-0.00930174 ,0.20788756 ,-0.9781085 , 0.00348856],
[-0.67147809 ,-0.72610645, -0.14794124 , 0.03092808],
[ 0. ,0. , 0. , 1. ]])
theta = 0.
Tx = np.array([[1., 0., 0., 0.],
[0., np.cos(theta), -np.sin(theta), 0.],
[0., np.sin(theta), np.cos(theta), 0],
[0., 0., 0., 1]])
ee_theta_z = np.pi / 3. # y
Tee = np.array([[ np.cos(ee_theta_z), 0., np.sin(ee_theta_z), 0.],
[0., 1., 0., 0.],
[-np.sin(ee_theta_z), 0., np.cos(ee_theta_z), 0],
[0., 0., 0., 1]])
Tee_tool["xylo_stick"] = np.matmul(Tee_tool["xylo_stick"], Tx)
Tee_tool["xylo_stick"] = np.matmul(Tee, Tee_tool["xylo_stick"])
Tee_tool["wooden_knife"] = np.array([[1., 0., 0., 0.05],
[0., 0., -1., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.]])
Tee_tool["butcher_knife"] = np.array([[-1., 0., 0., 0.05],
[0., 0., -1., 0.],
[0., -1., 0., 0.],
[0., 0., 0., 1.]])
Tee_tool["blue_scooper"] = np.array([[-1., 0., 0., 0.1],
[0., 1., 0., 0.],
[0., 0., -1., 0.02],
[0., 0., 0., 1.]])
Tee_tool["small_blue_spatula"] = np.array([[0., 0., -1., 0.],
[0., 1., 0., 0.],
[1., 0., 0., 0.05],
[0., 0., 0., 1]])
Tee_tool["writing_brush"] = np.array([[0., 0., -1., 0.],
[0., 1., 0., 0.],
[1., 0., 0., 0.05],
[0., 0., 0., 1]])
#Tee_tool["percusive_stick"] = np.array([[-1.000000e+00, 0.000000e+00, 0.000000e+00, 8.000000e-02],
#[ 0.000000e+00, -1.000000e+00, -6.123234e-17, 0.000000e+00],
#[ 0.000000e+00, -6.123234e-17, 1.000000e+00, 3.000000e-02],
#[ 0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]])
Tee_tool["percusive_stick"] = np.array([[-1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 8.0000000e-02],
[ 0.0000000e+00, 1.0000000e+00, 1.8369702e-16, 0.0000000e+00],
[ 0.0000000e+00, 1.8369702e-16, -1.0000000e+00, 3.0000000e-02],
[ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00]]) # sample 1
Tee_tool["gavel"] = np.array([[-1., 0., 0., 0.],
[0., 0., -1., 0.],
[0., -1., 0., 0.],
[0., 0., 0., 1.]])
Tee_tool["paint_scraper"] = np.array([[0., 0., -1., 0.],
[0., 1., 0., 0.],
[1., 0., 0., 0.05],
[0., 0., 0., 1.]])
Tee_tool["paint_scraper"] = get_Tz(Tee_tool["paint_scraper"], 90)
Tee_tool["paint_scraper"] = get_Ty(Tee_tool["paint_scraper"], 25)
Tee_tool["small_plastic_spoon"] = np.array([[0., 0., 1., 0.],
[0., 1., 0., 0.],
[-1., 0., 0., 0.05],
[0., 0., 0., 1]])
# stir, screw tasks
#Tee_tool["plunger_normalized"] = np.array([[0., 0., 1., 0.],
#[0., 1., 0., 0.],
#[-1., 0., 0., 0.15],
#[0., 0., 0., 1.]])
elif robot_platform == constants.ROBOT_PLATFORM_KUKA:
Tee_tool = np.array([[-1, 0, 0, 0.0],
[0, -1, 0, 0.0],
[0, 0, 1, 0.08],
[0, 0, 0, 1]])
elif robot_platform == constants.ROBOT_PLATFORM_BAXTER:
Tee_tool = np.array([[-1, 0, 0, -0.08],
[0, -1, 0, 0],
[0, 0, 1, 0.04],
[0, 0, 0, 1]])
print "Tee_{}_{}".format(self.tool_type, constants.get_perception_method())
print Tee_tool[self.tool_type]
Tworld_ee = self.robot.get_robot_pose()
print "Tworld_ee"
print Tworld_ee
Tworld_tool = np.matmul(Tworld_ee, Tee_tool[self.tool_type])
print "Tworld_tool{}".format(constants.get_perception_method())
print Tworld_tool
perception_util.attach_tool(self.tool_type, Tworld_tool) # todo: select more tools
def detach_tool(self):
perception_util.detach_tool()
def move_arm(self, direction):
x, y, z, rx, ry, rz = transformation_util.get_euler_angle_from_transformation_matrix(self.robot.get_robot_pose())
if direction.startswith("x"):
value = float(direction[1:])
x += value
elif direction.startswith("y"):
value = float(direction[1:])
y += value
elif direction.startswith("z"):
value = float(direction[1:])
z += value
value = transformation_util.degrees_to_radian(float(direction[2:]))
if direction.startswith("rx"):
rx += value
elif direction.startswith("ry"):
ry += value
elif direction.startswith("rz"):
rz += value
pose = transformation_util.get_transformation_matrix_from_euler_angle(x, y, z, rx, ry, rz)
self.robot.set_robot_pose(pose, ask_before_move=False)
def run(self):
while not rospy.is_shutdown():
print "====================================================="
command_input = raw_input("Goals:\n\tag(add goal);\n\trg(remove goal);\n\tatg(attach goal);\n\tdtg(detach goal);\n\tegc(enable goal collision);\n\tdgc(disable goal collision)\nTools:\n\tat(attach tool);\n\tdt(detach tool);\nMove arm:\n\tx+n(e.g., x+5.13, x direction move up 5.13 cm. x could be replaced by y or z. + could be replaced by -);\n\tSimilarly, rx+n(e.g., rotate n degree around x axis)\nGet Pose: gp\nGet Joint angles; ja.\n\nYou Choice: ")
if command_input == "gp":
self.get_pose()
elif command_input == "ja":
self.get_angle()
elif command_input == "ag":
self.add_goal()
elif command_input == "rg":
self.remove_goal()
elif command_input == "atg":
self.attach_goal()
elif command_input == "dtg":
self.detach_goal()
elif command_input == "egc":
self.toggle_goal_collision(True)
elif command_input == "dgc":
self.toggle_goal_collision(False)
elif command_input == "at":
self.attach_tool()
elif command_input == "dt":
self.detach_tool()
else: # move arm
direction = command_input
self.move_arm(direction)
if __name__ == '__main__':
try:
rospy.init_node('simulator_interaction', anonymous=True)
interaction = SimulatorInteraction()
interaction.run()
except rospy.ROSInterruptException:
pass
|
{"hexsha": "a35b6499dcac0e3cf0582224369bd74c40106bf5", "size": 16852, "ext": "py", "lang": "Python", "max_stars_repo_path": "tri_star/src/nodes/simulator_interaction.py", "max_stars_repo_name": "ScazLab/Frontiers_Robot_Tool_Use", "max_stars_repo_head_hexsha": "ebace49e88562c18b3b967ec5360a4cec4f8fe56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tri_star/src/nodes/simulator_interaction.py", "max_issues_repo_name": "ScazLab/Frontiers_Robot_Tool_Use", "max_issues_repo_head_hexsha": "ebace49e88562c18b3b967ec5360a4cec4f8fe56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tri_star/src/nodes/simulator_interaction.py", "max_forks_repo_name": "ScazLab/Frontiers_Robot_Tool_Use", "max_forks_repo_head_hexsha": "ebace49e88562c18b3b967ec5360a4cec4f8fe56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.9415041783, "max_line_length": 464, "alphanum_fraction": 0.4608355091, "include": true, "reason": "import numpy", "num_tokens": 4162}
|
\documentclass{article}
\usepackage{enumitem}
\usepackage{hyperref}
\usepackage{ulem}
\title{Markademic}
\author{Mike Chung}
\begin{document}
\maketitle
\section{Introduction}
Wouldn't it be great if you could write academic papers using Markdown instead of LaTeX?
I present markademic, a pipeline for creating an academic-paper-style PDF from a Markdown file!
\section{Approach}
\begin{enumerate}[start=1]
\item
Convert markdown to tex using \href{https://github.com/Paperist/remark-latex}{remark-latex}
\item
Convert tex to pdf using \href{https://www.tug.org/applications/pdftex/}{pdflatex}
\item
Update tex and pdf on-the-fly using \href{https://www.npmjs.com/package/npm-watch}{npm-watch}
\end{enumerate}
\section{Conclusion}
Try writing your next paper with markademic!
\end{document}
|
{"hexsha": "b6960fed6b81905e6cef405fd093a4fc5054db08", "size": 810, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/draft.tex", "max_stars_repo_name": "mjyc/markademic", "max_stars_repo_head_hexsha": "c0e6a5e12138fb5f0dd2edc6f6f5f812ef25b778", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-25T17:35:20.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-25T17:35:20.000Z", "max_issues_repo_path": "tex/draft.tex", "max_issues_repo_name": "NakuraMino/markademic", "max_issues_repo_head_hexsha": "0606f873d211966928c8562d21586bfe2bba8b78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/draft.tex", "max_forks_repo_name": "NakuraMino/markademic", "max_forks_repo_head_hexsha": "0606f873d211966928c8562d21586bfe2bba8b78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-09T20:42:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-09T20:42:24.000Z", "avg_line_length": 19.2857142857, "max_line_length": 95, "alphanum_fraction": 0.7666666667, "num_tokens": 214}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% [TITLE] Discussion
%
% [AUTHOR] Chen Zhang (CSISS, GMU)
% [E-MAIL] czhang11@gmu.edu
%
% [PROJECT PAGE] https://github.com/czhang11/latex-manuscript-template
% [PROJECT DESC] ___edit_description_here___
%
% [FILE NAME] 4-discussion.tex
% [FILE DESC] ___edit_description_here___
% [FILE REVISION HISTORY]
% - Last Modified: 3/22/2019, 11:56:30
% - Created Date: 3/17/2019, 15:10:58
%
%
% Please visit http://chenzhang.org for more information about the author
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DISCUSSION %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Discussion}\label{sec:discussion}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
{"hexsha": "31973b69b1117f28228afc98ef11fe2f77977664", "size": 1020, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "4-discussion.tex", "max_stars_repo_name": "czhang11/latex-manuscript-template", "max_stars_repo_head_hexsha": "4aaaa4c961db4cb19c97f391ad882baef4380521", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-12T08:07:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-12T08:07:50.000Z", "max_issues_repo_path": "4-discussion.tex", "max_issues_repo_name": "yinzhi1221/latex-manuscript-template", "max_issues_repo_head_hexsha": "4aaaa4c961db4cb19c97f391ad882baef4380521", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "4-discussion.tex", "max_forks_repo_name": "yinzhi1221/latex-manuscript-template", "max_forks_repo_head_hexsha": "4aaaa4c961db4cb19c97f391ad882baef4380521", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-12T08:07:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-12T08:07:34.000Z", "avg_line_length": 36.4285714286, "max_line_length": 160, "alphanum_fraction": 0.3725490196, "num_tokens": 197}
|
import distutils.spawn
import numpy as np
import pytest
from ..yorick import PDBReader
SKIP_YORICK = distutils.spawn.find_executable("yorick") is None
SKIP_YORICK_REASON = "Could not find yorick executable. Skipping tests."
@pytest.fixture
def pdb():
return PDBReader("data/galaxies-040.pdb")
def walker_helper(keys, root):
for key, val in root.items():
if isinstance(val, dict):
            yield from walker_helper(keys + [key], val)
else:
if isinstance(val, tuple):
exp_type = val[0]
else:
exp_type = val
yield keys + [key], exp_type
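# For example, walking {"a": {"b": int, "c": (float, 3)}} yields
# (["a", "b"], int) and (["a", "c"], float): the key path plus the expected
# leaf type (tuples in the structure dict carry (type, ndim)).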
def deep_dict_compare(a, b, prefix=[], print_pre=""):
assert len(a) == len(b)
key_a = set(a.keys())
key_b = set(b.keys())
if len(key_a.difference(key_b)) > 0:
raise AssertionError("Found different set of keys at %s" % prefix)
for key, va in a.items():
print(print_pre, end="")
vb = b[key]
if type(va) != type(vb):
raise AssertionError("Type differ at %s[%s]" % (prefix, key))
if isinstance(va, dict):
print("checking key «%s»" % key)
deep_dict_compare(va, vb, prefix + [key], print_pre + "\t")
else:
print("comparing %s %s" % (prefix, key), end="...")
if va != vb:
raise AssertionError(
"Value differ at %s[%s], %s ≠ %s" % (prefix, key, va, vb)
)
print("ok!")
@pytest.mark.skipif(SKIP_YORICK, reason=SKIP_YORICK_REASON)
def test_structure(pdb):
expected_structure = {
"catalog": {
"vir": {"rvir": float, "mvir": float, "tvir": float, "cvel": float},
"shape": (float, 3),
"pos": (float, 3),
"vel": (float, 3),
"L": (float, 3),
"profile": (float, 2),
"age": float,
"aexp": float,
"num": int,
"slice": (int, 2),
"level": int,
"hosthalo": int,
"hostsub": int,
"nbsub": int,
"nextsub": int,
"mass": float,
"rad": float,
"spin": float,
"ek": float,
"ep": float,
"et": float,
"macc": float,
"tree": {
"nbfather": int,
"father": "yorick_pointer",
"mfrac": "yorick_pointer",
"nbson": int,
"son": "yorick_pointer",
},
"npart": int,
"index": "yorick_pointer",
"bulge": (float, 3),
}
}
deep_dict_compare(pdb.structure, expected_structure)
@pytest.mark.skipif(SKIP_YORICK, reason=SKIP_YORICK_REASON)
def test_access(pdb):
# This should work
for keys, expected_type in walker_helper([], pdb.structure):
# Bake access path as "a/b/c"
path = "/".join(keys)
res = pdb[path]
if expected_type in (int, float):
assert isinstance(res, np.ndarray)
@pytest.mark.skipif(SKIP_YORICK, reason=SKIP_YORICK_REASON)
def test_access_incorrect(pdb):
with pytest.raises(KeyError):
pdb["this/does/not/exists"]
|
{"hexsha": "560c2abecea571eb7b99dd5f25a7c68668c9045a", "size": 3236, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrophysics_toolset/io/tests/test_io.py", "max_stars_repo_name": "cphyc/astrophysics_toolset", "max_stars_repo_head_hexsha": "36be3f459a1bbca73af6f39f0957bfac0cb122eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-19T15:46:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-02T21:58:49.000Z", "max_issues_repo_path": "astrophysics_toolset/io/tests/test_io.py", "max_issues_repo_name": "cphyc/astrophysics_toolset", "max_issues_repo_head_hexsha": "36be3f459a1bbca73af6f39f0957bfac0cb122eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2020-05-12T11:07:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T12:54:08.000Z", "max_forks_repo_path": "astrophysics_toolset/io/tests/test_io.py", "max_forks_repo_name": "cphyc/astrophysics_toolset", "max_forks_repo_head_hexsha": "36be3f459a1bbca73af6f39f0957bfac0cb122eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3859649123, "max_line_length": 80, "alphanum_fraction": 0.510815822, "include": true, "reason": "import numpy", "num_tokens": 819}
|
from __future__ import print_function
from clBeergame import *
from utilities import *
import numpy as np
#from clGeneralParameters import generalParameters
import random
from config import get_config, update_config
import tensorflow as tf
config = None
#def main(config, beerGame):
def main(config):
random.seed(10)
# prepare loggers and directories
prepare_dirs_and_logger(config)
config = update_config(config)
# save the current configuration of the problem in a json file
save_config(config)
# get the address of data
if config.observation_data:
adsr = 'data/demandTr-obs-'
elif config.demandDistribution == 3:
if config.scaled:
adsr = 'data/basket_data/scaled'
else:
adsr = 'data/basket_data'
elif config.demandDistribution == 4:
if config.scaled:
adsr = 'data/forecast_data/scaled'
else:
adsr = 'data/forecast_data'
else:
adsr = 'data/demandTr'
# load demands
# demandTr = np.load('demandTr'+str(config.demandDistribution)+'-'+str(config.demandUp)+'.npy')
if config.demandDistribution == 0:
direc = os.path.realpath(adsr+str(config.demandDistribution)+'-'+str(config.demandUp)+'-'+str(config.maxEpisodesTrain)+'.npy')
if not os.path.exists(direc):
direc = os.path.realpath(adsr+str(config.demandDistribution)+'-'+str(config.demandUp)+'.npy')
elif config.demandDistribution == 1:
direc = os.path.realpath(adsr+str(config.demandDistribution)+'-'+str(int(config.demandMu))+'-'+str(int(config.demandSigma))+'.npy')
elif config.demandDistribution == 2:
direc = os.path.realpath(adsr+str(config.demandDistribution)+'.npy')
elif config.demandDistribution == 3:
direc = os.path.realpath(adsr+'/demandTr-'+str(config.data_id)+'.npy')
elif config.demandDistribution == 4:
direc = os.path.realpath(adsr+'/demandTr-'+str(config.data_id)+'.npy')
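    # sketch (assuming config.observation_data is off): with
    # demandDistribution=1, demandMu=10, demandSigma=2 this resolves to
    # 'data/demandTr1-10-2.npy'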
demandTr = np.load(direc)
print("loaded training set=", direc)
if config.demandDistribution == 0:
direc = os.path.realpath('data/demandTs'+str(config.demandDistribution)+'-'+str(config.demandUp)+'-'+str(config.maxEpisodesTrain)+'.npy')
if not os.path.exists(direc):
direc = os.path.realpath('data/demandTs'+str(config.demandDistribution)+'-'+str(config.demandUp)+'.npy')
elif config.demandDistribution == 1:
direc = os.path.realpath('data/demandTs'+str(config.demandDistribution)+'-'+str(int(config.demandMu))+'-'+str(int(config.demandSigma))+'.npy')
elif config.demandDistribution == 2:
direc = os.path.realpath('data/demandTs'+str(config.demandDistribution)+'.npy')
elif config.demandDistribution == 3:
direc = os.path.realpath(adsr+'/demandTs-'+str(config.data_id)+'.npy')
direcVl = os.path.realpath(adsr+'/demandVl-'+str(config.data_id)+'.npy')
demandVl = np.load(direcVl)
elif config.demandDistribution == 4:
direc = os.path.realpath(adsr+'/demandTs-'+str(config.data_id)+'.npy')
direcVl = os.path.realpath(adsr+'/demandVl-'+str(config.data_id)+'.npy')
demandVl = np.load(direcVl)
demandTs = np.load(direc)
print("loaded test set=", direc)
    # initialize an instance of the beer game
beerGame = clBeerGame(config)
# get the length of the demand.
demand_len = np.shape(demandTr)[0]
# Do Initial tests
beerGame.doTestMid(demandTs[0:config.testRepeatMid])
# train the specified number of games
for i in range(0, config.maxEpisodesTrain):
beerGame.playGame(demandTr[i%demand_len],"train")
# get the test results
if (np.mod(beerGame.curGame,config.testInterval) == 0) and (beerGame.curGame>500):
beerGame.doTestMid(demandTs[0:config.testRepeatMid])
# do the last test on the middle test data set.
beerGame.doTestMid(demandTs[0:config.testRepeatMid])
if config.demandDistribution == 3:
beerGame.doTestMid(demandVl[0:config.testRepeatMid])
if __name__ == '__main__':
# load parameters
config, unparsed = get_config()
# run main
main(config)
|
{"hexsha": "e4c50bc04e1803705da51a1efb61960e174ddd34", "size": 3822, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "OptMLGroup/DeepBeerInventory-RL", "max_stars_repo_head_hexsha": "ca2bb90a5ee3a45fa89cfadf56354369a62bf5a8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2020-07-24T16:06:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T07:59:47.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "Jessiehha/DeepBeerInventory-RL", "max_issues_repo_head_hexsha": "dfb003ab0fe2cfefe4f54c4c993752dc0143f954", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-13T19:07:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:54:43.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "Jessiehha/DeepBeerInventory-RL", "max_forks_repo_head_hexsha": "dfb003ab0fe2cfefe4f54c4c993752dc0143f954", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2020-10-24T19:51:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T13:55:47.000Z", "avg_line_length": 37.8415841584, "max_line_length": 144, "alphanum_fraction": 0.7315541601, "include": true, "reason": "import numpy", "num_tokens": 1116}
|
# This file is a part of AstroLib.jl. License is MIT "Expat".
"""
true_obliquity(jd) -> t_eps
### Purpose ###
Return the true obliquity of the ecliptic for a given Julian date
### Explanation ###
The function is used by the [`co_aberration`](@ref) procedure.
### Arguments ###
* `jd`: Julian date.
### Output ###
* `t_eps`: true obliquity of the ecliptic, in radians
### Example ###
```jldoctest
julia> using AstroLib
julia> true_obliquity(jdcnv(1978,01,7,11, 01))
0.4090953896211926
```
### Notes ###
The function calls [`mean_obliquity`](@ref).
"""
function true_obliquity(jd::Real)
eps0 = mean_obliquity(jd)
t_eps = eps0 + sec2rad(nutate(jd)[2])
return t_eps
end
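# Added note (sketch): the quantity computed above is ε_true = ε_mean + Δε,
# where Δε = nutate(jd)[2] is the nutation in obliquity in arcseconds,
# converted to radians by sec2rad.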
|
{"hexsha": "67b4ac92eda2e221230270e3ace06a574e743866", "size": 696, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/true_obliquity.jl", "max_stars_repo_name": "UnofficialJuliaMirror/AstroLib.jl-c7932e45-9af1-51e7-9da9-f004cd3a462b", "max_stars_repo_head_hexsha": "fb2ef587a2ac68a1c864bf251e8d9c3601ec4719", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2016-09-11T14:59:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T20:45:36.000Z", "max_issues_repo_path": "src/true_obliquity.jl", "max_issues_repo_name": "UnofficialJuliaMirror/AstroLib.jl-c7932e45-9af1-51e7-9da9-f004cd3a462b", "max_issues_repo_head_hexsha": "fb2ef587a2ac68a1c864bf251e8d9c3601ec4719", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2017-01-19T21:03:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:27:46.000Z", "max_forks_repo_path": "src/true_obliquity.jl", "max_forks_repo_name": "UnofficialJuliaMirror/AstroLib.jl-c7932e45-9af1-51e7-9da9-f004cd3a462b", "max_forks_repo_head_hexsha": "fb2ef587a2ac68a1c864bf251e8d9c3601ec4719", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:11:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:55:21.000Z", "avg_line_length": 17.4, "max_line_length": 65, "alphanum_fraction": 0.6637931034, "num_tokens": 221}
|
function title = p07_title ( title )
%*****************************************************************************80
%
%% P07_TITLE returns the title of problem 7.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 06 May 2011
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Output, string TITLE, the title of the problem.
%
title = 'F(X) = X**3, only linear Newton convergence.';
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/test_zero/p07_title.m"}
|
# encoding: utf-8
"""
evolsim.brain
-------------
class definition of animal brain from its DNA.
"""
# imports
# built-in
from enum import Enum
# local
# 3rd-party
import numpy as np
class SensorType(Enum):
    AGE = 1
    XPOS = 2
    YPOS = 3
class OutputType(Enum):
    XMOVE = 1
    YMOVE = 2
    ATTACK = 3
EAT = 4
class Node:
"""
"""
def __init__(self):
self.value = 0
class Network:
def __init__(self, input_nodes=None, middle_nodes=None, output_nodes=None):
self._inputs = input_nodes
self._middle = middle_nodes
self._outputs = output_nodes
self.inputs_vals = np.zeros(len(input_nodes))
self.middle_vals = np.zeros(len(middle_nodes))
        self.outputs_vals = np.zeros(len(output_nodes))
        # weight matrices: inputs -> middle layer and middle -> outputs
        self.first_step = np.zeros((self.inputs_vals.size, self.middle_vals.size))
        self.last_layer = np.zeros((self.middle_vals.size, self.outputs_vals.size))
def add_connection(self, source, destination, weight):
if destination in self._outputs:
self.last_layer[source.value][destination.value] = weight
else:
self.first_step[source.value][destination.value] = weight
def remove_connection(self, source, destination):
self.add_connection(source, destination, 0)
def trim_network(self):
        trimmed_inputs = set()
trimmed_first_step = []
trimmed_last_layer = []
for input_node in self._inputs:
if any(self.first_step[input_node.value][:]):
trimmed_first_step.append(
self.first_step[input_node.value][:]
)
trimmed_inputs.add(input_node)
if any(self.last_layer[input_node.value][:]):
trimmed_last_layer.append(
self.last_layer[input_node.value][:]
)
trimmed_inputs.add(input_node)
        for middle_node in self._middle:
            if any(self.first_step[middle_node.value][:]):
                trimmed_first_step.append(
                    self.first_step[middle_node.value][:]
                )
                trimmed_inputs.add(middle_node)
            if any(self.last_layer[middle_node.value][:]):
                trimmed_last_layer.append(
                    self.last_layer[middle_node.value][:]
                )
                trimmed_inputs.add(middle_node)
        # NOTE: the trimmed structures are collected but not yet applied;
        # the method is incomplete in the original source.
class Sensor(Node):
    def __init__(self, sensor_type: SensorType):
        super().__init__()
        self._type = sensor_type
@property
def type(self):
return self._type
    def evaluate(self, inputs):
        self.value = inputs[self._type.value]
class Brain:
"""
Implementation of animal brain.
"""
def __init__(self, DNA):
"""
Initialize brain from DNA.
"""
brain_DNA = DNA.brain()
for gene in brain_DNA:
self.create_connection(gene)
def create_connection(self, gene):
# source_type = (gene >> 24) & True
# source_node = (gene >> 20) & 0xf
# end_type = (gene >> 19) & True
# end_node = (gene >> 15) & 0xf
# weight = ((gene & 0xefff) / 4000.0) - 4
pass
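        # Sketch of the intended decoding (inferred from the commented-out
        # lines above, not a confirmed gene format): a single integer gene
        # packs source type/index, destination type/index, and a scaled
        # connection weight into bit fields.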
|
{"hexsha": "7cdd981fdf3ce413a8feae58aa8b675e6b53f0f4", "size": 3168, "ext": "py", "lang": "Python", "max_stars_repo_path": "evolsim-source/brain.py", "max_stars_repo_name": "labrunhosarodrigues/evolsim", "max_stars_repo_head_hexsha": "e24288b4ed396d6c80ca7818ef49e2ac56a21d34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evolsim-source/brain.py", "max_issues_repo_name": "labrunhosarodrigues/evolsim", "max_issues_repo_head_hexsha": "e24288b4ed396d6c80ca7818ef49e2ac56a21d34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evolsim-source/brain.py", "max_forks_repo_name": "labrunhosarodrigues/evolsim", "max_forks_repo_head_hexsha": "e24288b4ed396d6c80ca7818ef49e2ac56a21d34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5581395349, "max_line_length": 79, "alphanum_fraction": 0.5773358586, "include": true, "reason": "import numpy", "num_tokens": 700}
|
"""Utilities supporting pydmmt."""
import ast
import math
import numpy
import operator as op
import re
import string
class YAMLError(ValueError):
"""YAML error"""
class TextBased():
def __repr__(self):
return self.original_string
def __str__(self):
return self.original_string
def __hash__(self):
return hash(self.original_string)
def __eq__(self, other):
return self.original_string == other.original_string
class Variable(TextBased):
def __init__(self, text):
if type(text) is not str:
for key, value in text.items():
self.original_string = key
if "length" in value.keys():
self.length = int(value["length"])
break # allow only one cycle = one variable definition
else:
self.original_string = text
# this contains only basename if var is indexed
self.name = self.original_string
self.index = None
self.delay = None
self.is_relatively_indexed = False
self.is_absolutely_indexed = False
self.is_sliced = False
self.value = numpy.nan
self.is_indexed = ('[' in self.original_string and
']' in self.original_string)
if self.is_indexed:
self.index = self.original_string.split('[')[1].split(']')[0]
if any((l != 't' and l in string.ascii_letters)
for l in self.index):
raise ValueError("Using wrong index variable in " +
self.original_string)
self.name = self.original_string.split('[')[0]
self.is_relatively_indexed = 't' in self.index
self.is_absolutely_indexed = 't' not in self.index
self.is_sliced = ':' in self.index
if self.is_relatively_indexed and not self.is_sliced:
if self.index.strip() == 't':
self.delay = 0
else:
self.delay = int(self.index.replace('t', ''))
def actualize(self, index):
if self.is_relatively_indexed:
return Variable(self.name + '[' + str(index + self.delay) + ']')
if self.is_absolutely_indexed:
return self
raise YAMLError
@staticmethod
def is_it(text):
if type(text) is not str:
for key, value in text.items():
text = key
break # allow only one definition, discard the rest
if text is None or len(text) == 0:
return False
if text[0] not in string.ascii_letters:
return False
if '[' in text:
if text.count('[') != 1 or text.count(']') != 1:
return False
if '(' in text:
if text.split('(')[0] in Function.accepted_functions:
return False
if text in Function.accepted_functions:
return False
if text in Function.accepted_keywords:
return False
return True
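# Example (sketch): Variable("x[t-1]") parses to name "x" with delay -1 and
# is_relatively_indexed True, while Variable("x[9]") is absolutely indexed.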
def _power(a, b):
if any(abs(n) > 100 for n in [a, b]):
raise ValueError((a, b))
return op.pow(a, b)
def mean(a):
return sum(a) / len(a)
def rbf(inputs, param, n_nodes):
bases = []
idx_p = 0
for i in range(n_nodes):
temp = []
try:
for inp in inputs:
temp.append((inp - param[idx_p])**2 / param[idx_p+1]**2)
idx_p += 2
except TypeError as err:
# Expecting: TypeError: 'numpy.float64' object is not iterable
# it means there's only one input
if "iterable" in err.args[0].split():
# print(param)
temp.append((inputs - param[idx_p])**2 / param[idx_p+1]**2)
idx_p += 2
else:
raise err
bases.append(math.exp(-sum(temp)))
output = 0
    for w, base in zip(param[idx_p:idx_p + n_nodes], bases):
        output += base * w
    idx_p += n_nodes  # the weights occupy n_nodes slots; the bias follows
    return output + param[idx_p]
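# Added note on rbf's flat parameter layout (derived from the index
# arithmetic above): for each of the n_nodes bases, one (center, width)
# pair per input, followed by n_nodes output weights and a final bias.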
class Function(TextBased):
# supported operators and functions
accepted_functions = {"sum": sum, "max": max, "min": min, "mean": mean,
"rbf": rbf}
accepted_tree_nodes = ((ast.Num, ast.BinOp, ast.UnaryOp, ast.Subscript,
ast.Index, ast.Slice, ast.Load, ast.IfExp,
ast.Compare) +
(ast.Add, ast.Sub, ast.Mult, ast.Div, ast.FloorDiv,
ast.Pow, ast.USub, ast.Mod, ast.Lt, ast.Gt,
ast.NotEq, ast.Eq))
accepted_keywords = {"if": None, "else": None}
def __init__(self, text):
self.original_string = text
equation_sides = text.replace(')', ' ')\
.replace('(', ' ')\
.replace(',', ' ')\
.split('=', maxsplit=1)
self.outputs = [Variable(el)
for el in equation_sides[0].split()
if Variable.is_it(el)]
# take care of keyword arguments for function (mean(3,5,w=23))
self.inputs = [Variable(el.split('=')[-1])
for el in equation_sides[1].split()
if Variable.is_it(el.split('=')[-1])]
# parse the text and store the result
# power operator: change ^ in **
text = text.split('=', maxsplit=1)[1].lstrip()
text = text.replace('^', '**')
try:
tree = ast.parse(text, mode="eval")
except SyntaxError as exc:
print("While parsing:", text, ";")
raise exc
tree.lineno = 0
tree.col_offset = 0
if not Function._check_tree(tree, self.inputs):
print("While parsing of:", text, ";")
raise YAMLError("I'm screwed")
tree = Function.SubstituteVariables(self).visit(tree)
ast.fix_missing_locations(tree)
a_useful_name = ("<util.py: compiling function " +
self.original_string + ">")
self.compiled = compile(tree, filename=a_useful_name, mode="eval")
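    # Usage sketch (assumption): Function("y = x + 1") records output y and
    # input x, then compiles the right-hand side so that calculate() can
    # evaluate it against the current self.inputs[...] values.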
@staticmethod
def _check_tree(tree, variables):
# Expression(body=UnaryOp(left=Name(id='x1', ctx=Load()), op=USub()))])
for node in ast.walk(tree.body):
if isinstance(node, Function.accepted_tree_nodes):
continue
elif isinstance(node, ast.Call):
if node.func.id not in Function.accepted_functions:
print("call", ast.dump(node)) # TODO
return False
continue
elif isinstance(node, ast.keyword):
continue
elif isinstance(node, ast.Name):
if node.id in Function.accepted_functions:
continue
if Variable(node.id) not in variables:
if node.id not in [v.name for v in variables]:
if node.id != 't':
print("name", node.id) # TODO
return False
else:
print("else", ast.dump(node)) # TODO
return False
return True
class SubstituteVariables(ast.NodeTransformer):
def __init__(self, funct):
ast.NodeTransformer.__init__(self)
self.f = funct
def visit_Subscript(self, node):
# substitute names with variables
# rebuild string of variable
var_str = node.value.id
if isinstance(node.slice, ast.Index):
var_str += '['
# if var[t+1]
if isinstance(node.slice.value, ast.BinOp):
var_str += 't'
if isinstance(node.slice.value.op, ast.Add):
var_str += '+' + str(node.slice.value.right.n)
elif isinstance(node.slice.value.op, ast.Sub):
var_str += '-' + str(node.slice.value.right.n)
# else if var[t]
elif isinstance(node.slice.value, ast.Name):
var_str += 't'
# else if var[9]
elif isinstance(node.slice.value, ast.Num):
var_str += str(node.slice.value.n)
var_str += ']'
# if var[1:12] or var[:12] or var [123:]
if isinstance(node.slice, ast.Slice):
var_str += '['
if isinstance(node.slice.lower, ast.Num):
if isinstance(node.slice.upper, ast.Num):
var_str += (str(node.slice.lower.n) + ':' +
str(node.slice.upper.n))
elif not node.slice.upper:
var_str += str(node.slice.lower.n) + ':'
elif isinstance(node.slice.upper, ast.Num): # but not lower
var_str += ':' + str(node.slice.upper.n)
elif not node.slice.lower and not node.slice.upper:
# they're there but equal to None
var_str += ':'
else:
raise NotImplementedError(ast.dump(node))
var_str += ']'
if Variable(var_str) not in self.f.inputs:
print(Variable(var_str), "in", self.f.inputs) # TODO
raise YAMLError(ast.dump(node))
text = ("self.inputs[" +
str(self.f.inputs.index(Variable(var_str))) +
"].value")
new_node = ast.parse(text, mode="eval").body
ast.copy_location(new_node, node)
ast.fix_missing_locations(new_node)
# print(ast.dump(new_node)) # TODO
return new_node
def visit_Name(self, node):
# substitute names with variables - works only for non subscript
if node.id in Function.accepted_functions:
return node
if Variable(node.id) not in self.f.inputs:
raise YAMLError(ast.dump(node))
text = ("self.inputs[" +
str(self.f.inputs.index(Variable(node.id))) +
"].value")
new_node = ast.parse(text, mode="eval").body
ast.copy_location(new_node, node)
ast.fix_missing_locations(new_node)
# print(ast.dump(new_node)) # TODO
return new_node
def calculate(self):
return eval(self.compiled)
# sorting files in human sorting
# http://stackoverflow.com/questions/4623446/how-do-you-sort-files-numerically
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
def tryint(s):
try:
return int(s)
        except ValueError:
return s
return [tryint(c) for c in re.split('([0-9]+)', s)]
|
{"hexsha": "4731ca476ad89b9fd7a7efca4cefbb0c84292af2", "size": 10926, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydmmt/util.py", "max_stars_repo_name": "Lordmzn/pydmmt", "max_stars_repo_head_hexsha": "a3155eb480b333023c2da20acf1f11a156387cd2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pydmmt/util.py", "max_issues_repo_name": "Lordmzn/pydmmt", "max_issues_repo_head_hexsha": "a3155eb480b333023c2da20acf1f11a156387cd2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pydmmt/util.py", "max_forks_repo_name": "Lordmzn/pydmmt", "max_forks_repo_head_hexsha": "a3155eb480b333023c2da20acf1f11a156387cd2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8062283737, "max_line_length": 79, "alphanum_fraction": 0.5154676917, "include": true, "reason": "import numpy", "num_tokens": 2334}
|
from autofit.non_linear.samples.pdf import quantile
import autogalaxy as ag
import numpy as np
def test__quantile_1d_profile():
profile_1d_0 = np.array([1.0, 2.0, 3.0])
profile_1d_1 = np.array([1.0, 2.0, 3.0])
profile_1d_list = [profile_1d_0, profile_1d_1]
median_profile_1d = ag.util.error.quantile_profile_1d(
profile_1d_list=profile_1d_list, q=0.5
)
assert (median_profile_1d == np.array([1.0, 2.0, 3.0])).all()
profile_1d_0 = np.array([1.0, 2.0, 3.0])
profile_1d_1 = np.array([2.0, 4.0, 6.0])
profile_1d_list = [profile_1d_0, profile_1d_1]
median_profile_1d = ag.util.error.quantile_profile_1d(
profile_1d_list=profile_1d_list, q=0.5
)
assert (median_profile_1d == np.array([1.5, 3.0, 4.5])).all()
profile_1d_list = [
profile_1d_0,
profile_1d_0,
profile_1d_0,
profile_1d_1,
profile_1d_1,
profile_1d_1,
profile_1d_1,
]
weights = np.array([9.9996, 9.9996, 9.9996, 1e-4, 1e-4, 1e-4, 1e-4])
median_profile_1d = ag.util.error.quantile_profile_1d(
profile_1d_list=profile_1d_list, q=0.5, weights=weights
)
assert (median_profile_1d == np.array([1.0, 2.0, 3.0])).all()
radial_values = [1.0, 2.0, 3.0, 4.0, 5.0]
weights = [0.1, 0.3, 0.2, 0.05, 0.35]
quantile_result = quantile(x=radial_values, q=0.23, weights=weights)
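    # Consistency sketch: with single-bin profiles, the per-bin weighted
    # quantile computed below should reduce to autofit's scalar weighted
    # quantile computed above.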
profile_1d_0 = np.array([1.0])
profile_1d_1 = np.array([2.0])
profile_1d_2 = np.array([3.0])
profile_1d_3 = np.array([4.0])
profile_1d_4 = np.array([5.0])
profile_1d_list = [
profile_1d_0,
profile_1d_1,
profile_1d_2,
profile_1d_3,
profile_1d_4,
]
profile_1d_via_error_util = ag.util.error.quantile_profile_1d(
profile_1d_list=profile_1d_list, q=0.23, weights=weights
)
assert quantile_result == profile_1d_via_error_util[0]
|
{"hexsha": "ea319b174e521eedd1eb8788136bfce6b273b847", "size": 1982, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_autogalaxy/util/test_error_util.py", "max_stars_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_stars_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-05-29T08:46:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T14:06:20.000Z", "max_issues_repo_path": "test_autogalaxy/util/test_error_util.py", "max_issues_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_issues_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-01-06T09:42:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T15:52:23.000Z", "max_forks_repo_path": "test_autogalaxy/util/test_error_util.py", "max_forks_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_forks_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-10T07:45:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T17:36:40.000Z", "avg_line_length": 27.5277777778, "max_line_length": 73, "alphanum_fraction": 0.6175580222, "include": true, "reason": "import numpy", "num_tokens": 761}
|
*----------------------------------------------------------------------*
subroutine topo_make_unique(ireo,vtx,topo,xlines,nvtx,nj)
*----------------------------------------------------------------------*
* (unrestricted) sort of vertices, topo matrix, and xline matrix
* to ascending sequence as needed for comparing terms
* not intended for generating the actual unique sequence for
* the representation (which needs to consider restricted sort and
* thus is much more expensive)
*
* the input vertices are reordered
*
* ireo(1:nvtx) contains the reordering info: ireo(ivtx) = ivtx_old
*
*----------------------------------------------------------------------*
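* added note (sketch of the algorithm): (1) insertion-sort the vertices
* by their vtx value; (2) within blocks of equal vtx, sort by the
* xlines rows; (3) within blocks equal on both, sweep-sort the rows and
* columns of topo until stable or max_sweep is exceeded
*----------------------------------------------------------------------*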
implicit none
include 'stdunit.h'
integer, parameter ::
& ntest = 00,
& max_sweep = 3
integer, intent(in) ::
& nvtx, nj
integer, intent(out) ::
& ireo(nvtx)
integer(8), intent(inout) ::
& vtx(nvtx), topo(nvtx,nvtx), xlines(nvtx,nj)
integer ::
& idx, jdx, sweep, neqv_blocks
logical ::
& changed
integer, external ::
& i8list_cmp
if (ntest.ge.100) then
call write_title(lulog,wst_dbg_subr,'topo_make_unique')
write(lulog,*) 'topo on entry'
call prt_contr_p(lulog,-1,vtx,topo,xlines,nvtx,nj)
end if
c test - symmetrize topo
c do idx = 1, nvtx
c do jdx = 1, idx-1
c topo(jdx,idx) = topo(idx,jdx)
c end do
c end do
c test
do idx = 1, nvtx
ireo(idx) = idx
end do
call idxsort8(vtx,ireo,nvtx,+1)
call reoi8mat(xlines,ireo,nvtx,nj,1)
! look for vertices with equal entry on vtx:
if (nj.gt.0) then
do idx = 1, nvtx
jdx = idx
c dbg fix by mh
if (jdx.lt.nvtx) then
c dbg original
do while(jdx.lt.nvtx.and.vtx(jdx+1).eq.vtx(idx))
jdx = jdx+1
end do
c dbg resume fix
end if
c dbg end fix
if (idx.ne.jdx) then
! sort such that xlines(,nj:1) give ascending seq
call topo_sort_xlines(xlines,ireo,idx,jdx,nvtx,nj)
end if
end do
end if
! apply reordering to topo
call reoi8mat(topo,ireo,nvtx,nvtx,3)
! look for vertices with both equal entry on vtx and xlines
do sweep = 1, max_sweep
neqv_blocks = 0
do idx = 1, nvtx
jdx = idx
c dbg fix by mh
if (jdx.lt.nvtx) then
c dbg original
do while(jdx.lt.nvtx.and.vtx(jdx+1).eq.vtx(idx).and.
& i8list_cmp(xlines(jdx+1,1:nj),xlines(jdx,1:nj),nj).eq.0)
jdx = jdx+1
end do
c dbg resume fix
end if
c dbg end fix
if (idx.ne.jdx) then
neqv_blocks = neqv_blocks+1
! sort such that topo gives ascending seq
call topo_sort_topo(topo,changed,ireo,idx,jdx,nvtx,
& vtx,xlines,nj)
c dbg
c print *,'sorting: sweep = ',sweep,' block = ',neqv_blocks,
c & ' changed = ',changed
c call prt_contr_p(lulog,-1,vtx,topo,xlines,nvtx,nj)
c dbg
end if
end do
if (neqv_blocks.le.1.or..not.changed) exit
if (sweep.eq.max_sweep) then
write(lulog,*) 'max_sweep = ',max_sweep
write(lulog,*) 'neqv_blocks: ',neqv_blocks
call prt_contr_p(lulog,-1,vtx,topo,xlines,nvtx,nj)
call quit(1,'topo_make_unique',
& 'sort of topo matrix does not converge')
end if
end do
if (ntest.ge.100) then
write(lulog,*) 'topo on exit'
call prt_contr_p(lulog,-1,vtx,topo,xlines,nvtx,nj)
end if
return
end
subroutine topo_sort_xlines(xlines,ireo,ist,ind,nvtx,nj)
implicit none
integer, intent(in) ::
& nvtx, nj, ist, ind
integer, intent(inout) ::
& ireo(nvtx)
integer(8), intent(inout) ::
& xlines(nvtx,nj)
integer ::
& ij, ivtx, jvtx, ihlp
integer(8) ::
& iscr(nj), xl_r(nj,nvtx)
integer, external ::
& i8list_cmp
if (nj.eq.0) return
do ivtx = 1, nvtx
do ij = 1, nj
xl_r(ij,ivtx) = xlines(ivtx,nj+1-ij)
end do
end do
do ivtx = ist+1, ind
iscr(1:nj) = xl_r(1:nj,ivtx)
ihlp = ireo(ivtx)
jvtx = ivtx-1
do while (jvtx.ge.ist.and.
& i8list_cmp(iscr(1:nj),xl_r(1:nj,jvtx),nj).gt.0)
xl_r(1:nj,jvtx+1) = xl_r(1:nj,jvtx)
ireo(jvtx+1) = ireo(jvtx)
jvtx = jvtx-1
end do
xl_r(1:nj,jvtx+1) = iscr(1:nj)
ireo(jvtx+1) = ihlp
end do
do ivtx = 1, nvtx
do ij = 1, nj
xlines(ivtx,nj+1-ij) = xl_r(ij,ivtx)
end do
end do
return
end
subroutine topo_sort_topo(topo,changed,ireo,ist,ind,nvtx,
& vtx,xlines,nj)
implicit none
integer, intent(in) ::
& nvtx, ist, ind, nj
integer, intent(inout) ::
& ireo(nvtx)
integer(8), intent(inout) ::
& topo(nvtx,nvtx)
integer(8), intent(in) ::
& vtx(nvtx), xlines(nvtx,nj)
logical, intent(out) ::
& changed
integer ::
& ivtx, jvtx, ihlp, jhlp, ireo_loc(nvtx)
integer(8) ::
& lscr(nvtx), iscr(nvtx), jscr(nvtx)
integer, external ::
& i8list_cmp
if (nvtx.eq.0) return
do ivtx = 1, nvtx
ireo_loc(ivtx) = ivtx
end do
changed = .false.
do ivtx = ist+1, ind
lscr(1:nvtx) = topo(1:nvtx,ivtx)
c iscr(1:ivtx) = topo(1:ivtx,ivtx)
c iscr(ivtx+1:nvtx) = topo(ivtx,ivtx+1:nvtx)
ihlp = ireo_loc(ivtx)
jhlp = ireo(ivtx)
jvtx = ivtx-1
do while (jvtx.ge.ist)
if (i8list_cmp(lscr,topo(1,jvtx),nvtx).le.0) exit
c jscr(1:jvtx) = topo(1:jvtx,jvtx)
c jscr(jvtx+1:nvtx) = topo(jvtx,jvtx+1:nvtx)
c if (i8list_cmp(iscr,topo(1,jvtx),nvtx).le.0) exit
c if (i8list_cmp(iscr,jscr,nvtx).le.0) exit
topo(1:nvtx,jvtx+1) = topo(1:nvtx,jvtx)
ireo_loc(jvtx+1) = ireo_loc(jvtx)
ireo(jvtx+1) = ireo(jvtx)
jvtx = jvtx-1
end do
changed = changed.or.jvtx.ne.ivtx-1
topo(1:nvtx,jvtx+1) = lscr(1:nvtx)
ireo_loc(jvtx+1) = ihlp
ireo(jvtx+1) = jhlp
end do
! reorder rows as well
call reoi8mat(topo,ireo_loc,nvtx,nvtx,1)
c ! re-check that sequence is OK
c do ivtx = ist, ind-1
c if (i8list_cmp(topo(1,ivtx),topo(1,ivtx+1),nvtx).lt.0)
cc if (i8list_cmp(topo(1,ivtx),topo(1,ivtx+1),ind).lt.0)
c & call quit(1,'topo_make_unique',
c & 'topo_reo in trouble')
c end do
return
end
|
{"hexsha": "e084dfca3bb640be8f736e1578f50725817ad3c2", "size": 6826, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "formula/topo_make_unique.f", "max_stars_repo_name": "ak-ustutt/GeCCo-public", "max_stars_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "formula/topo_make_unique.f", "max_issues_repo_name": "ak-ustutt/GeCCo-public", "max_issues_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "formula/topo_make_unique.f", "max_forks_repo_name": "ak-ustutt/GeCCo-public", "max_forks_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9754098361, "max_line_length": 72, "alphanum_fraction": 0.5243187811, "num_tokens": 2309}
|
from time import time
import cv2
import argparse
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pdb
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import MiniBatchDictionaryLearning
from skimage.exposure import rescale_intensity
# python -i image_denoising.py -i 01.png -iter 500 -coeff 2 -n_comp 100
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
ap.add_argument("-n_comp", "--n_components", type=int, default=100, help="number of componets in the dictionary")
ap.add_argument("-iter", "--n_iter", type=int, default=500, help="number of iterations")
ap.add_argument("-coeff", "--non_zero_coeff", type=int,default=1, help="number of non zero coefficients")
args = vars(ap.parse_args())
n_comp = args['n_components']
n_iter = args['n_iter']
non_zero_coeff = args['non_zero_coeff']
def noisy_patches(image, dict_learning=False, channel=None):
image = image / 255.
if dict_learning:
image = image[::2, ::2] + image[1::2, ::2] + image[::2, 1::2] + image[1::2, 1::2]
image /= 4.0
print('Distorting image...')
distorted = image.copy()
    if channel:
height, width, channel = image.shape
distorted += 0.1 * np.random.randn(height, width, channel)
else:
height, width = image.shape
distorted += 0.075 * np.random.randn(height, width)
cv2.imwrite('noisy.jpg', (distorted*255))
print(distorted.shape)
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted, patch_size)
data = data.reshape(data.shape[0], -1)
mean = np.mean(data, axis=0)
std = np.std(data, axis=0)
data -= mean
data /= std
print('done in %.2fs.' % (time() - t0))
return (data, 255.*distorted, mean, std)
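# Added note: noisy_patches standardizes each 7x7 patch (zero mean, unit
# variance per pixel position) and returns mean/std so the reconstruction
# step below can undo the normalization.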
def ksvd(noisy_data):
print('Updating Dictionary')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=n_comp,
alpha=2,
n_iter=n_iter)
#dict_init=D)
print('done in %.2fs.' % (time() - t0))
V = dico.fit(noisy_data).components_
return V, dico
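# Added note: despite the name, ksvd above uses scikit-learn's
# MiniBatchDictionaryLearning (online dictionary learning), which stands in
# for a true K-SVD dictionary update here.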
if __name__ == '__main__':
image = cv2.imread(args['image'])
channel=None
if len(image.shape) >2:
channel = image.shape[2]
data,distorted, _, _ = noisy_patches(image, dict_learning=True, channel=channel)
dict_final, dico = ksvd(data)
    n0_data, distorted, mean, std = noisy_patches(image, channel=channel)
dico.set_params(transform_algorithm='omp',transform_n_nonzero_coefs = non_zero_coeff )
code = dico.transform(n0_data)
patches = np.dot(code,dict_final)
    patches *= std
patches += mean
patches = (patches.reshape(n0_data.shape[0], 7, 7, channel))
print('Reconstructing...')
reconstruction = reconstruct_from_patches_2d(patches, (image.shape[0], image.shape[1], channel))
    reconstruction *= 255
difference = image - reconstruction
error = np.sqrt(np.sum(difference ** 2))
print('Difference (norm: %.2f)' %error)
print('Finished reconstruction..')
plt.subplot(1, 2, 1)
plt.imshow(distorted[:,:,0], cmap='gray')
plt.title("Noisy image")
plt.subplot(1, 2, 2)
plt.imshow(reconstruction[:,:,0], cmap='gray')
plt.title("Recon. image")
plt.show()
|
{"hexsha": "5ddee8496aa47858cb3d4e100444d83a2f05ba23", "size": 3352, "ext": "py", "lang": "Python", "max_stars_repo_path": "ImageDenoising/sparse_encoding.py", "max_stars_repo_name": "koriavinash1/ImageDenoising", "max_stars_repo_head_hexsha": "d598cd55ba230f72dca9e800286cccae2dce853b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-28T05:57:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-28T05:57:41.000Z", "max_issues_repo_path": "ImageDenoising/sparse_encoding.py", "max_issues_repo_name": "koriavinash1/ImageDenoising", "max_issues_repo_head_hexsha": "d598cd55ba230f72dca9e800286cccae2dce853b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ImageDenoising/sparse_encoding.py", "max_forks_repo_name": "koriavinash1/ImageDenoising", "max_forks_repo_head_hexsha": "d598cd55ba230f72dca9e800286cccae2dce853b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1881188119, "max_line_length": 113, "alphanum_fraction": 0.7228520286, "include": true, "reason": "import numpy,import scipy", "num_tokens": 915}
|
import ..list ..logic data.rat
instance rat.inhabited : inhabited rat := ⟨0⟩
inductive term_dlo : Type
| var : nat → term_dlo
| cst : rat → term_dlo
notation `V'` := term_dlo.var
notation `C'` r := term_dlo.cst r
instance term_dlo.decidable_eq : decidable_eq term_dlo := by tactic.mk_dec_eq_instance
-- def term_dlo.decr_idx : term_dlo rat → term_dlo rat
-- | (term_dlo.var rat m) := term_dlo.var rat (m-1)
-- | (term_dlo.cst b) := term_dlo.cst b
--
def term_dlo.incr_idx : term_dlo → term_dlo
| (term_dlo.var m) := term_dlo.var (m+1)
| (term_dlo.cst b) := term_dlo.cst b
def term_dlo.eval (bs : list rat) : term_dlo → rat
| (term_dlo.var n) := list.inth bs n
| (term_dlo.cst r) := r
-- meta def term_dlo.to_format : term_dlo → format
-- | (term_dlo.var m) := "#" ++ to_fmt m
-- | (term_dlo.cst b) := "@" ++ to_fmt b
-- meta instance term_dlo.has_to_format : has_to_format term_dlo :=
-- ⟨term_dlo.to_format⟩
lemma term_dlo.eval_incr_idx_eq {t : term_dlo} {b : rat} {bs} :
t.incr_idx.eval (b::bs) = t.eval bs :=
begin cases t with n b; simp [term_dlo.incr_idx, term_dlo.eval] end
instance dec_eq_term_dlo : decidable_eq term_dlo :=
by tactic.mk_dec_eq_instance
inductive atom_dlo : Type
| lt : term_dlo → term_dlo → atom_dlo
| eq : term_dlo → term_dlo → atom_dlo
def atom_dlo.eval (bs : list rat) : atom_dlo → Prop
| (atom_dlo.lt t1 t2) := term_dlo.eval bs t1 < term_dlo.eval bs t2
| (atom_dlo.eq t1 t2) := term_dlo.eval bs t1 = term_dlo.eval bs t2
-- | (x <' y) := sorry
-- | (x =' y) := sorry
-- meta def atom_dlo.to_format : atom_dlo → format
-- | (atom_dlo.lt t s) := to_fmt t ++ " < " ++ to_fmt s
-- | (atom_dlo.eq t s) := to_fmt t ++ " = " ++ to_fmt s
inductive formula_dlo : Type
| true : formula_dlo
| false : formula_dlo
| atom : atom_dlo → formula_dlo
| and : formula_dlo → formula_dlo → formula_dlo
| or : formula_dlo → formula_dlo → formula_dlo
| not : formula_dlo → formula_dlo
| ex : formula_dlo → formula_dlo
-- | formula_dlo.true := sorry
-- | formula_dlo.false := sorry
-- | (formula_dlo.atom a) := sorry
-- | (formula_dlo.and p q) := sorry
-- | (p ∨' q) := sorry
-- | (¬' p) := sorry
-- | (formula_dlo.ex p) := sorry
notation `⊤'` := formula_dlo.true
notation `⊥'` := formula_dlo.false
notation `A'` a := formula_dlo.atom a
notation `¬'` p := formula_dlo.not p
notation p `∧'` q := formula_dlo.and p q
notation p `∨'` q := formula_dlo.or p q
notation `∃'` p := formula_dlo.ex p
notation x `<'` y := atom_dlo.lt x y
notation x `='` y := atom_dlo.eq x y
def formula_dlo.eval : list rat → formula_dlo → Prop
| as formula_dlo.true := _root_.true
| as formula_dlo.false := _root_.false
| as (formula_dlo.atom a) := a.eval as
| as (formula_dlo.not φ) := ¬ (formula_dlo.eval as φ)
| as (formula_dlo.or φ ψ) := (formula_dlo.eval as φ) ∨ (formula_dlo.eval as ψ)
| as (formula_dlo.and φ ψ) := (formula_dlo.eval as φ) ∧ (formula_dlo.eval as ψ)
| as (formula_dlo.ex φ) := ∃ d, formula_dlo.eval (d::as) φ
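-- example (sketch): (∃' (A' ((V' 0) <' (C' 1)))).eval [] unfolds to ∃ d : rat, d < 1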
lemma eval_true {xs : list rat} : formula_dlo.true.eval xs ↔ _root_.true :=
by simp [formula_dlo.eval]
lemma eval_not {p : formula_dlo} (xs : list rat) :
(p.not).eval xs ↔ ¬ (p.eval xs) := iff.refl _
lemma eval_not_false {p : formula_dlo} (xs : list rat) :
(formula_dlo.false.not).eval xs ↔ _root_.true :=
begin simp [formula_dlo.eval] end
lemma eval_not_or {p q : formula_dlo} {xs : list rat} :
((formula_dlo.or p q).not).eval xs ↔ ((p.not).eval xs ∧ (q.not).eval xs) :=
begin simp [formula_dlo.eval, eval_not, not_or_distrib] end
lemma eval_not_and {p q : formula_dlo} {xs : list rat} :
((formula_dlo.and p q).not).eval xs ↔ ((p.not).eval xs ∨ (q.not).eval xs) :=
begin
simp only [formula_dlo.eval, eval_not ],
rw (@not_and_distrib _ _ (classical.dec _))
end
lemma eval_or {p q : formula_dlo} (xs : list rat) :
(formula_dlo.or p q).eval xs = (p.eval xs ∨ q.eval xs) := eq.refl _
lemma eval_and {p q : formula_dlo} {xs : list rat} :
(formula_dlo.and p q).eval xs = (p.eval xs ∧ q.eval xs) := eq.refl _
lemma eval_le {t s : term_dlo} {rs : list rat} :
(atom_dlo.lt t s).eval rs ↔ (t.eval rs < s.eval rs) := iff.refl _
lemma eval_false {xs : list rat} : formula_dlo.false.eval xs ↔ _root_.false :=
by simp [formula_dlo.eval]
lemma eval_ex {p : formula_dlo} (xs) : p.ex.eval xs = ∃ x, (p.eval (x::xs)) :=
begin simp [formula_dlo.eval] end
open rat
def not_o : formula_dlo → formula_dlo
| formula_dlo.true := formula_dlo.false
| formula_dlo.false := formula_dlo.true
| p := p.not
def and_o : formula_dlo → formula_dlo → formula_dlo
| (formula_dlo.true) q' := q'
| p' (formula_dlo.true) := p'
| (formula_dlo.false) q' := formula_dlo.false
| p' (formula_dlo.false) := formula_dlo.false
| p' q' := formula_dlo.and p' q'
def or_o : formula_dlo → formula_dlo → formula_dlo
| (formula_dlo.true ) _ := formula_dlo.true
| _ (formula_dlo.true ) := formula_dlo.true
| (formula_dlo.false ) q := q
| p (formula_dlo.false ) := p
| p q := formula_dlo.or p q
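-- added note: not_o, and_o, and or_o are smart constructors that fold away
-- the ⊤'/⊥' cases; the eval_* lemmas below show they preserve semantics.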
lemma eval_not_o {p : formula_dlo} (xs : list rat) :
(not_o p).eval xs ↔ ¬(p.eval xs) :=
begin cases p; simp [not_o, formula_dlo.eval] end
lemma eval_or_o {p q : formula_dlo} (xs : list rat) :
(or_o p q).eval xs ↔ (p.eval xs ∨ q.eval xs) :=
begin cases p; cases q; simp [or_o, formula_dlo.eval] end
lemma eval_and_o {p q : formula_dlo} (xs : list rat) :
(and_o p q).eval xs ↔ (p.eval xs ∧ q.eval xs) :=
begin cases p; cases q; simp [and_o, formula_dlo.eval] end
-- Requires : qfree arg-0
def formula_dlo.map (f : atom_dlo → atom_dlo) : formula_dlo → formula_dlo
| formula_dlo.true := formula_dlo.true
| formula_dlo.false := formula_dlo.false
| (formula_dlo.atom a) := formula_dlo.atom (f a)
| (formula_dlo.not p) := p.map.not
| (formula_dlo.or p q) := formula_dlo.or p.map q.map
| (formula_dlo.and p q) := formula_dlo.and p.map q.map
| (formula_dlo.ex p) := formula_dlo.false
def conj : list (formula_dlo) → formula_dlo
| [] := formula_dlo.true
| (p::ps) := and_o p $ conj ps
def conj_atom : list (atom_dlo) → formula_dlo
| [] := formula_dlo.true
| (a::as) := and_o (formula_dlo.atom a) $ conj_atom as
def disj : list (formula_dlo) → formula_dlo
| [] := formula_dlo.false
| (p::ps) := or_o p $ disj ps
lemma eval_disj {xs : list rat} :
∀ {ps : list (formula_dlo)}, (disj ps).eval xs ↔ (∃ p : formula_dlo, p ∈ ps ∧ p.eval xs)
| [] := begin simp [disj, formula_dlo.eval] end
| (p::ps) :=
begin
rw [list.forsome_mem_cons], simp [disj, eval_or_o],
apply or_iff_or iff.rfl eval_disj
end
def disj_map {β : Type} (bs : list β) (p : β → formula_dlo) := disj (list.map p bs)
lemma eval_disj_map {β : Type} (as : list β)
(p : β → formula_dlo) {ds : list rat} :
(disj_map as p).eval ds ↔ ∃ a, a ∈ as ∧ (p a).eval ds :=
begin
simp [disj_map, eval_disj],
constructor; intro h; cases h with x hx; cases hx with hx1 hx2,
{ cases hx1 with a ha, cases ha with ha1 ha2, subst ha2,
existsi a, constructor; assumption },
{ existsi (p x), constructor, existsi x,
constructor, assumption, refl, assumption }
end
lemma cases_not_o (P : formula_dlo → Prop) (p : formula_dlo)
(HT : P formula_dlo.true) (Hp : P formula_dlo.false) (Hq : P (p.not)) : P (not_o p) :=
begin cases p; try {simp [not_o], assumption} end
lemma cases_or_o (P : formula_dlo → Prop) (p q : formula_dlo)
(HT : P formula_dlo.true) (Hp : P p) (Hq : P q) (Hpq : P (formula_dlo.or p q)) : P (or_o p q) :=
begin cases p; cases q; try {simp [or_o], assumption} end
lemma cases_and_o (P : formula_dlo → Prop) (p q : formula_dlo)
(HB : P formula_dlo.false) (Hp : P p) (Hq : P q) (Hpq : P (formula_dlo.and p q)) : P (and_o p q) :=
begin cases p; cases q; try {simp [and_o], assumption} end
-- meta def formula_dlo.to_format : formula_dlo → format
-- | (formula_dlo.true) := "⊤"
-- | (formula_dlo.false) := "⊥"
-- | (formula_dlo.atom a) := a.to_format
-- | (formula_dlo.and p q) := "(" ++ (formula_dlo.to_format p) ++ " ∧ " ++ (formula_dlo.to_format q) ++ ")"
-- | (formula_dlo.or p q) := "(" ++ (formula_dlo.to_format p) ++ " ∨ " ++ (formula_dlo.to_format q) ++ ")"
-- | (formula_dlo.not p) := "¬(" ++ (formula_dlo.to_format p) ++ ")"
-- | (formula_dlo.ex p) := "∃(" ++ (formula_dlo.to_format p) ++ ")"
-- meta instance formula_dlo.has_to_format : has_to_format (formula_dlo) := ⟨formula_dlo.to_format⟩
--
-- meta instance formula_dlo.has_to_tactic_format : has_to_tactic_format (formula_dlo) :=
-- has_to_format_to_has_to_tactic_format _
instance atom_dlo.decidable_eq : decidable_eq atom_dlo := by tactic.mk_dec_eq_instance
instance dec_aval {as a} : decidable (atom_dlo.eval as a) :=
begin cases a; {simp [atom_dlo.eval], apply_instance} end
def avals (bs : list rat) (as : list atom_dlo) : Prop :=
∀ a ∈ as, atom_dlo.eval bs a
lemma eval_conj {xs : list rat} :
∀ {ps : list formula_dlo}, formula_dlo.eval xs (conj ps) ↔ (∀ p ∈ ps, formula_dlo.eval xs p)
| [] := begin simp [conj, formula_dlo.eval] end
| (p::ps) :=
begin
simp [conj, eval_and_o],
apply and_iff_and iff.rfl eval_conj
end
lemma eval_conj_atom {xs : list rat} :
∀ {as : list atom_dlo}, (conj_atom as).eval xs ↔ (∀ a ∈ as, atom_dlo.eval xs a)
| [] := begin simp [conj_atom] end
| (a::as) :=
begin simp [conj_atom, eval_and_o, atom_dlo.eval,
eval_conj_atom, formula_dlo.eval] end
|
{"author": "skbaek", "repo": "cooper", "sha": "812afc6b158821f2e7dac9c91d3b6123c7a19faf", "save_path": "github-repos/lean/skbaek-cooper", "path": "github-repos/lean/skbaek-cooper/cooper-812afc6b158821f2e7dac9c91d3b6123c7a19faf/dlo/formula.lean"}
|
r"""
Solve Poisson equation in 1D with mixed Dirichlet and Neumann bcs
\nabla^2 u = f,
The equation to solve is
(\nabla^2 u, v) = (f, v)
Use any combination of Dirichlet and Neumann boundary conditions.
"""
import os
import sympy as sp
import numpy as np
from shenfun import inner, div, grad, TestFunction, TrialFunction, \
Array, Function, FunctionSpace, dx, legendre, extract_bc_matrices
# Use sympy to compute a rhs, given an analytical solution
# Choose a solution with non-zero values
domain = (-2, 1)
x = sp.symbols("x", real=True)
ue = sp.cos(5*sp.pi*(x+0.1)/2)
fe = ue.diff(x, 2)
# The pure Neumann case requires the mean value of the solution to be specified
x_map = -1 + (x-domain[0])*2/(domain[1]-domain[0])
mean = {
'c': sp.integrate(ue/sp.sqrt(1-x_map**2), (x, domain[0], domain[1])).evalf(),
'l': sp.integrate(ue, (x, domain[0], domain[1])).evalf()
}
# 5 different types of boundary conditions
bcs = [
{'left': ('N', ue.diff(x, 1).subs(x, domain[0])), 'right': ('N', ue.diff(x, 1).subs(x, domain[1]))},
{'left': ('D', ue.subs(x, domain[0])), 'right': ('D', ue.subs(x, domain[1]))},
{'left': ('N', ue.diff(x, 1).subs(x, domain[0])), 'right': ('D', ue.subs(x, domain[1]))},
{'left': ('D', ue.subs(x, domain[0])), 'right': ('N', ue.diff(x, 1).subs(x, domain[1]))},
{'right': (('D', ue.subs(x, domain[1])), ('N', ue.diff(x, 1).subs(x, domain[1])))}
]
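# Added note: bci indexes the list above, i.e. 0: Neumann-Neumann,
# 1: Dirichlet-Dirichlet, 2: Neumann-Dirichlet, 3: Dirichlet-Neumann,
# 4: both Dirichlet and Neumann on the right boundary.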
def main(N, family, bci):
bc = bcs[bci]
if bci == 0:
SD = FunctionSpace(N, family=family, bc=bc, domain=domain, mean=mean[family.lower()])
else:
SD = FunctionSpace(N, family=family, bc=bc, domain=domain)
u = TrialFunction(SD)
v = TestFunction(SD)
# Get f on quad points
fj = Array(SD, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = Function(SD)
f_hat = inner(v, fj, output_array=f_hat)
# Get left hand side of Poisson equation
A = inner(v, div(grad(u)))
u_hat = Function(SD).set_boundary_dofs()
if isinstance(A, list):
bc_mat = extract_bc_matrices([A])
A = A[0]
f_hat -= bc_mat[0].matvec(u_hat, Function(SD))
u_hat = A.solve(f_hat, u_hat)
uj = u_hat.backward()
uh = uj.forward()
# Compare with analytical solution
ua = Array(SD, buffer=ue)
assert np.allclose(uj, ua), np.linalg.norm(uj-ua)
if 'pytest' not in os.environ:
print("Error=%2.16e" %(np.sqrt(dx((uj-ua)**2))))
import matplotlib.pyplot as plt
plt.plot(SD.mesh(), uj, 'b', SD.mesh(), ua, 'r')
#plt.show()
if __name__ == '__main__':
import sys
N = int(sys.argv[-1]) if len(sys.argv) == 2 else 36
for family in ('C', 'L'):
for bci in range(5):
main(N, family, bci)
|
{"hexsha": "6bfadcbadbf3d0b48f1ba203db7acc62522447a2", "size": 2717, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/mixedneumanndirichlet_poisson1D.py", "max_stars_repo_name": "jaisw7/shenfun", "max_stars_repo_head_hexsha": "7482beb5b35580bc45f72704b69343cc6fc1d773", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-06T09:29:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-06T09:29:39.000Z", "max_issues_repo_path": "demo/mixedneumanndirichlet_poisson1D.py", "max_issues_repo_name": "jaisw7/shenfun", "max_issues_repo_head_hexsha": "7482beb5b35580bc45f72704b69343cc6fc1d773", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/mixedneumanndirichlet_poisson1D.py", "max_forks_repo_name": "jaisw7/shenfun", "max_forks_repo_head_hexsha": "7482beb5b35580bc45f72704b69343cc6fc1d773", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.875, "max_line_length": 104, "alphanum_fraction": 0.5969819654, "include": true, "reason": "import numpy,import sympy", "num_tokens": 882}
|