code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import cv2
import numpy as np
import sys
from flaskholo.detector import roi_helpers
from flaskholo.settings import detector_config
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import flaskholo.detector.resnet as nn
def format_img_size(img, C):
    """Resize img so its shorter side equals C.im_size, preserving aspect ratio.

    Returns the resized image and the scale ratio that was applied.
    """
    target_min_side = float(C.im_size)
    height, width = img.shape[0], img.shape[1]
    if width <= height:
        ratio = target_min_side / width
        new_width = int(target_min_side)
        new_height = int(ratio * height)
    else:
        ratio = target_min_side / height
        new_width = int(ratio * width)
        new_height = int(target_min_side)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    return img, ratio
def format_img_channels(img, C):
    """Normalize image channels for the network and add a batch dimension.

    Reverses the channel order, subtracts the per-channel means from the
    config, rescales, and returns a float32 array of shape (1, C, H, W).
    """
    # Reverse channel order (BGR <-> RGB) and work in float32 from here on.
    img = img[:, :, (2, 1, 0)].astype(np.float32)
    for channel in range(3):
        img[:, :, channel] -= C.img_channel_mean[channel]
    img /= C.img_scaling_factor
    # Channels-first layout plus a leading batch axis.
    img = np.transpose(img, (2, 0, 1))
    return np.expand_dims(img, axis=0)
def format_img(img, C):
    """Resize and normalize an image for model prediction.

    Returns the network-ready tensor and the resize ratio (needed to map
    predicted boxes back to the original image).
    """
    resized, ratio = format_img_size(img, C)
    return format_img_channels(resized, C), ratio
# Method to transform the coordinates of the bounding box to its original size
def get_real_coordinates(ratio, x1, y1, x2, y2):
    """Map bounding-box coordinates from the resized image back to the original.

    The resized image is `ratio` times the original size, so true division by
    `ratio` followed by rounding recovers the original pixel coordinates.
    The previous version used floor division (`//`), which made the subsequent
    `round` a no-op and biased every coordinate downward.
    """
    real_x1 = int(round(x1 / ratio))
    real_y1 = int(round(y1 / ratio))
    real_x2 = int(round(x2 / ratio))
    real_y2 = int(round(y2 / ratio))
    return (real_x1, real_y1, real_x2, real_y2)
class MentuDetector():
    """Faster R-CNN style detector: ResNet base + RPN + per-ROI classifier."""
    def __init__(self):
        # Build models and load weights immediately; config comes from settings.
        self.C = detector_config['frcnn']()
        class_mapping = self.C.class_mapping
        # Invert class->id into id->class for decoding classifier outputs.
        self.class_mapping = {v: k for k, v in class_mapping.items()}
        # One random BGR color per class, used when drawing boxes.
        self.class_to_color = {self.class_mapping[v]: np.random.randint(0, 255, 3) for v in self.class_mapping}
        # def init_model(self):
        input_shape_img = (None, None, 3)
        input_shape_features = (None, None, 1024)
        img_input = Input(shape=input_shape_img)
        roi_input = Input(shape=(self.C.num_rois, 4))
        feature_map_input = Input(shape=input_shape_features)
        # define the base network (resnet here, can be VGG, Inception, etc)
        shared_layers = nn.nn_base(img_input, trainable=True)
        # define the RPN, built on the base layers
        num_anchors = len(self.C.anchor_box_scales) * len(self.C.anchor_box_ratios)
        rpn_layers = nn.rpn(shared_layers, num_anchors)
        classifier = nn.classifier(feature_map_input, roi_input, self.C.num_rois, nb_classes=len(self.class_mapping), trainable=True)
        self.model_rpn = Model(img_input, rpn_layers)
        self.model_classifier_only = Model([feature_map_input, roi_input], classifier)
        self.model_classifier = Model([feature_map_input, roi_input], classifier)
        # print('Loading weights from {}'.format(self.C.model_path))
        self.model_rpn.load_weights(self.C.model_path, by_name=True)
        self.model_classifier.load_weights(self.C.model_path, by_name=True)
        # Compiling only makes the models callable; optimizer/loss are unused at inference.
        self.model_rpn.compile(optimizer='sgd', loss='mse')
        self.model_classifier.compile(optimizer='sgd', loss='mse')
    def detect_img(self, img_file, bbox_threshold=0.8, visualise=True): # img_file
        """Run detection on an image file and draw the results on it.

        Returns (annotated image, list of detected class names, list of
        (x1, y1, x2, y2) boxes in original-image coordinates).
        """
        img = cv2.imread(img_file)
        # NOTE(review): img.shape[:2] is (height, width), so `w` actually holds
        # the height and `h` the width; the resize branches below are written
        # against that swapped convention — confirm before renaming.
        w, h = img.shape[:2]
        if w > h:
            if w > 1280:
                img = cv2.resize(img, ( int(h*1280/w), 1280), interpolation=cv2.INTER_AREA)
            elif h > 720:
                img = cv2.resize(img, (720, int(w*720/h)), interpolation=cv2.INTER_AREA)
        elif h > w:
            if h > 1280:
                img = cv2.resize(img, (1280, int(w*1280/h) ), interpolation=cv2.INTER_AREA)
            elif w > 720:
                img = cv2.resize(img, (int(h*720/w), 720), interpolation=cv2.INTER_AREA)
        C = self.C
        class_to_color = self.class_to_color
        X, ratio = format_img(img, C)
        # if K.image_data_format() == 'channels_last':
        X = np.transpose(X, (0, 2, 3, 1))
        # get the feature maps and output from the RPN
        [Y1, Y2, F] = self.model_rpn.predict(X)
        R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_data_format(), overlap_thresh=0.7)
        # convert from (x1,y1,x2,y2) to (x,y,w,h)
        R[:, 2] -= R[:, 0]
        R[:, 3] -= R[:, 1]
        # apply the spatial pyramid pooling to the proposed regions
        bboxes = {}
        probs = {}
        # Classify ROIs in batches of C.num_rois.
        for jk in range(R.shape[0]//C.num_rois + 1):
            ROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0)
            if ROIs.shape[1] == 0:
                break
            if jk == R.shape[0]//C.num_rois:
                #pad R
                # Last (partial) batch: pad with copies of the first ROI so the
                # classifier always receives exactly C.num_rois boxes.
                curr_shape = ROIs.shape
                target_shape = (curr_shape[0],C.num_rois,curr_shape[2])
                ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
                ROIs_padded[:, :curr_shape[1], :] = ROIs
                ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
                ROIs = ROIs_padded
            [P_cls, P_regr] = self.model_classifier_only.predict([F, ROIs])
            for ii in range(P_cls.shape[1]):
                # Skip low-confidence proposals and the background class (last index).
                if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                    continue
                cls_name = self.class_mapping[np.argmax(P_cls[0, ii, :])]
                if cls_name not in bboxes:
                    bboxes[cls_name] = []
                    probs[cls_name] = []
                (x, y, w, h) = ROIs[0, ii, :]
                cls_num = np.argmax(P_cls[0, ii, :])
                try:
                    # Apply class-specific bbox regression, de-normalized by the
                    # configured stds; best-effort — keep the raw ROI on failure.
                    (tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]
                    tx /= C.classifier_regr_std[0]
                    ty /= C.classifier_regr_std[1]
                    tw /= C.classifier_regr_std[2]
                    th /= C.classifier_regr_std[3]
                    x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
                except:
                    pass
                # Scale feature-map coordinates back to resized-image pixels.
                bboxes[cls_name].append([C.rpn_stride*x, C.rpn_stride*y, C.rpn_stride*(x+w), C.rpn_stride*(y+h)])
                probs[cls_name].append(np.max(P_cls[0, ii, :]))
        all_dets = []
        index_list = []
        for key in bboxes:
            bbox = np.array(bboxes[key])
            new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
            for jk in range(new_boxes.shape[0]):
                (x1, y1, x2, y2) = new_boxes[jk,:]
                # Map back to the original (pre-format_img) image coordinates.
                (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
                cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2)
                textLabel = '{}: {}'.format(key,int(100*new_probs[jk]))
                all_dets.append(key)
                index_list.append((real_x1, real_y1, real_x2, real_y2))
                (retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1)
                textOrg = (real_x1, real_y1-0)
                # Label background: black border, white fill, then the text itself.
                cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2)
                cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1)
                cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
        return img, all_dets, index_list
| [
"cv2.putText",
"keras.backend.image_data_format",
"numpy.argmax",
"cv2.getTextSize",
"numpy.transpose",
"numpy.expand_dims",
"keras.models.Model",
"numpy.zeros",
"cv2.imread",
"numpy.max",
"numpy.random.randint",
"numpy.array",
"flaskholo.detector.roi_helpers.apply_regr",
"cv2.rectangle",
... | [((755, 826), 'cv2.resize', 'cv2.resize', (['img', '(new_width, new_height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n', (765, 826), False, 'import cv2\n'), ((1170, 1198), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (1182, 1198), True, 'import numpy as np\n'), ((1209, 1236), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1223, 1236), True, 'import numpy as np\n'), ((2236, 2264), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape_img'}), '(shape=input_shape_img)\n', (2241, 2264), False, 'from keras.layers import Input\n'), ((2285, 2318), 'keras.layers.Input', 'Input', ([], {'shape': '(self.C.num_rois, 4)'}), '(shape=(self.C.num_rois, 4))\n', (2290, 2318), False, 'from keras.layers import Input\n'), ((2347, 2380), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape_features'}), '(shape=input_shape_features)\n', (2352, 2380), False, 'from keras.layers import Input\n'), ((2482, 2519), 'flaskholo.detector.resnet.nn_base', 'nn.nn_base', (['img_input'], {'trainable': '(True)'}), '(img_input, trainable=True)\n', (2492, 2519), True, 'import flaskholo.detector.resnet as nn\n'), ((2677, 2711), 'flaskholo.detector.resnet.rpn', 'nn.rpn', (['shared_layers', 'num_anchors'], {}), '(shared_layers, num_anchors)\n', (2683, 2711), True, 'import flaskholo.detector.resnet as nn\n'), ((2872, 2900), 'keras.models.Model', 'Model', (['img_input', 'rpn_layers'], {}), '(img_input, rpn_layers)\n', (2877, 2900), False, 'from keras.models import Model\n'), ((2938, 2987), 'keras.models.Model', 'Model', (['[feature_map_input, roi_input]', 'classifier'], {}), '([feature_map_input, roi_input], classifier)\n', (2943, 2987), False, 'from keras.models import Model\n'), ((3020, 3069), 'keras.models.Model', 'Model', (['[feature_map_input, roi_input]', 'classifier'], {}), '([feature_map_input, roi_input], classifier)\n', (3025, 3069), False, 
'from keras.models import Model\n'), ((3512, 3532), 'cv2.imread', 'cv2.imread', (['img_file'], {}), '(img_file)\n', (3522, 3532), False, 'import cv2\n'), ((4235, 4264), 'numpy.transpose', 'np.transpose', (['X', '(0, 2, 3, 1)'], {}), '(X, (0, 2, 3, 1))\n', (4247, 4264), True, 'import numpy as np\n'), ((2036, 2064), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3)'], {}), '(0, 255, 3)\n', (2053, 2064), True, 'import numpy as np\n'), ((4415, 4436), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4434, 4436), True, 'from keras import backend as K\n'), ((4744, 4811), 'numpy.expand_dims', 'np.expand_dims', (['R[C.num_rois * jk:C.num_rois * (jk + 1), :]'], {'axis': '(0)'}), '(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)\n', (4758, 4811), True, 'import numpy as np\n'), ((6553, 6574), 'numpy.array', 'np.array', (['bboxes[key]'], {}), '(bboxes[key])\n', (6561, 6574), True, 'import numpy as np\n'), ((5818, 5844), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (5827, 5844), True, 'import numpy as np\n'), ((6654, 6674), 'numpy.array', 'np.array', (['probs[key]'], {}), '(probs[key])\n', (6662, 6674), True, 'import numpy as np\n'), ((7281, 7339), 'cv2.getTextSize', 'cv2.getTextSize', (['textLabel', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(1)'], {}), '(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n', (7296, 7339), False, 'import cv2\n'), ((7401, 7540), 'cv2.rectangle', 'cv2.rectangle', (['img', '(textOrg[0] - 5, textOrg[1] + baseLine - 5)', '(textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5)', '(0, 0, 0)', '(2)'], {}), '(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] +\n retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)\n', (7414, 7540), False, 'import cv2\n'), ((7547, 7693), 'cv2.rectangle', 'cv2.rectangle', (['img', '(textOrg[0] - 5, textOrg[1] + baseLine - 5)', '(textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5)', '(255, 255, 255)', '(-1)'], {}), '(img, 
(textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] +\n retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1)\n', (7560, 7693), False, 'import cv2\n'), ((7699, 7777), 'cv2.putText', 'cv2.putText', (['img', 'textLabel', 'textOrg', 'cv2.FONT_HERSHEY_DUPLEX', '(1)', '(0, 0, 0)', '(1)'], {}), '(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n', (7710, 7777), False, 'import cv2\n'), ((5589, 5615), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (5598, 5615), True, 'import numpy as np\n'), ((6181, 6231), 'flaskholo.detector.roi_helpers.apply_regr', 'roi_helpers.apply_regr', (['x', 'y', 'w', 'h', 'tx', 'ty', 'tw', 'th'], {}), '(x, y, w, h, tx, ty, tw, th)\n', (6203, 6231), False, 'from flaskholo.detector import roi_helpers\n'), ((6434, 6457), 'numpy.max', 'np.max', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (6440, 6457), True, 'import numpy as np\n'), ((5074, 5096), 'numpy.zeros', 'np.zeros', (['target_shape'], {}), '(target_shape)\n', (5082, 5096), True, 'import numpy as np\n'), ((5417, 5440), 'numpy.max', 'np.max', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (5423, 5440), True, 'import numpy as np\n'), ((5461, 5487), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (5470, 5487), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
| Tools for language models.
"""
from copy import copy
import numpy as np
import random
def apply_bert_mask(inputs, pad_mask, tokenizer):
    """
    Apply BERT-style masking to an array of token ids.

    Args:
        inputs: int ndarray of token ids.
        pad_mask: bool ndarray of the same shape, True at non-padding positions.
        tokenizer: tokenizer exposing ``vocab`` (sized) and ``mask_token_id``.

    Returns:
        masked_inputs: token ids after masking.
        labels: original token ids at masked positions, -1 elsewhere.
    """
    vocab_size = len(tokenizer.vocab)
    # Select ~15% of non-padding tokens for prediction.
    bert_mask = np.random.uniform(size=inputs.shape) < 0.15
    bert_mask &= pad_mask
    # Zero out the selected positions; they are refilled below.
    masked_inputs = inputs * ~bert_mask
    random_uniform = np.random.uniform(size=inputs.shape)
    # Of the selected tokens: 80% -> [MASK], 10% -> random token, 10% -> keep original.
    token_bert_mask = random_uniform < 0.8
    random_bert_mask = random_uniform > 0.9
    true_bert_mask = ~token_bert_mask & ~random_bert_mask
    token_bert_mask = token_bert_mask & bert_mask
    random_bert_mask = random_bert_mask & bert_mask
    true_bert_mask = true_bert_mask & bert_mask
    masked_inputs += tokenizer.mask_token_id * token_bert_mask
    masked_inputs += np.random.randint(0, vocab_size, size=(inputs.shape)) * random_bert_mask
    masked_inputs += inputs * true_bert_mask
    # Positions not selected for prediction are ignored by the loss via -1.
    labels = np.where(bert_mask, inputs, -1)
    return masked_inputs, labels
"numpy.random.uniform",
"numpy.where",
"numpy.random.randint"
] | [((1167, 1203), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'inputs.shape'}), '(size=inputs.shape)\n', (1184, 1203), True, 'import numpy as np\n'), ((1718, 1749), 'numpy.where', 'np.where', (['bert_mask', 'inputs', '(-1)'], {}), '(bert_mask, inputs, -1)\n', (1726, 1749), True, 'import numpy as np\n'), ((1035, 1071), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'inputs.shape'}), '(size=inputs.shape)\n', (1052, 1071), True, 'import numpy as np\n'), ((1586, 1637), 'numpy.random.randint', 'np.random.randint', (['(0)', 'vocab_size'], {'size': 'inputs.shape'}), '(0, vocab_size, size=inputs.shape)\n', (1603, 1637), True, 'import numpy as np\n')] |
import bisect
import collections
import datetime as dt
import time
from typing import List
import discord
import numpy as np
import pandas as pd
import seaborn as sns
from discord.ext import commands
from matplotlib import pyplot as plt
from matplotlib import patches as patches
from matplotlib import lines as mlines
from tle import constants
from tle.util import codeforces_api as cf
from tle.util import codeforces_common as cf_common
from tle.util import discord_common
from tle.util import graph_common as gc
# Register pandas datetime converters so matplotlib can plot datetime axes.
pd.plotting.register_matplotlib_converters()
# A user is considered active if the duration since his last contest is not more than this
CONTEST_ACTIVE_TIME_CUTOFF = 90 * 24 * 60 * 60  # 90 days
class GraphCogError(commands.CommandError):
    """Raised for user-facing errors in the graph commands."""
    pass
def nice_sub_type(types):
    """Translate participant types into human-readable legend format strings."""
    display_format = {
        "CONTESTANT": "Contest: {}",
        "OUT_OF_COMPETITION": "Unofficial: {}",
        "VIRTUAL": "Virtual: {}",
        "PRACTICE": "Practice: {}",
    }
    labels = []
    for sub_type in types:
        labels.append(display_format[sub_type])
    return labels
def _plot_rating(resp, mark="o"):
    """Plot each user's rating history (one line per user) on the current axes.

    resp: list of lists of Codeforces rating-change objects.
    mark: matplotlib marker used for each contest point.
    """
    for rating_changes in resp:
        ratings, times = [], []
        for rating_change in rating_changes:
            ratings.append(rating_change.newRating)
            times.append(
                dt.datetime.fromtimestamp(rating_change.ratingUpdateTimeSeconds)
            )
        plt.plot(
            times,
            ratings,
            linestyle="-",
            marker=mark,
            markersize=3,
            markerfacecolor="white",
            markeredgewidth=0.5,
        )
    # Rank color bands in the background, slanted date tick labels.
    gc.plot_rating_bg(cf.RATED_RANKS)
    plt.gcf().autofmt_xdate()
def _classify_submissions(submissions):
    """Group submissions by the author's participant type (contest/practice/...)."""
    grouped = {participant_type: [] for participant_type in cf.Party.PARTICIPANT_TYPES}
    for sub in submissions:
        grouped[sub.author.participantType].append(sub)
    return grouped
def _plot_scatter(regular, practice, virtual, point_size):
    """Scatter-plot (time, rating) pairs for each participation category.

    The plot order (practice, regular, virtual) fixes the default color
    assignment in the figure legend.
    """
    for contest in [practice, regular, virtual]:
        if contest:
            times, ratings = zip(*contest)
            plt.scatter(times, ratings, zorder=10, s=point_size)
def _running_mean(x, bin_size):
n = len(x)
cum_sum = [0] * (n + 1)
for i in range(n):
cum_sum[i + 1] = x[i] + cum_sum[i]
res = [0] * (n - bin_size + 1)
for i in range(bin_size, n + 1):
res[i - bin_size] = (cum_sum[i] - cum_sum[i - bin_size]) / bin_size
return res
def _get_extremes(contest, problemset, submissions):
    """Return (easiest unsolved rating, hardest solved rating) for one contest.

    Only in-contest submissions are counted and problems without a rating are
    ignored.  Either element of the pair may be None when no qualifying
    problem exists.
    """

    def in_contest(sub):
        if sub.author.participantType == "CONTESTANT":
            return True
        return (
            cf_common.is_rated_for_onsite_contest(contest)
            and sub.author.participantType == "OUT_OF_COMPETITION"
        )

    rated_problems = [prob for prob in problemset if prob.rating is not None]
    counted_subs = [
        sub
        for sub in submissions
        if in_contest(sub) and sub.problem.rating is not None
    ]
    # Map problem index -> rating for accepted in-contest submissions.
    solved = {}
    for sub in counted_subs:
        if sub.verdict == "OK":
            solved[sub.problem.index] = sub.problem.rating
    max_solved = max(solved.values(), default=None)
    min_unsolved = min(
        (prob.rating for prob in rated_problems if prob.index not in solved),
        default=None,
    )
    return min_unsolved, max_solved
def _plot_extreme(
    handle, rating, packed_contest_subs_problemset, solved, unsolved
):
    """Plot, per contest, the hardest solved and easiest unsolved problem ratings."""
    extremes = [
        (
            dt.datetime.fromtimestamp(contest.end_time),
            _get_extremes(contest, problemset, subs),
        )
        for contest, problemset, subs in packed_contest_subs_problemset
    ]
    regular = []
    fullsolves = []
    nosolves = []
    for t, (mn, mx) in extremes:
        if mn and mx:
            regular.append((t, mn, mx))
        elif mx:
            # Every rated problem of this contest was solved.
            fullsolves.append((t, mx))
        elif mn:
            # No rated problem of this contest was solved.
            nosolves.append((t, mn))
        else:
            # No rated problems in the contest, which means rating is not yet available for
            # problems in this contest. Skip this data point.
            pass
    solvedcolor = "tab:orange"
    unsolvedcolor = "tab:blue"
    linecolor = "#00000022"
    outlinecolor = "#00000022"
    def scatter_outline(*args, **kwargs):
        # Draw the points, then a slightly larger translucent copy beneath
        # them to act as an outline.
        plt.scatter(*args, **kwargs)
        kwargs["zorder"] -= 1
        kwargs["color"] = outlinecolor
        if kwargs["marker"] == "*":
            kwargs["s"] *= 3
        elif kwargs["marker"] == "s":
            kwargs["s"] *= 1.5
        else:
            kwargs["s"] *= 2
        if "alpha" in kwargs:
            del kwargs["alpha"]
        if "label" in kwargs:
            del kwargs["label"]
        plt.scatter(*args, **kwargs)
    plt.clf()
    time_scatter, plot_min, plot_max = zip(*regular)
    if unsolved:
        scatter_outline(
            time_scatter,
            plot_min,
            zorder=10,
            s=14,
            marker="o",
            color=unsolvedcolor,
            label="Easiest unsolved",
        )
    if solved:
        scatter_outline(
            time_scatter,
            plot_max,
            zorder=10,
            s=14,
            marker="o",
            color=solvedcolor,
            label="Hardest solved",
        )
    ax = plt.gca()
    if solved and unsolved:
        # Join each contest's min/max pair with a faint vertical line.
        for t, mn, mx in regular:
            ax.add_line(mlines.Line2D((t, t), (mn, mx), color=linecolor))
    if fullsolves:
        scatter_outline(
            *zip(*fullsolves), zorder=15, s=42, marker="*", color=solvedcolor
        )
    if nosolves:
        scatter_outline(
            *zip(*nosolves), zorder=15, s=32, marker="X", color=unsolvedcolor
        )
    plt.legend(
        title=f"{handle}: {rating}",
        title_fontsize=plt.rcParams["legend.fontsize"],
        loc="upper left",
    ).set_zorder(20)
    gc.plot_rating_bg(cf.RATED_RANKS)
    plt.gcf().autofmt_xdate()
def _plot_average(practice, bin_size, label: str = ""):
    """Plot a running mean (window = bin_size) of practice problem ratings over time."""
    # Plot only when there are more points than the window size.
    if len(practice) > bin_size:
        sub_times, ratings = map(list, zip(*practice))
        sub_timestamps = [sub_time.timestamp() for sub_time in sub_times]
        mean_sub_timestamps = _running_mean(sub_timestamps, bin_size)
        mean_sub_times = [
            dt.datetime.fromtimestamp(timestamp)
            for timestamp in mean_sub_timestamps
        ]
        mean_ratings = _running_mean(ratings, bin_size)
        plt.plot(
            mean_sub_times,
            mean_ratings,
            linestyle="-",
            marker="",
            markerfacecolor="white",
            markeredgewidth=0.5,
            label=label,
        )
def _mention_to_handle(args, ctx):
new_args = []
for x in args:
if x.startswith("<@!"):
linked_acc = cf_common.user_db.get_handle(x[3:-1], ctx.guild.id)
if not linked_acc:
raise GraphCogError(
f"Handle for <@!{x[3:-1]}> not found in database"
)
else:
new_args.insert(len(new_args), linked_acc)
else:
new_args.insert(len(new_args), x)
return new_args
class Graphs(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        # Resolves "!name"-style arguments to discord members.
        self.converter = commands.MemberConverter()
    @commands.group(
        brief="Graphs for analyzing Codeforces activity",
        invoke_without_command=True,
    )
    async def plot(self, ctx):
        """Plot various graphs. Wherever Codeforces handles are accepted it is possible to
        use a server member's name instead by prefixing it with '!'."""
        # Invoked without a subcommand: show the help text for this group.
        await ctx.send_help("plot")
    @plot.command(
        brief="Plot Codeforces rating graph",
        usage="[+zoom] [handles...] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]",
    )
    async def rating(self, ctx, *args: str):
        """Plots Codeforces rating graph for the handles provided."""
        (zoom,), args = cf_common.filter_flags(args, ["+zoom"])
        filt = cf_common.SubFilter()
        args = filt.parse(args)
        args = _mention_to_handle(args,ctx)
        # Default to the invoking user when no handles are given.
        handles = args or ("!" + str(ctx.author),)
        handles = await cf_common.resolve_handles(ctx, self.converter, handles)
        resp = [await cf.user.rating(handle=handle) for handle in handles]
        resp = [
            filt.filter_rating_changes(rating_changes)
            for rating_changes in resp
        ]
        if not any(resp):
            handles_str = ", ".join(f"`{handle}`" for handle in handles)
            if len(handles) == 1:
                message = f"User {handles_str} is not rated"
            else:
                message = f"None of the given users {handles_str} are rated"
            raise GraphCogError(message)
        plt.clf()
        _plot_rating(resp)
        current_ratings = [
            rating_changes[-1].newRating if rating_changes else "Unrated"
            for rating_changes in resp
        ]
        labels = [
            gc.StrWrap(f"{handle} ({rating})")
            for handle, rating in zip(handles, current_ratings)
        ]
        plt.legend(labels, loc="upper left")
        if not zoom:
            # Without +zoom, always show at least the 1100-1800 band.
            min_rating = 1100
            max_rating = 1800
            for rating_changes in resp:
                for rating in rating_changes:
                    min_rating = min(min_rating, rating.newRating)
                    max_rating = max(max_rating, rating.newRating)
            plt.ylim(min_rating - 100, max_rating + 200)
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(
            title="Rating graph on Codeforces"
        )
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @plot.command(
        brief="Plot Codeforces extremes graph",
        usage="[handles] [+solved] [+unsolved]",
    )
    async def extreme(self, ctx, *args: str):
        """Plots pairs of lowest rated unsolved problem and highest rated solved problem for every
        contest that was rated for the given user.
        """
        (solved, unsolved), args = cf_common.filter_flags(
            args, ["+solved", "+unsolved"]
        )
        # With neither flag given, show both series.
        if not solved and not unsolved:
            solved = unsolved = True
        handles = args or ("!" + str(ctx.author),)
        (handle,) = await cf_common.resolve_handles(
            ctx, self.converter, handles
        )
        ratingchanges = await cf.user.rating(handle=handle)
        if not ratingchanges:
            raise GraphCogError(f"User {handle} is not rated")
        contest_ids = [change.contestId for change in ratingchanges]
        subs_by_contest_id = {contest_id: [] for contest_id in contest_ids}
        for sub in await cf.user.status(handle=handle):
            if sub.contestId in subs_by_contest_id:
                subs_by_contest_id[sub.contestId].append(sub)
        packed_contest_subs_problemset = [
            (
                cf_common.cache2.contest_cache.get_contest(contest_id),
                cf_common.cache2.problemset_cache.get_problemset(contest_id),
                subs_by_contest_id[contest_id],
            )
            for contest_id in contest_ids
        ]
        # Current rating = rating after the most recent rating change.
        rating = max(
            ratingchanges, key=lambda change: change.ratingUpdateTimeSeconds
        ).newRating
        _plot_extreme(
            handle, rating, packed_contest_subs_problemset, solved, unsolved
        )
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(title="Codeforces extremes graph")
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @plot.command(
        brief="Show histogram of solved problems on CF.",
        usage="[handles] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [c+marker..] [i+index..]",
    )
    async def solved(self, ctx, *args: str):
        """Shows a histogram of problems solved on Codeforces for the handles provided.
        e.g. ;plot solved meooow +contest +virtual +outof +dp"""
        filt = cf_common.SubFilter()
        args = filt.parse(args)
        args = _mention_to_handle(args,ctx)
        handles = args or ("!" + str(ctx.author),)
        handles = await cf_common.resolve_handles(ctx, self.converter, handles)
        resp = [await cf.user.status(handle=handle) for handle in handles]
        all_solved_subs = [
            filt.filter_subs(submissions) for submissions in resp
        ]
        if not any(all_solved_subs):
            raise GraphCogError(
                f"There are no problems within the specified parameters."
            )
        plt.clf()
        plt.xlabel("Problem rating")
        plt.ylabel("Number solved")
        if len(handles) == 1:
            # Display solved problem separately by type for a single user.
            handle, solved_by_type = handles[0], _classify_submissions(
                all_solved_subs[0]
            )
            all_ratings = [
                [sub.problem.rating for sub in solved_by_type[sub_type]]
                for sub_type in filt.types
            ]
            nice_names = nice_sub_type(filt.types)
            labels = [
                name.format(len(ratings))
                for name, ratings in zip(nice_names, all_ratings)
            ]
            step = 100
            # shift the range to center the text
            hist_bins = list(
                range(filt.rlo - step // 2, filt.rhi + step // 2 + 1, step)
            )
            plt.hist(all_ratings, stacked=True, bins=hist_bins, label=labels)
            total = sum(map(len, all_ratings))
            plt.legend(
                title=f"{handle}: {total}",
                title_fontsize=plt.rcParams["legend.fontsize"],
                loc="upper right",
            )
        else:
            # Multiple users: one (non-stacked) series per handle, coarser bins.
            all_ratings = [
                [sub.problem.rating for sub in solved_subs]
                for solved_subs in all_solved_subs
            ]
            labels = [
                gc.StrWrap(f"{handle}: {len(ratings)}")
                for handle, ratings in zip(handles, all_ratings)
            ]
            step = 200
            hist_bins = list(
                range(filt.rlo - step // 2, filt.rhi + step // 2 + 1, step)
            )
            plt.hist(all_ratings, bins=hist_bins)
            plt.legend(labels, loc="upper right")
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(
            title="Histogram of problems solved on Codeforces"
        )
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @plot.command(
        brief="Show histogram of solved problems on CF.",
        usage="[handles] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [c+marker..] [i+index..]",
    )
    async def hist(self, ctx, *args: str):
        """Shows the histogram of problems solved over time on Codeforces for the handles provided."""
        filt = cf_common.SubFilter()
        args = filt.parse(args)
        args = _mention_to_handle(args,ctx)
        handles = args or ("!" + str(ctx.author),)
        handles = await cf_common.resolve_handles(ctx, self.converter, handles)
        resp = [await cf.user.status(handle=handle) for handle in handles]
        all_solved_subs = [
            filt.filter_subs(submissions) for submissions in resp
        ]
        if not any(all_solved_subs):
            raise GraphCogError(
                f"There are no problems within the specified parameters."
            )
        plt.clf()
        plt.xlabel("Time")
        plt.ylabel("Number solved")
        if len(handles) == 1:
            # Single user: stack submission times by participant type.
            handle, solved_by_type = handles[0], _classify_submissions(
                all_solved_subs[0]
            )
            all_times = [
                [
                    dt.datetime.fromtimestamp(sub.creationTimeSeconds)
                    for sub in solved_by_type[sub_type]
                ]
                for sub_type in filt.types
            ]
            nice_names = nice_sub_type(filt.types)
            labels = [
                name.format(len(times))
                for name, times in zip(nice_names, all_times)
            ]
            plt.hist(all_times, stacked=True, label=labels, bins=34)
            total = sum(map(len, all_times))
            plt.legend(
                title=f"{handle}: {total}",
                title_fontsize=plt.rcParams["legend.fontsize"],
            )
        else:
            all_times = [
                [
                    dt.datetime.fromtimestamp(sub.creationTimeSeconds)
                    for sub in solved_subs
                ]
                for solved_subs in all_solved_subs
            ]
            # NOTE: matplotlib ignores labels that begin with _
            # https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
            # Add zero-width space to work around this
            labels = [
                gc.StrWrap(f"{handle}: {len(times)}")
                for handle, times in zip(handles, all_times)
            ]
            plt.hist(all_times)
            plt.legend(labels)
        plt.gcf().autofmt_xdate()
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(
            title="Histogram of number of solved problems over time"
        )
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    @plot.command(
        brief="Show history of problems solved by rating.",
        aliases=["chilli"],
        usage="[handle] [+practice] [+contest] [+virtual] [+outof] [+team] [+tag..] [r>=rating] [r<=rating] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy] [b=10] [s=3] [c+marker..] [i+index..]",
    )
    async def scatter(self, ctx, *args):
        """Plot Codeforces rating overlaid on a scatter plot of problems solved.
        Also plots a running average of ratings of problems solved in practice."""
        filt = cf_common.SubFilter()
        args = filt.parse(args)
        args = _mention_to_handle(args,ctx)
        # b= sets the running-average window, s= the scatter point size.
        handle, bin_size, point_size = None, 10, 3
        for arg in args:
            if arg[0:2] == "b=":
                bin_size = int(arg[2:])
            elif arg[0:2] == "s=":
                point_size = int(arg[2:])
            else:
                handle = arg
        if bin_size < 1 or point_size < 1 or point_size > 100:
            raise GraphCogError("Invalid parameters")
        handle = handle or "!" + str(ctx.author)
        (handle,) = await cf_common.resolve_handles(
            ctx, self.converter, (handle,)
        )
        rating_resp = [await cf.user.rating(handle=handle)]
        rating_resp = [
            filt.filter_rating_changes(rating_changes)
            for rating_changes in rating_resp
        ]
        submissions = filt.filter_subs(await cf.user.status(handle=handle))
        def extract_time_and_rating(submissions):
            # Submission -> (datetime, problem rating) pairs for plotting.
            return [
                (
                    dt.datetime.fromtimestamp(sub.creationTimeSeconds),
                    sub.problem.rating,
                )
                for sub in submissions
            ]
        if not any(rating_resp) and not any(submissions):
            raise GraphCogError(
                f"User `{handle}` is not rated and has not solved any rated problem"
            )
        solved_by_type = _classify_submissions(submissions)
        regular = extract_time_and_rating(
            solved_by_type["CONTESTANT"] + solved_by_type["OUT_OF_COMPETITION"]
        )
        practice = extract_time_and_rating(solved_by_type["PRACTICE"])
        virtual = extract_time_and_rating(solved_by_type["VIRTUAL"])
        plt.clf()
        _plot_scatter(regular, practice, virtual, point_size)
        labels = []
        if practice:
            labels.append("Practice")
        if regular:
            labels.append("Regular")
        if virtual:
            labels.append("Virtual")
        plt.legend(labels, loc="upper left")
        _plot_average(practice, bin_size)
        _plot_rating(rating_resp, mark="")
        # zoom
        ymin, ymax = plt.gca().get_ylim()
        plt.ylim(max(ymin, filt.rlo - 100), min(ymax, filt.rhi + 100))
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(
            title=f"Rating vs solved problem rating for {handle}"
        )
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
    async def _rating_hist(self, ctx, ratings, mode, binsize, title):
        """Render and send a rank-colored histogram of `ratings`, binned by `binsize`."""
        if mode not in ("log", "normal"):
            raise GraphCogError("Mode should be either `log` or `normal`")
        # Drop negative ratings — presumably a sentinel for unrated; TODO confirm.
        ratings = [r for r in ratings if r >= 0]
        assert ratings, "Cannot histogram plot empty list of ratings"
        assert 100 % binsize == 0  # because bins is semi-hardcoded
        bins = 39 * 100 // binsize
        # One color per bin, taken from the rank covering that rating range.
        colors = []
        low, high = 0, binsize * bins
        for rank in cf.RATED_RANKS:
            for r in range(max(rank.low, low), min(rank.high, high), binsize):
                colors.append("#" + "%06x" % rank.color_embed)
        assert len(colors) == bins, f"Expected {bins} colors, got {len(colors)}"
        height = [0] * bins
        for r in ratings:
            height[r // binsize] += 1
        # Cumulative percentage of users up to each bin, shown in tick labels.
        csum = 0
        cent = [0]
        users = sum(height)
        for h in height:
            csum += h
            cent.append(round(100 * csum / users))
        x = [k * binsize for k in range(bins)]
        label = [f"{r} ({c})" for r, c in zip(x, cent)]
        # Trim empty bins from both ends of the range.
        l, r = 0, bins - 1
        while not height[l]:
            l += 1
        while not height[r]:
            r -= 1
        x = x[l : r + 1]
        cent = cent[l : r + 1]
        label = label[l : r + 1]
        colors = colors[l : r + 1]
        height = height[l : r + 1]
        plt.clf()
        fig = plt.figure(figsize=(15, 5))
        plt.xticks(rotation=45)
        plt.xlim(l * binsize - binsize // 2, r * binsize + binsize // 2)
        plt.bar(
            x,
            height,
            binsize * 0.9,
            color=colors,
            linewidth=0,
            tick_label=label,
            log=(mode == "log"),
        )
        plt.xlabel("Rating")
        plt.ylabel("Number of users")
        discord_file = gc.get_current_figure_as_file()
        plt.close(fig)
        embed = discord_common.cf_color_embed(title=title)
        discord_common.attach_image(embed, discord_file)
        discord_common.set_author_footer(embed, ctx.author)
        await ctx.send(embed=embed, file=discord_file)
@plot.command(brief="Show server rating distribution")
async def distrib(self, ctx):
    """Plots rating distribution of users in this server"""

    def _excluded(userid):
        # A user is excluded when they have left the server or carry the
        # "Purgatory" role.
        member = ctx.guild.get_member(int(userid))
        if member is None:
            return True
        return any(role.name == "Purgatory" for role in member.roles)

    rows = cf_common.user_db.get_cf_users_for_guild(ctx.guild.id)
    ratings = []
    for user_id, cf_user in rows:
        if cf_user.rating is not None and not _excluded(user_id):
            ratings.append(cf_user.rating)
    await self._rating_hist(
        ctx,
        ratings,
        "normal",
        binsize=100,
        title="Rating distribution of server members",
    )
@plot.command(
    brief="Show Codeforces rating distribution",
    usage="[normal/log] [active/all] [contest_cutoff=5]",
)
async def cfdistrib(
    self, ctx, mode: str = "log", activity="active", contest_cutoff: int = 5
):
    """Plots rating distribution of either active or all users on Codeforces, in either normal or log scale.
    Default mode is log, default activity is active (competed in last 90 days)
    Default contest cutoff is 5 (competed at least five times overall)
    """
    if activity not in ["active", "all"]:
        raise GraphCogError("Activity should be either `active` or `all`")
    # A cutoff of 0 means "no recency requirement": count all contests.
    time_cutoff = 0
    if activity == "active":
        time_cutoff = int(time.time()) - CONTEST_ACTIVE_TIME_CUTOFF
    cache = cf_common.cache2.rating_changes_cache
    handles = cache.get_users_with_more_than_n_contests(
        time_cutoff, contest_cutoff
    )
    if not handles:
        raise GraphCogError(
            "No Codeforces users meet the specified criteria"
        )
    ratings = [cache.get_current_rating(handle) for handle in handles]
    title = (
        f"Rating distribution of {activity} Codeforces users ({mode} scale)"
    )
    await self._rating_hist(ctx, ratings, mode, binsize=100, title=title)
@plot.command(
    brief="Show percentile distribution on codeforces",
    usage="[+zoom] [handles...]",
)
async def centile(self, ctx, *args: str):
    """Show percentile distribution of codeforces and mark given handles in the plot. If +zoom and handles are given, it zooms to the neighborhood of the handles."""
    (zoom,), args = cf_common.filter_flags(args, ["+zoom"])
    # Prepare data
    intervals = [(rank.low, rank.high) for rank in cf.RATED_RANKS]
    colors = [rank.color_graph for rank in cf.RATED_RANKS]
    ratings = cf_common.cache2.rating_changes_cache.get_all_ratings()
    # Sorted ratings let us read percentiles off the index directly:
    # perc[i] is the percentile of the i-th lowest rating.
    ratings = np.array(sorted(ratings))
    n = len(ratings)
    perc = 100 * np.arange(n) / n
    if args:
        handles = await cf_common.resolve_handles(
            ctx, self.converter, args, mincnt=0, maxcnt=50
        )
        infos = await cf.user.info(handles=list(set(handles)))

        users_to_mark = {}
        for info in infos:
            if info.rating is None:
                raise GraphCogError(f"User `{info.handle}` is not rated")
            # Percentile = share of rated users strictly below this rating.
            ix = bisect.bisect_left(ratings, info.rating)
            cent = 100 * ix / len(ratings)
            users_to_mark[info.handle] = info.rating, cent
    else:
        users_to_mark = {}

    # Plot
    plt.clf()
    fig, ax = plt.subplots(1)
    ax.plot(ratings, perc, color="#00000099")
    plt.xlabel("Rating")
    plt.ylabel("Percentile")
    # Hide the frame and tick marks; rank bands and grid lines below carry
    # the visual structure instead.
    for pos in ["right", "top", "bottom", "left"]:
        ax.spines[pos].set_visible(False)
    ax.tick_params(axis="both", which="both", length=0)

    # Color intervals by rank
    for interval, color in zip(intervals, colors):
        alpha = "99"
        l, r = interval
        col = color + alpha
        # Tall rectangle spanning the rank's rating interval; height 200
        # comfortably covers the 0-100 percentile axis.
        rect = patches.Rectangle(
            (l, -50), r - l, 200, edgecolor="none", facecolor=col
        )
        ax.add_patch(rect)

    # Mark users in plot
    for user, point in users_to_mark.items():
        x, y = point
        plt.annotate(
            user,
            xy=point,
            xytext=(0, 0),
            textcoords="offset points",
            ha="right",
            va="bottom",
        )
        plt.plot(
            *point,
            marker="o",
            markersize=5,
            color="red",
            markeredgecolor="darkred",
        )

    # Set limits (before drawing tick lines)
    if users_to_mark and zoom:
        # Zoom to the bounding box of the marked users plus a margin.
        xmargin = 50
        ymargin = 5
        xmin = min(point[0] for point in users_to_mark.values())
        xmax = max(point[0] for point in users_to_mark.values())
        ymin = min(point[1] for point in users_to_mark.values())
        ymax = max(point[1] for point in users_to_mark.values())
        plt.xlim(xmin - xmargin, xmax + xmargin)
        plt.ylim(ymin - ymargin, ymax + ymargin)
    else:
        plt.xlim(ratings[0], ratings[-1])
        plt.ylim(-1.5, 101.5)

    # Draw tick lines
    linecolor = "#00000022"
    inf = 10000

    def horz_line(y):
        # Effectively-infinite horizontal grid line at height y.
        l = mlines.Line2D([-inf, inf], [y, y], color=linecolor)
        ax.add_line(l)

    def vert_line(x):
        # Effectively-infinite vertical grid line at position x.
        l = mlines.Line2D([x, x], [-inf, inf], color=linecolor)
        ax.add_line(l)

    for y in ax.get_yticks():
        horz_line(y)
    for x in ax.get_xticks():
        vert_line(x)

    # Discord stuff
    discord_file = gc.get_current_figure_as_file()
    embed = discord_common.cf_color_embed(
        title=f"Rating/percentile relationship"
    )
    discord_common.attach_image(embed, discord_file)
    discord_common.set_author_footer(embed, ctx.author)
    await ctx.send(embed=embed, file=discord_file)
@plot.command(brief="Plot histogram of gudgiting")
async def howgud(self, ctx, *members: discord.Member):
    members = members or (ctx.author,)
    if len(members) > 5:
        raise GraphCogError("Please specify at most 5 gudgitters.")

    # shift the [-300, 300] gitgud range to center the text
    hist_bins = list(range(-300 - 50, 300 + 50 + 1, 100))
    deltas = []
    labels = []
    for member in members:
        # One list of problem deltas per member, labeled "<name>: <count>".
        delta = [row[0] for row in cf_common.user_db.howgud(member.id)]
        deltas.append(delta)
        labels.append(gc.StrWrap(f"{member.display_name}: {len(delta)}"))
    plt.clf()
    plt.margins(x=0)
    plt.hist(deltas, bins=hist_bins, label=labels, rwidth=1)
    plt.xlabel("Problem delta")
    plt.ylabel("Number solved")
    plt.legend(prop=gc.fontprop)

    discord_file = gc.get_current_figure_as_file()
    embed = discord_common.cf_color_embed(title="Histogram of gudgitting")
    discord_common.attach_image(embed, discord_file)
    discord_common.set_author_footer(embed, ctx.author)
    await ctx.send(embed=embed, file=discord_file)
@plot.command(brief="Plot distribution of server members by country")
async def country(self, ctx, *countries):
    """Plots distribution of server members by countries. When no countries are specified, plots
    a bar graph of all members by country. When one or more countries are specified, plots a
    swarmplot of members by country and rating. Only members with registered handles and
    countries set on Codeforces are considered.
    """
    max_countries = 8
    if len(countries) > max_countries:
        raise GraphCogError(
            f"At most {max_countries} countries may be specified."
        )
    users = cf_common.user_db.get_cf_users_for_guild(ctx.guild.id)
    # Member count per country, ignoring users without a country set.
    counter = collections.Counter(
        user.country for _, user in users if user.country
    )
    if not countries:
        # Bar graph of all members by country.
        # list because seaborn complains for tuple.
        countries, counts = map(list, zip(*counter.most_common()))
        plt.clf()
        fig = plt.figure(figsize=(15, 5))
        with sns.axes_style(rc={"xtick.bottom": True}):
            sns.barplot(x=countries, y=counts)

        # Show counts on top of bars.
        ax = plt.gca()
        for p in ax.patches:
            # Center of the bar, just above its top edge.
            x = p.get_x() + p.get_width() / 2
            y = p.get_y() + p.get_height() + 0.5
            ax.text(
                x,
                y,
                int(p.get_height()),
                horizontalalignment="center",
                color="#30304f",
                fontsize="x-small",
            )
        plt.xticks(rotation=40, horizontalalignment="right")
        ax.tick_params(
            axis="x", length=4, color=ax.spines["bottom"].get_edgecolor()
        )
        plt.xlabel("Country")
        plt.ylabel("Number of members")
        discord_file = gc.get_current_figure_as_file()
        plt.close(fig)
        embed = discord_common.cf_color_embed(
            title="Distribution of server members by country"
        )
    else:
        # Swarmplot of members by country and rating.
        # Normalize capitalization to match the country names stored on CF.
        countries = [country.title() for country in countries]
        data = [
            [user.country, user.rating]
            for _, user in users
            if user.rating and user.country and user.country in countries
        ]
        if not data:
            raise GraphCogError(
                "No rated members from the specified countries are present."
            )
        # Palette: each rating maps to its rank's embed color.
        color_map = {
            rating: f"#{cf.rating2rank(rating).color_embed:06x}"
            for _, rating in data
        }
        df = pd.DataFrame(data, columns=["Country", "Rating"])
        # Order countries by member count, most populous first.
        column_order = sorted(
            (country for country in countries if counter[country]),
            key=counter.get,
            reverse=True,
        )
        plt.clf()
        if len(column_order) <= 5:
            sns.swarmplot(
                x="Country",
                y="Rating",
                hue="Rating",
                data=df,
                order=column_order,
                palette=color_map,
            )
        else:
            # Add ticks and rotate tick labels to avoid overlap.
            with sns.axes_style(rc={"xtick.bottom": True}):
                sns.swarmplot(
                    x="Country",
                    y="Rating",
                    hue="Rating",
                    data=df,
                    order=column_order,
                    palette=color_map,
                )
            plt.xticks(rotation=30, horizontalalignment="right")
            ax = plt.gca()
            ax.tick_params(
                axis="x", color=ax.spines["bottom"].get_edgecolor()
            )
        # The hue legend duplicates the y-axis; drop it.
        plt.legend().remove()
        plt.xlabel("Country")
        plt.ylabel("Rating")
        discord_file = gc.get_current_figure_as_file()
        embed = discord_common.cf_color_embed(
            title="Rating distribution of server members by " "country"
        )
    discord_common.attach_image(embed, discord_file)
    discord_common.set_author_footer(embed, ctx.author)
    await ctx.send(embed=embed, file=discord_file)
@plot.command(
    brief="Show rating changes by rank",
    usage="contest_id [+server] [+zoom] [handles..]",
)
async def visualrank(self, ctx, contest_id: int, *args: str):
    """Plot rating changes by rank. Add handles to specify a handle in the plot.
    if arguments contains `+server`, it will include just server members and not all codeforces users.
    Specify `+zoom` to zoom to the neighborhood of handles."""
    args = set(args)
    (in_server, zoom), handles = cf_common.filter_flags(
        args, ["+server", "+zoom"]
    )
    handles = await cf_common.resolve_handles(
        ctx, self.converter, handles, mincnt=0, maxcnt=20
    )
    users = cf_common.cache2.rating_changes_cache.get_rating_changes_for_contest(
        contest_id
    )
    if not users:
        raise GraphCogError(
            f"No rating change cache for contest `{contest_id}`"
        )
    if in_server:
        # Set for O(1) membership tests while filtering participants.
        guild_handles = {
            handle
            for discord_id, handle in cf_common.user_db.get_handles_for_guild(
                ctx.guild.id
            )
        }
        users = [user for user in users if user.handle in guild_handles]
        # The filter may remove every participant; fail with a friendly
        # error instead of an IndexError on `users[0]` below.
        if not users:
            raise GraphCogError(
                "No server members participated in this contest"
            )
    ranks = []
    delta = []
    color = []
    users_to_mark = dict()
    for user in users:
        user_delta = user.newRating - user.oldRating
        ranks.append(user.rank)
        delta.append(user_delta)
        # Point color reflects the user's rank *before* the contest.
        color.append(cf.rating2rank(user.oldRating).color_graph)
        if user.handle in handles:
            users_to_mark[user.handle] = (user.rank, user_delta)
    title = users[0].contestName
    plt.clf()
    fig = plt.figure(figsize=(12, 8))
    plt.title(title)
    plt.xlabel("Rank")
    plt.ylabel("Rating Changes")
    ymargin = 50
    xmargin = 50
    if users_to_mark and zoom:
        # Zoom to the bounding box of the marked users plus a margin.
        xmin = min(point[0] for point in users_to_mark.values())
        xmax = max(point[0] for point in users_to_mark.values())
        ymin = min(point[1] for point in users_to_mark.values())
        ymax = max(point[1] for point in users_to_mark.values())
        # Marker size shrinks as the visible rank range grows.
        mark_size = 2e4 / (xmax - xmin + 2 * xmargin)
        plt.xlim(xmin - xmargin, xmax + xmargin)
        plt.ylim(ymin - ymargin, ymax + ymargin)
    else:
        # Full view: symmetric y-range covering at least [-200, 200] and
        # any marked user's delta.
        ylim = 0
        if users_to_mark:
            ylim = max(abs(point[1]) for point in users_to_mark.values())
        ylim = max(ylim, 200)
        xmax = max(user.rank for user in users)
        mark_size = 2e4 / (xmax + 2 * xmargin)
        plt.xlim(-xmargin, xmax + xmargin)
        plt.ylim(-ylim - ymargin, ylim + ymargin)
    plt.scatter(ranks, delta, s=mark_size, c=color)
    for handle, point in users_to_mark.items():
        plt.annotate(
            handle,
            xy=point,
            xytext=(0, 0),
            textcoords="offset points",
            ha="left",
            va="bottom",
            fontsize="large",
        )
        plt.plot(*point, marker="o", markersize=5, color="black")
    discord_file = gc.get_current_figure_as_file()
    plt.close(fig)
    embed = discord_common.cf_color_embed(title=title)
    discord_common.attach_image(embed, discord_file)
    discord_common.set_author_footer(embed, ctx.author)
    await ctx.send(embed=embed, file=discord_file)
@discord_common.send_error_if(
    GraphCogError, cf_common.ResolveHandleError, cf_common.FilterError
)
async def cog_command_error(self, ctx, error):
    # The send_error_if decorator reports errors of the listed types to the
    # user; nothing else needs to happen here.
    pass
def setup(bot):
    """Extension entry point: register the Graphs cog with the bot."""
    cog = Graphs(bot)
    bot.add_cog(cog)
| [
"matplotlib.pyplot.title",
"tle.util.discord_common.set_author_footer",
"tle.util.codeforces_api.user.rating",
"tle.util.graph_common.plot_rating_bg",
"tle.util.codeforces_common.cache2.rating_changes_cache.get_users_with_more_than_n_contests",
"tle.util.codeforces_common.user_db.get_handles_for_guild",
... | [((517, 561), 'pandas.plotting.register_matplotlib_converters', 'pd.plotting.register_matplotlib_converters', ([], {}), '()\n', (559, 561), True, 'import pandas as pd\n'), ((1554, 1587), 'tle.util.graph_common.plot_rating_bg', 'gc.plot_rating_bg', (['cf.RATED_RANKS'], {}), '(cf.RATED_RANKS)\n', (1571, 1587), True, 'from tle.util import graph_common as gc\n'), ((4630, 4639), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4637, 4639), True, 'from matplotlib import pyplot as plt\n'), ((5169, 5178), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5176, 5178), True, 'from matplotlib import pyplot as plt\n'), ((5739, 5772), 'tle.util.graph_common.plot_rating_bg', 'gc.plot_rating_bg', (['cf.RATED_RANKS'], {}), '(cf.RATED_RANKS)\n', (5756, 5772), True, 'from tle.util import graph_common as gc\n'), ((7149, 7246), 'discord.ext.commands.group', 'commands.group', ([], {'brief': '"""Graphs for analyzing Codeforces activity"""', 'invoke_without_command': '(True)'}), "(brief='Graphs for analyzing Codeforces activity',\n invoke_without_command=True)\n", (7163, 7246), False, 'from discord.ext import commands\n'), ((38306, 38406), 'tle.util.discord_common.send_error_if', 'discord_common.send_error_if', (['GraphCogError', 'cf_common.ResolveHandleError', 'cf_common.FilterError'], {}), '(GraphCogError, cf_common.ResolveHandleError,\n cf_common.FilterError)\n', (38334, 38406), False, 'from tle.util import discord_common\n'), ((1341, 1457), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'ratings'], {'linestyle': '"""-"""', 'marker': 'mark', 'markersize': '(3)', 'markerfacecolor': '"""white"""', 'markeredgewidth': '(0.5)'}), "(times, ratings, linestyle='-', marker=mark, markersize=3,\n markerfacecolor='white', markeredgewidth=0.5)\n", (1349, 1457), True, 'from matplotlib import pyplot as plt\n'), ((4189, 4217), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*args'], {}), '(*args, **kwargs)\n', (4200, 4217), True, 'from matplotlib import pyplot as plt\n'), ((4596, 
4624), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*args'], {}), '(*args, **kwargs)\n', (4607, 4624), True, 'from matplotlib import pyplot as plt\n'), ((6294, 6421), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_sub_times', 'mean_ratings'], {'linestyle': '"""-"""', 'marker': '""""""', 'markerfacecolor': '"""white"""', 'markeredgewidth': '(0.5)', 'label': 'label'}), "(mean_sub_times, mean_ratings, linestyle='-', marker='',\n markerfacecolor='white', markeredgewidth=0.5, label=label)\n", (6302, 6421), True, 'from matplotlib import pyplot as plt\n'), ((7116, 7142), 'discord.ext.commands.MemberConverter', 'commands.MemberConverter', ([], {}), '()\n', (7140, 7142), False, 'from discord.ext import commands\n'), ((7781, 7820), 'tle.util.codeforces_common.filter_flags', 'cf_common.filter_flags', (['args', "['+zoom']"], {}), "(args, ['+zoom'])\n", (7803, 7820), True, 'from tle.util import codeforces_common as cf_common\n'), ((7836, 7857), 'tle.util.codeforces_common.SubFilter', 'cf_common.SubFilter', ([], {}), '()\n', (7855, 7857), True, 'from tle.util import codeforces_common as cf_common\n'), ((8601, 8610), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8608, 8610), True, 'from matplotlib import pyplot as plt\n'), ((8937, 8973), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {'loc': '"""upper left"""'}), "(labels, loc='upper left')\n", (8947, 8973), True, 'from matplotlib import pyplot as plt\n'), ((9357, 9388), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (9386, 9388), True, 'from tle.util import graph_common as gc\n'), ((9405, 9470), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Rating graph on Codeforces"""'}), "(title='Rating graph on Codeforces')\n", (9434, 9470), False, 'from tle.util import discord_common\n'), ((9501, 9549), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, 
discord_file)\n', (9528, 9549), False, 'from tle.util import discord_common\n'), ((9558, 9609), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (9590, 9609), False, 'from tle.util import discord_common\n'), ((10031, 10085), 'tle.util.codeforces_common.filter_flags', 'cf_common.filter_flags', (['args', "['+solved', '+unsolved']"], {}), "(args, ['+solved', '+unsolved'])\n", (10053, 10085), True, 'from tle.util import codeforces_common as cf_common\n'), ((11386, 11417), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (11415, 11417), True, 'from tle.util import graph_common as gc\n'), ((11434, 11498), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Codeforces extremes graph"""'}), "(title='Codeforces extremes graph')\n", (11463, 11498), False, 'from tle.util import discord_common\n'), ((11507, 11555), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (11534, 11555), False, 'from tle.util import discord_common\n'), ((11564, 11615), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (11596, 11615), False, 'from tle.util import discord_common\n'), ((12139, 12160), 'tle.util.codeforces_common.SubFilter', 'cf_common.SubFilter', ([], {}), '()\n', (12158, 12160), True, 'from tle.util import codeforces_common as cf_common\n'), ((12715, 12724), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12722, 12724), True, 'from matplotlib import pyplot as plt\n'), ((12733, 12761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Problem rating"""'], {}), "('Problem rating')\n", (12743, 12761), True, 'from matplotlib import pyplot as plt\n'), ((12770, 12797), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number solved"""'], {}), 
"('Number solved')\n", (12780, 12797), True, 'from matplotlib import pyplot as plt\n'), ((14472, 14503), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (14501, 14503), True, 'from tle.util import graph_common as gc\n'), ((14520, 14606), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Histogram of problems solved on Codeforces"""'}), "(title=\n 'Histogram of problems solved on Codeforces')\n", (14549, 14606), False, 'from tle.util import discord_common\n'), ((14632, 14680), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (14659, 14680), False, 'from tle.util import discord_common\n'), ((14689, 14740), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (14721, 14740), False, 'from tle.util import discord_common\n'), ((15212, 15233), 'tle.util.codeforces_common.SubFilter', 'cf_common.SubFilter', ([], {}), '()\n', (15231, 15233), True, 'from tle.util import codeforces_common as cf_common\n'), ((15788, 15797), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15795, 15797), True, 'from matplotlib import pyplot as plt\n'), ((15806, 15824), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (15816, 15824), True, 'from matplotlib import pyplot as plt\n'), ((15833, 15860), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number solved"""'], {}), "('Number solved')\n", (15843, 15860), True, 'from matplotlib import pyplot as plt\n'), ((17441, 17472), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (17470, 17472), True, 'from tle.util import graph_common as gc\n'), ((17489, 17581), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Histogram of number of solved problems over 
time"""'}), "(title=\n 'Histogram of number of solved problems over time')\n", (17518, 17581), False, 'from tle.util import discord_common\n'), ((17607, 17655), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (17634, 17655), False, 'from tle.util import discord_common\n'), ((17664, 17715), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (17696, 17715), False, 'from tle.util import discord_common\n'), ((18288, 18309), 'tle.util.codeforces_common.SubFilter', 'cf_common.SubFilter', ([], {}), '()\n', (18307, 18309), True, 'from tle.util import codeforces_common as cf_common\n'), ((20011, 20020), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20018, 20020), True, 'from matplotlib import pyplot as plt\n'), ((20284, 20320), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {'loc': '"""upper left"""'}), "(labels, loc='upper left')\n", (20294, 20320), True, 'from matplotlib import pyplot as plt\n'), ((20559, 20590), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (20588, 20590), True, 'from tle.util import graph_common as gc\n'), ((20607, 20696), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': 'f"""Rating vs solved problem rating for {handle}"""'}), "(title=\n f'Rating vs solved problem rating for {handle}')\n", (20636, 20696), False, 'from tle.util import discord_common\n'), ((20722, 20770), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (20749, 20770), False, 'from tle.util import discord_common\n'), ((20779, 20830), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (20811, 20830), False, 'from tle.util import 
discord_common\n'), ((22268, 22277), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (22275, 22277), True, 'from matplotlib import pyplot as plt\n'), ((22292, 22319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (22302, 22319), True, 'from matplotlib import pyplot as plt\n'), ((22329, 22352), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (22339, 22352), True, 'from matplotlib import pyplot as plt\n'), ((22361, 22425), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(l * binsize - binsize // 2)', '(r * binsize + binsize // 2)'], {}), '(l * binsize - binsize // 2, r * binsize + binsize // 2)\n', (22369, 22425), True, 'from matplotlib import pyplot as plt\n'), ((22434, 22536), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'height', '(binsize * 0.9)'], {'color': 'colors', 'linewidth': '(0)', 'tick_label': 'label', 'log': "(mode == 'log')"}), "(x, height, binsize * 0.9, color=colors, linewidth=0, tick_label=\n label, log=mode == 'log')\n", (22441, 22536), True, 'from matplotlib import pyplot as plt\n'), ((22637, 22657), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rating"""'], {}), "('Rating')\n", (22647, 22657), True, 'from matplotlib import pyplot as plt\n'), ((22666, 22695), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of users"""'], {}), "('Number of users')\n", (22676, 22695), True, 'from matplotlib import pyplot as plt\n'), ((22720, 22751), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (22749, 22751), True, 'from tle.util import graph_common as gc\n'), ((22760, 22774), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (22769, 22774), True, 'from matplotlib import pyplot as plt\n'), ((22792, 22834), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': 'title'}), '(title=title)\n', (22821, 22834), False, 'from tle.util import discord_common\n'), ((22843, 
22891), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (22870, 22891), False, 'from tle.util import discord_common\n'), ((22900, 22951), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (22932, 22951), False, 'from tle.util import discord_common\n'), ((23385, 23439), 'tle.util.codeforces_common.user_db.get_cf_users_for_guild', 'cf_common.user_db.get_cf_users_for_guild', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (23425, 23439), True, 'from tle.util import codeforces_common as cf_common\n'), ((24624, 24731), 'tle.util.codeforces_common.cache2.rating_changes_cache.get_users_with_more_than_n_contests', 'cf_common.cache2.rating_changes_cache.get_users_with_more_than_n_contests', (['time_cutoff', 'contest_cutoff'], {}), '(\n time_cutoff, contest_cutoff)\n', (24697, 24731), True, 'from tle.util import codeforces_common as cf_common\n'), ((25579, 25618), 'tle.util.codeforces_common.filter_flags', 'cf_common.filter_flags', (['args', "['+zoom']"], {}), "(args, ['+zoom'])\n", (25601, 25618), True, 'from tle.util import codeforces_common as cf_common\n'), ((25795, 25850), 'tle.util.codeforces_common.cache2.rating_changes_cache.get_all_ratings', 'cf_common.cache2.rating_changes_cache.get_all_ratings', ([], {}), '()\n', (25848, 25850), True, 'from tle.util import codeforces_common as cf_common\n'), ((26597, 26606), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (26604, 26606), True, 'from matplotlib import pyplot as plt\n'), ((26625, 26640), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (26637, 26640), True, 'from matplotlib import pyplot as plt\n'), ((26700, 26720), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rating"""'], {}), "('Rating')\n", (26710, 26720), True, 'from matplotlib import pyplot as plt\n'), ((26729, 26753), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Percentile"""'], {}), "('Percentile')\n", (26739, 26753), True, 'from matplotlib import pyplot as plt\n'), ((28859, 28890), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (28888, 28890), True, 'from tle.util import graph_common as gc\n'), ((28907, 28977), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': 'f"""Rating/percentile relationship"""'}), "(title=f'Rating/percentile relationship')\n", (28936, 28977), False, 'from tle.util import discord_common\n'), ((29008, 29056), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (29035, 29056), False, 'from tle.util import discord_common\n'), ((29065, 29116), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (29097, 29116), False, 'from tle.util import discord_common\n'), ((29840, 29849), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (29847, 29849), True, 'from matplotlib import pyplot as plt\n'), ((29858, 29874), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)'}), '(x=0)\n', (29869, 29874), True, 'from matplotlib import pyplot as plt\n'), ((29883, 29939), 'matplotlib.pyplot.hist', 'plt.hist', (['deltas'], {'bins': 'hist_bins', 'label': 'labels', 'rwidth': '(1)'}), '(deltas, bins=hist_bins, label=labels, rwidth=1)\n', (29891, 29939), True, 'from matplotlib import pyplot as plt\n'), ((29948, 29975), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Problem delta"""'], {}), "('Problem delta')\n", (29958, 29975), True, 'from matplotlib import pyplot as plt\n'), ((29984, 30011), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number solved"""'], {}), "('Number solved')\n", (29994, 30011), True, 'from matplotlib import pyplot as plt\n'), ((30020, 30048), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 'gc.fontprop'}), 
'(prop=gc.fontprop)\n', (30030, 30048), True, 'from matplotlib import pyplot as plt\n'), ((30073, 30104), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (30102, 30104), True, 'from tle.util import graph_common as gc\n'), ((30121, 30183), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Histogram of gudgitting"""'}), "(title='Histogram of gudgitting')\n", (30150, 30183), False, 'from tle.util import discord_common\n'), ((30192, 30240), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (30219, 30240), False, 'from tle.util import discord_common\n'), ((30249, 30300), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (30281, 30300), False, 'from tle.util import discord_common\n'), ((31036, 31090), 'tle.util.codeforces_common.user_db.get_cf_users_for_guild', 'cf_common.user_db.get_cf_users_for_guild', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (31076, 31090), True, 'from tle.util import codeforces_common as cf_common\n'), ((31109, 31179), 'collections.Counter', 'collections.Counter', (['(user.country for _, user in users if user.country)'], {}), '(user.country for _, user in users if user.country)\n', (31128, 31179), False, 'import collections\n'), ((34602, 34650), 'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (34629, 34650), False, 'from tle.util import discord_common\n'), ((34659, 34710), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (34691, 34710), False, 'from tle.util import discord_common\n'), ((35283, 35333), 'tle.util.codeforces_common.filter_flags', 'cf_common.filter_flags', (['args', "['+server', '+zoom']"], {}), "(args, 
['+server', '+zoom'])\n", (35305, 35333), True, 'from tle.util import codeforces_common as cf_common\n'), ((35496, 35581), 'tle.util.codeforces_common.cache2.rating_changes_cache.get_rating_changes_for_contest', 'cf_common.cache2.rating_changes_cache.get_rating_changes_for_contest', (['contest_id'], {}), '(contest_id\n )\n', (35564, 35581), True, 'from tle.util import codeforces_common as cf_common\n'), ((36512, 36521), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (36519, 36521), True, 'from matplotlib import pyplot as plt\n'), ((36536, 36563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (36546, 36563), True, 'from matplotlib import pyplot as plt\n'), ((36572, 36588), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (36581, 36588), True, 'from matplotlib import pyplot as plt\n'), ((36597, 36615), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rank"""'], {}), "('Rank')\n", (36607, 36615), True, 'from matplotlib import pyplot as plt\n'), ((36624, 36652), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rating Changes"""'], {}), "('Rating Changes')\n", (36634, 36652), True, 'from matplotlib import pyplot as plt\n'), ((37563, 37610), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ranks', 'delta'], {'s': 'mark_size', 'c': 'color'}), '(ranks, delta, s=mark_size, c=color)\n', (37574, 37610), True, 'from matplotlib import pyplot as plt\n'), ((38013, 38044), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (38042, 38044), True, 'from tle.util import graph_common as gc\n'), ((38053, 38067), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (38062, 38067), True, 'from matplotlib import pyplot as plt\n'), ((38085, 38127), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': 'title'}), '(title=title)\n', (38114, 38127), False, 'from tle.util import discord_common\n'), ((38136, 38184), 
'tle.util.discord_common.attach_image', 'discord_common.attach_image', (['embed', 'discord_file'], {}), '(embed, discord_file)\n', (38163, 38184), False, 'from tle.util import discord_common\n'), ((38193, 38244), 'tle.util.discord_common.set_author_footer', 'discord_common.set_author_footer', (['embed', 'ctx.author'], {}), '(embed, ctx.author)\n', (38225, 38244), False, 'from tle.util import discord_common\n'), ((1592, 1601), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1599, 1601), True, 'from matplotlib import pyplot as plt\n'), ((2062, 2114), 'matplotlib.pyplot.scatter', 'plt.scatter', (['times', 'ratings'], {'zorder': '(10)', 's': 'point_size'}), '(times, ratings, zorder=10, s=point_size)\n', (2073, 2114), True, 'from matplotlib import pyplot as plt\n'), ((3384, 3427), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['contest.end_time'], {}), '(contest.end_time)\n', (3409, 3427), True, 'import datetime as dt\n'), ((5583, 5693), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': 'f"""{handle}: {rating}"""', 'title_fontsize': "plt.rcParams['legend.fontsize']", 'loc': '"""upper left"""'}), "(title=f'{handle}: {rating}', title_fontsize=plt.rcParams[\n 'legend.fontsize'], loc='upper left')\n", (5593, 5693), True, 'from matplotlib import pyplot as plt\n'), ((5777, 5786), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5784, 5786), True, 'from matplotlib import pyplot as plt\n'), ((6133, 6169), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (6158, 6169), True, 'import datetime as dt\n'), ((6644, 6695), 'tle.util.codeforces_common.user_db.get_handle', 'cf_common.user_db.get_handle', (['x[3:-1]', 'ctx.guild.id'], {}), '(x[3:-1], ctx.guild.id)\n', (6672, 6695), True, 'from tle.util import codeforces_common as cf_common\n'), ((8009, 8064), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', 'handles'], {}), '(ctx, self.converter, 
handles)\n', (8034, 8064), True, 'from tle.util import codeforces_common as cf_common\n'), ((8820, 8854), 'tle.util.graph_common.StrWrap', 'gc.StrWrap', (['f"""{handle} ({rating})"""'], {}), "(f'{handle} ({rating})')\n", (8830, 8854), True, 'from tle.util import graph_common as gc\n'), ((9288, 9332), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(min_rating - 100)', '(max_rating + 200)'], {}), '(min_rating - 100, max_rating + 200)\n', (9296, 9332), True, 'from matplotlib import pyplot as plt\n'), ((10263, 10318), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', 'handles'], {}), '(ctx, self.converter, handles)\n', (10288, 10318), True, 'from tle.util import codeforces_common as cf_common\n'), ((10371, 10400), 'tle.util.codeforces_api.user.rating', 'cf.user.rating', ([], {'handle': 'handle'}), '(handle=handle)\n', (10385, 10400), True, 'from tle.util import codeforces_api as cf\n'), ((10665, 10694), 'tle.util.codeforces_api.user.status', 'cf.user.status', ([], {'handle': 'handle'}), '(handle=handle)\n', (10679, 10694), True, 'from tle.util import codeforces_api as cf\n'), ((12312, 12367), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', 'handles'], {}), '(ctx, self.converter, handles)\n', (12337, 12367), True, 'from tle.util import codeforces_common as cf_common\n'), ((13584, 13649), 'matplotlib.pyplot.hist', 'plt.hist', (['all_ratings'], {'stacked': '(True)', 'bins': 'hist_bins', 'label': 'labels'}), '(all_ratings, stacked=True, bins=hist_bins, label=labels)\n', (13592, 13649), True, 'from matplotlib import pyplot as plt\n'), ((13709, 13819), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': 'f"""{handle}: {total}"""', 'title_fontsize': "plt.rcParams['legend.fontsize']", 'loc': '"""upper right"""'}), "(title=f'{handle}: {total}', title_fontsize=plt.rcParams[\n 'legend.fontsize'], loc='upper right')\n", (13719, 13819), True, 'from matplotlib import pyplot as 
plt\n'), ((14360, 14397), 'matplotlib.pyplot.hist', 'plt.hist', (['all_ratings'], {'bins': 'hist_bins'}), '(all_ratings, bins=hist_bins)\n', (14368, 14397), True, 'from matplotlib import pyplot as plt\n'), ((14410, 14447), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {'loc': '"""upper right"""'}), "(labels, loc='upper right')\n", (14420, 14447), True, 'from matplotlib import pyplot as plt\n'), ((15385, 15440), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', 'handles'], {}), '(ctx, self.converter, handles)\n', (15410, 15440), True, 'from tle.util import codeforces_common as cf_common\n'), ((16461, 16517), 'matplotlib.pyplot.hist', 'plt.hist', (['all_times'], {'stacked': '(True)', 'label': 'labels', 'bins': '(34)'}), '(all_times, stacked=True, label=labels, bins=34)\n', (16469, 16517), True, 'from matplotlib import pyplot as plt\n'), ((16576, 16667), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': 'f"""{handle}: {total}"""', 'title_fontsize': "plt.rcParams['legend.fontsize']"}), "(title=f'{handle}: {total}', title_fontsize=plt.rcParams[\n 'legend.fontsize'])\n", (16586, 16667), True, 'from matplotlib import pyplot as plt\n'), ((17332, 17351), 'matplotlib.pyplot.hist', 'plt.hist', (['all_times'], {}), '(all_times)\n', (17340, 17351), True, 'from matplotlib import pyplot as plt\n'), ((17364, 17382), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {}), '(labels)\n', (17374, 17382), True, 'from matplotlib import pyplot as plt\n'), ((18853, 18910), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', '(handle,)'], {}), '(ctx, self.converter, (handle,))\n', (18878, 18910), True, 'from tle.util import codeforces_common as cf_common\n'), ((24919, 24983), 'tle.util.codeforces_common.cache2.rating_changes_cache.get_current_rating', 'cf_common.cache2.rating_changes_cache.get_current_rating', (['handle'], {}), '(handle)\n', (24975, 24983), True, 'from 
tle.util import codeforces_common as cf_common\n'), ((27110, 27182), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(l, -50)', '(r - l)', '(200)'], {'edgecolor': '"""none"""', 'facecolor': 'col'}), "((l, -50), r - l, 200, edgecolor='none', facecolor=col)\n", (27127, 27182), True, 'from matplotlib import patches as patches\n'), ((27361, 27462), 'matplotlib.pyplot.annotate', 'plt.annotate', (['user'], {'xy': 'point', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""', 'ha': '"""right"""', 'va': '"""bottom"""'}), "(user, xy=point, xytext=(0, 0), textcoords='offset points', ha=\n 'right', va='bottom')\n", (27373, 27462), True, 'from matplotlib import pyplot as plt\n'), ((27581, 27668), 'matplotlib.pyplot.plot', 'plt.plot', (['*point'], {'marker': '"""o"""', 'markersize': '(5)', 'color': '"""red"""', 'markeredgecolor': '"""darkred"""'}), "(*point, marker='o', markersize=5, color='red', markeredgecolor=\n 'darkred')\n", (27589, 27668), True, 'from matplotlib import pyplot as plt\n'), ((28181, 28221), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin - xmargin)', '(xmax + xmargin)'], {}), '(xmin - xmargin, xmax + xmargin)\n', (28189, 28221), True, 'from matplotlib import pyplot as plt\n'), ((28234, 28274), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin - ymargin)', '(ymax + ymargin)'], {}), '(ymin - ymargin, ymax + ymargin)\n', (28242, 28274), True, 'from matplotlib import pyplot as plt\n'), ((28301, 28334), 'matplotlib.pyplot.xlim', 'plt.xlim', (['ratings[0]', 'ratings[-1]'], {}), '(ratings[0], ratings[-1])\n', (28309, 28334), True, 'from matplotlib import pyplot as plt\n'), ((28347, 28368), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(101.5)'], {}), '(-1.5, 101.5)\n', (28355, 28368), True, 'from matplotlib import pyplot as plt\n'), ((28491, 28542), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[-inf, inf]', '[y, y]'], {'color': 'linecolor'}), '([-inf, inf], [y, y], color=linecolor)\n', (28504, 28542), True, 'from matplotlib import lines as mlines\n'), 
((28613, 28664), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[x, x]', '[-inf, inf]'], {'color': 'linecolor'}), '([x, x], [-inf, inf], color=linecolor)\n', (28626, 28664), True, 'from matplotlib import lines as mlines\n'), ((31368, 31377), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (31375, 31377), True, 'from matplotlib import pyplot as plt\n'), ((31396, 31423), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (31406, 31423), True, 'from matplotlib import pyplot as plt\n'), ((31595, 31604), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31602, 31604), True, 'from matplotlib import pyplot as plt\n'), ((32011, 32063), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(40)', 'horizontalalignment': '"""right"""'}), "(rotation=40, horizontalalignment='right')\n", (32021, 32063), True, 'from matplotlib import pyplot as plt\n'), ((32196, 32217), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Country"""'], {}), "('Country')\n", (32206, 32217), True, 'from matplotlib import pyplot as plt\n'), ((32230, 32261), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of members"""'], {}), "('Number of members')\n", (32240, 32261), True, 'from matplotlib import pyplot as plt\n'), ((32289, 32320), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (32318, 32320), True, 'from tle.util import graph_common as gc\n'), ((32333, 32347), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (32342, 32347), True, 'from matplotlib import pyplot as plt\n'), ((32368, 32453), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Distribution of server members by country"""'}), "(title='Distribution of server members by country'\n )\n", (32397, 32453), False, 'from tle.util import discord_common\n'), ((33080, 33129), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Country', 'Rating']"}), "(data, 
columns=['Country', 'Rating'])\n", (33092, 33129), True, 'import pandas as pd\n'), ((33326, 33335), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (33333, 33335), True, 'from matplotlib import pyplot as plt\n'), ((34338, 34359), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Country"""'], {}), "('Country')\n", (34348, 34359), True, 'from matplotlib import pyplot as plt\n'), ((34372, 34392), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rating"""'], {}), "('Rating')\n", (34382, 34392), True, 'from matplotlib import pyplot as plt\n'), ((34420, 34451), 'tle.util.graph_common.get_current_figure_as_file', 'gc.get_current_figure_as_file', ([], {}), '()\n', (34449, 34451), True, 'from tle.util import graph_common as gc\n'), ((34472, 34564), 'tle.util.discord_common.cf_color_embed', 'discord_common.cf_color_embed', ([], {'title': '"""Rating distribution of server members by country"""'}), "(title=\n 'Rating distribution of server members by country')\n", (34501, 34564), False, 'from tle.util import discord_common\n'), ((35380, 35456), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', 'handles'], {'mincnt': '(0)', 'maxcnt': '(20)'}), '(ctx, self.converter, handles, mincnt=0, maxcnt=20)\n', (35405, 35456), True, 'from tle.util import codeforces_common as cf_common\n'), ((37078, 37118), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin - xmargin)', '(xmax + xmargin)'], {}), '(xmin - xmargin, xmax + xmargin)\n', (37086, 37118), True, 'from matplotlib import pyplot as plt\n'), ((37131, 37171), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin - ymargin)', '(ymax + ymargin)'], {}), '(ymin - ymargin, ymax + ymargin)\n', (37139, 37171), True, 'from matplotlib import pyplot as plt\n'), ((37465, 37499), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-xmargin)', '(xmax + xmargin)'], {}), '(-xmargin, xmax + xmargin)\n', (37473, 37499), True, 'from matplotlib import pyplot as plt\n'), ((37512, 37553), 'matplotlib.pyplot.ylim', 
'plt.ylim', (['(-ylim - ymargin)', '(ylim + ymargin)'], {}), '(-ylim - ymargin, ylim + ymargin)\n', (37520, 37553), True, 'from matplotlib import pyplot as plt\n'), ((37676, 37795), 'matplotlib.pyplot.annotate', 'plt.annotate', (['handle'], {'xy': 'point', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""', 'ha': '"""left"""', 'va': '"""bottom"""', 'fontsize': '"""large"""'}), "(handle, xy=point, xytext=(0, 0), textcoords='offset points',\n ha='left', va='bottom', fontsize='large')\n", (37688, 37795), True, 'from matplotlib import pyplot as plt\n'), ((37931, 37988), 'matplotlib.pyplot.plot', 'plt.plot', (['*point'], {'marker': '"""o"""', 'markersize': '(5)', 'color': '"""black"""'}), "(*point, marker='o', markersize=5, color='black')\n", (37939, 37988), True, 'from matplotlib import pyplot as plt\n'), ((1253, 1317), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['rating_change.ratingUpdateTimeSeconds'], {}), '(rating_change.ratingUpdateTimeSeconds)\n', (1278, 1317), True, 'import datetime as dt\n'), ((2579, 2625), 'tle.util.codeforces_common.is_rated_for_onsite_contest', 'cf_common.is_rated_for_onsite_contest', (['contest'], {}), '(contest)\n', (2616, 2625), True, 'from tle.util import codeforces_common as cf_common\n'), ((5265, 5313), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['(t, t)', '(mn, mx)'], {'color': 'linecolor'}), '((t, t), (mn, mx), color=linecolor)\n', (5278, 5313), True, 'from matplotlib import lines as mlines\n'), ((8087, 8116), 'tle.util.codeforces_api.user.rating', 'cf.user.rating', ([], {'handle': 'handle'}), '(handle=handle)\n', (8101, 8116), True, 'from tle.util import codeforces_api as cf\n'), ((10884, 10938), 'tle.util.codeforces_common.cache2.contest_cache.get_contest', 'cf_common.cache2.contest_cache.get_contest', (['contest_id'], {}), '(contest_id)\n', (10926, 10938), True, 'from tle.util import codeforces_common as cf_common\n'), ((10956, 11016), 
'tle.util.codeforces_common.cache2.problemset_cache.get_problemset', 'cf_common.cache2.problemset_cache.get_problemset', (['contest_id'], {}), '(contest_id)\n', (11004, 11016), True, 'from tle.util import codeforces_common as cf_common\n'), ((12390, 12419), 'tle.util.codeforces_api.user.status', 'cf.user.status', ([], {'handle': 'handle'}), '(handle=handle)\n', (12404, 12419), True, 'from tle.util import codeforces_api as cf\n'), ((15463, 15492), 'tle.util.codeforces_api.user.status', 'cf.user.status', ([], {'handle': 'handle'}), '(handle=handle)\n', (15477, 15492), True, 'from tle.util import codeforces_api as cf\n'), ((17392, 17401), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (17399, 17401), True, 'from matplotlib import pyplot as plt\n'), ((18962, 18991), 'tle.util.codeforces_api.user.rating', 'cf.user.rating', ([], {'handle': 'handle'}), '(handle=handle)\n', (18976, 18991), True, 'from tle.util import codeforces_api as cf\n'), ((19173, 19202), 'tle.util.codeforces_api.user.status', 'cf.user.status', ([], {'handle': 'handle'}), '(handle=handle)\n', (19187, 19202), True, 'from tle.util import codeforces_api as cf\n'), ((20443, 20452), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20450, 20452), True, 'from matplotlib import pyplot as plt\n'), ((25941, 25953), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (25950, 25953), True, 'import numpy as np\n'), ((26004, 26077), 'tle.util.codeforces_common.resolve_handles', 'cf_common.resolve_handles', (['ctx', 'self.converter', 'args'], {'mincnt': '(0)', 'maxcnt': '(50)'}), '(ctx, self.converter, args, mincnt=0, maxcnt=50)\n', (26029, 26077), True, 'from tle.util import codeforces_common as cf_common\n'), ((26377, 26417), 'bisect.bisect_left', 'bisect.bisect_left', (['ratings', 'info.rating'], {}), '(ratings, info.rating)\n', (26395, 26417), False, 'import bisect\n'), ((31441, 31482), 'seaborn.axes_style', 'sns.axes_style', ([], {'rc': "{'xtick.bottom': True}"}), "(rc={'xtick.bottom': True})\n", 
(31455, 31482), True, 'import seaborn as sns\n'), ((31500, 31534), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'countries', 'y': 'counts'}), '(x=countries, y=counts)\n', (31511, 31534), True, 'import seaborn as sns\n'), ((33391, 33496), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""Country"""', 'y': '"""Rating"""', 'hue': '"""Rating"""', 'data': 'df', 'order': 'column_order', 'palette': 'color_map'}), "(x='Country', y='Rating', hue='Rating', data=df, order=\n column_order, palette=color_map)\n", (33404, 33496), True, 'import seaborn as sns\n'), ((34086, 34138), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)', 'horizontalalignment': '"""right"""'}), "(rotation=30, horizontalalignment='right')\n", (34096, 34138), True, 'from matplotlib import pyplot as plt\n'), ((34160, 34169), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34167, 34169), True, 'from matplotlib import pyplot as plt\n'), ((16076, 16126), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['sub.creationTimeSeconds'], {}), '(sub.creationTimeSeconds)\n', (16101, 16126), True, 'import datetime as dt\n'), ((16788, 16838), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['sub.creationTimeSeconds'], {}), '(sub.creationTimeSeconds)\n', (16813, 16838), True, 'import datetime as dt\n'), ((19314, 19364), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['sub.creationTimeSeconds'], {}), '(sub.creationTimeSeconds)\n', (19339, 19364), True, 'import datetime as dt\n'), ((24499, 24510), 'time.time', 'time.time', ([], {}), '()\n', (24508, 24510), False, 'import time\n'), ((29604, 29639), 'tle.util.codeforces_common.user_db.howgud', 'cf_common.user_db.howgud', (['member.id'], {}), '(member.id)\n', (29628, 29639), True, 'from tle.util import codeforces_common as cf_common\n'), ((33739, 33780), 'seaborn.axes_style', 'sns.axes_style', ([], {'rc': "{'xtick.bottom': True}"}), "(rc={'xtick.bottom': True})\n", (33753, 33780), True, 'import 
seaborn as sns\n'), ((33802, 33907), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""Country"""', 'y': '"""Rating"""', 'hue': '"""Rating"""', 'data': 'df', 'order': 'column_order', 'palette': 'color_map'}), "(x='Country', y='Rating', hue='Rating', data=df, order=\n column_order, palette=color_map)\n", (33815, 33907), True, 'import seaborn as sns\n'), ((34304, 34316), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (34314, 34316), True, 'from matplotlib import pyplot as plt\n'), ((35856, 35909), 'tle.util.codeforces_common.user_db.get_handles_for_guild', 'cf_common.user_db.get_handles_for_guild', (['ctx.guild.id'], {}), '(ctx.guild.id)\n', (35895, 35909), True, 'from tle.util import codeforces_common as cf_common\n'), ((36312, 36342), 'tle.util.codeforces_api.rating2rank', 'cf.rating2rank', (['user.oldRating'], {}), '(user.oldRating)\n', (36326, 36342), True, 'from tle.util import codeforces_api as cf\n'), ((32970, 32992), 'tle.util.codeforces_api.rating2rank', 'cf.rating2rank', (['rating'], {}), '(rating)\n', (32984, 32992), True, 'from tle.util import codeforces_api as cf\n')] |
from itertools import cycle, islice
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
def roundrobin(*iterables):
    """Yield items from each iterable in turn until all are exhausted.

    roundrobin('ABC', 'D', 'EF') --> A D E B F C
    """
    # Recipe adapted from the itertools documentation.
    num_active = len(iterables)
    nexts = cycle(iter(it).__next__ for it in iterables)
    while num_active:
        try:
            # Renamed loop variable: the original shadowed the builtin next().
            for get_next in nexts:
                yield get_next()
        except StopIteration:
            # Remove the iterator we just exhausted from the cycle.
            num_active -= 1
            nexts = cycle(islice(nexts, num_active))
# create a new elementsstate list with doubled elements with
# roundrobin(elementsstate, elementsstate)
# and use it as c in plt.tripcolor with cmap="Blues"
# use same method for vom mises but 0 elements must be masked
def get_triangulation(connectivity):
    """Split each 4-node quad element into two triangles.

    :param connectivity: sequence of quad elements, each a sequence of four
        node indices (assumed ordered around the quad — TODO confirm ordering)
    :return: float ndarray of shape (2 * n_elements, 3) of triangle node
        indices, suitable for tri.Triangulation()
    """
    number_of_elements = len(connectivity)
    triangulation = np.zeros((2 * number_of_elements, 3))
    for i, quad in enumerate(connectivity):
        # Quad (n0, n1, n2, n3) -> triangles (n0, n1, n3) and (n3, n1, n2).
        # Direct 2*i indexing replaces the original running-offset counter.
        triangulation[2 * i, :] = np.array([quad[0], quad[1], quad[3]])
        triangulation[2 * i + 1, :] = np.array([quad[3], quad[1], quad[2]])
    return triangulation
def get_mask(elements_density):
    """Return a per-triangle mask (True = masked) from per-element densities.

    Each quad element corresponds to two triangles, so every density value is
    duplicated; a triangle is masked when its element's density equals 0.

    :param elements_density: sequence of element densities (assumed list-like,
        not a one-shot iterator — TODO confirm against callers)
    :return: list of bool, length 2 * len(elements_density)
    """
    # roundrobin(x, x) duplicated each value consecutively; a comprehension
    # does the same without the helper. bool() normalizes numpy bools.
    return [bool(d == 0) for d in elements_density for _ in range(2)]
def get_triangles(triangulation, x_nodes_coordinates, y_nodes_coordinates, mask):
    """Build a masked tri.Triangulation of the design.

    Triangles flagged in ``mask`` (density-0 elements) are hidden from plots.
    """
    xs = list(x_nodes_coordinates)
    ys = list(y_nodes_coordinates)
    return tri.Triangulation(xs, ys, triangulation, mask=mask)
def draw_triangles_contour(triangles):
    """Draw the triangle edges of the design mesh. Currently unused."""
    axes = plt.gca()
    axes.set_aspect('equal')
    plt.triplot(triangles)
def draw_interior(triangles, elements_density):
    """Fill the mesh triangles shaded by element density (Blues). Currently unused."""
    # Each element spans two triangles, so densities are doubled up.
    doubled = list(roundrobin(elements_density, elements_density))
    axes = plt.gca()
    axes.set_aspect('equal')
    plt.tripcolor(triangles, doubled, cmap='Blues')
def draw_results(triangles, color_map):
    """Plot the design colored by per-element values with a horizontal colorbar."""
    # Each element spans two triangles, so values are doubled up.
    values = list(roundrobin(color_map, color_map))
    axes = plt.gca()
    axes.set_aspect('equal')
    plt.tripcolor(triangles, values, cmap='jet', vmin=0)
    plt.colorbar(orientation='horizontal')
| [
"matplotlib.pyplot.tripcolor",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.triplot",
"numpy.zeros",
"matplotlib.pyplot.colorbar",
"numpy.array",
"itertools.islice",
"matplotlib.tri.Triangulation"
] | [((1068, 1105), 'numpy.zeros', 'np.zeros', (['(2 * number_of_elements, 3)'], {}), '((2 * number_of_elements, 3))\n', (1076, 1105), True, 'import numpy as np\n'), ((1979, 2028), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['x', 'y', 'triangulation'], {'mask': 'mask'}), '(x, y, triangulation, mask=mask)\n', (1996, 2028), True, 'import matplotlib.tri as tri\n'), ((2157, 2179), 'matplotlib.pyplot.triplot', 'plt.triplot', (['triangles'], {}), '(triangles)\n', (2168, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2375, 2413), 'matplotlib.pyplot.tripcolor', 'plt.tripcolor', (['triangles', 'c'], {'cmap': 'cmap'}), '(triangles, c, cmap=cmap)\n', (2388, 2413), True, 'import matplotlib.pyplot as plt\n'), ((2612, 2658), 'matplotlib.pyplot.tripcolor', 'plt.tripcolor', (['triangles', 'c'], {'cmap': 'cmap', 'vmin': '(0)'}), '(triangles, c, cmap=cmap, vmin=0)\n', (2625, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2664, 2702), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""'}), "(orientation='horizontal')\n", (2676, 2702), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1317), 'numpy.array', 'np.array', (['[connectivity[i][0], connectivity[i][1], connectivity[i][3]]'], {}), '([connectivity[i][0], connectivity[i][1], connectivity[i][3]])\n', (1255, 1317), True, 'import numpy as np\n'), ((1366, 1436), 'numpy.array', 'np.array', (['[connectivity[i][3], connectivity[i][1], connectivity[i][2]]'], {}), '([connectivity[i][3], connectivity[i][1], connectivity[i][2]])\n', (1374, 1436), True, 'import numpy as np\n'), ((2122, 2131), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2129, 2131), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2349), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2347, 2349), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2586), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2584, 2586), True, 'import matplotlib.pyplot as plt\n'), ((583, 608), 'itertools.islice', 
'islice', (['nexts', 'num_active'], {}), '(nexts, num_active)\n', (589, 608), False, 'from itertools import cycle, islice\n')] |
"""
This module will provide unit conversion capabilities based on the Energistics Unit of Measure Standard v1.0.
For more information regarding the Energistics standard, please see
http://www.energistics.org/asset-data-management/unit-of-measure-standard
Author: <NAME>
August 17 - 2016
"""
from __future__ import division
import warnings
import functools
from lxml import etree
try:
import numpy as np
__numpyEnabled = True
except:
__numpyEnabled = False
#log.info("Numpy not installed. performance will be degraded")
import pkg_resources
import math # needed for PI operations to work properly
resource_package = __name__
# load Energistics symbols and factors
xmlFile = pkg_resources.resource_string(resource_package, "/units.xml")
root = etree.fromstring(xmlFile)
tag = etree.QName('http://www.energistics.org/energyml/data/uomv1', 'unit')
# Registry of unit definitions keyed by unit symbol; each entry carries the
# conversion coefficients A, B, C (and D) of y = (A + B*x) / (C + D*x).
__units = {}
for unitXml in root.iter(tag.text):
    unit = {}
    isBase = False
    for field in unitXml:
        t = etree.QName(field.tag)
        # print(t.localname)
        # print field.text
        try:
            # NOTE(review): eval() on XML content — acceptable only because the
            # file ships with the package; "PI" is substituted so expressions
            # like "2*PI" evaluate as floats.
            unit[t.localname] = float(eval(field.text.replace("PI", "math.pi")))
        except:
            # Non-numeric fields (e.g. symbol, name) are kept as raw text.
            unit[t.localname] = field.text
        if t.localname=="isBase":
            # Base (SI) units get identity conversion coefficients.
            unit["A"] = 0.0
            unit["B"] = 1.0
            unit["C"] = 1.0
    __units[unit["symbol"]] = unit
#CustomUnits
#if "tf" not in Units.keys():
# Units["tf"]={'symbol':'tf','name':"Metric Ton Force","A":0.0,"B":9.80665*1000.0,"C":1.0,"D":0.0}
def add_custom_unit(symbol, name, a, b, c, d=0, force=False):
    """Register a custom unit defined as y = (a + b*value) / (c + d*value).

    The offset is a/c and the scale is b/c. All built-in units have d = 0,
    so d can usually be ignored.

    :param symbol: unit symbol used as the registry key
    :param name: human-readable unit name
    :param a: offset numerator coefficient
    :param b: scale numerator coefficient
    :param c: shared denominator coefficient
    :param d: denominator value coefficient (default 0)
    :param force: set True to override an existing symbol
    """
    # Bug fix: the force flag was documented but previously ignored — an
    # existing symbol could never be overridden. It is now honored.
    if force or symbol not in __units:
        __units[symbol] = {'symbol': symbol, 'name': name, "A": a, "B": b, "C": c, "D": d}
def from_si(value, targetUnit):
    """Convert value(s) from SI into the desired TARGETUNIT.

    :param value: the value to convert (scalar or iterable)
    :type value: float
    :param targetUnit: the relevant unit symbol as a string
    :type targetUnit: str
    :return: the value converted to TARGETUNIT
    :rtype: float
    """
    global __numpyEnabled
    unit = __units[targetUnit]
    offset = unit["A"] * 1.0 / unit["C"]
    scale = unit["B"] * 1.0 / unit["C"]
    if __numpyEnabled:
        # Vectorized path handles scalars and arrays alike.
        return np.divide(value, scale) - np.divide(offset, scale)
    scaledOffset = offset / scale
    if hasattr(value, "__iter__"):
        return [(v / scale) - scaledOffset for v in value]
    return value / scale - scaledOffset
def to_si(value, sourceUnit):
    """Convert value(s) in SOURCEUNIT to the SI unit of the relevant quantity.

    :param value: the value to convert (scalar or iterable)
    :type value: float
    :param sourceUnit: the relevant unit symbol as a string
    :type sourceUnit: str
    :return: the value converted to SI
    :rtype: float
    """
    global __numpyEnabled
    unit = __units[sourceUnit]
    offset = unit["A"] * 1.0 / unit["C"]
    scale = unit["B"] * 1.0 / unit["C"]
    if __numpyEnabled:
        # Vectorized path handles scalars and arrays alike.
        return np.multiply(value, scale) + offset
    if hasattr(value, "__iter__"):
        return [(v * scale) + offset for v in value]
    return value * scale + offset
def set_numpy_enabled(enabled):
    """Toggle the numpy-vectorized conversion path (True) vs pure-Python loops."""
    global __numpyEnabled
    __numpyEnabled = enabled
| [
"numpy.divide",
"numpy.multiply",
"lxml.etree.fromstring",
"pkg_resources.resource_string",
"lxml.etree.QName"
] | [((700, 761), 'pkg_resources.resource_string', 'pkg_resources.resource_string', (['resource_package', '"""/units.xml"""'], {}), "(resource_package, '/units.xml')\n", (729, 761), False, 'import pkg_resources\n'), ((769, 794), 'lxml.etree.fromstring', 'etree.fromstring', (['xmlFile'], {}), '(xmlFile)\n', (785, 794), False, 'from lxml import etree\n'), ((802, 871), 'lxml.etree.QName', 'etree.QName', (['"""http://www.energistics.org/energyml/data/uomv1"""', '"""unit"""'], {}), "('http://www.energistics.org/energyml/data/uomv1', 'unit')\n", (813, 871), False, 'from lxml import etree\n'), ((992, 1014), 'lxml.etree.QName', 'etree.QName', (['field.tag'], {}), '(field.tag)\n', (1003, 1014), False, 'from lxml import etree\n'), ((2566, 2589), 'numpy.divide', 'np.divide', (['value', 'scale'], {}), '(value, scale)\n', (2575, 2589), True, 'import numpy as np\n'), ((2592, 2616), 'numpy.divide', 'np.divide', (['offset', 'scale'], {}), '(offset, scale)\n', (2601, 2616), True, 'import numpy as np\n'), ((3406, 3431), 'numpy.multiply', 'np.multiply', (['value', 'scale'], {}), '(value, scale)\n', (3417, 3431), True, 'import numpy as np\n')] |
import os
import numpy as np
class FileUtils:
    """
    Some static methods to support file/folder deletion and creation.
    """

    @staticmethod
    def create_dir(path):
        """
        Create a directory (including parents) if it doesn't exist.
        :param path: directory path to create
        :return:
        """
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(path, exist_ok=True)

    @staticmethod
    def delete_file(path):
        """
        Delete the file at the passed filepath; prints a notice when missing.
        :param path: file path to delete
        :return:
        """
        if os.path.exists(path):
            os.remove(path)
        else:
            print("The file to delete does not exist")

    @staticmethod
    def save_nparray_to_csv(data, path):
        """
        Save a 2d array into csv (comma-separated, '%s'-formatted) format.
        :param data: array-like of values
        :param path: destination file path
        :return:
        """
        np.savetxt(path, data, delimiter=',', fmt='%s')

    @staticmethod
    def save_dictionary_to_csv(data, path):
        """
        Save the supplied dictionary as "key,value" lines.

        NOTE(review): keys/values containing commas or newlines are not
        escaped; switch to the csv module if callers need that.
        :param data: dictionary to serialize
        :param path: destination file path
        :return:
        """
        with open(path, 'w') as f:
            # .items() avoids a second lookup per key.
            for key, value in data.items():
                f.write("%s,%s\n" % (key, value))
| [
"os.remove",
"numpy.savetxt",
"os.path.exists",
"os.makedirs"
] | [((519, 539), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (533, 539), False, 'import os\n'), ((835, 882), 'numpy.savetxt', 'np.savetxt', (['path', 'data'], {'delimiter': '""","""', 'fmt': '"""%s"""'}), "(path, data, delimiter=',', fmt='%s')\n", (845, 882), True, 'import numpy as np\n'), ((300, 320), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (314, 320), False, 'import os\n'), ((334, 351), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (345, 351), False, 'import os\n'), ((553, 568), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (562, 568), False, 'import os\n')] |
"""
===============================
A demo of PBALL2D environment
===============================
Illustration of PBall2D environment
.. video:: ../../video_plot_pball.mp4
:width: 600
"""
# sphinx_gallery_thumbnail_path = 'thumbnails/video_plot_pball.jpg'
import numpy as np
from rlberry.envs.benchmarks.ball_exploration import PBall2D
# Parameters passed straight to PBall2D below; exact semantics are defined by
# the environment (presumably p is the ball-norm exponent — TODO confirm).
p = 5
# 2x2 linear dynamics matrix applied to the state.
A = np.array([[1.0, 0.1], [-0.1, 1.0]])
# Three reward bumps: peak heights, widths, and centers (on a circle of
# radius 0.75 at angles pi/2, pi/6 and 5*pi/6).
reward_amplitudes = np.array([1.0, 0.5, 0.5])
reward_smoothness = np.array([0.25, 0.25, 0.25])
reward_centers = [
    np.array([0.75 * np.cos(np.pi / 2), 0.75 * np.sin(np.pi / 2)]),
    np.array([0.75 * np.cos(np.pi / 6), 0.75 * np.sin(np.pi / 6)]),
    np.array([0.75 * np.cos(5 * np.pi / 6), 0.75 * np.sin(5 * np.pi / 6)]),
]
# Four discrete actions: steps of +/-0.1 along each axis.
action_list = [
    0.1 * np.array([1, 0]),
    -0.1 * np.array([1, 0]),
    0.1 * np.array([0, 1]),
    -0.1 * np.array([0, 1]),
]
env = PBall2D(
    p=p,
    A=A,
    reward_amplitudes=reward_amplitudes,
    reward_centers=reward_centers,
    reward_smoothness=reward_smoothness,
    action_list=action_list,
)
env.enable_rendering()
# Alternate actions 1 (-x step) and 3 (-y step) for five rounds, then render
# and save the rollout video.
for ii in range(5):
    env.step(1)
    env.step(3)
env.render()
video = env.save_video("_video/video_plot_pball.mp4")
| [
"numpy.sin",
"numpy.array",
"numpy.cos",
"rlberry.envs.benchmarks.ball_exploration.PBall2D"
] | [((354, 389), 'numpy.array', 'np.array', (['[[1.0, 0.1], [-0.1, 1.0]]'], {}), '([[1.0, 0.1], [-0.1, 1.0]])\n', (362, 389), True, 'import numpy as np\n'), ((411, 436), 'numpy.array', 'np.array', (['[1.0, 0.5, 0.5]'], {}), '([1.0, 0.5, 0.5])\n', (419, 436), True, 'import numpy as np\n'), ((457, 485), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (465, 485), True, 'import numpy as np\n'), ((860, 1017), 'rlberry.envs.benchmarks.ball_exploration.PBall2D', 'PBall2D', ([], {'p': 'p', 'A': 'A', 'reward_amplitudes': 'reward_amplitudes', 'reward_centers': 'reward_centers', 'reward_smoothness': 'reward_smoothness', 'action_list': 'action_list'}), '(p=p, A=A, reward_amplitudes=reward_amplitudes, reward_centers=\n reward_centers, reward_smoothness=reward_smoothness, action_list=\n action_list)\n', (867, 1017), False, 'from rlberry.envs.benchmarks.ball_exploration import PBall2D\n'), ((747, 763), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (755, 763), True, 'import numpy as np\n'), ((776, 792), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (784, 792), True, 'import numpy as np\n'), ((804, 820), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (812, 820), True, 'import numpy as np\n'), ((833, 849), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (841, 849), True, 'import numpy as np\n'), ((527, 544), 'numpy.cos', 'np.cos', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (533, 544), True, 'import numpy as np\n'), ((553, 570), 'numpy.sin', 'np.sin', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (559, 570), True, 'import numpy as np\n'), ((595, 612), 'numpy.cos', 'np.cos', (['(np.pi / 6)'], {}), '(np.pi / 6)\n', (601, 612), True, 'import numpy as np\n'), ((621, 638), 'numpy.sin', 'np.sin', (['(np.pi / 6)'], {}), '(np.pi / 6)\n', (627, 638), True, 'import numpy as np\n'), ((663, 684), 'numpy.cos', 'np.cos', (['(5 * np.pi / 6)'], {}), '(5 * np.pi / 6)\n', (669, 684), True, 'import numpy as np\n'), ((693, 714), 
'numpy.sin', 'np.sin', (['(5 * np.pi / 6)'], {}), '(5 * np.pi / 6)\n', (699, 714), True, 'import numpy as np\n')] |
# author: jussikai, timoh
import os
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn import model_selection
import cv2
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.models import Model
import efficientnet.tfkeras
# Load the pre-built image/label arrays and hold out 20% for validation.
X = np.load("X.npy")
y = np.load("y.npy")

# Per-class weights to counter class imbalance across the 17 classes.
weights = [4,3,1,1,1,1,5,3,2,5,1,4,4,3,2,1,1]

Train_X, test_X, Train_y, test_y = sklearn.model_selection.train_test_split(X, y, test_size=0.2)

# Free the full arrays once split to reduce peak memory.
del X
del y

################
# EfficientNet #
################

# ImageNet-pretrained EfficientNetB7 backbone without its classification head.
# (Bug fix: the original had a duplicated "base_model = base_model =" assignment.)
base_model = efficientnet.tfkeras.EfficientNetB7(
    weights='imagenet', input_shape=(224, 224, 3), include_top=False)
#base_model.summary()

# Custom classification head: two dense layers and 17 sigmoid outputs
# (sigmoid suggests multi-label prediction).
w = base_model.output
w = Flatten()(w)
w = Dense(256, activation="relu")(w)
w = Dense(128, activation="relu")(w)
output = Dense(17, activation="sigmoid")(w)

model = Model(inputs=[base_model.inputs[0]], outputs=[output])
# NOTE(review): with sigmoid outputs, 'binary_crossentropy' is the usual
# multi-label loss; 'categorical_crossentropy' assumes softmax — confirm intent.
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# NOTE(review): Keras documents class_weight as a {class_index: weight} dict;
# newer versions may reject a plain list — confirm against the installed version.
model.fit(Train_X, Train_y, epochs=15, batch_size=15,
          validation_data=(test_X, test_y), class_weight=weights)

model.save('EfficientNetB7_Timo.h5')
print("Saved")

# Release large objects before interpreter teardown.
del model
del Train_X
del Train_y
del test_X
del test_y
| [
"numpy.load",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Flatten"
] | [((313, 329), 'numpy.load', 'np.load', (['"""X.npy"""'], {}), "('X.npy')\n", (320, 329), True, 'import numpy as np\n'), ((334, 350), 'numpy.load', 'np.load', (['"""y.npy"""'], {}), "('y.npy')\n", (341, 350), True, 'import numpy as np\n'), ((432, 493), 'sklearn.model_selection.train_test_split', 'sklearn.model_selection.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (472, 493), False, 'import sklearn\n'), ((995, 1049), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[base_model.inputs[0]]', 'outputs': '[output]'}), '(inputs=[base_model.inputs[0]], outputs=[output])\n', (1000, 1049), False, 'from tensorflow.keras.models import Model\n'), ((736, 745), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (743, 745), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten\n'), ((753, 782), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (758, 782), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten\n'), ((792, 821), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (797, 821), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten\n'), ((836, 867), 'tensorflow.keras.layers.Dense', 'Dense', (['(17)'], {'activation': '"""sigmoid"""'}), "(17, activation='sigmoid')\n", (841, 867), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten\n')] |
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
from tkinter import *
from tkinter.ttk import Notebook
import tkinter.filedialog
from tkinter.font import Font
import os
import webbrowser
import pickle
import copy
import xlrd
import time
import datetime
from pyCellAnalyst import (Volume, CellMech)
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import OrderedDict
class Application(Frame):
""" This is a GUI for the pyCellAnalyst Segmentation Feature """
    def __init__(self, master):
        """Build the main window (a 4-tab notebook) and all default settings.

        master: the parent Tk widget this frame is gridded into.
        """
        Frame.__init__(self, master)
        # remember where file dialogs should start browsing
        self.lastdir = os.getcwd()
        self.notebook = Notebook(self)
        self.tab1 = Frame(self.notebook)
        self.tab2 = Frame(self.notebook)
        self.tab3 = Frame(self.notebook)
        self.tab4 = Frame(self.notebook)
        self.notebook.add(self.tab1, text="I/O")
        self.notebook.add(self.tab2, text="Filtering")
        self.notebook.add(self.tab3, text="Segmentation")
        self.notebook.add(self.tab4, text="Kinematics")
        self.notebook.grid(row=0, column=0, sticky=NW)
        # image directories queued for processing and loaded regions of interest
        self.directories = []
        self.ROI = []
        #default settings
        # Tk variables holding the numeric filter/segmentation parameters;
        # each one is traced below so edits trigger self.update.
        self.settings = {'xdim': DoubleVar(value=0.41),
                         'ydim': DoubleVar(value=0.41),
                         'zdim': DoubleVar(value=0.3),
                         'upsampling': DoubleVar(value=2.0),
                         'thresholdPercentage': DoubleVar(value=0.55),
                         'medianRadius': IntVar(value=1),
                         'gaussianSigma': DoubleVar(value=0.5),
                         'curvatureIterations': IntVar(value=10),
                         'curvatureConductance': DoubleVar(value=9.0),
                         'gradientIterations': IntVar(value=10),
                         'gradientConductance': DoubleVar(value=9.0),
                         'gradientTimeStep': DoubleVar(value=0.01),
                         'bilateralDomainSigma': DoubleVar(value=1.5),
                         'bilateralRangeSigma': DoubleVar(value=5.0),
                         'bilateralSamples': IntVar(value=100),
                         'patchRadius': IntVar(value=4),
                         'patchNumber': IntVar(value=20),
                         'patchNoiseModel': IntVar(value=2),
                         'patchIterations': IntVar(value=5),
                         'geodesicCannyVariance': DoubleVar(value=0.1),
                         'geodesicCannyUpper': DoubleVar(value=0.0),
                         'geodesicCannyLower': DoubleVar(value=0.0),
                         'geodesicPropagation': DoubleVar(value=0.15),
                         'geodesicCurvature': DoubleVar(value=0.1),
                         'geodesicAdvection': DoubleVar(value=1.0),
                         'geodesicIterations': IntVar(value=200),
                         'geodesicRMS': DoubleVar(value=0.01),
                         'edgeLambda1': DoubleVar(value=1.1),
                         'edgeLambda2': DoubleVar(value=1.0),
                         'edgeIterations': IntVar(value=20),
                         'edgeCurvature': DoubleVar(value=10.0),
                         'deformableIterations': IntVar(value=200),
                         'deformableRMS': DoubleVar(value=0.01),
                         'deformableSigma': DoubleVar(value=3.0),
                         'deformablePrecision': DoubleVar(value=0.02)}
        # Boolean flags and radio-button selections (method choices are
        # 1-based indices into the lists defined below).
        self.intSettings = {'stain': IntVar(value=0),
                            'display': IntVar(value=1),
                            'removeBright': IntVar(value=0),
                            'edgeEnhancement': IntVar(value=0),
                            'processAs2D': IntVar(value=0),
                            'opening': IntVar(value=1),
                            'fillHoles': IntVar(value=0),
                            'handleOverlap': IntVar(value=1),
                            'debug': IntVar(value=0),
                            'smoothingMethod': IntVar(value=4),
                            'thresholdMethod': IntVar(value=3),
                            'thresholdAdaptive': IntVar(value=1),
                            'activeMethod': IntVar(value=1),
                            'rigidInitial': IntVar(value=1),
                            'defReg': IntVar(value=0),
                            'saveFEA': IntVar(value=0),
                            'makePlots': IntVar(value=1),
                            'deformableDisplay': IntVar(value=1),
                            'depth_adjust': IntVar(value=0)}
        # display names for the smoothing/threshold method indices above
        self.smoothingMethods = ['None',
                                 'Median',
                                 'Gaussian',
                                 'Curvature Diffusion',
                                 'Gradient Diffusion',
                                 'Bilateral',
                                 'Patch-based']
        self.thresholdMethods = ['Percentage',
                                 'Otsu',
                                 'MaxEntropy',
                                 'Li',
                                 'Huang',
                                 'IsoData',
                                 'KittlerIllingworth',
                                 'Moments',
                                 'Yen',
                                 'RenyiEntropy',
                                 'Shanbhag']
        # re-run self.update whenever any numeric setting is written
        for i in list(self.settings.keys()):
            self.settings[i].trace("w", self.update)
        self.grid()
        self.create_widgets()
        #eliminates the focus switch to first widget
        #in frames when changing tabs
        self.notebook.focus()
def create_widgets(self):
#save/load settings
self.buttonSaveSettings = Button(self.tab1, text="Save Settings",
command=self.saveSettings)
self.buttonSaveSettings.grid(row=1, column=0, padx=5, pady=5,
sticky=W + E)
self.buttonLoadSettings = Button(self.tab1, text="Load Settings",
command=self.loadSettings)
self.buttonLoadSettings.grid(row=1, column=1, padx=5, pady=5,
sticky=W + E)
self.buttonLoadROI = Button(self.tab1,
text="Load Region of Interest File",
command=self.loadROI)
self.buttonLoadROI.grid(row=1, column=2, padx=5, pady=5, sticky=W + E)
#create label frame for image directory selection
self.directoryFrame = LabelFrame(self.tab1,
text="Image directories to process")
self.directoryFrame.grid(row=2, column=0, rowspan=2, columnspan=5,
padx=5, pady=5, sticky=NW)
#add directory
self.buttonAddDirectory = Button(self.directoryFrame, bg='green')
self.buttonAddDirectory["text"] = "Add"
self.buttonAddDirectory["command"] = self.add_directory
self.buttonAddDirectory.grid(row=2, column=0, padx=5, sticky=W + E)
#remove directory
self.buttonRemoveDirectory = Button(self.directoryFrame, bg='red')
self.buttonRemoveDirectory["text"] = "Remove"
self.buttonRemoveDirectory["command"] = self.remove_directory
self.buttonRemoveDirectory.grid(row=3, column=0, padx=5, sticky=W + E)
#directory list
self.listDirectories = Listbox(self.directoryFrame)
self.listDirectories["width"] = 80
self.listDirectories["selectmode"] = MULTIPLE
self.listDirectories.grid(row=2, column=1, rowspan=2, columnspan=4,
padx=5, pady=5, sticky=E + W)
# Image Settings
self.imageSettingsFrame = LabelFrame(self.tab1, text="Image Settings")
self.imageSettingsFrame.grid(row=4, column=0, columnspan=5, rowspan=2,
padx=5, pady=5, sticky=E + W)
settings = [('x-spacing', 'xdim'),
('y-spacing', 'ydim'),
('z-spacing', 'zdim'),
('Upsampling Factor', 'upsampling')]
for i, v in enumerate(settings):
Label(self.imageSettingsFrame, text=v[0]).grid(row=4, column=i,
padx=5, pady=5,
sticky=E + W)
Entry(self.imageSettingsFrame,
textvariable=self.settings[v[1]],
width=5).grid(row=5, column=i,
padx=5, pady=5, sticky=E + W)
Checkbutton(self.imageSettingsFrame,
text='Objects are Dark',
variable=self.intSettings['stain']).grid(row=5,
column=i + 1,
padx=5, pady=5,
sticky=NW)
# Other settings
####################################################################
self.otherSettingsFrame = LabelFrame(self.tab1, text="Other Options")
self.otherSettingsFrame.grid(row=6, column=0, columnspan=5,
padx=5, pady=5, sticky=E + W)
settings = [('Display Objects', 'display'),
('Remove Bright Spots', 'removeBright'),
('Edge Enhancement', 'edgeEnhancement'),
('Consider Slices Independent', 'processAs2D'),
('Perform Morphogical Opening', 'opening'),
('Fill Holes', 'fillHoles'),
('Handle Overlap', 'handleOverlap'),
('Debug Mode', 'debug'),
('Equalize Intensity Over Depth', 'depth_adjust')]
row = 6
shift = 0
for i, v in enumerate(settings):
Checkbutton(self.otherSettingsFrame,
text=v[0],
variable=self.intSettings[v[1]]).grid(row=row,
column=i - shift,
padx=5, pady=5,
sticky=NW)
if (i + 1) % 3 == 0:
row += 1
shift = i + 1
######################################################################
#button to execute segmentation(s)
self.buttonExecute = Button(self.tab1,
bg='green',
font=('Helvetica', '20', 'bold'))
self.buttonExecute["text"] = "Execute Segmentation"
self.buttonExecute["command"] = self.run_segmentation
self.buttonExecute.grid(row=row + 1, column=0, columnspan=5,
padx=5, pady=5, sticky=W + E)
#smoothing/denoising
methods = [("None", 1),
("Median", 2),
("Gaussian", 3),
("Curvature-based\nAnisotropic Diffusion", 4),
("Gradient-based\nAnisotropic Diffusion", 5),
("Bilateral", 6),
("Patch-based Denoising", 7)]
self.smoothingFrame = LabelFrame(self.tab2, text="Smoothing/Denoising")
self.smoothingFrame.grid(row=0, column=0, padx=5, pady=5, sticky=NW)
for m, i in methods:
Radiobutton(self.smoothingFrame,
text=m,
indicatoron=0,
padx=5,
width=20,
variable=self.intSettings['smoothingMethod'],
command=self.populateSmoothingSettings,
value=i).pack(anchor=W)
self.smoothingHelpFrame = LabelFrame(self.tab2, text="Description")
self.smoothingHelpFrame.grid(row=0, column=1, padx=5, pady=5,
sticky=NW)
self.textSmoothingHelp = Text(self.smoothingHelpFrame, wrap=WORD,
height=11, width=40)
self.textSmoothingHelp.insert(END, ("Apply an iterative curvature-bas"
"ed anisotropic diffusion filter. "
"Higher conductance will result in"
" more change per iteration. More "
"iterations will result in a "
"smoother image. This filter shoul"
"d preserve edges. It is better at"
" retaining fine features than "
"gradient-based anisotropic "
"diffusion, and also better when "
"the edge contrast is low."))
self.textSmoothingHelp.pack(anchor=NW)
self.textSmoothingHelp["state"] = DISABLED
self.smoothingLink = r"http://www.itk.org/ItkSoftwareGuide.pdf"
self.smoothingReference = Label(self.smoothingHelpFrame,
text="Reference",
fg="blue", cursor="hand2")
self.smoothingReference.bind("<Button-1>",
self.open_smoothing_reference)
self.smoothingReference.pack(anchor=NW)
self.smoothingSettingsFrame = LabelFrame(
self.tab2,
text="Smoothing/Denoising Settings")
self.smoothingSettingsFrame.grid(row=0, column=2,
padx=5, pady=5, sticky=NW)
settings = [('Conductance', 'curvatureConductance'),
('Iterations', 'curvatureIterations')]
for t, v in settings:
Label(self.smoothingSettingsFrame, text=t).pack(anchor=W)
Entry(self.smoothingSettingsFrame,
textvariable=self.settings[v]).pack(anchor=W)
#Threshold segmentation
self.thresholdFrame = LabelFrame(self.tab3,
text="Threshold segmentation")
self.thresholdFrame.grid(row=0, column=0, padx=5, pady=5, sticky=NW)
methods = [("Percentage", 1),
("Otsu", 2),
("Maximum Entropy", 3),
("Li", 4),
("Huang", 5),
("IsoData (Ridler-Calvard)", 6),
("KittlerIllingworth", 7),
("Moments", 8),
("Yen", 9),
("RenyiEntropy", 10),
("Shanbhag", 11)]
for m, i in methods:
Radiobutton(self.thresholdFrame,
text=m,
indicatoron=0,
padx=5,
width=20,
variable=self.intSettings['thresholdMethod'],
command=self.populateThresholdSettings,
value=i).pack(anchor=W)
self.thresholdHelpFrame = LabelFrame(self.tab3, text="Description")
self.thresholdHelpFrame.grid(row=0, column=1,
padx=5, pady=5, sticky=NW)
self.textThresholdHelp = Text(self.thresholdHelpFrame, wrap=WORD,
height=11, width=40)
self.textThresholdHelp.insert(END, ("Calculates the threshold such"
" that entropy is maximized "
"between the foreground and "
"background. This has shown "
"good performance even when "
"objects are in close "
"proximity."))
self.textThresholdHelp["state"] = DISABLED
self.thresholdLink = (r"http://dx.doi.org/10.1016/"
"0734-189X(85)90125-2")
self.thresholdReference = Label(self.thresholdHelpFrame,
text="", fg="blue", cursor="hand2")
self.textThresholdHelp.pack(anchor=NW)
self.thresholdSettingsFrame = LabelFrame(self.tab3,
text="Threshold Settings")
Checkbutton(
self.thresholdSettingsFrame,
text="Iterative Threshold Adjustment",
variable=self.intSettings['thresholdAdaptive']).pack(anchor=W)
self.thresholdSettingsFrame.grid(row=0, column=2, padx=5, pady=5,
sticky=NW)
Label(self.thresholdSettingsFrame,
text=" No additional settings needed.",
fg='red').pack(anchor=W)
#make Active Contour segmentation frame
self.activeContourFrame = LabelFrame(
self.tab3, text="Active contour segmentation")
self.activeContourFrame.grid(row=1, column=0,
padx=5, pady=5, sticky=NW)
self.activeHelpFrame = LabelFrame(self.tab3, text="Description")
self.activeHelpFrame.grid(row=1, column=1,
padx=5, pady=5, sticky=NW)
self.textActiveHelp = Text(self.activeHelpFrame,
wrap=WORD, height=11, width=40)
self.textActiveHelp.insert(END, ("Only a threshold-based "
"segmentation will "
"be performed."))
self.textActiveHelp.pack(anchor=NW)
self.textActiveHelp["state"] = DISABLED
self.activeLink = ""
self.activeReference = Label(self.activeHelpFrame,
text="",
fg="blue", cursor="hand2")
self.activeReference.bind("<Button-1>", self.open_active_reference)
self.activeReference.pack(anchor=NW)
methods = [("None", 1),
("Geodesic Levelset", 2),
("Edge-free", 3)]
for m, i in methods:
Radiobutton(self.activeContourFrame,
text=m,
indicatoron=0,
padx=5,
width=20,
variable=self.intSettings['activeMethod'],
command=self.populateActiveContourSettings,
value=i).pack(anchor=W)
self.activeSettingsFrame = LabelFrame(self.tab3,
text="Active Contour Settings")
self.activeSettingsFrame.grid(row=1, column=2,
padx=5, pady=5, sticky=NW)
Label(self.activeSettingsFrame,
text="No additional settings needed.").pack(anchor=W)
##### Kinematics Tab ######
#create label frame for image directory selection
self.materialFrame = LabelFrame(
self.tab4,
text="Directory containing reference configuration data")
self.materialFrame.grid(row=0, column=0,
rowspan=1, columnspan=5,
padx=5, pady=5, sticky=NW)
#add directory
self.buttonAddMaterialDirectory = Button(self.materialFrame,
bg='green')
self.buttonAddMaterialDirectory["text"] = "Load"
self.buttonAddMaterialDirectory["command"] = self.add_material_dir
self.buttonAddMaterialDirectory.grid(row=0, column=0,
padx=5, pady=5, sticky=W + E)
self.materialDirectoryLabel = Label(self.materialFrame, text="")
self.materialDirectoryLabel.grid(row=0, column=1,
padx=5, pady=5, sticky=W + E)
self.spatialFrame = LabelFrame(
self.tab4,
text="Directories containing deformed configuration data")
self.spatialFrame.grid(row=1, column=0,
rowspan=2, columnspan=2,
padx=5, pady=5, sticky=NW)
#add directory
self.buttonAddSpatialDirectory = Button(self.spatialFrame,
bg='green')
self.buttonAddSpatialDirectory["text"] = "Add"
self.buttonAddSpatialDirectory["command"] = self.add_spatial_dir
self.buttonAddSpatialDirectory.grid(row=1, column=0,
padx=5, sticky=W + E)
#remove directory
self.buttonRemoveSpatialDirectory = Button(self.spatialFrame, bg='red')
self.buttonRemoveSpatialDirectory["text"] = "Remove"
self.buttonRemoveSpatialDirectory["command"] = self.remove_spatial_dir
self.buttonRemoveSpatialDirectory.grid(row=2, column=0,
padx=5, pady=5, sticky=W + E)
#directory list
self.spatialDirectories = []
self.listSpatialDirectories = Listbox(self.spatialFrame)
self.listSpatialDirectories["width"] = 80
self.listSpatialDirectories["selectmode"] = MULTIPLE
self.listSpatialDirectories.grid(row=1, column=1,
rowspan=2, padx=5, pady=5,
sticky=E + W)
#Options
self.kinematicsOptionsFrame = LabelFrame(
self.tab4, text="Kinematics Analysis Options")
self.kinematicsOptionsFrame.grid(row=3, column=0,
padx=5, pady=5, sticky=E + W)
settings = [('Align with a rigid rotation initially', 'rigidInitial'),
('Perform Deformable Image Registration', 'defReg'),
('Save for Finite Element Analysis', 'saveFEA'),
('Display Registration Results', 'deformableDisplay'),
('Generate Plots', 'makePlots')]
for i, (t, v) in enumerate(settings):
if i == 1:
Checkbutton(
self.kinematicsOptionsFrame,
text=t,
variable=self.intSettings[v],
command=self.populateDeformableSettings).grid(row=3 + i,
column=0,
padx=5,
pady=5,
sticky=NW)
else:
Checkbutton(self.kinematicsOptionsFrame,
text=t,
variable=self.intSettings[v]).grid(row=3 + i,
column=0,
padx=5,
pady=5,
sticky=NW)
self.deformableSettingsFrame = LabelFrame(
self.tab4, text="Deformable Image Registration Settings")
self.deformableSettingsFrame.grid(row=3, column=1,
padx=5, pady=5, sticky=E + W)
#button to execute segmentation(s)
self.buttonExecuteKinematics = Button(self.tab4, bg='green',
font=('Helvetica', '20', 'bold'))
self.buttonExecuteKinematics["text"] = "Execute Analysis"
self.buttonExecuteKinematics["command"] = self.run_kinematics
self.buttonExecuteKinematics.grid(row=7, column=0, columnspan=2,
padx=5, pady=5, sticky=W + E)
##### End Kinematics Tab #####
    def populateSmoothingSettings(self):
        """Rebuild the smoothing-settings pane for the selected method.

        Called when a smoothing Radiobutton is pressed: clears the settings
        frame, updates the reference link/label and help text for the chosen
        method, then packs the Entry widgets that method needs.
        """
        # clear out whatever the previously selected method packed
        for child in self.smoothingSettingsFrame.pack_slaves():
            child.destroy()
        # update the clickable reference label for the chosen method
        if self.intSettings['smoothingMethod'].get() == 1:
            self.smoothingReference.unbind("<Button-1>")
            self.smoothingReference["text"] = ""
        elif self.intSettings['smoothingMethod'].get() == 2:
            self.smoothingReference.bind("<Button-1>",
                                         self.open_smoothing_reference)
            self.smoothingReference["text"] = "Reference: page 80"
        elif self.intSettings['smoothingMethod'].get() == 3:
            self.smoothingReference.bind("<Button-1>",
                                         self.open_smoothing_reference)
            self.smoothingReference["text"] = "Reference: page 96"
        elif self.intSettings['smoothingMethod'].get() == 4:
            self.smoothingReference.bind("<Button-1>",
                                         self.open_smoothing_reference)
            self.smoothingReference["text"] = "Reference: page 106"
        else:
            self.smoothingReference.bind("<Button-1>",
                                         self.open_smoothing_reference)
            self.smoothingReference["text"] = "Reference"
        # set the help text and the (label, settings-key) pairs per method
        if self.intSettings['smoothingMethod'].get() == 1:
            Label(self.smoothingSettingsFrame,
                  text="No additional settings needed.").pack(anchor=W)
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("No smoothing or denoising "
                                                "will be applied to the "
                                                "image."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r""
        elif self.intSettings['smoothingMethod'].get() == 2:
            settings = [('Kernel Radius', 'medianRadius')]
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("Apply a median filter with "
                                                "the window size defined by "
                                                "Kernel Radius. A larger "
                                                "radius will result in a "
                                                "smoother image, but may "
                                                "degrade the edges."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r"http://www.itk.org/ItkSoftwareGuide.pdf"
        elif self.intSettings['smoothingMethod'].get() == 3:
            settings = [('Gaussian Variance', 'gaussianSigma')]
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("Apply a discrete Gaussian "
                                                "filter. Increasing Gaussian "
                                                "Variance will result in a "
                                                "smoother image, but will "
                                                "further blur edges."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r"http://www.itk.org/ItkSoftwareGuide.pdf"
        elif self.intSettings['smoothingMethod'].get() == 4:
            settings = [('Conductance', 'curvatureConductance'),
                        ('Iterations', 'curvatureIterations')]
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("Apply an iterative curvature-"
                                                "based anisotropic diffusion "
                                                "filter. Higher conductance "
                                                "will result in more change "
                                                "per iteration. More itera"
                                                "tions will result in a smooth"
                                                "er image. This filter should"
                                                " preserve edges. It is "
                                                "better at retaining fine "
                                                "features than gradient-based"
                                                " anisotropic diffusion, and "
                                                "also better when the edge "
                                                "contrast is low."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r"http://www.itk.org/ItkSoftwareGuide.pdf"
        elif self.intSettings['smoothingMethod'].get() == 5:
            settings = [('Conductance', 'gradientConductance'),
                        ('Iterations', 'gradientIterations'),
                        ('Time Step', 'gradientTimeStep')]
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("Apply an iterative gradient-"
                                                "based anisotropic diffusion "
                                                "filter. Higher conductance "
                                                "will result in more change "
                                                "per iteration. More itera"
                                                "tions will result in a smooth"
                                                "er image. This filter should"
                                                " preserve edges. This may "
                                                "perform better than curvature"
                                                "-based if edge contrast "
                                                "is good."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r"http://dx.doi.org/10.1109/34.56205"
        elif self.intSettings['smoothingMethod'].get() == 6:
            settings = [
                ('Domain Variance (costly)', 'bilateralDomainSigma'),
                ('Range Variance', 'bilateralRangeSigma'),
                ('Samples', 'bilateralSamples')]
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("A bilateral filter is applied"
                                                " both on a neighborhood "
                                                "defined by the Euclidean "
                                                "distance (the domain) from"
                                                " a given voxel and a "
                                                "'neighborhood' based on "
                                                "voxel intensities (the range)"
                                                ". Two Gaussian kernels are "
                                                "defined by Domain Variance "
                                                "and Range Variance and the "
                                                "actual weight of influence a "
                                                "particular neighbor voxel has"
                                                " on the current voxel is a "
                                                "combination of both. A voxel"
                                                " that is both close in "
                                                "distance and similar in "
                                                "intensity will have a high"
                                                " weight. This results in "
                                                "edge-preserving smoothing."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r"http://dx.doi.org/10.1109/ICCV.1998.710815"
        elif self.intSettings['smoothingMethod'].get() == 7:
            Label(
                self.smoothingSettingsFrame,
                text='Warning: CPU cost grows rapidly with increasing values.',
                fg='red').pack(anchor=W)
            settings = [('Patch Radius', 'patchRadius'),
                        ('Number of Patches', 'patchNumber'),
                        ('Iterations', 'patchIterations')]
            self.textSmoothingHelp["state"] = NORMAL
            self.textSmoothingHelp.delete("0.0", END)
            self.textSmoothingHelp.insert(END, ("This filter will denoise "
                                                "the image using an unsuper"
                                                "vised information-theoretic"
                                                " adaptive filter "
                                                "(SEE REFERENCE). The algo"
                                                "rithm attempts to extract "
                                                "the noise based on random "
                                                "sampling of the image with "
                                                "windows of size, Patch Radius"
                                                ", and number, Number of "
                                                "Patches. No a priori "
                                                "knowledge of the noise is"
                                                " needed, but a Noise Model"
                                                " can be specified. Since "
                                                "laser fluorescence microscopy"
                                                " is known to have Poisson "
                                                "noise, this is the default."
                                                " The drawback of this method"
                                                " is it becomes extremely "
                                                "costly with increasing "
                                                "any of its parameters."))
            self.textSmoothingHelp["state"] = DISABLED
            self.smoothingLink = r"http:/dx.doi.org/10.1109/TPAMI.2006.64"
        # pack label/entry pairs for every method that defines settings;
        # method 1 has none and method 7 also needs the noise-model buttons
        if not(self.intSettings['smoothingMethod'].get() in [1, 7]):
            for t, v in settings:
                Label(self.smoothingSettingsFrame, text=t).pack(anchor=W)
                Entry(self.smoothingSettingsFrame,
                      textvariable=self.settings[v]).pack(anchor=W)
        if self.intSettings['smoothingMethod'].get() == 7:
            for t, v in settings:
                Label(self.smoothingSettingsFrame, text=t).pack(anchor=W)
                Entry(self.smoothingSettingsFrame,
                      textvariable=self.settings[v]).pack(anchor=W)
            models = [("No Model", 1),
                      ("Poisson", 2),
                      ("Gaussian", 3),
                      ("Rician", 4)]
            Label(self.smoothingSettingsFrame,
                  text="Noise Model").pack(anchor=NW)
            for t, v in models:
                Radiobutton(self.smoothingSettingsFrame,
                            text=t,
                            indicatoron=0,
                            padx=5,
                            width=8,
                            variable=self.settings['patchNoiseModel'],
                            value=v).pack(side=LEFT)
    def populateThresholdSettings(self):
        """Rebuild the threshold-settings pane for the selected method.

        Called when a threshold Radiobutton is pressed: clears the settings
        frame, re-adds the adaptive-threshold checkbox, updates the reference
        link/label and help text, and adds a Ratio entry for the Percentage
        method (the only method with a user parameter).
        """
        # clear out whatever the previously selected method packed
        for child in self.thresholdSettingsFrame.pack_slaves():
            child.destroy()
        # the adaptive-adjustment option applies to every method
        Checkbutton(
            self.thresholdSettingsFrame,
            text="Iterative Threshold Adjustment",
            variable=self.intSettings['thresholdAdaptive']).pack(anchor=W)
        # the Percentage method has no literature reference to link
        if self.intSettings['thresholdMethod'].get() == 1:
            self.thresholdReference.unbind("<Button-1>")
            self.thresholdReference['text'] = ""
        else:
            self.thresholdReference['text'] = "Reference"
            self.thresholdReference.bind("<Button-1>",
                                         self.open_threshold_reference)
        self.thresholdReference.pack(anchor=NW)
        # per-method help text, reference link, and extra settings
        if self.intSettings['thresholdMethod'].get() == 1:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("Thresholds at a user-specifie"
                                                "d ratio of the maximum voxel "
                                                "intensity."))
            self.textThresholdHelp["state"] = DISABLED
            Label(self.thresholdSettingsFrame, text="Ratio").pack(anchor=W)
            e = Entry(self.thresholdSettingsFrame,
                      textvariable=self.settings['thresholdPercentage'])
            e.pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 2:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("The method determines the "
                                                "threshold that maximizes the"
                                                " 'total variance of levels';"
                                                " equation 12 in reference. "
                                                "Performs poorly when objects "
                                                "are in close proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = r"http://dx.doi.org/10.1109/TSMC.1979.4310076"
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 3:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("Calculates the threshold such"
                                                " that entropy is maximized "
                                                "between the foreground and "
                                                "background. This has shown "
                                                "good performance even when "
                                                "objects are in close "
                                                "proximity."))
            self.textThresholdHelp.pack(anchor=NW)
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = (r"http://dx.doi.org/10.1016/"
                                  "0734-189X(85)90125-2")
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 4:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("An iterative method to "
                                                "minimize cross-entropy of "
                                                "the gray and binarized image."
                                                " Performs poorly when objects"
                                                " are in close proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = (r"http://dx.doi.org/10.1016/"
                                  "S0167-8655(98)00057-9")
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 5:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("Thresholds the image using "
                                                "fuzzy set theory. The "
                                                "'index of fuzziness' between "
                                                "the binarized and the gray "
                                                "image is calculated using "
                                                "Shannon's function. The "
                                                "threshold level that minimize"
                                                "s the index of fuzziness is "
                                                "chosen. Performs poorly when"
                                                " objects are in close "
                                                "proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = (r"http://dx.doi.org/10.1016"
                                  "/0031-3203(94)E0043-K")
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 6:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("An iterative method that uses"
                                                " a switching function to "
                                                "classify voxels as either "
                                                "foreground or background. "
                                                "The first iteration defines"
                                                " the switch based on the "
                                                "assumption that the image "
                                                "corners are background and "
                                                "the rest is foreground. The"
                                                " binarized image that results"
                                                " is then used to define the "
                                                "switching function in the "
                                                "next iteration. Performs "
                                                "poorly when objects are in"
                                                " close proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = r"http://dx.doi.org/10.1109/TSMC.1978.4310039"
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 7:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("This method approximates the "
                                                "image histogram as two "
                                                "Gaussian distributions estim"
                                                "ating the histogram above and"
                                                " below the current threshold "
                                                "level. The threshold that "
                                                "produces minimal overlap "
                                                "between the Gaussian fits "
                                                "is taken. Performs poorly "
                                                "when objects are in close "
                                                "proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = (r"http://dx.doi.org/"
                                  "10.1016/0031-3203(86)90030-0")
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 8:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("This method calculates the "
                                                "moments of the gray image and"
                                                " then determines the thresh"
                                                "old level that yields a thres"
                                                "holded image that has the "
                                                "same moments. Performs poorly"
                                                " when objects are in close "
                                                "proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = (r"http://dx.doi.org/"
                                  "10.1016/0734-189X(85)90133-1")
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 9:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("Performs a multilevel thres"
                                                "hold that minimizes a cost "
                                                "function based on similarity"
                                                " between the original and "
                                                "thresholded images and the "
                                                "total number of bits needed "
                                                "to represent the thresholded "
                                                "image. The balance of these "
                                                "two terms typically has a "
                                                "unique minimum. This method "
                                                "is much less expensive than "
                                                "entropy based methods, but "
                                                "has shown poor performance "
                                                "when objects are in close "
                                                "proximity."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = r"http://dx.doi.org/10.1109/83.366472"
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 10:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("The same as the Maximum "
                                                "Entropy measure, but uses "
                                                "the Renyi entropy definition."
                                                " In practice this sets the "
                                                "threshold value lower than "
                                                "the Maximum Entropy approach,"
                                                " so is more likely to capture"
                                                " faint voxels. This can "
                                                "cause problems when objects"
                                                " are close."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = (r"http://dx.doi.org/"
                                  "10.1016/0734-189X(85)90125-2")
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
        elif self.intSettings['thresholdMethod'].get() == 11:
            self.textThresholdHelp["state"] = NORMAL
            self.textThresholdHelp.delete("0.0", END)
            self.textThresholdHelp.insert(END, ("A modification of the Maximum"
                                                " Entropy method. This method "
                                                "additionally considers the "
                                                "voxel's 'distance' from the "
                                                "determined threshold in its "
                                                "analytics. This results in a "
                                                "more aggressive thresholding "
                                                "with fewer faint voxels "
                                                "classified as white. For "
                                                "microscopy images, voxels "
                                                "with partial volume effects "
                                                "are more likely to not be "
                                                "considered an object with "
                                                "this approach. This will "
                                                "perform better still than "
                                                "Maximum Entropy when objects"
                                                " are close."))
            self.textThresholdHelp["state"] = DISABLED
            self.thresholdLink = r"http://dx.doi.org/10.1006/cgip.1994.1037"
            Label(self.thresholdSettingsFrame,
                  text="  No additional settings needed.",
                  fg='red').pack(anchor=W)
    def populateActiveContourSettings(self):
        """Rebuild the active-contour settings panel for the selected method.

        Reads ``self.intSettings['activeMethod']`` (1 = threshold only,
        2 = geodesic active contour, otherwise edge-free active contour) and
        repopulates the frame with the matching help text, literature
        reference link, and parameter entry widgets.
        """
        # Discard widgets belonging to the previously selected method.
        for child in self.activeSettingsFrame.pack_slaves():
            child.destroy()
        if self.intSettings['activeMethod'].get() == 1:
            # Threshold-only segmentation: nothing extra to configure.
            Label(self.activeSettingsFrame,
                  text="No additional settings needed.").pack(anchor=W)
            # The help widget must be NORMAL to edit its contents, then
            # re-DISABLED so the user cannot type into it.
            self.textActiveHelp["state"] = NORMAL
            self.textActiveHelp.delete("0.0", END)
            self.textActiveHelp.insert(END, ("Only a threshold-based "
                                             "segmentation will "
                                             "be performed."))
            self.textActiveHelp["state"] = DISABLED
            self.activeReference["text"] = ""
            self.activeLink = ""
        elif self.intSettings['activeMethod'].get() == 2:
            # Geodesic (edge-based) active contour level set.
            self.textActiveHelp["state"] = NORMAL
            self.textActiveHelp.delete("0.0", END)
            self.textActiveHelp.insert(END,
                                       ("Segments object using a geodesic "
                                        "active contour levelset algorithm. "
                                        "Uses the Canny edge detector; Canny"
                                        "Variance governs degree of Gaussian"
                                        " smoothing of edge detector. Upper"
                                        "Threshold will guarantee detection "
                                        "of edges with intensity greater than"
                                        " setting. LowerThreshold will disca"
                                        "rd intensities lower than setting. "
                                        "Propagation is the contour inflation"
                                        " force (deflation if negative); "
                                        "higher values result in skipping "
                                        "weaker edges or small islands of "
                                        "contrast change. Curvature governs "
                                        "how smooth the contour will be; "
                                        "higher values result in smoother. "
                                        "Advection causes the contour to "
                                        "attract to edges; higher values "
                                        "help prevent leakage. Iterations "
                                        "are the maximum iterations to take."
                                        " Maximum RMS Error Change is the "
                                        "change in RMS at which iterations "
                                        "will terminate if Iterations has "
                                        "not yet been reached."))
            self.textActiveHelp.pack(anchor=NW)
            self.textActiveHelp["state"] = DISABLED
            self.activeReference["text"] = "Reference"
            self.activeLink = r"http://dx.doi.org/10.1023/A:1007979827043"
            # (display label, self.settings key) pairs: one Label+Entry each.
            settings = [('CannyVariance', 'geodesicCannyVariance'),
                        ('UpperThreshold', 'geodesicCannyUpper'),
                        ('LowerThreshold', 'geodesicCannyLower'),
                        ('Propagation', 'geodesicPropagation'),
                        ('Curvature', 'geodesicCurvature'),
                        ('Advection', 'geodesicAdvection'),
                        ('Iterations', 'geodesicIterations'),
                        ('Maximum RMS Error Change', 'geodesicRMS')]
            for t, v in settings:
                Label(self.activeSettingsFrame, text=t).pack(anchor=W)
                Entry(self.activeSettingsFrame,
                      textvariable=self.settings[v]).pack(anchor=W)
        else:
            # Edge-free active contour (no edge information required).
            self.textActiveHelp["state"] = NORMAL
            self.textActiveHelp.delete("0.0", END)
            self.textActiveHelp.insert(END, ("An active contour model that "
                                             "requires no edge information. "
                                             "This algorithm employs a convex "
                                             "objective function and is "
                                             "therefore, very robust. "
                                             "Unfortunately, there is only "
                                             "a single phase implementation "
                                             "in SimpleITK, so this tends to "
                                             "have trouble with objects in "
                                             "close proximity. If a multiphase"
                                             " version is released in the "
                                             "future, this will be the ideal "
                                             "approach to segment close or "
                                             "touching objects. The contour "
                                             "is evolved iteratively using "
                                             "curvature flow. Lambda1 and "
                                             "Lambda2 are energy term weights "
                                             "for voxels inside and outside "
                                             "the contour, respectively. A "
                                             "strategy to help resolve close "
                                             "objects is to slightly increase "
                                             "Lambda1, penalizing voxels "
                                             "inside the contour."))
            self.textActiveHelp.pack(anchor=NW)
            self.textActiveHelp["state"] = DISABLED
            self.activeReference["text"] = "Reference"
            self.activeLink = r"http://dx.doi.org/10.1109/83.902291"
            settings = [('Lambda1 (internal weight)', 'edgeLambda1'),
                        ('Lambda2 (external weight)', 'edgeLambda2'),
                        ('Curvature Weight', 'edgeCurvature'),
                        ('Iterations', 'edgeIterations')]
            for t, v in settings:
                Label(self.activeSettingsFrame, text=t).pack(anchor=W)
                Entry(self.activeSettingsFrame,
                      textvariable=self.settings[v]).pack(anchor=W)
def populateDeformableSettings(self):
for child in self.deformableSettingsFrame.pack_slaves():
child.destroy()
if self.intSettings['defReg'].get() == 1:
settings = [
('Displacement Field Smoothing Variance', 'deformableSigma'),
('Maximum RMS error', 'deformableRMS'),
('Maximum Iterations', 'deformableIterations'),
('Precision', 'deformablePrecision')]
for t, v in settings:
Label(self.deformableSettingsFrame, text=t).pack(anchor=W)
Entry(self.deformableSettingsFrame,
textvariable=self.settings[v]).pack(anchor=W)
def add_directory(self):
dir_name = tkinter.filedialog.askdirectory(
parent=root,
initialdir=self.lastdir,
title="Select directory containing images.")
self.lastdir = dir_name
self.directories.append(dir_name)
self.listDirectories.insert(END, dir_name)
def remove_directory(self):
index = self.listDirectories.curselection()
if index:
for i in index[::-1]:
self.listDirectories.delete(i)
del self.directories[i]
def add_material_dir(self):
dir_name = tkinter.filedialog.askdirectory(
parent=root,
initialdir=self.lastdir,
title="Select directory containing reference configuration data.")
if dir_name:
self.lastdir = dir_name
self.materialDirectory = dir_name
self.materialDirectoryLabel["text"] = dir_name
def add_spatial_dir(self):
dir_name = tkinter.filedialog.askdirectory(
parent=root,
initialdir=self.lastdir,
title="Select directory containing reference configuration data.")
if dir_name:
self.lastdir = dir_name
self.spatialDirectories.append(dir_name)
self.listSpatialDirectories.insert(END, dir_name)
def remove_spatial_dir(self):
index = self.listSpatialDirectories.curselection()
if index:
for i in index[::-1]:
self.listSpatialDirectories.delete(i)
del self.spatialDirectories[i]
def saveSettings(self):
filename = tkinter.filedialog.asksaveasfilename(defaultextension=".pkl")
if filename:
fid = open(filename, 'wb')
tmp_settings = copy.copy(self.settings)
for key in list(self.settings.keys()):
tmp_settings[key] = self.settings[key].get()
tmp_int_settings = copy.copy(self.intSettings)
for key in list(self.intSettings.keys()):
tmp_int_settings[key] = self.intSettings[key].get()
values = {"Settings": tmp_settings,
"ButtonStates": tmp_int_settings}
pickle.dump(values, fid)
fid.close()
def loadSettings(self):
filename = tkinter.filedialog.askopenfilename(
parent=root,
initialdir=os.getcwd(),
title="Select a saved settings file.")
if filename:
fid = open(filename, 'rb')
values = pickle.load(fid)
fid.close()
for key in self.settings:
try:
self.settings[key].set(values['Settings'][key])
except:
print((("WARNING: Failed to detect a value for {:s} "
"in file.\n The settings file was probably saved "
"by a previous version.").format(key)))
for key in self.intSettings:
try:
self.intSettings[key].set(values['ButtonStates'][key])
except:
print((("WARNING: Failed to detect a value for {:s} "
"in file.\n The settings file was probably saved "
"by a previous version.").format(key)))
self.populateSmoothingSettings()
self.populateThresholdSettings()
self.populateActiveContourSettings()
    def loadROI(self):
        """Load regions of interest from a user-chosen .xls workbook.

        Each sheet of the workbook becomes one entry of ``self.ROI`` (a
        list of regions per image directory). Rows are read in pairs: the
        even row supplies the first five values of a 6-element region, the
        following odd row completes the sixth (an extent computed from
        column 4). Files without '.xls' in the name are rejected with a
        warning.
        """
        filename = tkinter.filedialog.askopenfilename(
            parent=root,
            initialdir=os.getcwd(),
            title="Select an .xls file containing Regions of Interest.")
        if '.xls' in filename:
            wb = xlrd.open_workbook(filename)
            N = wb.nsheets
            #clear any previously loaded regions
            self.ROI = []
            for i in range(N):
                self.ROI.append([])
                s = wb.sheet_by_index(i)
                #skip the first row (assumed to be a header)
                for r in range(s.nrows - 1):
                    # Values from column 1 onward of the (r+1)-th sheet row.
                    v = s.row_values(r + 1, start_colx=1)
                    #even row: start a new region record
                    if r % 2 == 0:
                        tmp = [0] * 6
                        # NOTE(review): column order is intentionally
                        # permuted (v[4] lands in tmp[2]) -- presumably
                        # mapping sheet layout to [x, y, z, w, h, d];
                        # confirm against the sheet format before changing.
                        tmp[0] = int(v[0])
                        tmp[1] = int(v[1])
                        tmp[2] = int(v[4])
                        tmp[3] = int(v[2])
                        tmp[4] = int(v[3])
                    else:
                        # odd row: finish the region started on the even
                        # row and commit it.
                        tmp[5] = int(v[4]) - tmp[2]
                        self.ROI[i].append(tmp)
        else:
            print((("{:s} does not have proper extension. Currently supporting"
                    " only .xls filetypes.").format(filename)))
    def run_segmentation(self):
        """Run the configured segmentation over every selected directory.

        Translates the GUI smoothing settings into the pyCellAnalyst
        parameter dictionary, constructs a ``Volume`` per image directory
        (with that directory's regions of interest, if loaded), applies
        the selected segmentation method (threshold-only, geodesic, or
        edge-free), and writes label and surface outputs.
        """
        if not self.directories:
            print(("WARNING: no directories have been indicated; "
                   "nothing has been done."))
            return
        if not self.ROI:
            print(("WARNING: no region of interest loaded; "
                   "assuming only 1 cell in the image."))
        # translate smoothing parameters to pyCellAnalyst dictionary syntax
        if self.intSettings['smoothingMethod'].get() == 1:
            # 1 = no smoothing
            smoothingParameters = {}
        elif self.intSettings['smoothingMethod'].get() == 2:
            # 2 = median filter (isotropic radius)
            smoothingParameters = {
                'radius': (self.settings['medianRadius'].get(),) * 3}
        elif self.intSettings['smoothingMethod'].get() == 3:
            # 3 = Gaussian blur
            smoothingParameters = {
                'sigma': self.settings['gaussianSigma'].get()}
        elif self.intSettings['smoothingMethod'].get() == 4:
            # 4 = curvature anisotropic diffusion
            smoothingParameters = {
                'iterations': self.settings['curvatureIterations'].get(),
                'conductance': self.settings['curvatureConductance'].get()}
        elif self.intSettings['smoothingMethod'].get() == 5:
            # 5 = gradient anisotropic diffusion
            smoothingParameters = {
                'iterations': self.settings['gradientIterations'].get(),
                'conductance': self.settings['gradientConductance'].get(),
                'time step': self.settings['gradientTimeStep'].get()}
        elif self.intSettings['smoothingMethod'].get() == 6:
            # 6 = bilateral filter
            smoothingParameters = {
                'domainSigma': self.settings['bilateralDomainSigma'].get(),
                'rangeSigma': self.settings['bilateralRangeSigma'].get(),
                'samples': self.settings['bilateralSamples'].get()}
        elif self.intSettings['smoothingMethod'].get() == 7:
            # 7 = patch-based denoising; noise model chosen by 1-based index
            noiseModel = ['no model', 'poisson', 'gaussian', 'rician']
            smoothingParameters = {
                'radius': self.settings['patchRadius'].get(),
                'iterations': self.settings['patchIterations'].get(),
                'patches': self.settings['patchNumber'].get(),
                'noise model': noiseModel[
                    self.settings['patchNoiseModel'].get() - 1]}
        objects = ['Foreground', 'Background']
        for i, d in enumerate(self.directories):
            try:
                regions = self.ROI[i]
            except:  # NOTE(review): bare except; no ROI for this directory
                regions = None
            vol = Volume(d,
                         pixel_dim=[self.settings['xdim'].get(),
                                    self.settings['ydim'].get(),
                                    self.settings['zdim'].get()],
                         regions=regions,
                         segmentation='User',
                         handle_overlap=self.intSettings[
                             'handleOverlap'].get(),
                         display=self.intSettings['display'].get(),
                         stain=objects[self.intSettings['stain'].get()],
                         two_dim=self.intSettings['processAs2D'].get(),
                         bright=self.intSettings['removeBright'].get(),
                         enhance_edge=self.intSettings[
                             'edgeEnhancement'].get(),
                         depth_adjust=self.intSettings['depth_adjust'].get(),
                         smoothing_method=self.smoothingMethods[
                             self.intSettings['smoothingMethod'].get() - 1],
                         debug=self.intSettings['debug'].get(),
                         fillholes=self.intSettings['fillHoles'].get(),
                         opening=self.intSettings['opening'].get(),
                         smoothing_parameters=smoothingParameters)
            # Dispatch on the selected active-contour method
            # (1 = threshold only, 2 = geodesic, 3 = edge-free).
            if self.intSettings['activeMethod'].get() == 1:
                vol.thresholdSegmentation(
                    method=self.thresholdMethods[self.intSettings[
                        'thresholdMethod'].get() - 1],
                    ratio=self.settings['thresholdPercentage'].get(),
                    adaptive=self.intSettings['thresholdAdaptive'].get())
            elif self.intSettings['activeMethod'].get() == 2:
                vol.geodesicSegmentation(
                    upsampling=int(self.settings['upsampling'].get()),
                    seed_method=self.thresholdMethods[self.intSettings[
                        'thresholdMethod'].get() - 1],
                    adaptive=self.intSettings['thresholdAdaptive'].get(),
                    ratio=self.settings['thresholdPercentage'].get(),
                    canny_variance=(self.settings['geodesicCannyVariance']
                                    .get(),) * 3,
                    cannyUpper=self.settings['geodesicCannyUpper'].get(),
                    cannyLower=self.settings['geodesicCannyLower'].get(),
                    propagation=self.settings['geodesicPropagation'].get(),
                    curvature=self.settings['geodesicCurvature'].get(),
                    advection=self.settings['geodesicAdvection'].get(),
                    active_iterations=self.settings[
                        'geodesicIterations'].get())
            elif self.intSettings['activeMethod'].get() == 3:
                vol.edgeFreeSegmentation(
                    upsampling=int(self.settings['upsampling'].get()),
                    seed_method=self.thresholdMethods[self.intSettings[
                        'thresholdMethod'].get() - 1],
                    adaptive=self.intSettings['thresholdAdaptive'].get(),
                    ratio=self.settings['thresholdPercentage'].get(),
                    lambda1=self.settings['edgeLambda1'].get(),
                    lambda2=self.settings['edgeLambda2'].get(),
                    curvature=self.settings['edgeCurvature'].get(),
                    iterations=self.settings['edgeIterations'].get())
            vol.writeLabels()
            vol.writeSurfaces()
    def run_kinematics(self):
        """Compute strain metrics for each deformed-configuration directory.

        For every directory in ``self.spatialDirectories`` a ``CellMech``
        analysis is run against ``self.materialDirectory`` and two
        timestamped CSV reports are written next to the spatial
        directories: "Kinematics_Analysis" (strain tensor components and
        invariants per object) and "Ellipsoidal_Analysis" (sorted
        ellipsoid axis lengths and volumes). When the "makePlots" option
        is set, results are additionally accumulated in ``self.results``
        and ``self.aggregate`` and ``self.makePlots`` is called at the end.
        """
        #timestamp used to make the output filenames unique
        ts = datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d_%H-%M-%S')
        pardir = os.path.dirname(self.spatialDirectories[0])
        #uniform strain report
        ofid = open(pardir + os.sep + "Kinematics_Analysis" + ts + ".csv", 'w')
        #ellipsoidal approximation report
        efid = open(pardir + os.sep + "Ellipsoidal_Analysis" + ts
                    + ".csv", 'w')
        if self.intSettings["makePlots"].get():
            self.results = OrderedDict()
            self.results["Tissue"] = OrderedDict()
            self.aggregate = OrderedDict()
        for i, d in enumerate(self.spatialDirectories):
            # Short label for plots/keys: directory name minus '_results'.
            shortname = d.split(os.sep)[-1]
            try:
                shortname = shortname.replace('_results', '')
            except:
                pass
            mech = CellMech(
                ref_dir=self.materialDirectory, def_dir=d,
                rigidInitial=self.intSettings['rigidInitial']
                .get(),
                deformable=self.intSettings['defReg'].get(),
                saveFEA=self.intSettings['saveFEA'].get(),
                deformableSettings={
                    'Iterations': self.settings[
                        'deformableIterations'].get(),
                    'Maximum RMS': self.settings[
                        'deformableRMS'].get(),
                    'Displacement Smoothing': self.settings[
                        'deformableSigma'].get(),
                    'Precision': self.settings[
                        'deformablePrecision'].get()},
                display=self.intSettings['deformableDisplay'].get())
            ofid.write(d + '\n')
            ofid.write(("Object ID, E11, E22, E33, E12, E13, E23, "
                        "Volumetric, Effective, Maximum Tensile, "
                        "Maximum Compressive, Maximum Shear\n"))
            # Tissue-level (ECM) strain row, when a strain was computed.
            if np.any(mech.ecm_strain):
                N = len(mech.ecm_strain)
                # Principal strains: eigenvalues sorted ascending, so
                # index 0 is most compressive and index 2 most tensile.
                ecm_w, ecm_v = np.linalg.eigh(mech.ecm_strain)
                ecm_w = np.sort(ecm_w)
                tissue_tensile = ecm_w[2]
                tissue_compressive = ecm_w[0]
                tissue_shear = 0.5 * np.abs(ecm_w[2] - ecm_w[0])
                # Volumetric strain from det(2E + I) = det(C);
                # sqrt(det(C)) - 1 is the relative volume change.
                tissue_vol = np.sqrt(
                    np.linalg.det(2 * mech.ecm_strain + np.eye(3))) - 1.0
                ofid.write(("Tissue, {:f}, {:f}, {:f}, {:f}, {:f}, {:f}, "
                            "{:f}, {:f}, {:f}, {:f}, {:f}\n")
                           .format(mech.ecm_strain[0, 0],
                                   mech.ecm_strain[1, 1],
                                   mech.ecm_strain[2, 2],
                                   mech.ecm_strain[0, 1],
                                   mech.ecm_strain[0, 2],
                                   mech.ecm_strain[1, 2],
                                   tissue_vol,
                                   np.sqrt((ecm_w[2] - ecm_w[1]) ** 2 +
                                           (ecm_w[1] - ecm_w[0]) ** 2 +
                                           (ecm_w[2] - ecm_w[0]) ** 2),
                                   tissue_tensile,
                                   tissue_compressive,
                                   tissue_shear))
                if self.intSettings['makePlots'].get():
                    self.results["Tissue"][shortname] = OrderedDict([
                        ("Max Compression", tissue_compressive),
                        ("Max Tension", tissue_tensile),
                        ("Max Shear", tissue_shear),
                        ("Volume", tissue_vol)])
            N = len(mech.cell_strains)
            # Create one results slot per cell on the first directory only.
            if i == 0 and self.intSettings["makePlots"].get():
                for j in range(N):
                    self.results[
                        "Cell {:d}".format(j + 1)] = OrderedDict()
            if self.intSettings["makePlots"].get():
                self.aggregate[shortname] = OrderedDict([
                    ("Max Compression", np.zeros(N, float)),
                    ("Max Tension", np.zeros(N, float)),
                    ("Max Shear", np.zeros(N, float)),
                    ("Volume", np.zeros(N, float)),
                    ("Height", np.zeros(N, float)),
                    ("Width", np.zeros(N, float)),
                    ("Depth", np.zeros(N, float))])
            # Per-cell strain rows (same metrics as the tissue row).
            for j, c in enumerate(mech.cell_strains):
                w, v = np.linalg.eigh(c)
                w = np.sort(w)
                ofid.write(("Cell {:d}, {:f}, {:f}, {:f}, {:f}, {:f}, {:f}, "
                            "{:f}, {:f}, {:f}, {:f}, {:f}\n")
                           .format(j + 1,
                                   c[0, 0],
                                   c[1, 1],
                                   c[2, 2],
                                   c[0, 1],
                                   c[0, 2],
                                   c[1, 2],
                                   mech.vstrains[j],
                                   np.sqrt((w[2] - w[1]) ** 2 +
                                           (w[1] - w[0]) ** 2 +
                                           (w[2] - w[0]) ** 2),
                                   w[2],
                                   w[0],
                                   0.5 * np.abs(w[2] - w[0])))
                if self.intSettings["makePlots"].get():
                    key = "Cell {:d}".format(j + 1)
                    # Height/Width/Depth are filled in later from the
                    # ellipsoidal axes below.
                    self.results[key][shortname] = OrderedDict([
                        ("Max Compression", w[0]),
                        ("Max Tension", w[2]),
                        ("Max Shear", 0.5 * np.abs(w[2] - w[0])),
                        ("Volume", mech.vstrains[j]),
                        ("Height", None),
                        ("Width", None),
                        ("Depth", None)])
                    self.aggregate[shortname]['Max Compression'][j] = w[0]
                    self.aggregate[shortname]['Max Tension'][j] = w[2]
                    self.aggregate[shortname]['Max Shear'][j] = 0.5 * np.abs(
                        w[2] - w[0])
                    self.aggregate[shortname]['Volume'][j] = mech.vstrains[j]
            efid.write(d + '\n')
            efid.write(("Object ID, Reference Major Axis, Reference Middle "
                        "Axis, Reference Minor Axis, Deformed Major Axis, "
                        "Deformed Middle Axis, Deformed Minor Axis, Reference"
                        " Volume, Deformed Volume\n"))
            # Ellipsoidal approximation: axes sorted ascending, reported
            # major-to-minor.
            for j, (rvol, dvol, raxes, daxes) in enumerate(
                    zip(mech.rvols, mech.dvols, mech.raxes, mech.daxes)):
                raxes = np.sort(raxes)
                daxes = np.sort(daxes)
                efid.write(("Cell {:d}, {:f}, {:f}, {:f}, {:f}, {:f}, {:f},"
                            " {:f}, {:f}\n").format(j + 1,
                                                    raxes[2],
                                                    raxes[1],
                                                    raxes[0],
                                                    daxes[2],
                                                    daxes[1],
                                                    daxes[0],
                                                    rvol,
                                                    dvol))
                if self.intSettings['makePlots'].get():
                    key = "Cell {:d}".format(j + 1)
                    # Axis stretch ratios minus one (engineering strain per
                    # axis). NOTE(review): "Height" uses the minor axis and
                    # "Width" the major -- presumably intentional naming;
                    # confirm against how the plots are labelled.
                    self.results[key][shortname]["Height"] = (old_div(daxes[0],
                                                              raxes[0]) - 1)
                    self.results[key][shortname]["Depth"] = (old_div(daxes[1],
                                                             raxes[1]) - 1)
                    self.results[key][shortname]["Width"] = (old_div(daxes[2],
                                                             raxes[2]) - 1)
                    self.aggregate[shortname]["Height"][j] = (old_div(daxes[0],
                                                              raxes[0]) - 1)
                    self.aggregate[shortname]["Depth"][j] = (old_div(daxes[1],
                                                             raxes[1]) - 1)
                    self.aggregate[shortname]["Width"][j] = (old_div(daxes[2],
                                                             raxes[2]) - 1)
        if self.intSettings['makePlots'].get():
            self.makePlots(ts)
        ofid.close()
        efid.close()
    def makePlots(self, ts):
        """Write summary SVG plots for the kinematics results.

        Produces, in the parent of the first spatial directory:
        box plots of each metric across all cells ("AllCells_*"), per-object
        bar charts of the strain metrics ("Kinematics_*") and of the
        ellipsoidal axis strains ("Ellipsoidal_*"), plus two standalone
        legend figures. ``ts`` is the timestamp string embedded in every
        filename.
        """
        pardir = os.path.dirname(self.spatialDirectories[0])
        #boxplots of all cells for different cases
        N = len(list(self.aggregate.keys()))
        M = len(list(self.aggregate.values())[0]["Height"])
        for m in list(list(self.aggregate.values())[0].keys()):
            # One column per case (directory), one row per cell.
            data = np.zeros((M, N), float)
            for j, k in enumerate(self.aggregate.keys()):
                data[:, j] = self.aggregate[k][m]
            fig, ax = plt.subplots()
            ax.boxplot(data)
            ax.set_xticklabels(list(self.aggregate.keys()))
            ax.set_ylabel(m + " Strain")
            fig.savefig(os.path.join(pardir, "AllCells_{:s}_{:s}.svg".format(m, ts)))
        for k in list(self.results.keys()):
            # Skip objects with no recorded cases (e.g. "Tissue" when no
            # ECM strain was computed).
            if not(list(self.results[k].keys())):
                continue
            fig, ax = plt.subplots()
            fig.set_size_inches([3.34646, 3.34646])
            N = len(list(self.results[k].keys()))
            ind = np.arange(4)
            width = old_div(0.8, float(N))
            # NOTE(review): `cm` is assigned but never used below.
            cm = plt.get_cmap('jet')
            colors = plt.cm.Set3(np.linspace(0, 1, N))
            plt.rcParams.update({'font.size': 8})
            rects = []
            # affine transformation approach
            for j, case in enumerate(self.results[k].keys()):
                dat = np.array([self.results[k][case]["Max Compression"],
                                self.results[k][case]["Max Tension"],
                                self.results[k][case]["Max Shear"],
                                self.results[k][case]["Volume"]]).ravel()
                rects.append(ax.bar(ind + j * width, dat, width,
                                    color=colors[j], label=case))
            ax.set_ylabel('Green-Lagrange Strain')
            ax.set_title(k)
            ax.set_xticks(np.arange(4) + 0.4)
            ax.set_xticklabels(['Compression', 'Tension', 'Shear', 'Volume'])
            ax.axhline(y=0.0, color='k')
            fig.savefig(os.path.join(pardir, "Kinematics_{:s}_{:s}.svg".format(k, ts)))
            # ellipsoidal approach
            fig2, ax2 = plt.subplots()
            fig2.set_size_inches([3.34646, 3.34646])
            rects = []
            for j, case in enumerate(self.results[k].keys()):
                # NOTE(review): this skips per-iteration instead of skipping
                # the whole "Tissue" object before creating fig2 -- the
                # empty Ellipsoidal figure is still saved for "Tissue".
                if k == "Tissue":
                    continue
                dat = np.array([self.results[k][case]["Height"],
                                self.results[k][case]["Width"],
                                self.results[k][case]["Depth"],
                                self.results[k][case]["Volume"]]).ravel()
                rects.append(ax2.bar(ind + j * width, dat, width,
                                     color=colors[j], label=case))
            ax2.set_ylabel('Green-Lagrange Strain')
            ax2.set_title(k)
            ax2.set_xticks(np.arange(4) + 0.4)
            ax2.set_xticklabels(['Height', 'Width', 'Depth', 'Volume'])
            ax2.axhline(y=0.0, color='k')
            fig2.savefig(os.path.join(pardir, "Ellipsoidal_{:s}_{:s}.svg".format(k, ts)))
            plt.close('all')
        figLegend1 = plt.figure()
        plt.figlegend(*ax.get_legend_handles_labels(), loc='upper left')
        figLegend1.savefig(os.path.join(pardir, "Kinematics_Legend_{:s}.svg".format(ts)))
        figLegend2 = plt.figure()
        # NOTE(review): this reuses `ax` (not `ax2`) for the Ellipsoidal
        # legend. The case labels/colors are the same on both axes, and ax2
        # may have no bars when the last object was "Tissue", so this may be
        # deliberate -- confirm before "fixing".
        plt.figlegend(*ax.get_legend_handles_labels(), loc='upper left')
        figLegend2.savefig(os.path.join(pardir, "Ellipsoidal_Legend_{:s}.svg".format(ts)))
        plt.close('all')
def open_smoothing_reference(self, *args):
webbrowser.open_new(self.smoothingLink)
def open_threshold_reference(self, *args):
webbrowser.open_new(self.thresholdLink)
def open_active_reference(self, *args):
webbrowser.open_new(self.activeLink)
    def update(self, *args):
        """No-op callback kept so Tk variable traces have a callable target.

        Tk's trace mechanism requires a function; nothing needs to happen
        when the traced variables change, so this intentionally does nothing.
        """
        pass
# --- Script entry point: build the Tk root window and launch the GUI ---
root = Tk()
text = Text(root)
# NOTE(review): `text` is created and given a font but is not packed into
# the window here; it appears unused below -- confirm before removing.
myFont = Font(family="Times New Roman", size=12)
text.configure(font=myFont)
root.title("Welcome to the pyCellAnalyst segmentation GUI.")
app = Application(root)
root.mainloop()
| [
"pickle.dump",
"webbrowser.open_new",
"numpy.abs",
"past.utils.old_div",
"tkinter.font.Font",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"builtins.range",
"matplotlib.pyplot.close",
"os.path.dirname",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"matplotlib.pyplot... | [((441, 462), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (455, 462), False, 'import matplotlib\n'), ((80100, 80139), 'tkinter.font.Font', 'Font', ([], {'family': '"""Times New Roman"""', 'size': '(12)'}), "(family='Times New Roman', size=12)\n", (80104, 80139), False, 'from tkinter.font import Font\n'), ((720, 731), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (729, 731), False, 'import os\n'), ((756, 770), 'tkinter.ttk.Notebook', 'Notebook', (['self'], {}), '(self)\n', (764, 770), False, 'from tkinter.ttk import Notebook\n'), ((67848, 67891), 'os.path.dirname', 'os.path.dirname', (['self.spatialDirectories[0]'], {}), '(self.spatialDirectories[0])\n', (67863, 67891), False, 'import os\n'), ((76206, 76249), 'os.path.dirname', 'os.path.dirname', (['self.spatialDirectories[0]'], {}), '(self.spatialDirectories[0])\n', (76221, 76249), False, 'import os\n'), ((79286, 79298), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (79296, 79298), True, 'import matplotlib.pyplot as plt\n'), ((79483, 79495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (79493, 79495), True, 'import matplotlib.pyplot as plt\n'), ((79668, 79684), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (79677, 79684), True, 'import matplotlib.pyplot as plt\n'), ((79741, 79780), 'webbrowser.open_new', 'webbrowser.open_new', (['self.smoothingLink'], {}), '(self.smoothingLink)\n', (79760, 79780), False, 'import webbrowser\n'), ((79837, 79876), 'webbrowser.open_new', 'webbrowser.open_new', (['self.thresholdLink'], {}), '(self.thresholdLink)\n', (79856, 79876), False, 'import webbrowser\n'), ((79930, 79966), 'webbrowser.open_new', 'webbrowser.open_new', (['self.activeLink'], {}), '(self.activeLink)\n', (79949, 79966), False, 'import webbrowser\n'), ((58872, 58896), 'copy.copy', 'copy.copy', (['self.settings'], {}), '(self.settings)\n', (58881, 58896), False, 'import copy\n'), ((59040, 59067), 'copy.copy', 
'copy.copy', (['self.intSettings'], {}), '(self.intSettings)\n', (59049, 59067), False, 'import copy\n'), ((59306, 59330), 'pickle.dump', 'pickle.dump', (['values', 'fid'], {}), '(values, fid)\n', (59317, 59330), False, 'import pickle\n'), ((59632, 59648), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (59643, 59648), False, 'import pickle\n'), ((60811, 60839), 'xlrd.open_workbook', 'xlrd.open_workbook', (['filename'], {}), '(filename)\n', (60829, 60839), False, 'import xlrd\n'), ((60963, 60971), 'builtins.range', 'range', (['N'], {}), '(N)\n', (60968, 60971), False, 'from builtins import range\n'), ((68207, 68220), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (68218, 68220), False, 'from collections import OrderedDict\n'), ((68258, 68271), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (68269, 68271), False, 'from collections import OrderedDict\n'), ((68301, 68314), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (68312, 68314), False, 'from collections import OrderedDict\n'), ((69600, 69623), 'numpy.any', 'np.any', (['mech.ecm_strain'], {}), '(mech.ecm_strain)\n', (69606, 69623), True, 'import numpy as np\n'), ((76489, 76512), 'numpy.zeros', 'np.zeros', (['(M, N)', 'float'], {}), '((M, N), float)\n', (76497, 76512), True, 'import numpy as np\n'), ((76643, 76657), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (76655, 76657), True, 'import matplotlib.pyplot as plt\n'), ((77016, 77030), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (77028, 77030), True, 'import matplotlib.pyplot as plt\n'), ((77151, 77163), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (77160, 77163), True, 'import numpy as np\n'), ((77224, 77243), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (77236, 77243), True, 'import matplotlib.pyplot as plt\n'), ((77311, 77348), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 
8})\n", (77330, 77348), True, 'import matplotlib.pyplot as plt\n'), ((78288, 78302), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (78300, 78302), True, 'import matplotlib.pyplot as plt\n'), ((79248, 79264), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (79257, 79264), True, 'import matplotlib.pyplot as plt\n'), ((59487, 59498), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (59496, 59498), False, 'import os\n'), ((60677, 60688), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (60686, 60688), False, 'import os\n'), ((61111, 61129), 'builtins.range', 'range', (['(s.nrows - 1)'], {}), '(s.nrows - 1)\n', (61116, 61129), False, 'from builtins import range\n'), ((69697, 69728), 'numpy.linalg.eigh', 'np.linalg.eigh', (['mech.ecm_strain'], {}), '(mech.ecm_strain)\n', (69711, 69728), True, 'import numpy as np\n'), ((69753, 69767), 'numpy.sort', 'np.sort', (['ecm_w'], {}), '(ecm_w)\n', (69760, 69767), True, 'import numpy as np\n'), ((71415, 71423), 'builtins.range', 'range', (['N'], {}), '(N)\n', (71420, 71423), False, 'from builtins import range\n'), ((72094, 72111), 'numpy.linalg.eigh', 'np.linalg.eigh', (['c'], {}), '(c)\n', (72108, 72111), True, 'import numpy as np\n'), ((72132, 72142), 'numpy.sort', 'np.sort', (['w'], {}), '(w)\n', (72139, 72142), True, 'import numpy as np\n'), ((74236, 74287), 'builtins.zip', 'zip', (['mech.rvols', 'mech.dvols', 'mech.raxes', 'mech.daxes'], {}), '(mech.rvols, mech.dvols, mech.raxes, mech.daxes)\n', (74239, 74287), False, 'from builtins import zip\n'), ((74314, 74328), 'numpy.sort', 'np.sort', (['raxes'], {}), '(raxes)\n', (74321, 74328), True, 'import numpy as np\n'), ((74353, 74367), 'numpy.sort', 'np.sort', (['daxes'], {}), '(daxes)\n', (74360, 74367), True, 'import numpy as np\n'), ((77277, 77297), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (77288, 77297), True, 'import numpy as np\n'), ((67788, 67799), 'time.time', 'time.time', ([], {}), '()\n', (67797, 67799), 
False, 'import time\n'), ((69893, 69920), 'numpy.abs', 'np.abs', (['(ecm_w[2] - ecm_w[0])'], {}), '(ecm_w[2] - ecm_w[0])\n', (69899, 69920), True, 'import numpy as np\n'), ((71049, 71193), 'collections.OrderedDict', 'OrderedDict', (["[('Max Compression', tissue_compressive), ('Max Tension', tissue_tensile),\n ('Max Shear', tissue_shear), ('Volume', tissue_vol)]"], {}), "([('Max Compression', tissue_compressive), ('Max Tension',\n tissue_tensile), ('Max Shear', tissue_shear), ('Volume', tissue_vol)])\n", (71060, 71193), False, 'from collections import OrderedDict\n'), ((71512, 71525), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (71523, 71525), False, 'from collections import OrderedDict\n'), ((78001, 78013), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (78010, 78013), True, 'import numpy as np\n'), ((79012, 79024), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (79021, 79024), True, 'import numpy as np\n'), ((70600, 70697), 'numpy.sqrt', 'np.sqrt', (['((ecm_w[2] - ecm_w[1]) ** 2 + (ecm_w[1] - ecm_w[0]) ** 2 + (ecm_w[2] -\n ecm_w[0]) ** 2)'], {}), '((ecm_w[2] - ecm_w[1]) ** 2 + (ecm_w[1] - ecm_w[0]) ** 2 + (ecm_w[2] -\n ecm_w[0]) ** 2)\n', (70607, 70697), True, 'import numpy as np\n'), ((72677, 72746), 'numpy.sqrt', 'np.sqrt', (['((w[2] - w[1]) ** 2 + (w[1] - w[0]) ** 2 + (w[2] - w[0]) ** 2)'], {}), '((w[2] - w[1]) ** 2 + (w[1] - w[0]) ** 2 + (w[2] - w[0]) ** 2)\n', (72684, 72746), True, 'import numpy as np\n'), ((73711, 73730), 'numpy.abs', 'np.abs', (['(w[2] - w[0])'], {}), '(w[2] - w[0])\n', (73717, 73730), True, 'import numpy as np\n'), ((75164, 75191), 'past.utils.old_div', 'old_div', (['daxes[0]', 'raxes[0]'], {}), '(daxes[0], raxes[0])\n', (75171, 75191), False, 'from past.utils import old_div\n'), ((75320, 75347), 'past.utils.old_div', 'old_div', (['daxes[1]', 'raxes[1]'], {}), '(daxes[1], raxes[1])\n', (75327, 75347), False, 'from past.utils import old_div\n'), ((75475, 75502), 'past.utils.old_div', 'old_div', (['daxes[2]', 
'raxes[2]'], {}), '(daxes[2], raxes[2])\n', (75482, 75502), False, 'from past.utils import old_div\n'), ((75631, 75658), 'past.utils.old_div', 'old_div', (['daxes[0]', 'raxes[0]'], {}), '(daxes[0], raxes[0])\n', (75638, 75658), False, 'from past.utils import old_div\n'), ((75787, 75814), 'past.utils.old_div', 'old_div', (['daxes[1]', 'raxes[1]'], {}), '(daxes[1], raxes[1])\n', (75794, 75814), False, 'from past.utils import old_div\n'), ((75942, 75969), 'past.utils.old_div', 'old_div', (['daxes[2]', 'raxes[2]'], {}), '(daxes[2], raxes[2])\n', (75949, 75969), False, 'from past.utils import old_div\n'), ((77501, 77670), 'numpy.array', 'np.array', (["[self.results[k][case]['Max Compression'], self.results[k][case][\n 'Max Tension'], self.results[k][case]['Max Shear'], self.results[k][\n case]['Volume']]"], {}), "([self.results[k][case]['Max Compression'], self.results[k][case][\n 'Max Tension'], self.results[k][case]['Max Shear'], self.results[k][\n case]['Volume']])\n", (77509, 77670), True, 'import numpy as np\n'), ((78526, 78670), 'numpy.array', 'np.array', (["[self.results[k][case]['Height'], self.results[k][case]['Width'], self.\n results[k][case]['Depth'], self.results[k][case]['Volume']]"], {}), "([self.results[k][case]['Height'], self.results[k][case]['Width'],\n self.results[k][case]['Depth'], self.results[k][case]['Volume']])\n", (78534, 78670), True, 'import numpy as np\n'), ((71676, 71694), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (71684, 71694), True, 'import numpy as np\n'), ((71733, 71751), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (71741, 71751), True, 'import numpy as np\n'), ((71788, 71806), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (71796, 71806), True, 'import numpy as np\n'), ((71840, 71858), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (71848, 71858), True, 'import numpy as np\n'), ((71892, 71910), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, 
float)\n', (71900, 71910), True, 'import numpy as np\n'), ((71943, 71961), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (71951, 71961), True, 'import numpy as np\n'), ((71994, 72012), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (72002, 72012), True, 'import numpy as np\n'), ((72957, 72976), 'numpy.abs', 'np.abs', (['(w[2] - w[0])'], {}), '(w[2] - w[0])\n', (72963, 72976), True, 'import numpy as np\n'), ((70015, 70024), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (70021, 70024), True, 'import numpy as np\n'), ((73294, 73313), 'numpy.abs', 'np.abs', (['(w[2] - w[0])'], {}), '(w[2] - w[0])\n', (73300, 73313), True, 'import numpy as np\n')] |
import pandas as pd
import torch
import math
import numpy as np
from statistics import mean
from sklearn.metrics import r2_score
from stewart_intro.train_nn import (
default_dt,
default_include_known_forcing,
)
from matplotlib import pyplot as plt
def get_weighted_r2_score(true, pred, data):
    """R^2 between flattened *true* and *pred*, weighted by layer mass.

    The per-level masses in ``data.layer_mass`` are repeated once per
    sample (row of *true*) and used as ``sample_weight`` for sklearn's
    ``r2_score``; assumes the repeated mass vector lines up with
    ``true.ravel()`` -- shapes are not checked here.
    """
    n_samples = true.shape[0]
    sample_weights = np.concatenate([data.layer_mass.values] * n_samples)
    return r2_score(true.ravel(), pred.ravel(), sample_weight=sample_weights)
def get_diagnostic_r2_score(
        mlp,
        data,
        normalization_dict,
        dt=default_dt,
        include_known_forcing=default_include_known_forcing):
    """Print mass-weighted R^2 scores for the network's Q1/Q2 diagnostics.

    Samples 10x10 random (x, y) columns and, for each, 6 random start
    times, then rolls the model forward ``time_steps`` steps comparing the
    network output against the tendencies implied by the data. Results are
    printed, not returned.
    """
    time_steps = 50
    q1_r2s = []
    q2_r2s = []
    # Fixed seed so the sampled columns/times are reproducible.
    np.random.seed(33)
    for x in np.random.choice(data.x.values, 10):
        for y in np.random.choice(data.y.values, 10):
            # Only start times that leave room for the full rollout.
            times = np.random.choice(
                data.time.values[data.time.values < data.time.values[
                    -(time_steps * int(dt / default_dt))]], 6)
            data_to_select = {'x': x, 'y': y, 'time': times}
            q1_true = []
            q2_true = []
            q1_pred = []
            q2_pred = []
            last_true_state = np.hstack(
                data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
            fqt_fsli = np.hstack(
                data.sel(data_to_select)[['FQT', 'FSLI']].to_array().values)
            for idx in range(time_steps):
                # Network input: normalized [QT, SLI] stacked with the
                # normalized surface forcings.
                to_predict_from = torch.tensor(np.hstack([
                    (np.hstack(data.sel(data_to_select)[
                        ['QT', 'SLI']].to_array().values) -
                        normalization_dict['qt_sli']['mean']) /
                    normalization_dict['qt_sli']['sd'],
                    data.sel(data_to_select)[
                        ['LHF_normalized',
                         'SHF_normalized',
                         'SOLIN_normalized']].to_array().values.T
                ]))
                nn_output = mlp.forward(to_predict_from)
                # Columns split at 34: first 34 -> Q2 (moisture), rest ->
                # Q1 (energy). Presumably 34 = number of vertical levels --
                # TODO confirm against the model definition.
                q1_pred.append(nn_output.detach().numpy()[:, 34:])
                q2_pred.append(nn_output.detach().numpy()[:, :34])
                data_to_select['time'] += dt
                true_state = np.hstack(
                    data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
                if include_known_forcing:
                    # NOTE(review): this branch is not divided by
                    # (dt * 86400) while the else branch is -- presumably
                    # the two modes use different target units; confirm
                    # against the training code before changing.
                    q_true = (true_state - last_true_state - (
                        dt * 86400 * fqt_fsli))
                    fqt_fsli = np.hstack(data.sel(
                        data_to_select)[['FQT', 'FSLI']].to_array().values)
                else:
                    q_true = (true_state - last_true_state) / (dt * 86400)
                q1_true.append(q_true[:, 34:])
                q2_true.append(q_true[:, :34])
                last_true_state = true_state
            q1_true = np.vstack(q1_true)
            q2_true = np.vstack(q2_true)
            q1_pred = np.vstack(q1_pred)
            q2_pred = np.vstack(q2_pred)
            q1_r2 = get_weighted_r2_score(q1_true, q1_pred, data)
            q2_r2 = get_weighted_r2_score(q2_true, q2_pred, data)
            q1_r2s.append(q1_r2)
            q2_r2s.append(q2_r2)
    print(f'Q1 R2: {mean(q1_r2s)}')
    print(f'Q2 R2: {mean(q2_r2s)}')
def draw_histogram(values, bins=40, x_min=None, x_max=None,
x_label='', y_label='Counts', title='',
figsize=None,
show=True, save_to_filepath=None):
if x_max is None:
x_max = max(values)
if x_min is None:
x_min = min(values)
if figsize is not None:
plt.figure(figsize=figsize)
n_, bins, patches = plt.hist(values, bins=bins)
plt.plot(bins)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.axis([x_min, x_max, 0, max(n_)])
plt.grid(True)
if save_to_filepath:
plt.savefig(save_to_filepath)
if show:
plt.show()
plt.close()
def plot_residuals_and_get_correlation_matrix(
mlp,
data,
batches,
layer_mass,
normalization_dict,
dt,
n_time_steps,
include_known_forcing):
time_steps = 50
np.random.seed(33)
q1_trues = []
q1_preds = []
q2_trues = []
q2_preds = []
for x in np.random.choice(data.x.values, 10):
for y in np.random.choice(data.y.values, 10):
times = np.random.choice(
data.time.values[data.time.values < data.time.values[
-(time_steps * int(dt / default_dt))]], 6)
data_to_select = {'x': x, 'y': y, 'time': times}
q1_true = []
q2_true = []
q1_pred = []
q2_pred = []
last_true_state = np.hstack(
data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
fqt_fsli = np.hstack(
data.sel(data_to_select)[['FQT', 'FSLI']].to_array().values)
for idx in range(time_steps):
to_predict_from = torch.tensor(np.hstack([
(np.hstack(data.sel(data_to_select)[
['QT', 'SLI']].to_array().values) -
normalization_dict['qt_sli']['mean']) /
normalization_dict['qt_sli']['sd'],
data.sel(data_to_select)[
['LHF_normalized',
'SHF_normalized',
'SOLIN_normalized']].to_array().values.T
]))
nn_output = mlp.forward(to_predict_from)
q1_pred.append(nn_output.detach().numpy()[:, 34:])
q2_pred.append(nn_output.detach().numpy()[:, :34])
data_to_select['time'] += dt
true_state = np.hstack(
data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
if include_known_forcing:
q_true = (true_state - last_true_state - (
dt * 86400 * fqt_fsli))
fqt_fsli = np.hstack(data.sel(
data_to_select)[['FQT', 'FSLI']].to_array().values)
else:
q_true = (true_state - last_true_state) / (dt * 86400)
q1_true.append(q_true[:, 34:])
q2_true.append(q_true[:, :34])
last_true_state = true_state
q1_true = np.vstack(q1_true)
q2_true = np.vstack(q2_true)
q1_pred = np.vstack(q1_pred)
q2_pred = np.vstack(q2_pred)
if len(q1_trues):
q1_trues = np.concatenate([q1_trues, q1_true])
q2_trues = np.concatenate([q2_trues, q2_true])
q1_preds = np.concatenate([q1_preds, q1_pred])
q2_preds = np.concatenate([q2_preds, q2_pred])
else:
q1_trues = q1_true
q2_trues = q2_true
q1_preds = q1_pred
q2_preds = q2_pred
q1_residuals = q1_preds - q1_trues
q2_residuals = q2_preds - q2_trues
for name, residuals in [('Q1', q1_residuals), ('Q2', q2_residuals)]:
for i in range(34):
data = residuals[:, i]
draw_histogram(
data, x_label='Residual', y_label='Count',
title=f'{name} Residuals for the Z-level {i+1}',
show=False,
save_to_filepath=f'/Users/stewart/Desktop/{name}_{i}_residuals.png')
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(
pd.DataFrame(q1_residuals).corr(), interpolation='nearest')
fig.colorbar(cax)
plt.title('Q1 Correlation Matrix')
plt.savefig('/Users/stewart/Desktop/q1_correlation_matrix.png')
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(
pd.DataFrame(q2_residuals).corr(), interpolation='nearest')
fig.colorbar(cax)
plt.title('Q2 Correlation Matrix')
plt.savefig('/Users/stewart/Desktop/q2_correlation_matrix.png')
def plot_q_vs_nn_output(
mlp,
data,
normalization_dict,
save_location_format_str=None,
dt=default_dt,
include_known_forcing=default_include_known_forcing):
time_steps = 50
x = data.x.values[35]
y = data.y.values[3]
time = data.time.values[20]
data_to_select = {'x': x, 'y': y, 'time': time}
q1_true = []
q2_true = []
q1_pred = []
q2_pred = []
last_true_state = np.hstack(
data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
fqt_fsli = np.hstack(
data.sel(data_to_select)[['FQT', 'FSLI']].to_array().values)
for idx in range(time_steps):
to_predict_from = torch.tensor(np.hstack([
(np.hstack(data.sel(data_to_select)[
['QT', 'SLI']].to_array().values) -
normalization_dict['qt_sli']['mean']) /
normalization_dict['qt_sli']['sd'],
data.sel(data_to_select)[
['LHF_normalized',
'SHF_normalized',
'SOLIN_normalized']].to_array().values.T
]))
nn_output = mlp.forward(to_predict_from)
q1_pred.append(nn_output[34:].detach().numpy())
q2_pred.append(nn_output[:34].detach().numpy())
data_to_select['time'] += dt
true_state = np.hstack(
data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
if include_known_forcing:
q_true = (true_state - last_true_state - (
dt * 86400 * fqt_fsli))
fqt_fsli = np.hstack(data.sel(
data_to_select)[['FQT', 'FSLI']].to_array().values)
else:
q_true = (true_state - last_true_state) / (dt * 86400)
q1_true.append(q_true[34:])
q2_true.append(q_true[:34])
last_true_state = true_state
q1_true = np.stack(q1_true)
q2_true = np.stack(q2_true)
q1_pred = np.stack(q1_pred)
q2_pred = np.stack(q2_pred)
# q1_r2 = get_weighted_r2_score(q1_true, q1_pred, data)
# print(f'Q1 Weighted R2 score: {q1_r2}')
# q2_r2 = get_weighted_r2_score(q2_true, q2_pred, data)
# print(f'Q2 Weighted R2 score: {q2_r2}')
vmin = math.floor(min(q1_true.min(), q1_pred.min()))
vmax = math.ceil(max(q1_true.max(), q1_pred.max())) + 1
fig, axs = plt.subplots(2, 1)
ax0 = axs[0].contourf(q1_true.T, vmin=vmin, vmax=vmax)
axs[1].contourf(q1_pred.T, vmin=vmin, vmax=vmax)
axs[0].set_title('True Normalized Q1')
axs[0].set_xlabel('Time')
axs[0].set_ylabel('Z')
axs[1].set_title('Predicted Normalized Q1')
axs[1].set_xlabel('Time')
axs[1].set_ylabel('Z')
plt.subplots_adjust(hspace=0.7)
fig.colorbar(ax0, ax=axs.ravel().tolist())
if save_location_format_str:
plt.savefig(save_location_format_str.format('Q1'))
else:
plt.show()
vmin = math.floor(min(q2_true.min(), q2_pred.min()))
vmax = math.ceil(max(q2_true.max(), q2_pred.max())) + 1
fig, axs = plt.subplots(2, 1)
ax0 = axs[0].contourf(q2_true.T, vmin=vmin, vmax=vmax)
axs[1].contourf(q2_pred.T, vmin=vmin, vmax=vmax)
axs[0].set_title('True Normalized Q2')
axs[0].set_xlabel('Time')
axs[0].set_ylabel('Z')
axs[1].set_title('Predicted Normalized Q2')
axs[1].set_xlabel('Time')
axs[1].set_ylabel('Z')
plt.subplots_adjust(hspace=0.7)
fig.colorbar(ax0, ax=axs.ravel().tolist())
if save_location_format_str:
plt.savefig(save_location_format_str.format('Q2'))
else:
plt.show()
def plot_model_error_output(
mlp,
data,
normalization_dict,
save_location_format_str=None,
dt=default_dt,
include_known_forcing=default_include_known_forcing):
time_steps = 50
x = data.x.values[35]
y = data.y.values[3]
time = data.time.values[20]
data_to_select = {'x': x, 'y': y, 'time': time}
qt_true = []
sli_true = []
qt_pred = []
sli_pred = []
prediction = np.hstack(
data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
fqt_fsli = np.hstack(
data.sel(data_to_select)[['FQT', 'FSLI']].to_array().values)
for idx in range(time_steps):
to_predict_from = torch.tensor(np.hstack([
(np.hstack(data.sel(data_to_select)[
['QT', 'SLI']].to_array().values) -
normalization_dict['qt_sli']['mean']) /
normalization_dict['qt_sli']['sd'],
data.sel(data_to_select)[
['LHF_normalized',
'SHF_normalized',
'SOLIN_normalized']].to_array().values.T
]))
nn_output = mlp.forward(to_predict_from).detach().numpy()
if include_known_forcing:
prediction = prediction + (dt * 86400 * fqt_fsli) + nn_output
else:
prediction = prediction + nn_output
qt_pred.append(prediction[:34])
sli_pred.append(prediction[34:])
data_to_select['time'] += dt
fqt_fsli = np.hstack(data.sel(
data_to_select)[['FQT', 'FSLI']].to_array().values)
true_state = np.hstack(
data.sel(data_to_select)[['QT', 'SLI']].to_array().values)
qt_true.append(true_state[:34])
sli_true.append(true_state[34:])
qt_true = np.stack(qt_true)
sli_true = np.stack(sli_true)
qt_pred = np.stack(qt_pred)
sli_pred = np.stack(sli_pred)
# qt_r2 = get_weighted_r2_score(qt_true, qt_pred, data)
# print(f'qt Weighted R2 score: {qt_r2}')
# sli_r2 = get_weighted_r2_score(sli_true, sli_pred, data)
# print(f'sli Weighted R2 score: {sli_r2}')
vmin = math.floor(min(qt_true.min(), qt_pred.min()))
vmax = math.ceil(max(qt_true.max(), qt_pred.max())) + 1
fig, axs = plt.subplots(2, 1)
ax0 = axs[0].contourf(qt_true.T, vmin=vmin, vmax=vmax)
axs[1].contourf(qt_pred.T, vmin=vmin, vmax=vmax)
axs[0].set_title('True QT')
axs[0].set_xlabel('Time')
axs[0].set_ylabel('Z')
axs[1].set_title('Predicted QT')
axs[1].set_xlabel('Time')
axs[1].set_ylabel('Z')
plt.subplots_adjust(hspace=0.7)
fig.colorbar(ax0, ax=axs.ravel().tolist())
if save_location_format_str:
plt.savefig(save_location_format_str.format('Q1'))
else:
plt.show()
vmin = math.floor(min(sli_true.min(), sli_pred.min()))
vmax = math.ceil(max(sli_true.max(), sli_pred.max())) + 1
fig, axs = plt.subplots(2, 1)
ax0 = axs[0].contourf(sli_true.T, vmin=vmin, vmax=vmax)
axs[1].contourf(sli_pred.T, vmin=vmin, vmax=vmax)
axs[0].set_title('True SLI')
axs[0].set_xlabel('Time')
axs[0].set_ylabel('Z')
axs[1].set_title('Predicted SLI')
axs[1].set_xlabel('Time')
axs[1].set_ylabel('Z')
plt.subplots_adjust(hspace=0.7)
fig.colorbar(ax0, ax=axs.ravel().tolist())
if save_location_format_str:
plt.savefig(save_location_format_str.format('Q2'))
else:
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.random.choice",
"matplotlib.pyplot.subplots",
"numpy.stack",
"matplotlib.pyplot.show",
"statistics.mean",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.yl... | [((317, 373), 'numpy.concatenate', 'np.concatenate', (['([data.layer_mass.values] * true.shape[0])'], {}), '([data.layer_mass.values] * true.shape[0])\n', (331, 373), True, 'import numpy as np\n'), ((672, 690), 'numpy.random.seed', 'np.random.seed', (['(33)'], {}), '(33)\n', (686, 690), True, 'import numpy as np\n'), ((704, 739), 'numpy.random.choice', 'np.random.choice', (['data.x.values', '(10)'], {}), '(data.x.values, 10)\n', (720, 739), True, 'import numpy as np\n'), ((3617, 3644), 'matplotlib.pyplot.hist', 'plt.hist', (['values'], {'bins': 'bins'}), '(values, bins=bins)\n', (3625, 3644), True, 'from matplotlib import pyplot as plt\n'), ((3649, 3663), 'matplotlib.pyplot.plot', 'plt.plot', (['bins'], {}), '(bins)\n', (3657, 3663), True, 'from matplotlib import pyplot as plt\n'), ((3668, 3687), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3678, 3687), True, 'from matplotlib import pyplot as plt\n'), ((3692, 3711), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3702, 3711), True, 'from matplotlib import pyplot as plt\n'), ((3716, 3732), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3725, 3732), True, 'from matplotlib import pyplot as plt\n'), ((3778, 3792), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3786, 3792), True, 'from matplotlib import pyplot as plt\n'), ((3892, 3903), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3901, 3903), True, 'from matplotlib import pyplot as plt\n'), ((4135, 4153), 'numpy.random.seed', 'np.random.seed', (['(33)'], {}), '(33)\n', (4149, 4153), True, 'import numpy as np\n'), ((4239, 4274), 'numpy.random.choice', 'np.random.choice', (['data.x.values', '(10)'], {}), '(data.x.values, 10)\n', (4255, 4274), True, 'import numpy as np\n'), ((7416, 7428), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7426, 7428), True, 'from matplotlib import pyplot as plt\n'), ((7575, 7609), 
'matplotlib.pyplot.title', 'plt.title', (['"""Q1 Correlation Matrix"""'], {}), "('Q1 Correlation Matrix')\n", (7584, 7609), True, 'from matplotlib import pyplot as plt\n'), ((7614, 7677), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/stewart/Desktop/q1_correlation_matrix.png"""'], {}), "('/Users/stewart/Desktop/q1_correlation_matrix.png')\n", (7625, 7677), True, 'from matplotlib import pyplot as plt\n'), ((7689, 7701), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7699, 7701), True, 'from matplotlib import pyplot as plt\n'), ((7848, 7882), 'matplotlib.pyplot.title', 'plt.title', (['"""Q2 Correlation Matrix"""'], {}), "('Q2 Correlation Matrix')\n", (7857, 7882), True, 'from matplotlib import pyplot as plt\n'), ((7887, 7950), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/stewart/Desktop/q2_correlation_matrix.png"""'], {}), "('/Users/stewart/Desktop/q2_correlation_matrix.png')\n", (7898, 7950), True, 'from matplotlib import pyplot as plt\n'), ((9788, 9805), 'numpy.stack', 'np.stack', (['q1_true'], {}), '(q1_true)\n', (9796, 9805), True, 'import numpy as np\n'), ((9820, 9837), 'numpy.stack', 'np.stack', (['q2_true'], {}), '(q2_true)\n', (9828, 9837), True, 'import numpy as np\n'), ((9852, 9869), 'numpy.stack', 'np.stack', (['q1_pred'], {}), '(q1_pred)\n', (9860, 9869), True, 'import numpy as np\n'), ((9884, 9901), 'numpy.stack', 'np.stack', (['q2_pred'], {}), '(q2_pred)\n', (9892, 9901), True, 'import numpy as np\n'), ((10246, 10264), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (10258, 10264), True, 'from matplotlib import pyplot as plt\n'), ((10586, 10617), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.7)'}), '(hspace=0.7)\n', (10605, 10617), True, 'from matplotlib import pyplot as plt\n'), ((10919, 10937), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (10931, 10937), True, 'from matplotlib import pyplot as plt\n'), ((11259, 
11290), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.7)'}), '(hspace=0.7)\n', (11278, 11290), True, 'from matplotlib import pyplot as plt\n'), ((13207, 13224), 'numpy.stack', 'np.stack', (['qt_true'], {}), '(qt_true)\n', (13215, 13224), True, 'import numpy as np\n'), ((13240, 13258), 'numpy.stack', 'np.stack', (['sli_true'], {}), '(sli_true)\n', (13248, 13258), True, 'import numpy as np\n'), ((13273, 13290), 'numpy.stack', 'np.stack', (['qt_pred'], {}), '(qt_pred)\n', (13281, 13290), True, 'import numpy as np\n'), ((13306, 13324), 'numpy.stack', 'np.stack', (['sli_pred'], {}), '(sli_pred)\n', (13314, 13324), True, 'import numpy as np\n'), ((13674, 13692), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (13686, 13692), True, 'from matplotlib import pyplot as plt\n'), ((13992, 14023), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.7)'}), '(hspace=0.7)\n', (14011, 14023), True, 'from matplotlib import pyplot as plt\n'), ((14329, 14347), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (14341, 14347), True, 'from matplotlib import pyplot as plt\n'), ((14651, 14682), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.7)'}), '(hspace=0.7)\n', (14670, 14682), True, 'from matplotlib import pyplot as plt\n'), ((758, 793), 'numpy.random.choice', 'np.random.choice', (['data.y.values', '(10)'], {}), '(data.y.values, 10)\n', (774, 793), True, 'import numpy as np\n'), ((3565, 3592), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3575, 3592), True, 'from matplotlib import pyplot as plt\n'), ((3826, 3855), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_to_filepath'], {}), '(save_to_filepath)\n', (3837, 3855), True, 'from matplotlib import pyplot as plt\n'), ((3877, 3887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3885, 3887), True, 'from matplotlib import 
pyplot as plt\n'), ((4293, 4328), 'numpy.random.choice', 'np.random.choice', (['data.y.values', '(10)'], {}), '(data.y.values, 10)\n', (4309, 4328), True, 'import numpy as np\n'), ((10775, 10785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10783, 10785), True, 'from matplotlib import pyplot as plt\n'), ((11448, 11458), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11456, 11458), True, 'from matplotlib import pyplot as plt\n'), ((14181, 14191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14189, 14191), True, 'from matplotlib import pyplot as plt\n'), ((14840, 14850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14848, 14850), True, 'from matplotlib import pyplot as plt\n'), ((2809, 2827), 'numpy.vstack', 'np.vstack', (['q1_true'], {}), '(q1_true)\n', (2818, 2827), True, 'import numpy as np\n'), ((2850, 2868), 'numpy.vstack', 'np.vstack', (['q2_true'], {}), '(q2_true)\n', (2859, 2868), True, 'import numpy as np\n'), ((2891, 2909), 'numpy.vstack', 'np.vstack', (['q1_pred'], {}), '(q1_pred)\n', (2900, 2909), True, 'import numpy as np\n'), ((2932, 2950), 'numpy.vstack', 'np.vstack', (['q2_pred'], {}), '(q2_pred)\n', (2941, 2950), True, 'import numpy as np\n'), ((6344, 6362), 'numpy.vstack', 'np.vstack', (['q1_true'], {}), '(q1_true)\n', (6353, 6362), True, 'import numpy as np\n'), ((6385, 6403), 'numpy.vstack', 'np.vstack', (['q2_true'], {}), '(q2_true)\n', (6394, 6403), True, 'import numpy as np\n'), ((6426, 6444), 'numpy.vstack', 'np.vstack', (['q1_pred'], {}), '(q1_pred)\n', (6435, 6444), True, 'import numpy as np\n'), ((6467, 6485), 'numpy.vstack', 'np.vstack', (['q2_pred'], {}), '(q2_pred)\n', (6476, 6485), True, 'import numpy as np\n'), ((3169, 3181), 'statistics.mean', 'mean', (['q1_r2s'], {}), '(q1_r2s)\n', (3173, 3181), False, 'from statistics import mean\n'), ((3205, 3217), 'statistics.mean', 'mean', (['q2_r2s'], {}), '(q2_r2s)\n', (3209, 3217), False, 'from statistics import mean\n'), ((6543, 6578), 
'numpy.concatenate', 'np.concatenate', (['[q1_trues, q1_true]'], {}), '([q1_trues, q1_true])\n', (6557, 6578), True, 'import numpy as np\n'), ((6606, 6641), 'numpy.concatenate', 'np.concatenate', (['[q2_trues, q2_true]'], {}), '([q2_trues, q2_true])\n', (6620, 6641), True, 'import numpy as np\n'), ((6669, 6704), 'numpy.concatenate', 'np.concatenate', (['[q1_preds, q1_pred]'], {}), '([q1_preds, q1_pred])\n', (6683, 6704), True, 'import numpy as np\n'), ((6732, 6767), 'numpy.concatenate', 'np.concatenate', (['[q2_preds, q2_pred]'], {}), '([q2_preds, q2_pred])\n', (6746, 6767), True, 'import numpy as np\n'), ((7489, 7515), 'pandas.DataFrame', 'pd.DataFrame', (['q1_residuals'], {}), '(q1_residuals)\n', (7501, 7515), True, 'import pandas as pd\n'), ((7762, 7788), 'pandas.DataFrame', 'pd.DataFrame', (['q2_residuals'], {}), '(q2_residuals)\n', (7774, 7788), True, 'import pandas as pd\n')] |
import os.path as osp
import os.path as osp
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.utils import data
from torchvision import transforms
from utils.transform import FixScaleRandomCropWH
class densepassDataSet(data.Dataset):
def __init__(self, root, list_path, max_iters=None, crop_size=(1024, 200),
mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255, set='val', ssl_dir='', trans='resize'):
self.root = root
self.list_path = list_path
self.crop_size = crop_size
self.scale = scale
self.ignore_label = ignore_label
self.mean = mean
self.is_mirror = mirror
self.ssl_dir = ssl_dir
self.img_ids = [i_id.strip() for i_id in open(list_path)]
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
self.set = set
self.trans = trans
for name in self.img_ids:
img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name))
self.files.append({
"img": img_file,
"name": name
})
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = Image.open(datafiles["img"]).convert('RGB')
name = datafiles["name"]
if self.trans == 'resize':
# resize
image = image.resize(self.crop_size, Image.BICUBIC)
elif self.trans == 'FixScaleRandomCropWH':
# resize, keep ratio
image = FixScaleRandomCropWH(self.crop_size, is_label=False)(image)
else:
raise NotImplementedError
size = np.asarray(image, np.float32).shape
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
])
image = input_transform(image)
if len(self.ssl_dir)>0:
label = Image.open(osp.join(self.ssl_dir, name.replace('.png', '_labelTrainIds.png')))
if self.trans == 'resize':
# resize
label = label.resize(self.crop_size, Image.NEAREST)
elif self.trans == 'FixScaleRandomCropWH':
# resize, keep ratio
label = FixScaleRandomCropWH(self.crop_size, is_label=True)(label)
else:
raise NotImplementedError
label = torch.LongTensor(np.array(label).astype('int32'))
return image, label, np.array(size), name
return image, np.array(size), name
class densepassTestDataSet(data.Dataset):
def __init__(self, root, list_path, max_iters=None, crop_size=(2048, 400), mean=(128, 128, 128),
scale=False, mirror=False, ignore_label=255, set='val'):
self.root = root
self.list_path = list_path
self.crop_size = crop_size
self.scale = scale
self.ignore_label = ignore_label
self.mean = mean
self.is_mirror = mirror
self.set = set
self.img_ids = [i_id.strip() for i_id in open(list_path)]
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
for name in self.img_ids:
img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name))
lbname = name.replace("_.png", "_labelTrainIds.png")
label_file = osp.join(self.root, "gtFine/%s/%s" % (self.set, lbname))
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = Image.open(datafiles["img"]).convert('RGB')
label = Image.open(datafiles["label"])
name = datafiles["name"]
# resize
image = image.resize(self.crop_size, Image.BICUBIC)
label = label.resize(self.crop_size, Image.NEAREST)
size = np.asarray(image).shape
#
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
])
image = input_transform(image)
label = torch.LongTensor(np.array(label).astype('int32'))
return image, label, np.array(size), name
if __name__ == '__main__':
dst = densepassTestDataSet("data/DensePASS_train_pseudo_val", 'dataset/densepass_list/val.txt', mean=(0,0,0))
trainloader = data.DataLoader(dst, batch_size=4)
for i, data in enumerate(trainloader):
imgs, labels, *args = data
if i == 0:
img = torchvision.utils.make_grid(imgs).numpy()
img = np.transpose(img, (1, 2, 0))
img = img[:, :, ::-1]
img = Image.fromarray(np.uint8(img) )
img.show()
break
| [
"numpy.uint8",
"utils.transform.FixScaleRandomCropWH",
"torch.utils.data.DataLoader",
"numpy.asarray",
"numpy.transpose",
"PIL.Image.open",
"torchvision.utils.make_grid",
"numpy.array",
"torchvision.transforms.Normalize",
"os.path.join",
"torchvision.transforms.ToTensor"
] | [((4728, 4762), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dst'], {'batch_size': '(4)'}), '(dst, batch_size=4)\n', (4743, 4762), False, 'from torch.utils import data\n'), ((3994, 4024), 'PIL.Image.open', 'Image.open', (["datafiles['label']"], {}), "(datafiles['label'])\n", (4004, 4024), False, 'from PIL import Image\n'), ((1047, 1106), 'os.path.join', 'osp.join', (['self.root', "('leftImg8bit/%s/%s' % (self.set, name))"], {}), "(self.root, 'leftImg8bit/%s/%s' % (self.set, name))\n", (1055, 1106), True, 'import os.path as osp\n'), ((1792, 1821), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (1802, 1821), True, 'import numpy as np\n'), ((2681, 2695), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (2689, 2695), True, 'import numpy as np\n'), ((3435, 3494), 'os.path.join', 'osp.join', (['self.root', "('leftImg8bit/%s/%s' % (self.set, name))"], {}), "(self.root, 'leftImg8bit/%s/%s' % (self.set, name))\n", (3443, 3494), True, 'import os.path as osp\n'), ((3585, 3641), 'os.path.join', 'osp.join', (['self.root', "('gtFine/%s/%s' % (self.set, lbname))"], {}), "(self.root, 'gtFine/%s/%s' % (self.set, lbname))\n", (3593, 3641), True, 'import os.path as osp\n'), ((4210, 4227), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (4220, 4227), True, 'import numpy as np\n'), ((4546, 4560), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (4554, 4560), True, 'import numpy as np\n'), ((4938, 4966), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (4950, 4966), True, 'import numpy as np\n'), ((1361, 1389), 'PIL.Image.open', 'Image.open', (["datafiles['img']"], {}), "(datafiles['img'])\n", (1371, 1389), False, 'from PIL import Image\n'), ((1888, 1909), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1907, 1909), False, 'from torchvision import transforms\n'), ((1923, 1989), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 
0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (1943, 1989), False, 'from torchvision import transforms\n'), ((2637, 2651), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (2645, 2651), True, 'import numpy as np\n'), ((3934, 3962), 'PIL.Image.open', 'Image.open', (["datafiles['img']"], {}), "(datafiles['img'])\n", (3944, 3962), False, 'from PIL import Image\n'), ((4303, 4324), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4322, 4324), False, 'from torchvision import transforms\n'), ((4338, 4404), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (4358, 4404), False, 'from torchvision import transforms\n'), ((5035, 5048), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (5043, 5048), True, 'import numpy as np\n'), ((1663, 1715), 'utils.transform.FixScaleRandomCropWH', 'FixScaleRandomCropWH', (['self.crop_size'], {'is_label': '(False)'}), '(self.crop_size, is_label=False)\n', (1683, 1715), False, 'from utils.transform import FixScaleRandomCropWH\n'), ((4484, 4499), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (4492, 4499), True, 'import numpy as np\n'), ((4878, 4911), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['imgs'], {}), '(imgs)\n', (4905, 4911), False, 'import torchvision\n'), ((2415, 2466), 'utils.transform.FixScaleRandomCropWH', 'FixScaleRandomCropWH', (['self.crop_size'], {'is_label': '(True)'}), '(self.crop_size, is_label=True)\n', (2435, 2466), False, 'from utils.transform import FixScaleRandomCropWH\n'), ((2571, 2586), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2579, 2586), True, 'import numpy as np\n')] |
from typing import List, Optional, Dict
import datetime
import numpy as np
from pydantic import Field, validator
from .base import Model
from .atom import Atom
from .parameter import Bond, Angle, Dihedral, Improper, Ring
from .utils import require_package
class Molecule(Model):
"""ATB Molecule"""
molid: int
name: Optional[str] = None
iupac: Optional[str] = None
residue_name: str = Field(alias="rnme")
topology_hash: str = ""
total_charge: int = 0
total_qm_charge: float = 0
update_ifp: bool = False
use_CH1_united_atom_for_double_bonds: bool = False
use_charge_assign_charge_model: bool = True
run_charge_group_partitioning: bool = False
enforce_unique_atom_names: bool = True
dipole: np.ndarray
scale_manual_charges: bool = True
symmetrize_charges: bool = Field(default=True, alias="symmetrise_charges")
sp_timeout: Optional[int] = None
select_output_atom_names: Optional[Dict[int, str]] = None
ff_version: str
charge_assign_error: str
atom_order_template: Optional[str] = None
amino_acid_building_block: bool = False
new_parameter_added: bool = False
additional_info_lines: List[str] = []
VERSION: str
REV_DATE: str
A_born_energy: float
E_born_energy: float
qm_level: int = 1
qm0_method: str
qm_1_ESP: np.ndarray
manual_charges: Optional[List[float]] = None
has_manual_charges: bool = False
volume: float
atoms: List[Atom] = []
bonds: List[Bond] = []
angles: List[Angle] = []
dihedrals: List[Dihedral] = []
impropers: List[Improper] = []
rings: List[Ring] = []
@validator("qm_1_ESP", "dipole", pre=True)
def _to_numpy_array(cls, v):
v = np.asarray(v)
return v
@validator("atoms", "rings", pre=True)
def _to_list(cls, v):
if isinstance(v, dict):
v = [v[x] for x in sorted(v)]
return v
@classmethod
def from_atb_dict(cls, v):
v = dict(v)
dct = v.pop("var", {})
v.pop("_dihedrals", None)
dct.update(v)
return cls(**dct)
def to_rdkit(self):
require_package("rdkit")
from rdkit import Chem
rdmol = Chem.RWMol()
atbatoms = sorted(self.atoms, key=lambda x: x.input_id)
rdconf = Chem.Conformer(len(atbatoms))
optimized_coordinates = []
id_to_index = {}
for i, atbatom in enumerate(atbatoms):
rdatom = Chem.Atom(atbatom.element.symbol)
rdatom.SetIsAromatic(atbatom.is_aromatic)
rdatom.SetFormalCharge(atbatom.formal_charge)
rdatom.SetAtomMapNum(atbatom.atomistic_output_id)
# coord: original, nm
original = atbatom.original_coordinate * 10
rdatom.SetDoubleProp("original_x", original[0])
rdatom.SetDoubleProp("original_y", original[1])
rdatom.SetDoubleProp("original_z", original[2])
# ocoord: optimized, nm
optimized = atbatom.optimized_coordinate * 10
rdatom.SetDoubleProp("optimized_x", optimized[0])
rdatom.SetDoubleProp("optimized_y", optimized[1])
rdatom.SetDoubleProp("optimized_z", optimized[2])
rdatom.SetBoolProp("is_united", atbatom.is_united)
optimized_coordinates.append(optimized)
rdconf.SetAtomPosition(i, original)
rdmol.AddAtom(rdatom)
id_to_index[atbatom.input_id] = i
BONDTYPES = {
1.0: Chem.BondType.SINGLE,
1.5: Chem.BondType.AROMATIC,
2.0: Chem.BondType.DOUBLE,
3.0: Chem.BondType.TRIPLE,
}
for bond in self.bonds:
i = id_to_index[bond.atomistic_atom_ids[0]]
j = id_to_index[bond.atomistic_atom_ids[1]]
index = rdmol.AddBond(i, j, BONDTYPES[bond.order]) - 1
rdbond = rdmol.GetBondWithIdx(index)
rdbond.SetIsAromatic(bond.is_aromatic)
rdbond.SetBoolProp("is_united", bond.is_united)
Chem.SanitizeMol(rdmol)
return Chem.Mol(rdmol)
def to_mdanalysis(self, united: bool = False, optimized: bool = True):
require_package("MDAnalysis")
import MDAnalysis as mda
from .mdanalysis import (
PartialCharge,
United,
OutputAtomisticID,
OutputUnitedID,
)
atoms = self.atoms
if united:
atoms = [atom for atom in atoms if atom.get_united_ljsym()]
u = mda.Universe.empty(len(atoms), trajectory=True)
for attr in (
"names",
"resnames",
"elements",
"types",
"charges",
"partial_charges",
"united",
"aromaticities",
"ids",
"output_atomistic_ids",
"output_united_ids",
"masses",
):
u.add_TopologyAttr(attr)
id_to_index = {}
for i, (atbatom, mdaatom) in enumerate(zip(atoms, u.atoms)):
mdaatom.name = atbatom.name
mdaatom.element = atbatom.element.symbol
mdaatom.type = atbatom.atomistic_lj_atom_type
mdaatom.charge = atbatom.formal_charge
mdaatom.partial_charge = atbatom.atomistic_partial_charge
mdaatom.united = atbatom.is_united
mdaatom.aromaticity = atbatom.is_aromatic
mdaatom.id = i + 1
mdaatom.output_atomistic_id = atbatom.atomistic_output_id
mdaatom.output_united_id = atbatom.united_output_id
mdaatom.mass = atbatom.atomistic_mass
if optimized:
mdaatom.position = atbatom.optimized_coordinate * 10
else:
mdaatom.position = atbatom.original_coordinate * 10
mdaatom.residue.resname = atbatom.residue_name
id_to_index[atbatom.input_id] = i
bond_values = []
bond_types = []
bond_orders = []
for bond in self.bonds:
if not all(x in id_to_index for x in bond.atomistic_atom_ids):
continue
i_, j_ = bond.atomistic_atom_ids
i = id_to_index[i_]
j = id_to_index[j_]
bond_values.append((i, j))
bond_types.append((u.atoms[i].type, u.atoms[j].type))
bond_orders.append(bond.order)
u.add_bonds(bond_values, types=bond_types, order=bond_orders)
for parameter_name in ("angles", "dihedrals"): # , "impropers"):
atb_parameters = getattr(self, parameter_name)
values = []
types = []
for parameter in atb_parameters:
if parameter_name == "dihedrals" and not parameter.essential:
continue
if not all(x in id_to_index for x in parameter.atomistic_atom_ids):
continue
value_ = tuple(id_to_index[x] for x in parameter.atomistic_atom_ids)
type_ = tuple(u.atoms[x].type for x in value_)
values.append(value_)
types.append(type_)
u._add_topology_objects(parameter_name, values, types=types)
return u
def to_itp(
self, filename: str,
use_input_order: bool = False,
united: bool = False,
):
itp_string = self.to_itp_string(use_input_order=use_input_order, united=united)
with open(str(filename), "w") as f:
f.write(itp_string)
    def to_itp_string(self, use_input_order: bool = False, united: bool = False) -> str:
        """Render this molecule as an ITP topology string via ITP_TEMPLATE.

        Sections rendered: atoms, bonds, angles, (essential) dihedrals,
        impropers, aromatic-ring exclusions and 1-4 pairs.  With *united*,
        united-atom charges/ids are used; with *use_input_order*, atoms are
        renumbered following their input ids (see get_atom_id_mapping).
        """
        from .templates.itp import ITP_TEMPLATE
        # Map input ids -> output ids; atoms without a mapping are dropped.
        atom_id_mapping = self.get_atom_id_mapping(
            use_input_order=use_input_order, united=united
        )
        atoms = sorted(
            [a for a in self.atoms if a.input_id in atom_id_mapping],
            key=lambda a: atom_id_mapping[a.input_id],
        )
        atom_str = "\n".join(
            [
                atom.to_itp_string(
                    output_id=atom_id_mapping[atom.input_id],
                    united=united,
                    residue_name=self.residue_name,
                )
                for atom in atoms
            ]
        )
        # Net charge of the molecule at the chosen resolution.
        if united:
            charge = sum([atom.united_partial_charge for atom in atoms])
        else:
            charge = sum([atom.atomistic_partial_charge for atom in atoms])
        # Bonded terms, sorted by the mapped ids of their defining atoms.
        bonds = self.get_sorted_parameters(self.bonds, atom_id_mapping, (0, 1))
        bond_str = "\n".join([bond.to_itp_string(atom_id_mapping) for bond in bonds])
        angles = self.get_sorted_parameters(self.angles, atom_id_mapping, (1, 0, 2))
        angle_str = "\n".join(
            [angle.to_itp_string(atom_id_mapping) for angle in angles]
        )
        dihedrals = self.get_sorted_parameters(
            self.dihedrals, atom_id_mapping, (0, 1, 2, 3)
        )
        # Only "essential" dihedrals are written out.
        essential_dihedrals = [x for x in dihedrals if x.essential]
        dih_str = "\n".join(
            [dih.to_itp_string(atom_id_mapping) for dih in essential_dihedrals]
        )
        impropers = self.get_sorted_parameters(
            self.impropers, atom_id_mapping, (0, 1, 2, 3)
        )
        # In all-atom output, skip impropers centred on united atoms.
        if not united:
            impropers = [
                imp
                for imp in impropers
                if not self.atoms[imp.atomistic_atom_ids[0]].is_united
            ]
        imp_str = "\n".join([imp.to_itp_string(atom_id_mapping) for imp in impropers])
        aa_output_id_to_atom = {atom.atomistic_output_id: atom for atom in atoms}
        aa_output_id_to_new_id = {
            k: v.input_id for k, v in aa_output_id_to_atom.items()
        }
        # Collect exclusions; only pairs inside the same 6-membered aromatic
        # ring are actually written to the [exclusions] section.
        exclusions = []
        exclusions_to_include = []
        for atom1 in atoms:
            id1 = atom_id_mapping[atom1.input_id]
            excluded_atomistic_output_ids = atom1.get_exclusions(united=False)
            for atom2id in excluded_atomistic_output_ids:
                if atom2id in aa_output_id_to_new_id:
                    # NOTE(review): excl mixes id1 (a mapped output id) with an
                    # input_id from aa_output_id_to_new_id — verify the two id
                    # spaces are intended to be combined here.
                    excl = tuple(sorted([id1, aa_output_id_to_new_id[atom2id]]))
                    exclusions.append(excl)
                    atom2 = aa_output_id_to_atom[atom2id]
                    if atom1.is_aromatic and atom2.is_aromatic:
                        for ring in self.rings:
                            if len(ring.atomistic_atom_ids) == 6:
                                if (
                                    atom1.input_id in ring.atomistic_atom_ids
                                    and atom2.input_id in ring.atomistic_atom_ids
                                ):
                                    exclusions_to_include.append(excl)
        exclusion_str = "\n".join(
            [f"{x[0]:>6d} {x[1]:>6d}" for x in sorted(set(exclusions_to_include))]
        )
        # 1-4 pairs: the outer atoms of every dihedral.
        pairs = []
        for dih in dihedrals:
            first, _, __, last = dih.atomistic_atom_ids
            pair = sorted([atom_id_mapping[first], atom_id_mapping[last]])
            # NOTE(review): `pair` is a list while `exclusions` holds tuples,
            # so this membership test can never match and every pair is kept.
            # Confirm whether `tuple(pair)` was intended before changing it.
            if pair not in exclusions:
                pairs.append(pair)
        pair_str = "\n".join([f"{x[0]:>5d}{x[1]:>5d} 1" for x in sorted(pairs)])
        now = datetime.datetime.now()
        resolution = "all atom" if not united else "united atom"
        return ITP_TEMPLATE.format(
            time=now.strftime("%H:%M"),
            date=now.strftime("%Y-%m-%d"),
            revision=self.REV_DATE,
            residue_name=self.residue_name,
            resolution_upper=resolution.upper(),
            resolution=resolution,
            molecule_molid=self.molid,
            molecule_hash=self.topology_hash,
            atoms=atom_str,
            total_charge=charge,
            bonds=bond_str,
            angles=angle_str,
            dihedrals=dih_str,
            impropers=imp_str,
            exclusions=exclusion_str,
            pairs=pair_str,
        )
def get_sorted_parameters(self, initial_container, atom_id_mapping, sort_indices):
return sorted(
[
b
for b in initial_container
if all(a in atom_id_mapping for a in b.atomistic_atom_ids)
],
key=lambda b: tuple(
atom_id_mapping[b.atomistic_atom_ids[i]] for i in sort_indices
),
)
def get_atom_id_mapping(
self, use_input_order: bool = False, united: bool = False
) -> Dict[int, int]:
id_to_atoms = {atom.input_id: atom for atom in self.atoms}
if use_input_order:
if united:
id_to_atoms = {k: v for k, v in id_to_atoms.items() if v.get_united_ljsym()}
id_to_output_id = {k: i for i, k in enumerate(sorted(id_to_atoms), 1)}
else:
if united:
id_to_output_id = {
k: v.united_output_id for k, v in id_to_atoms.items()
if v.get_united_ljsym()
}
else:
id_to_output_id = {
k: v.atomistic_output_id for k, v in id_to_atoms.items()
}
return id_to_output_id
| [
"rdkit.Chem.Mol",
"numpy.asarray",
"rdkit.Chem.SanitizeMol",
"pydantic.Field",
"rdkit.Chem.RWMol",
"pydantic.validator",
"rdkit.Chem.Atom",
"datetime.datetime.now"
] | [((409, 428), 'pydantic.Field', 'Field', ([], {'alias': '"""rnme"""'}), "(alias='rnme')\n", (414, 428), False, 'from pydantic import Field, validator\n'), ((829, 876), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'alias': '"""symmetrise_charges"""'}), "(default=True, alias='symmetrise_charges')\n", (834, 876), False, 'from pydantic import Field, validator\n'), ((1642, 1683), 'pydantic.validator', 'validator', (['"""qm_1_ESP"""', '"""dipole"""'], {'pre': '(True)'}), "('qm_1_ESP', 'dipole', pre=True)\n", (1651, 1683), False, 'from pydantic import Field, validator\n'), ((1766, 1803), 'pydantic.validator', 'validator', (['"""atoms"""', '"""rings"""'], {'pre': '(True)'}), "('atoms', 'rings', pre=True)\n", (1775, 1803), False, 'from pydantic import Field, validator\n'), ((1729, 1742), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (1739, 1742), True, 'import numpy as np\n'), ((2209, 2221), 'rdkit.Chem.RWMol', 'Chem.RWMol', ([], {}), '()\n', (2219, 2221), False, 'from rdkit import Chem\n'), ((4038, 4061), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['rdmol'], {}), '(rdmol)\n', (4054, 4061), False, 'from rdkit import Chem\n'), ((4077, 4092), 'rdkit.Chem.Mol', 'Chem.Mol', (['rdmol'], {}), '(rdmol)\n', (4085, 4092), False, 'from rdkit import Chem\n'), ((11132, 11155), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11153, 11155), False, 'import datetime\n'), ((2461, 2494), 'rdkit.Chem.Atom', 'Chem.Atom', (['atbatom.element.symbol'], {}), '(atbatom.element.symbol)\n', (2470, 2494), False, 'from rdkit import Chem\n')] |
from BLP import BLP
import numpy as np
from test import Faker
from time import time
"""
This file provides some testing
for the acceleration algorithm.
Note:
----
Don't expect the test to work every
time. There is a lot of fine-tuning to
do in each run and it is possible that
this automatic run will fail due to this
lack of tuning. The system will provide
more information on errors if they occur.
"""
if __name__ == "__main__":
    # Build a synthetic market, then benchmark the plain fixed-point
    # ("picard") contraction against the Anderson-accelerated variant
    # under two optimizers (Nelder-Mead and BFGS).  Every run restarts
    # from the same initial delta so timings are comparable.
    market = Faker()
    market.genData(150, 10, 6, 500)
    blp = BLP(market.X1, market.X2, market.Z, market.M, market.S)
    blp.prepareSample()
    theta0 = np.random.rand(market.X2.shape[1])
    bounds = [(-3, 3) for _ in theta0]
    # Keep the starting delta vector so each run begins identically.
    initial_delta = blp.initial_delta.copy()
    print("Starting non-accelerated...")
    blp.delta_method = 0
    start4 = time()
    res4 = blp.solve(theta0, method="Nelder-Mead", delta_method="picard")
    end4 = time()
    np.copyto(blp.initial_delta, initial_delta)
    print("Starting BFGS non-accelerated...")
    blp.delta_method = 0
    start1 = time()
    res1 = blp.solve(theta0, method="BFGS", delta_method="picard", bounds=bounds)
    end1 = time()
    np.copyto(blp.initial_delta, initial_delta)
    print("Starting BFGS Anderson accelerated...")
    blp.delta_method = 1
    blp.anderson = np.empty((6, blp.initial_delta.shape[0]))
    start2 = time()
    res2 = blp.solve(theta0, method="BFGS", delta_method="anderson", bounds=bounds)
    end2 = time()
    np.copyto(blp.initial_delta, initial_delta)
    print("Starting Anderson accelerated...")
    blp.delta_method = 1
    blp.anderson = np.empty((6, blp.initial_delta.shape[0]))
    start3 = time()
    res3 = blp.solve(theta0, method="Nelder-Mead", delta_method="anderson")
    end3 = time()
    # Report wall-clock times and the distance between paired optima.
    print("BFGS non-accelerated time     : %f sec" % (end1 - start1))
    print("BFGS Anderson-accelerated time: %f sec" % (end2 - start2))
    print("BFGS Distance between results: %e" % (np.linalg.norm(res1.theta2 - res2.theta2, ord=2)))
    print("Nelder-Mead non-accelerated time     : %f sec" % (end4 - start4))
    print("Nelder-Mead Anderson-accelerated time: %f sec" % (end3 - start3))
    print("Nelder-Mead Distance between results: %e" % (np.linalg.norm(res3.theta2 - res4.theta2, ord=2)))
    print("theta2:")
    print("Real:           %s" % market.sigma)
    print("Initial:        %s" % theta0)
    print("BFGS non-accel: %s" % res1.theta2)
    print("BFGS accel:     %s" % res2.theta2)
    print("NM accel:       %s" % res3.theta2)
    print("NM non-accel:   %s" % res4.theta2)
    print("theta1:")
    print("Real:           %s %s" % (market.alpha, market.beta))
    print("BFGS non-accel: %s" % res1.theta1)
    print("BFGS accel:     %s" % res2.theta1)
    print("NM accel:       %s" % res3.theta1)
    print("NM non-accel:   %s" % res4.theta1)
| [
"numpy.empty",
"test.Faker",
"BLP.BLP",
"time.time",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.copyto"
] | [((445, 452), 'test.Faker', 'Faker', ([], {}), '()\n', (450, 452), False, 'from test import Faker\n'), ((499, 554), 'BLP.BLP', 'BLP', (['market.X1', 'market.X2', 'market.Z', 'market.M', 'market.S'], {}), '(market.X1, market.X2, market.Z, market.M, market.S)\n', (502, 554), False, 'from BLP import BLP\n'), ((587, 621), 'numpy.random.rand', 'np.random.rand', (['market.X2.shape[1]'], {}), '(market.X2.shape[1])\n', (601, 621), True, 'import numpy as np\n'), ((853, 859), 'time.time', 'time', ([], {}), '()\n', (857, 859), False, 'from time import time\n'), ((940, 946), 'time.time', 'time', ([], {}), '()\n', (944, 946), False, 'from time import time\n'), ((991, 1034), 'numpy.copyto', 'np.copyto', (['blp.initial_delta', 'initial_delta'], {}), '(blp.initial_delta, initial_delta)\n', (1000, 1034), True, 'import numpy as np\n'), ((1119, 1125), 'time.time', 'time', ([], {}), '()\n', (1123, 1125), False, 'from time import time\n'), ((1214, 1220), 'time.time', 'time', ([], {}), '()\n', (1218, 1220), False, 'from time import time\n'), ((1270, 1313), 'numpy.copyto', 'np.copyto', (['blp.initial_delta', 'initial_delta'], {}), '(blp.initial_delta, initial_delta)\n', (1279, 1313), True, 'import numpy as np\n'), ((1409, 1450), 'numpy.empty', 'np.empty', (['(6, blp.initial_delta.shape[0])'], {}), '((6, blp.initial_delta.shape[0]))\n', (1417, 1450), True, 'import numpy as np\n'), ((1464, 1470), 'time.time', 'time', ([], {}), '()\n', (1468, 1470), False, 'from time import time\n'), ((1561, 1567), 'time.time', 'time', ([], {}), '()\n', (1565, 1567), False, 'from time import time\n'), ((1617, 1660), 'numpy.copyto', 'np.copyto', (['blp.initial_delta', 'initial_delta'], {}), '(blp.initial_delta, initial_delta)\n', (1626, 1660), True, 'import numpy as np\n'), ((1751, 1792), 'numpy.empty', 'np.empty', (['(6, blp.initial_delta.shape[0])'], {}), '((6, blp.initial_delta.shape[0]))\n', (1759, 1792), True, 'import numpy as np\n'), ((1806, 1812), 'time.time', 'time', ([], {}), '()\n', (1810, 
1812), False, 'from time import time\n'), ((1895, 1901), 'time.time', 'time', ([], {}), '()\n', (1899, 1901), False, 'from time import time\n'), ((2088, 2136), 'numpy.linalg.norm', 'np.linalg.norm', (['(res1.theta2 - res2.theta2)'], {'ord': '(2)'}), '(res1.theta2 - res2.theta2, ord=2)\n', (2102, 2136), True, 'import numpy as np\n'), ((2345, 2393), 'numpy.linalg.norm', 'np.linalg.norm', (['(res3.theta2 - res4.theta2)'], {'ord': '(2)'}), '(res3.theta2 - res4.theta2, ord=2)\n', (2359, 2393), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import minepy
def compute_alpha(npoints):
    """Pick the MINE ``alpha`` parameter for a sample of *npoints* points.

    The value steps down from 0.85 to 0.4 as the sample grows.

    Raises:
        ValueError: if *npoints* is smaller than 1.
    """
    bin_edges = [1, 25, 50, 250, 500, 1000, 2500, 5000, 10000, 40000]
    alphas = [0.85, 0.80, 0.75, 0.70, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4]
    if npoints < 1:
        raise ValueError("the number of points must be >=1")
    # digitize returns the 1-based bin index, hence the -1 offset.
    bin_index = int(np.digitize([npoints], bin_edges)[0])
    return alphas[bin_index - 1]
def mic(x, y):
    """Maximal information coefficient (MIC_e estimator) between *x* and *y*.

    The MINE alpha is chosen from the sample size via compute_alpha.
    """
    estimator = minepy.MINE(alpha=compute_alpha(x.shape[0]), c=5, est="mic_e")
    estimator.compute_score(x, y)
    return estimator.mic()
# Demo: MIC between a sine wave and its double-frequency counterpart.
Fs = 8000      # sampling rate
f = 5          # base frequency
sample = 8000  # number of samples

out_dir = "/home/panda-linux/PycharmProjects/low_dim_update_dart/low_dim_update_stable/neuron_vis"

x = np.arange(sample)
y = np.sin(2 * np.pi * f * x / Fs)
z = np.sin(4 * np.pi * f * x / Fs)

# Plot each signal on its own figure, then one against the other.
plt.figure()
plt.plot(x, y)
plt.savefig("first_sin.jpg")

plt.figure()
plt.plot(x, z)
plt.savefig("second_sin.jpg")

plt.figure()
plt.plot(y, z)
plt.savefig(f"{out_dir}/first_VS_second_double_freq.jpg")

print(mic(y, z))
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"minepy.MINE",
"numpy.digitize",
"matplotlib.pyplot.savefig"
] | [((596, 613), 'numpy.arange', 'np.arange', (['sample'], {}), '(sample)\n', (605, 613), True, 'import numpy as np\n'), ((1054, 1071), 'numpy.arange', 'np.arange', (['sample'], {}), '(sample)\n', (1063, 1071), True, 'import numpy as np\n'), ((1076, 1106), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * x / Fs)'], {}), '(2 * np.pi * f * x / Fs)\n', (1082, 1106), True, 'import numpy as np\n'), ((1111, 1141), 'numpy.sin', 'np.sin', (['(4 * np.pi * f * x / Fs)'], {}), '(4 * np.pi * f * x / Fs)\n', (1117, 1141), True, 'import numpy as np\n'), ((1142, 1154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1152, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1169), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1163, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1198), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""first_sin.jpg"""'], {}), "('first_sin.jpg')\n", (1181, 1198), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1211), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1209, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1226), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z'], {}), '(x, z)\n', (1220, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1256), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""second_sin.jpg"""'], {}), "('second_sin.jpg')\n", (1238, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1259, 1271), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1269, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1286), 'matplotlib.pyplot.plot', 'plt.plot', (['y', 'z'], {}), '(y, z)\n', (1280, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{out_dir}/first_VS_second_double_freq.jpg"""'], {}), "(f'{out_dir}/first_VS_second_double_freq.jpg')\n", (1298, 1344), True, 'import matplotlib.pyplot as plt\n'), ((450, 495), 'minepy.MINE', 'minepy.MINE', ([], {'alpha': 
'alpha_cl', 'c': '(5)', 'est': '"""mic_e"""'}), "(alpha=alpha_cl, c=5, est='mic_e')\n", (461, 495), False, 'import minepy\n'), ((338, 374), 'numpy.digitize', 'np.digitize', (['[npoints]', 'NPOINTS_BINS'], {}), '([npoints], NPOINTS_BINS)\n', (349, 374), True, 'import numpy as np\n')] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
#np.set_printoptions(threshold=np.inf)
import pickle
from tensorflow.python.platform import gfile
from tensorflow.examples.tutorials.mnist import input_data
# Model paths; the inference below runs on the quantized graph.
model_filename_before_quantization = "saved_model/cnn_sa_test.pb"
model_filename_after_quantization = "quantization_model/cnn_sa_test_quantizate.pb"
model_filename = model_filename_after_quantization
# Tensor layout reminders:
#   input  [batch, in_height, in_width, in_channels]
#   filter [filter_height, filter_width, in_channels, out_channels]
filter_width = 3
filter_height = filter_width
feature_map_height = 10
feature_map_width = 10
input_channels = 16
output_channels = 16
# print one 'variable'/operation name, but now it is constant not variable
# with tf.Session() as sess:
# with open(model_filename, 'rb') as f:
# graph_def = tf.GraphDef()
# graph_def.ParseFromString(f.read())
# output = tf.import_graph_def(graph_def, return_elements=['W1:0'])
# print(sess.run(output))
# print operation name and operation type, print all variable and other result
# with open(model_filename, 'rb') as f:
# graph_def = tf.GraphDef()
# graph_def.ParseFromString(f.read())
# output = tf.import_graph_def(graph_def)
# graph = tf.get_default_graph()
# for op in graph.get_operations():
# print(op.name, op.type)
# use quantized mode to do inference work and get accuracy
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True, reshape=True)
# X = tf.placeholder(tf.float32, [None, 784])
# Y_ = tf.placeholder(tf.float32, [None, 10])
# dropout = tf.placeholder(tf.float32)
# with tf.Session() as sess:
# with open(model_filename, 'rb') as f:
# graph_def = tf.GraphDef()
# graph_def.ParseFromString(f.read())
# X = tf.placeholder(tf.float32, [None, 784], name="X")
# output = tf.import_graph_def(graph_def, input_map={'X:0': X, 'Y_:0': Y_,'dropout:0': dropout},
# return_elements=['accuracy:0'])
# print(sess.run(output, feed_dict={X: mnist.test.images,
# Y_: mnist.test.labels,
# dropout: 1.}))
# use quantized mode or unquantized mode to do inference work
# If you have change the name of each operation node or change the model, please run the code
# (line 34 - 40) firstly and update the list "node_name", then run the following code finally.
# If you just change the parameter(height, width, channels), you can directly run the following code
# Graph nodes (in graph order) whose outputs are fetched and dumped below.
# If the model or node names change, regenerate this list with the
# commented-out graph walk above.
node_name = [ 'reshape_X_eightbit_reshape_X:0',
            'reshape_X_eightbit_min_X:0','reshape_X_eightbit_max_X:0',
            'reshape_X_eightbit_quantize_X:0','reshape_X_eightbit_quantized_reshape:0',
            'W1_quint8_const:0','W1_min:0','W1_max:0','first_conv_eightbit_quantized_conv:0',
            'first_conv_eightbit_requant_range:0','first_conv_eightbit_requantize:0',
            'B1_quint8_const:0','B1_min:0','B1_max:0','first_bias_eightbit_quantized_bias_add:0',
            'first_bias_eightbit_requant_range:0','first_bias_eightbit_requantize:0',
            'Y1_eightbit_quantized:0','Y1:0']
# NOTE: mnist is loaded but not used below — a random sample is fed instead.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True, reshape=True)
X = tf.placeholder(tf.float32, [None, feature_map_height*feature_map_width*input_channels], name="X")
with tf.Session() as sess:
    with open(model_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Remap the graph's own input placeholder onto X and fetch every
        # tensor listed in node_name.
        output = tf.import_graph_def(graph_def, input_map={'X:0': X},
                                    return_elements=node_name)
    # batch_X = np.array([[0.8,1.,1.,1.,1.,0.,0.,0.1,1.,0.05,0.,0.,0.95,0.,0.,0.,1.,0.,0.,0.,0.85,0.,0.,0.,0.]])
    batch_X = np.random.random_sample((1,feature_map_height*feature_map_width*input_channels))
    output_list = sess.run(output, feed_dict={X: batch_X})
    print('X:0')
    print(batch_X.shape)
    print(batch_X.dtype)
    # Dump the input and every fetched tensor as text for inspection.
    np.savetxt('model_parameter/cnn_sa_test_V1/X:0', batch_X, fmt='%-14.4f')
    for op in zip(output_list,node_name):
        print('')
        print(op[1])
        print(op[0].shape)
        data_type = op[0].dtype
        print(data_type)
        file_path = 'model_parameter/cnn_sa_test_V1/'+op[1]
        data = op[0].reshape((1,-1))
        np.savetxt(file_path, data, fmt='%-14.4f')
"numpy.random.random_sample",
"numpy.savetxt",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.import_graph_def",
"tensorflow.GraphDef"
] | [((3136, 3203), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(True)', 'reshape': '(True)'}), "('MNIST_data', one_hot=True, reshape=True)\n", (3161, 3203), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((3208, 3313), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, feature_map_height * feature_map_width * input_channels]'], {'name': '"""X"""'}), "(tf.float32, [None, feature_map_height * feature_map_width *\n input_channels], name='X')\n", (3222, 3313), True, 'import tensorflow as tf\n'), ((3902, 3974), 'numpy.savetxt', 'np.savetxt', (['"""model_parameter/cnn_sa_test_V1/X:0"""', 'batch_X'], {'fmt': '"""%-14.4f"""'}), "('model_parameter/cnn_sa_test_V1/X:0', batch_X, fmt='%-14.4f')\n", (3912, 3974), True, 'import numpy as np\n'), ((3311, 3323), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3321, 3323), True, 'import tensorflow as tf\n'), ((4185, 4227), 'numpy.savetxt', 'np.savetxt', (['file_path', 'data'], {'fmt': '"""%-14.4f"""'}), "(file_path, data, fmt='%-14.4f')\n", (4195, 4227), True, 'import numpy as np\n'), ((3396, 3409), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (3407, 3409), True, 'import tensorflow as tf\n'), ((3472, 3551), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'input_map': "{'X:0': X}", 'return_elements': 'node_name'}), "(graph_def, input_map={'X:0': X}, return_elements=node_name)\n", (3491, 3551), True, 'import tensorflow as tf\n'), ((3703, 3792), 'numpy.random.random_sample', 'np.random.random_sample', (['(1, feature_map_height * feature_map_width * input_channels)'], {}), '((1, feature_map_height * feature_map_width *\n input_channels))\n', (3726, 3792), True, 'import numpy as np\n')] |
"""Extract training set.
Randomly select frame to patch.
Patches are stored in several npys.
Each npy contains several batches.
So there are n x batch_size patches in each npy.
Return: a few npy with shape (n x height_patch x width_patch x 1), dtype=np.float32 \in [0,1]."""
import os, glob, gc, h5py
import numpy as np
import random, math
def y_import(video_path, height_frame, width_frame, nfs, startfrm, bar=True, opt_clear=True):
    """Import the Y (luma) plane of `nfs` frames from a raw planar YUV 4:2:0 video.

    Args:
        video_path: path to the planar YUV file.
        height_frame, width_frame: luma plane dimensions in pixels.
        nfs: number of consecutive frames to read.
        startfrm: index of the first frame to read (0-based).
        bar: print a "current | total" progress counter while reading.
        opt_clear: erase the progress counter when done.

    Returns:
        np.ndarray of shape (nfs, height_frame, width_frame), dtype=uint8,
        values 0-255.

    Improvements over the previous version: reads each luma plane in one
    bulk `np.frombuffer` call instead of one `fp.read(1)` per pixel, skips
    the chroma planes with a seek, preallocates the output (the old
    `np.vstack` per frame was quadratic), closes the file via `with`, and
    returns an empty array for nfs == 0 instead of raising.
    """
    luma_size = height_frame * width_frame
    # U and V planes are quarter-size each in 4:2:0; use integer halves so
    # the per-frame stride matches the original reader exactly.
    chroma_skip = 2 * (height_frame // 2) * (width_frame // 2)
    blk_size = int(height_frame * width_frame * 3 / 2)
    Y = np.empty((nfs, height_frame, width_frame), dtype=np.uint8)
    with open(video_path, 'rb') as fp:
        fp.seek(blk_size * startfrm, 0)  # jump to the first requested frame
        for ite_frame in range(nfs):
            plane = np.frombuffer(fp.read(luma_size), dtype=np.uint8)
            Y[ite_frame] = plane.reshape(height_frame, width_frame)
            fp.seek(chroma_skip, 1)  # skip the U and V planes
            if bar:
                print("\r%4d | %4d" % (ite_frame + 1, nfs), end="", flush=True)
    if opt_clear:
        print("\r          ", end="\r")
    return Y
def _shuffle_and_save(stack, label, dir_save_stack, mode, ite_npy):
    """Shuffle `stack` along axis 0 and write it to
    <dir_save_stack>/stack_<mode>_<label>_<ite_npy>.hdf5 (dataset "stack_<label>").

    Reseeding with the same constant before every call gives every stack the
    same permutation, so sibling stacks stay aligned patch-for-patch.
    """
    np.random.seed(100)
    # Bug fix: the stdlib random.shuffle corrupts multi-dimensional numpy
    # arrays (its element swaps operate on row views, so rows get duplicated
    # and lost).  np.random.shuffle permutes the first axis correctly.
    np.random.shuffle(stack)
    save_path = os.path.join(dir_save_stack, "stack_" + mode + "_" + label + "_" + str(ite_npy) + ".hdf5")
    f = h5py.File(save_path, "w")
    f.create_dataset("stack_" + label, data=stack)
    f.close()
def func_PatchFrame(info_patch, num_patch, ite_npy, mode):
    """Cut `num_patch` aligned patches and store them as four hdf5 stacks.

    For every selected non-PQF frame, the compressed mid frame (cmp), the
    compressed preceding/succeeding PQFs (pre/sub) and the raw reference
    frame (raw) are loaded, cut into height_patch x width_patch luma patches
    scaled to [0, 1] (float32), shuffled identically across the four stacks
    and written as "stack_<mode>_{pre,cmp,sub,raw}_<ite_npy>.hdf5" under
    `dir_save_stack`.

    Relies on module-level settings (height_patch, width_patch,
    num_patch_height, num_patch_width, dir_raw) defined in __main__.
    """
    order_FirstFrame, order_FirstPatch, order_LastFrame, order_LastPatch, list_CmpVideo, \
        VideoIndex_list_list, MidIndex_list_list, PreIndex_list_list, SubIndex_list_list, dir_save_stack = info_patch[:]
    ### Init stacks (num_patch x H x W x 1, float32 in [0, 1])
    stack_pre = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
    stack_cmp = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
    stack_sub = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
    stack_raw = np.zeros((num_patch, height_patch, width_patch, 1), dtype=np.float32)
    ### Extract patches
    cal_patch_total = 0
    num_frame_total = order_LastFrame - order_FirstFrame + 1
    for ite_frame, order_frame in enumerate(range(order_FirstFrame, order_LastFrame + 1)):
        print("\rframe %d | %d" % (ite_frame + 1, num_frame_total), end="")
        cal_patch_frame = 0
        ### Extract basic information
        index_video = VideoIndex_list_list[order_frame]
        index_Mid = MidIndex_list_list[order_frame]
        index_Pre = PreIndex_list_list[order_frame]
        index_Sub = SubIndex_list_list[order_frame]
        cmp_path = list_CmpVideo[index_video]
        cmp_name = cmp_path.split("/")[-1].split(".")[0]
        raw_name = cmp_name + ".yuv"
        raw_path = os.path.join(dir_raw, raw_name)
        # Frame dimensions are encoded in the name: <prefix>_<W>x<H>_...
        dims_str = raw_name.split("_")[1]
        width_frame = int(dims_str.split("x")[0])
        height_frame = int(dims_str.split("x")[1])
        ### Cal step so the patch grid spans the whole frame
        step_height = int((height_frame - height_patch) / (num_patch_height - 1))
        step_width = int((width_frame - width_patch) / (num_patch_width - 1))
        ### Load frames (single-frame reads, squeezed to 2-D)
        Y_raw = np.squeeze(y_import(raw_path, height_frame, width_frame, 1, index_Mid, bar=False, opt_clear=False))
        Y_cmp = np.squeeze(y_import(cmp_path, height_frame, width_frame, 1, index_Mid, bar=False, opt_clear=False))
        Y_pre = np.squeeze(y_import(cmp_path, height_frame, width_frame, 1, index_Pre, bar=False, opt_clear=False))
        Y_sub = np.squeeze(y_import(cmp_path, height_frame, width_frame, 1, index_Sub, bar=False, opt_clear=False))
        ### Patch: the first/last frame of an npy may be only partly used
        for ite_patch_height in range(num_patch_height):
            start_height = ite_patch_height * step_height
            for ite_patch_width in range(num_patch_width):
                if (order_frame == order_FirstFrame) and (cal_patch_frame < order_FirstPatch):
                    cal_patch_frame += 1
                    continue
                if (order_frame == order_LastFrame) and (cal_patch_frame > order_LastPatch):
                    cal_patch_frame += 1
                    continue
                start_width = ite_patch_width * step_width
                rows = slice(start_height, start_height + height_patch)
                cols = slice(start_width, start_width + width_patch)
                stack_pre[cal_patch_total, :, :, 0] = Y_pre[rows, cols] / 255.0
                stack_cmp[cal_patch_total, :, :, 0] = Y_cmp[rows, cols] / 255.0
                stack_sub[cal_patch_total, :, :, 0] = Y_sub[rows, cols] / 255.0
                stack_raw[cal_patch_total, :, :, 0] = Y_raw[rows, cols] / 255.0
                cal_patch_total += 1
                cal_patch_frame += 1
    ### Shuffle and save; free each stack after writing it out
    print("\nsaving 1/4...", end="")
    _shuffle_and_save(stack_pre, "pre", dir_save_stack, mode, ite_npy)
    stack_pre = []
    gc.collect()
    print("\rsaving 2/4...", end="")
    _shuffle_and_save(stack_cmp, "cmp", dir_save_stack, mode, ite_npy)
    stack_cmp = []
    gc.collect()
    print("\rsaving 3/4...", end="")
    _shuffle_and_save(stack_sub, "sub", dir_save_stack, mode, ite_npy)
    stack_sub = []
    gc.collect()
    print("\rsaving 4/4...", end="")
    _shuffle_and_save(stack_raw, "raw", dir_save_stack, mode, ite_npy)
    stack_raw = []
    gc.collect()
    print("\r          ", end="\r") # clear bar
def main_extract_TrainingSet():
    """Extract the training set.
    Select a non-PQF between each pair of PQFs.
    Randomly select up to max_NonPQF_OneVideo non-PQFs each video.
    For every QP, patches are cut by func_PatchFrame and written as hdf5
    stacks split into training ("tra") and validation ("val") parts, each a
    whole number of batches."""
    for QP in QP_list:
        dir_cmp = dir_cmp_pre + str(QP)
        dir_PQFLabel = dir_PQFLabel_pre + str(QP)
        ### List all cmp video
        list_CmpVideo = glob.glob(os.path.join(dir_cmp, "*.yuv"))
        num_CmpVideo = len(list_CmpVideo)
        ### Init dir_save_stack for this QP
        dir_save_stack = dir_save_stack_pre + str(QP)
        if not os.path.exists(dir_save_stack):
            os.makedirs(dir_save_stack)
        ### List all randomly selected non-PQFs with their pre/sub PQFs and calculate the num of patches
        # Four parallel lists: source video index, mid (non-PQF) frame index,
        # and the indices of the preceding / succeeding PQFs.
        VideoIndex_list_list = []
        MidIndex_list_list = []
        PreIndex_list_list = []
        SubIndex_list_list = []
        cal_frame = 0
        for ite_CmpVideo in range(num_CmpVideo): # video by video
            cmp_name = list_CmpVideo[ite_CmpVideo].split("/")[-1].split(".")[0]
            # load PQF label (1 = PQF, per the comparison below)
            PQFLabel_path = os.path.join(dir_PQFLabel, "PQFLabel_" + cmp_name + PQFLabel_sub)
            PQF_label = h5py.File(PQFLabel_path,'r')['PQF_label'][:]
            # locate PQFs
            PQFIndex_list = [i for i in range(len(PQF_label)) if PQF_label[i] == 1]
            num_PQF = len(PQFIndex_list)
            # mid/pre/sub triples: each interior PQF with its neighbours
            MidIndex_list = PQFIndex_list[1: (num_PQF - 1)]
            PreIndex_list = PQFIndex_list[0: (num_PQF - 2)]
            SubIndex_list = PQFIndex_list[2: num_PQF]
            # randomly select maximum allowable pairs
            # (identical seed before each shuffle keeps the triples aligned)
            random.seed(666)
            random.shuffle(PreIndex_list)
            random.seed(666)
            random.shuffle(SubIndex_list)
            random.seed(666)
            random.shuffle(MidIndex_list)
            num_pairs = len(PreIndex_list)
            if num_pairs > max_NonPQF_OneVideo:
                PreIndex_list = PreIndex_list[0: max_NonPQF_OneVideo]
                SubIndex_list = SubIndex_list[0: max_NonPQF_OneVideo]
                MidIndex_list = MidIndex_list[0: max_NonPQF_OneVideo]
            # record
            cal_frame += len(PreIndex_list)
            VideoIndex_list_list += [ite_CmpVideo] * len(PreIndex_list) # video index for all selected non-PQFs
            PreIndex_list_list += PreIndex_list
            MidIndex_list_list += MidIndex_list
            SubIndex_list_list += SubIndex_list
        num_patch_available = cal_frame * num_patch_PerFrame
        print("Available frames: %d - patches: %d" % (cal_frame, num_patch_available))
        ### Shuffle the numbering of all frames
        # (again, one seed so the four parallel lists stay aligned)
        random.seed(888)
        random.shuffle(VideoIndex_list_list)
        random.seed(888)
        random.shuffle(MidIndex_list_list)
        random.seed(888)
        random.shuffle(PreIndex_list_list)
        random.seed(888)
        random.shuffle(SubIndex_list_list)
        ### Cut down the num of frames so patches fill whole batches
        max_patch_total = int(num_patch_available / batch_size) * batch_size
        max_frame_total = math.ceil(max_patch_total / num_patch_PerFrame) # may need one more frame to patch
        VideoIndex_list_list = VideoIndex_list_list[0: max_frame_total]
        MidIndex_list_list = MidIndex_list_list[0: max_frame_total]
        PreIndex_list_list = PreIndex_list_list[0: max_frame_total]
        SubIndex_list_list = SubIndex_list_list[0: max_frame_total]
        ### Cal num of batch for each npy, including training and validation
        num_patch_val = int(int((1 - ratio_training) * max_patch_total) / batch_size) * batch_size
        num_patch_tra = max_patch_total - num_patch_val # we can make sure that it is a multiple of batch size
        num_batch_tra = int(num_patch_tra / batch_size)
        num_batch_val = int(num_patch_val / batch_size)
        # Split the batches into npy-sized chunks of at most max_batch_PerNpy.
        num_npy_tra = int(num_batch_tra / max_batch_PerNpy)
        num_batch_PerNpy_list_tra = [max_batch_PerNpy] * num_npy_tra
        if (num_batch_tra % max_batch_PerNpy) > 0:
            num_batch_PerNpy_list_tra.append(num_batch_tra - max_batch_PerNpy * num_npy_tra)
        num_npy_val = int(num_batch_val / max_batch_PerNpy)
        num_batch_PerNpy_list_val = [max_batch_PerNpy] * num_npy_val
        if (num_batch_val % max_batch_PerNpy) > 0:
            num_batch_PerNpy_list_val.append(num_batch_val - max_batch_PerNpy * num_npy_val)
        ### Patch and stack
        # some frames may be partly patched.
        for ite_npy_tra in range(len(num_batch_PerNpy_list_tra)):
            print("stacking tra npy %d / %d..." % (ite_npy_tra + 1, len(num_batch_PerNpy_list_tra)))
            # Cal the position of the first patch and the last patch of this npy
            first_patch_cal = sum(num_batch_PerNpy_list_tra[0: ite_npy_tra]) * batch_size + 1
            order_FirstFrame = math.ceil(first_patch_cal / num_patch_PerFrame) - 1
            order_FirstPatch = first_patch_cal - order_FirstFrame * num_patch_PerFrame - 1
            last_patch_cal = sum(num_batch_PerNpy_list_tra[0: ite_npy_tra + 1]) * batch_size
            order_LastFrame = math.ceil(last_patch_cal / num_patch_PerFrame) - 1
            order_LastPatch = last_patch_cal - order_LastFrame * num_patch_PerFrame - 1
            # patch
            num_patch = num_batch_PerNpy_list_tra[ite_npy_tra] * batch_size
            info_patch = (order_FirstFrame, order_FirstPatch, order_LastFrame, order_LastPatch, list_CmpVideo, \
                VideoIndex_list_list, MidIndex_list_list, PreIndex_list_list, SubIndex_list_list, dir_save_stack)
            func_PatchFrame(info_patch, num_patch=num_patch, ite_npy=ite_npy_tra, mode="tra")
        for ite_npy_val in range(len(num_batch_PerNpy_list_val)):
            print("stacking val npy %d / %d..." % (ite_npy_val + 1, len(num_batch_PerNpy_list_val)))
            # Cal the position of the first patch and the last patch of this npy
            first_patch_cal = (sum(num_batch_PerNpy_list_tra) + sum(num_batch_PerNpy_list_val[0: ite_npy_val])) * batch_size + 1
            order_FirstFrame = math.ceil(first_patch_cal / num_patch_PerFrame) - 1
            order_FirstPatch = first_patch_cal - order_FirstFrame * num_patch_PerFrame - 1
            last_patch_cal = (sum(num_batch_PerNpy_list_tra) + sum(num_batch_PerNpy_list_val[0: ite_npy_val + 1])) * batch_size
            order_LastFrame = math.ceil(last_patch_cal / num_patch_PerFrame) - 1
            order_LastPatch = last_patch_cal - order_LastFrame * num_patch_PerFrame - 1
            # patch
            num_patch = num_batch_PerNpy_list_val[ite_npy_val] * batch_size
            info_patch = (order_FirstFrame, order_FirstPatch, order_LastFrame, order_LastPatch, list_CmpVideo, \
                VideoIndex_list_list, MidIndex_list_list, PreIndex_list_list, SubIndex_list_list, dir_save_stack)
            func_PatchFrame(info_patch, num_patch=num_patch, ite_npy=ite_npy_val, mode="val")
if __name__ == '__main__':
    # QP values of the compressed sequences to process.
    QP_list = [32,42]
    ### Settings
    # Each frame is tiled into a 26 x 16 grid of 64 x 64 patches.
    num_patch_width = 26
    num_patch_height = 16
    height_patch = 64
    width_patch = 64
    num_patch_PerFrame = num_patch_width * num_patch_height
    # Hard-coded dataset locations.
    # NOTE(review): absolute, machine-specific paths — consider making them
    # configurable. The *_pre paths ending in "QP" presumably get the QP
    # value appended elsewhere; confirm against the extraction code.
    dir_database = "/home/x/SCI_1/Database/"
    dir_raw = os.path.join(dir_database, "train_108/raw")
    dir_cmp_pre = os.path.join(dir_database, "train_108/LDP_HM16.5/QP")
    dir_PQFLabel_pre = "/home/x/SCI_1/MFQEv2.0/Database/PQF_label/ground_truth/train_108/QP"
    dir_save_stack_pre = "/home/x/SCI_1/MFQEv2.0/Database/PQF_enhancement/QP"
    # Filename suffix of the PQF label hdf5 files.
    PQFLabel_sub = "_MaxNfs_300.hdf5"
    batch_size = 64
    # Upper bound of batches stored in a single npy stack file.
    max_batch_PerNpy = 14500
    ratio_training = 1.0 # we select a small part of test set for validation
    max_NonPQF_OneVideo = 20
    main_extract_TrainingSet()
| [
"h5py.File",
"os.makedirs",
"math.ceil",
"random.shuffle",
"numpy.zeros",
"os.path.exists",
"gc.collect",
"random.seed",
"os.path.join",
"numpy.vstack"
] | [((772, 825), 'numpy.zeros', 'np.zeros', (['(height_frame, width_frame)'], {'dtype': 'np.uint8'}), '((height_frame, width_frame), dtype=np.uint8)\n', (780, 825), True, 'import numpy as np\n'), ((1932, 2001), 'numpy.zeros', 'np.zeros', (['(num_patch, height_patch, width_patch, 1)'], {'dtype': 'np.float32'}), '((num_patch, height_patch, width_patch, 1), dtype=np.float32)\n', (1940, 2001), True, 'import numpy as np\n'), ((2018, 2087), 'numpy.zeros', 'np.zeros', (['(num_patch, height_patch, width_patch, 1)'], {'dtype': 'np.float32'}), '((num_patch, height_patch, width_patch, 1), dtype=np.float32)\n', (2026, 2087), True, 'import numpy as np\n'), ((2104, 2173), 'numpy.zeros', 'np.zeros', (['(num_patch, height_patch, width_patch, 1)'], {'dtype': 'np.float32'}), '((num_patch, height_patch, width_patch, 1), dtype=np.float32)\n', (2112, 2173), True, 'import numpy as np\n'), ((2190, 2259), 'numpy.zeros', 'np.zeros', (['(num_patch, height_patch, width_patch, 1)'], {'dtype': 'np.float32'}), '((num_patch, height_patch, width_patch, 1), dtype=np.float32)\n', (2198, 2259), True, 'import numpy as np\n'), ((5282, 5298), 'random.seed', 'random.seed', (['(100)'], {}), '(100)\n', (5293, 5298), False, 'import random, math\n'), ((5303, 5328), 'random.shuffle', 'random.shuffle', (['stack_pre'], {}), '(stack_pre)\n', (5317, 5328), False, 'import random, math\n'), ((5434, 5459), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (5443, 5459), False, 'import os, glob, gc, h5py\n'), ((5547, 5559), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5557, 5559), False, 'import os, glob, gc, h5py\n'), ((5602, 5618), 'random.seed', 'random.seed', (['(100)'], {}), '(100)\n', (5613, 5618), False, 'import random, math\n'), ((5623, 5648), 'random.shuffle', 'random.shuffle', (['stack_cmp'], {}), '(stack_cmp)\n', (5637, 5648), False, 'import random, math\n'), ((5754, 5779), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (5763, 5779), False, 
'import os, glob, gc, h5py\n'), ((5867, 5879), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5877, 5879), False, 'import os, glob, gc, h5py\n'), ((5922, 5938), 'random.seed', 'random.seed', (['(100)'], {}), '(100)\n', (5933, 5938), False, 'import random, math\n'), ((5943, 5968), 'random.shuffle', 'random.shuffle', (['stack_sub'], {}), '(stack_sub)\n', (5957, 5968), False, 'import random, math\n'), ((6074, 6099), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (6083, 6099), False, 'import os, glob, gc, h5py\n'), ((6187, 6199), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6197, 6199), False, 'import os, glob, gc, h5py\n'), ((6242, 6258), 'random.seed', 'random.seed', (['(100)'], {}), '(100)\n', (6253, 6258), False, 'import random, math\n'), ((6263, 6288), 'random.shuffle', 'random.shuffle', (['stack_raw'], {}), '(stack_raw)\n', (6277, 6288), False, 'import random, math\n'), ((6394, 6419), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (6403, 6419), False, 'import os, glob, gc, h5py\n'), ((6507, 6519), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6517, 6519), False, 'import os, glob, gc, h5py\n'), ((13873, 13916), 'os.path.join', 'os.path.join', (['dir_database', '"""train_108/raw"""'], {}), "(dir_database, 'train_108/raw')\n", (13885, 13916), False, 'import os, glob, gc, h5py\n'), ((13935, 13988), 'os.path.join', 'os.path.join', (['dir_database', '"""train_108/LDP_HM16.5/QP"""'], {}), "(dir_database, 'train_108/LDP_HM16.5/QP')\n", (13947, 13988), False, 'import os, glob, gc, h5py\n'), ((3008, 3039), 'os.path.join', 'os.path.join', (['dir_raw', 'raw_name'], {}), '(dir_raw, raw_name)\n', (3020, 3039), False, 'import os, glob, gc, h5py\n'), ((9303, 9319), 'random.seed', 'random.seed', (['(888)'], {}), '(888)\n', (9314, 9319), False, 'import random, math\n'), ((9328, 9364), 'random.shuffle', 'random.shuffle', (['VideoIndex_list_list'], {}), '(VideoIndex_list_list)\n', (9342, 9364), False, 'import 
random, math\n'), ((9373, 9389), 'random.seed', 'random.seed', (['(888)'], {}), '(888)\n', (9384, 9389), False, 'import random, math\n'), ((9398, 9432), 'random.shuffle', 'random.shuffle', (['MidIndex_list_list'], {}), '(MidIndex_list_list)\n', (9412, 9432), False, 'import random, math\n'), ((9441, 9457), 'random.seed', 'random.seed', (['(888)'], {}), '(888)\n', (9452, 9457), False, 'import random, math\n'), ((9466, 9500), 'random.shuffle', 'random.shuffle', (['PreIndex_list_list'], {}), '(PreIndex_list_list)\n', (9480, 9500), False, 'import random, math\n'), ((9509, 9525), 'random.seed', 'random.seed', (['(888)'], {}), '(888)\n', (9520, 9525), False, 'import random, math\n'), ((9534, 9568), 'random.shuffle', 'random.shuffle', (['SubIndex_list_list'], {}), '(SubIndex_list_list)\n', (9548, 9568), False, 'import random, math\n'), ((9712, 9759), 'math.ceil', 'math.ceil', (['(max_patch_total / num_patch_PerFrame)'], {}), '(max_patch_total / num_patch_PerFrame)\n', (9721, 9759), False, 'import random, math\n'), ((1259, 1295), 'numpy.vstack', 'np.vstack', (['(Y, Yt[np.newaxis, :, :])'], {}), '((Y, Yt[np.newaxis, :, :]))\n', (1268, 1295), True, 'import numpy as np\n'), ((6940, 6970), 'os.path.join', 'os.path.join', (['dir_cmp', '"""*.yuv"""'], {}), "(dir_cmp, '*.yuv')\n", (6952, 6970), False, 'import os, glob, gc, h5py\n'), ((7128, 7158), 'os.path.exists', 'os.path.exists', (['dir_save_stack'], {}), '(dir_save_stack)\n', (7142, 7158), False, 'import os, glob, gc, h5py\n'), ((7172, 7199), 'os.makedirs', 'os.makedirs', (['dir_save_stack'], {}), '(dir_save_stack)\n', (7183, 7199), False, 'import os, glob, gc, h5py\n'), ((7673, 7738), 'os.path.join', 'os.path.join', (['dir_PQFLabel', "('PQFLabel_' + cmp_name + PQFLabel_sub)"], {}), "(dir_PQFLabel, 'PQFLabel_' + cmp_name + PQFLabel_sub)\n", (7685, 7738), False, 'import os, glob, gc, h5py\n'), ((8243, 8259), 'random.seed', 'random.seed', (['(666)'], {}), '(666)\n', (8254, 8259), False, 'import random, math\n'), ((8272, 8301), 
'random.shuffle', 'random.shuffle', (['PreIndex_list'], {}), '(PreIndex_list)\n', (8286, 8301), False, 'import random, math\n'), ((8314, 8330), 'random.seed', 'random.seed', (['(666)'], {}), '(666)\n', (8325, 8330), False, 'import random, math\n'), ((8343, 8372), 'random.shuffle', 'random.shuffle', (['SubIndex_list'], {}), '(SubIndex_list)\n', (8357, 8372), False, 'import random, math\n'), ((8385, 8401), 'random.seed', 'random.seed', (['(666)'], {}), '(666)\n', (8396, 8401), False, 'import random, math\n'), ((8414, 8443), 'random.shuffle', 'random.shuffle', (['MidIndex_list'], {}), '(MidIndex_list)\n', (8428, 8443), False, 'import random, math\n'), ((11470, 11517), 'math.ceil', 'math.ceil', (['(first_patch_cal / num_patch_PerFrame)'], {}), '(first_patch_cal / num_patch_PerFrame)\n', (11479, 11517), False, 'import random, math\n'), ((11737, 11783), 'math.ceil', 'math.ceil', (['(last_patch_cal / num_patch_PerFrame)'], {}), '(last_patch_cal / num_patch_PerFrame)\n', (11746, 11783), False, 'import random, math\n'), ((12715, 12762), 'math.ceil', 'math.ceil', (['(first_patch_cal / num_patch_PerFrame)'], {}), '(first_patch_cal / num_patch_PerFrame)\n', (12724, 12762), False, 'import random, math\n'), ((13017, 13063), 'math.ceil', 'math.ceil', (['(last_patch_cal / num_patch_PerFrame)'], {}), '(last_patch_cal / num_patch_PerFrame)\n', (13026, 13063), False, 'import random, math\n'), ((7763, 7792), 'h5py.File', 'h5py.File', (['PQFLabel_path', '"""r"""'], {}), "(PQFLabel_path, 'r')\n", (7772, 7792), False, 'import os, glob, gc, h5py\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from models import InferSent, NLINet, ClassificationNet
from data_utils import *
import os
import sys
import time
import argparse
import copy
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
import nltk
from utils.Error_all import Errors
from utils.statistic_all import stat, dis
from attack_agent import *
from pattern3.en import conjugate, lemma, lexeme
from nltk.corpus import wordnet
import inflection
import random
# Configure root logging once at import time.
# NOTE(review): `logging` is not imported in this file directly — presumably
# it arrives via one of the star imports (`from data_utils import *` /
# `from attack_agent import *`); confirm.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def load_infersent():
    """Load a pretrained InferSent (v2) sentence encoder.

    Restores the model weights from ``encoder/infersent2.pkl``, attaches
    the fastText word vectors and builds a 100k-word vocabulary.

    Returns:
        The ready-to-use InferSent encoder.
    """
    version = 2
    model_path = 'encoder/infersent%s.pkl' % version
    w2v_path = 'fastText/crawl-300d-2M.vec'
    encoder_params = {
        'bsize': 64,
        'word_emb_dim': 300,
        'enc_lstm_dim': 2048,
        'pool_type': 'max',
        'dpout_model': 0.0,
        'version': version,
    }
    encoder = InferSent(encoder_params)
    encoder.load_state_dict(torch.load(model_path))
    encoder.set_w2v_path(w2v_path)
    encoder.build_vocab_k_words(K=100000)
    return encoder
def args_parser():
    """Parse command-line arguments, seed all RNG sources, create the output dir.

    Returns:
        argparse.Namespace with every run setting; ``train_batch_size`` has
        already been divided by ``gradient_accumulation_steps``.
    """
    def _str2bool(value):
        # argparse's ``type=bool`` is a well-known trap: bool("False") is True,
        # so every non-empty string used to enable the flag. Parse the common
        # boolean spellings explicitly instead (backward compatible: the
        # default stays True and "True"/"False" now behave as expected).
        if isinstance(value, bool):
            return value
        if value.lower() in ("true", "t", "yes", "y", "1"):
            return True
        if value.lower() in ("false", "f", "no", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("expected a boolean, got %r" % value)

    # start parser
    parser = argparse.ArgumentParser()
    # requires parameters
    parser.add_argument("--target_model", default='infersent', type=str)
    parser.add_argument("--mode", default='fine-tune', help='options: fine-tune, score, attack', type=str)
    parser.add_argument("--data_dir", default="./", type=str)
    parser.add_argument("--pos_dir", default="./pos", type=str)
    parser.add_argument("--attack_dir", default="./attacked_dir", type=str)
    parser.add_argument("--output_dir", type=str, default="/home/yinfan/robustGrammar/fanyin_data/saved_models")
    parser.add_argument("--train_batch_size", default=32, type=int)
    parser.add_argument("--dev_batch_size", default=256, type=int)
    parser.add_argument("--test_batch_size", default=256, type=int)
    parser.add_argument("--learning_rate", default=1e-4, type=float)
    parser.add_argument("--num_train_epochs", default=5, type=int)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--checkpoint", default=50, type=int)
    parser.add_argument("--seed", type=int, default=2333)
    parser.add_argument("--export_model", type=_str2bool, default=True)
    parser.add_argument("--data_sign", type=str, default="MRPC")
    parser.add_argument("--dropout", type=float, default=0.1)
    parser.add_argument("--enc_lstm_dim", type=int, default=2048)
    parser.add_argument("--fc_dim", type=int, default=512)
    parser.add_argument("--adversarial", action='store_true')
    parser.add_argument("--attack_rate", type=float, default=0.15)
    parser.add_argument("--adv_type", type=str, default='greedy')
    parser.add_argument("--random_attack_file", type=str, default=None)
    parser.add_argument("--beam_size", type=int, default=5)
    parser.add_argument("--pop_size", type=int, default=60)
    parser.add_argument("--max_iter_rate", type=float, default=0.23)
    args = parser.parse_args()
    # The effective batch size is split across accumulation steps.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    # Seed every RNG source so runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    os.makedirs(args.output_dir, exist_ok=True)
    return args
def load_data(config):
    """Load examples for ``config.data_sign`` and wrap them in DataLoaders.

    Returns:
        (train_examples, dev_examples, test_examples, train_dataloader,
        dev_dataloader, test_dataloader, num_train_steps, label_list)
    """
    print("-*-" * 10)
    print("current data_sign: {}".format(config.data_sign))
    # Dispatch table instead of an if/elif chain.
    processor_by_sign = {
        "MRPC": MRPCProcessor,
        "QNLI": QNLIProcessor,
        "MNLI": MnliProcessor,
        "SST-2": SSTProcessor,
    }
    if config.data_sign not in processor_by_sign:
        raise ValueError("Please Notice that your data_sign DO NOT exits !!!!!")
    data_processor = processor_by_sign[config.data_sign]()
    label_list = data_processor.get_labels()
    print(label_list)
    # Load the raw example splits; an optional attack file overrides the test split.
    train_examples = data_processor.get_train_examples(config.data_dir)
    dev_examples = data_processor.get_dev_examples(config.data_dir)
    if config.random_attack_file is not None:
        test_examples = data_processor.get_test_examples(config.data_dir, config.random_attack_file)
    else:
        test_examples = data_processor.get_test_examples(config.data_dir)
    print(len(train_examples))
    print(len(dev_examples))
    print(len(test_examples))
    # Single-sentence tasks get a placeholder second sentence.
    for split in (train_examples, dev_examples, test_examples):
        for example in split:
            if example.text_b is None:
                example.text_b = '<p>'

    def _as_dataset(examples):
        # Bundle the raw strings and labels into a TextDataset.
        return TextDataset([example.text_a for example in examples],
                           [example.text_b for example in examples],
                           [example.label for example in examples])

    train_data = _as_dataset(train_examples)
    dev_data = _as_dataset(dev_examples)
    test_data = _as_dataset(test_examples)
    train_sampler = SequentialSampler(train_data)
    dev_sampler = SequentialSampler(dev_data)
    test_sampler = SequentialSampler(test_data)
    print("check loaded data")
    train_dataloader = DataLoader(train_data, sampler=train_sampler,
                                  batch_size=config.train_batch_size)
    dev_dataloader = DataLoader(dev_data, sampler=dev_sampler,
                                batch_size=config.dev_batch_size)
    test_dataloader = DataLoader(test_data, sampler=test_sampler,
                                 batch_size=config.test_batch_size)
    num_train_steps = int(
        len(train_examples) / config.train_batch_size / config.gradient_accumulation_steps * config.num_train_epochs)
    return train_examples, dev_examples, test_examples, train_dataloader, dev_dataloader, test_dataloader, num_train_steps, label_list
def load_model(config):
    """Build the InferSent encoder and the task classifier on GPU.

    In 'score'/'attack' mode the classifier weights are restored from
    ``{output_dir}/{data_sign}_{target_model}.bin``.

    Returns:
        (infersent, model, optimizer, device, n_gpu)
    """
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
    infersent = load_infersent()
    infersent.to(device)
    # SST-2 is single-sentence classification; everything else is a pair task.
    if config.data_sign == 'SST-2':
        model = ClassificationNet(config)
    else:
        model = NLINet(config)
    model.to(device)
    if config.mode in ('score', 'attack'):
        checkpoint_path = os.path.join(
            config.output_dir,
            "{}_{}.bin".format(config.data_sign, config.target_model))
        model.load_state_dict(torch.load(checkpoint_path))
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    return infersent, model, optimizer, device, n_gpu
def train(infersent, model, optimizer, train_dataloader, dev_dataloader, test_dataloader, config, \
        device, n_gpu, label_list):
    """Fine-tune the classifier on top of frozen InferSent sentence embeddings.

    Runs ``config.num_train_epochs`` epochs with gradient accumulation; every
    ``config.checkpoint`` effective steps it evaluates on dev and, on a new
    dev best, evaluates on test and optionally exports the model weights.
    Training can be stopped with Ctrl-C; best scores are printed either way.
    """
    model.train()
    global_step = 0
    label2idx = {label: i for i, label in enumerate(label_list)}
    idx2label = {i: label for i, label in enumerate(label_list)}
    # SST-2 is single-sentence; the other tasks encode a sentence pair.
    is_single = config.data_sign == "SST-2"
    loss_fc = CrossEntropyLoss().cuda()
    dev_best_acc = 0
    test_best_acc = 0
    try:
        for idx in range(int(config.num_train_epochs)):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            print("#######" * 10)
            print("EPOCH: ", str(idx))
            # Guards against evaluating on consecutive steps when the
            # checkpoint condition stays true.
            last_step_eval = False
            for step, batch in tqdm(enumerate(train_dataloader)):
                if is_single:
                    sent1s, _, label_ids = [list(item) for item in batch]
                else:
                    sent1s, sent2s, label_ids = [list(item) for item in batch]
                label_idx_ids = [label2idx[label] for label in label_ids]
                label_idx_ids = torch.tensor(label_idx_ids, dtype=torch.long).to(device)
                # The sentence encoder is frozen: no gradients through InferSent.
                with torch.no_grad():
                    if is_single:
                        sent1_tensor = infersent.encode(sent1s, tokenize=True)
                    else:
                        sent1_tensor = infersent.encode(sent1s, tokenize=True)
                        sent2_tensor = infersent.encode(sent2s, tokenize=True)
                if is_single:
                    sent1_tensor = torch.tensor(sent1_tensor)
                    sent1_tensor = sent1_tensor.to(device)
                    output = model(sent1_tensor)
                else:
                    sent1_tensor = torch.tensor(sent1_tensor)
                    sent2_tensor = torch.tensor(sent2_tensor)
                    sent1_tensor = sent1_tensor.to(device)
                    sent2_tensor = sent2_tensor.to(device)
                    output = model(sent1_tensor, sent2_tensor)
                loss = loss_fc(output, label_idx_ids)
                if n_gpu > 1:
                    loss = loss.mean()
                if config.gradient_accumulation_steps > 1:
                    loss = loss / config.gradient_accumulation_steps
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += sent1_tensor.size(0)
                nb_tr_steps += 1
                # Only step the optimizer every gradient_accumulation_steps batches.
                if (step + 1) % config.gradient_accumulation_steps == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                if nb_tr_steps % (config.checkpoint * config.gradient_accumulation_steps) == 0 and not last_step_eval:
                    print("-*-" * 15)
                    print("current training loss is : ")
                    print(loss.item())
                    # NOTE(review): eval_checkpoint switches the model to eval
                    # mode; verify training resumes in train mode afterwards.
                    tmp_dev_acc = eval_checkpoint(model, infersent, dev_dataloader, config, device, n_gpu, label_list, eval_sign="dev")
                    print("......" * 10)
                    print("DEV: acc")
                    print(tmp_dev_acc)
                    if tmp_dev_acc > dev_best_acc:
                        dev_best_acc = tmp_dev_acc
                        tmp_test_acc = eval_checkpoint(model, infersent, test_dataloader, config, device, n_gpu,
                                                        label_list, eval_sign="test")
                        print("......" * 10)
                        print("TEST: acc")
                        print(tmp_test_acc)
                        print("......" * 10)
                        if tmp_test_acc > test_best_acc:
                            test_best_acc = tmp_test_acc
                        # export model whenever the dev best improves
                        if config.export_model:
                            model_to_save = model.module if hasattr(model, "module") else model
                            output_model_file = os.path.join(config.output_dir, "{}_{}.bin".format(config.data_sign, config.target_model))
                            torch.save(model_to_save.state_dict(), output_model_file)
                    print("-*-" * 15)
                    last_step_eval = True
                else:
                    last_step_eval = False
    except KeyboardInterrupt:
        # Manual stop: report the best scores seen so far, then fall through.
        print("=&=" * 15)
        print("DEV: current best acc")
        print(dev_best_acc)
        print("TEST: current best acc")
        print(test_best_acc)
        print("=&=" * 15)
    # Final summary (printed on normal completion and after an interrupt).
    print("=&=" * 15)
    print("DEV: current best acc")
    print(dev_best_acc)
    print("TEST: current best acc")
    print(test_best_acc)
    print("=&=" * 15)
def eval_checkpoint(model_object, infersent, eval_dataloader, config,
                    device, n_gpu, label_list, eval_sign="dev"):
    """Evaluate ``model_object`` on ``eval_dataloader`` and return accuracy.

    Args:
        model_object: the classifier head (called with 1 or 2 sentence tensors).
        infersent: frozen sentence encoder with an ``encode`` method.
        eval_dataloader: yields (sent1s, sent2s, label_ids) batches.
        config: run configuration; only ``data_sign`` is read here.
        device: torch device for the sentence tensors.
        n_gpu, eval_sign: kept for interface compatibility (unused).

    Returns:
        float accuracy in [0, 1]; 0.0 for an empty dataloader.
    """
    # Remember the caller's mode so evaluation does not leave the model in
    # eval mode for subsequent training steps (the original never restored it).
    was_training = model_object.training
    model_object.eval()
    label2idx = {label: i for i, label in enumerate(label_list)}
    is_single = config.data_sign == 'SST-2'
    pred_lst = []
    gold_lst = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            if is_single:
                sent1s, _, label_ids = [list(item) for item in batch]
                sent1_tensor = infersent.encode(sent1s, tokenize=True)
            else:
                sent1s, sent2s, label_ids = [list(item) for item in batch]
                sent1_tensor = infersent.encode(sent1s, tokenize=True)
                sent2_tensor = infersent.encode(sent2s, tokenize=True)
            label_idx_ids = [label2idx[label] for label in label_ids]
            if is_single:
                sent1_tensor = torch.tensor(sent1_tensor).to(device)
                logits = model_object(sent1_tensor)
            else:
                sent1_tensor = torch.tensor(sent1_tensor).to(device)
                sent2_tensor = torch.tensor(sent2_tensor).to(device)
                logits = model_object(sent1_tensor, sent2_tensor)
            preds = np.argmax(logits.cpu().detach().numpy(), axis=-1)
            pred_lst += list(preds)
            gold_lst += label_idx_ids
    if was_training:
        model_object.train()
    if not pred_lst:
        # Avoid ZeroDivisionError on an empty dataloader.
        return 0.0
    correct = sum(1 for pred, gold in zip(pred_lst, gold_lst) if pred == gold)
    return 1.0 * correct / len(pred_lst)
def random_attack(config, infersent, model, device, n_gpu, dev_loader, test_loader, label_list):
    """Measure the accuracy drop caused by pre-generated random attacks.

    ``dev_loader`` yields the clean inputs and ``test_loader`` their attacked
    counterparts; both are assumed to enumerate the same examples in the same
    order (TODO confirm against the data preparation code). Prints the
    fraction of originally-correct predictions that flip under attack.
    """
    model.eval()
    label2idx = {label: i for i, label in enumerate(label_list)}
    is_single = config.data_sign == 'SST-2'

    def _predict(batch):
        # Encode one batch with InferSent and return argmax predictions
        # together with the raw label strings.
        if is_single:
            sent1s, _, label_ids = [list(item) for item in batch]
            sent1_tensor = torch.tensor(infersent.encode(sent1s, tokenize=True)).to(device)
            logits = model(sent1_tensor)
        else:
            sent1s, sent2s, label_ids = [list(item) for item in batch]
            sent1_tensor = torch.tensor(infersent.encode(sent1s, tokenize=True)).to(device)
            sent2_tensor = torch.tensor(infersent.encode(sent2s, tokenize=True)).to(device)
            logits = model(sent1_tensor, sent2_tensor)
        preds = np.argmax(logits.cpu().detach().numpy(), axis=-1)
        return list(preds), label_ids

    pred_lst = []
    gold_lst = []
    error_lst = []
    with torch.no_grad():
        # Predictions on the clean inputs.
        for batch in dev_loader:
            preds, label_ids = _predict(batch)
            pred_lst += preds
            gold_lst += [label2idx[label] for label in label_ids]
        # Predictions on the attacked inputs (gold labels not needed here).
        for batch in test_loader:
            preds, _ = _predict(batch)
            error_lst += preds
    correct = 0
    flipped = 0
    for pred, err, gold in zip(pred_lst, error_lst, gold_lst):
        if pred != gold:
            # Only examples the clean model got right can "drop".
            continue
        correct += 1
        if pred != err:
            flipped += 1
    # Guard the empty / all-wrong case instead of dividing by zero; the
    # original also shadowed the builtin ``all`` with its counter.
    drop = 1.0 * flipped / correct if correct else 0.0
    print('acc drop: ', drop)
def adversarial_attack(config, infersent, model, device, n_gpu, dev_examples, dev_loader, test_loader, label_list, type='greedy'):
    """Run a grammar-error adversarial attack against ``model``.

    Args:
        type: attack strategy — 'greedy', 'beam_search', 'genetic', or
            'random' (the name shadows the builtin but is kept for
            backward compatibility with keyword callers).

    Raises:
        ValueError: for an unknown ``type`` (the original fell through to a
            confusing NameError on ``agent``).
    """
    preps, dets, trans = stat()
    error_matrix = Errors(preps, dets, trans)
    if type == 'greedy':
        agent = infersent_greedy_attack_agent(config, error_matrix, infersent, device, label_list)
    elif type == 'beam_search':
        agent = infersent_beam_search_attack_agent(config, error_matrix, infersent, device, label_list)
    elif type == 'genetic':
        agent = infersent_genetic_attack_agent(config, error_matrix, infersent, device, label_list)
    elif type == 'random':
        random_attack(config, infersent, model, device, n_gpu, dev_loader, test_loader, label_list)
        return
    else:
        raise ValueError("unknown attack type: {!r}".format(type))
    logger.info('start attacking')
    per_rate, att_rate = agent.attack(model, dev_examples, dev_loader)
    logger.info('{} attack finished: attack success rate {:.2f}%, changed {:.2f}% tokens'.format(config.adv_type,
                                                                                                att_rate, per_rate))
def main():
    """Entry point: fine-tune, score, or attack according to ``--mode``."""
    config = args_parser()
    if config.mode == 'fine-tune':
        (train_examples, dev_examples, test_examples, train_loader, dev_loader,
         test_loader, num_train_steps, label_list) = load_data(config)
        config.n_classes = len(label_list)
        infersent, model, optimizer, device, n_gpu = load_model(config)
        train(infersent, model, optimizer, train_loader, dev_loader, test_loader,
              config, device, n_gpu, label_list)
        return
    # 'score' / 'attack' modes process one example at a time.
    config.train_batch_size = 1
    config.dev_batch_size = 1
    config.test_batch_size = 1
    (train_examples, dev_examples, test_examples, train_loader, dev_loader,
     test_loader, num_train_steps, label_list) = load_data(config)
    config.n_classes = len(label_list)
    infersent, model, optimizer, device, n_gpu = load_model(config)
    model.eval()
    # Sanity-check the restored checkpoint before attacking/scoring.
    dev_acc = eval_checkpoint(model, infersent, dev_loader, config, device,
                              n_gpu, label_list, eval_sign="dev")
    logger.info('checked loaded model, current dev score: {}'.format(dev_acc))
    if not os.path.exists(config.pos_dir):
        os.mkdir(config.pos_dir)
    if not os.path.exists(config.attack_dir):
        os.mkdir(config.attack_dir)
    config.pos_file = os.path.join(
        config.pos_dir,
        '{}_{}_pos.txt'.format(config.data_sign, config.target_model))
    config.output_att_file = os.path.join(
        config.attack_dir,
        '{}_{}_{}.txt'.format(config.data_sign, config.target_model, config.adv_type))
    if config.mode == 'score':
        infersent_adversarial_scoring(config, infersent, model, device, dev_loader, label_list)
    else:
        adversarial_attack(config, infersent, model, device, n_gpu, dev_examples,
                           dev_loader, test_loader, label_list, type=config.adv_type)


if __name__ == "__main__":
    main()
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.cuda.device_count",
"torch.device",
"torch.no_grad",
"utils.Error_all.Errors",
"torch.load",
"os.path.exists",
"utils.statistic_all.stat",
"random.seed",
"models.InferSent",
"models.NLINet",
"torch.manual_... | [((995, 1018), 'models.InferSent', 'InferSent', (['params_model'], {}), '(params_model)\n', (1004, 1018), False, 'from models import InferSent, NLINet, ClassificationNet\n'), ((1273, 1298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1296, 1298), False, 'import argparse\n'), ((3260, 3282), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3271, 3282), False, 'import random\n'), ((3287, 3312), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3301, 3312), True, 'import numpy as np\n'), ((3317, 3345), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3334, 3345), False, 'import torch\n'), ((3351, 3388), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (3377, 3388), False, 'import torch\n'), ((3394, 3437), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (3405, 3437), False, 'import os\n'), ((6550, 6570), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6562, 6570), False, 'import torch\n'), ((6583, 6608), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6606, 6608), False, 'import torch\n'), ((16841, 16847), 'utils.statistic_all.stat', 'stat', ([], {}), '()\n', (16845, 16847), False, 'from utils.statistic_all import stat, dis\n'), ((16867, 16893), 'utils.Error_all.Errors', 'Errors', (['preps', 'dets', 'trans'], {}), '(preps, dets, trans)\n', (16873, 16893), False, 'from utils.Error_all import Errors\n'), ((1049, 1071), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1059, 1071), False, 'import torch\n'), ((6723, 6737), 'models.NLINet', 'NLINet', (['config'], {}), '(config)\n', (6729, 6737), False, 'from models import InferSent, NLINet, ClassificationNet\n'), ((6764, 6789), 'models.ClassificationNet', 'ClassificationNet', (['config'], {}), '(config)\n', (6781, 6789), 
False, 'from models import InferSent, NLINet, ClassificationNet\n'), ((7193, 7221), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (7214, 7221), False, 'import torch\n'), ((7057, 7084), 'torch.load', 'torch.load', (['model_dict_path'], {}), '(model_dict_path)\n', (7067, 7084), False, 'import torch\n'), ((7642, 7660), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (7658, 7660), False, 'from torch.nn import CrossEntropyLoss\n'), ((12474, 12489), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12487, 12489), False, 'import torch\n'), ((13536, 13562), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (13545, 13562), True, 'import numpy as np\n'), ((14184, 14199), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14197, 14199), False, 'import torch\n'), ((15232, 15258), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (15241, 15258), True, 'import numpy as np\n'), ((15393, 15408), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15406, 15408), False, 'import torch\n'), ((16371, 16397), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (16380, 16397), True, 'import numpy as np\n'), ((18825, 18855), 'os.path.exists', 'os.path.exists', (['config.pos_dir'], {}), '(config.pos_dir)\n', (18839, 18855), False, 'import os\n'), ((18869, 18893), 'os.mkdir', 'os.mkdir', (['config.pos_dir'], {}), '(config.pos_dir)\n', (18877, 18893), False, 'import os\n'), ((19027, 19060), 'os.path.exists', 'os.path.exists', (['config.attack_dir'], {}), '(config.attack_dir)\n', (19041, 19060), False, 'import os\n'), ((19074, 19101), 'os.mkdir', 'os.mkdir', (['config.attack_dir'], {}), '(config.attack_dir)\n', (19082, 19101), False, 'import os\n'), ((13021, 13047), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (13033, 13047), False, 'import torch\n'), ((13204, 13230), 'torch.tensor', 'torch.tensor', 
(['sent1_tensor'], {}), '(sent1_tensor)\n', (13216, 13230), False, 'import torch\n'), ((13262, 13288), 'torch.tensor', 'torch.tensor', (['sent2_tensor'], {}), '(sent2_tensor)\n', (13274, 13288), False, 'import torch\n'), ((14731, 14757), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (14743, 14757), False, 'import torch\n'), ((14907, 14933), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (14919, 14933), False, 'import torch\n'), ((14965, 14991), 'torch.tensor', 'torch.tensor', (['sent2_tensor'], {}), '(sent2_tensor)\n', (14977, 14991), False, 'import torch\n'), ((15870, 15896), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (15882, 15896), False, 'import torch\n'), ((16046, 16072), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (16058, 16072), False, 'import torch\n'), ((16104, 16130), 'torch.tensor', 'torch.tensor', (['sent2_tensor'], {}), '(sent2_tensor)\n', (16116, 16130), False, 'import torch\n'), ((8418, 8433), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8431, 8433), False, 'import torch\n'), ((8797, 8823), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (8809, 8823), False, 'import torch\n'), ((8989, 9015), 'torch.tensor', 'torch.tensor', (['sent1_tensor'], {}), '(sent1_tensor)\n', (9001, 9015), False, 'import torch\n'), ((9051, 9077), 'torch.tensor', 'torch.tensor', (['sent2_tensor'], {}), '(sent2_tensor)\n', (9063, 9077), False, 'import torch\n'), ((8340, 8385), 'torch.tensor', 'torch.tensor', (['label_idx_ids'], {'dtype': 'torch.long'}), '(label_idx_ids, dtype=torch.long)\n', (8352, 8385), False, 'import torch\n')] |
"""
SPDX-FileCopyrightText: 2021 International Photoacoustic Standardisation Consortium (IPASC)
SPDX-FileCopyrightText: 2021 <NAME>
SPDX-FileCopyrightText: 2021 <NAME>
SPDX-License-Identifier: MIT
"""
import numpy as np
from image_reconstruction.reconstruction_utils.beamforming import back_projection
from image_reconstruction.reconstruction_algorithms import ReconstructionAlgorithm
from image_reconstruction.reconstruction_utils.pre_processing import butter_bandpass_filter
from image_reconstruction.reconstruction_utils.post_processing import hilbert_transform_1_d
from image_reconstruction.reconstruction_utils.post_processing import log_compression
class BackProjection(ReconstructionAlgorithm):
    """Baseline delay-and-sum (back projection) reconstruction algorithm."""

    def implementation(self, time_series_data: np.ndarray,
                       detection_elements: dict,
                       field_of_view: np.ndarray,
                       **kwargs):
        """
        Implementation of a baseline delay and sum algorithm without any additional features.

        Parameters
        ----------
        time_series_data: A 2D numpy array with the following internal array definition:
                          [detectors, time samples]
        detection_elements: A dictionary that describes the detection geometry.
                            The dictionary contains three entries:
                            ** "positions": The positions of the detection elements relative to the field of view
                            ** "orientations": The orientations of the detection elements
                            ** "sizes": The sizes of the detection elements.
        field_of_view: A 1D 6 element-long numpy array that contains the extent of the field of view in x, y and
                       z direction in the same coordinate system as the detection element positions.
        kwargs: the list of parameters for the delay and sum reconstruction includes the following parameters:
            ** 'spacing_m' the target isotropic reconstruction spacing in units of meters
            ** 'speed_of_sound_m_s' the target speed of sound in units of meters per second
            ** 'lowcut' the highpass frequency for the bandpass filter
            ** 'highcut' the lowpass frequency for the bandpass filter
            ** 'filter_order' the order of the butter filter
            ** 'envelope' whether envelope detection should be applied at all
            ** 'envelope_type' the type of envelope detection to be performed
            ** 'p_factor' the p-factor TODO include paper reference
            ** 'p_SCF' the SCF-factor TODO include paper reference
            ** 'p_PCF' the PCF-factor TODO include paper reference
            ** 'fnumber' the fnumber TODO include paper reference

        Returns
        -------
        A reconstructed image
        """
        time_series_data = time_series_data.astype(float)

        # Parse kwargs with sensible defaults; dict.get yields the same
        # behavior as the previous per-key `if "key" in kwargs` checks.
        speed_of_sound_in_m_per_s = kwargs.get("speed_of_sound_m_s", 1540)
        spacing_m = kwargs.get("spacing_m", 0.0005)
        lowcut = kwargs.get("lowcut", None)
        highcut = kwargs.get("highcut", None)
        filter_order = kwargs.get("filter_order", 5)
        envelope = kwargs.get("envelope", False)
        envelope_type = kwargs.get("envelope_type", None)
        p_factor = kwargs.get("p_factor", 1)
        p_scf = kwargs.get("p_SCF", 0)
        p_pcf = kwargs.get("p_PCF", 0)
        fnumber = kwargs.get("fnumber", 0)

        # Optional band-pass filtering of the raw time series before beamforming.
        if lowcut is not None or highcut is not None:
            time_series_data = butter_bandpass_filter(signal=time_series_data,
                                                        sampling_rate=self.ipasc_data.get_sampling_rate(),
                                                        lowcut=lowcut,
                                                        highcut=highcut,
                                                        order=filter_order)

        reconstructed = back_projection(time_series_data, detection_elements, self.ipasc_data.get_sampling_rate(),
                                        field_of_view, spacing_m, speed_of_sound_in_m_per_s,
                                        fnumber, p_scf, p_factor, p_pcf)

        # Optional envelope detection / post-processing of the beamformed image.
        if envelope:
            if envelope_type == "hilbert":
                # hilbert transform
                reconstructed = hilbert_transform_1_d(reconstructed, axis=0)
            elif envelope_type == "log":
                # hilbert transform + log-compression on 40 dB
                reconstructed = log_compression(reconstructed, axis=0, dynamic=40)
            elif envelope_type == "zero":
                # zero forcing
                reconstructed[reconstructed < 0] = 0
            elif envelope_type == "abs":
                # absolute value
                reconstructed = np.abs(reconstructed)
            else:
                print("WARN: No envelope type specified!")

        return reconstructed
| [
"image_reconstruction.reconstruction_utils.post_processing.log_compression",
"image_reconstruction.reconstruction_utils.post_processing.hilbert_transform_1_d",
"numpy.abs"
] | [((4857, 4901), 'image_reconstruction.reconstruction_utils.post_processing.hilbert_transform_1_d', 'hilbert_transform_1_d', (['reconstructed'], {'axis': '(0)'}), '(reconstructed, axis=0)\n', (4878, 4901), False, 'from image_reconstruction.reconstruction_utils.post_processing import hilbert_transform_1_d\n'), ((5038, 5088), 'image_reconstruction.reconstruction_utils.post_processing.log_compression', 'log_compression', (['reconstructed'], {'axis': '(0)', 'dynamic': '(40)'}), '(reconstructed, axis=0, dynamic=40)\n', (5053, 5088), False, 'from image_reconstruction.reconstruction_utils.post_processing import log_compression\n'), ((5321, 5342), 'numpy.abs', 'np.abs', (['reconstructed'], {}), '(reconstructed)\n', (5327, 5342), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
# Sample positions from 0 to 20 in steps of 0.25.
xs = np.arange(0, 20, 0.25)
# Evaluate the curve F(x) = 10 * (1 + 3.33 * 0.50 * sqrt(x) - 0.35 * x).
ys = 10*(1 + 3.33 * 0.50*np.sqrt(xs) - .35*xs)

figure = plt.figure()
axes = figure.add_subplot(111)
# axes = figure.add_subplot(111, projection='3d')
axes.plot(xs, ys)

plt.xlim([0,20])
plt.ylim([0,80])
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Meshgrid")
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] | [((108, 130), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(0.25)'], {}), '(0, 20, 0.25)\n', (117, 130), True, 'import numpy as np\n'), ((181, 193), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (191, 193), True, 'import matplotlib.pyplot as plt\n'), ((279, 296), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 20]'], {}), '([0, 20])\n', (287, 296), True, 'import matplotlib.pyplot as plt\n'), ((296, 313), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 80]'], {}), '([0, 80])\n', (304, 313), True, 'import matplotlib.pyplot as plt\n'), ((313, 328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (323, 328), True, 'import matplotlib.pyplot as plt\n'), ((329, 344), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (339, 344), True, 'import matplotlib.pyplot as plt\n'), ((345, 366), 'matplotlib.pyplot.title', 'plt.title', (['"""Meshgrid"""'], {}), "('Meshgrid')\n", (354, 366), True, 'import matplotlib.pyplot as plt\n'), ((367, 377), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (375, 377), True, 'import matplotlib.pyplot as plt\n'), ((155, 165), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (162, 165), True, 'import numpy as np\n')] |
import argparse
import glob
import sys
import xml.etree.cElementTree as etree
from datetime import datetime
from math import atan2, cos, radians, sin, sqrt
from typing import Any, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
DF_COLS_DICT = ["lat", "lon", "ele", "dist"]
MIN_FILES = 4
R = 6373.0
def strip_namespaces(tag_elem: str) -> str:
    """Return the local tag name, dropping any leading XML namespace part."""
    # rpartition yields ("", "", tag_elem) when no "}" is present,
    # so the original string is returned unchanged in that case.
    return tag_elem.rpartition("}")[2]
def get_filenames(args: argparse.Namespace) -> Tuple[List[str], int]:
    """Collect the filenames matching the CLI glob pattern.

    Args:
        args: Parsed CLI arguments; ``args.dir`` is a glob pattern.

    Returns:
        A tuple of (matching filenames, number of matches).
    """
    # NOTE: removed a leftover debug statement (`print(type(args))`).
    filenames = glob.glob(args.dir)
    return filenames, len(filenames)
def parse_gpx(xml_file: str, df_cols: list) -> pd.DataFrame:
    """Stream-parse one gpx file into a DataFrame of track points.

    For every <trkpt> element the coordinates, the elevation and the
    cumulative haversine distance (in metres) are collected.

    NOTE(review): the value read from df_cols[1] ("lon") is stored first and
    therefore lands in the "lat" column, and vice versa -- presumably
    intentional so that plotting "lat" vs "lon" gives the usual map
    orientation; confirm against plot_graph's usage.

    Args:
        xml_file: Path of the gpx file to parse.
        df_cols: Column names, expected order: ["lat", "lon", "ele", "dist"].

    Returns:
        DataFrame with one row per track point and columns df_cols.
    """
    trkpt_count = -1
    rows: "List[Any]" = []
    trkpt_lst: "List[float]" = []
    # Previous point as [coord, coord, ele]; [0, 0, 0] marks "no previous point yet".
    prev_trkpt_lst: "List[float]" = [0, 0, 0]
    got_coords = False
    total_distance: float = 0
    distance_delta: float = 0
    # iterparse streams the file instead of loading the whole DOM.
    for event, elem in etree.iterparse(xml_file, events=("start", "end")):
        tag_names = strip_namespaces(elem.tag)  # strips all namespaces from input file
        if event == "start":
            if tag_names == "wpt":
                pass
            elif tag_names == "trkpt":
                trkpt_lst = []  # clear trkpt_lst at the start of the next loop
                trkpt_lst.append(float(elem.attrib[df_cols[1]]))
                trkpt_lst.append(float(elem.attrib[df_cols[0]]))
                trkpt_count += 1
                got_coords = True
            elif (tag_names == "ele") and got_coords is True:
                if elem.text is not None:
                    trkpt_lst.append((float(elem.text)))  # ele
                    if prev_trkpt_lst[0] == 0:
                        # First point of the track: cumulative distance starts at 0.
                        trkpt_lst.append(0)
                    else:
                        # Haversine great-circle distance to the previous point.
                        dlon = radians(trkpt_lst[0]) - radians(prev_trkpt_lst[0])
                        dlat = radians(trkpt_lst[1]) - radians(prev_trkpt_lst[1])
                        a = (
                            sin(dlat / 2) ** 2
                            + cos(radians(prev_trkpt_lst[1]))
                            * cos(radians(trkpt_lst[1]))
                            * sin(dlon / 2) ** 2
                        )
                        c = 2 * atan2(np.sqrt(a), sqrt(1 - a))
                        # R is the Earth radius in km; scale by 1000 for metres.
                        distance_delta = R * c * 1000
                        total_distance = round(total_distance + distance_delta, 2)
                        trkpt_lst.append(total_distance)
                    prev_trkpt_lst = trkpt_lst[:]
                    rows.append(
                        {df_cols[i]: trkpt_lst[i] for i, _ in enumerate(df_cols)}
                    )
    gpx_dataframe = pd.DataFrame(rows, columns=df_cols)
    # Report the total track length in km.
    print(round(gpx_dataframe["dist"].iloc[-1] / 1000, 2), "km")
    return gpx_dataframe
def create_outer_dataframe(filenames: "List[str]") -> List:
    """Parse every gpx file into its own DataFrame.

    Exits the process with status 1 when MIN_FILES or fewer files are given
    (i.e. at least MIN_FILES + 1 files are required).

    Args:
        filenames: Paths of the gpx files to parse.

    Returns:
        A list with one DataFrame per input file.
    """
    if len(filenames) <= MIN_FILES:
        # Derive the required count from the constant so the message
        # cannot drift out of sync with MIN_FILES.
        print(f"None or too few gpx files found. You need at least {MIN_FILES + 1} files")
        sys.exit(1)
    print(f"{len(filenames)} file(s) found. Parsing GPX files...")
    # Iterate the filenames directly instead of indexing by position.
    return [parse_gpx(filename, DF_COLS_DICT) for filename in filenames]
def create_grid(file_num: int) -> Tuple[int, int, List[Tuple[int, int]]]:
    """Derive a near-square (rows, cols) grid for arranging small multiples."""
    # Floor of the square root gives the row count of a near-square layout.
    rows_count = int(np.sqrt(file_num))
    # One extra column guarantees enough cells for every plot.
    cols_count = int(file_num / rows_count) + 1
    print(f"Grid layout: ({ rows_count }, { cols_count }) ")
    # Map each plot index onto its (col, row) grid coordinate.
    grid_layout = [divmod(plot_index, rows_count) for plot_index in range(file_num)]
    print("Creating plot(s)...")
    return cols_count, rows_count, grid_layout
def plot_graph(
    cols_count: int,
    rows_count: Any,
    file_num: int,
    grid_layout: List[Any],
    parsed_dataframe: List[Any],
    mode: Tuple[str, str],
):
    """Draw each track/elevation curve on its grid cell, axes hidden."""
    _, axes = plt.subplots(cols_count, rows_count)
    # Hide all axis decorations so only the bare curves remain.
    for single_axis in np.ravel(axes):
        single_axis.axis("off")
    x_column, y_column = mode
    for plot_index in range(file_num):
        axes[grid_layout[plot_index]].plot(
            parsed_dataframe[plot_index][x_column], parsed_dataframe[plot_index][y_column]
        )
def main():
    """Print small multiples from gpx files."""
    start_time = datetime.now()
    if sys.version_info <= (3, 6, 0):
        print("This script needs Python >3.6")
        sys.exit(1)
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        dest="dir",
        metavar="gpx_directory",
        type=str,
        # BUGFIX: the f-string prefix was missing, so the literal text
        # "{MIN_FILES}" appeared in the help output instead of the value.
        help=f"A directory containing at least {MIN_FILES} gpx files",
    )
    args_parser.add_argument(
        "-e", "--elevation", help="Plot the elevation graphs", action="store_true"
    )
    args_parser.add_argument(
        "-t", "--tracks", help="Plot the track outlines", action="store_true"
    )
    args = args_parser.parse_args()

    # Parse all matching files and lay out one subplot per file.
    filenames, file_num = get_filenames(args)
    parsed_dataframe = create_outer_dataframe(filenames)
    cols_count, rows_count, grid_layout = create_grid(file_num)

    if args.elevation:
        # Elevation profile: distance on x, elevation on y.
        mode = ("dist", "ele")
        plot_graph(
            cols_count, rows_count, file_num, grid_layout, parsed_dataframe, mode
        )
    if args.tracks:
        # Track outline: plot the raw coordinate columns.
        mode = ("lat", "lon")
        plot_graph(
            cols_count, rows_count, file_num, grid_layout, parsed_dataframe, mode
        )

    end_time = datetime.now()
    print(f"Total time: {str(end_time - start_time).split('.')[0]}")
    plt.show()
if __name__ == "__main__":
main()
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"math.sqrt",
"numpy.ravel",
"math.radians",
"xml.etree.cElementTree.iterparse",
"datetime.datetime.now",
"math.sin",
"glob.glob",
"matplotlib.pyplot.subplots",
"sys.exit",
"numpy.sqrt"
] | [((679, 698), 'glob.glob', 'glob.glob', (['args.dir'], {}), '(args.dir)\n', (688, 698), False, 'import glob\n'), ((1101, 1151), 'xml.etree.cElementTree.iterparse', 'etree.iterparse', (['xml_file'], {'events': "('start', 'end')"}), "(xml_file, events=('start', 'end'))\n", (1116, 1151), True, 'import xml.etree.cElementTree as etree\n'), ((2829, 2864), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'df_cols'}), '(rows, columns=df_cols)\n', (2841, 2864), True, 'import pandas as pd\n'), ((4097, 4133), 'matplotlib.pyplot.subplots', 'plt.subplots', (['cols_count', 'rows_count'], {}), '(cols_count, rows_count)\n', (4109, 4133), True, 'import matplotlib.pyplot as plt\n'), ((4150, 4162), 'numpy.ravel', 'np.ravel', (['ax'], {}), '(ax)\n', (4158, 4162), True, 'import numpy as np\n'), ((4413, 4427), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4425, 4427), False, 'from datetime import datetime\n'), ((4553, 4578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4576, 4578), False, 'import argparse\n'), ((5536, 5550), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5548, 5550), False, 'from datetime import datetime\n'), ((5625, 5635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5633, 5635), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3193), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3190, 3193), False, 'import sys\n'), ((3595, 3612), 'numpy.sqrt', 'np.sqrt', (['file_num'], {}), '(file_num)\n', (3602, 3612), True, 'import numpy as np\n'), ((4522, 4533), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4530, 4533), False, 'import sys\n'), ((1958, 1979), 'math.radians', 'radians', (['trkpt_lst[0]'], {}), '(trkpt_lst[0])\n', (1965, 1979), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((1982, 2008), 'math.radians', 'radians', (['prev_trkpt_lst[0]'], {}), '(prev_trkpt_lst[0])\n', (1989, 2008), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2040, 2061), 
'math.radians', 'radians', (['trkpt_lst[1]'], {}), '(trkpt_lst[1])\n', (2047, 2061), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2064, 2090), 'math.radians', 'radians', (['prev_trkpt_lst[1]'], {}), '(prev_trkpt_lst[1])\n', (2071, 2090), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2150, 2163), 'math.sin', 'sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (2153, 2163), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2401, 2411), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (2408, 2411), True, 'import numpy as np\n'), ((2413, 2424), 'math.sqrt', 'sqrt', (['(1 - a)'], {}), '(1 - a)\n', (2417, 2424), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2318, 2331), 'math.sin', 'sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (2321, 2331), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2203, 2229), 'math.radians', 'radians', (['prev_trkpt_lst[1]'], {}), '(prev_trkpt_lst[1])\n', (2210, 2229), False, 'from math import atan2, cos, radians, sin, sqrt\n'), ((2265, 2286), 'math.radians', 'radians', (['trkpt_lst[1]'], {}), '(trkpt_lst[1])\n', (2272, 2286), False, 'from math import atan2, cos, radians, sin, sqrt\n')] |
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.boxes as box_utils
import utils.blob as blob_utils
import utils.net as net_utils
from core.config import cfg
import numpy as np
from sklearn.cluster import KMeans
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def PCL(boxes, cls_prob, im_labels, cls_prob_new):
    """Build Proposal Cluster Learning supervision from graph-centre proposals.

    Args:
        boxes: (num_rois, 4) proposal boxes of the single image.
        cls_prob: per-proposal class scores from the previous refinement
            branch (torch tensor; may contain a leading background column).
        im_labels: (1, num_classes) binary image-level labels.
        cls_prob_new: scores of the current branch; only clamped here,
            otherwise unused in this function.

    Returns:
        Dict of numpy arrays: pseudo labels, per-proposal loss weights,
        cluster assignments and bbox regression targets/weights.
    """
    # Detach to CPU numpy arrays; the clustering code below is numpy-only.
    cls_prob = cls_prob.data.cpu().numpy()
    cls_prob_new = cls_prob_new.data.cpu().numpy()
    # One extra score column relative to the labels means column 0 is
    # background -- drop it.
    if cls_prob.shape[1] != im_labels.shape[1]:
        cls_prob = cls_prob[:, 1:]
    # Clamp scores into (eps, 1 - eps) for numerical stability.
    # NOTE(review): .numpy() can share storage with the CPU tensor, so this
    # in-place clamping may also modify the caller's tensors -- confirm intended.
    eps = 1e-9
    cls_prob[cls_prob < eps] = eps
    cls_prob[cls_prob > 1 - eps] = 1 - eps
    cls_prob_new[cls_prob_new < eps] = eps
    cls_prob_new[cls_prob_new > 1 - eps] = 1 - eps

    # Per-class graph centres act as pseudo ground-truth boxes.
    proposals = _get_graph_centers(boxes.copy(), cls_prob.copy(),
                                   im_labels.copy())

    # Assign every proposal to its closest pseudo-GT cluster.
    labels, cls_loss_weights, gt_assignment, bbox_targets, bbox_inside_weights, bbox_outside_weights \
        = get_proposal_clusters(boxes.copy(), proposals, im_labels.copy())

    return {'labels' : labels.reshape(1, -1).astype(np.int64).copy(),
            'cls_loss_weights' : cls_loss_weights.reshape(1, -1).astype(np.float32).copy(),
            'gt_assignment' : gt_assignment.reshape(1, -1).astype(np.float32).copy(),
            'bbox_targets' : bbox_targets.astype(np.float32).copy(),
            'bbox_inside_weights' : bbox_inside_weights.astype(np.float32).copy(),
            'bbox_outside_weights' : bbox_outside_weights.astype(np.float32).copy()}
def OICR(boxes, cls_prob, im_labels, cls_prob_new):
    """Online Instance Classifier Refinement supervision.

    Same interface and return structure as PCL(), but the pseudo ground
    truth is simply the single highest-scoring proposal per image-level
    class instead of graph centres. `cls_prob_new` is converted to numpy
    but otherwise unused here.
    """
    # Detach to CPU numpy arrays for the numpy-only selection code below.
    cls_prob = cls_prob.data.cpu().numpy()
    cls_prob_new = cls_prob_new.data.cpu().numpy()
    # Drop the background column when the score matrix has one extra class.
    if cls_prob.shape[1] != im_labels.shape[1]:
        cls_prob = cls_prob[:, 1:]
    # Clamp scores into (eps, 1 - eps) for numerical stability.
    # NOTE(review): this mutates the numpy view in place; see PCL() for the
    # potential aliasing with the caller's CPU tensor.
    eps = 1e-9
    cls_prob[cls_prob < eps] = eps
    cls_prob[cls_prob > 1 - eps] = 1 - eps

    # One pseudo-GT box per present class: the top-scoring proposal.
    proposals = _get_highest_score_proposals(boxes, cls_prob, im_labels)

    # Assign every proposal to its closest pseudo-GT box.
    labels, cls_loss_weights, gt_assignment, bbox_targets, bbox_inside_weights, bbox_outside_weights \
        = get_proposal_clusters(boxes.copy(), proposals, im_labels.copy())

    return {'labels' : labels.reshape(1, -1).astype(np.int64).copy(),
            'cls_loss_weights' : cls_loss_weights.reshape(1, -1).astype(np.float32).copy(),
            'gt_assignment' : gt_assignment.reshape(1, -1).astype(np.float32).copy(),
            'bbox_targets' : bbox_targets.astype(np.float32).copy(),
            'bbox_inside_weights' : bbox_inside_weights.astype(np.float32).copy(),
            'bbox_outside_weights' : bbox_outside_weights.astype(np.float32).copy()}
def _get_highest_score_proposals(boxes, cls_prob, im_labels):
"""Get proposals with highest score."""
num_images, num_classes = im_labels.shape
assert num_images == 1, 'batch size shoud be equal to 1'
im_labels_tmp = im_labels[0, :]
gt_boxes = np.zeros((0, 4), dtype=np.float32)
gt_classes = np.zeros((0, 1), dtype=np.int32)
gt_scores = np.zeros((0, 1), dtype=np.float32)
for i in xrange(num_classes):
if im_labels_tmp[i] == 1:
cls_prob_tmp = cls_prob[:, i].copy()
max_index = np.argmax(cls_prob_tmp)
gt_boxes = np.vstack((gt_boxes, boxes[max_index, :].reshape(1, -1)))
gt_classes = np.vstack((gt_classes, (i + 1) * np.ones((1, 1), dtype=np.int32)))
gt_scores = np.vstack((gt_scores,
cls_prob_tmp[max_index] * np.ones((1, 1), dtype=np.float32)))
cls_prob[max_index, :] = 0
proposals = {'gt_boxes' : gt_boxes,
'gt_classes': gt_classes,
'gt_scores': gt_scores}
return proposals
def _get_top_ranking_propoals(probs):
    """Select the indices falling into the highest-scoring k-means cluster."""
    clustering = KMeans(
        n_clusters=cfg.TRAIN.NUM_KMEANS_CLUSTER, random_state=cfg.RNG_SEED
    ).fit(probs)
    # The cluster whose centre has the largest score holds the top proposals.
    top_cluster = np.argmax(clustering.cluster_centers_)
    index = np.where(clustering.labels_ == top_cluster)[0]
    # Fall back to the single best proposal if that cluster is empty.
    if len(index) == 0:
        index = np.array([np.argmax(probs)])
    return index
def _build_graph(boxes, iou_threshold):
    """Build the proposal adjacency matrix from pairwise box IoU."""
    pairwise_iou = box_utils.bbox_overlaps(
        boxes.astype(dtype=np.float32, copy=False),
        boxes.astype(dtype=np.float32, copy=False))
    # An edge (1.0) wherever the overlap exceeds the threshold, else 0.0.
    return (pairwise_iou > iou_threshold).astype(np.float32)
def _get_graph_centers(boxes, cls_prob, im_labels):
    """Select proposal-cluster centres per present class via IoU-graph suppression.

    For every image-level class: keep the top-ranked proposals (k-means on
    scores), build an IoU adjacency graph, then repeatedly pick the node
    with the most neighbours as a centre and remove it together with its
    neighbours. The best-scoring centres become pseudo ground-truth boxes.

    Returns:
        Dict with 'gt_boxes' (N, 4), 'gt_classes' (N, 1) 1-based labels,
        and 'gt_scores' (N, 1).
    """
    num_images, num_classes = im_labels.shape
    assert num_images == 1, 'batch size shoud be equal to 1'
    im_labels_tmp = im_labels[0, :].copy()
    gt_boxes = np.zeros((0, 4), dtype=np.float32)
    gt_classes = np.zeros((0, 1), dtype=np.int32)
    gt_scores = np.zeros((0, 1), dtype=np.float32)
    for i in xrange(num_classes):
        if im_labels_tmp[i] == 1:
            cls_prob_tmp = cls_prob[:, i].copy()
            # All scores are >= 0 after clamping; this yields all indices.
            idxs = np.where(cls_prob_tmp >= 0)[0]
            # Keep only proposals in the highest-scoring k-means cluster.
            idxs_tmp = _get_top_ranking_propoals(cls_prob_tmp[idxs].reshape(-1, 1))
            idxs = idxs[idxs_tmp]
            boxes_tmp = boxes[idxs, :].copy()
            cls_prob_tmp = cls_prob_tmp[idxs]

            graph = _build_graph(boxes_tmp, cfg.TRAIN.GRAPH_IOU_THRESHOLD)

            keep_idxs = []
            gt_scores_tmp = []
            count = cls_prob_tmp.size
            while True:
                # Node with the most remaining neighbours becomes a centre.
                order = np.sum(graph, axis=1).argsort()[::-1]
                tmp = order[0]
                keep_idxs.append(tmp)
                inds = np.where(graph[tmp, :] > 0)[0]
                # The centre's score is the best score within its neighbourhood.
                gt_scores_tmp.append(np.max(cls_prob_tmp[inds]))

                # Remove the centre and all of its neighbours from the graph.
                graph[:, inds] = 0
                graph[inds, :] = 0
                count = count - len(inds)
                if count <= 5:
                    break

            gt_boxes_tmp = boxes_tmp[keep_idxs, :].copy()
            gt_scores_tmp = np.array(gt_scores_tmp).copy()

            # Keep at most MAX_PC_NUM centres, ordered by descending score.
            keep_idxs_new = np.argsort(gt_scores_tmp)\
                [-1:(-1 - min(len(gt_scores_tmp), cfg.TRAIN.MAX_PC_NUM)):-1]

            gt_boxes = np.vstack((gt_boxes, gt_boxes_tmp[keep_idxs_new, :]))
            gt_scores = np.vstack((gt_scores,
                gt_scores_tmp[keep_idxs_new].reshape(-1, 1)))
            gt_classes = np.vstack((gt_classes,
                (i + 1) * np.ones((len(keep_idxs_new), 1), dtype=np.int32)))

            # If a proposal is chosen as a cluster center,
            # we simply delete a proposal from the candidata proposal pool,
            # because we found that the results of different strategies are similar and this strategy is more efficient
            cls_prob = np.delete(cls_prob.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)
            boxes = np.delete(boxes.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)

    proposals = {'gt_boxes' : gt_boxes,
                 'gt_classes': gt_classes,
                 'gt_scores': gt_scores}

    return proposals
def _compute_targets(ex_rois, gt_rois, labels):
    """Compute bounding-box regression targets for an image."""
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 4

    regression_deltas = box_utils.bbox_transform_inv(
        ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
    # Prepend the class label as the first column before the 4 deltas.
    return np.hstack((labels[:, np.newaxis], regression_deltas)).astype(
        np.float32, copy=False)
def _expand_bbox_targets(bbox_target_data):
    """Expand compact bbox regression targets into the 4-of-4*K network form.

    The input stores one (class, dx, dy, dw, dh) row per RoI; the output has
    4*K columns per RoI with non-zero entries only in the slot of the RoI's
    class. Loss weights are expanded the same way.

    Returns:
        bbox_target_data (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """
    num_bbox_reg_classes = cfg.MODEL.NUM_CLASSES + 1
    clss = bbox_target_data[:, 0]
    bbox_targets = blob_utils.zeros((clss.size, 4 * num_bbox_reg_classes))
    bbox_inside_weights = blob_utils.zeros(bbox_targets.shape)
    # Only foreground RoIs (class > 0) receive targets and weights.
    for ind in np.where(clss > 0)[0]:
        cls = int(clss[ind])
        start = 4 * cls
        bbox_targets[ind, start:start + 4] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:start + 4] = (1.0, 1.0, 1.0, 1.0)
    return bbox_targets, bbox_inside_weights
def get_proposal_clusters(all_rois, proposals, im_labels):
    """Assign every RoI to its closest pseudo-GT box and derive supervision.

    Each RoI inherits the label and score of the pseudo-GT box it overlaps
    most; RoIs below FG_THRESH become background, and those also below
    BG_THRESH are ignored (loss weight 0). When the Fast R-CNN head is
    enabled, bbox regression targets/weights are produced as well.

    Returns:
        labels, cls_loss_weights, gt_assignment, bbox_targets,
        bbox_inside_weights, bbox_outside_weights (numpy arrays; the three
        bbox arrays are [0] placeholders when WITH_FRCNN is off).
    """
    num_images, num_classes = im_labels.shape
    assert num_images == 1, 'batch size shoud be equal to 1'
    # overlaps: (rois x gt_boxes)
    gt_boxes = proposals['gt_boxes']
    gt_labels = proposals['gt_classes']
    gt_scores = proposals['gt_scores']
    overlaps = box_utils.bbox_overlaps(
        all_rois.astype(dtype=np.float32, copy=False),
        gt_boxes.astype(dtype=np.float32, copy=False))
    # For each RoI: the pseudo-GT box it overlaps most, and that overlap.
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    # Each RoI inherits label and confidence from its assigned pseudo-GT.
    labels = gt_labels[gt_assignment, 0]
    cls_loss_weights = gt_scores[gt_assignment, 0]

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]

    # Select background RoIs as those with < FG_THRESH overlap
    bg_inds = np.where(max_overlaps < cfg.TRAIN.FG_THRESH)[0]

    # RoIs below BG_THRESH are ignored entirely (zero loss weight).
    ig_inds = np.where(max_overlaps < cfg.TRAIN.BG_THRESH)[0]
    cls_loss_weights[ig_inds] = 0.0

    labels[bg_inds] = 0

    if cfg.MODEL.WITH_FRCNN:
        bbox_targets = _compute_targets(all_rois, gt_boxes[gt_assignment, :],
                                         labels)
        bbox_targets, bbox_inside_weights = _expand_bbox_targets(bbox_targets)
        # Outside weights: per-RoI confidence wherever inside weights are set.
        bbox_outside_weights = np.array(
            bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype) \
            * cls_loss_weights.reshape(-1, 1)
    else:
        bbox_targets, bbox_inside_weights, bbox_outside_weights = np.array([0]), np.array([0]), np.array([0])

    # Background RoIs form their own "-1" cluster for the PCL loss.
    gt_assignment[bg_inds] = -1

    return labels, cls_loss_weights, gt_assignment, bbox_targets, bbox_inside_weights, bbox_outside_weights
class PCLLosses(nn.Module):
    """Cluster-averaged, confidence-weighted NLL loss for PCL refinement."""

    def forward(ctx, pcl_probs, labels, cls_loss_weights, gt_assignments):
        # NOTE(review): the first parameter is named `ctx` but acts as `self`
        # for this nn.Module -- presumably a leftover from an earlier
        # torch.autograd.Function implementation; behavior is unaffected.
        cls_loss = 0.0
        weight = cls_loss_weights.view(-1).float()
        labels = labels.view(-1)
        gt_assignments = gt_assignments.view(-1)

        # Iterate over proposal clusters (one pseudo-GT box per cluster id;
        # id -1 groups all background proposals).
        for gt_assignment in gt_assignments.unique():
            inds = torch.nonzero(gt_assignment == gt_assignments,
                    as_tuple=False).view(-1)
            if gt_assignment == -1:
                # Background cluster: every member must carry label 0;
                # sum the weighted log-probabilities of background per proposal.
                assert labels[inds].sum() == 0
                cls_loss -= (torch.log(pcl_probs[inds, 0].clamp(1e-9, 10000))
                             * weight[inds]).sum()
            else:
                # Foreground cluster: all members share a single label;
                # average the class probability over the cluster, then take the
                # log once, weighted by the cluster's total confidence.
                assert labels[inds].unique().size(0) == 1
                label_cur = labels[inds[0]]
                cls_loss -= torch.log(
                    pcl_probs[inds, label_cur].clamp(1e-9, 10000).mean()
                ) * weight[inds].sum()

        # Normalise by the number of proposals (guard against empty input).
        return cls_loss / max(float(pcl_probs.size(0)), 1.)
class OICRLosses(nn.Module):
    """Per-proposal weighted negative log-likelihood loss for OICR branches."""

    def __init__(self):
        super(OICRLosses, self).__init__()

    def forward(self, prob, labels, cls_loss_weights, gt_assignments, eps = 1e-6):
        # NLL of each proposal's assigned label, weighted by the confidence
        # of its pseudo-GT cluster; gt_assignments is accepted but unused.
        per_sample_nll = -torch.log(prob + eps)[range(prob.size(0)), labels]
        weighted_nll = per_sample_nll * cls_loss_weights
        return weighted_nll.mean()
| [
"utils.blob.zeros",
"numpy.sum",
"numpy.argmax",
"sklearn.cluster.KMeans",
"numpy.zeros",
"torch.nonzero",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"numpy.max",
"numpy.where",
"numpy.array",
"utils.boxes.bbox_transform_inv",
"torch.log",
"numpy.vstack"
] | [((2877, 2911), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (2885, 2911), True, 'import numpy as np\n'), ((2929, 2961), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'np.int32'}), '((0, 1), dtype=np.int32)\n', (2937, 2961), True, 'import numpy as np\n'), ((2978, 3012), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'np.float32'}), '((0, 1), dtype=np.float32)\n', (2986, 3012), True, 'import numpy as np\n'), ((3898, 3932), 'numpy.argmax', 'np.argmax', (['kmeans.cluster_centers_'], {}), '(kmeans.cluster_centers_)\n', (3907, 3932), True, 'import numpy as np\n'), ((4614, 4648), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (4622, 4648), True, 'import numpy as np\n'), ((4666, 4698), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'np.int32'}), '((0, 1), dtype=np.int32)\n', (4674, 4698), True, 'import numpy as np\n'), ((4715, 4749), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'np.float32'}), '((0, 1), dtype=np.float32)\n', (4723, 4749), True, 'import numpy as np\n'), ((7127, 7201), 'utils.boxes.bbox_transform_inv', 'box_utils.bbox_transform_inv', (['ex_rois', 'gt_rois', 'cfg.MODEL.BBOX_REG_WEIGHTS'], {}), '(ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)\n', (7155, 7201), True, 'import utils.boxes as box_utils\n'), ((7923, 7978), 'utils.blob.zeros', 'blob_utils.zeros', (['(clss.size, 4 * num_bbox_reg_classes)'], {}), '((clss.size, 4 * num_bbox_reg_classes))\n', (7939, 7978), True, 'import utils.blob as blob_utils\n'), ((8005, 8041), 'utils.blob.zeros', 'blob_utils.zeros', (['bbox_targets.shape'], {}), '(bbox_targets.shape)\n', (8021, 8041), True, 'import utils.blob as blob_utils\n'), ((3946, 3990), 'numpy.where', 'np.where', (['(kmeans.labels_ == high_score_label)'], {}), '(kmeans.labels_ == high_score_label)\n', (3954, 3990), True, 'import numpy as np\n'), ((8053, 8071), 'numpy.where', 'np.where', (['(clss > 0)'], {}), '(clss > 0)\n', (8061, 8071), 
True, 'import numpy as np\n'), ((9172, 9217), 'numpy.where', 'np.where', (['(max_overlaps >= cfg.TRAIN.FG_THRESH)'], {}), '(max_overlaps >= cfg.TRAIN.FG_THRESH)\n', (9180, 9217), True, 'import numpy as np\n'), ((9299, 9343), 'numpy.where', 'np.where', (['(max_overlaps < cfg.TRAIN.FG_THRESH)'], {}), '(max_overlaps < cfg.TRAIN.FG_THRESH)\n', (9307, 9343), True, 'import numpy as np\n'), ((9362, 9406), 'numpy.where', 'np.where', (['(max_overlaps < cfg.TRAIN.BG_THRESH)'], {}), '(max_overlaps < cfg.TRAIN.BG_THRESH)\n', (9370, 9406), True, 'import numpy as np\n'), ((3154, 3177), 'numpy.argmax', 'np.argmax', (['cls_prob_tmp'], {}), '(cls_prob_tmp)\n', (3163, 3177), True, 'import numpy as np\n'), ((3781, 3855), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'cfg.TRAIN.NUM_KMEANS_CLUSTER', 'random_state': 'cfg.RNG_SEED'}), '(n_clusters=cfg.TRAIN.NUM_KMEANS_CLUSTER, random_state=cfg.RNG_SEED)\n', (3787, 3855), False, 'from sklearn.cluster import KMeans\n'), ((6019, 6072), 'numpy.vstack', 'np.vstack', (['(gt_boxes, gt_boxes_tmp[keep_idxs_new, :])'], {}), '((gt_boxes, gt_boxes_tmp[keep_idxs_new, :]))\n', (6028, 6072), True, 'import numpy as np\n'), ((7256, 7299), 'numpy.hstack', 'np.hstack', (['(labels[:, np.newaxis], targets)'], {}), '((labels[:, np.newaxis], targets))\n', (7265, 7299), True, 'import numpy as np\n'), ((9709, 9775), 'numpy.array', 'np.array', (['(bbox_inside_weights > 0)'], {'dtype': 'bbox_inside_weights.dtype'}), '(bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)\n', (9717, 9775), True, 'import numpy as np\n'), ((9913, 9926), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (9921, 9926), True, 'import numpy as np\n'), ((9928, 9941), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (9936, 9941), True, 'import numpy as np\n'), ((9943, 9956), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (9951, 9956), True, 'import numpy as np\n'), ((11265, 11286), 'torch.log', 'torch.log', (['(prob + eps)'], {}), '(prob + eps)\n', (11274, 11286), 
False, 'import torch\n'), ((4045, 4061), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (4054, 4061), True, 'import numpy as np\n'), ((4886, 4913), 'numpy.where', 'np.where', (['(cls_prob_tmp >= 0)'], {}), '(cls_prob_tmp >= 0)\n', (4894, 4913), True, 'import numpy as np\n'), ((5891, 5916), 'numpy.argsort', 'np.argsort', (['gt_scores_tmp'], {}), '(gt_scores_tmp)\n', (5901, 5916), True, 'import numpy as np\n'), ((5478, 5505), 'numpy.where', 'np.where', (['(graph[tmp, :] > 0)'], {}), '(graph[tmp, :] > 0)\n', (5486, 5505), True, 'import numpy as np\n'), ((5546, 5572), 'numpy.max', 'np.max', (['cls_prob_tmp[inds]'], {}), '(cls_prob_tmp[inds])\n', (5552, 5572), True, 'import numpy as np\n'), ((5831, 5854), 'numpy.array', 'np.array', (['gt_scores_tmp'], {}), '(gt_scores_tmp)\n', (5839, 5854), True, 'import numpy as np\n'), ((10435, 10497), 'torch.nonzero', 'torch.nonzero', (['(gt_assignment == gt_assignments)'], {'as_tuple': '(False)'}), '(gt_assignment == gt_assignments, as_tuple=False)\n', (10448, 10497), False, 'import torch\n'), ((3318, 3349), 'numpy.ones', 'np.ones', (['(1, 1)'], {'dtype': 'np.int32'}), '((1, 1), dtype=np.int32)\n', (3325, 3349), True, 'import numpy as np\n'), ((3459, 3492), 'numpy.ones', 'np.ones', (['(1, 1)'], {'dtype': 'np.float32'}), '((1, 1), dtype=np.float32)\n', (3466, 3492), True, 'import numpy as np\n'), ((5348, 5369), 'numpy.sum', 'np.sum', (['graph'], {'axis': '(1)'}), '(graph, axis=1)\n', (5354, 5369), True, 'import numpy as np\n')] |
"""
Read in the results for different algorithms and different amounts of
temporal binning and plot the -log(FPF) over the binning factor.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import argparse
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from hsr4hci.config import get_experiments_dir
from hsr4hci.data import load_metadata
from hsr4hci.plotting import (
adjust_luminosity,
set_fontsize,
)
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == '__main__':

    # -------------------------------------------------------------------------
    # Preliminaries
    # -------------------------------------------------------------------------

    script_start = time.time()
    print('\nPLOT -LOG(FPF) OVER BINNING FACTOR\n', flush=True)

    # -------------------------------------------------------------------------
    # Set up parser and get command line arguments
    # -------------------------------------------------------------------------

    # Set up argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset, e.g., "beta_pictoris__lp".',
    )
    parser.add_argument(
        '--planet',
        type=str,
        default='b',
        help='Planet, e.g., "b".',
    )
    args = parser.parse_args()

    # Get arguments
    dataset = args.dataset
    planet = args.planet

    # -------------------------------------------------------------------------
    # Define shortcuts
    # -------------------------------------------------------------------------

    # Define directory for the dataset that we are processing
    dataset_dir = (
        get_experiments_dir()
        / 'appendix'
        / 'D.1_fpf-as-function-of-temporal-binning'
        / dataset
    )

    # Load the metadata of the data set (e.g., because we need the DIT)
    print('Loading data set metadata...', end=' ', flush=True)
    metadata = load_metadata(name_or_path=dataset)
    dit = metadata['DIT_STACK']
    print('Done!', flush=True)

    # Initialize a new plot to which we will add everything
    fig, ax1 = plt.subplots(figsize=(18.4 / 2.54, 18.4 / 2.54 / 2))
    fig.subplots_adjust(left=0.052, right=0.998, top=0.905, bottom=0.095)

    # -------------------------------------------------------------------------
    # Load and plot the results for PCA
    # -------------------------------------------------------------------------

    # Read in the results for PCA into a pandas DataFrame
    file_path = dataset_dir / 'pca' / f'metrics__{planet}.tsv'
    df = pd.read_csv(file_path, sep='\t')

    # Get the maximum binning factor (for plot limits)
    max_binning_factor = df.binning_factor.max()

    # Plot results for different numbers of principal components
    for i, n_components in enumerate([10, 20, 50, 100]):

        print(f'Plotting PCA (n={n_components})...', end=' ', flush=True)

        # Select the subset of the data frame for the current n_components
        df_selection = df[df['n_components'] == n_components]

        # Select results and sort by binning factor
        idx = np.argsort(df_selection.binning_factor.values)
        binning_factor = df_selection.binning_factor.values[idx]
        log_fpf_mean = df_selection.log_fpf_mean.values[idx]

        # Plot the -log(FPF) over the binning factor
        ax1.plot(
            binning_factor,
            log_fpf_mean,
            ls='-',
            color=f'C{i + 2}',
            marker='o',
            markerfacecolor=f'C{i + 2}',
            markeredgecolor='white',
            markersize=4,
            label=f'PCA (n={n_components})',
        )

        print('Done!', flush=True)

    # -------------------------------------------------------------------------
    # Load and plot the results for HSR
    # -------------------------------------------------------------------------

    for i, algorithm in enumerate(['signal_fitting', 'signal_masking']):
        for oc, ls, marker, amount in [
            ('', '-', 's', 1.0),
            ('__oc', '--', 'D', 1.3),
        ]:

            print(f'Plotting {algorithm}{oc}...', end=' ', flush=True)

            # Read in the results for signal fitting into a pandas DataFrame
            file_path = (
                dataset_dir / f'{algorithm}{oc}' / f'metrics__{planet}.tsv'
            )
            df = pd.read_csv(file_path, sep='\t')

            # Select results and sort by binning factor
            idx = np.argsort(df.binning_factor.values)
            binning_factor = df.binning_factor.values[idx]
            log_fpf_mean = df.log_fpf_mean.values[idx]

            # Construct label for legend
            version = 'SF' if algorithm == 'signal_fitting' else 'SM'
            obscon = '+OC' if oc else ''
            label = f'HSR ({version}{obscon})'

            # Plot the -log(FPF) over the binning factor
            color = adjust_luminosity(color=f'C{i}', amount=amount)
            ax1.plot(
                binning_factor,
                log_fpf_mean,
                ls=ls,
                color=color,
                marker=marker,
                markerfacecolor=color,
                markeredgecolor='white',
                markersize=4,
                label=label,
            )

            print('Done!', flush=True)

    # -------------------------------------------------------------------------
    # Set up plot options and save results
    # -------------------------------------------------------------------------

    # Add a secondary x-axis on top which converts the binning factor to
    # an effective integration time by multiplying the binning factor with
    # the DIT of a single frame (= 0.2 seconds for most L' band data sets)
    ax2 = ax1.secondary_xaxis(
        location='top', functions=(lambda x: dit * x, lambda x: x / dit)
    )

    # Add a legend to the plot
    ax1.legend(
        loc='lower center',
        ncol=8,
        fontsize=6,
        handletextpad=0.5,
        mode="expand",
        columnspacing=1.5,
    )

    # Set axes scale and limits
    ax1.set_xscale('log')
    ax1.set_xlim(0.9, 1.1 * max_binning_factor)
    ax1.set_ylim(0.0, None)

    # Set up font sizes
    for ax in (ax1, ax2):
        set_fontsize(ax=ax, fontsize=6)

    # Add labels to axes
    ax1.set_xlabel('Binning factor')
    ax2.set_xlabel('Effective Integration Time (s)')
    ax1.set_ylabel(r'$-\log_\mathrm{10}(\mathrm{FPF})$')

    # Save plot
    file_path = dataset_dir / 'log_fpf_over_binning_factor.pdf'
    plt.savefig(file_path, dpi=600)

    # -------------------------------------------------------------------------
    # Postliminaries
    # -------------------------------------------------------------------------

    print(f'\nThis took {time.time() - script_start:.1f} seconds!\n')
| [
"hsr4hci.plotting.set_fontsize",
"argparse.ArgumentParser",
"pandas.read_csv",
"hsr4hci.data.load_metadata",
"time.time",
"numpy.argsort",
"hsr4hci.plotting.adjust_luminosity",
"matplotlib.pyplot.subplots",
"hsr4hci.config.get_experiments_dir",
"matplotlib.pyplot.savefig"
] | [((979, 990), 'time.time', 'time.time', ([], {}), '()\n', (988, 990), False, 'import time\n'), ((1310, 1335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1333, 1335), False, 'import argparse\n'), ((2255, 2290), 'hsr4hci.data.load_metadata', 'load_metadata', ([], {'name_or_path': 'dataset'}), '(name_or_path=dataset)\n', (2268, 2290), False, 'from hsr4hci.data import load_metadata\n'), ((2430, 2482), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18.4 / 2.54, 18.4 / 2.54 / 2)'}), '(figsize=(18.4 / 2.54, 18.4 / 2.54 / 2))\n', (2442, 2482), True, 'import matplotlib.pyplot as plt\n'), ((2889, 2921), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'sep': '"""\t"""'}), "(file_path, sep='\\t')\n", (2900, 2921), True, 'import pandas as pd\n'), ((6841, 6872), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_path'], {'dpi': '(600)'}), '(file_path, dpi=600)\n', (6852, 6872), True, 'import matplotlib.pyplot as plt\n'), ((3430, 3476), 'numpy.argsort', 'np.argsort', (['df_selection.binning_factor.values'], {}), '(df_selection.binning_factor.values)\n', (3440, 3476), True, 'import numpy as np\n'), ((6551, 6582), 'hsr4hci.plotting.set_fontsize', 'set_fontsize', ([], {'ax': 'ax', 'fontsize': '(6)'}), '(ax=ax, fontsize=6)\n', (6563, 6582), False, 'from hsr4hci.plotting import adjust_luminosity, set_fontsize\n'), ((4679, 4711), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'sep': '"""\t"""'}), "(file_path, sep='\\t')\n", (4690, 4711), True, 'import pandas as pd\n'), ((4787, 4823), 'numpy.argsort', 'np.argsort', (['df.binning_factor.values'], {}), '(df.binning_factor.values)\n', (4797, 4823), True, 'import numpy as np\n'), ((5216, 5263), 'hsr4hci.plotting.adjust_luminosity', 'adjust_luminosity', ([], {'color': 'f"""C{i}"""', 'amount': 'amount'}), "(color=f'C{i}', amount=amount)\n", (5233, 5263), False, 'from hsr4hci.plotting import adjust_luminosity, set_fontsize\n'), ((1985, 2006), 'hsr4hci.config.get_experiments_dir', 
'get_experiments_dir', ([], {}), '()\n', (2004, 2006), False, 'from hsr4hci.config import get_experiments_dir\n'), ((7081, 7092), 'time.time', 'time.time', ([], {}), '()\n', (7090, 7092), False, 'import time\n')] |
import pandas as pd
import numpy as np
import sqlite3
import click
import os
from .data_handling import check_sqlite_table
from .report import plot_scores
def export_tsv(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue):
    """Export scored OpenSWATH results from an OSW (SQLite) file to TSV/CSV.

    Depending on ``ipf`` ('peptidoform', 'augmented' or disabled) a different
    main query is used to read peak group-level results; transition-level
    quantities, protein identifiers, and peptide-/protein-level error-rate
    control columns are then merged in. The result is written according to
    ``format`` ('legacy_split', 'legacy_merged' or 'matrix'), either as TSV
    or (if ``outcsv``) as CSV.

    Note: the q-value/PEP thresholds are interpolated into the SQL text with
    ``%s``; they come from numeric CLI options, not untrusted input.
    """
    con = sqlite3.connect(infile)

    # IPF results can only be used if a previous IPF run created SCORE_IPF.
    ipf_present = False
    if ipf:
        ipf_present = check_sqlite_table(con, "SCORE_IPF")

    # Main query for peptidoform IPF
    if ipf_present and ipf == 'peptidoform':
        idx_query = '''
CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID);
CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID);
CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID);
CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID);
'''
        # Optional tables: only index (and join) what actually exists.
        if check_sqlite_table(con, "FEATURE_MS1"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);"
        if check_sqlite_table(con, "FEATURE_MS2"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);"
        if check_sqlite_table(con, "SCORE_MS1"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms1_feature_id ON SCORE_MS1 (FEATURE_ID);"
            score_ms1_pep = "SCORE_MS1.PEP"
            link_ms1 = "LEFT JOIN SCORE_MS1 ON SCORE_MS1.FEATURE_ID = FEATURE.ID"
        else:
            # No MS1 scores available: report NULL and skip the join.
            score_ms1_pep = "NULL"
            link_ms1 = ""
        if check_sqlite_table(con, "SCORE_MS2"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);"
        if check_sqlite_table(con, "SCORE_IPF"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_feature_id ON SCORE_IPF (FEATURE_ID);"
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_peptide_id ON SCORE_IPF (PEPTIDE_ID);"

        query = '''
SELECT RUN.ID AS id_run,
       PEPTIDE.ID AS id_peptide,
       PEPTIDE_IPF.MODIFIED_SEQUENCE || '_' || PRECURSOR.ID AS transition_group_id,
       PRECURSOR.DECOY AS decoy,
       RUN.ID AS run_id,
       RUN.FILENAME AS filename,
       FEATURE.EXP_RT AS RT,
       FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt,
       FEATURE.DELTA_RT AS delta_rt,
       FEATURE.NORM_RT AS iRT,
       PRECURSOR.LIBRARY_RT AS assay_iRT,
       FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT,
       FEATURE.ID AS id,
       PEPTIDE_IPF.UNMODIFIED_SEQUENCE AS Sequence,
       PEPTIDE_IPF.MODIFIED_SEQUENCE AS FullPeptideName,
       PRECURSOR.CHARGE AS Charge,
       PRECURSOR.PRECURSOR_MZ AS mz,
       FEATURE_MS2.AREA_INTENSITY AS Intensity,
       FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area,
       FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex,
       FEATURE.LEFT_WIDTH AS leftWidth,
       FEATURE.RIGHT_WIDTH AS rightWidth,
       %s AS ms1_pep,
       SCORE_MS2.PEP AS ms2_pep,
       SCORE_IPF.PRECURSOR_PEAKGROUP_PEP AS precursor_pep,
       SCORE_IPF.PEP AS ipf_pep,
       SCORE_MS2.RANK AS peak_group_rank,
       SCORE_MS2.SCORE AS d_score,
       SCORE_MS2.QVALUE AS ms2_m_score,
       SCORE_IPF.QVALUE AS m_score
FROM PRECURSOR
INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID
INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID
INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID
LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID
LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
%s
LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_IPF ON SCORE_IPF.FEATURE_ID = FEATURE.ID
INNER JOIN PEPTIDE AS PEPTIDE_IPF ON SCORE_IPF.PEPTIDE_ID = PEPTIDE_IPF.ID
WHERE SCORE_MS2.QVALUE < %s AND SCORE_IPF.PEP < %s
ORDER BY transition_group_id,
         peak_group_rank;
''' % (score_ms1_pep, link_ms1, max_rs_peakgroup_qvalue, ipf_max_peptidoform_pep)

    # Main query for augmented IPF
    elif ipf_present and ipf == 'augmented':
        idx_query = '''
CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID);
CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID);
CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID);
CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID);
'''
        if check_sqlite_table(con, "FEATURE_MS1"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);"
        if check_sqlite_table(con, "FEATURE_MS2"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);"
        if check_sqlite_table(con, "SCORE_MS1"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms1_feature_id ON SCORE_MS1 (FEATURE_ID);"
            score_ms1_pep = "SCORE_MS1.PEP"
            link_ms1 = "LEFT JOIN SCORE_MS1 ON SCORE_MS1.FEATURE_ID = FEATURE.ID"
        else:
            score_ms1_pep = "NULL"
            link_ms1 = ""
        if check_sqlite_table(con, "SCORE_MS2"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);"
        if check_sqlite_table(con, "SCORE_IPF"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_feature_id ON SCORE_IPF (FEATURE_ID);"
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ipf_peptide_id ON SCORE_IPF (PEPTIDE_ID);"

        query = '''
SELECT RUN.ID AS id_run,
       PEPTIDE.ID AS id_peptide,
       PRECURSOR.ID AS transition_group_id,
       PRECURSOR.DECOY AS decoy,
       RUN.ID AS run_id,
       RUN.FILENAME AS filename,
       FEATURE.EXP_RT AS RT,
       FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt,
       FEATURE.DELTA_RT AS delta_rt,
       FEATURE.NORM_RT AS iRT,
       PRECURSOR.LIBRARY_RT AS assay_iRT,
       FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT,
       FEATURE.ID AS id,
       PEPTIDE.UNMODIFIED_SEQUENCE AS Sequence,
       PEPTIDE.MODIFIED_SEQUENCE AS FullPeptideName,
       PRECURSOR.CHARGE AS Charge,
       PRECURSOR.PRECURSOR_MZ AS mz,
       FEATURE_MS2.AREA_INTENSITY AS Intensity,
       FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area,
       FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex,
       FEATURE.LEFT_WIDTH AS leftWidth,
       FEATURE.RIGHT_WIDTH AS rightWidth,
       SCORE_MS2.RANK AS peak_group_rank,
       SCORE_MS2.SCORE AS d_score,
       SCORE_MS2.QVALUE AS m_score,
       %s AS ms1_pep,
       SCORE_MS2.PEP AS ms2_pep
FROM PRECURSOR
INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID
INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID
INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID
LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID
LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
%s
LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID
WHERE SCORE_MS2.QVALUE < %s
ORDER BY transition_group_id,
         peak_group_rank;
''' % (score_ms1_pep, link_ms1, max_rs_peakgroup_qvalue)

        # Secondary query: best peptidoform per feature, merged in later.
        query_augmented = '''
SELECT FEATURE_ID AS id,
       MODIFIED_SEQUENCE AS ipf_FullUniModPeptideName,
       PRECURSOR_PEAKGROUP_PEP AS ipf_precursor_peakgroup_pep,
       PEP AS ipf_peptidoform_pep,
       QVALUE AS ipf_peptidoform_m_score
FROM SCORE_IPF
INNER JOIN PEPTIDE ON SCORE_IPF.PEPTIDE_ID = PEPTIDE.ID
WHERE SCORE_IPF.PEP < %s;
''' % ipf_max_peptidoform_pep

    # Main query for standard OpenSWATH
    else:
        idx_query = '''
CREATE INDEX IF NOT EXISTS idx_precursor_precursor_id ON PRECURSOR (ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_precursor_id ON PRECURSOR_PEPTIDE_MAPPING (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_feature_precursor_id ON FEATURE (PRECURSOR_ID);
CREATE INDEX IF NOT EXISTS idx_precursor_peptide_mapping_peptide_id ON PRECURSOR_PEPTIDE_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_peptide_id ON PEPTIDE (ID);
CREATE INDEX IF NOT EXISTS idx_run_run_id ON RUN (ID);
CREATE INDEX IF NOT EXISTS idx_feature_run_id ON FEATURE (RUN_ID);
CREATE INDEX IF NOT EXISTS idx_feature_feature_id ON FEATURE (ID);
'''
        if check_sqlite_table(con, "FEATURE_MS1"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms1_feature_id ON FEATURE_MS1 (FEATURE_ID);"
        if check_sqlite_table(con, "FEATURE_MS2"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_feature_ms2_feature_id ON FEATURE_MS2 (FEATURE_ID);"
        if check_sqlite_table(con, "SCORE_MS2"):
            idx_query += "CREATE INDEX IF NOT EXISTS idx_score_ms2_feature_id ON SCORE_MS2 (FEATURE_ID);"

        query = '''
SELECT RUN.ID AS id_run,
       PEPTIDE.ID AS id_peptide,
       PRECURSOR.ID AS transition_group_id,
       PRECURSOR.DECOY AS decoy,
       RUN.ID AS run_id,
       RUN.FILENAME AS filename,
       FEATURE.EXP_RT AS RT,
       FEATURE.EXP_RT - FEATURE.DELTA_RT AS assay_rt,
       FEATURE.DELTA_RT AS delta_rt,
       FEATURE.NORM_RT AS iRT,
       PRECURSOR.LIBRARY_RT AS assay_iRT,
       FEATURE.NORM_RT - PRECURSOR.LIBRARY_RT AS delta_iRT,
       FEATURE.ID AS id,
       PEPTIDE.UNMODIFIED_SEQUENCE AS Sequence,
       PEPTIDE.MODIFIED_SEQUENCE AS FullPeptideName,
       PRECURSOR.CHARGE AS Charge,
       PRECURSOR.PRECURSOR_MZ AS mz,
       FEATURE_MS2.AREA_INTENSITY AS Intensity,
       FEATURE_MS1.AREA_INTENSITY AS aggr_prec_Peak_Area,
       FEATURE_MS1.APEX_INTENSITY AS aggr_prec_Peak_Apex,
       FEATURE.LEFT_WIDTH AS leftWidth,
       FEATURE.RIGHT_WIDTH AS rightWidth,
       SCORE_MS2.RANK AS peak_group_rank,
       SCORE_MS2.SCORE AS d_score,
       SCORE_MS2.QVALUE AS m_score
FROM PRECURSOR
INNER JOIN PRECURSOR_PEPTIDE_MAPPING ON PRECURSOR.ID = PRECURSOR_PEPTIDE_MAPPING.PRECURSOR_ID
INNER JOIN PEPTIDE ON PRECURSOR_PEPTIDE_MAPPING.PEPTIDE_ID = PEPTIDE.ID
INNER JOIN FEATURE ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN RUN ON RUN.ID = FEATURE.RUN_ID
LEFT JOIN FEATURE_MS1 ON FEATURE_MS1.FEATURE_ID = FEATURE.ID
LEFT JOIN FEATURE_MS2 ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
LEFT JOIN SCORE_MS2 ON SCORE_MS2.FEATURE_ID = FEATURE.ID
WHERE SCORE_MS2.QVALUE < %s
ORDER BY transition_group_id,
         peak_group_rank;
''' % max_rs_peakgroup_qvalue

    # Execute main SQLite query
    click.echo("Info: Reading peak group-level results.")
    con.executescript(idx_query)  # Add indices
    data = pd.read_sql_query(query, con)

    # Augment OpenSWATH results with IPF scores: for each feature keep the
    # peptidoform(s) with the minimum IPF PEP.
    if ipf_present and ipf == 'augmented':
        data_augmented = pd.read_sql_query(query_augmented, con)
        data_augmented = data_augmented.groupby('id').apply(lambda x: pd.Series({'ipf_FullUniModPeptideName': ";".join(x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_FullUniModPeptideName']), 'ipf_precursor_peakgroup_pep': x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_precursor_peakgroup_pep'].values[0], 'ipf_peptidoform_pep': x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_peptidoform_pep'].values[0], 'ipf_peptidoform_m_score': x[x['ipf_peptidoform_pep'] == np.min(x['ipf_peptidoform_pep'])]['ipf_peptidoform_m_score'].values[0]})).reset_index(level='id')
        data = pd.merge(data, data_augmented, how='left', on='id')

    # Append transition-level quantities
    if transition_quantification:
        if check_sqlite_table(con, "SCORE_TRANSITION"):
            # Transition-level scores exist: filter transitions by PEP.
            idx_transition_query = '''
CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id ON FEATURE_TRANSITION (TRANSITION_ID);
CREATE INDEX IF NOT EXISTS idx_transition_transition_id ON TRANSITION (ID);
CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id_feature_id ON FEATURE_TRANSITION (TRANSITION_ID, FEATURE_ID);
CREATE INDEX IF NOT EXISTS idx_score_transition_transition_id_feature_id ON SCORE_TRANSITION (TRANSITION_ID, FEATURE_ID);
CREATE INDEX IF NOT EXISTS idx_feature_transition_feature_id ON FEATURE_TRANSITION (FEATURE_ID);
'''
            transition_query = '''
SELECT FEATURE_TRANSITION.FEATURE_ID AS id,
       GROUP_CONCAT(AREA_INTENSITY,';') AS aggr_Peak_Area,
       GROUP_CONCAT(APEX_INTENSITY,';') AS aggr_Peak_Apex,
       GROUP_CONCAT(TRANSITION.ID || "_" || TRANSITION.TYPE || TRANSITION.ORDINAL || "_" || TRANSITION.CHARGE,';') AS aggr_Fragment_Annotation
FROM FEATURE_TRANSITION
INNER JOIN TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID
INNER JOIN SCORE_TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = SCORE_TRANSITION.TRANSITION_ID AND FEATURE_TRANSITION.FEATURE_ID = SCORE_TRANSITION.FEATURE_ID
WHERE TRANSITION.DECOY == 0 AND SCORE_TRANSITION.PEP < %s
GROUP BY FEATURE_TRANSITION.FEATURE_ID
''' % max_transition_pep
        else:
            # No transition-level scoring: export all transitions.
            idx_transition_query = '''
CREATE INDEX IF NOT EXISTS idx_feature_transition_transition_id ON FEATURE_TRANSITION (TRANSITION_ID);
CREATE INDEX IF NOT EXISTS idx_transition_transition_id ON TRANSITION (ID);
CREATE INDEX IF NOT EXISTS idx_feature_transition_feature_id ON FEATURE_TRANSITION (FEATURE_ID);
'''
            transition_query = '''
SELECT FEATURE_ID AS id,
       GROUP_CONCAT(AREA_INTENSITY,';') AS aggr_Peak_Area,
       GROUP_CONCAT(APEX_INTENSITY,';') AS aggr_Peak_Apex,
       GROUP_CONCAT(TRANSITION.ID || "_" || TRANSITION.TYPE || TRANSITION.ORDINAL || "_" || TRANSITION.CHARGE,';') AS aggr_Fragment_Annotation
FROM FEATURE_TRANSITION
INNER JOIN TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID
GROUP BY FEATURE_ID
'''
        click.echo("Info: Reading transition-level results.")
        con.executescript(idx_transition_query)  # Add indices
        data_transition = pd.read_sql_query(transition_query, con)
        data = pd.merge(data, data_transition, how='left', on=['id'])

    # Append concatenated protein identifier
    click.echo("Info: Reading protein identifiers.")
    con.executescript('''
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_protein_id ON PEPTIDE_PROTEIN_MAPPING (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_protein_protein_id ON PROTEIN (ID);
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_peptide_id ON PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID);
''')
    data_protein = pd.read_sql_query('''
SELECT PEPTIDE_ID AS id_peptide,
       GROUP_CONCAT(PROTEIN.PROTEIN_ACCESSION,';') AS ProteinName
FROM PEPTIDE_PROTEIN_MAPPING
INNER JOIN PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID
GROUP BY PEPTIDE_ID;
''', con)
    data = pd.merge(data, data_protein, how='inner', on=['id_peptide'])

    # Append peptide error-rate control (run-specific, experiment-wide and
    # global contexts, each merged only if results exist).
    peptide_present = False
    if peptide:
        peptide_present = check_sqlite_table(con, "SCORE_PEPTIDE")
    if peptide_present and peptide:
        click.echo("Info: Reading peptide-level results.")
        data_peptide_run = pd.read_sql_query('''
SELECT RUN_ID AS id_run,
       PEPTIDE_ID AS id_peptide,
       QVALUE AS m_score_peptide_run_specific
FROM SCORE_PEPTIDE
WHERE CONTEXT == 'run-specific';
''', con)
        if len(data_peptide_run.index) > 0:
            data = pd.merge(data, data_peptide_run, how='inner', on=['id_run','id_peptide'])

        data_peptide_experiment = pd.read_sql_query('''
SELECT RUN_ID AS id_run,
       PEPTIDE_ID AS id_peptide,
       QVALUE AS m_score_peptide_experiment_wide
FROM SCORE_PEPTIDE
WHERE CONTEXT == 'experiment-wide';
''', con)
        if len(data_peptide_experiment.index) > 0:
            data = pd.merge(data, data_peptide_experiment, on=['id_run','id_peptide'])

        data_peptide_global = pd.read_sql_query('''
SELECT PEPTIDE_ID AS id_peptide,
       QVALUE AS m_score_peptide_global
FROM SCORE_PEPTIDE
WHERE CONTEXT == 'global';
''', con)
        if len(data_peptide_global.index) > 0:
            # Global context additionally filters by the peptide q-value cutoff.
            data = pd.merge(data, data_peptide_global[data_peptide_global['m_score_peptide_global'] < max_global_peptide_qvalue], on=['id_peptide'])

    # Append protein error-rate control (same three contexts; q-values are
    # aggregated to the peptide level via MIN over mapped proteins).
    protein_present = False
    if protein:
        protein_present = check_sqlite_table(con, "SCORE_PROTEIN")
    if protein_present and protein:
        click.echo("Info: Reading protein-level results.")
        con.executescript('''
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_protein_id ON PEPTIDE_PROTEIN_MAPPING (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_peptide_id ON PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_score_protein_protein_id ON SCORE_PROTEIN (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_score_protein_run_id ON SCORE_PROTEIN (RUN_ID);
''')
        data_protein_run = pd.read_sql_query('''
SELECT RUN_ID AS id_run,
       PEPTIDE_ID AS id_peptide,
       MIN(QVALUE) AS m_score_protein_run_specific
FROM PEPTIDE_PROTEIN_MAPPING
INNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID
WHERE CONTEXT == 'run-specific'
GROUP BY RUN_ID,
         PEPTIDE_ID;
''', con)
        if len(data_protein_run.index) > 0:
            data = pd.merge(data, data_protein_run, how='inner', on=['id_run','id_peptide'])

        con.executescript('''
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_protein_id ON PEPTIDE_PROTEIN_MAPPING (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_peptide_id ON PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_score_protein_protein_id ON SCORE_PROTEIN (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_score_protein_run_id ON SCORE_PROTEIN (RUN_ID);
''')
        data_protein_experiment = pd.read_sql_query('''
SELECT RUN_ID AS id_run,
       PEPTIDE_ID AS id_peptide,
       MIN(QVALUE) AS m_score_protein_experiment_wide
FROM PEPTIDE_PROTEIN_MAPPING
INNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID
WHERE CONTEXT == 'experiment-wide'
GROUP BY RUN_ID,
         PEPTIDE_ID;
''', con)
        if len(data_protein_experiment.index) > 0:
            data = pd.merge(data, data_protein_experiment, how='inner', on=['id_run','id_peptide'])

        con.executescript('''
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_protein_id ON PEPTIDE_PROTEIN_MAPPING (PROTEIN_ID);
CREATE INDEX IF NOT EXISTS idx_peptide_protein_mapping_peptide_id ON PEPTIDE_PROTEIN_MAPPING (PEPTIDE_ID);
CREATE INDEX IF NOT EXISTS idx_score_protein_protein_id ON SCORE_PROTEIN (PROTEIN_ID);
''')
        data_protein_global = pd.read_sql_query('''
SELECT PEPTIDE_ID AS id_peptide,
       MIN(QVALUE) AS m_score_protein_global
FROM PEPTIDE_PROTEIN_MAPPING
INNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID
WHERE CONTEXT == 'global'
GROUP BY PEPTIDE_ID;
''', con)
        if len(data_protein_global.index) > 0:
            # Global context additionally filters by the protein q-value cutoff.
            data = pd.merge(data, data_protein_global[data_protein_global['m_score_protein_global'] < max_global_protein_qvalue], how='inner', on=['id_peptide'])

    # Choose output delimiter
    if outcsv:
        sep = ","
    else:
        sep = "\t"

    # Write results, depending on requested output format
    if format == 'legacy_split':
        # One output file per run, named after the run's raw file name.
        data = data.drop(['id_run','id_peptide'], axis=1)
        data.groupby('filename').apply(lambda x: x.to_csv(os.path.basename(x['filename'].values[0]) + '.tsv', sep=sep, index=False))
    elif format == 'legacy_merged':
        data.drop(['id_run','id_peptide'], axis=1).to_csv(outfile, sep=sep, index=False)
    elif format == 'matrix':
        # select top ranking peak group only
        data = data.iloc[data.groupby(['run_id','transition_group_id']).apply(lambda x: x['m_score'].idxmin())]
        # restructure dataframe to matrix
        data = data[['transition_group_id','Sequence','FullPeptideName','ProteinName','filename','Intensity']]
        data = data.pivot_table(index=['transition_group_id','Sequence','FullPeptideName','ProteinName'], columns='filename', values='Intensity')
        data.to_csv(outfile, sep=sep, index=True)

    con.close()
def export_score_plots(infile):
    """Render score-distribution report PDFs for each score level in an OSW file.

    For each of the SCORE_MS2, SCORE_MS1 and SCORE_TRANSITION tables present
    in the SQLite file, the corresponding features, sub-scores and decoy
    labels are read into a DataFrame and passed to ``plot_scores``, which
    writes ``<infile-stem>_<level>_score_plots.pdf``.
    """
    con = sqlite3.connect(infile)

    if check_sqlite_table(con, "SCORE_MS2"):
        outfile = infile.split(".osw")[0] + "_ms2_score_plots.pdf"
        # Only rank-1 peak groups are plotted at the MS2 level.
        table_ms2 = pd.read_sql_query('''
SELECT *,
       RUN_ID || '_' || PRECURSOR_ID AS GROUP_ID
FROM FEATURE_MS2
INNER JOIN
  (SELECT RUN_ID,
          ID,
          PRECURSOR_ID,
          EXP_RT
   FROM FEATURE) AS FEATURE ON FEATURE_MS2.FEATURE_ID = FEATURE.ID
INNER JOIN
  (SELECT ID,
          CHARGE AS VAR_PRECURSOR_CHARGE,
          DECOY
   FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN
  (SELECT PRECURSOR_ID AS ID,
          COUNT(*) AS VAR_TRANSITION_NUM_SCORE
   FROM TRANSITION_PRECURSOR_MAPPING
   INNER JOIN TRANSITION ON TRANSITION_PRECURSOR_MAPPING.TRANSITION_ID = TRANSITION.ID
   WHERE DETECTING==1
   GROUP BY PRECURSOR_ID) AS VAR_TRANSITION_SCORE ON FEATURE.PRECURSOR_ID = VAR_TRANSITION_SCORE.ID
INNER JOIN SCORE_MS2 ON FEATURE.ID = SCORE_MS2.FEATURE_ID
WHERE RANK == 1
ORDER BY RUN_ID,
         PRECURSOR.ID ASC,
         FEATURE.EXP_RT ASC;
''', con)
        plot_scores(table_ms2, outfile)

    if check_sqlite_table(con, "SCORE_MS1"):
        outfile = infile.split(".osw")[0] + "_ms1_score_plots.pdf"
        table_ms1 = pd.read_sql_query('''
SELECT *,
       RUN_ID || '_' || PRECURSOR_ID AS GROUP_ID
FROM FEATURE_MS1
INNER JOIN
  (SELECT RUN_ID,
          ID,
          PRECURSOR_ID,
          EXP_RT
   FROM FEATURE) AS FEATURE ON FEATURE_MS1.FEATURE_ID = FEATURE.ID
INNER JOIN
  (SELECT ID,
          CHARGE AS VAR_PRECURSOR_CHARGE,
          DECOY
   FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN SCORE_MS1 ON FEATURE.ID = SCORE_MS1.FEATURE_ID
WHERE RANK == 1
ORDER BY RUN_ID,
         PRECURSOR.ID ASC,
         FEATURE.EXP_RT ASC;
''', con)
        plot_scores(table_ms1, outfile)

    if check_sqlite_table(con, "SCORE_TRANSITION"):
        outfile = infile.split(".osw")[0] + "_transition_score_plots.pdf"
        table_transition = pd.read_sql_query('''
SELECT TRANSITION.DECOY AS DECOY,
       FEATURE_TRANSITION.*,
       PRECURSOR.CHARGE AS VAR_PRECURSOR_CHARGE,
       TRANSITION.VAR_PRODUCT_CHARGE AS VAR_PRODUCT_CHARGE,
       SCORE_TRANSITION.*,
       RUN_ID || '_' || FEATURE_TRANSITION.FEATURE_ID || '_' || PRECURSOR_ID || '_' || FEATURE_TRANSITION.TRANSITION_ID AS GROUP_ID
FROM FEATURE_TRANSITION
INNER JOIN
  (SELECT RUN_ID,
          ID,
          PRECURSOR_ID,
          EXP_RT
   FROM FEATURE) AS FEATURE ON FEATURE_TRANSITION.FEATURE_ID = FEATURE.ID
INNER JOIN PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID
INNER JOIN SCORE_TRANSITION ON FEATURE_TRANSITION.FEATURE_ID = SCORE_TRANSITION.FEATURE_ID
  AND FEATURE_TRANSITION.TRANSITION_ID = SCORE_TRANSITION.TRANSITION_ID
INNER JOIN
  (SELECT ID,
          CHARGE AS VAR_PRODUCT_CHARGE,
          DECOY
   FROM TRANSITION) AS TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID
ORDER BY RUN_ID,
         PRECURSOR.ID,
         FEATURE.EXP_RT,
         TRANSITION.ID;
''', con)
        plot_scores(table_transition, outfile)

    con.close()
| [
"os.path.basename",
"pandas.merge",
"click.echo",
"numpy.min",
"sqlite3.connect",
"pandas.read_sql_query"
] | [((392, 415), 'sqlite3.connect', 'sqlite3.connect', (['infile'], {}), '(infile)\n', (407, 415), False, 'import sqlite3\n'), ((11066, 11119), 'click.echo', 'click.echo', (['"""Info: Reading peak group-level results."""'], {}), "('Info: Reading peak group-level results.')\n", (11076, 11119), False, 'import click\n'), ((11178, 11207), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'con'], {}), '(query, con)\n', (11195, 11207), True, 'import pandas as pd\n'), ((14556, 14604), 'click.echo', 'click.echo', (['"""Info: Reading protein identifiers."""'], {}), "('Info: Reading protein identifiers.')\n", (14566, 14604), False, 'import click\n'), ((14937, 15197), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT PEPTIDE_ID AS id_peptide,\n GROUP_CONCAT(PROTEIN.PROTEIN_ACCESSION,\';\') AS ProteinName\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID\nGROUP BY PEPTIDE_ID;\n"""', 'con'], {}), '(\n """\nSELECT PEPTIDE_ID AS id_peptide,\n GROUP_CONCAT(PROTEIN.PROTEIN_ACCESSION,\';\') AS ProteinName\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = PROTEIN.ID\nGROUP BY PEPTIDE_ID;\n"""\n , con)\n', (14954, 15197), True, 'import pandas as pd\n'), ((15199, 15259), 'pandas.merge', 'pd.merge', (['data', 'data_protein'], {'how': '"""inner"""', 'on': "['id_peptide']"}), "(data, data_protein, how='inner', on=['id_peptide'])\n", (15207, 15259), True, 'import pandas as pd\n'), ((20561, 20584), 'sqlite3.connect', 'sqlite3.connect', (['infile'], {}), '(infile)\n', (20576, 20584), False, 'import sqlite3\n'), ((11321, 11360), 'pandas.read_sql_query', 'pd.read_sql_query', (['query_augmented', 'con'], {}), '(query_augmented, con)\n', (11338, 11360), True, 'import pandas as pd\n'), ((12001, 12052), 'pandas.merge', 'pd.merge', (['data', 'data_augmented'], {'how': '"""left"""', 'on': '"""id"""'}), "(data, data_augmented, how='left', on='id')\n", (12009, 12052), True, 'import pandas as 
pd\n'), ((14253, 14306), 'click.echo', 'click.echo', (['"""Info: Reading transition-level results."""'], {}), "('Info: Reading transition-level results.')\n", (14263, 14306), False, 'import click\n'), ((14395, 14435), 'pandas.read_sql_query', 'pd.read_sql_query', (['transition_query', 'con'], {}), '(transition_query, con)\n', (14412, 14435), True, 'import pandas as pd\n'), ((14451, 14505), 'pandas.merge', 'pd.merge', (['data', 'data_transition'], {'how': '"""left"""', 'on': "['id']"}), "(data, data_transition, how='left', on=['id'])\n", (14459, 14505), True, 'import pandas as pd\n'), ((15457, 15507), 'click.echo', 'click.echo', (['"""Info: Reading peptide-level results."""'], {}), "('Info: Reading peptide-level results.')\n", (15467, 15507), False, 'import click\n'), ((15535, 15732), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n QVALUE AS m_score_peptide_run_specific\nFROM SCORE_PEPTIDE\nWHERE CONTEXT == \'run-specific\';\n"""', 'con'], {}), '(\n """\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n QVALUE AS m_score_peptide_run_specific\nFROM SCORE_PEPTIDE\nWHERE CONTEXT == \'run-specific\';\n"""\n , con)\n', (15552, 15732), True, 'import pandas as pd\n'), ((15895, 16098), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n QVALUE AS m_score_peptide_experiment_wide\nFROM SCORE_PEPTIDE\nWHERE CONTEXT == \'experiment-wide\';\n"""', 'con'], {}), '(\n """\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n QVALUE AS m_score_peptide_experiment_wide\nFROM SCORE_PEPTIDE\nWHERE CONTEXT == \'experiment-wide\';\n"""\n , con)\n', (15912, 16098), True, 'import pandas as pd\n'), ((16258, 16418), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT PEPTIDE_ID AS id_peptide,\n QVALUE AS m_score_peptide_global\nFROM SCORE_PEPTIDE\nWHERE CONTEXT == \'global\';\n"""', 'con'], {}), '(\n """\nSELECT PEPTIDE_ID AS id_peptide,\n QVALUE AS 
m_score_peptide_global\nFROM SCORE_PEPTIDE\nWHERE CONTEXT == \'global\';\n"""\n , con)\n', (16275, 16418), True, 'import pandas as pd\n'), ((16802, 16852), 'click.echo', 'click.echo', (['"""Info: Reading protein-level results."""'], {}), "('Info: Reading protein-level results.')\n", (16812, 16852), False, 'import click\n'), ((17295, 17634), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n MIN(QVALUE) AS m_score_protein_run_specific\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID\nWHERE CONTEXT == \'run-specific\'\nGROUP BY RUN_ID,\n PEPTIDE_ID;\n"""', 'con'], {}), '(\n """\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n MIN(QVALUE) AS m_score_protein_run_specific\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID\nWHERE CONTEXT == \'run-specific\'\nGROUP BY RUN_ID,\n PEPTIDE_ID;\n"""\n , con)\n', (17312, 17634), True, 'import pandas as pd\n'), ((18212, 18557), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n MIN(QVALUE) AS m_score_protein_experiment_wide\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID\nWHERE CONTEXT == \'experiment-wide\'\nGROUP BY RUN_ID,\n PEPTIDE_ID;\n"""', 'con'], {}), '(\n """\nSELECT RUN_ID AS id_run,\n PEPTIDE_ID AS id_peptide,\n MIN(QVALUE) AS m_score_protein_experiment_wide\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID\nWHERE CONTEXT == \'experiment-wide\'\nGROUP BY RUN_ID,\n PEPTIDE_ID;\n"""\n , con)\n', (18229, 18557), True, 'import pandas as pd\n'), ((19066, 19351), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT PEPTIDE_ID AS id_peptide,\n MIN(QVALUE) AS m_score_protein_global\nFROM 
PEPTIDE_PROTEIN_MAPPING\nINNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID\nWHERE CONTEXT == \'global\'\nGROUP BY PEPTIDE_ID;\n"""', 'con'], {}), '(\n """\nSELECT PEPTIDE_ID AS id_peptide,\n MIN(QVALUE) AS m_score_protein_global\nFROM PEPTIDE_PROTEIN_MAPPING\nINNER JOIN SCORE_PROTEIN ON PEPTIDE_PROTEIN_MAPPING.PROTEIN_ID = SCORE_PROTEIN.PROTEIN_ID\nWHERE CONTEXT == \'global\'\nGROUP BY PEPTIDE_ID;\n"""\n , con)\n', (19083, 19351), True, 'import pandas as pd\n'), ((20718, 21621), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT *,\n RUN_ID || \'_\' || PRECURSOR_ID AS GROUP_ID\nFROM FEATURE_MS2\nINNER JOIN\n (SELECT RUN_ID,\n ID,\n PRECURSOR_ID,\n EXP_RT\n FROM FEATURE) AS FEATURE ON FEATURE_MS2.FEATURE_ID = FEATURE.ID\nINNER JOIN\n (SELECT ID,\n CHARGE AS VAR_PRECURSOR_CHARGE,\n DECOY\n FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID\nINNER JOIN\n (SELECT PRECURSOR_ID AS ID,\n COUNT(*) AS VAR_TRANSITION_NUM_SCORE\n FROM TRANSITION_PRECURSOR_MAPPING\n INNER JOIN TRANSITION ON TRANSITION_PRECURSOR_MAPPING.TRANSITION_ID = TRANSITION.ID\n WHERE DETECTING==1\n GROUP BY PRECURSOR_ID) AS VAR_TRANSITION_SCORE ON FEATURE.PRECURSOR_ID = VAR_TRANSITION_SCORE.ID\nINNER JOIN SCORE_MS2 ON FEATURE.ID = SCORE_MS2.FEATURE_ID\nWHERE RANK == 1\nORDER BY RUN_ID,\n PRECURSOR.ID ASC,\n FEATURE.EXP_RT ASC;\n"""', 'con'], {}), '(\n """\nSELECT *,\n RUN_ID || \'_\' || PRECURSOR_ID AS GROUP_ID\nFROM FEATURE_MS2\nINNER JOIN\n (SELECT RUN_ID,\n ID,\n PRECURSOR_ID,\n EXP_RT\n FROM FEATURE) AS FEATURE ON FEATURE_MS2.FEATURE_ID = FEATURE.ID\nINNER JOIN\n (SELECT ID,\n CHARGE AS VAR_PRECURSOR_CHARGE,\n DECOY\n FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID\nINNER JOIN\n (SELECT PRECURSOR_ID AS ID,\n COUNT(*) AS VAR_TRANSITION_NUM_SCORE\n FROM TRANSITION_PRECURSOR_MAPPING\n INNER JOIN TRANSITION ON TRANSITION_PRECURSOR_MAPPING.TRANSITION_ID = TRANSITION.ID\n WHERE DETECTING==1\n GROUP BY PRECURSOR_ID) AS 
VAR_TRANSITION_SCORE ON FEATURE.PRECURSOR_ID = VAR_TRANSITION_SCORE.ID\nINNER JOIN SCORE_MS2 ON FEATURE.ID = SCORE_MS2.FEATURE_ID\nWHERE RANK == 1\nORDER BY RUN_ID,\n PRECURSOR.ID ASC,\n FEATURE.EXP_RT ASC;\n"""\n , con)\n', (20735, 21621), True, 'import pandas as pd\n'), ((21785, 22354), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT *,\n RUN_ID || \'_\' || PRECURSOR_ID AS GROUP_ID\nFROM FEATURE_MS1\nINNER JOIN\n (SELECT RUN_ID,\n ID,\n PRECURSOR_ID,\n EXP_RT\n FROM FEATURE) AS FEATURE ON FEATURE_MS1.FEATURE_ID = FEATURE.ID\nINNER JOIN\n (SELECT ID,\n CHARGE AS VAR_PRECURSOR_CHARGE,\n DECOY\n FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID\nINNER JOIN SCORE_MS1 ON FEATURE.ID = SCORE_MS1.FEATURE_ID\nWHERE RANK == 1\nORDER BY RUN_ID,\n PRECURSOR.ID ASC,\n FEATURE.EXP_RT ASC;\n"""', 'con'], {}), '(\n """\nSELECT *,\n RUN_ID || \'_\' || PRECURSOR_ID AS GROUP_ID\nFROM FEATURE_MS1\nINNER JOIN\n (SELECT RUN_ID,\n ID,\n PRECURSOR_ID,\n EXP_RT\n FROM FEATURE) AS FEATURE ON FEATURE_MS1.FEATURE_ID = FEATURE.ID\nINNER JOIN\n (SELECT ID,\n CHARGE AS VAR_PRECURSOR_CHARGE,\n DECOY\n FROM PRECURSOR) AS PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID\nINNER JOIN SCORE_MS1 ON FEATURE.ID = SCORE_MS1.FEATURE_ID\nWHERE RANK == 1\nORDER BY RUN_ID,\n PRECURSOR.ID ASC,\n FEATURE.EXP_RT ASC;\n"""\n , con)\n', (21802, 22354), True, 'import pandas as pd\n'), ((22539, 23570), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\nSELECT TRANSITION.DECOY AS DECOY,\n FEATURE_TRANSITION.*,\n PRECURSOR.CHARGE AS VAR_PRECURSOR_CHARGE,\n TRANSITION.VAR_PRODUCT_CHARGE AS VAR_PRODUCT_CHARGE,\n SCORE_TRANSITION.*,\n RUN_ID || \'_\' || FEATURE_TRANSITION.FEATURE_ID || \'_\' || PRECURSOR_ID || \'_\' || FEATURE_TRANSITION.TRANSITION_ID AS GROUP_ID\nFROM FEATURE_TRANSITION\nINNER JOIN\n (SELECT RUN_ID,\n ID,\n PRECURSOR_ID,\n EXP_RT\n FROM FEATURE) AS FEATURE ON FEATURE_TRANSITION.FEATURE_ID = FEATURE.ID\nINNER JOIN PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID\nINNER 
JOIN SCORE_TRANSITION ON FEATURE_TRANSITION.FEATURE_ID = SCORE_TRANSITION.FEATURE_ID\nAND FEATURE_TRANSITION.TRANSITION_ID = SCORE_TRANSITION.TRANSITION_ID\nINNER JOIN\n (SELECT ID,\n CHARGE AS VAR_PRODUCT_CHARGE,\n DECOY\n FROM TRANSITION) AS TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID\nORDER BY RUN_ID,\n PRECURSOR.ID,\n FEATURE.EXP_RT,\n TRANSITION.ID;\n"""', 'con'], {}), '(\n """\nSELECT TRANSITION.DECOY AS DECOY,\n FEATURE_TRANSITION.*,\n PRECURSOR.CHARGE AS VAR_PRECURSOR_CHARGE,\n TRANSITION.VAR_PRODUCT_CHARGE AS VAR_PRODUCT_CHARGE,\n SCORE_TRANSITION.*,\n RUN_ID || \'_\' || FEATURE_TRANSITION.FEATURE_ID || \'_\' || PRECURSOR_ID || \'_\' || FEATURE_TRANSITION.TRANSITION_ID AS GROUP_ID\nFROM FEATURE_TRANSITION\nINNER JOIN\n (SELECT RUN_ID,\n ID,\n PRECURSOR_ID,\n EXP_RT\n FROM FEATURE) AS FEATURE ON FEATURE_TRANSITION.FEATURE_ID = FEATURE.ID\nINNER JOIN PRECURSOR ON FEATURE.PRECURSOR_ID = PRECURSOR.ID\nINNER JOIN SCORE_TRANSITION ON FEATURE_TRANSITION.FEATURE_ID = SCORE_TRANSITION.FEATURE_ID\nAND FEATURE_TRANSITION.TRANSITION_ID = SCORE_TRANSITION.TRANSITION_ID\nINNER JOIN\n (SELECT ID,\n CHARGE AS VAR_PRODUCT_CHARGE,\n DECOY\n FROM TRANSITION) AS TRANSITION ON FEATURE_TRANSITION.TRANSITION_ID = TRANSITION.ID\nORDER BY RUN_ID,\n PRECURSOR.ID,\n FEATURE.EXP_RT,\n TRANSITION.ID;\n"""\n , con)\n', (22556, 23570), True, 'import pandas as pd\n'), ((15786, 15860), 'pandas.merge', 'pd.merge', (['data', 'data_peptide_run'], {'how': '"""inner"""', 'on': "['id_run', 'id_peptide']"}), "(data, data_peptide_run, how='inner', on=['id_run', 'id_peptide'])\n", (15794, 15860), True, 'import pandas as pd\n'), ((16159, 16227), 'pandas.merge', 'pd.merge', (['data', 'data_peptide_experiment'], {'on': "['id_run', 'id_peptide']"}), "(data, data_peptide_experiment, on=['id_run', 'id_peptide'])\n", (16167, 16227), True, 'import pandas as pd\n'), ((16475, 16609), 'pandas.merge', 'pd.merge', (['data', "data_peptide_global[data_peptide_global['m_score_peptide_global'] 
<\n max_global_peptide_qvalue]"], {'on': "['id_peptide']"}), "(data, data_peptide_global[data_peptide_global[\n 'm_score_peptide_global'] < max_global_peptide_qvalue], on=['id_peptide'])\n", (16483, 16609), True, 'import pandas as pd\n'), ((17688, 17762), 'pandas.merge', 'pd.merge', (['data', 'data_protein_run'], {'how': '"""inner"""', 'on': "['id_run', 'id_peptide']"}), "(data, data_protein_run, how='inner', on=['id_run', 'id_peptide'])\n", (17696, 17762), True, 'import pandas as pd\n'), ((18618, 18703), 'pandas.merge', 'pd.merge', (['data', 'data_protein_experiment'], {'how': '"""inner"""', 'on': "['id_run', 'id_peptide']"}), "(data, data_protein_experiment, how='inner', on=['id_run',\n 'id_peptide'])\n", (18626, 18703), True, 'import pandas as pd\n'), ((19408, 19560), 'pandas.merge', 'pd.merge', (['data', "data_protein_global[data_protein_global['m_score_protein_global'] <\n max_global_protein_qvalue]"], {'how': '"""inner"""', 'on': "['id_peptide']"}), "(data, data_protein_global[data_protein_global[\n 'm_score_protein_global'] < max_global_protein_qvalue], how='inner', on\n =['id_peptide'])\n", (19416, 19560), True, 'import pandas as pd\n'), ((19764, 19805), 'os.path.basename', 'os.path.basename', (["x['filename'].values[0]"], {}), "(x['filename'].values[0])\n", (19780, 19805), False, 'import os\n'), ((11509, 11541), 'numpy.min', 'np.min', (["x['ipf_peptidoform_pep']"], {}), "(x['ipf_peptidoform_pep'])\n", (11515, 11541), True, 'import numpy as np\n'), ((11635, 11667), 'numpy.min', 'np.min', (["x['ipf_peptidoform_pep']"], {}), "(x['ipf_peptidoform_pep'])\n", (11641, 11667), True, 'import numpy as np\n'), ((11764, 11796), 'numpy.min', 'np.min', (["x['ipf_peptidoform_pep']"], {}), "(x['ipf_peptidoform_pep'])\n", (11770, 11796), True, 'import numpy as np\n'), ((11889, 11921), 'numpy.min', 'np.min', (["x['ipf_peptidoform_pep']"], {}), "(x['ipf_peptidoform_pep'])\n", (11895, 11921), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import argparse
import Nets
import os
import sys
import time
import cv2
import json
import datetime
import shutil
from matplotlib import pyplot as plt
from Data_utils import data_reader,weights_utils,preprocessing
from Losses import loss_factory
from Sampler import sampler_factory
# Static parameters of the adaptation script.
MAX_DISP=256  # disparities are clipped to [0, MAX_DISP] before being saved as 16-bit PNGs
PIXEL_TH = 3  # absolute disparity error (pixels) above which a pixel counts as "bad" (bad3 metric)
def scale_tensor(tensor, scale):
    """Downscale a batched image tensor by an integer factor along height and width.

    The spatial dimensions are assumed to be axes 1 and 2 of `tensor`
    (batch-first layout); each is integer-divided by `scale` and the tensor
    is resized to that target via preprocessing.rescale_image.
    """
    dynamic_shape = tf.shape(tensor)
    target_size = [dynamic_shape[1] // scale, dynamic_shape[2] // scale]
    return preprocessing.rescale_image(tensor, target_size)
def softmax(x):
    """Compute softmax values for each set of scores in x (normalized over axis 0).

    The scores are shifted by their maximum before exponentiation. The shift
    cancels out in the exp ratio, so the result is mathematically unchanged,
    but it prevents ``np.exp`` from overflowing to ``inf`` (and the ratio from
    becoming NaN) for scores larger than roughly 700 — a real failure mode of
    the previous unshifted implementation.
    """
    shifted = np.exp(x - np.max(x, axis=0, keepdims=True))
    return shifted / np.sum(shifted, axis=0)
def main(args):
    """Run online adaptation / evaluation of a deep stereo network (TF1 graph mode).

    Builds the inference graph, the per-portion (MAD) or full-network (FULL)
    training ops, then streams the frame list once, adapting online while
    accumulating EPE / bad3 statistics that are written to CSV at the end.

    Args:
        args: parsed argparse namespace with the flags defined in this script
            (list, output, weights, mode, lr, blockConfig, ...).
    """
    #load json file config
    # train_config: list of layer-name groups, one group per intermediate
    # disparity prediction (checked by the assert below in MAD mode).
    with open(args.blockConfig) as json_data:
        train_config = json.load(json_data)
    #read input data
    with tf.variable_scope('input_reader'):
        # Single-pass, non-shuffled reader: batch_size 1, one epoch, no augmentation.
        data_set = data_reader.dataset(
            args.list,
            batch_size = 1,
            crop_shape=args.imageShape,
            num_epochs=1,
            augment=False,
            is_training=False,
            shuffle=False
        )
        left_img_batch, right_img_batch, gt_image_batch = data_set.get_batch()
        inputs={
            'left':left_img_batch,
            'right':right_img_batch,
            'target':gt_image_batch
        }
    #build inference network
    with tf.variable_scope('model'):
        net_args = {}
        net_args['left_img'] = left_img_batch
        net_args['right_img'] = right_img_batch
        net_args['split_layers'] = [None]
        net_args['sequence'] = True
        net_args['train_portion'] = 'BEGIN'
        # 'bulkhead' presumably stops gradients between network portions so each
        # MAD train op only updates its own block -- TODO confirm in Nets.
        net_args['bulkhead'] = True if args.mode=='MAD' else False
        stereo_net = Nets.get_stereo_net(args.modelName, net_args)
        print('Stereo Prediction Model:\n', stereo_net)
        # predictions: multi-resolution disparity maps; the last one is full resolution.
        predictions = stereo_net.get_disparities()
        full_res_disp = predictions[-1]
    #build real full resolution loss
    with tf.variable_scope('full_res_loss'):
        # reconstruction loss between warped right image and original left image
        # (unsupervised SSIM+L1 proxy; also used as the adaptation signal and
        # printed under the "SSIM" label in the log line below).
        full_reconstruction_loss = loss_factory.get_reprojection_loss('mean_SSIM_l1',reduced=True)(predictions,inputs)
    #build validation ops
    with tf.variable_scope('validation_error'):
        # compute error against gt
        abs_err = tf.abs(full_res_disp - gt_image_batch)
        # valid_map masks out pixels with no ground truth (gt == 0 means "missing").
        valid_map = tf.where(tf.equal(gt_image_batch, 0), tf.zeros_like(gt_image_batch, dtype=tf.float32), tf.ones_like(gt_image_batch, dtype=tf.float32))
        filtered_error = abs_err * valid_map
        # Mean end-point-error over valid pixels only.
        abs_err = tf.reduce_sum(filtered_error) / tf.reduce_sum(valid_map)
        # Fraction of valid pixels whose error exceeds PIXEL_TH (bad3 metric).
        bad_pixel_abs = tf.where(tf.greater(filtered_error, PIXEL_TH), tf.ones_like(filtered_error, dtype=tf.float32), tf.zeros_like(filtered_error, dtype=tf.float32))
        bad_pixel_perc = tf.reduce_sum(bad_pixel_abs) / tf.reduce_sum(valid_map)
    #build train ops
    disparity_trainer = tf.train.MomentumOptimizer(args.lr,0.9)
    train_ops = []
    if args.mode == 'MAD':
        #build train ops for separate portion of the network
        predictions = predictions[:-1] #remove full res disp
        # Loss for the modules is computed at reduced resolution; the target
        # disparity is divided by the scale because disparity values shrink
        # proportionally with image width.
        inputs_modules = {
            'left':scale_tensor(left_img_batch,args.reprojectionScale),
            'right':scale_tensor(right_img_batch,args.reprojectionScale),
            'target':scale_tensor(gt_image_batch,args.reprojectionScale)/args.reprojectionScale
        }
        assert(len(predictions)==len(train_config))
        for counter,p in enumerate(predictions):
            print('Build train ops for disparity {}'.format(counter))
            #rescale predictions to proper resolution
            # multiplier compensates for the resolution ratio between the
            # reference image and this intermediate prediction.
            multiplier = tf.cast(tf.shape(left_img_batch)[1]//tf.shape(p)[1],tf.float32)
            p = preprocessing.resize_to_prediction(p,inputs_modules['left'])*multiplier
            #compute reprojection error
            with tf.variable_scope('reprojection_'+str(counter)):
                reconstruction_loss = loss_factory.get_reprojection_loss('mean_SSIM_l1',reduced=True)([p],inputs_modules)
            #build train op
            # Each op minimizes only the variables of the layers listed for
            # this portion in the block-config file.
            layer_to_train = train_config[counter]
            print('Going to train on {}'.format(layer_to_train))
            var_accumulator=[]
            for name in layer_to_train:
                var_accumulator+=stereo_net.get_variables(name)
            print('Number of variable to train: {}'.format(len(var_accumulator)))
            #add new training op
            train_ops.append(disparity_trainer.minimize(reconstruction_loss,var_list=var_accumulator))
            print('Done')
            print('='*50)
        #create Sampler to fetch portions to train
        sampler = sampler_factory.get_sampler(args.sampleMode,args.numBlocks,args.fixedID)
    elif args.mode=='FULL':
        #build single train op for the full network
        train_ops.append(disparity_trainer.minimize(full_reconstruction_loss))
    # NOTE: in mode 'NONE' train_ops stays empty and only inference runs.
    if args.summary:
        #add summaries
        tf.summary.scalar('EPE',abs_err)
        tf.summary.scalar('bad3',bad_pixel_perc)
        tf.summary.image('full_res_disp',preprocessing.colorize_img(full_res_disp,cmap='jet'),max_outputs=1)
        tf.summary.image('gt_disp',preprocessing.colorize_img(gt_image_batch,cmap='jet'),max_outputs=1)
        #create summary logger
        summary_op = tf.summary.merge_all()
        logger = tf.summary.FileWriter(args.output)
    #start session
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        #init stuff
        sess.run([tf.global_variables_initializer(),tf.local_variables_initializer()])
        #restore disparity inference weights
        var_to_restore = weights_utils.get_var_to_restore_list(args.weights, [])
        assert(len(var_to_restore)>0)
        restorer = tf.train.Saver(var_list=var_to_restore)
        restorer.restore(sess,args.weights)
        print('Disparity Net Restored?: {}, number of restored variables: {}'.format(True,len(var_to_restore)))
        num_actions=len(train_ops)
        if args.mode=='FULL':
            selected_train_ops = train_ops
        else:
            # Placeholder op; in MAD mode it is replaced by sampled train ops below.
            selected_train_ops = [tf.no_op()]
        epe_accumulator = []
        bad3_accumulator = []
        time_accumulator = []
        exec_time = 0
        # How many times each network portion has been selected for training.
        fetch_counter=[0]*num_actions
        # Unnormalized scores per portion; softmax'd into sampling probabilities.
        sample_distribution=np.zeros(shape=[num_actions])
        temp_score = np.zeros(shape=[num_actions])
        loss_t_2 = 0
        loss_t_1 = 0
        expected_loss = 0
        last_trained_blocks = []
        reset_counter=0
        step=0
        max_steps=data_set.get_max_steps()
        try:
            start_time = time.time()
            while True:
                #fetch new network portion to train
                if step%args.sampleFrequency==0 and args.mode=='MAD':
                    #Sample
                    distribution = softmax(sample_distribution)
                    blocks_to_train = sampler.sample(distribution)
                    selected_train_ops = [train_ops[i] for i in blocks_to_train]
                    #accumulate sampling statistics
                    for l in blocks_to_train:
                        fetch_counter[l]+=1
                #build list of tensorflow operations that needs to be executed
                #errors and full resolution loss
                tf_fetches = [abs_err,bad_pixel_perc,full_reconstruction_loss]
                if args.summary and step%100==0:
                    #summaries
                    tf_fetches = tf_fetches + [summary_op]
                #update ops
                tf_fetches = tf_fetches+selected_train_ops
                if args.logDispStep!=-1 and step%args.logDispStep==0:
                    #prediction for serialization to disk
                    tf_fetches=tf_fetches + [full_res_disp]
                #run network
                # Fetch order matters below: [0]=EPE, [1]=bad3, [2]=loss,
                # [3]=summary (only when fetched), [-1]=disparity (only when fetched).
                fetches = sess.run(tf_fetches)
                new_loss = fetches[2]
                if args.mode == 'MAD':
                    #update sampling probabilities
                    # Reward = expected loss (linear extrapolation of the last
                    # two losses) minus the observed loss: portions trained at
                    # the previous step get credit for the improvement.
                    if step==0:
                        loss_t_2 = new_loss
                        loss_t_1 = new_loss
                    expected_loss = 2*loss_t_1-loss_t_2
                    gain_loss=expected_loss-new_loss
                    # Exponential decay of old scores before adding new reward.
                    sample_distribution = 0.99*sample_distribution
                    for i in last_trained_blocks:
                        sample_distribution[i] += 0.01*gain_loss
                    last_trained_blocks=blocks_to_train
                    loss_t_2 = loss_t_1
                    loss_t_1 = new_loss
                #accumulate performance metrics
                epe_accumulator.append(fetches[0])
                bad3_accumulator.append(fetches[1])
                if step%100==0:
                    #log on terminal
                    fbTime = (time.time()-start_time)
                    exec_time += fbTime
                    fbTime = fbTime/100
                    if args.summary:
                        logger.add_summary(fetches[3],global_step=step)
                    missing_time=(max_steps-step)*fbTime
                    print('Step:{:4d}\tbad3:{:.2f}\tEPE:{:.2f}\tSSIM:{:.2f}\tf/b time:{:3f}\tMissing time:{}'.format(step,fetches[1], fetches[0],new_loss,fbTime,datetime.timedelta(seconds=missing_time)))
                    start_time = time.time()
                #reset network if necessary
                # When the reprojection loss spikes above SSIMTh the adapted
                # weights are assumed broken and the initial ones are restored.
                if new_loss>args.SSIMTh:
                    restorer.restore(sess,args.weights)
                    reset_counter+=1
                #save disparity if requested
                if args.logDispStep!=-1 and step%args.logDispStep==0:
                    dispy=fetches[-1]
                    # KITTI-style 16-bit encoding: disparity * 256 stored as uint16.
                    dispy_to_save = np.clip(dispy[0], 0, MAX_DISP)
                    dispy_to_save = (dispy_to_save*256.0).astype(np.uint16)
                    cv2.imwrite(os.path.join(args.output, 'disparities/disparity_{}.png'.format(step)), dispy_to_save)
                step+=1
        except tf.errors.OutOfRangeError:
            # Input queue exhausted: the single epoch over the frame list is done.
            pass
        finally:
            # Serialize aggregated and per-frame statistics even on interruption.
            epe_array = epe_accumulator
            bad3_array = bad3_accumulator
            epe_accumulator = np.sum(epe_accumulator)
            bad3_accumulator = np.sum(bad3_accumulator)
            with open(os.path.join(args.output, 'stats.csv'), 'w+') as f_out:
                # report series
                f_out.write('Metrics,cumulative,average\n')
                f_out.write('EPE,{},{}\n'.format(epe_accumulator,epe_accumulator/step))
                f_out.write('bad3,{},{}\n'.format(bad3_accumulator,bad3_accumulator/step))
                f_out.write('time,{},{}\n'.format(exec_time,exec_time/step))
                f_out.write('FPS,{}\n'.format(1/(exec_time/step)))
                f_out.write('#resets,{}\n'.format(reset_counter))
                f_out.write('Blocks')
                for n in range(len(predictions)):
                    f_out.write(',{}'.format(n))
                f_out.write(',final\n')
                f_out.write('fetch_counter')
                for c in fetch_counter:
                    f_out.write(',{}'.format(c))
                f_out.write('\n')
                for c in sample_distribution:
                    f_out.write(',{}'.format(c))
                f_out.write('\n')
            # Per-iteration time/EPE/bad3 series for plotting.
            step_time = exec_time/step
            time_array = [str(x*step_time) for x in range(len(epe_array))]
            with open(os.path.join(args.output,'series.csv'),'w+') as f_out:
                f_out.write('Iteration,Time,EPE,bad3\n')
                for i,(t,e,b) in enumerate(zip(time_array,epe_array,bad3_array)):
                    f_out.write('{},{},{},{}\n'.format(i,t,e,b))
            print('Result saved in {}'.format(args.output))
            print('All Done, Bye Bye!')
if __name__=='__main__':
    # Command-line interface: every knob of the online adaptation is a flag.
    parser=argparse.ArgumentParser(description='Script for online Adaptation of a Deep Stereo Network')
    parser.add_argument("-l","--list", help='path to the list file with frames to be processed', required=True)
    parser.add_argument("-o","--output", help="path to the output folder where the results will be saved", required=True)
    parser.add_argument("--weights",help="path to the initial weights for the disparity estimation network",required=True)
    parser.add_argument("--modelName", help="name of the stereo model to be used", default="Dispnet", choices=Nets.STEREO_FACTORY.keys())
    parser.add_argument("--numBlocks", help="number of CNN portions to train at each iteration",type=int,default=1)
    parser.add_argument("--lr", help="value for learning rate",default=0.0001, type=float)
    parser.add_argument("--blockConfig",help="path to the block_config json file",required=True)
    parser.add_argument("--sampleMode",help="choose the sampling heuristic to use",choices=sampler_factory.AVAILABLE_SAMPLER,default='SAMPLE')
    parser.add_argument("--fixedID",help="index of the portions of network to train, used only if sampleMode=FIXED",type=int,nargs='+',default=[0])
    parser.add_argument("--reprojectionScale",help="compute all loss function at 1/reprojectionScale",default=1,type=int)
    parser.add_argument("--summary",help='flag to enable tensorboard summaries',action='store_true')
    parser.add_argument("--imageShape", help='two int for the size of the crop extracted from each image [height,width]', nargs='+', type=int, default=[320,1216])
    parser.add_argument("--SSIMTh",help="reset network to initial configuration if loss is above this value",type=float,default=0.5)
    parser.add_argument("--sampleFrequency",help="sample new network portions to train every K frame",type=int,default=1)
    parser.add_argument("--mode",help="online adaptation mode: NONE - perform only inference, FULL - full online backprop, MAD - backprop only on portions of the network", choices=['NONE','FULL','MAD'], default='MAD')
    parser.add_argument("--logDispStep", help="save disparity every K step, -1 to disable", default=-1, type=int)
    args=parser.parse_args()
    # Create the output tree (and the disparity dump folder when requested).
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    if args.logDispStep!=-1 and not os.path.exists(os.path.join(args.output, 'disparities')):
        os.makedirs(os.path.join(args.output, 'disparities'))
    # Snapshot the block configuration next to the results for reproducibility.
    shutil.copy(args.blockConfig,os.path.join(args.output,'config.json'))
    # Save the exact command line as a re-runnable shell script.
    with open(os.path.join(args.output, 'params.sh'), 'w+') as out:
        sys.argv[0] = os.path.join(os.getcwd(), sys.argv[0])
        out.write('#!/bin/bash\n')
        out.write('python3 ')
        out.write(' '.join(sys.argv))
        out.write('\n')
    main(args)
| [
"Data_utils.weights_utils.get_var_to_restore_list",
"tensorflow.reduce_sum",
"numpy.sum",
"argparse.ArgumentParser",
"tensorflow.zeros_like",
"numpy.clip",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"numpy.exp",
"Nets.get_stereo_net",
"Losses.loss_factory.get_reprojectio... | [((2620, 2660), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['args.lr', '(0.9)'], {}), '(args.lr, 0.9)\n', (2646, 2660), True, 'import tensorflow as tf\n'), ((4809, 4841), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (4822, 4841), True, 'import tensorflow as tf\n'), ((9812, 9909), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for online Adaptation of a Deep Stereo Network"""'}), "(description=\n 'Script for online Adaptation of a Deep Stereo Network')\n", (9835, 9909), False, 'import argparse\n'), ((586, 595), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (592, 595), True, 'import numpy as np\n'), ((726, 746), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (735, 746), False, 'import json\n'), ((773, 806), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input_reader"""'], {}), "('input_reader')\n", (790, 806), True, 'import tensorflow as tf\n'), ((821, 960), 'Data_utils.data_reader.dataset', 'data_reader.dataset', (['args.list'], {'batch_size': '(1)', 'crop_shape': 'args.imageShape', 'num_epochs': '(1)', 'augment': '(False)', 'is_training': '(False)', 'shuffle': '(False)'}), '(args.list, batch_size=1, crop_shape=args.imageShape,\n num_epochs=1, augment=False, is_training=False, shuffle=False)\n', (840, 960), False, 'from Data_utils import data_reader, weights_utils, preprocessing\n'), ((1186, 1212), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {}), "('model')\n", (1203, 1212), True, 'import tensorflow as tf\n'), ((1492, 1537), 'Nets.get_stereo_net', 'Nets.get_stereo_net', (['args.modelName', 'net_args'], {}), '(args.modelName, net_args)\n', (1511, 1537), False, 'import Nets\n'), ((1709, 1743), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""full_res_loss"""'], {}), "('full_res_loss')\n", (1726, 1743), True, 'import tensorflow as tf\n'), ((1965, 2002), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""validation_error"""'], {}), "('validation_error')\n", (1982, 2002), True, 'import tensorflow as tf\n'), ((2045, 2083), 'tensorflow.abs', 'tf.abs', (['(full_res_disp - gt_image_batch)'], {}), '(full_res_disp - gt_image_batch)\n', (2051, 2083), True, 'import tensorflow as tf\n'), ((4129, 4203), 'Sampler.sampler_factory.get_sampler', 'sampler_factory.get_sampler', (['args.sampleMode', 'args.numBlocks', 'args.fixedID'], {}), '(args.sampleMode, args.numBlocks, args.fixedID)\n', (4156, 4203), False, 'from Sampler import sampler_factory\n'), ((4389, 4422), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""EPE"""', 'abs_err'], {}), "('EPE', abs_err)\n", (4406, 4422), True, 'import tensorflow as tf\n'), ((4424, 4465), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""bad3"""', 'bad_pixel_perc'], {}), "('bad3', bad_pixel_perc)\n", (4441, 4465), True, 'import tensorflow as tf\n'), ((4707, 4729), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4727, 4729), True, 'import tensorflow as tf\n'), ((4741, 4775), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['args.output'], {}), '(args.output)\n', (4762, 4775), True, 'import tensorflow as tf\n'), ((5070, 5125), 'Data_utils.weights_utils.get_var_to_restore_list', 'weights_utils.get_var_to_restore_list', (['args.weights', '[]'], {}), '(args.weights, [])\n', (5107, 5125), False, 'from Data_utils import data_reader, weights_utils, preprocessing\n'), ((5171, 5210), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_to_restore'}), '(var_list=var_to_restore)\n', (5185, 5210), True, 'import tensorflow as tf\n'), ((5630, 5659), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_actions]'}), '(shape=[num_actions])\n', (5638, 5659), True, 'import numpy as np\n'), ((5675, 5704), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_actions]'}), '(shape=[num_actions])\n', (5683, 5704), True, 'import numpy as np\n'), ((11955, 11982), 
'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (11969, 11982), False, 'import os\n'), ((11986, 12010), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (11997, 12010), False, 'import os\n'), ((12188, 12228), 'os.path.join', 'os.path.join', (['args.output', '"""config.json"""'], {}), "(args.output, 'config.json')\n", (12200, 12228), False, 'import os\n'), ((605, 614), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (611, 614), True, 'import numpy as np\n'), ((1850, 1914), 'Losses.loss_factory.get_reprojection_loss', 'loss_factory.get_reprojection_loss', (['"""mean_SSIM_l1"""'], {'reduced': '(True)'}), "('mean_SSIM_l1', reduced=True)\n", (1884, 1914), False, 'from Losses import loss_factory\n'), ((2107, 2134), 'tensorflow.equal', 'tf.equal', (['gt_image_batch', '(0)'], {}), '(gt_image_batch, 0)\n', (2115, 2134), True, 'import tensorflow as tf\n'), ((2136, 2183), 'tensorflow.zeros_like', 'tf.zeros_like', (['gt_image_batch'], {'dtype': 'tf.float32'}), '(gt_image_batch, dtype=tf.float32)\n', (2149, 2183), True, 'import tensorflow as tf\n'), ((2185, 2231), 'tensorflow.ones_like', 'tf.ones_like', (['gt_image_batch'], {'dtype': 'tf.float32'}), '(gt_image_batch, dtype=tf.float32)\n', (2197, 2231), True, 'import tensorflow as tf\n'), ((2285, 2314), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['filtered_error'], {}), '(filtered_error)\n', (2298, 2314), True, 'import tensorflow as tf\n'), ((2317, 2341), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['valid_map'], {}), '(valid_map)\n', (2330, 2341), True, 'import tensorflow as tf\n'), ((2369, 2405), 'tensorflow.greater', 'tf.greater', (['filtered_error', 'PIXEL_TH'], {}), '(filtered_error, PIXEL_TH)\n', (2379, 2405), True, 'import tensorflow as tf\n'), ((2407, 2453), 'tensorflow.ones_like', 'tf.ones_like', (['filtered_error'], {'dtype': 'tf.float32'}), '(filtered_error, dtype=tf.float32)\n', (2419, 2453), True, 'import tensorflow as tf\n'), ((2455, 2502), 'tensorflow.zeros_like', 
'tf.zeros_like', (['filtered_error'], {'dtype': 'tf.float32'}), '(filtered_error, dtype=tf.float32)\n', (2468, 2502), True, 'import tensorflow as tf\n'), ((2523, 2551), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['bad_pixel_abs'], {}), '(bad_pixel_abs)\n', (2536, 2551), True, 'import tensorflow as tf\n'), ((2554, 2578), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['valid_map'], {}), '(valid_map)\n', (2567, 2578), True, 'import tensorflow as tf\n'), ((4500, 4553), 'Data_utils.preprocessing.colorize_img', 'preprocessing.colorize_img', (['full_res_disp'], {'cmap': '"""jet"""'}), "(full_res_disp, cmap='jet')\n", (4526, 4553), False, 'from Data_utils import data_reader, weights_utils, preprocessing\n'), ((4597, 4651), 'Data_utils.preprocessing.colorize_img', 'preprocessing.colorize_img', (['gt_image_batch'], {'cmap': '"""jet"""'}), "(gt_image_batch, cmap='jet')\n", (4623, 4651), False, 'from Data_utils import data_reader, weights_utils, preprocessing\n'), ((5870, 5881), 'time.time', 'time.time', ([], {}), '()\n', (5879, 5881), False, 'import time\n'), ((8481, 8504), 'numpy.sum', 'np.sum', (['epe_accumulator'], {}), '(epe_accumulator)\n', (8487, 8504), True, 'import numpy as np\n'), ((8527, 8551), 'numpy.sum', 'np.sum', (['bad3_accumulator'], {}), '(bad3_accumulator)\n', (8533, 8551), True, 'import numpy as np\n'), ((10360, 10386), 'Nets.STEREO_FACTORY.keys', 'Nets.STEREO_FACTORY.keys', ([], {}), '()\n', (10384, 10386), False, 'import Nets\n'), ((12116, 12156), 'os.path.join', 'os.path.join', (['args.output', '"""disparities"""'], {}), "(args.output, 'disparities')\n", (12128, 12156), False, 'import os\n'), ((12240, 12278), 'os.path.join', 'os.path.join', (['args.output', '"""params.sh"""'], {}), "(args.output, 'params.sh')\n", (12252, 12278), False, 'import os\n'), ((12323, 12334), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12332, 12334), False, 'import os\n'), ((3344, 3405), 'Data_utils.preprocessing.resize_to_prediction', 'preprocessing.resize_to_prediction', (['p', 
"inputs_modules['left']"], {}), "(p, inputs_modules['left'])\n", (3378, 3405), False, 'from Data_utils import data_reader, weights_utils, preprocessing\n'), ((4866, 4905), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (4880, 4905), True, 'import tensorflow as tf\n'), ((4942, 4975), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4973, 4975), True, 'import tensorflow as tf\n'), ((4976, 5008), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (5006, 5008), True, 'import tensorflow as tf\n'), ((5476, 5486), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (5484, 5486), True, 'import tensorflow as tf\n'), ((12059, 12099), 'os.path.join', 'os.path.join', (['args.output', '"""disparities"""'], {}), "(args.output, 'disparities')\n", (12071, 12099), False, 'import os\n'), ((445, 461), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (453, 461), True, 'import tensorflow as tf\n'), ((472, 488), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (480, 488), True, 'import tensorflow as tf\n'), ((3531, 3595), 'Losses.loss_factory.get_reprojection_loss', 'loss_factory.get_reprojection_loss', (['"""mean_SSIM_l1"""'], {'reduced': '(True)'}), "('mean_SSIM_l1', reduced=True)\n", (3565, 3595), False, 'from Losses import loss_factory\n'), ((7850, 7861), 'time.time', 'time.time', ([], {}), '()\n', (7859, 7861), False, 'import time\n'), ((8131, 8161), 'numpy.clip', 'np.clip', (['dispy[0]', '(0)', 'MAX_DISP'], {}), '(dispy[0], 0, MAX_DISP)\n', (8138, 8161), True, 'import numpy as np\n'), ((8565, 8603), 'os.path.join', 'os.path.join', (['args.output', '"""stats.csv"""'], {}), "(args.output, 'stats.csv')\n", (8577, 8603), False, 'import os\n'), ((9468, 9507), 'os.path.join', 'os.path.join', (['args.output', '"""series.csv"""'], {}), "(args.output, 'series.csv')\n", (9480, 9507), False, 'import os\n'), 
((3281, 3305), 'tensorflow.shape', 'tf.shape', (['left_img_batch'], {}), '(left_img_batch)\n', (3289, 3305), True, 'import tensorflow as tf\n'), ((3310, 3321), 'tensorflow.shape', 'tf.shape', (['p'], {}), '(p)\n', (3318, 3321), True, 'import tensorflow as tf\n'), ((7451, 7462), 'time.time', 'time.time', ([], {}), '()\n', (7460, 7462), False, 'import time\n'), ((7789, 7829), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'missing_time'}), '(seconds=missing_time)\n', (7807, 7829), False, 'import datetime\n')] |
"""This module provides a pseudo-random generator.""" # Module docstring
import numpy
XORSHIFT32_DEFAULT_SHIFTS = 13, 17, 5
"""Default triple for xorshift32.""" # Attribute docstring
def xorshift32(last_value, shift_triple=None):
    """Advance a 32-bit xorshift state by one step and return it.

    ``shift_triple`` is an ``(a, b, c)`` tuple of shift amounts; any falsy
    value falls back to ``XORSHIFT32_DEFAULT_SHIFTS``.
    Reference: Marsaglia, "Xorshift RNGs", http://www.jstatsoft.org/v08/i14/paper
    """
    shift_a, shift_b, shift_c = shift_triple or XORSHIFT32_DEFAULT_SHIFTS
    state = numpy.uint32(last_value)  # coerce to a 32-bit unsigned value
    state ^= state << shift_a
    state ^= state >> shift_b
    state ^= state << shift_c
    return state
class RandomGenerator32(object):
    """Pseudo-random uint32 generator driven by the xorshift32 step function."""

    DEFAULT_SHIFTS = 13, 17, 5  # canonical Marsaglia shift triple

    def __init__(self, seed, triple=None):
        """Set up the generator.

        seed   -- initial state, coerced to numpy.uint32
        triple -- optional (a, b, c) xorshift triple; any falsy value
                  falls back to DEFAULT_SHIFTS
        """
        # Shift triple used by every subsequent rand() call.
        self.triple = triple if triple else self.DEFAULT_SHIFTS
        self._seed = numpy.uint32(seed)
        self._last_rand = self._seed

    def rand(self):
        """Advance the internal state and return the next pseudo-random integer."""
        self._last_rand = xorshift32(self._last_rand, self.triple)
        return self._last_rand

    @property
    def seed(self):
        """The initialization seed (read-only)."""
        return self._seed
| [
"numpy.uint32"
] | [((424, 448), 'numpy.uint32', 'numpy.uint32', (['last_value'], {}), '(last_value)\n', (436, 448), False, 'import numpy\n'), ((1098, 1116), 'numpy.uint32', 'numpy.uint32', (['seed'], {}), '(seed)\n', (1110, 1116), False, 'import numpy\n')] |
#!/usr/bin/env python
import argparse
import os
import cv2
import numpy as np
import plantcv as pcv
# Parse command-line arguments
def options():
    """Build the CLI parser for the pipeline and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description="Imaging processing with opencv")
    # Required inputs: image to process and the Naive Bayes PDF file.
    arg_parser.add_argument("-i", "--image", help="Input image file.", required=True)
    arg_parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=False)
    arg_parser.add_argument("-r", "--result", help="result file.", required=False)
    arg_parser.add_argument("-r2", "--coresult", help="result file.", required=False)
    arg_parser.add_argument("-p", "--pdfs", help="Naive Bayes PDF file.", required=True)
    # Optional behavior flags.
    arg_parser.add_argument("-w", "--writeimg", help="write out images.", default=False, action="store_true")
    arg_parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", default=None)
    return arg_parser.parse_args()
def main():
    """Run the full VIS + NIR plant phenotyping pipeline on one image.

    Reads the VIS image named on the command line, segments the plant with a
    Naive Bayes classifier, measures shape/boundary/color traits, then maps
    the plant mask onto the matching NIR image and measures NIR traits.
    Results are appended to the tab-delimited files given by ``--result`` and
    ``--coresult``.  Camera type ("SV" side view or "TV" top view) and zoom
    level are parsed from the image filename (underscore-delimited).
    """
    # Get options
    args = options()
    # Initialize device counter
    device = 0
    # Read in the input image
    vis, path, filename = pcv.readimage(filename=args.image, debug=args.debug)
    # Parse camera metadata
    # NOTE(review): assumes filenames like "<id>_SV_<frame>_z300_...";
    # the zoom index differs between side view and top view.
    metadata = filename.split("_")
    camera = metadata[1]
    if camera == "SV":
        zoom = metadata[3]
    elif camera == "TV":
        zoom = metadata[2]
    else:
        pcv.fatal_error("Unknown camera type: {0}".format(camera))
    # Classify each pixel as plant or background (background and system components)
    device, masks = pcv.naive_bayes_classifier(img=vis, pdf_file=args.pdfs, device=device, debug=args.debug)
    # Fill in small contours
    device, mask_filled = pcv.fill(img=np.copy(masks["plant"]), mask=np.copy(masks["plant"]), size=50, device=device,
                                     debug=args.debug)
    # Define a region of interest (margins differ per camera orientation)
    if camera == "TV":
        device, roi, roi_hierarchy = pcv.define_roi(img=vis, shape="rectangle", device=device, roi=None,
                                                    roi_input="default", debug=args.debug, adjust=True, x_adj=500,
                                                    y_adj=250, w_adj=-500, h_adj=-300)
    elif camera == "SV":
        device, roi, roi_hierarchy = pcv.define_roi(img=vis, shape="rectangle", device=device, roi=None,
                                                    roi_input="default", debug=args.debug, adjust=True, x_adj=600,
                                                    y_adj=250, w_adj=-600, h_adj=-700)
    # Find contours
    device, obj, obj_hierarchy = pcv.find_objects(img=vis, mask=mask_filled, device=device, debug=args.debug)
    # Keep contours that overlap the ROI
    device, roi_obj, roi_obj_hierarchy, obj_mask, obj_area = pcv.roi_objects(img=vis, roi_type="partial",
                                                                             roi_contour=roi,
                                                                             roi_hierarchy=roi_hierarchy,
                                                                             object_contour=obj,
                                                                             obj_hierarchy=obj_hierarchy, device=device,
                                                                             debug=args.debug)
    # Combine remaining contours into a single object (the plant)
    device, plant_obj, plant_mask = pcv.object_composition(img=vis, contours=roi_obj, hierarchy=roi_obj_hierarchy,
                                                           device=device, debug=args.debug)
    # Analyze the shape features of the plant object
    if args.writeimg:
        outfile = os.path.join(args.outdir, filename)
    else:
        outfile = False
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img=vis, imgname=filename, obj=plant_obj,
                                                                     mask=plant_mask, device=device, debug=args.debug,
                                                                     filename=outfile)
    # Write data to results file (appended; header line precedes each data line)
    results = open(args.result, "a")
    # Write shapes results
    results.write("\t".join(map(str, shape_header)) + "\n")
    results.write("\t".join(map(str, shape_data)) + "\n")
    for row in shape_img:
        results.write("\t".join(map(str, row)) + "\n")
    # If this is a side-view image, calculate boundary-line results
    # The boundary line position depends on the camera zoom level
    if camera == "SV":
        if zoom == "z300":
            device, boundary_header, boundary_data, boundary_image = pcv.analyze_bound(img=vis, imgname=filename,
                                                                                       obj=plant_obj, mask=plant_mask,
                                                                                       line_position=680, device=device,
                                                                                       debug=args.debug,
                                                                                       filename=outfile)
            results.write("\t".join(map(str, boundary_header)) + "\n")
            results.write("\t".join(map(str, boundary_data)) + "\n")
            for row in boundary_image:
                results.write("\t".join(map(str, row)) + "\n")
        elif zoom == "z1":
            device, boundary_header, boundary_data, boundary_image = pcv.analyze_bound(img=vis, imgname=filename,
                                                                                       obj=plant_obj, mask=plant_mask,
                                                                                       line_position=670, device=device,
                                                                                       debug=args.debug,
                                                                                       filename=outfile)
            results.write("\t".join(map(str, boundary_header)) + "\n")
            results.write("\t".join(map(str, boundary_data)) + "\n")
            for row in boundary_image:
                results.write("\t".join(map(str, row)) + "\n")
    # Analyze color
    device, color_headers, color_data, analysis_images = pcv.analyze_color(img=vis, imgname=filename, mask=plant_mask,
                                                                           bins=256, device=device, debug=args.debug,
                                                                           hist_plot_type=None, pseudo_channel="v",
                                                                           pseudo_bkg="img", resolution=300,
                                                                           filename=outfile)
    results.write("\t".join(map(str, color_headers)) + "\n")
    results.write("\t".join(map(str, color_data)) + "\n")
    for row in analysis_images:
        results.write("\t".join(map(str, row)) + "\n")
    results.close()
    # Find the corresponding NIR image
    device, nirpath = pcv.get_nir(path=path, filename=filename, device=device, debug=args.debug)
    nir, nir_path, nir_filename = pcv.readimage(filename=nirpath, debug=args.debug)
    device, nir = pcv.rgb2gray(img=nir, device=device, debug=args.debug)
    if camera == "TV":
        # The top-view camera needs to be rotated
        device, nir = pcv.flip(img=nir, direction="vertical", device=device, debug=args.debug)
        device, nir = pcv.flip(img=nir, direction="horizontal", device=device, debug=args.debug)
    # Rescale the size of the VIS plant mask to fit on the smaller NIR image
    device, nir_mask = pcv.resize(img=plant_mask, resize_x=0.278, resize_y=0.278, device=device, debug=args.debug)
    # Map the plant mask onto the NIR image
    # Settings depend on the camera and zoom level
    if camera == "TV":
        device, newmask = pcv.crop_position_mask(img=nir, mask=nir_mask, device=device, x=3, y=7, v_pos="bottom",
                                                 h_pos="right", debug=args.debug)
    elif camera == "SV":
        if zoom == "z300":
            device, newmask = pcv.crop_position_mask(img=nir, mask=nir_mask, device=device, x=43, y=6, v_pos="top",
                                                     h_pos="right", debug=args.debug)
        elif zoom == "z1":
            device, newmask = pcv.crop_position_mask(img=nir, mask=nir_mask, device=device, x=39, y=6, v_pos="top",
                                                     h_pos="right", debug=args.debug)
    # Identify contours (NIR image is converted to BGR because find_objects expects color)
    device, nir_objects, nir_hierarchy = pcv.find_objects(img=cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR), mask=newmask,
                                                          device=device, debug=args.debug)
    # Combine contours into a single object (plant)
    device, nir_combined, nir_combinedmask = pcv.object_composition(img=cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR),
                                                                    contours=nir_objects, hierarchy=nir_hierarchy,
                                                                    device=device, debug=args.debug)
    if args.writeimg:
        outfile = os.path.join(args.outdir, nir_filename)
    else:
        outfile = False
    # Measure the NIR contour shape properties
    device, nir_shape_header, nir_shape_data, nir_shape_img = pcv.analyze_object(
        img=cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR), imgname=nir_filename, obj=nir_combined, mask=nir_combinedmask,
        device=device, debug=args.debug, filename=outfile)
    # Write data to results file
    results = open(args.coresult, "a")
    # Write shapes results
    results.write("\t".join(map(str, nir_shape_header)) + "\n")
    results.write("\t".join(map(str, nir_shape_data)) + "\n")
    for row in nir_shape_img:
        results.write("\t".join(map(str, row)) + "\n")
    # Analyze NIR signal
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(img=nir,
                                                                           rgbimg=cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR),
                                                                           mask=nir_combinedmask, bins=256,
                                                                           device=device, histplot=False,
                                                                           debug=args.debug, filename=outfile)
    results.write("\t".join(map(str, nhist_header)) + "\n")
    results.write("\t".join(map(str, nhist_data)) + "\n")
    for row in nir_imgs:
        results.write("\t".join(map(str, row)) + "\n")
    results.close()
if __name__ == '__main__':
main()
| [
"plantcv.analyze_object",
"argparse.ArgumentParser",
"plantcv.resize",
"plantcv.analyze_color",
"plantcv.get_nir",
"numpy.copy",
"cv2.cvtColor",
"plantcv.naive_bayes_classifier",
"plantcv.rgb2gray",
"plantcv.find_objects",
"plantcv.define_roi",
"plantcv.analyze_bound",
"plantcv.crop_position... | [((162, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Imaging processing with opencv"""'}), "(description='Imaging processing with opencv')\n", (185, 231), False, 'import argparse\n'), ((1077, 1129), 'plantcv.readimage', 'pcv.readimage', ([], {'filename': 'args.image', 'debug': 'args.debug'}), '(filename=args.image, debug=args.debug)\n', (1090, 1129), True, 'import plantcv as pcv\n'), ((1503, 1595), 'plantcv.naive_bayes_classifier', 'pcv.naive_bayes_classifier', ([], {'img': 'vis', 'pdf_file': 'args.pdfs', 'device': 'device', 'debug': 'args.debug'}), '(img=vis, pdf_file=args.pdfs, device=device,\n debug=args.debug)\n', (1529, 1595), True, 'import plantcv as pcv\n'), ((2544, 2620), 'plantcv.find_objects', 'pcv.find_objects', ([], {'img': 'vis', 'mask': 'mask_filled', 'device': 'device', 'debug': 'args.debug'}), '(img=vis, mask=mask_filled, device=device, debug=args.debug)\n', (2560, 2620), True, 'import plantcv as pcv\n'), ((2724, 2906), 'plantcv.roi_objects', 'pcv.roi_objects', ([], {'img': 'vis', 'roi_type': '"""partial"""', 'roi_contour': 'roi', 'roi_hierarchy': 'roi_hierarchy', 'object_contour': 'obj', 'obj_hierarchy': 'obj_hierarchy', 'device': 'device', 'debug': 'args.debug'}), "(img=vis, roi_type='partial', roi_contour=roi, roi_hierarchy\n =roi_hierarchy, object_contour=obj, obj_hierarchy=obj_hierarchy, device\n =device, debug=args.debug)\n", (2739, 2906), True, 'import plantcv as pcv\n'), ((3385, 3501), 'plantcv.object_composition', 'pcv.object_composition', ([], {'img': 'vis', 'contours': 'roi_obj', 'hierarchy': 'roi_obj_hierarchy', 'device': 'device', 'debug': 'args.debug'}), '(img=vis, contours=roi_obj, hierarchy=\n roi_obj_hierarchy, device=device, debug=args.debug)\n', (3407, 3501), True, 'import plantcv as pcv\n'), ((3770, 3903), 'plantcv.analyze_object', 'pcv.analyze_object', ([], {'img': 'vis', 'imgname': 'filename', 'obj': 'plant_obj', 'mask': 'plant_mask', 'device': 'device', 
'debug': 'args.debug', 'filename': 'outfile'}), '(img=vis, imgname=filename, obj=plant_obj, mask=\n plant_mask, device=device, debug=args.debug, filename=outfile)\n', (3788, 3903), True, 'import plantcv as pcv\n'), ((6236, 6442), 'plantcv.analyze_color', 'pcv.analyze_color', ([], {'img': 'vis', 'imgname': 'filename', 'mask': 'plant_mask', 'bins': '(256)', 'device': 'device', 'debug': 'args.debug', 'hist_plot_type': 'None', 'pseudo_channel': '"""v"""', 'pseudo_bkg': '"""img"""', 'resolution': '(300)', 'filename': 'outfile'}), "(img=vis, imgname=filename, mask=plant_mask, bins=256,\n device=device, debug=args.debug, hist_plot_type=None, pseudo_channel=\n 'v', pseudo_bkg='img', resolution=300, filename=outfile)\n", (6253, 6442), True, 'import plantcv as pcv\n'), ((7022, 7096), 'plantcv.get_nir', 'pcv.get_nir', ([], {'path': 'path', 'filename': 'filename', 'device': 'device', 'debug': 'args.debug'}), '(path=path, filename=filename, device=device, debug=args.debug)\n', (7033, 7096), True, 'import plantcv as pcv\n'), ((7131, 7180), 'plantcv.readimage', 'pcv.readimage', ([], {'filename': 'nirpath', 'debug': 'args.debug'}), '(filename=nirpath, debug=args.debug)\n', (7144, 7180), True, 'import plantcv as pcv\n'), ((7199, 7253), 'plantcv.rgb2gray', 'pcv.rgb2gray', ([], {'img': 'nir', 'device': 'device', 'debug': 'args.debug'}), '(img=nir, device=device, debug=args.debug)\n', (7211, 7253), True, 'import plantcv as pcv\n'), ((7620, 7715), 'plantcv.resize', 'pcv.resize', ([], {'img': 'plant_mask', 'resize_x': '(0.278)', 'resize_y': '(0.278)', 'device': 'device', 'debug': 'args.debug'}), '(img=plant_mask, resize_x=0.278, resize_y=0.278, device=device,\n debug=args.debug)\n', (7630, 7715), True, 'import plantcv as pcv\n'), ((1888, 2062), 'plantcv.define_roi', 'pcv.define_roi', ([], {'img': 'vis', 'shape': '"""rectangle"""', 'device': 'device', 'roi': 'None', 'roi_input': '"""default"""', 'debug': 'args.debug', 'adjust': '(True)', 'x_adj': '(500)', 'y_adj': '(250)', 'w_adj': 
'(-500)', 'h_adj': '(-300)'}), "(img=vis, shape='rectangle', device=device, roi=None,\n roi_input='default', debug=args.debug, adjust=True, x_adj=500, y_adj=\n 250, w_adj=-500, h_adj=-300)\n", (1902, 2062), True, 'import plantcv as pcv\n'), ((3650, 3685), 'os.path.join', 'os.path.join', (['args.outdir', 'filename'], {}), '(args.outdir, filename)\n', (3662, 3685), False, 'import os\n'), ((7349, 7421), 'plantcv.flip', 'pcv.flip', ([], {'img': 'nir', 'direction': '"""vertical"""', 'device': 'device', 'debug': 'args.debug'}), "(img=nir, direction='vertical', device=device, debug=args.debug)\n", (7357, 7421), True, 'import plantcv as pcv\n'), ((7444, 7518), 'plantcv.flip', 'pcv.flip', ([], {'img': 'nir', 'direction': '"""horizontal"""', 'device': 'device', 'debug': 'args.debug'}), "(img=nir, direction='horizontal', device=device, debug=args.debug)\n", (7452, 7518), True, 'import plantcv as pcv\n'), ((7857, 7981), 'plantcv.crop_position_mask', 'pcv.crop_position_mask', ([], {'img': 'nir', 'mask': 'nir_mask', 'device': 'device', 'x': '(3)', 'y': '(7)', 'v_pos': '"""bottom"""', 'h_pos': '"""right"""', 'debug': 'args.debug'}), "(img=nir, mask=nir_mask, device=device, x=3, y=7,\n v_pos='bottom', h_pos='right', debug=args.debug)\n", (7879, 7981), True, 'import plantcv as pcv\n'), ((9161, 9200), 'os.path.join', 'os.path.join', (['args.outdir', 'nir_filename'], {}), '(args.outdir, nir_filename)\n', (9173, 9200), False, 'import os\n'), ((1661, 1684), 'numpy.copy', 'np.copy', (["masks['plant']"], {}), "(masks['plant'])\n", (1668, 1684), True, 'import numpy as np\n'), ((1691, 1714), 'numpy.copy', 'np.copy', (["masks['plant']"], {}), "(masks['plant'])\n", (1698, 1714), True, 'import numpy as np\n'), ((2220, 2394), 'plantcv.define_roi', 'pcv.define_roi', ([], {'img': 'vis', 'shape': '"""rectangle"""', 'device': 'device', 'roi': 'None', 'roi_input': '"""default"""', 'debug': 'args.debug', 'adjust': '(True)', 'x_adj': '(600)', 'y_adj': '(250)', 'w_adj': '(-600)', 'h_adj': '(-700)'}), 
"(img=vis, shape='rectangle', device=device, roi=None,\n roi_input='default', debug=args.debug, adjust=True, x_adj=600, y_adj=\n 250, w_adj=-600, h_adj=-700)\n", (2234, 2394), True, 'import plantcv as pcv\n'), ((4588, 4738), 'plantcv.analyze_bound', 'pcv.analyze_bound', ([], {'img': 'vis', 'imgname': 'filename', 'obj': 'plant_obj', 'mask': 'plant_mask', 'line_position': '(680)', 'device': 'device', 'debug': 'args.debug', 'filename': 'outfile'}), '(img=vis, imgname=filename, obj=plant_obj, mask=plant_mask,\n line_position=680, device=device, debug=args.debug, filename=outfile)\n', (4605, 4738), True, 'import plantcv as pcv\n'), ((8596, 8633), 'cv2.cvtColor', 'cv2.cvtColor', (['nir', 'cv2.COLOR_GRAY2BGR'], {}), '(nir, cv2.COLOR_GRAY2BGR)\n', (8608, 8633), False, 'import cv2\n'), ((8865, 8902), 'cv2.cvtColor', 'cv2.cvtColor', (['nir', 'cv2.COLOR_GRAY2BGR'], {}), '(nir, cv2.COLOR_GRAY2BGR)\n', (8877, 8902), False, 'import cv2\n'), ((9376, 9413), 'cv2.cvtColor', 'cv2.cvtColor', (['nir', 'cv2.COLOR_GRAY2BGR'], {}), '(nir, cv2.COLOR_GRAY2BGR)\n', (9388, 9413), False, 'import cv2\n'), ((10040, 10077), 'cv2.cvtColor', 'cv2.cvtColor', (['nir', 'cv2.COLOR_GRAY2BGR'], {}), '(nir, cv2.COLOR_GRAY2BGR)\n', (10052, 10077), False, 'import cv2\n'), ((5421, 5571), 'plantcv.analyze_bound', 'pcv.analyze_bound', ([], {'img': 'vis', 'imgname': 'filename', 'obj': 'plant_obj', 'mask': 'plant_mask', 'line_position': '(670)', 'device': 'device', 'debug': 'args.debug', 'filename': 'outfile'}), '(img=vis, imgname=filename, obj=plant_obj, mask=plant_mask,\n line_position=670, device=device, debug=args.debug, filename=outfile)\n', (5438, 5571), True, 'import plantcv as pcv\n'), ((8109, 8231), 'plantcv.crop_position_mask', 'pcv.crop_position_mask', ([], {'img': 'nir', 'mask': 'nir_mask', 'device': 'device', 'x': '(43)', 'y': '(6)', 'v_pos': '"""top"""', 'h_pos': '"""right"""', 'debug': 'args.debug'}), "(img=nir, mask=nir_mask, device=device, x=43, y=6,\n v_pos='top', h_pos='right', 
debug=args.debug)\n", (8131, 8231), True, 'import plantcv as pcv\n'), ((8338, 8460), 'plantcv.crop_position_mask', 'pcv.crop_position_mask', ([], {'img': 'nir', 'mask': 'nir_mask', 'device': 'device', 'x': '(39)', 'y': '(6)', 'v_pos': '"""top"""', 'h_pos': '"""right"""', 'debug': 'args.debug'}), "(img=nir, mask=nir_mask, device=device, x=39, y=6,\n v_pos='top', h_pos='right', debug=args.debug)\n", (8360, 8460), True, 'import plantcv as pcv\n')] |
import ipyvolume as ipv
import ipywidgets as ipw
import numpy as np
from ipywidgets_bokeh import IPyWidget
from bokeh.layouts import column, row
from bokeh.models import Slider
from bokeh.plotting import curdoc
# Build the initial random 3-D scatter and grab a handle to the ipyvolume figure.
x, y, z = np.random.random((3, 1000))
ipv.quickscatter(x, y, z, size=1, marker="sphere")
plot = ipv.current.figure
# One Bokeh slider per rotation axis, in whole degrees.
x_slider = Slider(start=0, end=359, value=0, step=1, title="X-axis")
y_slider = Slider(start=0, end=359, value=0, step=1, title="Y-axis")
z_slider = Slider(start=0, end=359, value=0, step=1, title="Z-axis")
def randomize(button):
    """Replace the scatter data with a fresh random point cloud.

    ``button`` is the ipywidgets Button that fired the click; it is unused.
    """
    new_x, new_y, new_z = np.random.random((3, 1000))
    scatter = plot.scatters[0]
    # hold_sync batches the three trait updates into one message to the front end.
    with plot.hold_sync():
        scatter.x = new_x
        scatter.y = new_y
        scatter.z = new_z
# Wire the "Randomize" button to its click handler.
randomize_button = ipw.Button(description="Randomize")
randomize_button.on_click(randomize)
def change_anglex(change):
    """Mirror the figure's ``anglex`` trait (radians) onto the X slider (degrees)."""
    degrees = round(np.degrees(change["new"])) % 360
    x_slider.value = degrees
def change_angley(change):
    """Mirror the figure's ``angley`` trait (radians) onto the Y slider (degrees)."""
    degrees = round(np.degrees(change["new"])) % 360
    y_slider.value = degrees
def change_anglez(change):
    """Mirror the figure's ``anglez`` trait (radians) onto the Z slider (degrees)."""
    degrees = round(np.degrees(change["new"])) % 360
    z_slider.value = degrees
# Propagate figure rotation (ipyvolume traits) back onto the Bokeh sliders.
plot.observe(change_anglex, names="anglex")
plot.observe(change_angley, names="angley")
plot.observe(change_anglez, names="anglez")
def change_x(_attr, _old, new):
    """Rotate the figure about X when the slider moves (degrees -> radians)."""
    radians = np.radians(new)
    plot.anglex = radians
def change_y(_attr, _old, new):
    """Rotate the figure about Y when the slider moves (degrees -> radians)."""
    radians = np.radians(new)
    plot.angley = radians
def change_z(_attr, _old, new):
    """Rotate the figure about Z when the slider moves (degrees -> radians)."""
    radians = np.radians(new)
    plot.anglez = radians
# Hook slider changes to the rotation handlers.
x_slider.on_change("value", change_x)
y_slider.on_change("value", change_y)
z_slider.on_change("value", change_z)
# Wrap the ipywidgets objects so Bokeh can embed them, then lay out the page.
button_wrapper = IPyWidget(widget=randomize_button)
plot_wrapper = IPyWidget(widget=plot)
vbox = column([x_slider, y_slider, z_slider, button_wrapper])
hbox = row([vbox, plot_wrapper])
doc = curdoc()
doc.add_root(hbox)
| [
"numpy.radians",
"ipyvolume.quickscatter",
"ipywidgets.Button",
"bokeh.models.Slider",
"numpy.degrees",
"numpy.random.random",
"bokeh.plotting.curdoc",
"ipywidgets_bokeh.IPyWidget",
"bokeh.layouts.column",
"bokeh.layouts.row"
] | [((223, 250), 'numpy.random.random', 'np.random.random', (['(3, 1000)'], {}), '((3, 1000))\n', (239, 250), True, 'import numpy as np\n'), ((251, 301), 'ipyvolume.quickscatter', 'ipv.quickscatter', (['x', 'y', 'z'], {'size': '(1)', 'marker': '"""sphere"""'}), "(x, y, z, size=1, marker='sphere')\n", (267, 301), True, 'import ipyvolume as ipv\n'), ((340, 397), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0)', 'end': '(359)', 'value': '(0)', 'step': '(1)', 'title': '"""X-axis"""'}), "(start=0, end=359, value=0, step=1, title='X-axis')\n", (346, 397), False, 'from bokeh.models import Slider\n'), ((409, 466), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0)', 'end': '(359)', 'value': '(0)', 'step': '(1)', 'title': '"""Y-axis"""'}), "(start=0, end=359, value=0, step=1, title='Y-axis')\n", (415, 466), False, 'from bokeh.models import Slider\n'), ((478, 535), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0)', 'end': '(359)', 'value': '(0)', 'step': '(1)', 'title': '"""Z-axis"""'}), "(start=0, end=359, value=0, step=1, title='Z-axis')\n", (484, 535), False, 'from bokeh.models import Slider\n'), ((746, 781), 'ipywidgets.Button', 'ipw.Button', ([], {'description': '"""Randomize"""'}), "(description='Randomize')\n", (756, 781), True, 'import ipywidgets as ipw\n'), ((1574, 1608), 'ipywidgets_bokeh.IPyWidget', 'IPyWidget', ([], {'widget': 'randomize_button'}), '(widget=randomize_button)\n', (1583, 1608), False, 'from ipywidgets_bokeh import IPyWidget\n'), ((1624, 1646), 'ipywidgets_bokeh.IPyWidget', 'IPyWidget', ([], {'widget': 'plot'}), '(widget=plot)\n', (1633, 1646), False, 'from ipywidgets_bokeh import IPyWidget\n'), ((1655, 1709), 'bokeh.layouts.column', 'column', (['[x_slider, y_slider, z_slider, button_wrapper]'], {}), '([x_slider, y_slider, z_slider, button_wrapper])\n', (1661, 1709), False, 'from bokeh.layouts import column, row\n'), ((1717, 1742), 'bokeh.layouts.row', 'row', (['[vbox, plot_wrapper]'], {}), '([vbox, plot_wrapper])\n', (1720, 1742), 
False, 'from bokeh.layouts import column, row\n'), ((1750, 1758), 'bokeh.plotting.curdoc', 'curdoc', ([], {}), '()\n', (1756, 1758), False, 'from bokeh.plotting import curdoc\n'), ((574, 601), 'numpy.random.random', 'np.random.random', (['(3, 1000)'], {}), '((3, 1000))\n', (590, 601), True, 'import numpy as np\n'), ((1294, 1309), 'numpy.radians', 'np.radians', (['new'], {}), '(new)\n', (1304, 1309), True, 'import numpy as np\n'), ((1360, 1375), 'numpy.radians', 'np.radians', (['new'], {}), '(new)\n', (1370, 1375), True, 'import numpy as np\n'), ((1426, 1441), 'numpy.radians', 'np.radians', (['new'], {}), '(new)\n', (1436, 1441), True, 'import numpy as np\n'), ((861, 886), 'numpy.degrees', 'np.degrees', (["change['new']"], {}), "(change['new'])\n", (871, 886), True, 'import numpy as np\n'), ((958, 983), 'numpy.degrees', 'np.degrees', (["change['new']"], {}), "(change['new'])\n", (968, 983), True, 'import numpy as np\n'), ((1055, 1080), 'numpy.degrees', 'np.degrees', (["change['new']"], {}), "(change['new'])\n", (1065, 1080), True, 'import numpy as np\n')] |
import argparse
from abc import ABC
from typing import Optional
import numpy as np
def create_treatment_assignment_dict(
    all_treatment_ids, sorted_selected_idx, propensities_of_selected_treatments
) -> dict:
    """Map selected treatment indices back to ids and bundle them with their propensities.

    all_treatment_ids -- full list of treatment identifiers
    sorted_selected_idx -- indices into all_treatment_ids (sorted by propensity)
    propensities_of_selected_treatments -- propensities aligned with sorted_selected_idx

    Returns a dict with keys "treatment_ids" and "propensities".
    """
    # Renamed the loop variable: the original shadowed the builtin `id`.
    selected_treatment_ids = [all_treatment_ids[idx] for idx in sorted_selected_idx]
    return {
        "treatment_ids": selected_treatment_ids,
        "propensities": propensities_of_selected_treatments,
    }
class TreatmentAssignmentPolicy(ABC):
    """Abstract base for policies that assign treatments to units."""

    def __init__(self, treatment_ids: list, args: argparse.Namespace):
        """Store the treatment catalogue and the experiment configuration."""
        self.args = args
        self.treatment_ids = treatment_ids
        self.bias = args.bias  # strength of the propensity tilt

    def assign_treatment(self, unit: np.ndarray):
        """Choose a treatment for ``unit``; concrete policies override this."""
        pass

    def get_assignments_for_unit(
        self, unit: np.ndarray, mode: str, num_test_treatments_per_unit: int = 5
    ):
        """Return candidate assignments for ``unit``; concrete policies override this."""
        pass

    def __get_most_likely_assignments_for_unit(
        self, unit: np.ndarray, num_test_treatments_per_unit: int = 5
    ):
        """Hook for the most-likely selection strategy; concrete policies override this."""
        pass
class RandomTAP(TreatmentAssignmentPolicy):
    """Random linear-score policy: propensities are softmax(bias * W @ phi(unit))."""

    def __init__(self, treatment_ids: list, args, weights: Optional[np.ndarray] = None):
        """Initialize the policy.

        weights -- optional (num_treatments, dim_covariates) score matrix;
                   sampled randomly when omitted.
        """
        super().__init__(treatment_ids, args)
        self.dim_covariates = args.dim_covariates
        self.policy = args.propensity_covariates_preprocessing
        # BUG FIX: the original `weights if weights else ...` raises
        # "truth value of an array ... is ambiguous" (ValueError) for any
        # multi-element ndarray; an explicit None check is required.
        self.weights = weights if weights is not None else self.sample_weights()

    def sample_weights(self) -> np.ndarray:
        """Draw one weight row per treatment (uniform or Gaussian per config)."""
        weights = np.zeros(shape=(len(self.treatment_ids), self.dim_covariates))
        for i in range(len(self.treatment_ids)):
            weights[i] = (
                np.random.uniform(size=(self.dim_covariates), low=0.0, high=1.0)
                if self.args.treatment_assignment_matrix_distribution == "uniform"
                else np.random.multivariate_normal(
                    mean=self.dim_covariates * [0.0],
                    cov=1.0 * np.eye(self.dim_covariates),
                    size=(1),
                )
            )
        return weights

    def assign_treatment(self, unit: np.ndarray) -> int:
        """Sample one treatment id with probability softmax(bias * W @ phi(unit))."""
        propensity_probabilities = softmax(
            self.bias * np.matmul(self.weights, self.preprocess_covariates(unit))
        )
        assigned_treatment = np.random.choice(
            a=self.treatment_ids, p=propensity_probabilities
        )
        return assigned_treatment

    def preprocess_covariates(self, covariates: np.ndarray) -> np.ndarray:
        """Apply the configured covariate transform (elementwise square or identity)."""
        if self.policy == "squared":
            return covariates ** 2
        return covariates

    def get_assignments_for_unit(
        self, unit: np.ndarray, mode: str, num_test_treatments_per_unit: int = 5
    ):
        """Return candidate assignments for ``unit``; only mode == "most" is supported."""
        assignments = None
        if mode == "most":
            assignments = self.__get_most_likely_assignments_for_unit(
                unit=unit, num_test_treatments_per_unit=num_test_treatments_per_unit
            )
        return assignments

    def __get_most_likely_assignments_for_unit(
        self, unit: np.ndarray, num_test_treatments_per_unit: int = 3
    ) -> dict:
        """Pick the top-k treatments by propensity for ``unit``.

        NOTE(review): unlike assign_treatment, this ranking omits the bias
        factor — confirm the asymmetry is intentional.
        """
        propensity_probabilities = softmax(
            np.matmul(self.weights, self.preprocess_covariates(unit))
        )
        sorted_ids = np.argsort(propensity_probabilities)
        sorted_ids = sorted_ids[-num_test_treatments_per_unit:].tolist()
        propensities_of_selected_treatments = propensity_probabilities[sorted_ids]
        return create_treatment_assignment_dict(
            all_treatment_ids=self.treatment_ids,
            sorted_selected_idx=sorted_ids,
            propensities_of_selected_treatments=propensities_of_selected_treatments,
        )
def softmax(x: np.ndarray) -> np.ndarray:
    """Numerically stable softmax along axis 0 (subtracts the max before exp)."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=0)
| [
"numpy.random.uniform",
"numpy.argsort",
"numpy.max",
"numpy.random.choice",
"numpy.eye"
] | [((2177, 2243), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'self.treatment_ids', 'p': 'propensity_probabilities'}), '(a=self.treatment_ids, p=propensity_probabilities)\n', (2193, 2243), True, 'import numpy as np\n'), ((3128, 3164), 'numpy.argsort', 'np.argsort', (['propensity_probabilities'], {}), '(propensity_probabilities)\n', (3138, 3164), True, 'import numpy as np\n'), ((3624, 3633), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3630, 3633), True, 'import numpy as np\n'), ((1556, 1618), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.dim_covariates', 'low': '(0.0)', 'high': '(1.0)'}), '(size=self.dim_covariates, low=0.0, high=1.0)\n', (1573, 1618), True, 'import numpy as np\n'), ((1840, 1867), 'numpy.eye', 'np.eye', (['self.dim_covariates'], {}), '(self.dim_covariates)\n', (1846, 1867), True, 'import numpy as np\n')] |
from src.constants import NUM_CORES
import pandas as pd
import numpy as np
from multiprocessing.pool import Pool
def parallelize_dataframe(df: pd.DataFrame, func, n_cores=NUM_CORES):
    """Apply ``func`` to ``n_cores`` chunks of ``df`` in parallel and re-concatenate.

    df      -- input DataFrame, split row-wise into n_cores pieces
    func    -- callable taking and returning a DataFrame (must be picklable)
    n_cores -- worker count, defaults to NUM_CORES

    Returns the concatenation of the per-chunk results.
    """
    df_split = np.array_split(df, n_cores)
    pool = Pool(n_cores)
    # FIX: guarantee the worker pool is cleaned up even if `func` raises;
    # the original leaked the pool on any exception in pool.map.
    try:
        df = pd.concat(pool.map(func, df_split))
    finally:
        pool.close()
        pool.join()
    return df
| [
"numpy.array_split",
"multiprocessing.pool.Pool"
] | [((207, 234), 'numpy.array_split', 'np.array_split', (['df', 'n_cores'], {}), '(df, n_cores)\n', (221, 234), True, 'import numpy as np\n'), ((247, 260), 'multiprocessing.pool.Pool', 'Pool', (['n_cores'], {}), '(n_cores)\n', (251, 260), False, 'from multiprocessing.pool import Pool\n')] |
import numpy as np
# Scan a random 5x5 grid for 3- or 4-cell "paths" (a cell plus neighbors)
# whose values sum to 21, printing each hit and remembering the first one.
matrix = np.random.randint(10, size=(5, 5))
print(matrix);
i=0;
j=0;
shortestPath = [];
# Any non-empty string is truthy, so this loops until the explicit break.
while "true":
    if i >= 5:
        break;
    center = int(matrix[i][j]);
    # Collect in-bounds 4-neighbors as [row, col, value] triples.
    nearElements = [];
    top = 0;
    if i > 0:
        nearElements.append([i - 1, j, (matrix[i - 1][j])]);
        top = (matrix[i - 1][j]);
    bottom = 0;
    if i < 4:
        nearElements.append([i + 1, j, (matrix[i + 1][j])]);
        bottom = (matrix[i + 1][j]);
    right = 0;
    if j < 4:
        nearElements.append([i, j + 1, (matrix[i][j + 1])]);
        right = (matrix[i][j + 1]);
    left = 0;
    if j != 0:
        nearElements.append([i, j - 1, (matrix[i][j - 1])]);
        left = (matrix[i][j - 1]);
    for idxI in range(len(nearElements)):
        twoElementSum = center + nearElements[idxI][2];
        # NOTE(review): `len(nearElements)-1` excludes the last neighbor from
        # the pair/triple search — looks like an off-by-one; confirm intent.
        for idxJ in range(idxI + 1, len(nearElements)-1):
            tempSum = center + nearElements[idxI][2] + nearElements[idxJ][2];
            if tempSum == 21:
                print("****************************************");
                print ("Path that sums up to 21: ");
                print(center," (",i,",", j,")");
                print(nearElements[idxI][2]," (" ,nearElements[idxI][0],",", nearElements[idxI][1],")");
                # NOTE(review): indices [1],[2] print (col, value) instead of
                # (row, col) — likely should be [0],[1] as in the line above.
                print(nearElements[idxJ][2]," (", nearElements[idxJ][1],",", nearElements[idxJ][2],")");
                print("****************************************");
                if not shortestPath:
                    shortestPath.append([i, j, center]);
                    shortestPath.append([nearElements[idxI][0], nearElements[idxI][1], nearElements[idxI][2]]);
                    shortestPath.append([nearElements[idxJ][0], nearElements[idxJ][1], nearElements[idxJ][2]]);
            # NOTE(review): idxK starts at j + 1 (the column index) rather
            # than idxJ + 1 — suspicious; may revisit elements or skip some.
            for idxK in range(j + 1,len(nearElements) - 1):
                fourSum = center + nearElements[idxI][2] + nearElements[idxJ][2] + nearElements[idxK][2];
                if fourSum == 21:
                    print();
                    print ("Path that sums up to 21: ");
                    print(center," (",i,",", j,")");
                    print(nearElements[idxI][2]," (" ,nearElements[idxI][0],",", nearElements[idxI][1],")");
                    print(nearElements[idxJ][2]," (", nearElements[idxJ][1],",", nearElements[idxJ][2],")");
                    print(nearElements[idxK][2]," (", nearElements[idxK][1],",", nearElements[idxK][2],")");
                    if not shortestPath:
                        shortestPath.append([i, j, center]);
                        shortestPath.append([nearElements[idxI][0], nearElements[idxI][1], nearElements[idxI][2]]);
                        shortestPath.append([nearElements[idxJ][0], nearElements[idxJ][1], nearElements[idxJ][2]]);
                        shortestPath.append([nearElements[idxK][0], nearElements[idxK][1], nearElements[idxK][2]]);
    # Advance column-major style: next column, wrapping to the next row.
    j+=1
    if j == 5:
        i+=1;
        j = 0;
if len(shortestPath)>0:
    print ("Shortest path ");
    for idx in range(0,len(shortestPath)):
        print (shortestPath[idx][2] , "(", shortestPath[idx][0],",",shortestPath[idx][1],")");
else:
    print ("No path found that sums up to 21");
"numpy.random.randint"
] | [((28, 62), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(5, 5)'}), '(10, size=(5, 5))\n', (45, 62), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
def reshape_img(matrix):
    """Turn a flat (n, 784) pandas object into an (n, 28, 28) image stack.

    Prints the error and returns None when ``matrix`` has no ``.values``.
    """
    try:
        stacked = matrix.values.reshape(-1, 28, 28)
    except AttributeError as err:
        print(err)
        return None
    return stacked
def add_color_channel(matrix):
    """Replicate a grayscale image stack into 3 identical channels on a new last axis."""
    return np.stack((matrix, matrix, matrix), axis=-1)
def convert_ndarry(matrix):
    """Flatten a pandas Series/DataFrame to a 1-D numpy array.

    Prints the error and returns None when ``matrix`` has no ``.values``.
    """
    try:
        flattened = matrix.values.flatten()
    except AttributeError as err:
        print(err)
        return None
    return flattened
def get_data(arch, wd, sz, X_train, Y_train, X_valid, Y_valid, test_df):
    """Bundle the train/valid/test arrays into a fastai ImageClassifierData object.

    arch -- model architecture (used only to derive the transforms)
    wd   -- working directory path
    sz   -- target image size for the transforms
    """
    transforms = tfms_from_model(arch, sz)
    return ImageClassifierData.from_arrays(
        path=wd,
        trn=(X_train, Y_train),
        val=(X_valid, Y_valid),
        classes=Y_train,
        test=test_df,
        tfms=transforms,
    )
def pgfit(learn,arch, PATH, data, bs, X_train, Y_train, X_valid, Y_valid, test_df, imgsz_sched=[] ):
    """Progressive-resizing training: fit at 14px, then again at 28px.

    NOTE(review): `imgsz_sched=[]` is a mutable default argument, and after
    the default is filled in the schedule is never used — the sizes 14 and
    28 are hard-coded below.  `data` and `bs` are also unused.  Confirm
    whether the schedule was meant to drive the loop (see pgfit1).
    """
    # define image size variation during training
    if len(imgsz_sched) == 0:
        imgsz_sched = [28] # [4, 8, 16, 24, 28]
    # define differential learning rates
    lr = np.array([0.001, 0.0075, 0.01])
    index = 0
    sz = 14
    # simply change data size for each training epoch
    learn.set_data(get_data(arch, PATH, sz, X_train, Y_train, X_valid, Y_valid, test_df))
    learn.freeze()
    learn.fit(1e-2,1, cycle_len=1, cycle_mult=2)
    # by default, [:-2] layers are all freezed initially
    learn.unfreeze()
    # find optimal learning rate
    learn.fit(1e-2, 3, cycle_len=1, cycle_mult = 2)
    sz = 28
    # simply change data size for each training epoch
    learn.set_data(get_data(arch, PATH, sz, X_train, Y_train, X_valid, Y_valid, test_df))
    learn.freeze()
    learn.fit(1e-2, 1, cycle_len=1, cycle_mult=2)
    # by default, [:-2] layers are all freezed initially
    learn.unfreeze()
    # find optimal learning rate
    learn.fit(lr, 3, cycle_len=1, cycle_mult = 2)
    # plot loss vs. learning rate
    # learn.sched.plot()
def pgfit1(learn,arch, PATH, data, bs, X_train, Y_train, X_valid, Y_valid, test_df, imgsz_sched=[] ):
# define image size variation during training
if len(imgsz_sched) == 0:
imgsz_sched = [28] # [4, 8, 16, 24, 28]
# define differential learning rates
lr = np.array([0.001, 0.0075, 0.01])
index = 0
for sz in imgsz_sched:
# simply change data size for each training epoch
learn.set_data(get_data(arch, PATH, sz, X_train, Y_train, X_valid, Y_valid, test_df))
if index > 0:
learn.fit(1e-2, 1,wd=wd)
# by default, [:-2] layers are all freezed initially
learn.unfreeze()
# find optimal learning rate
learn.lr_find()
learn.fit(lr, 3, cycle_len=3, cycle_mult = 2)
index += 1
# plot loss vs. learning rate
# learn.sched.plot()
def predict_test_classification(learn):
log_preds, y_test = learn.TTA(is_test=True)
probs = np.mean(np.exp(log_preds), 0)
accuracy_np(probs, y)
print(torch.cuda.is_available(), torch.backends.cudnn.enabled)
wd = "../../../MNIST/"
# load data
train_df = pd.read_csv(f"{wd}train.csv")
test_df = pd.read_csv(f"{wd}test.csv")
print(train_df.shape, test_df.shape)
# create validation dataset
val_df = train_df.sample(frac=0.2, random_state=1337)
val_df.shape
# remove validation data from train dataset
train_df = train_df.drop(val_df.index)
train_df.shape
# separate labels from data
Y_train = train_df["label"]
Y_valid = val_df["label"]
X_train = train_df.drop("label", axis=1)
X_valid = val_df.drop("label", axis=1)
print(X_train.shape, X_valid.shape)
print(Y_train.shape, Y_valid.shape)
# display an actual image/digit
img = X_train.iloc[0,:].values.reshape(28,28)
plt.imshow(img, cmap="gray")
# reshape data and add color channels
X_train = reshape_img(X_train)
X_train = add_color_channel(X_train)
X_valid = reshape_img(X_valid)
X_valid = add_color_channel(X_valid)
test_df = reshape_img(test_df)
test_df = add_color_channel(test_df)
# convert y_train and y_valid into proper numpy.ndarray
Y_train = convert_ndarry(Y_train)
Y_valid = convert_ndarry(Y_valid)
preprocessed_data = [X_train, Y_train, X_valid, Y_valid, test_df]
print([e.shape for e in preprocessed_data])
print([type(e) for e in preprocessed_data])
arch = resnet34
imgsz_sched=[4, 8, 16, 24, 28]
#imgsz_sched = []
bs = 128
PATH='/home/ubuntu/MNIST/'
data = ImageClassifierData.from_arrays(path=wd,
trn=(X_train, Y_train),
val=(X_valid, Y_valid),
classes=Y_train,
test=test_df,
tfms=tfms_from_model(arch, 14))
learn = ConvLearner.pretrained(arch, data, precompute=True)
pgfit(learn, arch, PATH, data, bs, X_train, Y_train, X_valid, Y_valid, test_df,imgsz_sched )
predict_test_classification(learn)
| [
"numpy.stack",
"pandas.read_csv",
"matplotlib.pyplot.imshow",
"numpy.array",
"numpy.exp"
] | [((3629, 3658), 'pandas.read_csv', 'pd.read_csv', (['f"""{wd}train.csv"""'], {}), "(f'{wd}train.csv')\n", (3640, 3658), True, 'import pandas as pd\n'), ((3669, 3697), 'pandas.read_csv', 'pd.read_csv', (['f"""{wd}test.csv"""'], {}), "(f'{wd}test.csv')\n", (3680, 3697), True, 'import pandas as pd\n'), ((4246, 4274), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (4256, 4274), True, 'import matplotlib.pyplot as plt\n'), ((623, 655), 'numpy.stack', 'np.stack', (['((matrix,) * 3)'], {'axis': '(-1)'}), '((matrix,) * 3, axis=-1)\n', (631, 655), True, 'import numpy as np\n'), ((1615, 1646), 'numpy.array', 'np.array', (['[0.001, 0.0075, 0.01]'], {}), '([0.001, 0.0075, 0.01])\n', (1623, 1646), True, 'import numpy as np\n'), ((2788, 2819), 'numpy.array', 'np.array', (['[0.001, 0.0075, 0.01]'], {}), '([0.001, 0.0075, 0.01])\n', (2796, 2819), True, 'import numpy as np\n'), ((3468, 3485), 'numpy.exp', 'np.exp', (['log_preds'], {}), '(log_preds)\n', (3474, 3485), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
import pandas as pd
class DecisionStump:
def __init__(self, epsilon: float = 1e-6):
r"""A depth-1 decision tree classifier
Args:
epsilon: float
To classify all the points in the training set as +1,
the model will set the dividing line (threshold) to
threshold = min(x_best_feature) - epsilon
"""
self.epsilon = epsilon
self.best_feature = '' # label of the best feature column
self.threshold = 0.0 # dividing line
self.inverse = False
def train(self, X_train: pd.DataFrame, y_train: pd.Series,
weights: pd.Series = None, full_error: bool = False):
n_data = len(X_train)
# Compute errors for all possible dividing lines
errors = []
for feature in X_train.columns:
x_col = X_train[feature]
# Iterate over all data points
err = [self.weighted_error(y_train,
self._predict(x_col,
threshold=xi,
inverse=False),
weights)
for xi in x_col]
# Set the threshold below the minimum of current feature
threshold_min = min(x_col) - self.epsilon
y_pred = self._predict(x_col, threshold=threshold_min, inverse=False)
err.append(self.weighted_error(y_train, y_pred, weights))
# Store the errors
errors.append(pd.Series(err, name=f"{feature}"))
# Inverse the decision
# Iterate over all data points
err = [self.weighted_error(y_train,
self._predict(x_col,
threshold=xi,
inverse=True),
weights)
for xi in x_col]
# Set the threshold below the minimum of current feature
threshold_min = min(x_col) - self.epsilon
y_pred = self._predict(x_col, threshold=threshold_min, inverse=True)
err.append(self.weighted_error(y_train, y_pred, weights))
# Store the errors
errors.append(pd.Series(err, name=f"{feature}-inverse"))
errors = pd.DataFrame(errors).T
errors_arr = errors.to_numpy()
# Find the minimizer of the errors
best_data, best_feature = np.unravel_index(np.argmin(errors_arr, axis=None),
errors_arr.shape)
err_min = errors_arr[best_data, best_feature]
# Store parameters
self.inverse = bool(best_feature % 2) # odd columns
self.best_feature = X_train.columns[best_feature // 2]
if best_data == n_data: # last error corresponds to the minimum threshold
self.threshold = min(X_train[self.best_feature]) - self.epsilon
else:
self.threshold = X_train[self.best_feature][best_data]
# Return the errors
if full_error:
return errors, err_min
else:
return err_min
def eval_model(self, X_test: pd.DataFrame, y_test: pd.Series,
weights: pd.Series = None) -> Tuple[pd.Series, float]:
y_pred = self.predict(X_test)
error = self.weighted_error(y_test, y_pred, weights)
return y_pred, error
def predict(self, X_test: pd.DataFrame) -> pd.Series:
return self._predict(X_test[self.best_feature],
self.threshold, self.inverse)
@staticmethod
def _predict(x: pd.Series, threshold: float, inverse: bool):
if inverse:
y_pred = 2 * (x <= threshold) - 1
else:
y_pred = 2 * (x > threshold) - 1
y_pred.name = 'y_pred'
return y_pred
@staticmethod
def weighted_error(y_true: pd.Series, y_pred: pd.Series,
weights: pd.Series = None) -> float:
if weights is None:
return np.average(y_true != y_pred)
else:
return weights.dot(y_pred != y_true)
| [
"pandas.DataFrame",
"numpy.average",
"pandas.Series",
"numpy.argmin"
] | [((2456, 2476), 'pandas.DataFrame', 'pd.DataFrame', (['errors'], {}), '(errors)\n', (2468, 2476), True, 'import pandas as pd\n'), ((2612, 2644), 'numpy.argmin', 'np.argmin', (['errors_arr'], {'axis': 'None'}), '(errors_arr, axis=None)\n', (2621, 2644), True, 'import numpy as np\n'), ((4179, 4207), 'numpy.average', 'np.average', (['(y_true != y_pred)'], {}), '(y_true != y_pred)\n', (4189, 4207), True, 'import numpy as np\n'), ((1624, 1657), 'pandas.Series', 'pd.Series', (['err'], {'name': 'f"""{feature}"""'}), "(err, name=f'{feature}')\n", (1633, 1657), True, 'import pandas as pd\n'), ((2395, 2436), 'pandas.Series', 'pd.Series', (['err'], {'name': 'f"""{feature}-inverse"""'}), "(err, name=f'{feature}-inverse')\n", (2404, 2436), True, 'import pandas as pd\n')] |
# coding: utf-8
# ## Data Visualizations
# In[141]:
from matplotlib import pyplot as plt
import numpy as np
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
get_ipython().magic('matplotlib inline')
# In[7]:
path = "./data/"
file_list = os.listdir(path)
file_list
# In[10]:
data = np.load(path+file_list[0])
# In[16]:
for key in data:
print(key )
# ## Visualizations For Channel C3 for subject A01T
# In[19]:
signal = data['s']
# In[21]:
np.shape(signal)
# 25 Channels
# In[72]:
channelC3 = signal[:, 7] # The index 7 represent the channel C3
x = 7
# Extract the type of the event 7 in this case the type is 768 (in the table this is a Start of a trial event).
etype = data['etyp'].T[0, x]
# This is the position of the event in the raw signal
epos = data['epos'].T[0, x]
edur = data['edur'].T[0, x] # And this is the duration of this event
(len(data['epos']))
len(data['etyp'])
len(data['edur'])
# Then I extract the signal related the event selected.
trial = channelC3[epos:epos+edur]
len(trial)
plt.plot(trial)
plt.xlabel('Time')
plt.ylabel('Raw Signal')
plt.title("Signal for Start of a trial event")
plt.show()
# In[73]:
# Then for know the class of this trial (7) you need to read the type of the inmediate next event
trial_type = data['etyp'].T[0, x+1]
print(trial_type)
epos = data['epos'].T[0, x+1]
edur = data['edur'].T[0, x+1]
trial = channelC3[epos:epos+edur]
plt.xlabel('Time')
plt.ylabel('Raw Signal')
plt.title("Signal for Cue onset tounge - Class 4 ")
plt.plot(trial)
plt.show()
# In[81]:
# Then for know the class of this trial (7) you need to read the type of the inmediate next event
x = 13
trial_type = data['etyp'].T[0, x+1]
print(trial_type)
epos = data['epos'].T[0, x+1]
edur = data['edur'].T[0, x+1]
trial = channelC3[epos:epos+edur]
plt.xlabel('Time')
plt.ylabel('Raw Signal')
plt.title("Signal for Cue onset left - Class 1 ")
plt.plot(trial)
plt.show()
# # TSNE
#
# In[97]:
import numpy as np
from sklearn.manifold import TSNE
# In[98]:
X = np.load("X_train.npy")
Y = np.load("Y_train.npy")
# In[99]:
np.shape(X)
# In[100]:
np.shape(Y)
# In[101]:
Y[0]==2
# In[102]:
v = TSNE(n_components=2).fit_transform(X)
# ### Other Plots
#
# In[134]:
X = np.load("./LDA/X_val.npy")
Y = np.load("./LDA/Y_val.npy")
v = TSNE(n_components=2).fit_transform(X)
# ## Plotting the Features
# In[104]:
import plotly
from plotly.offline import init_notebook_mode
import plotly.graph_objs as go
plotly.offline.init_notebook_mode(connected=True)
# In[135]:
t1_0 = []
t1_1 = []
t2_0 = []
t2_1 = []
t3_0 = []
t3_1 = []
t4_0 = []
t4_1 = []
l = v.shape[0]
for i in range(0,l):
if(Y[i]==0):
t1_0.append(v[i][0])
t1_1.append(v[i][1])
if(Y[i]==1):
t2_0.append(v[i][0])
t2_1.append(v[i][1])
if(Y[i]==2):
t3_0.append(v[i][0])
t3_1.append(v[i][1])
if(Y[i]==3):
t4_0.append(v[i][0])
t4_1.append(v[i][1])
# In[137]:
trace1 = go.Scatter(
x = t1_0,
y = t1_1,
name = 'Class 1 : Tounge ',
mode = 'markers',
marker = dict(
size = 10,
color = 'rgba(240,98,146, .8)',
line = dict(
width = 2,
color = 'rgb(240,98,146)'
)
)
)
trace2 = go.Scatter(
x = t2_0,
y = t2_1,
name = 'Class 2 : Feet',
mode = 'markers',
marker = dict(
size = 10,
color = 'rgba(186,104,200, .8)',
line = dict(
width = 2,
color = 'rgb(186,104,200)'
)
)
)
trace3 = go.Scatter(
x = t3_0,
y = t3_1,
name = 'Class 3 : Left',
mode = 'markers',
marker = dict(
size = 10,
color = 'rgba(156,204,101, .8)',
line = dict(
width = 2,
color = 'rgb(156,204,101)'
)
)
)
trace4 = go.Scatter(
x = t4_0,
y = t4_1,
name = 'Class 4 : Right',
mode = 'markers',
marker = dict(
size = 10,
color = 'rgba(255,241,118, .8)',
line = dict(
width = 2,
color = 'rgb(255,241,118)'
)
)
)
data = [trace1,trace2, trace3, trace4, ]
layout = dict(title = 'Scatter Plot for Different Classes using LDA on validation set',
yaxis = dict(zeroline = False),
xaxis = dict(zeroline = False)
)
fig = dict(data=data, layout=layout)
plotly.offline.plot(fig, filename='styled-scatter')
# In[ ]:
for i in range(22):
f,psd = welch(X[0,i],250)
plt.plot(f,psd)
plt.set_title('Power Spectral Density')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power/Frequency (dB/Hz)')
plt.savefig('PSD before filtering.png')
for l in range(len(Y)):
Y[l] = labels[Y[l]]
# Pre-processing
X = preprocess(X)
# Visualization of filtered signal - how only one frequency band (8-24Hz) remains now.
for i in range(22):
f,psd = welch(X[0,i,:],250)
plt.set_title('Power Spectral Density')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power/Frequency (dB/Hz)')
plt.plot(f,psd)
plt.savefig('PSD after filtering.png')
# ## Class Conditional Density Plots
#
# In[139]:
X = np.load("X_train.npy")
Y = np.load("Y_train.npy")
v = TSNE(n_components=2).fit_transform(X)
t1_0 = []
t1_1 = []
t2_0 = []
t2_1 = []
t3_0 = []
t3_1 = []
t4_0 = []
t4_1 = []
l = v.shape[0]
for i in range(0,l):
if(Y[i]==0):
t1_0.append(v[i][0])
t1_1.append(v[i][1])
if(Y[i]==1):
t2_0.append(v[i][0])
t2_1.append(v[i][1])
if(Y[i]==2):
t3_0.append(v[i][0])
t3_1.append(v[i][1])
if(Y[i]==3):
t4_0.append(v[i][0])
t4_1.append(v[i][1])
# In[158]:
sns.distplot(t1_0, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 1").set_title('Class Conditional Density')
sns.distplot(t2_0, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 2")
sns.distplot(t3_0, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 3")
sns.distplot(t4_0, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 4")
# In[159]:
sns.distplot(t1_1, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 1").set_title('Class Conditional Density')
sns.distplot(t2_1, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 2")
sns.distplot(t3_1, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 3")
sns.distplot(t4_1, hist=False, rug=False,kde = True,kde_kws={"shade": True},label="Class 4")
| [
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.manifold.TSNE",
"plotly.offline.plot",
"numpy.shape",
"seaborn.distplot",
"matplotlib.pyplot.set_title",
"matplotlib.pyplot.ylabel",
"plotly.offline.init_notebook_mode",
"matplotlib.pyplot.xla... | [((321, 337), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (331, 337), False, 'import os\n'), ((368, 396), 'numpy.load', 'np.load', (['(path + file_list[0])'], {}), '(path + file_list[0])\n', (375, 396), True, 'import numpy as np\n'), ((538, 554), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (546, 554), True, 'import numpy as np\n'), ((1106, 1121), 'matplotlib.pyplot.plot', 'plt.plot', (['trial'], {}), '(trial)\n', (1114, 1121), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1140), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (1132, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Raw Signal"""'], {}), "('Raw Signal')\n", (1151, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1167, 1213), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal for Start of a trial event"""'], {}), "('Signal for Start of a trial event')\n", (1176, 1213), True, 'import matplotlib.pyplot as plt\n'), ((1214, 1224), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1222, 1224), True, 'import matplotlib.pyplot as plt\n'), ((1485, 1503), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (1495, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1528), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Raw Signal"""'], {}), "('Raw Signal')\n", (1514, 1528), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1581), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal for Cue onset tounge - Class 4 """'], {}), "('Signal for Cue onset tounge - Class 4 ')\n", (1539, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1597), 'matplotlib.pyplot.plot', 'plt.plot', (['trial'], {}), '(trial)\n', (1590, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1606, 1608), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1894), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (1886, 1894), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1919), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Raw Signal"""'], {}), "('Raw Signal')\n", (1905, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1970), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal for Cue onset left - Class 1 """'], {}), "('Signal for Cue onset left - Class 1 ')\n", (1930, 1970), True, 'import matplotlib.pyplot as plt\n'), ((1971, 1986), 'matplotlib.pyplot.plot', 'plt.plot', (['trial'], {}), '(trial)\n', (1979, 1986), True, 'import matplotlib.pyplot as plt\n'), ((1987, 1997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1995, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2116), 'numpy.load', 'np.load', (['"""X_train.npy"""'], {}), "('X_train.npy')\n", (2101, 2116), True, 'import numpy as np\n'), ((2121, 2143), 'numpy.load', 'np.load', (['"""Y_train.npy"""'], {}), "('Y_train.npy')\n", (2128, 2143), True, 'import numpy as np\n'), ((2157, 2168), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (2165, 2168), True, 'import numpy as np\n'), ((2183, 2194), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (2191, 2194), True, 'import numpy as np\n'), ((2313, 2339), 'numpy.load', 'np.load', (['"""./LDA/X_val.npy"""'], {}), "('./LDA/X_val.npy')\n", (2320, 2339), True, 'import numpy as np\n'), ((2344, 2370), 'numpy.load', 'np.load', (['"""./LDA/Y_val.npy"""'], {}), "('./LDA/Y_val.npy')\n", (2351, 2370), True, 'import numpy as np\n'), ((2546, 2595), 'plotly.offline.init_notebook_mode', 'plotly.offline.init_notebook_mode', ([], {'connected': '(True)'}), '(connected=True)\n', (2579, 2595), False, 'import plotly\n'), ((4447, 4498), 'plotly.offline.plot', 'plotly.offline.plot', (['fig'], {'filename': '"""styled-scatter"""'}), "(fig, filename='styled-scatter')\n", (4466, 4498), False, 'import plotly\n'), ((5210, 5232), 'numpy.load', 'np.load', (['"""X_train.npy"""'], {}), 
"('X_train.npy')\n", (5217, 5232), True, 'import numpy as np\n'), ((5237, 5259), 'numpy.load', 'np.load', (['"""Y_train.npy"""'], {}), "('Y_train.npy')\n", (5244, 5259), True, 'import numpy as np\n'), ((5870, 5967), 'seaborn.distplot', 'sns.distplot', (['t2_0'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 2"""'}), "(t2_0, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 2')\n", (5882, 5967), True, 'import seaborn as sns\n'), ((5963, 6060), 'seaborn.distplot', 'sns.distplot', (['t3_0'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 3"""'}), "(t3_0, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 3')\n", (5975, 6060), True, 'import seaborn as sns\n'), ((6056, 6153), 'seaborn.distplot', 'sns.distplot', (['t4_0'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 4"""'}), "(t4_0, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 4')\n", (6068, 6153), True, 'import seaborn as sns\n'), ((6295, 6392), 'seaborn.distplot', 'sns.distplot', (['t2_1'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 2"""'}), "(t2_1, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 2')\n", (6307, 6392), True, 'import seaborn as sns\n'), ((6388, 6485), 'seaborn.distplot', 'sns.distplot', (['t3_1'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 3"""'}), "(t3_1, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 3')\n", (6400, 6485), True, 'import seaborn as sns\n'), ((6481, 6578), 'seaborn.distplot', 'sns.distplot', (['t4_1'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 4"""'}), "(t4_1, hist=False, rug=False, kde=True, kde_kws={'shade': 
True},\n label='Class 4')\n", (6493, 6578), True, 'import seaborn as sns\n'), ((4565, 4581), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'psd'], {}), '(f, psd)\n', (4573, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4624), 'matplotlib.pyplot.set_title', 'plt.set_title', (['"""Power Spectral Density"""'], {}), "('Power Spectral Density')\n", (4598, 4624), True, 'import matplotlib.pyplot as plt\n'), ((4629, 4657), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (4639, 4657), True, 'import matplotlib.pyplot as plt\n'), ((4662, 4699), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power/Frequency (dB/Hz)"""'], {}), "('Power/Frequency (dB/Hz)')\n", (4672, 4699), True, 'import matplotlib.pyplot as plt\n'), ((4704, 4743), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""PSD before filtering.png"""'], {}), "('PSD before filtering.png')\n", (4715, 4743), True, 'import matplotlib.pyplot as plt\n'), ((4973, 5012), 'matplotlib.pyplot.set_title', 'plt.set_title', (['"""Power Spectral Density"""'], {}), "('Power Spectral Density')\n", (4986, 5012), True, 'import matplotlib.pyplot as plt\n'), ((5017, 5045), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (5027, 5045), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power/Frequency (dB/Hz)"""'], {}), "('Power/Frequency (dB/Hz)')\n", (5060, 5087), True, 'import matplotlib.pyplot as plt\n'), ((5092, 5108), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'psd'], {}), '(f, psd)\n', (5100, 5108), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5150), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""PSD after filtering.png"""'], {}), "('PSD after filtering.png')\n", (5123, 5150), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2255), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2239, 2255), False, 'from 
sklearn.manifold import TSNE\n'), ((2375, 2395), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2379, 2395), False, 'from sklearn.manifold import TSNE\n'), ((5264, 5284), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (5268, 5284), False, 'from sklearn.manifold import TSNE\n'), ((5738, 5835), 'seaborn.distplot', 'sns.distplot', (['t1_0'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 1"""'}), "(t1_0, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 1')\n", (5750, 5835), True, 'import seaborn as sns\n'), ((6163, 6260), 'seaborn.distplot', 'sns.distplot', (['t1_1'], {'hist': '(False)', 'rug': '(False)', 'kde': '(True)', 'kde_kws': "{'shade': True}", 'label': '"""Class 1"""'}), "(t1_1, hist=False, rug=False, kde=True, kde_kws={'shade': True},\n label='Class 1')\n", (6175, 6260), True, 'import seaborn as sns\n')] |
import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from model.model_fn import build_compile_model_pred
def get_numpy_dataset(fname, batch_size=64):
tmp = np.transpose(np.load(fname), [3,0,1,2])
BATCH_SIZE = batch_size
test_dataset = tf.data.Dataset.from_tensor_slices((tmp, np.zeros((tmp.shape[0], 1))))
test_dataset = test_dataset.batch(BATCH_SIZE)
iterator = tf.compat.v1.data.make_one_shot_iterator(test_dataset)
initializer = iterator.make_initializer(test_dataset)
return (iterator, initializer), tmp.shape[0]
parser = argparse.ArgumentParser(
description='gamma-net predicts log10 gamma power for a given image')
parser.add_argument('--input', type=str, nargs=1,
help='input size 84x84 .png or .npy', default='examples/sample.png')
args = parser.parse_args()
input_name = args.input[0]
out_name, file_ext = os.path.splitext(input_name)
# params
WEIGHT_DIR = 'weights.last.h5'
BATCH_SIZE = 64
# model
model = build_compile_model_pred(WEIGHT_DIR)
# modelname
flag_numpy = 1 if file_ext=='.npy' else 0
if flag_numpy:
from skimage.transform import resize
tmp_input = np.load(input_name).astype(np.float32)
test_steps = 1
# resize
test_input = np.zeros((tmp_input.shape[0], 84, 84, 3))
for im_id in range(tmp_input.shape[0]):
test_input[im_id, :, :, :] = resize(tmp_input[im_id, :, :, :], (84, 84, 3), anti_aliasing=True)
test_input[im_id, :, :, :] = test_input[im_id, :, :, ::-1]
test_input[im_id, :, :, :] -= [103.939, 116.779, 123.68]
pred = model.predict(test_input, steps=test_steps)
np.save(out_name + '_pred.npy', pred)
else:
from skimage.io import imread
from skimage.transform import resize
img = imread(input_name)
# resize image to 84x84
img = resize(img, (84, 84, 3), anti_aliasing=True)
img = img[:, :, ::-1]
img[:, :, :] -= [103.939, 116.779, 123.68]
pred = model.predict(np.expand_dims(img, axis=0), steps=1)
print('gamma-net prediction: ', pred[0][0])
| [
"numpy.load",
"numpy.save",
"argparse.ArgumentParser",
"model.model_fn.build_compile_model_pred",
"numpy.zeros",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"numpy.expand_dims",
"skimage.transform.resize",
"os.path.splitext",
"skimage.io.imread"
] | [((615, 713), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""gamma-net predicts log10 gamma power for a given image"""'}), "(description=\n 'gamma-net predicts log10 gamma power for a given image')\n", (638, 713), False, 'import argparse\n'), ((929, 957), 'os.path.splitext', 'os.path.splitext', (['input_name'], {}), '(input_name)\n', (945, 957), False, 'import os\n'), ((1032, 1068), 'model.model_fn.build_compile_model_pred', 'build_compile_model_pred', (['WEIGHT_DIR'], {}), '(WEIGHT_DIR)\n', (1056, 1068), False, 'from model.model_fn import build_compile_model_pred\n'), ((442, 496), 'tensorflow.compat.v1.data.make_one_shot_iterator', 'tf.compat.v1.data.make_one_shot_iterator', (['test_dataset'], {}), '(test_dataset)\n', (482, 496), True, 'import tensorflow as tf\n'), ((1286, 1327), 'numpy.zeros', 'np.zeros', (['(tmp_input.shape[0], 84, 84, 3)'], {}), '((tmp_input.shape[0], 84, 84, 3))\n', (1294, 1327), True, 'import numpy as np\n'), ((1668, 1705), 'numpy.save', 'np.save', (["(out_name + '_pred.npy')", 'pred'], {}), "(out_name + '_pred.npy', pred)\n", (1675, 1705), True, 'import numpy as np\n'), ((1801, 1819), 'skimage.io.imread', 'imread', (['input_name'], {}), '(input_name)\n', (1807, 1819), False, 'from skimage.io import imread\n'), ((1859, 1903), 'skimage.transform.resize', 'resize', (['img', '(84, 84, 3)'], {'anti_aliasing': '(True)'}), '(img, (84, 84, 3), anti_aliasing=True)\n', (1865, 1903), False, 'from skimage.transform import resize\n'), ((232, 246), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (239, 246), True, 'import numpy as np\n'), ((1409, 1475), 'skimage.transform.resize', 'resize', (['tmp_input[im_id, :, :, :]', '(84, 84, 3)'], {'anti_aliasing': '(True)'}), '(tmp_input[im_id, :, :, :], (84, 84, 3), anti_aliasing=True)\n', (1415, 1475), False, 'from skimage.transform import resize\n'), ((2007, 2034), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (2021, 2034), True, 
'import numpy as np\n'), ((347, 374), 'numpy.zeros', 'np.zeros', (['(tmp.shape[0], 1)'], {}), '((tmp.shape[0], 1))\n', (355, 374), True, 'import numpy as np\n'), ((1197, 1216), 'numpy.load', 'np.load', (['input_name'], {}), '(input_name)\n', (1204, 1216), True, 'import numpy as np\n')] |
"""
To describe the workflow using some methods
** Intended to be used line by line
"""
import matplotlib.pyplot as plt # to be removed later.
import numpy.ma as ma
import numpy as np
from astropy import wcs
import data_reduction as dr
import image_synthesis as im
working_dir = "/home/pranshu/Downloads/work/Jupiter_2021/"
bc = dr.BinaryConvert() # Call the object
# mkid_files = bc.select_files() # Select the MKID readout files
# # # to average and save the fsp files to a single binary file.
# bc.save_fsp_pickle(mkid_files, working_dir + "Jupiter_0601", mode="single_col") #mode can be "average" of "single_col"
data = bc.load_pickle(working_dir + "Jupiter_0601.pickle")
# data = bc.load_pickle('Mars_0601.pickle')
data = np.array(data)
print(len(data))
print(data.shape)
sample_rate = 256
# for i in range(len(data)):
# bc.plot(data, i+1)
exclude = [9, 12, 23, 25, 28, 35, 39, 41, 49, 58] # excluded pixels were selected using plotted data
bc.pixels_to_exclude(exclude) # to exclude these pixels from further calculations.
# freq = bc.identify_frequencies(data, 4000, exclude=True, plot=False, save_file=True, file_path=working_dir)
# IMP: set the clipping length properly
clipped = bc.clip(data, sample_rate * 70, -sample_rate * 75) # clip for decorrelation
mean_sub = bc.mean_center(clipped)
# print(len(mean_sub)) # Important, use only the good and mean subtracted data.
# plt.plot(mean_sub[5], ',')
# plt.show()
# for i in range(10):
# plt.plot(mean_sub[i], alpha = 0.5)
# plt.show()
res_pca = bc.pca_decor(mean_sub, n_components=3)
# for i in range(10):
# plt.plot(res_pca[i][:, 1], alpha = 0.5)
# plt.show()
flat = bc.flatten(res_pca, 0)
# IMP: reduce avg_points if the masked data is too sparse
mask = bc.get_sigmaclipped_mask(flat, avg_points=32, sigma=4, maxiters=4, axis=1)
# # ----------- TO GET THE SIGMA CLIPPED MASK PLOT --------------------
pix = 4
# plt.plot(running_avged[pix])
plt.plot(ma.masked_array(mean_sub[pix], ~mask[pix]), 'o', label='Masked data')
plt.plot(ma.masked_array(mean_sub[pix], mask[pix]), label='Unmasked data', alpha = 0.5)
plt.legend()
plt.title('Sigma clipping at 3-sigma')
plt.show()
# -------------------------------------------------------------------
chunk_matrix = bc.make_chunk_matrix(mask, num_chunks=5600)
# plt.imshow(chunk_matrix)
# plt.colorbar()
# plt.show()
# --------- CHUNK PCA METHOD CHECKING ------------------------------
#
final = bc.chunkpca_decor(mask, chunk_matrix, mean_sub)
#
plt.rcParams['figure.figsize'] = [16, 8]
N = 32
pix = 4
plt.plot(final[pix][:,2])# - np.mean(final[pix][:,0]))
plt.plot(final[pix][:,1])
plt.plot(ma.masked_array(mean_sub[pix][(0):(70000)], mask[pix][(0):(70000)]), alpha=0.6)
plt.show()
plt.plot(np.convolve(final[pix][:,0], np.ones((N,))/N, mode='same'))
plt.show()
#
final_flat = bc.flatten(final, 0)
bc.save_as_pickle(final_flat, working_dir + "final_flat_selected_by_beamsize")
cleaned = bc.load_pickle(working_dir + "final_flat_selected_by_beamsize")
plt.plot(cleaned[0])
plt.show()
# # ---------------- Calibration ---------------
# # # plt.plot(cleaned[0], )
# # # plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.ones",
"data_reduction.BinaryConvert",
"numpy.array",
"numpy.ma.masked_array"
] | [((334, 352), 'data_reduction.BinaryConvert', 'dr.BinaryConvert', ([], {}), '()\n', (350, 352), True, 'import data_reduction as dr\n'), ((735, 749), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (743, 749), True, 'import numpy as np\n'), ((2110, 2122), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2120, 2122), True, 'import matplotlib.pyplot as plt\n'), ((2123, 2161), 'matplotlib.pyplot.title', 'plt.title', (['"""Sigma clipping at 3-sigma"""'], {}), "('Sigma clipping at 3-sigma')\n", (2132, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2162, 2172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2170, 2172), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2576), 'matplotlib.pyplot.plot', 'plt.plot', (['final[pix][:, 2]'], {}), '(final[pix][:, 2])\n', (2558, 2576), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2631), 'matplotlib.pyplot.plot', 'plt.plot', (['final[pix][:, 1]'], {}), '(final[pix][:, 1])\n', (2613, 2631), True, 'import matplotlib.pyplot as plt\n'), ((2720, 2730), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2728, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2801, 2811), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2809, 2811), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3023), 'matplotlib.pyplot.plot', 'plt.plot', (['cleaned[0]'], {}), '(cleaned[0])\n', (3011, 3023), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3032, 3034), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1994), 'numpy.ma.masked_array', 'ma.masked_array', (['mean_sub[pix]', '(~mask[pix])'], {}), '(mean_sub[pix], ~mask[pix])\n', (1967, 1994), True, 'import numpy.ma as ma\n'), ((2031, 2072), 'numpy.ma.masked_array', 'ma.masked_array', (['mean_sub[pix]', 'mask[pix]'], {}), '(mean_sub[pix], mask[pix])\n', (2046, 2072), True, 'import numpy.ma as ma\n'), ((2640, 2699), 'numpy.ma.masked_array', 'ma.masked_array', 
(['mean_sub[pix][0:70000]', 'mask[pix][0:70000]'], {}), '(mean_sub[pix][0:70000], mask[pix][0:70000])\n', (2655, 2699), True, 'import numpy.ma as ma\n'), ((2770, 2783), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (2777, 2783), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @file snowflake_neural_logic.py
# @brief
# @author QRS
# @blog qrsforever.github.io
# @version 1.0
# @date 2019-06-01 17:19:23
################################ jupyter-vim #######################################
# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim
# %pylab --no-import-all
#####################################################################################
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
import tensorflow as tf
import memory_util
import sys
#####################################################################################
# <codecell> global
#####################################################################################
# ---- global configuration ---------------------------------------------------
memory_util.vlog(1)
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['figure.figsize'] = (12.0, 12.0)
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
tf.logging.set_verbosity(tf.logging.ERROR)
#####################################################################################
# <codecell> plot snowflake
#####################################################################################
# The code below only illustrates how neurons (op nodes) operate on data; the
# operations are constructed by hand rather than learned.  The example shows how
# a 2D region that is not linearly separable becomes separable by combining
# several dividing lines.
sess = tf.Session()
##
# single 2-D point fed into the graph at every evaluation
inputs = tf.placeholder(dtype=tf.float32, shape=(2), name='inputs')
# outer-circle radius of the snowflake
r1 = 10
# inner-circle radius
r2 = r1/2 / np.cos(30*np.pi / 180)
# translation so the whole figure sits inside the plotting area
m = r1 + 5
# TF print ops are collected here so intermediate values can be dumped later
debug_ops = []
xs = []
ys = []
# one corner every 30 degrees
angles = np.array([i*np.pi / 180 for i in range(0, 360) if i % 30 == 0])
for i, angle in enumerate(angles, 0):
    if i % 2 == 1:
        # odd corners lie on the outer circle
        xs.append(r1 * np.cos(angle) + m)
        ys.append(r1 * np.sin(angle) + m)
    else:
        # even corners lie on the inner circle
        xs.append(r2 * np.cos(angle) + m)
        ys.append(r2 * np.sin(angle) + m)
# close the polygon
xs.append(xs[0])
ys.append(ys[0])
path = Path([(x,y) for x, y in zip(xs, ys)])
patch = patches.PathPatch(path, facecolor='grey', lw=0, alpha=0.5)
plt.gca().add_patch(patch)
plt.xlim(0, 40)
plt.ylim(0, 40)
# compute the line parameters k, b for an edge and plot the corresponding line
def plot_linear_equation(name, i, j, x1, x2):
    """Draw the line through snowflake corners i and j and build a classifier node.

    The returned TF node evaluates to 1 when the fed point lies above the
    line (y > kx + b) and 0 otherwise.  A tf.print op for the raw line
    evaluation is appended to the global ``debug_ops`` list.
    """
    slope = (ys[i] - ys[j]) / (xs[i] - xs[j])
    intercept = ys[i] - slope * xs[i]
    print('%s: (%d, %d) k = %.2f, b = %.2f' % (name, i, j, slope, intercept))
    # draw the segment between x1 and x2 and annotate it with its equation
    xspan = (x1, x2)
    yspan = (slope * x1 + intercept, slope * x2 + intercept)
    plt.plot(xspan, yspan, color='g')
    plt.text(xspan[1], yspan[1], s=r' %s: $y = %.2fx %s %.2f$' % (
        name, slope, '' if intercept < 0 else '+', intercept))
    # graph nodes evaluating k*x - y + b for the fed point
    weights = tf.constant((slope, -1), dtype=tf.float32)
    bias = tf.constant((intercept, 0), dtype=tf.float32)
    debug_ops.append(tf.print('%s: ' % name, weights * inputs + bias,
                              output_stream=sys.stderr))
    # sigmoid(k*x - y + b) < 0.5  <=>  k*x + b < y  <=>  point above the line
    return tf.cast(tf.nn.sigmoid(
        tf.math.reduce_sum(weights * inputs + bias)) < 0.5, tf.int32)
def check_result(d, i, x, y):
    """Evaluate the six line classifiers for point (x, y) and colour-code it.

    Runs the stacked classifier tensor ``d`` with the point fed into the
    global ``inputs`` placeholder.  Each ``data[k]`` is 1 when the point lies
    above line k+1 (y > kx + b) and 0 otherwise; the hand-crafted disjunction
    below enumerates the side-of-line patterns that make up the snowflake
    region.  Matching points are drawn blue, the rest yellow.
    """
    data = sess.run(d, feed_dict={inputs:[x, y]})
    res = 0
    # each clause describes one sub-region of the snowflake in terms of which
    # side of lines l1..l6 the point falls on
    if ((data[0] == 0 and data[1] == 1 and data[2] == 0 and
        (data[3] == 1 and data[4] == 0 and data[5] == 1)) or
        (data[0] == 0 and data[2] == 1 and data[4] == 0) or
        (data[1] == 1 and data[2] == 0 and data[4] == 1) or
        (data[1] == 0 and data[3] == 1 and data[4] == 0) or
        (data[1] == 1 and data[3] == 0 and data[5] == 1) or
        (data[0] == 0 and data[3] == 1 and data[5] == 0) or
        (data[0] == 1 and data[2] == 0 and data[5] == 1)): # noqa:E129
        res = 1
    if res == 0:
        plt.scatter(x, y, s=100, color='yellow', marker='o')
    else:
        plt.scatter(x, y, s=100, color='blue', marker='o')
    plt.text(x, y, s=r'(%d %d)' % (x, y))
# Build one classifier per snowflake edge; each call also draws the line.
l1 = plot_linear_equation('l1', 3, 7, 4, 17)
l2 = plot_linear_equation('l2', 1, 9, 13, 26)
l3 = plot_linear_equation('l3', 5, 1, 4, 27)
l4 = plot_linear_equation('l4', 7, 11, 4, 27)
l5 = plot_linear_equation('l5', 3, 11, 13, 25)
l6 = plot_linear_equation('l6', 5, 9, 4, 17)
# force the debug tf.print ops to run whenever the stacked result is evaluated
with tf.control_dependencies(debug_ops):
    data = tf.stack([l1, l2, l3, l4, l5, l6])
# capture stderr so the tf.print output can be shown after the run
with memory_util.capture_stderr() as stderr:
    for i in range(10):
        # random integer test points inside the plotting area
        points = np.random.randint(5, 26, 2, dtype=np.int32)
        check_result(data, i, points[0], points[1])
print(stderr.getvalue())
##
sess.close() # noqa
| [
"tensorflow.print",
"tensorflow.logging.set_verbosity",
"numpy.random.randint",
"numpy.sin",
"matplotlib.pyplot.gca",
"tensorflow.stack",
"tensorflow.placeholder",
"matplotlib.patches.PathPatch",
"memory_util.capture_stderr",
"memory_util.vlog",
"tensorflow.control_dependencies",
"matplotlib.p... | [((826, 845), 'memory_util.vlog', 'memory_util.vlog', (['(1)'], {}), '(1)\n', (842, 845), False, 'import memory_util\n'), ((1079, 1121), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (1103, 1121), True, 'import tensorflow as tf\n'), ((1424, 1436), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1434, 1436), True, 'import tensorflow as tf\n'), ((1451, 1507), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(2)', 'name': '"""inputs"""'}), "(dtype=tf.float32, shape=2, name='inputs')\n", (1465, 1507), True, 'import tensorflow as tf\n'), ((2032, 2090), 'matplotlib.patches.PathPatch', 'patches.PathPatch', (['path'], {'facecolor': '"""grey"""', 'lw': '(0)', 'alpha': '(0.5)'}), "(path, facecolor='grey', lw=0, alpha=0.5)\n", (2049, 2090), True, 'import matplotlib.patches as patches\n'), ((2119, 2134), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(40)'], {}), '(0, 40)\n', (2127, 2134), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2150), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(40)'], {}), '(0, 40)\n', (2143, 2150), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1569), 'numpy.cos', 'np.cos', (['(30 * np.pi / 180)'], {}), '(30 * np.pi / 180)\n', (1551, 1569), True, 'import numpy as np\n'), ((2399, 2426), 'matplotlib.pyplot.plot', 'plt.plot', (['xp', 'yp'], {'color': '"""g"""'}), "(xp, yp, color='g')\n", (2407, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2525), 'matplotlib.pyplot.text', 'plt.text', (['xp[1]', 'yp[1]'], {'s': "(' %s: $y = %.2fx %s %.2f$' % (name, k, '' if b < 0 else '+', b))"}), "(xp[1], yp[1], s=' %s: $y = %.2fx %s %.2f$' % (name, k, '' if b < 0\n else '+', b))\n", (2439, 2525), True, 'import matplotlib.pyplot as plt\n'), ((2541, 2579), 'tensorflow.constant', 'tf.constant', (['(k, -1)'], {'dtype': 'tf.float32'}), '((k, -1), dtype=tf.float32)\n', (2552, 2579), True, 'import tensorflow as tf\n'), 
((2588, 2625), 'tensorflow.constant', 'tf.constant', (['(b, 0)'], {'dtype': 'tf.float32'}), '((b, 0), dtype=tf.float32)\n', (2599, 2625), True, 'import tensorflow as tf\n'), ((3571, 3607), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y'], {'s': "('(%d %d)' % (x, y))"}), "(x, y, s='(%d %d)' % (x, y))\n", (3579, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3890, 3924), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['debug_ops'], {}), '(debug_ops)\n', (3913, 3924), True, 'import tensorflow as tf\n'), ((3937, 3971), 'tensorflow.stack', 'tf.stack', (['[l1, l2, l3, l4, l5, l6]'], {}), '([l1, l2, l3, l4, l5, l6])\n', (3945, 3971), True, 'import tensorflow as tf\n'), ((2091, 2100), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2098, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2647, 2712), 'tensorflow.print', 'tf.print', (["('%s: ' % name)", '(w * inputs + b)'], {'output_stream': 'sys.stderr'}), "('%s: ' % name, w * inputs + b, output_stream=sys.stderr)\n", (2655, 2712), True, 'import tensorflow as tf\n'), ((3445, 3497), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(100)', 'color': '"""yellow"""', 'marker': '"""o"""'}), "(x, y, s=100, color='yellow', marker='o')\n", (3456, 3497), True, 'import matplotlib.pyplot as plt\n'), ((3516, 3566), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(100)', 'color': '"""blue"""', 'marker': '"""o"""'}), "(x, y, s=100, color='blue', marker='o')\n", (3527, 3566), True, 'import matplotlib.pyplot as plt\n'), ((3981, 4009), 'memory_util.capture_stderr', 'memory_util.capture_stderr', ([], {}), '()\n', (4007, 4009), False, 'import memory_util\n'), ((4070, 4113), 'numpy.random.randint', 'np.random.randint', (['(5)', '(26)', '(2)'], {'dtype': 'np.int32'}), '(5, 26, 2, dtype=np.int32)\n', (4087, 4113), True, 'import numpy as np\n'), ((2764, 2798), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(w * inputs + b)'], {}), '(w * inputs + b)\n', (2782, 2798), True, 
'import tensorflow as tf\n'), ((1788, 1801), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1794, 1801), True, 'import numpy as np\n'), ((1830, 1843), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1836, 1843), True, 'import numpy as np\n'), ((1882, 1895), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1888, 1895), True, 'import numpy as np\n'), ((1924, 1937), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1930, 1937), True, 'import numpy as np\n')] |
from cosmogrb.instruments.gbm import GBMGRB_CPL
import popsynth
from popsynth.aux_samplers.normal_aux_sampler import NormalAuxSampler
from popsynth.aux_samplers.trunc_normal_aux_sampler import TruncatedNormalAuxSampler
from popsynth.aux_samplers.lognormal_aux_sampler import LogNormalAuxSampler
from cosmogrb.instruments.gbm import GBM_CPL_Universe
from dask.distributed import Client, LocalCluster
import numpy as np
# this is a script that is used to generate the test data for the
# pytest. it is meant to be run from the top of the pacakge
class TDecaySampler(popsynth.AuxiliarySampler):
    """Auxiliary sampler deriving the pulse decay time from log_t90 and trise."""

    def __init__(self):
        super(TDecaySampler, self).__init__(name="tdecay", observed=False)

    def true_sampler(self, size):
        # recover t90 from its sampled log10 value, then solve for the decay time
        duration90 = 10 ** self._secondary_samplers["log_t90"].true_values
        rise = self._secondary_samplers["trise"].true_values
        self._true_values = 1.0 / 50.0 * (
            10 * duration90 + rise + np.sqrt(rise) * np.sqrt(20 * duration90 + rise)
        )
class DurationSampler(popsynth.AuxiliarySampler):
    """Auxiliary sampler setting the total burst duration to 1.5 * t90."""

    def __init__(self):
        super(DurationSampler, self).__init__(name="duration", observed=False)

    def true_sampler(self, size):
        # t90 is sampled in log10 space by the secondary sampler
        duration90 = 10 ** self._secondary_samplers["log_t90"].true_values
        self._true_values = 1.5 * duration90
# ground-truth parameters for the synthetic GRB population
r0_true = 0.13
rise_true = 0.1
decay_true = 4.0
peak_true = 1.5
td_true = 3.0
sigma_true = 1.0
Lmin_true = 1e50
alpha_true = 1.5
r_max = 5.0
# Pareto luminosity function with an SFR-like redshift distribution
pop_gen = popsynth.populations.ParetoSFRPopulation(
    r0=r0_true,
    rise=rise_true,
    decay=decay_true,
    peak=peak_true,
    Lmin=Lmin_true,
    alpha=alpha_true,
    r_max=r_max,
)
# spectral peak energy, sampled log-normally
ep = LogNormalAuxSampler(name="log_ep", observed=False)
ep.mu = 500
ep.tau = 0.5
# low-energy spectral index, truncated normal in [-1.5, 0]
alpha = TruncatedNormalAuxSampler(name="alpha", observed=False)
alpha.lower = -1.5
alpha.upper = 0.0
alpha.mu = -1.0
alpha.tau = 0.25
tau = TruncatedNormalAuxSampler(name="tau", observed=False)
tau.lower = 1.5
tau.upper = 2.5
tau.mu = 2.0
tau.tau = 0.25
# pulse rise time, truncated normal in [0.01, 5]
trise = TruncatedNormalAuxSampler(name="trise", observed=False)
trise.lower = 0.01
trise.upper = 5.0
trise.mu = 1.0
trise.tau = 1.0
# t90 sampled in log10 space
t90 = LogNormalAuxSampler(name="log_t90", observed=False)
t90.mu = 10
t90.tau = 0.25
# derived samplers defined above; tdecay needs t90 and trise, duration only t90
tdecay = TDecaySampler()
duration = DurationSampler()
tdecay.set_secondary_sampler(t90)
tdecay.set_secondary_sampler(trise)
duration.set_secondary_sampler(t90)
pop_gen.add_observed_quantity(ep)
pop_gen.add_observed_quantity(tau)
pop_gen.add_observed_quantity(alpha)
pop_gen.add_observed_quantity(tdecay)
pop_gen.add_observed_quantity(duration)
# draw the full survey with no flux selection and store it for the tests
pop = pop_gen.draw_survey(no_selection=True, boundary=1e-2)
pop.writeto("cosmogrb/data/test_grb_pop.h5")
| [
"popsynth.aux_samplers.trunc_normal_aux_sampler.TruncatedNormalAuxSampler",
"popsynth.populations.ParetoSFRPopulation",
"numpy.sqrt",
"popsynth.aux_samplers.lognormal_aux_sampler.LogNormalAuxSampler"
] | [((1452, 1606), 'popsynth.populations.ParetoSFRPopulation', 'popsynth.populations.ParetoSFRPopulation', ([], {'r0': 'r0_true', 'rise': 'rise_true', 'decay': 'decay_true', 'peak': 'peak_true', 'Lmin': 'Lmin_true', 'alpha': 'alpha_true', 'r_max': 'r_max'}), '(r0=r0_true, rise=rise_true, decay=\n decay_true, peak=peak_true, Lmin=Lmin_true, alpha=alpha_true, r_max=r_max)\n', (1492, 1606), False, 'import popsynth\n'), ((1640, 1690), 'popsynth.aux_samplers.lognormal_aux_sampler.LogNormalAuxSampler', 'LogNormalAuxSampler', ([], {'name': '"""log_ep"""', 'observed': '(False)'}), "(name='log_ep', observed=False)\n", (1659, 1690), False, 'from popsynth.aux_samplers.lognormal_aux_sampler import LogNormalAuxSampler\n'), ((1725, 1780), 'popsynth.aux_samplers.trunc_normal_aux_sampler.TruncatedNormalAuxSampler', 'TruncatedNormalAuxSampler', ([], {'name': '"""alpha"""', 'observed': '(False)'}), "(name='alpha', observed=False)\n", (1750, 1780), False, 'from popsynth.aux_samplers.trunc_normal_aux_sampler import TruncatedNormalAuxSampler\n'), ((1860, 1913), 'popsynth.aux_samplers.trunc_normal_aux_sampler.TruncatedNormalAuxSampler', 'TruncatedNormalAuxSampler', ([], {'name': '"""tau"""', 'observed': '(False)'}), "(name='tau', observed=False)\n", (1885, 1913), False, 'from popsynth.aux_samplers.trunc_normal_aux_sampler import TruncatedNormalAuxSampler\n'), ((1985, 2040), 'popsynth.aux_samplers.trunc_normal_aux_sampler.TruncatedNormalAuxSampler', 'TruncatedNormalAuxSampler', ([], {'name': '"""trise"""', 'observed': '(False)'}), "(name='trise', observed=False)\n", (2010, 2040), False, 'from popsynth.aux_samplers.trunc_normal_aux_sampler import TruncatedNormalAuxSampler\n'), ((2118, 2169), 'popsynth.aux_samplers.lognormal_aux_sampler.LogNormalAuxSampler', 'LogNormalAuxSampler', ([], {'name': '"""log_t90"""', 'observed': '(False)'}), "(name='log_t90', observed=False)\n", (2137, 2169), False, 'from popsynth.aux_samplers.lognormal_aux_sampler import LogNormalAuxSampler\n'), ((941, 955), 
'numpy.sqrt', 'np.sqrt', (['trise'], {}), '(trise)\n', (948, 955), True, 'import numpy as np\n'), ((958, 983), 'numpy.sqrt', 'np.sqrt', (['(20 * t90 + trise)'], {}), '(20 * t90 + trise)\n', (965, 983), True, 'import numpy as np\n')] |
from skyfield.api import EarthSatellite, load, wgs84
from vpython.vpython import print_to_string
import getData
import numpy as np
from vpython import *
def inf_loop():
    """Track one satellite live in a vpython scene for one session.

    Fetches TLE data via getData, then repeatedly evaluates the satellite
    position, drawing a marker sphere on a textured Earth globe and showing
    the current latitude/longitude in a second canvas.  Returns when the
    elapsed-time condition at the bottom of the loop triggers.
    """
    ts = load.timescale()
    line1 ,line2,name = getData.get_sat_data()
    satellite = EarthSatellite(line1, line2, name, ts)
    # Earth radius in metres (approx.) and a scale factor into scene units
    R = 6.563e+6
    scale = 3.3270e+4
    cns = canvas(width = 1300,height = 700,background= color.black)
    newCns =canvas(width = 500,height = 500,background= color.white)
    sphere(canvas=cns,pos=vector(0,0,0),radius=R/scale,texture=textures.earth)
    textOption = ''
    box = 0
    index = 0
    simstarttime = ts.now()
    tnow = simstarttime
    passedOnce = False
    while True:
        t= ts.now()
        #print(t,tnow)
        # NOTE(review): skyfield Time differences appear to be in days here,
        # so 0.00005 is roughly a 4.3 s refresh interval — confirm units
        if t - tnow > 0.00005:
            tnow = t
            geocentric = satellite.at(t)
            # position in metres, then scaled into scene units
            current_coord = np.array(geocentric.position.km)*1000
            temp = current_coord/scale
            lat, lon = wgs84.latlon_of(geocentric)
            # remove the previous marker and label before drawing new ones
            if passedOnce:
                box.visible = False
                textOption.visible = False
            box = sphere(canvas=cns,pos=vector(temp[0],temp[1],temp[2]),radius=10,color=color.white)
            textOption = text(canvas=newCns,text=f'Latitude: {lat} Longitude: {lon}',pos=vector(0,0,0), depth=0,align='center',color=color.black)
            passedOnce = True
        # NOTE(review): if Time differences are in days, 24*60*60 is ~86400
        # days rather than 24 hours — verify the intended session length
        if t - simstarttime > 24*60*60:
            break
# Restart the tracking session indefinitely.
while True:
    inf_loop()
| [
"skyfield.api.load.timescale",
"skyfield.api.wgs84.latlon_of",
"skyfield.api.EarthSatellite",
"getData.get_sat_data",
"numpy.array"
] | [((179, 195), 'skyfield.api.load.timescale', 'load.timescale', ([], {}), '()\n', (193, 195), False, 'from skyfield.api import EarthSatellite, load, wgs84\n'), ((220, 242), 'getData.get_sat_data', 'getData.get_sat_data', ([], {}), '()\n', (240, 242), False, 'import getData\n'), ((259, 297), 'skyfield.api.EarthSatellite', 'EarthSatellite', (['line1', 'line2', 'name', 'ts'], {}), '(line1, line2, name, ts)\n', (273, 297), False, 'from skyfield.api import EarthSatellite, load, wgs84\n'), ((972, 999), 'skyfield.api.wgs84.latlon_of', 'wgs84.latlon_of', (['geocentric'], {}), '(geocentric)\n', (987, 999), False, 'from skyfield.api import EarthSatellite, load, wgs84\n'), ((872, 904), 'numpy.array', 'np.array', (['geocentric.position.km'], {}), '(geocentric.position.km)\n', (880, 904), True, 'import numpy as np\n')] |
# coding: utf-8
# # Introduction
# In this assignment, we analyse signals using the Fast Fourier transform, or the FFT for short. The FFT is a fast implementation of the Discrete Fourier transform(DFT). It runs in $\mathcal{O}(n \log n)$ time complexity. We find the FFTs of various types of signals using the numpy.fft module. We also attempt to approximate the continuous time fourier transform of a gaussian by windowing and sampling in time domain, and then taking the DFT. We iteratively increase window size and number of samples until we obtain an estimate of required accuracy.
# In[1]:
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
# # Spectrum of $\sin^3(t)$
# Using the following identity:
# $$\sin^3(t) = \frac{3}{4}\sin(t) - \frac{1}{4}\sin(3t)$$
#
# We expect two sets of peaks at frequencies of 1 and 3, with heights corresponding to half of $0.75$ and $0.25$.
# In[2]:
# DFT of sin^3(t): expect peaks at |w| = 1 and 3 with heights 3/8 and 1/8,
# from sin^3(t) = (3/4) sin(t) - (1/4) sin(3t).
x = np.linspace(-4*pi,4*pi,513)[:-1]
w = np.linspace(-64,64,513)[:-1]
y1 = (np.sin(x))**3
Y1 = fftshift(fft(y1))/512
fig,ax = plt.subplots(2)
ax[0].plot(w,abs(Y1))
ax[0].set_xlim([-10,10])
ax[0].set_title(r"Magnitude and Phase plots of DFT of $\sin^3(t)$ ")
ax[0].set_ylabel(r"$|Y|$",size=16)
ax[0].set_xlabel(r"$\omega$",size=16)
ax[0].grid(True)
# zero out the phase wherever the magnitude is negligible
ii1 = np.where(abs(Y1)<10**-3)
ph = angle(Y1)
ph[ii1] = 0
ax[1].plot(w,ph,"ro")
ax[1].set_xlim([-10,10])
ax[1].grid(True)
ax[1].set_ylabel(r"Phase of $Y$",size=16)
ax[1].set_xlabel(r"$\omega$",size=16)
plt.show()
# We observe the peaks in the magnitude at the expected frequencies of 1 and 3, along with the expected amplitudes. The phases of the peaks are also in agreement with what is expected(one is a positive sine while the other is a negative sine).
# # Spectrum of $\cos^3(t)$
# Using the following identity:
# $$\cos^3(t) = \frac{3}{4}\cos(t) + \frac{1}{4}\cos(3t)$$
#
# We expect two sets of peaks at frequencies of 1 and 3, with heights corresponding to half of $0.75$ and $0.25$.
# In[3]:
# DFT of cos^3(t): expect peaks at |w| = 1 and 3 with heights 3/8 and 1/8,
# from cos^3(t) = (3/4) cos(t) + (1/4) cos(3t).
x = np.linspace(-4*pi,4*pi,129)[:-1]
w = np.linspace(-16,16,129)[:-1]
y2 = (np.cos(x))**3
Y2 = fftshift(fft(y2))/128
fig,bx = plt.subplots(2)
bx[0].plot(w,abs(Y2))
bx[0].set_xlim([-10,10])
bx[0].grid(True)
bx[0].set_ylabel(r"$|Y|$",size=16)
bx[0].set_xlabel(r"$\omega$",size=16)
bx[0].set_title(r"Magnitude and Phase plots of DFT of $\cos^3(t)$ ")
# plot the phase only where the magnitude is significant
ii2 = np.where(abs(Y2)>10**-3)
bx[1].plot(w[ii2],angle(Y2[ii2]),"ro")
bx[1].set_xlim([-10,10])
bx[1].grid(True)
bx[1].set_ylabel(r"Phase of $Y$",size=16)
bx[1].set_xlabel(r"$\omega$",size=16)
plt.show()
# We observe the peaks in the magnitude at the expected frequencies of 1 and 3, along with the expected amplitudes. The phases of the peaks are also in agreement with what is expected(both are positive cosines).
# # Freq Modulation
# We find the DFT of the following frequency modulated signal:
#
# $$ \cos(20t +5 \cos(t))$$
# In[4]:
# DFT of the frequency-modulated signal cos(20t + 5 cos(t)): sidebands
# appear around the carrier frequency of 20.
x = np.linspace(-4*pi,4*pi,513)[:-1]
w = np.linspace(-64,64,513)[:-1]
y1 = cos(20*x + 5*cos(x))
Y1 = fftshift(fft(y1))/512
fig,ax = plt.subplots(2)
ax[0].plot(w,abs(Y1))
ax[0].set_xlim([-40,40])
ax[0].grid(True)
ax[0].set_ylabel(r"$|Y|$",size=16)
ax[0].set_xlabel(r"$\omega$",size=16)
ax[0].set_title(r"Magnitude and Phase plots of DFT of $ \cos(20t +5 \cos(t))$ ")
# zero out the phase wherever the magnitude is negligible
ii1 = np.where(abs(Y1)<10**-3)
ph = angle(Y1)
ph[ii1] = 0
ax[1].plot(w,ph,"ro")
ax[1].set_xlim([-40,40])
ax[1].grid(True)
ax[1].set_ylabel(r"Phase of $Y$",size=16)
ax[1].set_xlabel(r"$\omega$",size=16)
plt.show()
# # Continuous time Fourier Transform of Gaussian
# The fourier transform of a signal $x(t)$ is defined as follows:
#
# $$X(\omega) = \frac{1}{2 \pi} \int_{- \infty}^{\infty} x(t) e^{-j \omega t} dt$$
#
# We can approximate this by the fourier transform of the windowed version of the signal $x(t)$, with a sufficiently large window. Let the window be of size $T$. We get:
#
# $$X(\omega) \approx \frac{1}{2 \pi} \int_{- \frac{T}{2}}^{\frac{T}{2}} x(t) e^{-j \omega t} dt$$
#
# We can write the integral approximately as a Reimann sum:
#
# $$X(\omega) \approx \frac{\Delta t}{2 \pi} \sum_{n = -\frac{N}{2}}^{\frac{N}{2}-1} x(n \Delta t) e^{-j \omega n \Delta t}$$
#
# Where we divide the integration domain into $N$ parts (assume $N$ is even), each of width $\Delta t = \frac{T}{N}$.
#
# Now, we sample our spectrum with a sampling period in the frequency domain of $\Delta \omega = \frac{2 \pi}{T}$, which makes our continuous time signal periodic with period equal to the window size $T$. Our transform then becomes:
#
# $$X(k \Delta \omega) \approx \frac{\Delta t}{2 \pi} \sum_{n = -\frac{N}{2}}^{\frac{N}{2}-1} x(n \Delta t) e^{-j k n \Delta \omega \Delta t}$$
#
# Which simplifies to:
#
# $$X(k \Delta \omega) \approx \frac{\Delta t}{2 \pi} \sum_{n = -\frac{N}{2}}^{\frac{N}{2}-1}
# x(n \Delta t) e^{-j \frac{2 \pi}{N} k n}$$
#
# Noticing that the summation is of the form of a DFT, we can finally write:
#
# $$X(k \Delta \omega) \approx \frac{\Delta t}{2 \pi} DFT \{x(n \Delta t)\}$$
#
# The two approximations we made were:
#
# * The fourier transform of the windowed signal is approximately the same as that of the original.
# * The integral was approximated as a Reimann sum.
#
# We can improve these approximations by making the window size $T$ larger, and by decreasing the time domain sampling period or increasing the number of samples $N$. We implement this in an iterative algorithm in the next part.
#
# The analytical expression of the fourier transform of the gaussian:
#
# $$x(t) = e^{\frac{-t^2}{2}}$$
#
# Was found as:
#
# $$X(j \omega) = \frac{1}{\sqrt{2 \pi}}e^{\frac{-\omega^2}{2}}$$
#
# We also compare the numerical results with the expected analytical expression.
# In[5]:
def ideal(w):
    """Analytical CTFT of the gaussian exp(-t^2/2): exp(-w^2/2) / sqrt(2*pi)."""
    scale = 1/np.sqrt(2*pi)
    return scale * (exp((-1*w*w)/2))
def tol(N=128,tol=10**-6):
    """Iteratively approximate the CTFT of the gaussian exp(-t^2/2) via the DFT.

    Starting from a window of width 8*pi and ``N`` samples, the window size
    and sample count are doubled until the total absolute error between the
    DFT-based estimate and the analytical spectrum ``ideal(w)`` falls below
    ``tol``.  The final estimate and the analytical spectrum are then plotted.

    Bug fix: the previous version immediately overwrote the ``N`` parameter
    with 128, so callers could never change the initial sample count.

    :param N: initial number of time-domain samples (doubled each iteration).
    :param tol: target total absolute error; the name shadows the function
        inside the body but is kept for interface compatibility.
    """
    T = 8*pi
    error = 10**10
    while error>tol:
        # sample the gaussian on [-T/2, T/2) and build the matching frequency axis
        x = np.linspace(-T/2,T/2,N+1)[:-1]
        w = pi* np.linspace(-N/T,N/T,N+1)[:-1]
        # ifftshift moves t=0 to index 0; the prefactor scales the DFT into a CTFT estimate
        Y1 = (T/(2*pi*N)) * fftshift(fft(ifftshift(exp(-x*x/2))))
        error = sum(abs(Y1-ideal(w)))
        # refine: double the window and the sample count for the next pass
        T = T*2
        N = N*2
    print("max error =" + str(error))
    # plot the calculated spectrum
    fig,ax = plt.subplots(2)
    ax[0].plot(w,abs(Y1))
    ax[0].set_xlim([-10,10])
    ax[0].grid(True)
    ax[0].set_ylabel(r"$|Y|$",size=16)
    ax[0].set_xlabel(r"$\omega$",size=16)
    ax[0].set_title(r"Magnitude and Phase plots(calculated) of DFT of $ \exp(-t^{2}/2)$ ")
    # zero out the phase wherever the magnitude is negligible
    ii1 = np.where(abs(Y1)<10**-3)
    ph = angle(Y1)
    ph[ii1] =0
    ax[1].plot(w,ph,"r+")
    ax[1].set_xlim([-10,10])
    ax[1].grid(True)
    ax[1].set_ylabel(r"Phase of $Y$",size=16)
    ax[1].set_xlabel(r"$\omega$",size=16)
    plt.show()
    # plot the analytical spectrum for comparison
    fig2,bx = plt.subplots(2)
    bx[0].plot(w,abs(ideal(w)))
    bx[0].set_xlim([-10,10])
    bx[0].grid(True)
    bx[0].set_ylabel(r"$|Y|$",size=16)
    bx[0].set_xlabel(r"$\omega$",size=16)
    bx[0].set_title(r"Magnitude and Phase plots(ideal) of DFT of $ \exp(-t^{2}/2)$ ")
    bx[1].plot(w,angle(ideal(w)),"r+")
    bx[1].set_xlim([-10,10])
    bx[1].grid(True)
    bx[1].set_ylabel(r"Phase of $Y$",size=16)
    bx[1].set_xlabel(r"$\omega$",size=16)
    plt.show()
# In[6]:
# Run the iterative CTFT approximation with default settings.
tol()
# # Conclusions
# * From the above pairs of plots, it is clear that with a sufficiently large window size and sampling rate, the DFT approximates the CTFT of the gaussian.
# * This is because the magnitude of the gaussian quickly approaches $0$ for large values of time. This means that there is lesser frequency domain aliasing due to windowing. This can be interpreted as follows:
# * Windowing in time is equivalent to convolution with a sinc in frequency domain. A large enough window means that the sinc is tall and thin. This tall and thin sinc is approximately equivalent to a delta function for a sufficiently large window. This means that convolution with this sinc does not change the spectrum much.
# * Sampling after windowing is done so that the DFT can be calculated using the Fast Fourier Transform. This is then a sampled version of the DTFT of the sampled time domain signal. With sufficiently large sampling rates, this approximates the CTFT of the original time domain signal.
# * This process is done on the gaussian and the results are in agreement with what is expected.
| [
"matplotlib.pyplot.show",
"numpy.sin",
"numpy.cos",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((1048, 1063), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1060, 1063), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1481, 1483), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2121), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (2118, 2121), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2528, 2530), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3020), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (3017, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3450, 3452), True, 'import matplotlib.pyplot as plt\n'), ((926, 959), 'numpy.linspace', 'np.linspace', (['(-4 * pi)', '(4 * pi)', '(513)'], {}), '(-4 * pi, 4 * pi, 513)\n', (937, 959), True, 'import numpy as np\n'), ((963, 988), 'numpy.linspace', 'np.linspace', (['(-64)', '(64)', '(513)'], {}), '(-64, 64, 513)\n', (974, 988), True, 'import numpy as np\n'), ((998, 1007), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1004, 1007), True, 'import numpy as np\n'), ((1984, 2017), 'numpy.linspace', 'np.linspace', (['(-4 * pi)', '(4 * pi)', '(129)'], {}), '(-4 * pi, 4 * pi, 129)\n', (1995, 2017), True, 'import numpy as np\n'), ((2021, 2046), 'numpy.linspace', 'np.linspace', (['(-16)', '(16)', '(129)'], {}), '(-16, 16, 129)\n', (2032, 2046), True, 'import numpy as np\n'), ((2056, 2065), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (2062, 2065), True, 'import numpy as np\n'), ((2877, 2910), 'numpy.linspace', 'np.linspace', (['(-4 * pi)', '(4 * pi)', '(513)'], {}), '(-4 * pi, 4 * pi, 513)\n', (2888, 2910), True, 'import numpy as np\n'), ((2914, 2939), 'numpy.linspace', 'np.linspace', (['(-64)', '(64)', '(513)'], {}), '(-64, 64, 513)\n', (2925, 2939), True, 'import numpy as np\n'), ((6142, 6157), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(2)'], {}), '(2)\n', (6154, 6157), True, 'import matplotlib.pyplot as plt\n'), ((6643, 6653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6651, 6653), True, 'import matplotlib.pyplot as plt\n'), ((6668, 6683), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (6680, 6683), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7124), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7122, 7124), True, 'import matplotlib.pyplot as plt\n'), ((5708, 5723), 'numpy.sqrt', 'np.sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (5715, 5723), True, 'import numpy as np\n'), ((5859, 5892), 'numpy.linspace', 'np.linspace', (['(-T / 2)', '(T / 2)', '(N + 1)'], {}), '(-T / 2, T / 2, N + 1)\n', (5870, 5892), True, 'import numpy as np\n'), ((5906, 5939), 'numpy.linspace', 'np.linspace', (['(-N / T)', '(N / T)', '(N + 1)'], {}), '(-N / T, N / T, N + 1)\n', (5917, 5939), True, 'import numpy as np\n')] |
import csv
import glob
import numpy as np
save_path = './save/test/'
def parse_scores(fname, maximize_F1=True):
    """Pull the F1 or EM score out of a result-file name.

    File names look like ``..._F1=(67.98)_EM=(64.27).csv`` (scores written
    with ``%05.2f``), so each score occupies the five characters that follow
    an opening parenthesis.

    :param fname: result file name (or path) to parse.
    :param maximize_F1: when True return the F1 score, otherwise the EM score.
    :return: the requested score as a float.
    """
    pieces = fname.split('(')
    f1_score = float(pieces[-2][:5])
    em_score = float(pieces[-1][:5])
    return f1_score if maximize_F1 else em_score
def max_element_idxs(counts):
    """Return the indices of every occurrence of the maximum of ``counts``.

    :param counts: sequence of comparable values (e.g. vote counts).
    :return: 1-D numpy array of positions where the maximum occurs.
    """
    peak = max(counts)
    return np.flatnonzero(np.array(counts) == peak)
class VotingEnsemble:
    """Majority-vote ensemble over prediction CSVs from several experiments.

    Collects per-question predictions from every scored CSV found under the
    given experiment directories, then resolves each question by majority
    vote (ties broken by the highest model score) and writes the result to a
    single submission CSV.
    """

    def __init__(self, exp_names, split='dev', save_name='submission', maximize_F1=True):
        """Load predictions from all scored CSVs of the given experiments.

        :param exp_names: experiment directory names under the global save_path.
        :param split: 'dev' reads val*.csv files, anything else reads test*.csv.
        :param save_name: suffix for the output submission file name.
        :param maximize_F1: score each CSV by its F1 (True) or EM (False) value.
        """
        print(f'Generating ensemble for {split}.')
        self.vote_dict = {}
        self.save_path = save_path + split + '_' + save_name + '.csv'
        for exp_name in exp_names:
            if split == 'dev':
                csv_paths = glob.glob(save_path + exp_name + '/val*.csv')
            else:
                csv_paths = glob.glob(save_path + exp_name + '/test*.csv')
            for csv_path in csv_paths:
                # only files ending in ')' before the extension carry scores
                if csv_path[-5] != ')':
                    continue
                score = parse_scores(csv_path, maximize_F1)
                with open(csv_path, mode='r') as csv_file:
                    csv_reader = csv.DictReader(csv_file)
                    for row in csv_reader:
                        uuid, pred = row['Id'], row['Predicted']
                        # NOTE(review): predictions are keyed by score, so two
                        # CSVs with the identical score overwrite each other's
                        # vote for the same question — confirm this is intended
                        if uuid not in self.vote_dict.keys():
                            self.vote_dict[uuid] = {score: pred}
                        else:
                            self.vote_dict[uuid].update({score: pred})

    def ensemble(self):
        """Resolve the collected votes and write the submission CSV.

        For every question the most frequent prediction wins; when several
        predictions tie on count, the one backed by the highest model score
        is chosen.  Results are written to ``self.save_path``.
        """
        # self.vote_dict = {id: {score: pred, score: ...}, id: ...}
        for uuid in sorted(self.vote_dict):
            scores = []
            preds = []
            for s, p in self.vote_dict[uuid].items():
                scores.append(s)
                preds.append(p)
            # count how often each prediction occurs
            preds_count = [preds.count(p) for p in preds]
            max_count_idxs = max_element_idxs(preds_count)
            if len(max_count_idxs) > 1:
                # tie on vote count: pick the prediction with the best score
                scores_in_count_tie = np.array(scores)[max_count_idxs]
                idx_max_score_in_count_tie = np.argmax(scores_in_count_tie)
                final_pred_id = max_count_idxs[idx_max_score_in_count_tie]
            else:
                final_pred_id = max_count_idxs[0]
            self.vote_dict[uuid] = preds[final_pred_id]
        with open(self.save_path, 'w', newline='', encoding='utf-8') as csv_fh:
            csv_writer = csv.writer(csv_fh, delimiter=',')
            csv_writer.writerow(['Id', 'Predicted'])
            for uuid in sorted(self.vote_dict):
                csv_writer.writerow([uuid, self.vote_dict[uuid]])
if __name__ == '__main__':
    # experiment directories whose scored prediction CSVs feed the ensemble
    exp_names = ['qanet_D=128_encblk=7_head=8_bs=24_run-04-dev-ensemble-course', # qanet-large [best]
                 'qanet_D=96_encblk=5_head=6_bs=64_run-01-dev-emsemble-course', # qanet-mid
                 'qanet_D=128_encblk=7_head=8_bs=24_run-01-dev-ensemble-myazaure', # qanet-large
                 'bidaf_D=100_charEmb=True_fusion=True_bs=64_run-01-dev-ensemble-course', # bidaf+char_emb+fusion
                 'qanet_D=128_encblk=7_head=8_bs=24_run-02-dev-ensemble-myazure', # qanet-large
                 'qanet_D=96_encblk=5_head=6_bs=32_run-01-dev-F1-67.98-EM-64.27-course', # qanet-mid best
                 'qanet_D=128_encblk=7_head=8_bs=24_run-01-dev-F1-70.38-EM-66.81-course', # qanet-large 2nd best
                 ]
    # build the dev-split ensemble and write the submission CSV
    voting_ensemble = VotingEnsemble(exp_names, split='dev')
voting_ensemble.ensemble() | [
"csv.writer",
"numpy.argmax",
"csv.DictReader",
"numpy.array",
"glob.glob"
] | [((2509, 2542), 'csv.writer', 'csv.writer', (['csv_fh'], {'delimiter': '""","""'}), "(csv_fh, delimiter=',')\n", (2519, 2542), False, 'import csv\n'), ((425, 441), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (433, 441), True, 'import numpy as np\n'), ((810, 855), 'glob.glob', 'glob.glob', (["(save_path + exp_name + '/val*.csv')"], {}), "(save_path + exp_name + '/val*.csv')\n", (819, 855), False, 'import glob\n'), ((902, 948), 'glob.glob', 'glob.glob', (["(save_path + exp_name + '/test*.csv')"], {}), "(save_path + exp_name + '/test*.csv')\n", (911, 948), False, 'import glob\n'), ((2169, 2199), 'numpy.argmax', 'np.argmax', (['scores_in_count_tie'], {}), '(scores_in_count_tie)\n', (2178, 2199), True, 'import numpy as np\n'), ((1209, 1233), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (1223, 1233), False, 'import csv\n'), ((2091, 2107), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2099, 2107), True, 'import numpy as np\n')] |
"""
Contains the function 'ornstein_uhlenbeck' and auxiliary functions.
"""
import math
import numpy as np
def ornstein_uhlenbeck(n1, n2, h=0.1):
"""
Computes the Ornstein-Uhlenbeck covariance matrix with given size and correlation length.
The entries of the matrix are given by
cov[i,j] = exp(-||pos(i) - pos(j)||/h),
where pos(i) is the normalized position of the i-th pixel.
:param n1: An integer. The image is assumed to have shape (n1, n2).
:param n2: An integer. The image is assumed to have shape (n1, n2).
:param h: The correlation length. A float. Small h corresponds to the assumption that distant pixels are
uncorrelated.
:return: The Ornstein-Uhlenbeck covariance matrix, a numpy array of shape (n1*n2, n1*n2).
"""
# This function is simply a vectorized implementation of the above index-wise formula.
# First, we compute the vector of normalized positions for all n*n-1 pixels.
p = np.zeros((n1*n2, 2))
for i in range(n1*n2):
p[i, :] = _pos(i, n1, n2)
pdiff0 = np.subtract.outer(p[:,0], p[:,0])
pdiff1 = np.subtract.outer(p[:,1], p[:,1])
pdiff = np.dstack((pdiff0, pdiff1))
diffnorm = np.linalg.norm(pdiff, axis=2)
cov = np.exp(-diffnorm / h)
return cov
def _pos(i, n1, n2):
"""
Given a picture of size (n,n), assuming that the pixels are in lexicographic order, and that the image is square.
Returns an approximate position for each pixel, scaling the image to [0,1]^2.
That is, the i-th pixel has the position [(i % n2) / (n2-1), (i // n2) / (n1-1)] in the domain [0,1]x[0,1].
:param i: The index of the pixel, in lexicographic order. An integer between 0 and n1*n2-1.
For example, i=n2-1 corresponds to the pixel at the upper right corner of the image.
:return: The normalized position as a numpy vector of size 2.
"""
x_position = (i % n2) / (n2-1)
y_position = (i // n2) / (n1-1)
return np.array([x_position, y_position]) | [
"numpy.dstack",
"numpy.zeros",
"numpy.subtract.outer",
"numpy.linalg.norm",
"numpy.exp",
"numpy.array"
] | [((964, 986), 'numpy.zeros', 'np.zeros', (['(n1 * n2, 2)'], {}), '((n1 * n2, 2))\n', (972, 986), True, 'import numpy as np\n'), ((1059, 1094), 'numpy.subtract.outer', 'np.subtract.outer', (['p[:, 0]', 'p[:, 0]'], {}), '(p[:, 0], p[:, 0])\n', (1076, 1094), True, 'import numpy as np\n'), ((1106, 1141), 'numpy.subtract.outer', 'np.subtract.outer', (['p[:, 1]', 'p[:, 1]'], {}), '(p[:, 1], p[:, 1])\n', (1123, 1141), True, 'import numpy as np\n'), ((1152, 1179), 'numpy.dstack', 'np.dstack', (['(pdiff0, pdiff1)'], {}), '((pdiff0, pdiff1))\n', (1161, 1179), True, 'import numpy as np\n'), ((1195, 1224), 'numpy.linalg.norm', 'np.linalg.norm', (['pdiff'], {'axis': '(2)'}), '(pdiff, axis=2)\n', (1209, 1224), True, 'import numpy as np\n'), ((1235, 1256), 'numpy.exp', 'np.exp', (['(-diffnorm / h)'], {}), '(-diffnorm / h)\n', (1241, 1256), True, 'import numpy as np\n'), ((1955, 1989), 'numpy.array', 'np.array', (['[x_position, y_position]'], {}), '([x_position, y_position])\n', (1963, 1989), True, 'import numpy as np\n')] |
import os
import glob
import multiprocessing
import itertools
import argparse
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
from sklearn.cluster import DBSCAN
from subprocess import PIPE, Popen
import scipy.spatial
from scipy.optimize import curve_fit
import warnings
warnings.simplefilter(action='ignore', category=Warning)
import qrdar
import pcd_io
import ply_io
# def apply_rotation(M, df):
# if 'a' not in df.columns:
# df.loc[:, 'a'] = 1
# r_ = np.dot(M, df[['x', 'y', 'z', 'a']].T).T
# df.loc[:, ['x', 'y', 'z']] = r_[:, :3]
# return df[['x', 'y', 'z']]
# def apply_rotation_2D(M, df):
# if 'a' not in df.columns:
# df.loc[:, 'a'] = 1
# r_ = np.dot(M, df[['x', 'y', 'a']].T).T
# df.loc[:, ['x', 'y']] = r_[:, :2]
# return df[['x', 'y']]
def rigid_transform_3D(A, B, d=3):
    """
    Compute the rigid transform (rotation + translation) that maps point set
    A onto point set B, via SVD of the cross-covariance matrix.

    Based on http://nghiaho.com/uploads/code/rigid_transform_3D.py_

    :param A: (N, d) array of source points.
    :param B: (N, d) array of destination points, row-paired with A.
    :param d: dimensionality of the points (default 3).
    :return: (d+1, d+1) homogeneous transform matrix combining the rotation
             and translation (applied as translation @ rotation).
    """
    assert len(A) == len(B)

    A = np.matrixlib.defmatrix.matrix(A)
    B = np.matrixlib.defmatrix.matrix(B)

    N = A.shape[0]  # total points

    # BUG FIX: mean/transpose/linalg were used without the np. prefix, which
    # raised NameError at runtime (this module only does `import numpy as np`)
    centroid_A = np.mean(A, axis=0).reshape(1, d)
    centroid_B = np.mean(B, axis=0).reshape(1, d)

    # centre both point sets on their centroids
    AA = A - np.tile(centroid_A, (N, 1))
    BB = B - np.tile(centroid_B, (N, 1))

    # cross-covariance; '*' is matrix multiplication for np.matrix operands
    H = np.transpose(AA) * BB

    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    # NOTE(review): the canonical algorithm corrects for reflections when
    # det(R) < 0; that correction is omitted here, as in the original code.

    t = -R * centroid_A.T + centroid_B.T

    # assemble homogeneous rotation (rot) and translation (trans) matrices
    rot, trans = np.identity(d + 1), np.identity(d + 1)
    rot[:d, :d] = R
    trans[:d, d] = t.reshape(-1, d)
    return np.dot(trans, rot)
def read_aruco2(pc,
                expected,
                figs=False,
                marker_template=None,
                codes_dict='aruco_mip_16h3',
                verbose=False):
    """
    Locate aruco fiducial targets in a point cloud and return the centres
    of their corner stickers.

    :param pc: point cloud DataFrame with a 'refl' column
    :param expected: codes expected to be present in the scan
    :param figs: unused, kept for interface compatibility
    :param marker_template: template passed through to qrdar
    :param codes_dict: name of the aruco code dictionary
    :param verbose: print progress messages
    :return: DataFrame with columns ['x', 'y', 'z', 'aruco'], one row per sticker
    """
    if verbose: print("extracting aruco")
    pc.loc[:, 'intensity'] = pc.refl

    codes = qrdar.identify_codes(pc,
                                 expected=expected,
                                 print_figure=True,
                                 marker_template=marker_template,
                                 codes_dict=codes_dict,
                                 verbose=verbose)
    codes.rename(columns={'code':'aruco'}, inplace=True)

    # keep only confidently decoded targets
    codes = codes[codes.confidence == 1]
    codes.reset_index(inplace=True)

    sticker_centres = pd.DataFrame(columns=['x', 'y', 'z', 'aruco'])
    row_n = 0
    for _, target in codes.iterrows():
        # each target carries up to four sticker corners c0..c3;
        # a bare float (NaN) marks a missing corner
        for corner in ['c0', 'c1', 'c2', 'c3']:
            if isinstance(target[corner], float): continue
            sticker_centres.loc[row_n, :] = list(target[corner]) + [target.aruco]
            row_n += 1

    return sticker_centres
def identify_ground2(pc, target_centres):
    """
    Level the point cloud so the aruco targets lie on the z=0 plane, then
    flag near-ground points as non-branch.

    :param pc: point cloud DataFrame (modified in place and returned)
    :param target_centres: DataFrame of target centres with x, y, z columns
    :return: (pc, M) where M is the 4x4 homogeneous transform that was applied
    """
    # the plane the targets should sit on: same x, y but z forced to 0
    flattened = target_centres[['x', 'y', 'z']].copy()
    flattened.z = 0
    M = qrdar.common.rigid_transform_3D(
        target_centres[['x', 'y', 'z']].astype(float).values,
        flattened.astype(float).values)
    pc.loc[:, ['x', 'y', 'z']] = qrdar.common.apply_rotation(M, pc)
    # anything within 5 cm of the plane is treated as ground, not branch
    pc.loc[pc.z < .05, 'is_branch'] = False
    return pc, M
def find_buckets(pc, target_centres, N, bucket_height=.38, bucket_radius=.15):
    """
    Locate the N buckets holding the branches, pair each bucket with the
    nearest aruco target, and strip bucket points from the branch class.

    :param pc: levelled point cloud DataFrame with x, y, z, refl, is_branch
               (modified in place and returned)
    :param target_centres: DataFrame of aruco target centres (x, y, aruco)
    :param N: number of buckets expected in the scene
    :param bucket_height: height of the bucket in metres
    :param bucket_radius: radius of the bucket in metres
    :return: (pc, bucket_centres) where bucket_centres has labels_, x, y, aruco
    """
    ### find buckets and remove ###
    print ('finding buckets')
    # buckets sit between 10 and 40 cm above the levelled ground;
    # .copy() so the .loc writes below do not target a view of pc
    buckets = pc[pc.z.between(.1, .4)].copy()

    # voxelise to 5 mm to speed-up dbscan
    buckets.loc[:, 'xx'] = (buckets.x // .005) * .005
    buckets.loc[:, 'yy'] = (buckets.y // .005) * .005
    buckets.loc[:, 'zz'] = (buckets.z // .005) * .005
    buckets.sort_values(['xx', 'yy', 'zz', 'refl'], inplace=True)
    bucket_voxels = buckets[~buckets[['xx', 'yy', 'zz']].duplicated()].copy()

    dbscan = DBSCAN(min_samples=20, eps=.05).fit(bucket_voxels[['xx', 'yy', 'zz']])
    bucket_voxels.loc[:, 'labels_'] = dbscan.labels_

    # merge cluster labels back onto the full-resolution points
    buckets = pd.merge(buckets, bucket_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx', 'yy', 'zz'])

    # keep the N largest clusters (assumed to be the buckets)
    labels = buckets.labels_.value_counts().index[:N]
    buckets = buckets[buckets.labels_.isin(labels)]
    bucket_centres = buckets.groupby('labels_')[['x', 'y']].mean().reset_index()
    bucket_centres.loc[:, 'aruco'] = -1

    try:
        # pair up aruco and buckets, identify and label bucket points
        for i, lbl in enumerate(buckets.labels_.unique()):
            bucket = buckets[buckets.labels_ == lbl]
            # nearest aruco target to this bucket's centroid
            X, Y = bucket[['x', 'y']].mean(), target_centres[['x', 'y']].astype(float)
            dist2bucket = np.linalg.norm(X - Y, axis=1)
            aruco = target_centres.loc[np.where(dist2bucket == dist2bucket.min())].aruco.values[0]
            print ('bucket {} associated with aruco {}'.format(lbl, aruco))
            bucket_centres.loc[bucket_centres.labels_ == lbl, 'aruco'] = aruco

            # identify buckets points and drop them from the branch class
            x_shift = bucket_centres[bucket_centres.aruco == aruco].x.values
            y_shift = bucket_centres[bucket_centres.aruco == aruco].y.values
            # NOTE(review): this sets an attribute on the DataFrame, not a
            # column, matching the original behaviour
            pc.dist = np.sqrt((pc.x - x_shift)**2 + (pc.y - y_shift)**2)
            idx = pc[(pc.z < bucket_height) & (pc.dist < bucket_radius) & (pc.is_branch)].index
            pc.loc[idx, 'is_branch'] = False

            # label branch base with aruco
            idx = pc[(pc.z < bucket_height + .5) & (pc.dist < bucket_radius)].index
            pc.loc[idx, 'aruco'] = aruco
    except Exception:
        # debug plot of the cluster / target layout.
        # BUG FIX: plt was previously undefined here (the module imports
        # matplotlib.image only), so the handler itself raised NameError
        import matplotlib.pyplot as plt
        plt.scatter(buckets.x.loc[::100], buckets.y.loc[::100], c=buckets.labels_.loc[::100])
        plt.scatter(target_centres.x, target_centres.y)
        [plt.text(r.x, r.y, r.aruco) for ix, r in target_centres.iterrows()]
        # BUG FIX: re-raise the original error instead of a bare Exception
        raise
    return pc, bucket_centres
def isolate_branches(pc, N, translation, odir):
    """
    Cluster the branch points into N branches, name each branch from the
    aruco code found at its base, and write one .ply file per branch.

    NOTE(review): relies on the module-level rotation matrix ``M`` (set in
    __main__) to rotate points back to the scanner frame — TODO pass it in
    explicitly.

    :param pc: point cloud DataFrame with is_branch and aruco columns
    :param N: number of branches expected
    :param translation: DataFrame mapping qrDAR code -> branch name
    :param odir: output directory for the per-branch .ply files
    """
    print ('\tsegmenting branches')
    min_sample, iterate = 10, True
    while iterate:
        # .copy() so the .loc writes below do not target a view of pc
        branches = pc[pc.is_branch].copy()
        # voxelise to 5 mm to speed up dbscan
        branches.loc[:, 'xx'] = (branches.x // .005) * .005
        branches.loc[:, 'yy'] = (branches.y // .005) * .005
        branches.loc[:, 'zz'] = (branches.z // .005) * .005
        branch_voxels = branches[~branches[['xx', 'yy', 'zz']].duplicated()].copy()
        dbscan = DBSCAN(min_samples=min_sample, eps=.02).fit(branch_voxels[['xx', 'yy', 'zz']])
        branch_voxels.loc[:, 'labels_'] = dbscan.labels_
        branches = pd.merge(branches, branch_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx', 'yy', 'zz'])
        labels = branches.labels_.value_counts().index[:N]
        branches = branches[branches.labels_.isin(labels)]
        # a cluster narrower than 10 cm is probably noise: retry with a
        # stricter min_samples until every kept cluster looks like a branch
        width = branches.groupby('labels_').agg({'x':np.ptp, 'y':np.ptp})
        if np.any(width < .1):
            min_sample += 10
        else:
            iterate = False

    cols = [u'pid', u'tot_rtn', u'x', u'y', u'z', u'dev', u'refl', u'rtn_N', u'sel', u'sp', u'rng', u'spot_size']
    for i, label in enumerate(branches.labels_.unique()):
        b = branches[branches.labels_ == label]
        print(b[(b.z < .5) & (~np.isnan(b.aruco))].aruco.value_counts())
        # the aruco code most common at the branch base (< 50 cm) names the branch
        aruco = b[(b.z < .5) & (~np.isnan(b.aruco))].aruco.value_counts().index[0]
        tag = translation[(translation.code == aruco)].name.values[0]
        # rotate back to the original scanner frame before saving
        b.loc[:, ['x', 'y', 'z']] = qrdar.common.apply_rotation(np.linalg.inv(M), b)
        ply_io.write_ply(os.path.join(odir, '{}.ply'.format(tag)), b[[c for c in cols if c in b.columns]])
        # BUG FIX: previously printed args.odir (a global that only exists when
        # run as a script) instead of the odir parameter
        print ('\tsaved branch to:', os.path.join(odir, '{}.ply'.format(tag)))
def read_pc(args):
    """
    Load the point cloud named on the command line, drop high-deviation
    returns and initialise the per-point classification columns.

    :param args: parsed argparse namespace with .pc (path) and .verbose
    :return: point cloud DataFrame with is_branch / aruco columns added
    """
    cloud = qrdar.io.read_ply(args.pc)
    # discard noisy returns
    cloud = cloud[cloud.dev <= 10]
    cloud.loc[:, 'is_branch'] = True
    cloud.loc[:, 'aruco'] = np.nan
    if args.verbose: print ("number of points:", len(cloud))
    return cloud
if __name__ == "__main__":
    # command-line interface for the branch-isolation pipeline
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pc', type=str, help='path to point cloud')
    parser.add_argument('-t', '--translation', type=str, help='path to .csv with tag translation,\
                        this should have the form "name, project, code" \
                        where name is the branch name, project is the name\
                        of the file and code is the qrDAR number')
    parser.add_argument('-o', '--odir', type=str, help='output directory for branches')
    parser.add_argument('--bucket-height', type=float, default=.4, help='height of the bucket')
    parser.add_argument('--bucket-radius', type=float, default=.15, help='radius of the bucket')
    parser.add_argument('--verbose', action='store_true', help='print something')
    args = parser.parse_args()

    # project name is the point-cloud file stem, e.g. '2019-07-26.012'
    # path = '2019-07-26.012.riproject/ascii/2019-07-26.012.ply'
    project = os.path.split(args.pc)[1].split('.')[0]
    if args.verbose: print ('processing project:', project)

    # reading in translation will need to be edited
    # dependent on formatting etc.
    ctag = lambda row: '{}-{}-{}'.format(*row[['plot', 'treetag', 'light']])
    translation = pd.read_csv(args.translation)
    translation.rename(columns={c:c.lower() for c in translation.columns}, inplace=True)
    #translation.loc[:, 'tag'] = translation.apply(ctag, axis=1)
    #translation.tag = [t.replace('-nan', '') for t in translation.tag]

    # restrict the translation table to this project's rows and collect
    # the qrDAR codes expected in the scan
    translation = translation[translation.project == project]
    n_targets = len(translation[translation.project == project])
    expected = translation[translation.project == project].code.astype(int).values
    if args.verbose: print('expecting targets:', n_targets)

    # read in branch scan
    pc = read_pc(args)

    ### read aruco targets ###
    target_centres = read_aruco2(pc, expected, verbose=args.verbose)
    if args.verbose: print('targets identified')

    ### identify ground ###
    # M is the levelling transform; isolate_branches reads it from module scope
    pc, M = identify_ground2(pc, target_centres)
    if args.verbose: print('ground identified')

    ### find buckets ###
    # NOTE(review): 'buket_centres' is a typo for bucket_centres (unused afterwards)
    pc, buket_centres = find_buckets(pc, target_centres, n_targets,
                                     bucket_height=args.bucket_height,
                                     bucket_radius=args.bucket_radius)
    if args.verbose: print('buckets found')

    ### isolate branches ###
    isolate_branches(pc, n_targets, translation, args.odir)
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"qrdar.identify_codes",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.tile",
"sklearn.cluster.DBSCAN",
"pandas.DataFrame",
"warnings.simplefilter",
"pandas.merge",
"numpy.identity",
"qrdar.common.apply_rotation",
"numpy.matrixlib.defmatrix.matrix",
... | [((295, 351), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'Warning'}), "(action='ignore', category=Warning)\n", (316, 351), False, 'import warnings\n'), ((1014, 1046), 'numpy.matrixlib.defmatrix.matrix', 'np.matrixlib.defmatrix.matrix', (['A'], {}), '(A)\n', (1043, 1046), True, 'import numpy as np\n'), ((1055, 1087), 'numpy.matrixlib.defmatrix.matrix', 'np.matrixlib.defmatrix.matrix', (['B'], {}), '(B)\n', (1084, 1087), True, 'import numpy as np\n'), ((1442, 1459), 'numpy.dot', 'np.dot', (['Vt.T', 'U.T'], {}), '(Vt.T, U.T)\n', (1448, 1459), True, 'import numpy as np\n'), ((1621, 1633), 'numpy.dot', 'np.dot', (['N', 'M'], {}), '(N, M)\n', (1627, 1633), True, 'import numpy as np\n'), ((1932, 2071), 'qrdar.identify_codes', 'qrdar.identify_codes', (['pc'], {'expected': 'expected', 'print_figure': '(True)', 'marker_template': 'marker_template', 'codes_dict': 'codes_dict', 'verbose': 'verbose'}), '(pc, expected=expected, print_figure=True,\n marker_template=marker_template, codes_dict=codes_dict, verbose=verbose)\n', (1952, 2071), False, 'import qrdar\n'), ((2416, 2462), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['x', 'y', 'z', 'aruco']"}), "(columns=['x', 'y', 'z', 'aruco'])\n", (2428, 2462), True, 'import pandas as pd\n'), ((3088, 3122), 'qrdar.common.apply_rotation', 'qrdar.common.apply_rotation', (['M', 'pc'], {}), '(M, pc)\n', (3115, 3122), False, 'import qrdar\n'), ((3965, 4055), 'pandas.merge', 'pd.merge', (['buckets', "bucket_voxels[['xx', 'yy', 'zz', 'labels_']]"], {'on': "['xx', 'yy', 'zz']"}), "(buckets, bucket_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx',\n 'yy', 'zz'])\n", (3973, 4055), True, 'import pandas as pd\n'), ((7604, 7630), 'qrdar.io.read_ply', 'qrdar.io.read_ply', (['args.pc'], {}), '(args.pc)\n', (7621, 7630), False, 'import qrdar\n'), ((7850, 7875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7873, 7875), False, 'import argparse\n'), ((9146, 9175), 
'pandas.read_csv', 'pd.read_csv', (['args.translation'], {}), '(args.translation)\n', (9157, 9175), True, 'import pandas as pd\n'), ((1261, 1288), 'numpy.tile', 'np.tile', (['centroid_A', '(N, 1)'], {}), '(centroid_A, (N, 1))\n', (1268, 1288), True, 'import numpy as np\n'), ((1302, 1329), 'numpy.tile', 'np.tile', (['centroid_B', '(N, 1)'], {}), '(centroid_B, (N, 1))\n', (1309, 1329), True, 'import numpy as np\n'), ((1520, 1538), 'numpy.identity', 'np.identity', (['(d + 1)'], {}), '(d + 1)\n', (1531, 1538), True, 'import numpy as np\n'), ((1538, 1556), 'numpy.identity', 'np.identity', (['(d + 1)'], {}), '(d + 1)\n', (1549, 1556), True, 'import numpy as np\n'), ((6452, 6543), 'pandas.merge', 'pd.merge', (['branches', "branch_voxels[['xx', 'yy', 'zz', 'labels_']]"], {'on': "['xx', 'yy', 'zz']"}), "(branches, branch_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx',\n 'yy', 'zz'])\n", (6460, 6543), True, 'import pandas as pd\n'), ((6743, 6762), 'numpy.any', 'np.any', (['(width < 0.1)'], {}), '(width < 0.1)\n', (6749, 6762), True, 'import numpy as np\n'), ((3802, 3834), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'min_samples': '(20)', 'eps': '(0.05)'}), '(min_samples=20, eps=0.05)\n', (3808, 3834), False, 'from sklearn.cluster import DBSCAN\n'), ((4637, 4666), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - Y)'], {'axis': '(1)'}), '(X - Y, axis=1)\n', (4651, 4666), True, 'import numpy as np\n'), ((5136, 5190), 'numpy.sqrt', 'np.sqrt', (['((pc.x - x_shift) ** 2 + (pc.y - y_shift) ** 2)'], {}), '((pc.x - x_shift) ** 2 + (pc.y - y_shift) ** 2)\n', (5143, 5190), True, 'import numpy as np\n'), ((7350, 7366), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (7363, 7366), True, 'import numpy as np\n'), ((6297, 6337), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'min_samples': 'min_sample', 'eps': '(0.02)'}), '(min_samples=min_sample, eps=0.02)\n', (6303, 6337), False, 'from sklearn.cluster import DBSCAN\n'), ((8863, 8885), 'os.path.split', 'os.path.split', (['args.pc'], 
{}), '(args.pc)\n', (8876, 8885), False, 'import os\n'), ((7091, 7108), 'numpy.isnan', 'np.isnan', (['b.aruco'], {}), '(b.aruco)\n', (7099, 7108), True, 'import numpy as np\n'), ((7166, 7183), 'numpy.isnan', 'np.isnan', (['b.aruco'], {}), '(b.aruco)\n', (7174, 7183), True, 'import numpy as np\n')] |
"""
Functions for reading .csv files
"""
import numpy as np
from . import functions as fn
from .babelscan import Scan
"----------------------------LOAD FUNCTIONS---------------------------------"
def read_csv_file(filename):
    """
    Read a comma-separated text file where comment/header lines precede the data.

    The first line whose first (non-empty) field parses as a float marks the
    start of the numeric data; the line immediately before it is taken as the
    header row (with any leading '#' stripped).

    :param filename: str path to file
    :return: headers, data: list of str, 2D numpy array
    """
    with open(filename) as fh:
        lines = fh.readlines()

    # locate the first data line: the first row whose leading field is numeric
    for n, line in enumerate(lines):
        fields = line.split(',')
        if len(fields) < 2:
            continue
        # the line may start with a comma, in which case use the second field
        first = fields[0] or fields[1]
        try:
            float(first)
            break
        except ValueError:
            continue

    # the line just above the data is the header row
    try:
        header = lines[n - 1].strip().strip('#').split(',')
    except (NameError, IndexError):
        # n is unbound when the file had no lines at all
        raise Exception('%s contains no headers' % filename)

    data = np.genfromtxt(lines[n:], delimiter=',')
    return header, data
"----------------------------------------------------------------------------------------------------------------------"
"---------------------------------------------- CsvScan -------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
class CsvScan(Scan):
    """
    Scan for .csv files
    Reads data into babelscan class, storing data in the internal namespace
    Scan data and metadata can be requested using the name of the dataset (e.g. 'eta')
    Usage:
        d = CsvScan('file.csv')
        d('eta') >> finds data column or metadata called 'eta', returns the array
        d.axes() >> automatically finds the default xaxis, returns the array
        d.signal() >> automatically finds the default yaxis, returns the array
        d.image(idx) >> finds the image location if available and returns a detector image
    """
    def __init__(self, filename, **kwargs):
        self.filename = filename
        self.file = fn.file2name(filename)
        self.scan_number = fn.scanfile2number(filename)
        namespace = {
            'filename': filename,
            'filetitle': self.file,
            'scan_number': self.scan_number
        }
        alt_names = {
            # shortcut: name in file
            'cmd': 'scan_command',
        }
        super(CsvScan, self).__init__(namespace, alt_names, **kwargs)

    def reset(self):
        """Reset the namespace to the bare file metadata."""
        # BUG FIX: previously used the key 'scanno', which did not match the
        # 'scan_number' key set in __init__, so resetting silently renamed it
        self._namespace = {
            'filename': self.filename,
            'filetitle': self.file,
            'scan_number': self.scan_number
        }

    def __repr__(self):
        out = 'CsvScan(filename: %s, namespace: %d, associations: %d)'
        return out % (self.filename, len(self._namespace), len(self._alt_names))

    def _load_data(self, name):
        """
        Load data from the csv file
        Overloads Scan._load_data to read the csv file
        if 'name' not available, raises KeyError
        :param name: str name or address of data
        """
        header, data = read_csv_file(self.filename)
        # avoid shadowing the 'name' parameter inside the comprehension
        dataobj = {col_name: col for col_name, col in zip(header, data.T)}
        self._namespace.update(dataobj)
        # Set axes, signal defaults from the first two columns
        self.add2namespace(header[0], other_names=self._axes_str[0])
        self.add2namespace(header[1], other_names=self._signal_str[0])
        super(CsvScan, self)._load_data(name)
| [
"numpy.genfromtxt"
] | [((1052, 1091), 'numpy.genfromtxt', 'np.genfromtxt', (['lines[n:]'], {'delimiter': '""","""'}), "(lines[n:], delimiter=',')\n", (1065, 1091), True, 'import numpy as np\n')] |
import torch.nn as nn
import numpy as np
from .captionAPI import *
class BaseAttack:
    """
    Base class for attacks on image-captioning models.

    Holds the (frozen) encoder/decoder pair, the vocabulary mapping and the
    attack hyper-parameters, and provides the loss / activation modules
    shared by concrete attack implementations.
    """
    def __init__(self, encoder, decoder, word_map, attack_norm, device, config):
        self.device = device
        self.word_map = word_map
        self.attack_norm = attack_norm
        self.config = config

        # move the models to the target device and freeze them in eval mode
        self.encoder = encoder.to(self.device)
        self.encoder.eval()
        self.decoder = decoder.to(self.device)
        self.decoder.eval()

        # shared layers / criteria used by the concrete attacks
        self.softmax = nn.Softmax(dim=1)
        self.bce_loss = nn.BCELoss(reduction='none')
        self.mse_Loss = nn.MSELoss(reduction='none')
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()

        # attack hyper-parameters
        self.max_iter = config['max_iter']
        self.max_per = config['max_per']
        self.beams = config['beams']
        self.max_len = config['max_len']
        self.lr = config['lr']

    def get_trans_len(self, x):
        """Return the predicted caption lengths for batch x as a numpy array."""
        return np.array(prediction_len_batch(x, self.encoder, self.decoder,
                                            self.word_map, self.max_len, self.device))

    def run_attack(self, x):
        """Run the attack on x — implemented by subclasses."""
        pass

    def compute_loss(self, x):
        """Compute the attack objective for x — implemented by subclasses."""
        pass
| [
"torch.nn.MSELoss",
"torch.nn.ReLU",
"torch.nn.BCELoss",
"torch.nn.Softmax",
"numpy.array",
"torch.nn.Flatten"
] | [((545, 562), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (555, 562), True, 'import torch.nn as nn\n'), ((587, 615), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (597, 615), True, 'import torch.nn as nn\n'), ((640, 668), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (650, 668), True, 'import torch.nn as nn\n'), ((692, 704), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (702, 704), True, 'import torch.nn as nn\n'), ((725, 734), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (732, 734), True, 'import torch.nn as nn\n'), ((1090, 1108), 'numpy.array', 'np.array', (['seqs_len'], {}), '(seqs_len)\n', (1098, 1108), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import distance
from pymongo import MongoClient
import re
import distance_calculation
class GeneticAlgorithm:
    """
    Travelling-salesman optimiser: tours are permutations of the city indices,
    evolved by keeping the best n_parent tours each generation and filling the
    rest of the population with mutated copies of random parents.
    """
    # Initialization
    def __init__(self, path=None, n_gene=256, n_parent=10, change_ratio=0.1):
        """
        :param path: optional csv of coordinates; when given, the gene pool is initialised immediately
        :param n_gene: number of genes (candidate tours) per generation
        :param n_parent: number of individuals kept as parents each generation
        :param change_ratio: fraction of tour positions swapped by one mutation
        """
        self.n_gene = n_gene
        # BUG FIX: was hard-coded to 10, silently ignoring the n_parent argument
        self.n_parent = n_parent
        self.change_ratio = change_ratio
        self.distance = distance_calculation.DistCalculation()
        self.before_each_distance = []
        self.after_each_distance = []
        self.all_distance = []
        if path is not None:
            # BUG FIX: was self.set_loc(...), but the method is named set_location
            self.set_location(np.array(pd.read_csv(path)))

    # Initialize the gene pool randomly
    def init_genes(self, ):
        """Fill the gene pool with random permutations, then sort it by cost."""
        # BUG FIX: the np.int alias was removed in numpy >= 1.24; plain int is equivalent
        self.genes = np.zeros((self.n_gene, self.num_data), int)
        order = np.arange(self.num_data)
        for i in range(self.n_gene):
            np.random.shuffle(order)
            self.genes[i] = order.copy()
        self.sort()

    # Set the coordinates
    def set_location(self, locations):
        """Store the (x, y) coordinates, precompute the distance table and init the genes."""
        self.loc = locations                 # x, y coordinates
        self.num_data = len(self.loc)        # number of cities
        self.dist = distance.squareform(distance.pdist(self.loc))  # pairwise distance table
        self.init_genes()                    # initialise the gene pool

    def cost(self, order):
        """Total length of the closed tour visiting the cities in `order`."""
        return np.sum([self.dist[order[i], order[(i + 1) % self.num_data]] for i in np.arange(self.num_data)])

    def plot(self, country_list, order=None):
        """
        Plot the tour: the raw input order when `order` is None, otherwise the
        optimised order. Accumulates the tour length into all_distance.
        """
        if order is None:
            # initial layout
            for i in range(len(self.loc[:, 1]) - 1):
                country1 = self.loc[:, 1][i], self.loc[:, 0][i]
                country2 = self.loc[:, 1][i+1], self.loc[:, 0][i+1]
                print(country1, country2)
                # BUG FIX: 'dist' was never defined in this module — use the
                # DistCalculation instance instead.
                # NOTE(review): assumes DistCalculation provides dist_test — confirm
                print(self.distance.dist_test(country1, country2))
                self.before_each_distance.append(self.distance.dist_test(country1, country2))
            plt.plot(self.loc[:, 0], self.loc[:, 1])
            plt.plot(self.loc[:, 0], self.loc[:, 1], 'o', markersize=6)
            for i, (x, y) in enumerate(zip(self.loc[:, 0], self.loc[:, 1])):
                plt.annotate(country_list[i], (x, y))
        # optimised layout
        else:
            for i in range(len(self.loc[order, 1]) - 1):
                country3 = self.loc[order, 1][i], self.loc[order, 0][i]
                country4 = self.loc[order, 1][i+1], self.loc[order, 0][i+1]
                self.after_each_distance.append(self.distance.dist_on_sphere(country3, country4))
            plt.plot(self.loc[order, 0], self.loc[order, 1])
            plt.plot(self.loc[order, 0], self.loc[order, 1], 'o', markersize=6)
            for i, (x, y) in enumerate(zip(self.loc[:, 0], self.loc[:, 1])):
                plt.annotate(country_list[i], (x, y))
        plt.xlim(-180, 180)
        plt.ylim(-90, 90)
        plt.xlabel('longitude')
        plt.ylabel('latitude')
        plt.show()
        print('最適化実行後 : ' + str(sum(self.after_each_distance)))
        self.all_distance.append(round(sum(self.after_each_distance)))

    # Genetic Algorithm
    def gen_algo(self, n_step):
        """Run n_step generations and return the best tour found."""
        for i in range(n_step):
            print("Generation : %d, Cost : %lf" % (i, self.cost(self.genes[0])))
            self.evolution()
        self.result = self.genes[0]
        return self.result

    # Genetic evolution
    def evolution(self):
        """Replace every non-parent individual with a mutated copy of a random parent."""
        for i in range(self.n_parent, self.n_gene):
            self.genes[i] = self.mutation(np.random.randint(self.n_parent))
        self.sort()

    def sort(self):
        """Order the gene pool by ascending tour cost (best first)."""
        gene_cost = np.array([self.cost(i) for i in self.genes])
        self.genes = self.genes[np.argsort(gene_cost)]

    # Return a mutated copy of a gene
    def mutation(self, index):
        """Copy gene `index` and swap change_ratio * num_data random position pairs."""
        n_change = int(self.change_ratio * self.num_data)
        gene = self.genes[index].copy()
        for _ in range(n_change):
            left = np.random.randint(self.num_data)
            right = np.random.randint(self.num_data)
            gene[left], gene[right] = gene[right], gene[left]
        return gene

    def plot_result(self):
        """Plot the recorded tour length after each optimisation run."""
        x = [i for i in range(100, 2100, 100)]
        plt.plot(x, self.all_distance)
        plt.xticks(np.arange(100, 2100, 200))
        for (i, j, k) in zip(x, self.all_distance, self.all_distance):
            plt.plot(i, j, 'o')
            plt.annotate(k, xy=(i, j))
        plt.show()
def save_mongo(collection, db):
    """
    Load ./country.txt (one 'country,lat,lon' record per line) into the
    named mongo collection; the collection is created if it does not exist.

    :param collection: name of the target collection
    :param db: pymongo database object
    """
    target = db[collection]
    with open('./country.txt', 'r', encoding='utf-8') as text:
        for raw in text:
            # strip trailing newline characters only
            fields = re.sub('[\r\n]+$', '', raw).split(',')
            # insert one document per line (creates the collection on first use)
            target.insert({
                'country': fields[0],
                'ido': fields[1],
                'keido': fields[2],
            })
def make_matrix(collection, db, country_list):
    """
    Build an (n, 2) integer array of [keido, ido] coordinates from the mongo
    collection, appending each country name to country_list as a side effect.

    :param collection: name of the collection to read
    :param db: pymongo database object
    :param country_list: list the country names are appended to (mutated)
    :return: (coords, country_list) — the coordinate array and the name list
    """
    coords = np.empty((0, 2), int)
    for record in db[collection].find().limit(1000000):
        point = np.array([[int(record['keido']), int(record['ido'])]])
        coords = np.append(coords, point, axis=0)
        country_list.append(record['country'])
    return coords, country_list
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.annotate",
"numpy.empty",
"pandas.read_csv",
"numpy.zeros",
"numpy.argsort",
"numpy.random.randint",
"numpy.arange",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.y... | [((5360, 5381), 'numpy.empty', 'np.empty', (['(0, 2)', 'int'], {}), '((0, 2), int)\n', (5368, 5381), True, 'import numpy as np\n'), ((467, 505), 'distance_calculation.DistCalculation', 'distance_calculation.DistCalculation', ([], {}), '()\n', (503, 505), False, 'import distance_calculation\n'), ((782, 828), 'numpy.zeros', 'np.zeros', (['(self.n_gene, self.num_data)', 'np.int'], {}), '((self.n_gene, self.num_data), np.int)\n', (790, 828), True, 'import numpy as np\n'), ((845, 869), 'numpy.arange', 'np.arange', (['self.num_data'], {}), '(self.num_data)\n', (854, 869), True, 'import numpy as np\n'), ((2957, 2976), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-180)', '(180)'], {}), '(-180, 180)\n', (2965, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2985, 3002), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-90)', '(90)'], {}), '(-90, 90)\n', (2993, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""longitude"""'], {}), "('longitude')\n", (3021, 3034), True, 'import matplotlib.pyplot as plt\n'), ((3043, 3065), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""latitude"""'], {}), "('latitude')\n", (3053, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3082, 3084), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4499), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.all_distance'], {}), '(x, self.all_distance)\n', (4477, 4499), True, 'import matplotlib.pyplot as plt\n'), ((4696, 4706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4704, 4706), True, 'import matplotlib.pyplot as plt\n'), ((919, 943), 'numpy.random.shuffle', 'np.random.shuffle', (['order'], {}), '(order)\n', (936, 943), True, 'import numpy as np\n'), ((1195, 1219), 'scipy.spatial.distance.pdist', 'distance.pdist', (['self.loc'], {}), '(self.loc)\n', (1209, 1219), False, 'from scipy.spatial import distance\n'), ((2030, 2070), 
'matplotlib.pyplot.plot', 'plt.plot', (['self.loc[:, 0]', 'self.loc[:, 1]'], {}), '(self.loc[:, 0], self.loc[:, 1])\n', (2038, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2083, 2142), 'matplotlib.pyplot.plot', 'plt.plot', (['self.loc[:, 0]', 'self.loc[:, 1]', '"""o"""'], {'markersize': '(6)'}), "(self.loc[:, 0], self.loc[:, 1], 'o', markersize=6)\n", (2091, 2142), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2736), 'matplotlib.pyplot.plot', 'plt.plot', (['self.loc[order, 0]', 'self.loc[order, 1]'], {}), '(self.loc[order, 0], self.loc[order, 1])\n', (2696, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2749, 2816), 'matplotlib.pyplot.plot', 'plt.plot', (['self.loc[order, 0]', 'self.loc[order, 1]', '"""o"""'], {'markersize': '(6)'}), "(self.loc[order, 0], self.loc[order, 1], 'o', markersize=6)\n", (2757, 2816), True, 'import matplotlib.pyplot as plt\n'), ((3908, 3929), 'numpy.argsort', 'np.argsort', (['gene_cost'], {}), '(gene_cost)\n', (3918, 3929), True, 'import numpy as np\n'), ((4180, 4212), 'numpy.random.randint', 'np.random.randint', (['self.num_data'], {}), '(self.num_data)\n', (4197, 4212), True, 'import numpy as np\n'), ((4233, 4265), 'numpy.random.randint', 'np.random.randint', (['self.num_data'], {}), '(self.num_data)\n', (4250, 4265), True, 'import numpy as np\n'), ((4519, 4544), 'numpy.arange', 'np.arange', (['(100)', '(2100)', '(200)'], {}), '(100, 2100, 200)\n', (4528, 4544), True, 'import numpy as np\n'), ((4629, 4648), 'matplotlib.pyplot.plot', 'plt.plot', (['i', 'j', '"""o"""'], {}), "(i, j, 'o')\n", (4637, 4648), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4687), 'matplotlib.pyplot.annotate', 'plt.annotate', (['k'], {'xy': '(i, j)'}), '(k, xy=(i, j))\n', (4673, 4687), True, 'import matplotlib.pyplot as plt\n'), ((4906, 4934), 're.sub', 're.sub', (["'[\\r\\n]+$'", '""""""', 'line'], {}), "('[\\r\\n]+$', '', line)\n", (4912, 4934), False, 'import re\n'), ((2236, 2273), 'matplotlib.pyplot.annotate', 'plt.annotate', 
(['country_list[i]', '(x, y)'], {}), '(country_list[i], (x, y))\n', (2248, 2273), True, 'import matplotlib.pyplot as plt\n'), ((2910, 2947), 'matplotlib.pyplot.annotate', 'plt.annotate', (['country_list[i]', '(x, y)'], {}), '(country_list[i], (x, y))\n', (2922, 2947), True, 'import matplotlib.pyplot as plt\n'), ((3705, 3737), 'numpy.random.randint', 'np.random.randint', (['self.n_parent'], {}), '(self.n_parent)\n', (3722, 3737), True, 'import numpy as np\n'), ((677, 694), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (688, 694), True, 'import pandas as pd\n'), ((1381, 1405), 'numpy.arange', 'np.arange', (['self.num_data'], {}), '(self.num_data)\n', (1390, 1405), True, 'import numpy as np\n')] |
import numpy as np
import autoarray as aa
def test__pixelization_index_for_sub_slim_index__matches_util():
grid = aa.Grid2D.manual_slim(
[
[1.5, -1.0],
[1.3, 0.0],
[1.0, 1.9],
[-0.20, -1.0],
[-5.0, 0.32],
[6.5, 1.0],
[-0.34, -7.34],
[-0.34, 0.75],
[-6.0, 8.0],
],
pixel_scales=1.0,
shape_native=(3, 3),
)
pixelization_grid = aa.Grid2DRectangular.overlay_grid(
shape_native=(3, 3), grid=grid
)
mapper = aa.Mapper(
source_grid_slim=grid, source_pixelization_grid=pixelization_grid
)
pixelization_index_for_sub_slim_index_util = aa.util.grid_2d.grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim=grid,
shape_native=pixelization_grid.shape_native,
pixel_scales=pixelization_grid.pixel_scales,
origin=pixelization_grid.origin,
).astype(
"int"
)
assert (
mapper.pixelization_index_for_sub_slim_index
== pixelization_index_for_sub_slim_index_util
).all()
def test__reconstruction_from__matches_util():
grid = aa.Grid2D.manual_slim(
[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]],
pixel_scales=1.0,
shape_native=(2, 2),
)
pixelization_grid = aa.Grid2DRectangular.overlay_grid(
shape_native=(4, 3), grid=grid
)
mapper = aa.Mapper(
source_grid_slim=grid, source_pixelization_grid=pixelization_grid
)
solution = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0, 2.0, 3.0])
recon_pix = mapper.reconstruction_from(solution_vector=solution)
recon_pix_util = aa.util.array_2d.array_2d_native_from(
array_2d_slim=solution,
mask_2d=np.full(fill_value=False, shape=(4, 3)),
sub_size=1,
)
assert (recon_pix.native == recon_pix_util).all()
assert recon_pix.shape_native == (4, 3)
pixelization_grid = aa.Grid2DRectangular.overlay_grid(
shape_native=(3, 4), grid=grid
)
mapper = aa.Mapper(
source_grid_slim=grid, source_pixelization_grid=pixelization_grid
)
solution = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0, 2.0, 3.0])
recon_pix = mapper.reconstruction_from(solution_vector=solution)
recon_pix_util = aa.util.array_2d.array_2d_native_from(
array_2d_slim=solution,
mask_2d=np.full(fill_value=False, shape=(3, 4)),
sub_size=1,
)
assert (recon_pix.native == recon_pix_util).all()
assert recon_pix.shape_native == (3, 4)
def test__pixel_signals_from__matches_util(grid_2d_7x7, image_7x7):
pixelization_grid = aa.Grid2DRectangular.overlay_grid(
shape_native=(3, 3), grid=grid_2d_7x7
)
mapper = aa.Mapper(
source_grid_slim=grid_2d_7x7,
source_pixelization_grid=pixelization_grid,
hyper_data=image_7x7,
)
pixel_signals = mapper.pixel_signals_from(signal_scale=2.0)
pixel_signals_util = aa.util.mapper.adaptive_pixel_signals_from(
pixels=9,
signal_scale=2.0,
pixelization_index_for_sub_slim_index=mapper.pixelization_index_for_sub_slim_index,
slim_index_for_sub_slim_index=grid_2d_7x7.mask.slim_index_for_sub_slim_index,
hyper_image=image_7x7,
)
assert (pixel_signals == pixel_signals_util).all()
| [
"numpy.full",
"autoarray.util.mapper.adaptive_pixel_signals_from",
"autoarray.Grid2DRectangular.overlay_grid",
"numpy.array",
"autoarray.Mapper",
"autoarray.Grid2D.manual_slim",
"autoarray.util.grid_2d.grid_pixel_indexes_2d_slim_from"
] | [((129, 321), 'autoarray.Grid2D.manual_slim', 'aa.Grid2D.manual_slim', (['[[1.5, -1.0], [1.3, 0.0], [1.0, 1.9], [-0.2, -1.0], [-5.0, 0.32], [6.5, 1.0\n ], [-0.34, -7.34], [-0.34, 0.75], [-6.0, 8.0]]'], {'pixel_scales': '(1.0)', 'shape_native': '(3, 3)'}), '([[1.5, -1.0], [1.3, 0.0], [1.0, 1.9], [-0.2, -1.0], [\n -5.0, 0.32], [6.5, 1.0], [-0.34, -7.34], [-0.34, 0.75], [-6.0, 8.0]],\n pixel_scales=1.0, shape_native=(3, 3))\n', (150, 321), True, 'import autoarray as aa\n'), ((505, 570), 'autoarray.Grid2DRectangular.overlay_grid', 'aa.Grid2DRectangular.overlay_grid', ([], {'shape_native': '(3, 3)', 'grid': 'grid'}), '(shape_native=(3, 3), grid=grid)\n', (538, 570), True, 'import autoarray as aa\n'), ((603, 679), 'autoarray.Mapper', 'aa.Mapper', ([], {'source_grid_slim': 'grid', 'source_pixelization_grid': 'pixelization_grid'}), '(source_grid_slim=grid, source_pixelization_grid=pixelization_grid)\n', (612, 679), True, 'import autoarray as aa\n'), ((1223, 1337), 'autoarray.Grid2D.manual_slim', 'aa.Grid2D.manual_slim', (['[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]'], {'pixel_scales': '(1.0)', 'shape_native': '(2, 2)'}), '([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]],\n pixel_scales=1.0, shape_native=(2, 2))\n', (1244, 1337), True, 'import autoarray as aa\n'), ((1396, 1461), 'autoarray.Grid2DRectangular.overlay_grid', 'aa.Grid2DRectangular.overlay_grid', ([], {'shape_native': '(4, 3)', 'grid': 'grid'}), '(shape_native=(4, 3), grid=grid)\n', (1429, 1461), True, 'import autoarray as aa\n'), ((1494, 1570), 'autoarray.Mapper', 'aa.Mapper', ([], {'source_grid_slim': 'grid', 'source_pixelization_grid': 'pixelization_grid'}), '(source_grid_slim=grid, source_pixelization_grid=pixelization_grid)\n', (1503, 1570), True, 'import autoarray as aa\n'), ((1605, 1675), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0, 2.0, 3.0])\n', (1613, 1675), True, 'import numpy as 
np\n'), ((2053, 2118), 'autoarray.Grid2DRectangular.overlay_grid', 'aa.Grid2DRectangular.overlay_grid', ([], {'shape_native': '(3, 4)', 'grid': 'grid'}), '(shape_native=(3, 4), grid=grid)\n', (2086, 2118), True, 'import autoarray as aa\n'), ((2151, 2227), 'autoarray.Mapper', 'aa.Mapper', ([], {'source_grid_slim': 'grid', 'source_pixelization_grid': 'pixelization_grid'}), '(source_grid_slim=grid, source_pixelization_grid=pixelization_grid)\n', (2160, 2227), True, 'import autoarray as aa\n'), ((2262, 2332), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0, 2.0, 3.0])\n', (2270, 2332), True, 'import numpy as np\n'), ((2783, 2855), 'autoarray.Grid2DRectangular.overlay_grid', 'aa.Grid2DRectangular.overlay_grid', ([], {'shape_native': '(3, 3)', 'grid': 'grid_2d_7x7'}), '(shape_native=(3, 3), grid=grid_2d_7x7)\n', (2816, 2855), True, 'import autoarray as aa\n'), ((2888, 2998), 'autoarray.Mapper', 'aa.Mapper', ([], {'source_grid_slim': 'grid_2d_7x7', 'source_pixelization_grid': 'pixelization_grid', 'hyper_data': 'image_7x7'}), '(source_grid_slim=grid_2d_7x7, source_pixelization_grid=\n pixelization_grid, hyper_data=image_7x7)\n', (2897, 2998), True, 'import autoarray as aa\n'), ((3124, 3393), 'autoarray.util.mapper.adaptive_pixel_signals_from', 'aa.util.mapper.adaptive_pixel_signals_from', ([], {'pixels': '(9)', 'signal_scale': '(2.0)', 'pixelization_index_for_sub_slim_index': 'mapper.pixelization_index_for_sub_slim_index', 'slim_index_for_sub_slim_index': 'grid_2d_7x7.mask.slim_index_for_sub_slim_index', 'hyper_image': 'image_7x7'}), '(pixels=9, signal_scale=2.0,\n pixelization_index_for_sub_slim_index=mapper.\n pixelization_index_for_sub_slim_index, slim_index_for_sub_slim_index=\n grid_2d_7x7.mask.slim_index_for_sub_slim_index, hyper_image=image_7x7)\n', (3166, 3393), True, 'import autoarray as aa\n'), ((748, 953), 
'autoarray.util.grid_2d.grid_pixel_indexes_2d_slim_from', 'aa.util.grid_2d.grid_pixel_indexes_2d_slim_from', ([], {'grid_scaled_2d_slim': 'grid', 'shape_native': 'pixelization_grid.shape_native', 'pixel_scales': 'pixelization_grid.pixel_scales', 'origin': 'pixelization_grid.origin'}), '(grid_scaled_2d_slim=grid,\n shape_native=pixelization_grid.shape_native, pixel_scales=\n pixelization_grid.pixel_scales, origin=pixelization_grid.origin)\n', (795, 953), True, 'import autoarray as aa\n'), ((1857, 1896), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': '(4, 3)'}), '(fill_value=False, shape=(4, 3))\n', (1864, 1896), True, 'import numpy as np\n'), ((2514, 2553), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': '(3, 4)'}), '(fill_value=False, shape=(3, 4))\n', (2521, 2553), True, 'import numpy as np\n')] |
import numpy as np
from .DLX import DLX
from .Node import Node
class Sudoku:
    """Thin facade over the DLX exact-cover solver for 9x9 sudoku puzzles."""

    def solve(self, sudokArr):
        """Run the DLX search on the puzzle; returns (solution_nodes, found)."""
        dlx = DLX()
        dlx.create_matrix(sudokArr)
        return dlx.search()

    # converts the quadruple linked list solution form back to numpy array
    def returnSol(self, solved, found, pretty=False, autoPrint=False):
        """Convert DLX solution nodes into a 9x9 numpy array of digits.

        If no solution was found, a 9x9 array of -1.0 is returned.
        ``pretty`` prints a human-readable board; ``autoPrint`` prints the array.
        """
        if not found:
            # enforce -1.0 (not -1) so an invalid result is still a float array
            return np.full((9, 9), -1.0)
        cells = [0] * 81
        for node in solved:
            # DLX row r encodes cell (r-1)//9 holding digit r%9 (9 when r%9 == 0)
            cells[(node.row - 1) // 9] = node.row % 9 or 9
        if pretty:  # nicer, box-separated view of the board
            digits = "".join(str(v) for v in cells)
            bands = []
            for band in range(0, 81, 27):
                rows = []
                for row in range(0, 27, 9):
                    groups = " ".join(digits[band + row + col:band + row + col + 3]
                                      for col in range(0, 9, 3))
                    # joining the characters spaces out digits, with wider gaps between boxes
                    rows.append(" ".join(groups))
                bands.append("\n".join(rows))
            print("\n\n".join(bands), end="\n\n")
        # flat list -> 9x9 float array (row-major)
        solNPA = np.asarray(cells, dtype=np.float64).reshape(9, 9)
        if autoPrint:
            print(solNPA)
        return solNPA
| [
"numpy.full"
] | [((620, 641), 'numpy.full', 'np.full', (['(9, 9)', '(-1.0)'], {}), '((9, 9), -1.0)\n', (627, 641), True, 'import numpy as np\n')] |
"""# Part 2: Second Approach :
Gilbert-Shannon-Reeds-Shuffling-Algorithm
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
def get_random_number_for_right_deck(num):
    """Pick a uniformly random size (0..num inclusive) for the right half-deck.

    Raises:
        ValueError: if ``num`` is not positive.
    """
    if num <= 0:
        raise ValueError("deck size cannot be less than or equal to zero")
    return np.random.randint(0, num + 1)
def should_drop_from_right_deck(n_left, n_right):
    """Decide (50/50 when both halves are non-empty) whether the next card
    drops from the right half-deck.

    Raises:
        ValueError: if either count is negative.
    """
    if n_left < 0 or n_right < 0:
        raise ValueError("n_left or n_right cannot be negative")
    if n_right == 0:  # nothing left on the right
        return False
    if n_left == 0:   # nothing left on the left
        return True
    return np.random.randint(1, n_left + n_right) % 2 == 0
def shuffle(cards, get_random_number_for_right_deck, should_drop_from_right_deck):
    """Riffle-shuffle ``cards`` once using the supplied split/drop policies.

    Args:
        cards: the deck to shuffle (indexable sequence)
        get_random_number_for_right_deck: fn(deck_size) -> size of the right half
        should_drop_from_right_deck: fn(n_left, n_right) -> True to drop right

    Returns:
        np.ndarray with the interleaved deck.
    """
    deck_size = len(cards)
    right_count = get_random_number_for_right_deck(deck_size)
    left_count = deck_size - right_count
    left_pos = -deck_size  # negative index walking up the left half
    merged = []
    for _ in range(deck_size):
        if should_drop_from_right_deck(left_count, right_count):
            # top of the remaining right half sits at deck_size - right_count
            merged.append(cards[deck_size - right_count])
            right_count -= 1
        elif left_count != 0:
            merged.append(cards[left_pos])
            left_count -= 1
            left_pos += 1
    return np.array(merged)
#implenting Gibert shannon reeds model
def genrate_right_deck_gsr(num):
    """GSR cut: size of the right half-deck drawn from Binomial(num, 0.5).

    Raises:
        ValueError: if ``num`` is not positive.
    """
    if num <= 0:
        raise ValueError("deck size cannot be less than or equal to zero")
    return np.random.binomial(num, p=0.5)
def drop_right_deck_gsr(n_left, n_right):
    """Decide whether the next card drops from the right half (GSR model).

    Under Gilbert-Shannon-Reeds a card falls from a half with probability
    proportional to that half's current size, i.e.
    P(drop right) = n_right / (n_left + n_right).

    Args:
        n_left: cards remaining in the left half (>= 0)
        n_right: cards remaining in the right half (>= 0)

    Returns:
        True to drop from the right half, False to drop from the left.

    Raises:
        ValueError: if either count is negative.
    """
    if n_left < 0 or n_right < 0:
        raise ValueError("n_left or n_right cannot be negative")
    if n_right == 0:
        return False
    if n_left == 0:
        return True
    # Bug fixes: np.random.binomial's first parameter is named ``n`` (the
    # original passed ``num=1``, which raises TypeError), and binomial(1, p)
    # is 1 with probability p — the original tested ``== 0``, which dropped
    # from the right with probability n_left/(n_left+n_right), i.e. inverted.
    return np.random.binomial(n=1, p=n_right / (n_left + n_right)) == 1
#For testing GSR Shuffle call
#GSRShuffledCards=shuffle(cards,genrate_right_deck_gsr,drop_right_deck_gsr)
def differentiate_sequence(seq1, seq2):
    """Count how many distinct values the two sequences have in common."""
    return len(set(seq1) & set(seq2))
def caluclate_error_shuffle(seq1, seq2, div):
    """Accumulate squared overlap counts between all (div-1)-card windows.

    Every window of seq1 is compared against every window of seq2; the
    squared number of shared distinct values is summed. Sequences no longer
    than ``div`` short-circuit to len(seq1)**2.

    Raises:
        ValueError: if the sequences differ in length.
    """
    size_a = len(seq1)
    size_b = len(seq2)
    if size_a != size_b:
        raise ValueError("sizes of sequences cannot be different")
    if size_a <= div:
        return np.square(size_a)
    total = 0
    for i in range(size_a):
        if i + div > size_a:  # window would run off the end
            break
        window_a = sorted(seq1[i:i + div - 1])
        for j in range(size_b):
            if j + div > size_b:
                break
            window_b = sorted(seq2[j:j + div - 1])
            # squared count of distinct values the two windows share
            total += np.square(len(set(window_a) & set(window_b)))
    return total
def findRandomness(seq1, seq2):
    """Score how shuffled ``seq2`` is relative to ``seq1``.

    The overlap error over 5-card windows is normalised by the maximum
    possible error (seq1 against itself); 1 means maximally different,
    0 means identical.
    """
    window = 5  # any of the 5! window orderings counts as overlap
    observed_error = caluclate_error_shuffle(seq1, seq2, window)
    max_error = caluclate_error_shuffle(seq1, seq1, window)
    return 1 - observed_error / max_error
def caluclateRandomnessShuffles(cards, sizeShuffle):
    """Repeatedly GSR-shuffle ``cards`` and score each deck against the original.

    Args:
        cards: the original (ordered) deck
        sizeShuffle: how many successive shuffles to perform

    Returns:
        (shuffle_counts, randomness_values): parallel lists with one entry
        per shuffle, pairing the shuffle number with its randomness score.
    """
    shuffle_counts = []
    randomness_values = []
    current_deck = shuffle(cards, genrate_right_deck_gsr, drop_right_deck_gsr)
    for count in range(sizeShuffle):
        score = findRandomness(cards, current_deck)
        shuffle_counts.append(count)
        randomness_values.append(score)
        # shuffle again for the next iteration
        current_deck = shuffle(current_deck, genrate_right_deck_gsr, drop_right_deck_gsr)
    return shuffle_counts, randomness_values
def createGraph(NoOfshuffle, randdomNess, title, estimateShuffle):
    """Plot randomness against shuffle count and mark the estimated optimum.

    Vertical lines are drawn at the randomness values interpolated around
    ``estimateShuffle`` (two shuffles either side).
    """
    total_shuffles = len(NoOfshuffle)
    marker_ys = [estimateShuffle + offset for offset in range(-2, 3)]
    marker_xs = np.interp(marker_ys, NoOfshuffle, randdomNess)
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.plot(randdomNess, NoOfshuffle, color='r')
    ax.set_xlabel(xlabel='RandomNess', size=20)
    ax.set_ylabel(ylabel='NoOfShuffles', size=20)
    ax.set_title(title)
    ax.set_yticks(np.arange(1, total_shuffles + 1))  # one tick per shuffle
    ax.grid(axis='y')
    ax.vlines(marker_xs, *ax.get_ylim())
    plt.show()
def boundaryCheckGSRalgo(startDeckSize, totalDecks, sizeShuffle):
    """Run the GSR model over doubling deck sizes and plot each randomness curve.

    Args:
        startDeckSize: size of the first deck
        totalDecks: how many decks to test (each double the previous size)
        sizeShuffle: number of shuffles to perform per deck
    """
    deck_sizes = [startDeckSize * (2 ** k) for k in range(totalDecks)]
    for size in deck_sizes:
        deck = np.arange(1, size + 1)
        counts, randomness = caluclateRandomnessShuffles(deck, sizeShuffle)
        # theoretical estimate: ~1.5 * log2(deck size) shuffles
        expected = np.floor(1.5 * np.log2(size))
        createGraph(counts, randomness, 'Deck of ' + str(size), expected)
# Smoke-test the helper functions before the full boundary check.
get_random_number_for_right_deck(10)
should_drop_from_right_deck(2, 5)
cards = np.arange(1, 53)
shuffle(cards, get_random_number_for_right_deck, should_drop_from_right_deck)
genrate_right_deck_gsr(10)
# Bug fix: drop_right_deck_gsr takes both half-deck sizes (was called with one arg).
drop_right_deck_gsr(5, 5)
# Bug fix: the GSR shuffle result was never assigned, so GSRShuffledCards was
# undefined (NameError) when scoring the deck below.
GSRShuffledCards = shuffle(cards, genrate_right_deck_gsr, drop_right_deck_gsr)
caluclate_error_shuffle(cards, GSRShuffledCards, 5)
#for checking
boundaryCheckGSRalgo(26, 3, 20)
| [
"matplotlib.pyplot.show",
"numpy.random.binomial",
"numpy.log2",
"numpy.square",
"numpy.random.randint",
"numpy.array",
"numpy.arange",
"numpy.interp",
"matplotlib.pyplot.subplots"
] | [((5526, 5542), 'numpy.arange', 'np.arange', (['(1)', '(53)'], {}), '(1, 53)\n', (5535, 5542), True, 'import numpy as np\n'), ((1301, 1324), 'numpy.array', 'np.array', (['shuffledCards'], {}), '(shuffledCards)\n', (1309, 1324), True, 'import numpy as np\n'), ((4156, 4202), 'numpy.interp', 'np.interp', (['y_intrsct', 'NoOfshuffle', 'randdomNess'], {}), '(y_intrsct, NoOfshuffle, randdomNess)\n', (4165, 4202), True, 'import numpy as np\n'), ((4218, 4248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4230, 4248), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4465), 'numpy.arange', 'np.arange', (['(1)', '(sizeShuffle + 1)'], {}), '(1, sizeShuffle + 1)\n', (4445, 4465), True, 'import numpy as np\n'), ((4568, 4578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4576, 4578), True, 'import matplotlib.pyplot as plt\n'), ((286, 315), 'numpy.random.randint', 'np.random.randint', (['(0)', '(num + 1)'], {}), '(0, num + 1)\n', (303, 315), True, 'import numpy as np\n'), ((1427, 1457), 'numpy.random.binomial', 'np.random.binomial', (['num'], {'p': '(0.5)'}), '(num, p=0.5)\n', (1445, 1457), True, 'import numpy as np\n'), ((5180, 5206), 'numpy.arange', 'np.arange', (['(1)', '(decks[i] + 1)'], {}), '(1, decks[i] + 1)\n', (5189, 5206), True, 'import numpy as np\n'), ((2320, 2333), 'numpy.square', 'np.square', (['l1'], {}), '(l1)\n', (2329, 2333), True, 'import numpy as np\n'), ((616, 654), 'numpy.random.randint', 'np.random.randint', (['(1)', '(n_left + n_right)'], {}), '(1, n_left + n_right)\n', (633, 654), True, 'import numpy as np\n'), ((1760, 1817), 'numpy.random.binomial', 'np.random.binomial', ([], {'num': '(1)', 'p': '(n_right / (n_left + n_right))'}), '(num=1, p=n_right / (n_left + n_right))\n', (1778, 1817), True, 'import numpy as np\n'), ((5361, 5378), 'numpy.log2', 'np.log2', (['decks[i]'], {}), '(decks[i])\n', (5368, 5378), True, 'import numpy as np\n')] |
import pandas as pd
import numpy
import numpy.random
import os
from sklearn.metrics import pairwise_distances
import pickle
TAGS = ["numerical-binsensitive"] #, "categorical-binsensitive"]
TRAINING_PERCENT = 2.0 / 3.0
class ProcessedData():
    """Holds the per-tag preprocessed dataframes for one dataset and produces
    cached train/test splits and pickled pairwise-distance matrices."""

    def __init__(self, data_obj):
        self.data = data_obj
        # One dataframe per preprocessing tag, read from the dataset's CSV files.
        self.dfs = dict((k, pd.read_csv(self.data.get_filename(k)))
                        for k in TAGS)
        self.splits = dict((k, []) for k in TAGS)
        self.has_splits = False

    def get_processed_filename(self, tag):
        """Return the on-disk filename for the processed data of ``tag``."""
        return self.data.get_filename(tag)

    def get_dataframe(self, tag):
        """Return the cached dataframe for ``tag``."""
        return self.dfs[tag]

    def create_train_test_splits(self, num):
        """Create ``num`` random train/test splits (cached after the first call).

        Each split keeps TRAINING_PERCENT of the rows for training; the same
        row shuffle is applied to every tag so the splits stay consistent.
        """
        if self.has_splits:
            return self.splits
        for i in range(0, num):
            # we first shuffle a list of indices so that each subprocessed data
            # is split consistently
            n = len(list(self.dfs.values())[0])
            a = numpy.arange(n)
            numpy.random.shuffle(a)
            split_ix = int(n * TRAINING_PERCENT)
            train_fraction = a[:split_ix]
            test_fraction = a[split_ix:]
            for (k, v) in self.dfs.items():
                train = self.dfs[k].iloc[train_fraction]
                test = self.dfs[k].iloc[test_fraction]
                self.splits[k].append((train, test))
        self.has_splits = True
        return self.splits

    def get_sensitive_values(self, tag):
        """
        Returns a dictionary mapping sensitive attributes in the data to a list of all possible
        sensitive values that appear.
        """
        df = self.get_dataframe(tag)
        all_sens = self.data.get_sensitive_attributes_with_joint()
        sensdict = {}
        for sens in all_sens:
            sensdict[sens] = list(set(df[sens].values.tolist()))
        return sensdict

    def generate_distance_matrix(self, distance_metric='euclidean'):
        """Compute and pickle one pairwise-distance matrix per tag.

        Raises:
            Exception: if ``distance_metric`` is not a supported metric.
        """
        if distance_metric not in ['euclidean', 'cosine', 'seuclidean']:
            # message now lists all accepted metrics (seuclidean was missing)
            raise Exception('In order to compute the distance matrix, type should be euclidean, cosine or seuclidean')
        for k in TAGS:
            class_attribute = self.data.get_class_attribute()
            df = self.dfs[k]
            path = os.path.splitext(self.data.get_filename(k))[0]
            filename = path + '_distance_matrix_' + distance_metric + '.pkl'
            # Bug fix: exclude the dataset's actual class column. The original
            # compared against the literal string 'class_attribute' (leaving the
            # variable above unused), so the label column was never dropped.
            features = df.loc[:, df.columns != class_attribute]
            distance_matrix = pairwise_distances(features, metric=distance_metric)
            # Save as pkl file; 'with' guarantees the handle is closed.
            with open(filename, 'wb') as out_file:
                pickle.dump(distance_matrix, out_file, protocol=4)
            print("Saved distance matrix for dataset " + self.data.get_dataset_name())
        return None
| [
"sklearn.metrics.pairwise_distances",
"numpy.arange",
"numpy.random.shuffle"
] | [((966, 981), 'numpy.arange', 'numpy.arange', (['n'], {}), '(n)\n', (978, 981), False, 'import numpy\n'), ((994, 1017), 'numpy.random.shuffle', 'numpy.random.shuffle', (['a'], {}), '(a)\n', (1014, 1017), False, 'import numpy\n'), ((2545, 2597), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['features'], {'metric': 'distance_metric'}), '(features, metric=distance_metric)\n', (2563, 2597), False, 'from sklearn.metrics import pairwise_distances\n')] |
import cv2
import numpy as np
from utils import conversions as conv
def read_pair(pair_file_path, line_index=0):
    """Load the RGB/depth image pair listed on line ``line_index`` of an
    association file.

    Each line is expected as: timestamp rgb_path timestamp depth_path
    (paths relative to the rgbd_dataset_freiburg2_desk directory).
    The grayscale image is scaled to [0, 1]; depth is divided by the
    5000 depth-scale factor.

    Returns:
        (image, depth, timestamp)
    """
    # 'with' closes the file handle (it was previously leaked)
    with open(pair_file_path) as f:
        line = f.readlines()[line_index].split()
    rgb_path = 'rgbd_dataset_freiburg2_desk/' + line[1]
    depth_path = 'rgbd_dataset_freiburg2_desk/' + line[3]
    timestamp = line[0]
    image = cv2.imread(rgb_path, cv2.IMREAD_GRAYSCALE).astype('float64') / 255
    depth = cv2.imread(depth_path, cv2.IMREAD_ANYDEPTH).astype('float64') / 5000
    return image, depth, timestamp
def read_absolute_poses(pose_path):
    """Read a pose file and convert each line to a transformation.

    Each line is expected to hold a timestamp followed by a 7-value pose
    that conv.quater_to_trans can convert; the timestamp is skipped.

    Returns:
        list of transforms, one per line of the file.
    """
    # 'with' closes the file handle (it was previously leaked)
    with open(pose_path) as f:
        lines = f.readlines()
    absolute_pose = []
    for line in lines:
        pose = line.split()[1:]  # drop the timestamp column
        trans = conv.quater_to_trans(np.asarray(pose, dtype='double'))
        absolute_pose.append(trans)
    return absolute_pose
def read_pose_index(pose_ind_path):
    """Read a pose-index file: the second whitespace-separated token of each
    line is parsed as an integer index.

    Returns:
        numpy int array with one index per line.
    """
    # 'with' closes the file handle (it was previously leaked)
    with open(pose_ind_path) as f:
        lines = f.readlines()
    num = len(lines)
    pose_index = np.zeros(num, dtype=int)
    for i in range(num):
        pose_index[i] = lines[i].split()[1]
    return pose_index
| [
"cv2.imread",
"numpy.asarray",
"numpy.zeros"
] | [((983, 1007), 'numpy.zeros', 'np.zeros', (['num'], {'dtype': 'int'}), '(num, dtype=int)\n', (991, 1007), True, 'import numpy as np\n'), ((759, 791), 'numpy.asarray', 'np.asarray', (['pose'], {'dtype': '"""double"""'}), "(pose, dtype='double')\n", (769, 791), True, 'import numpy as np\n'), ((339, 381), 'cv2.imread', 'cv2.imread', (['rgb_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(rgb_path, cv2.IMREAD_GRAYSCALE)\n', (349, 381), False, 'import cv2\n'), ((418, 461), 'cv2.imread', 'cv2.imread', (['depth_path', 'cv2.IMREAD_ANYDEPTH'], {}), '(depth_path, cv2.IMREAD_ANYDEPTH)\n', (428, 461), False, 'import cv2\n')] |
#!/usr/bin/env python
import math
import numpy as np
import operator
import pygame
from scripts.grid import Grid
from scripts.motors import Motors
def action2drive(action):
    """Map an override action name to a [left, right] motor drive pair.

    Unknown actions (including "None") map to a stopped [0, 0] command.
    """
    commands = {
        "Reverse": [-255, -255],
        "Left": [-255, 255],
        "Right": [255, -255],
        "Forward": [255, 255],
    }
    return commands.get(action, [0, 0])
class Buggy(object):
    """Differential-drive buggy model.

    Tracks pose (position/heading) from wheel encoders and a gyroscope,
    drives the motors (with sonar-based overrides), steers servos, and
    feeds sonar readings into an occupancy grid.
    """

    def __init__(self, nb=0, col=(0, 0, 0), pos=(0, 0), angle=0, res=20,
                 sonars=None, encoders=None, servos=None, cameras=None, gyroscopes=None):
        self.nb = nb
        self.col = col
        self.pos = pos
        self.angle = angle
        self.mode = "Manual"
        self.modes = ["Manual", "Roam"]
        self.grid = Grid(300, res)
        self.servo_angles = [0, 0]
        self.sonars = sonars
        self.servos = servos
        self.cameras = cameras
        self.encoders = encoders
        self.gyroscopes = gyroscopes
        self.current_camera = 0
        self.motors = Motors(nb)
        self.encoder_last = [encoder.read() for encoder in encoders]   # For removing offset
        self.body = ((-6, -9), (6, -9), (6, 9), (-6, 9))               # outline in local coords

    def update(self, drive_cmd, servo_cmd):
        """Advance one control step: apply drive/servo commands, refresh pose and grid."""
        action = self._check_for_override()                     # Check sonar devices for override commands
        if action != "None":                                    # If the override command is not "None"
            drive_cmd = action2drive(action)                    # Get the drive command, given the action to take
        self.motors.drive(drive_cmd)                            # Publish motor control (drive)
        disp = self._get_displacement()                         # Calculate total displacement
        x = disp * math.sin(-self.angle)                        # Calculate displacement in x
        y = disp * math.cos(-self.angle)                        # Calculate displacement in y
        self.pos = [self.pos[0] + x, self.pos[1] + y]           # Calculate the position of the buggy
        self._update_sonars()                                   # Update sonar devices
        self._update_servos(servo_cmd)                          # Update the servo angles
        self._update_gyroscopes()                               # Update gyroscope angles
        self.grid.update(self.sonars)                           # Update the grid

    def draw(self, display, pos, scale):
        """Draw the buggy body and its sonar devices onto a pygame display."""
        outline = self._transform(pos, scale)                   # Transform body given buggy position and orientation
        pygame.draw.polygon(display, self.col, outline)         # Draw buggy
        self._draw_sonar(display, pos, scale)                   # Draw each sonar device too

    def get_frame(self):
        """Return a frame from the current camera, or False if no cameras exist."""
        if self.cameras:
            return self.cameras[self.current_camera].get_frame()
        else:
            return False

    def _check_for_override(self):
        """Return the first sonar action whose reading is under its minimum distance."""
        for sonar in self.sonars:
            if sonar.action != "None":                              # If the device action is not "None"
                if sonar.data < sonar.min_dist and sonar.data != 0: # If the sonar data is lower than min_dist
                    return sonar.action                             # Return the action to take
        return "None"

    def _update_gyroscopes(self):
        """Read each gyroscope and update the heading from its configured axis."""
        for gyroscope in self.gyroscopes:
            data = gyroscope.get_data()
            for ch in ['(', ' ', ')']:          # Characters to remove from string
                if ch in data:
                    data = data.replace(ch, '') # Remove character
            data = data.split(',')              # Split data by ,
            self.angle = -float(data[gyroscope.axis])  # Convert to float (- due to phone orientation)

    def _update_sonars(self):
        """Refresh every sonar with the current pose."""
        for sonar in self.sonars:
            sonar.update(self.pos, self.angle)  # Update all sonar

    def _update_servos(self, servo_change):
        """Apply a per-axis change (or "reset") to the servo angles, clamped to limits."""
        for i, angle in enumerate(servo_change):
            if angle == "reset":                         # If servo is required to reset
                servo_change[i] = -self.servo_angles[i]  # Change in sonar angle = - current angle
        # Bug fix: list(...) is required — in Python 3 a bare map object is not
        # subscriptable, so the indexing below raised TypeError.
        self.servo_angles = list(map(operator.add, self.servo_angles, servo_change))
        for servo in self.servos:                              # For each servo
            if self.servo_angles[servo.axis] > servo.max:      # Restrict angles as per config file
                self.servo_angles[servo.axis] = servo.max
            elif self.servo_angles[servo.axis] < servo.min:
                self.servo_angles[servo.axis] = servo.min
            servo.move(self.servo_angles[servo.axis])          # Move the servo

    def _draw_sonar(self, display, pos, scale):
        """Update and draw each sonar device."""
        for sonar in self.sonars:
            sonar.update(self.pos, self.angle)           # Update sonar device
            sonar.draw(display, pos, self.angle, scale)  # Draw sonar device

    def _transform(self, pos, scale):
        """Rotate/scale/translate the body outline into display coordinates."""
        outline = []                            # Initialise outline of buggy
        for point in self.body:                 # For every point in the outline
            if point[0]:                        # If point != 0
                angle = math.atan(-float(point[1]) / float(point[0])) - self.angle  # Calculate angle
            else:                               # Else avoid divide by zero
                angle = math.pi / 4 - self.angle  # atan(x/0) = 90
            if point[0] < 0:                    # If x coord of point is less than zero
                angle += math.pi                # Flip by 180 degrees
            l = (point[0] ** 2 + point[1] ** 2) ** 0.5                    # Hypotenuse of point
            x = round(-l * math.cos(angle), 0) * scale + pos[0]           # Rotate and shift x
            y = round(-l * math.sin(angle), 0) * scale + pos[1]           # -l because of pixel coordinate system
            outline.append([x, y])              # Append coord to outline
        return outline

    def _get_displacement(self):
        """Mean encoder displacement since last read; zero while twisting in place."""
        twisting = self.motors.left_dir != self.motors.right_dir  # twisting = true if motor dirs are opposite
        encoder_data = [float(encoder.read()) for encoder in self.encoders]
        encoder_step = [float(encoder.dist_per_tick) for encoder in self.encoders]
        if twisting:
            self.encoder_last = encoder_data   # Store last encoder value
            disp = 0                           # displacement = 0
        else:
            disp = np.array(encoder_data) - np.array(self.encoder_last)  # Number of clicks since last checked
            disp *= np.array(encoder_step)     # Multiply by distance travelled per click
            disp = np.mean(disp)               # Take mean encoder value
            self.encoder_last = encoder_data   # Difference between current and last value
        return disp
| [
"scripts.motors.Motors",
"scripts.grid.Grid",
"math.sin",
"numpy.mean",
"numpy.array",
"math.cos",
"pygame.draw.polygon"
] | [((889, 903), 'scripts.grid.Grid', 'Grid', (['(300)', 'res'], {}), '(300, res)\n', (893, 903), False, 'from scripts.grid import Grid\n'), ((1152, 1162), 'scripts.motors.Motors', 'Motors', (['nb'], {}), '(nb)\n', (1158, 1162), False, 'from scripts.motors import Motors\n'), ((2792, 2839), 'pygame.draw.polygon', 'pygame.draw.polygon', (['display', 'self.col', 'outline'], {}), '(display, self.col, outline)\n', (2811, 2839), False, 'import pygame\n'), ((1940, 1961), 'math.sin', 'math.sin', (['(-self.angle)'], {}), '(-self.angle)\n', (1948, 1961), False, 'import math\n'), ((2042, 2063), 'math.cos', 'math.cos', (['(-self.angle)'], {}), '(-self.angle)\n', (2050, 2063), False, 'import math\n'), ((7544, 7566), 'numpy.array', 'np.array', (['encoder_step'], {}), '(encoder_step)\n', (7552, 7566), True, 'import numpy as np\n'), ((7666, 7679), 'numpy.mean', 'np.mean', (['disp'], {}), '(disp)\n', (7673, 7679), True, 'import numpy as np\n'), ((7425, 7447), 'numpy.array', 'np.array', (['encoder_data'], {}), '(encoder_data)\n', (7433, 7447), True, 'import numpy as np\n'), ((7450, 7477), 'numpy.array', 'np.array', (['self.encoder_last'], {}), '(self.encoder_last)\n', (7458, 7477), True, 'import numpy as np\n'), ((6461, 6476), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (6469, 6476), False, 'import math\n'), ((6562, 6577), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (6570, 6577), False, 'import math\n')] |
#!/usr/bin/env python
import numpy as np
from . import pg_utilities
from . import imports_and_exports
import sys
from numpy import matlib
"""
.. module:: analyse_tree
   :synopsis: Branching-tree analysis: element ordering, branch grouping, branch
       angles, and per-generation/per-order summary statistics.
"""
def analyse_branching(geom, ordering_system, conversionFactor, voxelSize):
    """Run a branching analysis on the tree in ``geom`` and print summary tables.

    Inputs:
    - geom: geometry structure (element list, node locations, radii/lengths)
    - ordering_system: ordering used to group branches (e.g. 'strahler', 'horsfield')
    - conversionFactor, voxelSize: scaling values passed to the branch-angle analysis

    Returns: geom, augmented by the branch-angle analysis; a table of branching
    properties (per generation, per order) plus summary statistics is printed.
    """
    connectivity = pg_utilities.element_connectivity_1D(geom['nodes'], geom['elems'])
    order_info = evaluate_orders(geom['nodes'], geom['elems'])
    # Group elements into branches under the requested ordering system
    branch_geom = arrange_by_branches(geom, connectivity['elem_up'],
                                      order_info[ordering_system], order_info['generation'])
    geom, branch_geom = find_branch_angles(geom, order_info, connectivity,
                                           branch_geom, voxelSize, conversionFactor)
    mm_results = major_minor(geom, connectivity['elem_down'])  # major/minor child statistics
    # Tabulate per-generation and per-order data
    generation_summary_statistics(geom, order_info, mm_results)
    summary_statistics(branch_geom, geom, order_info, mm_results, 'strahler')
    return geom
def arrange_by_branches(geom, elem_up, order, generation):
    """Group the tree's elements into branches and accumulate branch properties.

    A branch is a maximal run of elements sharing the same generation as their
    upstream element; a new branch number is opened whenever the generation
    changes across a parent/child pair.

    Inputs:
    - geom: contains 'elems', 'nodes' and per-element 'length'/'radii'
    - elem_up: per-element upstream-element table (column 1 holds the parent index)
    - order: per-element order (e.g. strahler)
    - generation: per-element generation number

    Returns a dict with per-branch arrays:
    - 'radii': mean radius, 'length': summed length,
      'euclidean length': straight-line start-to-end distance,
      'order': order of the branch,
    and 'branches': per-element branch number (length = number of elements).
    """
    num_elems = len(order)
    branch_of_elem = np.zeros(num_elems, dtype=int)
    current = 0
    for ne in range(num_elems):
        if generation[ne] != generation[elem_up[ne, 1]]:
            # generation changed -> this element starts a new branch
            current = current + 1
        else:
            # continues its upstream element's branch
            current = branch_of_elem[elem_up[ne, 1]]
        branch_of_elem[ne] = current
    num_branches = int(max(branch_of_elem)) + 1  # including the inlet branch

    lengths = geom['length']
    radii = geom['radii']
    nodes = geom['nodes']
    elems = geom['elems']
    branch_rad = np.zeros(num_branches)
    branch_len = np.zeros(num_branches)
    branch_euc = np.zeros(num_branches)
    branch_order = -1. * np.ones(num_branches)
    for b in range(num_branches):
        members = np.where(branch_of_elem == b)[0]  # elements belonging to this branch
        for ne in members:
            branch_order[b] = order[ne]
            branch_len[b] = branch_len[b] + lengths[ne]
            branch_rad[b] = branch_rad[b] + radii[ne]
        branch_rad[b] = branch_rad[b] / len(members)  # average radius over the branch
        # straight-line distance from the branch's first node to its last node
        first_node = nodes[int(elems[members[0], 1]), :]
        last_node = nodes[int(elems[members[-1], 2]), :]
        branch_euc[b] = np.sqrt(np.sum(np.square(first_node[1:4] - last_node[1:4])))
    return {'radii': branch_rad, 'length': branch_len, 'euclidean length': branch_euc, 'order': branch_order,
            'branches': branch_of_elem}
def arrange_by_strahler_order(geom, find_inlet_loc, inlet_loc):
    """ Rearranges elems (and corresponding properties) according to their strahler order
    Inputs:
       - geom: A geometry structure consisting of element list, node location and radii/lengths
       - inlet_loc: the coordinates of the parent node for the entire tree (if known)
       - find_inlet_loc: boolean variable specifying whether to use inlet location provided (0) or to find the inlet location automatically (1)
    Returns:
       - geom: contains elems and properties, reordered according to strahler order so that no element can be higher in the element list than a higher order branch
    """
    # set up arrays: elem_properties packs radius, length, euclidean length and
    # the element's two node numbers side-by-side so everything is reordered together
    nodes = geom['nodes']
    elem_properties = np.column_stack([geom['radii'], geom['length'], geom['euclidean length'], geom['elems']])
    elems = np.copy(geom['elems'])  # as elems is altered in this function
    elems = elems[:, 1:3]  # get rid of first column, which contains the element number
    radii = geom['radii']
    Ne = len(elems)
    Nn = len(nodes)
    elem_properties_new = np.zeros([Ne, 6])
    # find parent node (uses inlet_loc directly, or locates it, per find_inlet_loc)
    (elems, elem_properties) = find_parent_node(find_inlet_loc, inlet_loc, nodes, radii, elems, elem_properties)
    # loop through by strahler order: repeatedly peel off the current terminal
    # elements, recording them in elem_properties_new in discovery order
    counter_new = 0
    counter = 1
    while (counter < Ne):
        # find elements which are terminal
        terminal_elems = np.zeros([Ne, 1])
        # go through each node
        for i in range(0, Nn + 1):
            # find number of occurrences of the node
            places = np.where(elems == i)
            ind1 = places[0]
            ind2 = places[1]
            if (len(ind1) == 1) and ((ind1[0]) != 0):  # if occurs once, then element is terminal (avoids root element)
                ind1 = ind1[0]
                ind2 = ind2[0]
                # swap to ensure element points right way (terminal node in column 1)
                if ind2 == 0:
                    elems[ind1, :] = row_swap_1d(np.squeeze(elems[ind1, :]), 1, 0)
                    elem_properties[ind1, 4:6] = row_swap_1d(np.squeeze(elem_properties[ind1, 4:6]), 1, 0)
                # assign element under the new element ordering scheme
                elem_properties_new[counter_new, :] = elem_properties[ind1, :]
                counter_new = counter_new + 1
                terminal_elems[ind1] = 1
                # join up element with upstream elements
                nodeNumNew = elems[ind1, 0]  # this is node number at other end of element
                nodeNum = i
                places = np.where(elems == nodeNumNew)  # find where the new node occurs
                ind1 = places[0]
                ind2 = places[1]
                counter2 = 1
                # walk upstream through chains of pass-through nodes; a node that
                # appears exactly twice joins two elements without branching
                while ((len(ind1) == 2) & (counter2 < Ne)):  # as can only be present twice if a joining node
                    # see if branch joins to yet another branch, that we haven't yet encountered (i.e. not nodeNum)
                    # (~ind2[0] flips column 0 <-> 1 via negative indexing on a 2-column array)
                    if (elems[ind1[0], ~ind2[0]] == nodeNum):
                        k = 1
                    else:
                        k = 0
                    terminal_elems[ind1[k]] = 1  # label terminal_elems as joining elements
                    # switch the way element points
                    if (ind2[k] == 0):
                        elems[ind1[k], :] = row_swap_1d(np.squeeze(elems[ind1[k], :]), 1, 0)
                        elem_properties[ind1[k], 4:6] = row_swap_1d(np.squeeze(elem_properties[ind1[k], 4:6]), 1, 0)
                    nodeNum = nodeNumNew
                    nodeNumNew = elems[ind1[k], 0]
                    # assign new order
                    elem_properties_new[counter_new, :] = elem_properties[ind1[k], :]
                    counter_new = counter_new + 1
                    # update loop criteria
                    places = np.where(elems == nodeNumNew)
                    ind1 = places[0]
                    ind2 = places[1]
                    counter2 = counter2 + 1
        # update elems to 'get rid of' terminal elements from the list
        terminal_elems[0] = 0  # the root node can never be terminal
        terminal_elems_pair = np.column_stack([terminal_elems, terminal_elems])
        elems[terminal_elems_pair == 1] = -1  # -1 marks removed entries so they match no node
        # loop exit criteria: stop once no terminals were found this pass
        places = np.where(terminal_elems == 1)
        places = places[1]
        if len(places) == 0:
            counter = Ne + 1
        counter = counter + 1
    # assign root element in new order systems
    elem_properties_new[Ne - 1, :] = elem_properties[0, :]
    # reduce size due to elements removed
    elem_properties_new = elem_properties_new[0:Ne, :]
    # reverse order so the highest-order (root) element comes first
    elem_properties_new = np.flip(elem_properties_new, 0)
    elems = geom['elems']
    elems = elems[0:Ne, :]
    elems[:, 1:3] = elem_properties_new[:, 4:6]
    radii = elem_properties_new[:, 0]
    lengths = elem_properties_new[:, 1]
    euclid_lengths = elem_properties_new[:, 2]
    return {'elems': elems, 'radii': radii, 'length': lengths, 'euclidean length': euclid_lengths, 'nodes': nodes}
def calc_terminal_branch(node_loc, elems):
    """Identify the terminal (most distal) elements and nodes of a branching tree.

    Inputs:
    - node_loc: array of node coordinates for the tree
    - elems: element connectivity array (element number, node 1, node 2)

    Returns a dict with:
    - 'terminal_elems': indices of elements that have no downstream element
    - 'terminal_nodes': the distal node of each terminal element
    - 'total_terminals': the number of terminal branches

    A way you might want to use me is:
    >>> node_loc =np.array([[ 0.,0.,0.,-1.,2.,0.,0.], [1.,0.,0.,-0.5,2.,0.,0.],[2.,0.,-0.5,0.,1.31578947,0.,0.],[3.,0.,0.5,0.,0.,0.,0.]])
    >>> elems = np.array([[0 ,0 ,1], [1 ,1 ,2], [2 ,1 ,3]])
    >>> calc_terminal_branch(node_loc,elems)
    This will return:
    >>> terminal_elems: [1 2]
    >>> terminal_nodes: [2 3]
    >>> total_terminals: 2
    """
    elem_cnct = pg_utilities.element_connectivity_1D(node_loc, elems)
    # over-allocate, then trim to the actual number of terminals found
    terminal_branches = np.zeros(len(elems), dtype=int)
    terminal_nodes = np.zeros(len(node_loc), dtype=int)
    num_term = 0
    for ne in range(len(elems)):
        if elem_cnct['elem_down'][ne][0] == 0:  # nothing downstream -> terminal element
            terminal_branches[num_term] = ne
            terminal_nodes[num_term] = elems[ne][2]  # distal node of the terminal element
            num_term += 1
    terminal_branches = np.resize(terminal_branches, num_term)
    terminal_nodes = np.resize(terminal_nodes, num_term)
    print('Total number of terminals assessed, num_terminals = ' + str(num_term))
    return {'terminal_elems': terminal_branches, 'terminal_nodes': terminal_nodes, 'total_terminals': num_term}
def cal_br_vol_samp_grid(rectangular_mesh, branch_nodes, branch_elems, branch_radius, volume, thickness, ellipticity,
                         start_elem):
    """ Calculate total volume and diameter of branches in each sampling grid element
    Inputs are:
     - rectangular_mesh: rectangular sampling grid
     - branch_nodes: array of coordinates (locations) of nodes of tree branches
     - branch_elems: array of element showing element connectivity
     - branch_radius: array of branch radius
     - volume: volume of placenta
     - thickness: thickness of placenta
     - ellipticity: ellipticity of placenta
     - start_elem: number of element to start calculating tissue volume
    Return:
     - br_vol_in_grid: array of total tissue volume in each sampling grid element
     - br_diameter_in_grid: array of total diameter*volume in each sampling grid element
    A way you might want to use me is:
    >>> thickness =  2.1 #mm
    >>> ellipticity = 1.00 #no unit
    >>> volume=5    #mm3
    >>> rectangular_mesh = {}
    >>> rectangular_mesh['nodes'] = np.array([[-0.5, -0.5, -1.5],[ 0.5, -0.5,-1.5],[-0.5,  0.5 ,-1.5],[ 0.5 , 0.5, -1.5],[-0.5 ,-0.5, -0.5],[ 0.5 ,-0.5 ,-0.5],[-0.5 , 0.5 ,-0.5],[ 0.5 , 0.5 ,-0.5],[-0.5, -0.5 , 0.5],[ 0.5, -0.5 , 0.5],[-0.5 ,0.5 , 0.5],[ 0.5 , 0.5 ,0.5]])
    >>> rectangular_mesh['elems'] = [[ 0,  0,  1,  2,  3,  4,  5,  6,  7],[1,4,5,6,7,8,9,10,11]]
    >>> rectangular_mesh['total_elems'] = 2
    >>> branch_elems={}
    >>> branch_elems['elems']=[[0 ,0, 1]]
    >>> branch_nodes={}
    >>> branch_nodes['nodes']=np.array([[ 0.,0.,0., -1., 2.,0.,0.],[ 1.,0.,0.,-0.5 ,2.,0.,0.]])
    >>> branch_radius=[0.1]
    >>> start_elem=0
    >>> cal_br_vol_samp_grid(rectangular_mesh, branch_nodes['nodes'], branch_elems['elems'],branch_radius, volume, thickness,ellipticity, start_elem)
    This will return:
    >>> br_vol_in_grid[0]: 0.01396263
    >>> br_diameter_in_grid[0]: 0.00279253
    """
    # Define the resolution of cylinder for analysis
    num_points_xy = 8
    num_points_z = 8
    # Define information about sampling grid required to place data points in correct locations
    total_sample_elems = rectangular_mesh['total_elems']
    gr = pg_utilities.samp_gr_for_node_loc(rectangular_mesh)
    # Define the placental ellipsoid
    radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)  # calculate radii of ellipsoid
    z_radius = radii['z_radius']
    x_radius = radii['x_radius']
    y_radius = radii['y_radius']
    unit_cyl_points = np.zeros((num_points_xy * num_points_xy * num_points_z, 3))
    # Define a cylinder of points of radius 1 and length 1
    x = np.linspace(-1, 1, num_points_xy)
    y = np.linspace(-1, 1, num_points_xy)
    num_accepted = 0
    # NOTE(review): the z loop runs num_points_z + 1 axial slices (0..num_points_z
    # inclusive); the per-point volume below divides by the accepted count, so
    # the totals remain consistent regardless.
    for k in range(0, num_points_z + 1):
        for i in range(0, num_points_xy):
            for j in range(0, num_points_xy):
                # keep only (x, y) samples inside the unit circle
                if (x[i] ** 2 + y[j] ** 2) <= 1:
                    new_z = 1 / np.double(num_points_z) * k
                    unit_cyl_points[num_accepted][0] = x[i]
                    unit_cyl_points[num_accepted][1] = y[j]
                    unit_cyl_points[num_accepted][2] = new_z
                    num_accepted = num_accepted + 1
    # Shrink the point cloud to the accepted points only (refcheck=False allows
    # in-place resize of the array)
    unit_cyl_points.resize(num_accepted, 3, refcheck=False)
    cyl_points = np.copy(unit_cyl_points)
    cylindervector = np.array([0.0, 0.0, 1.0])
    ###Define and initialise arrays to be populated
    # The volume of each branch
    vol_each_br = np.zeros(len(branch_elems))
    # Array for total volume of sampling grid in each element
    total_vol_samp_gr = np.zeros(total_sample_elems)
    # Array for diameter variable of sampling grid in each element (this variable is to be used for weighted diameter calculation)
    total_diameter_samp_gr = np.zeros(total_sample_elems)
    # initialise counters
    branch_count = 0
    volume_outside_ellipsoid = 0.0
    volume_inside_ellipsoid = 0.0
    for ne in range(start_elem, len(branch_elems)):  # len(branch_elems)):  # looping for all branchs in tree
        node1 = branch_nodes[branch_elems[ne][1]][1:4]  # coor of start node of a branch element
        node2 = branch_nodes[branch_elems[ne][2]][1:4]  # coor of end node of a branch element
        node1in = pg_utilities.check_in_on_ellipsoid(node1[0], node1[1], node1[2], x_radius, y_radius, z_radius)
        node2in = pg_utilities.check_in_on_ellipsoid(node2[0], node2[1], node2[2], x_radius, y_radius, z_radius)
        # skip branches partially or fully outside the placental ellipsoid
        if not node1in and not node2in:
            print('Warning, element ' + str(ne) + 'is not in ellipsoid, if this is not expected check your geometry')
            print('Skipping this element from analysis')
            continue
        elif not node1in or not node2in:
            print('Warning, element ' + str(ne) + 'has one node not in the ellipsoid.')
            print('The first node ' + str(node1) + ' is ' + str(node1in) + ' (True means inside).')
            print('The second node ' + str(node2) + ' is ' + str(node2in) + ' (True means inside).')
            print('Skipping this element from analysis')
            continue
        branch_vector = node2 - node1
        r = branch_radius[ne]
        length = np.linalg.norm(branch_vector)
        vol_each_br[ne] = np.pi * length * r ** 2.0
        # each sample point carries an equal share of the branch volume
        vol_per_point = vol_each_br[ne] / (np.double(num_accepted))
        # scale the unit cylinder to the branch's radius and length
        cyl_points[:, 0:2] = unit_cyl_points[:, 0:2] * r
        cyl_points[:, 2] = unit_cyl_points[:, 2] * length
        # rotate the cylinder so its axis aligns with the branch direction
        desiredvector = branch_vector / np.linalg.norm(branch_vector)
        rotation_axis = np.cross(desiredvector, cylindervector)
        if np.linalg.norm(rotation_axis) == 0:  # aligned
            if node2[2] - node1[2] < 0:
                cyl_points[:, 2] = -1.0 * cyl_points[:, 2]
        else:
            angle = pg_utilities.angle_two_vectors(cylindervector, desiredvector)
            rotation_mat = pg_utilities.rotation_matrix_3d(rotation_axis, angle)
            cyl_points = np.array(np.matrix(cyl_points) * np.matrix(rotation_mat))
        # translate the cylinder so it starts at the branch's first node
        cyl_points[:, 0] = cyl_points[:, 0] + node1[0]
        cyl_points[:, 1] = cyl_points[:, 1] + node1[1]
        cyl_points[:, 2] = cyl_points[:, 2] + node1[2]
        # Array for vol distribution of inidvidual branch (not total)
        vol_distribution_each_br = np.zeros(total_sample_elems, dtype=float)
        for nt in range(0, num_accepted):
            coord_point = cyl_points[nt][0:3]
            inside = pg_utilities.check_in_on_ellipsoid(coord_point[0], coord_point[1], coord_point[2], x_radius,
                                                        y_radius, z_radius)
            if inside:
                # locate which sampling grid element this point falls into
                nelem = pg_utilities.locate_node(gr[0], gr[1], gr[2], gr[3], gr[4], gr[5], gr[6], gr[7], gr[8],
                                                 coord_point)
                total_vol_samp_gr[nelem] = total_vol_samp_gr[nelem] + vol_per_point
                vol_distribution_each_br[nelem] = vol_distribution_each_br[nelem] + vol_per_point
                volume_inside_ellipsoid = volume_inside_ellipsoid + vol_per_point
            else:
                # Data points lie outside the ellipsoid - this is OK in some cases, so the code shouldn't exit. However,
                # users should be able to check how much is outside of ellipsoid if they believe their branching geometry
                # is set up NOT to go outside the ellipsoid at all.
                volume_outside_ellipsoid = volume_outside_ellipsoid + vol_per_point
        total_diameter_samp_gr = total_diameter_samp_gr + vol_distribution_each_br * 2.0 * r  # this variable is calculated as summation of diameter * vol of branch in grid (to be used for weight_diam)
    percent_outside = volume_outside_ellipsoid / np.sum(total_vol_samp_gr) * 100.0
    total_vol_ml = (volume_outside_ellipsoid + np.sum(total_vol_samp_gr))/1000.0
    sum_branch_ml = np.sum(vol_each_br)/1000.0
    print('Analysis complete ' + str(percent_outside) + '% of analysed points lie outside the ellipsoid.')
    print('Total branch volume analysed ' + str(total_vol_ml) + ' (compared with summed branch vol ' + str(
        sum_branch_ml) + ')')
    return {'br_vol_in_grid': total_vol_samp_gr, 'br_diameter_in_grid': total_diameter_samp_gr}
def conductivity_samp_gr(vol_frac, weighted_diameter, elem_list):
    """Calculate conductivity of sampling grid elements where villous branches are located.

    Conductivity is computed from a porosity-style relationship on tissue
    volume fraction and weighted diameter, and clamped to a maximum value.

    Inputs are:
     - vol_frac: tissue volume fraction of sampling grid element
     - weighted_diameter: weighted diameter of sampling grid element
     - elem_list: list of elements to assess
    Return:
     - conductivity: conductivity of sampling grid element where the placental tissue are located
       will be in the same units as the weighted diameter (typically mm)
    A way you might want to use me:
    >>> vol_frac= [0.72401065]
    >>> weighted_diameter=[0.17988357]
    >>> non_empties=[0]
    >>> conductivity_samp_gr(vol_frac,weighted_diameter,non_empties)
    This will return:
    >>> conductivity: 7.20937313e-06
    """
    # Upper bound on conductivity (see Mabelle's thesis), applied to empty
    # elements and as a clamp on computed values.
    max_cond = 0.52
    conductivity = np.zeros(len(vol_frac))
    for ne in elem_list:
        if vol_frac[ne] != 0.0:
            # Porous-medium style relation: d^2 (1 - phi)^3 / (180 phi^2)
            conductivity[ne] = weighted_diameter[ne] ** 2 * (1 - vol_frac[ne]) ** 3 / (180.0 * vol_frac[ne] ** 2)
        else:
            # An element with no tissue takes the maximum conductivity
            conductivity[ne] = max_cond
        # Clamp to the physically sensible maximum
        if conductivity[ne] > max_cond:
            conductivity[ne] = max_cond
    return conductivity
def ellipse_volume_to_grid(rectangular_mesh, volume, thickness, ellipticity, num_test_points):
    """ Calculates the placental volume associated with each element in a samplling grid
    Inputs are:
     - rectangular_mesh: the rectangular sampling grid
     - volume: placental volume
     - thickness: placental thickness
     - ellipiticity: placental ellipticity
     - num_test_points: resolution of integration quadrature
    Return:
     - pl_vol_in_grid: array of placental volume in each sampling grid element
     - non_empty_rects: array of sampling grid elements that are occupied by placental tissue
    A way you might want to use me is:
    >>> thickness =  (3.0 * 1 / (4.0 * np.pi)) ** (1.0 / 3.0) * 2.0 #mm
    >>> ellipticity = 1.00 #no unit
    >>> spacing = 1.0 #no unit
    >>> volume=1 #mm3
    >>> rectangular_mesh = {}
    >>> rectangular_mesh['nodes'] = [[0., 0., 0.], [ thickness/2.0, 0., 0.],[0., thickness/2.0, 0.],[ thickness/2.0, thickness/2.0, 0.],[0., 0., thickness/2.0], [ thickness/2.0, 0., thickness/2.0],[0., thickness/2.0,thickness/2.0],[ thickness/2.0, thickness/2.0, thickness/2.0]]
    >>> rectangular_mesh['elems'] = [[ 0,  0,  1,  2,  3,  4,  5,  6,  7]]
    >>> rectangular_mesh['total_nodes'] =8
    >>> rectangular_mesh['total_elems'] = 1
    >>> num_test_points=25
    >>> ellipse_volume_to_grid(rectangular_mesh, volume, thickness, ellipticity, num_test_points)
    This will return:
    >>> pl_vol_in_grid: 0.12485807941
    >>> non_empty_rects: 0
    """
    total_elems = rectangular_mesh['total_elems']
    elems = rectangular_mesh['elems']
    nodes = rectangular_mesh['nodes']
    radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
    z_radius = radii['z_radius']
    x_radius = radii['x_radius']
    y_radius = radii['y_radius']
    # Initialise the array that defines the volume of placenta in each grid element
    pl_vol_in_grid = np.zeros(total_elems)
    non_empty_loc = np.zeros(total_elems, dtype=int)
    non_empty_count = 0
    for ne in range(0, len(elems)):  # looping through elements
        count_in_range = 0
        nod_in_range = np.zeros(8, dtype=int)
        # define range of x, y , and z in the element
        # elems row format: [elem number, node1 .. node8]; nodes 1 and 8 are
        # opposite corners of the axis-aligned box
        startx = nodes[elems[ne][1]][0]
        endx = nodes[elems[ne][8]][0]
        starty = nodes[elems[ne][1]][1]
        endy = nodes[elems[ne][8]][1]
        startz = nodes[elems[ne][1]][2]
        endz = nodes[elems[ne][8]][2]
        # count how many of the 8 corner nodes are inside or on the ellipsoid
        for nod in range(1, 9):
            check_in_range = pg_utilities.check_in_ellipsoid(nodes[elems[ne][nod]][0], nodes[elems[ne][nod]][1],
                                                             nodes[elems[ne][nod]][2], x_radius, y_radius, z_radius)
            check_on_range = pg_utilities.check_on_ellipsoid(nodes[elems[ne][nod]][0], nodes[elems[ne][nod]][1],
                                                             nodes[elems[ne][nod]][2], x_radius, y_radius, z_radius)
            if check_in_range or check_on_range:
                count_in_range = count_in_range + 1
                nod_in_range[nod - 1] = 1
        if count_in_range == 8:  # if all 8 nodes are inside the ellipsoid
            non_empty_loc[non_empty_count] = ne
            non_empty_count = non_empty_count + 1
            pl_vol_in_grid[ne] = (endx - startx) * (endy - starty) * (
                    endz - startz)  # the placental vol in that samp_grid_el is same as vol of samp_grid_el
        elif count_in_range == 0:  # if all 8 nodes are outside the ellpsiod
            # since this samp_grid_el is completely outside, the placental vol is zero
            pl_vol_in_grid[ne] = 0
        else:  # if some nodes in and some nodes out, the samp_grid_el is at the edge of ellipsoid
            # Use trapezoidal quadrature to caculate the volume under the surface of the ellipsoid in each element
            non_empty_loc[non_empty_count] = ne
            non_empty_count = non_empty_count + 1
            # need to map to positive quadrant
            # The quadrature below integrates the ellipsoid's upper surface, so
            # elements below z = 0 are mirrored into positive z; elements that
            # straddle z = 0 are split into two parts and summed (repeat flag).
            repeat = False
            if (startz < 0 and endz <= 0):
                # need to project to positive z axis
                startz = abs(nodes[elems[ne][8]][2])
                endz = abs(nodes[elems[ne][1]][2])
            elif (startz < 0 and endz > 0):
                # Need to split into components above and below the axis and sum the two
                startz = 0
                endz = abs(nodes[elems[ne][1]][2])
                startz_2 = 0
                endz_2 = nodes[elems[ne][8]][2]
                repeat = True
            xVector = np.linspace(startx, endx, num_test_points)
            yVector = np.linspace(starty, endy, num_test_points)
            xv, yv = np.meshgrid(xVector, yVector)
            # squared surface height: z^2 = Rz^2 (1 - (x/Rx)^2 - (y/Ry)^2)
            zv = z_radius ** 2 * (1 - (xv / x_radius) ** 2 - (yv / y_radius) ** 2)
            for i in range(num_test_points):
                for j in range(num_test_points):
                    # clamp the surface height into the element's z-range
                    if zv[i, j] <= startz ** 2:
                        zv[i, j] = startz ** 2
                    zv[i, j] = np.sqrt(zv[i, j])
                    if zv[i, j] > endz:
                        zv[i, j] = endz
                    elif zv[i, j] < startz:
                        zv[i, j] = startz
            # integrate z(x, y) over the element footprint (trapezoidal rule in
            # x then y), then subtract the slab below startz
            intermediate = np.zeros(num_test_points)
            for i in range(0, num_test_points):
                intermediate[i] = np.trapz(zv[:, i], xVector)
            Value1 = np.trapz(intermediate, yVector)
            pl_vol_in_grid[ne] = (Value1 - startz * (endx - startx) * (endy - starty))
            if repeat:
                # second part of an element straddling z = 0
                xVector = np.linspace(startx, endx, num_test_points)
                yVector = np.linspace(starty, endy, num_test_points)
                xv, yv = np.meshgrid(xVector, yVector)
                zv = z_radius ** 2 * (1 - (xv / x_radius) ** 2 - (yv / y_radius) ** 2)
                for i in range(num_test_points):
                    for j in range(num_test_points):
                        if zv[i, j] <= startz_2 ** 2:
                            zv[i, j] = startz_2 ** 2
                        zv[i, j] = np.sqrt(zv[i, j])
                        if zv[i, j] > endz_2:
                            zv[i, j] = endz_2
                        elif zv[i, j] < startz_2:
                            zv[i, j] = startz_2
                intermediate = np.zeros(num_test_points)
                for i in range(0, num_test_points):
                    intermediate[i] = np.trapz(zv[:, i], xVector)
                Value1 = np.trapz(intermediate, yVector)
                pl_vol_in_grid[ne] = pl_vol_in_grid[ne] + (Value1 - startz_2 * (endx - startx) * (
                        endy - starty))
    print('Number of Non-empty cells: ' + str(non_empty_count))
    print('Total number of cells: ' + str(total_elems))
    non_empty_loc = np.resize(non_empty_loc, non_empty_count)
    return {'pl_vol_in_grid': pl_vol_in_grid, 'non_empty_rects': non_empty_loc}
def evaluate_orders(node_loc, elems):
    """Calculates generations, Horsfield orders, Strahler orders for a given tree
    Works for diverging trees only, but accounts for more than three elements joining at a node
    Inputs:
       node_loc = array with location of nodes
       elems = array with location of elements
    Returns:
       a dictionary with integer arrays 'strahler', 'horsfield' and
       'generation', one entry per element
    """
    num_elems = len(elems)
    # Calculate connectivity of elements
    # elem_up/elem_down row format: [number of connections, elem1, elem2, ...]
    elem_connect = pg_utilities.element_connectivity_1D(node_loc, elems)
    elem_upstream = elem_connect['elem_up']
    elem_downstream = elem_connect['elem_down']
    # Initialise order definition arrays
    strahler = np.zeros(len(elems), dtype=int)
    horsfield = np.zeros(len(elems), dtype=int)
    generation = np.zeros(len(elems), dtype=int)
    # Calculate generation of each element (forward pass; assumes elements are
    # numbered so parents precede their children)
    maxgen = 1  # Maximum possible generation
    for ne in range(0, num_elems):
        ne0 = elem_upstream[ne][1]
        if elem_upstream[ne][0] != 0:
            # Calculate parent generation
            n_generation = generation[ne0]
            if elem_downstream[ne0][0] == 1:
                # Continuation of previous element
                generation[ne] = n_generation
            elif elem_downstream[ne0][0] >= 2:
                # Bifurcation (or morefurcation)
                generation[ne] = n_generation + 1
        else:
            generation[ne] = 1  # Inlet
        maxgen = np.maximum(maxgen, generation[ne])
    # Now need to loop backwards to do ordering systems
    for ne in range(num_elems - 1, -1, -1):
        n_horsfield = np.maximum(horsfield[ne], 1)
        n_children = elem_downstream[ne][0]
        if n_children == 1:
            # NOTE(review): a lone child with generation 0 was never reached by
            # the forward pass above; treat this element as childless.
            if generation[elem_downstream[ne][1]] == 0:
                n_children = 0
        # For a terminal element (no children) the defaults below give order 1
        temp_strahler = 0
        strahler_add = 1
        if n_children >= 2:  # Bifurcation downstream
            temp_strahler = strahler[elem_downstream[ne][1]]  # first daughter
            for noelem in range(1, n_children + 1):
                ne2 = elem_downstream[ne][noelem]
                temp_horsfield = horsfield[ne2]
                if temp_horsfield > n_horsfield:
                    n_horsfield = temp_horsfield
                # As implemented, the parent's Strahler order is incremented
                # (strahler_add stays 1) only when every daughter has the same
                # order; any daughter differing from the running maximum zeros
                # strahler_add, leaving the parent at the highest daughter order.
                if strahler[ne2] < temp_strahler:
                    strahler_add = 0
                elif strahler[ne2] > temp_strahler:
                    strahler_add = 0
                    temp_strahler = strahler[ne2]  # strahler of highest daughter
            n_horsfield = n_horsfield + 1
        elif n_children == 1:
            # Continuation: the element inherits its single daughter's orders
            ne2 = elem_downstream[ne][1]  # element no of daughter
            n_horsfield = horsfield[ne2]
            strahler_add = strahler[ne2]
        horsfield[ne] = n_horsfield
        strahler[ne] = temp_strahler + strahler_add
    return {'strahler': strahler, 'horsfield': horsfield, 'generation': generation}
def define_radius_by_order(node_loc, elems, system, inlet_elem, inlet_radius, radius_ratio):
    """Assign a radius to every branch based on its order in the tree.

    log10(radius) varies linearly with branch order: the inlet element keeps
    inlet_radius, and each order step away from the inlet scales the radius
    by radius_ratio (a Strahler/Horsfield type ratio).

    Inputs are:
     - node_loc: The nodes in the branching tree
     - elems: The elements in the branching tree
     - system: 'strahler','horsfield' or 'generation' to define vessel order
     - inlet_elem: element number that you want to define as having inlet_radius
     - inlet_radius: the radius of your inlet vessel
     - radius_ratio: Strahler or Horsfield type ratio, defines the slope of log(order) vs log(radius)
    Returns:
     - radius of each branch
    A way you might want to use me is:
    >>> node_loc = np.array([[0., 0., 0., -1., 2., 0., 0.], [1., 0., 0., -0.5, 2., 0., 0.], [2., 0., -0.5, 0., 1.31578947, 0., 0.], [3., 0., 0.5, 0., 0., 0., 0.]])
    >>> elems = np.array([[0, 0, 1], [1, 1, 2], [2, 1, 3]])
    >>> define_radius_by_order(node_loc, elems, 'strahler', 0, 0.1, 1.53)
    This will return radius: [0.1, 0.06535948, 0.06535948]
    """
    # Determine each element's order under the requested ordering system
    elem_order = evaluate_orders(node_loc, elems)[system]
    inlet_order = elem_order[inlet_elem]
    # Vectorised log-linear relationship: elements at the inlet order get
    # exactly inlet_radius; lower orders shrink by radius_ratio per order
    radius = 10. ** (np.log10(radius_ratio) * (elem_order - inlet_order) + np.log10(inlet_radius))
    return radius
def define_radius_by_order_stem(node_loc, elems, system, filename_stem, inlet_radius, radius_ratio):
    """Assign radii by branch order, treating each stem villus as an inlet.

    Each stem element (read from file) receives inlet_radius, and every
    element downstream of it is scaled log-linearly by radius_ratio relative
    to the stem's order.

    Inputs are:
     - node_loc: The nodes in the branching tree
     - elems: The elements in the branching tree
     - system: 'strahler','horsfield' or 'generation' to define vessel order
     - filename_stem: filename that includes list of stem villi location and element number
     - inlet_radius: the radius of your inlet vessel
     - radius_ratio: Strahler or Horsfield type ratio, defines the slope of log(order) vs log(radius)
    Returns:
     - radius of each branch
    """
    radius = np.zeros(len(elems))
    # Stem villi element numbers come from file; connectivity is needed to
    # gather each stem's downstream subtree
    stem_elems = imports_and_exports.import_stemxy(filename_stem)['elem']
    elem_cnct = pg_utilities.element_connectivity_1D(node_loc, elems)
    # Order of each element under the requested ordering system
    elem_order = evaluate_orders(node_loc, elems)[system]
    log_ratio = np.log10(radius_ratio)
    log_inlet = np.log10(inlet_radius)
    for stem_elem in stem_elems:
        # All elements downstream of this stem scale from the stem's order
        subtree = pg_utilities.group_elem_parent(stem_elem, elem_cnct['elem_down'])
        stem_order = elem_order[stem_elem]
        radius[stem_elem] = inlet_radius
        for child in subtree:
            radius[child] = 10. ** (log_ratio * (elem_order[child] - stem_order) + log_inlet)
    return radius
def define_elem_lengths(node_loc, elems):
    """Compute the straight-line length of every element in a branching tree.

    Inputs are:
     - node_loc: node array with rows [node number, x, y, z, ...]
     - elems: connectivity array with rows [elem number, start node, end node]
    Returns:
     - array holding the Euclidean length of each element
    """
    lengths = np.zeros(len(elems))
    for ne, elem in enumerate(elems):
        # coordinates of the element's two end nodes (columns 1:4 are x, y, z)
        start_xyz = node_loc[elem[1]][1:4]
        end_xyz = node_loc[elem[2]][1:4]
        lengths[ne] = np.linalg.norm(end_xyz - start_xyz)
    return lengths
def find_branch_angles(geom, orders, elem_connect, branchGeom, voxelSize, conversionFactor):
    """Finds branch angles + L/LParent & D/Dparent and scales all results into desired units and degrees.

    Inputs:
       - geom: contains nodes, elems, and various element properties (length, radius etc.)
       - orders: contains strahler order and generation of each element
       - elem_connect: contains upstream and downstream elements for each element
       - branchGeom: contains branch properties (length, radius, etc.)
       - voxelSize: for conversion to mm (must be isotropic)
       - conversionFactor: to scale radii correction, printed in log of ImageJ during MySkeletonizationProcess
    Outputs:
       - geom and branchGeom are altered in place (and returned) so their arrays are in
         scaled units; angle/ratio entries left at -1 mean "not assessed"
       - geom gains 'branch angles' (degrees), 'diam_ratio' and 'length_ratio' per element
       - branchGeom gains 'branch_angles' (degrees), 'diam ratio' and 'length ratio' per
         Strahler branch
    """
    # unpackage inputs
    nodes = geom['nodes']
    elems = geom['elems']
    elems = elems[:, 1:3]  # get rid of useless first column
    radii = geom['radii']
    lengths = geom['length']
    branches = branchGeom['branches']
    branchRad = branchGeom['radii']
    branchLen = branchGeom['length']
    strahler = orders['strahler']
    generations = orders['generation']
    elem_up = elem_connect['elem_up']
    # new arrays; -1 marks entries that are never assessed (e.g. the root)
    num_elems = len(elems)
    num_branches = len(branchRad)
    branch_angles = -1. * np.ones(num_branches)  # results by branch (Strahler)
    diam_ratio_branch = -1. * np.ones(num_branches)
    length_ratio_branch = -1. * np.ones(num_branches)
    diam_ratio = -1. * np.ones(num_elems)  # results by generation
    length_ratio = -1. * np.ones(num_elems)
    seg_angles = -1. * np.ones(num_elems)
    # find results for each element (ignoring parent element)
    for ne in range(1, num_elems):
        neUp = elem_up[ne, 1]  # find parent
        if (generations[neUp] < generations[ne]):  # there is branching but not necessarily a new strahler branch
            # parent direction vector (normalised)
            endNode = int(elems[neUp, 0])
            startNode = int(elems[neUp, 1])
            v_parent = nodes[endNode, :] - nodes[startNode, :]
            v_parent = v_parent / np.linalg.norm(v_parent)
            d_parent = 2 * radii[neUp]
            L_parent = lengths[neUp]
            # daughter direction vector (normalised)
            endNode = int(elems[ne, 1])
            startNode = int(elems[ne, 0])
            v_daughter = nodes[startNode, :] - nodes[endNode, :]
            v_daughter = v_daughter / np.linalg.norm(v_daughter)
            d_daughter = 2 * radii[ne]
            L_daughter = lengths[ne]
            # calculate angle between parent and daughter vectors
            dotProd = np.dot(v_parent, v_daughter)
            # BUG FIX: was `abs(dotProd <= 1)`, which takes abs() of a boolean and
            # so never caught dotProd < -1 (np.arccos would silently return NaN).
            if abs(dotProd) <= 1:
                angle = np.arccos(dotProd)
                seg_angles[ne] = angle
            else:
                angle = -1
                print('Angle Error, element: ' + str(ne))
            if d_parent != 0:
                diam_ratio[ne] = d_daughter / d_parent
            if L_parent != 0:
                length_ratio[ne] = L_daughter / L_parent
            if (strahler[neUp] > strahler[ne]):  # then this also is a new strahler branch
                # assign results (branch numbering in branchGeom is 1-based)
                branchNum = int(branches[ne]) - 1
                parentBranch = int(branches[neUp]) - 1
                branch_angles[branchNum] = angle
                if branchRad[parentBranch] != 0:
                    diam_ratio_branch[branchNum] = branchRad[branchNum] / branchRad[parentBranch]
                if branchLen[parentBranch] != 0:
                    length_ratio_branch[branchNum] = branchLen[branchNum] / branchLen[parentBranch]
    # scale results into mm and degrees & package them up
    geom['radii'] = geom['radii'] / conversionFactor * voxelSize
    geom['length'] = geom['length'] * voxelSize
    geom['nodes'] = geom['nodes'] * voxelSize
    geom['euclidean length'] = geom['euclidean length'] * voxelSize
    geom['branch angles'] = seg_angles * 180 / np.pi
    geom['diam_ratio'] = diam_ratio
    geom['length_ratio'] = length_ratio
    branchGeom['radii'] = branchGeom['radii'] / conversionFactor
    branchGeom['radii'] = branchGeom['radii'] * voxelSize
    branchGeom['branch_angles'] = branch_angles * 180 / np.pi
    branchGeom['length'] = branchGeom['length'] * voxelSize
    branchGeom['euclidean length'] = branchGeom['euclidean length'] * voxelSize
    branchGeom['length ratio'] = length_ratio_branch
    branchGeom['diam ratio'] = diam_ratio_branch
    return (geom, branchGeom)
def find_parent_node(find_inlet_loc, inlet_loc, nodes, radii, elems, elem_properties):
    """Finds the parent node in array either from given coordinates or by finding the terminal branch with the largest radius
    Inputs:
       - nodes: an list of node coordinates in with structure [node num, coord1,coord2,coord3,...]
       - radii: N x 1 array with radius of each element
       - elems: an Nx2(!!!) array with node indices for start and end of node
       - elem_properties:an N x K array, with each row containing various element properties (radii etc.)
       - inlet_loc : the coordinates of the parent node for the entire tree (if known)
       = find_inlet_loc - a boolean variable specifying whether to use inlet location provided (0) or to find the inlet location automatically (1)
    Returns: elems and elem_properties updates so that inlet element is the first element in the list
    """
    # will define inlet as terminal element of largest radius
    if find_inlet_loc == 1:
        maxRad = -1
        # go through each node
        # NOTE(review): the range runs one past the last node index; the extra
        # iteration matches nothing in elems and is harmless.
        for i in range(0, len(nodes) + 1):
            # find number of occurrences of the node
            places = np.where(elems == i)
            ind1 = places[0]
            ind2 = places[1]
            if (len(ind1) == 1):  # if occurs once, then element is terminal (avoids root element)
                ind1 = ind1[0]
                ind2 = ind2[0]
                radius = radii[ind1]
                # track the terminal element with the largest radius
                if radius > maxRad:
                    maxRad = radius
                    maxRadInd = i
        inlet_loc = np.squeeze(nodes[maxRadInd, 1:4])
        Nn_root = maxRadInd
    # find root node and element from coordinates provided
    else:
        Nn_root = pg_utilities.is_member(inlet_loc, nodes[:,1:4])
    if (Nn_root == -1):
        print("Warning, root node not located")
        print('Inlet Coordinates:' + str(inlet_loc))
    # find root element
    # Ne_place is (row indices, column indices) of Nn_root within elems
    Ne_place = np.where(elems == Nn_root)
    Ne_root = Ne_place[0]  # only need first index
    if len(Ne_root) > 1:
        print("Warning, root node is associated with multiple elements")
    if len(Ne_root) == 0:
        print("Warning, no root element located")
    Ne_root = Ne_root[0]
    # make root element the first element
    elems = pg_utilities.row_swap_2d(elems, 0, Ne_root)
    elem_properties = pg_utilities.row_swap_2d(elem_properties, 0, Ne_root)
    # get element pointing right way
    # If the root node is the element's second node, flip the element's node
    # order (columns 4:6 of elem_properties presumably mirror the node pair —
    # TODO confirm against the caller's elem_properties layout).
    if (np.squeeze(Ne_place[1]) != 0):
        elems[0, :] = pg_utilities.row_swap_1d(np.squeeze(elems[0, :]), 1, 0)
        elem_properties[0, 4:6] = pg_utilities.row_swap_1d(np.squeeze(elem_properties[0, 4:6]), 1, 0)
    return (elems, elem_properties)
def generation_summary_statistics(geom, orders, major_minor_results):
"""Calculates statistics on branching tree and display as table, sorting my generations in the tree
Inputs:
- geom: contains various element properties (length, radius etc.) by element
- orders: contains strahler order and generation of each element
Outputs: table of information according to generation prints to screen
"""
# unpack inputs
generation = orders['generation']
diam = 2 * geom['radii']
length = geom['length']
euclid_length = geom['euclidean length']
angles = geom['branch angles']
diam_ratio = geom['diam_ratio']
length_ratio = geom['length_ratio']
Minor_angle = major_minor_results['Minor_angle']
Major_angle = major_minor_results['Major_angle']
D_Major_Minor = major_minor_results['D_maj_min']
D_min_parent = major_minor_results['D_min_P']
D_maj_parent = major_minor_results['D_maj_P']
L_Major_Minor = major_minor_results['L_maj_min']
L_min_parent = major_minor_results['L_min_P']
L_maj_parent = major_minor_results['L_maj_P']
# statisitcs by generation
num_gens= int(max(generation))
values_by_gen = np.zeros([num_gens, 34])
for n_gen in range(0, num_gens):
element_list = (generation == n_gen + 1)
diam_list = np.extract(element_list, diam)
len_list = np.extract(element_list, length)
# account for zero diameters
diam_bool = diam_list > 0
len_bool = len_list > 0
list = np.logical_and(diam_bool, len_bool)
diam_list = diam_list[list]
len_list = len_list[list]
# assign stats for each order
values_by_gen[n_gen, 0] = n_gen + 1 # order
values_by_gen[n_gen, 1] = len(np.extract(element_list, element_list)) # number of branches
values_by_gen[n_gen, 2] = np.mean(np.extract(element_list, length)) # length
values_by_gen[n_gen, 3] = np.std(np.extract(element_list, length)) # length std
values_by_gen[n_gen, 4] = np.mean(diam_list) # diameter
values_by_gen[n_gen, 5] = np.std(diam_list) # diameter std
values_by_gen[n_gen, 6] = np.mean(np.extract(element_list, euclid_length)) # euclidean length
values_by_gen[n_gen, 7] = np.std(np.extract(element_list, euclid_length)) # euclidean length std
values_by_gen[n_gen, 8] = np.mean(len_list / diam_list) # length / diameter
values_by_gen[n_gen, 9] = np.std(len_list / diam_list) # length / diameter std
values_by_gen[n_gen, 10] = np.mean(
np.extract(element_list, length) / np.extract(element_list, euclid_length)) # tortuosity
values_by_gen[n_gen, 11] = np.std(
np.extract(element_list, length) / np.extract(element_list, euclid_length)) # tortuosity
if n_gen > 0:
angle_list = np.extract(element_list, angles)
angle_list = angle_list[angle_list > 0]
if len(angle_list)>0:
values_by_gen[n_gen, 12] = np.mean(angle_list) # angles
values_by_gen[n_gen, 13] = np.std(angle_list) # angles std
Minor_angle_list = np.extract(element_list, Minor_angle)
Minor_angle_list = Minor_angle_list[Minor_angle_list > 0]
Major_angle_list = np.extract(element_list, Major_angle)
Major_angle_list = Major_angle_list[Major_angle_list > 0]
if len(Minor_angle_list) > 0:
values_by_gen[n_gen, 14] = np.mean(Minor_angle_list) # minor angles
values_by_gen[n_gen, 15] = np.std(Minor_angle_list)
values_by_gen[n_gen, 16] = np.mean(Major_angle_list) # major angles
values_by_gen[n_gen, 17] = np.std(Major_angle_list)
lengthRatio = np.extract(element_list, length_ratio)
lengthRatio = lengthRatio[lengthRatio > 0]
L_min_parent_list = np.extract(element_list, L_min_parent)
L_min_parent_list = L_min_parent_list[L_min_parent_list > 0]
L_maj_parent_list = np.extract(element_list, L_maj_parent)
L_maj_parent_list = L_maj_parent_list[L_maj_parent_list > 0]
L_Major_Minor_list = np.extract(element_list, L_Major_Minor)
L_Major_Minor_list = L_Major_Minor_list[L_Major_Minor_list > 0]
if len(L_min_parent_list) > 0:
values_by_gen[n_gen, 18] = np.mean(lengthRatio) # len ratio
values_by_gen[n_gen, 19] = np.std(lengthRatio) # len ratio
values_by_gen[n_gen, 20] = np.mean(L_min_parent_list)
values_by_gen[n_gen, 21] = np.std(L_min_parent_list)
values_by_gen[n_gen, 22] = np.mean(L_maj_parent_list)
values_by_gen[n_gen, 23] = np.std(L_maj_parent_list)
values_by_gen[n_gen, 24] = np.mean(L_Major_Minor_list)
values_by_gen[n_gen, 25] = np.std(L_Major_Minor_list)
diamRatio = np.extract(element_list, diam_ratio)
diamRatio = diamRatio[diamRatio > 0]
D_min_parent_list = np.extract(element_list, D_min_parent)
D_min_parent_list = D_min_parent_list[D_min_parent_list > 0]
D_maj_parent_list = np.extract(element_list, D_maj_parent)
D_maj_parent_list = D_maj_parent_list[D_maj_parent_list > 0]
D_Major_Minor_list = np.extract(element_list, D_Major_Minor)
D_Major_Minor_list = D_Major_Minor_list[D_Major_Minor_list > 0]
if len(D_min_parent_list) > 0:
values_by_gen[n_gen, 26] = np.mean(diamRatio) # diam ratio
values_by_gen[n_gen, 27] = np.std(diamRatio) # diam std
values_by_gen[n_gen, 28] = np.mean(D_min_parent_list)
values_by_gen[n_gen, 29] = np.std(D_min_parent_list)
values_by_gen[n_gen, 30] = np.mean(D_maj_parent_list)
values_by_gen[n_gen, 31] = np.std(D_maj_parent_list)
values_by_gen[n_gen, 32] = np.mean(D_Major_Minor_list)
values_by_gen[n_gen, 33] = np.std(D_Major_Minor_list)
# statistics independent of order
values_overall = np.zeros([1, 34])
element_list = (generation > 0)
diam_list = np.extract(element_list, diam)
len_list = np.extract(element_list, length)
len_list = len_list[diam_list > 0]
diam_list = diam_list[diam_list > 0]
angle_list = np.extract(element_list, angles)
angle_list = angle_list[angle_list > 0]
Minor_angle_list = np.extract(element_list, Minor_angle)
Minor_angle_list = Minor_angle_list[Minor_angle_list > 0]
Major_angle_list = np.extract(element_list, Major_angle)
Major_angle_list = Major_angle_list[Major_angle_list > 0]
L_min_parent_list = np.extract(element_list, L_min_parent)
L_min_parent_list = L_min_parent_list[L_min_parent_list > 0]
L_maj_parent_list = np.extract(element_list, L_maj_parent)
L_maj_parent_list = L_maj_parent_list[L_maj_parent_list > 0]
L_Major_Minor_list = np.extract(element_list, L_Major_Minor)
L_Major_Minor_list = L_Major_Minor_list[L_Major_Minor_list > 0]
D_min_parent_list = np.extract(element_list, D_min_parent)
D_min_parent_list = D_min_parent_list[D_min_parent_list > 0]
D_maj_parent_list = np.extract(element_list, D_maj_parent)
D_maj_parent_list = D_maj_parent_list[D_maj_parent_list > 0]
D_Major_Minor_list = np.extract(element_list, D_Major_Minor)
D_Major_Minor_list = D_Major_Minor_list[D_Major_Minor_list > 0]
# assign stats for each order
values_overall[0, 0] = -1
values_overall[0, 1] = len(np.extract(element_list, element_list)) # number of branches
values_overall[0, 2] = np.mean(len_list) # length
values_overall[0, 3] = np.std(len_list) # length std
values_overall[0, 4] = np.mean(diam_list) # diameter
values_overall[0, 5] = np.std(diam_list) # diameter std
values_overall[0, 6] = np.mean(np.extract(element_list, euclid_length)) # euclidean length
values_overall[0, 7] = np.std(np.extract(element_list, euclid_length)) # euclidean length std
values_overall[0, 8] = np.mean(len_list / diam_list) # length / diameter
values_overall[0, 9] = np.std(len_list / diam_list) # length / diameter std
values_overall[0, 10] = np.mean(
np.extract(element_list, length) / np.extract(element_list, euclid_length)) # tortuosity
values_overall[0, 11] = np.std(
np.extract(element_list, length) / np.extract(element_list, euclid_length)) # tortuosity
values_overall[0, 12] = np.mean(angle_list) # angles
values_overall[0, 13] = np.std(angle_list) # angles std
values_overall[0, 14] = np.mean(Minor_angle_list) # minor angles
values_overall[0, 15] = np.std(Minor_angle_list)
values_overall[0, 16] = np.mean(Major_angle_list) # major angles
values_overall[0, 17] = np.std(Major_angle_list)
lengthRatio = np.extract(element_list, length_ratio)
lengthRatio = lengthRatio[lengthRatio > 0]
values_overall[0, 18] = np.mean(lengthRatio) # len ratio
values_overall[0, 19] = np.std(lengthRatio) # len ratio
values_overall[0, 20] = np.mean(L_min_parent_list)
values_overall[0, 21] = np.std(L_min_parent_list)
values_overall[0, 22] = np.mean(L_maj_parent_list)
values_overall[0, 23] = np.std(L_maj_parent_list)
values_overall[0, 24] = np.mean(L_Major_Minor_list)
values_overall[0, 25] = np.std(L_Major_Minor_list)
diamRatio = np.extract(element_list, diam_ratio)
diamRatio = diamRatio[diamRatio > 0]
values_overall[0, 26] = np.mean(diamRatio) # diam ratio
values_overall[0, 27] = np.std(diamRatio) # diam std
values_overall[0, 28] = np.mean(D_min_parent_list)
values_overall[0, 29] = np.std(D_min_parent_list)
values_overall[0, 30] = np.mean(D_maj_parent_list)
values_overall[0, 31] = np.std(D_maj_parent_list)
values_overall[0, 32] = np.mean(D_Major_Minor_list)
values_overall[0, 33] = np.std(D_Major_Minor_list)
# 'LLparent', 'std', 'LminLparent', 'std', 'LmajLparent', 'std', 'LminLmaj', 'std', 'DDparent', 'std','DminDparent', 'std','DmajDparent', 'std','DminDmaj', 'std']
print('\n')
print('Statistics By Generation: ')
print('..................')
print(
' Gen | Num | L | L(std) | D | D(std) | LEuc |LEuc(std)| L_D | L_D(std)| Tort |Tort(std)| Ang | Ang(std)| Amin |Amin(std)| Amaj |Amaj(std)|')
for n_gen in range(0, num_gens):
print (
' %7i | %7i | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f |' % (
values_by_gen[n_gen, 0], values_by_gen[n_gen, 1],
values_by_gen[n_gen, 2], values_by_gen[n_gen, 3],
values_by_gen[n_gen, 4], values_by_gen[n_gen, 5],
values_by_gen[n_gen, 6], values_by_gen[n_gen, 7],
values_by_gen[n_gen, 8], values_by_gen[n_gen, 9],
values_by_gen[n_gen, 10], values_by_gen[n_gen, 11],
values_by_gen[n_gen, 12], values_by_gen[n_gen, 13],
values_by_gen[n_gen, 14], values_by_gen[n_gen, 15],
values_by_gen[n_gen, 16], values_by_gen[n_gen, 17]))
print('------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
print (' OVERALL | %7i | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f |' % (
values_overall[0, 1], values_overall[0, 2], values_overall[0, 3],
values_overall[0, 4], values_overall[0, 5],
values_overall[0, 6], values_overall[0, 7],
values_overall[0, 8], values_overall[0, 9],
values_overall[0, 10], values_overall[0, 11],
values_overall[0, 12], values_overall[0, 13],
values_overall[0, 14], values_overall[0, 15],
values_overall[0, 16], values_overall[0, 17]))
print('..................')
# 'DDparent', 'std','DminDparent', 'std','DmajDparent', 'std','DminDmaj', 'std']
print('\n')
print('Statistics By Generation: ')
print('..................')
print(
' Gen | L_Lp |L_Lp(std)| Lmin_Lp | std | Lmaj_Lp | std |Lmin_Lmaj| std | D_Dp | std | Dmin_Dp | std | Dmaj_Dp | std |Dmin_Dmaj| std |')
for n_gen in range(0, num_gens):
print (
' %7i | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f |' % (
values_by_gen[n_gen, 0], values_by_gen[n_gen, 18],
values_by_gen[n_gen, 19], values_by_gen[n_gen, 20],
values_by_gen[n_gen, 21], values_by_gen[n_gen, 22],
values_by_gen[n_gen, 23], values_by_gen[n_gen, 24],
values_by_gen[n_gen, 25], values_by_gen[n_gen, 26],
values_by_gen[n_gen, 27], values_by_gen[n_gen, 28],
values_by_gen[n_gen, 29], values_by_gen[n_gen, 30],
values_by_gen[n_gen, 31], values_by_gen[n_gen, 32],
values_by_gen[n_gen, 33]))
print(
'--------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
print (
' OVERALL | %7i | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f |' % (
values_overall[0, 18], values_overall[0, 19], values_overall[0, 20],
values_overall[0, 21], values_overall[0, 22],
values_overall[0, 23], values_overall[0, 24],
values_overall[0, 25], values_overall[0, 26],
values_overall[0, 27], values_overall[0, 28],
values_overall[0, 29], values_overall[0, 30],
values_overall[0, 31], values_overall[0, 32],
values_overall[0, 33]))
print('-------------')
print(' ||||| ')
print(' \ ( ) ')
print(' ---|--- ')
print(' | \ ')
print(' | ')
print(' / \ ')
print(' / \ ')
print('-------------')
return np.concatenate((values_by_gen, values_overall),0)
def major_minor(geom, elem_down):
    """
    Find the Major/Minor ratios of length, diameter and branch angle.

    Inputs:
    - geom: dict with per-element arrays 'radii', 'branch angles' and 'length'
    - elem_down: array where column 0 holds the number of downstream (child)
      elements and columns 1.. hold the child element indices
    Outputs:
    - dict of per-element minor/major angles and length/diameter ratios
      ('Minor_angle', 'Major_angle', 'D_maj_min', 'D_min_P', 'D_maj_P',
      'L_maj_min', 'L_min_P', 'L_maj_P'); entries stay -1 where the element
      is not a valid bifurcation (fewer than two distinct daughters)
    """
    # extract data
    radii = geom['radii']
    angles = geom['branch angles']
    length = geom['length']
    # create arrays, initialised to -1 meaning "not a bifurcation"
    Ne = len(elem_down)
    Minor_angle = -1 * np.ones(Ne)
    Major_angle = -1 * np.ones(Ne)
    D_Major_Minor = -1 * np.ones(Ne)
    D_min_parent = -1 * np.ones(Ne)
    D_maj_parent = -1 * np.ones(Ne)
    L_Major_Minor = -1 * np.ones(Ne)
    L_min_parent = -1 * np.ones(Ne)
    L_maj_parent = -1 * np.ones(Ne)
    for i in range(0, Ne):
        numDown = elem_down[i, 0]
        if numDown > 1:  # this element has multiple children; find minor / major child
            d_min = 100000
            d_max = 0
            for j in range(1, numDown + 1):  # look through children and find widest & thinnest one
                # np.int was removed in NumPy 1.24; the builtin int is equivalent here
                child = int(elem_down[i, j])
                d_child = radii[child]
                if d_child >= d_max:
                    d_max = d_child
                    daughter_max = child
                if d_child < d_min:
                    d_min = d_child
                    daughter_min = child
            if daughter_max != daughter_min:  # ensure two distinct daughters
                Minor_angle[i] = angles[daughter_min]
                Major_angle[i] = angles[daughter_max]
                if radii[daughter_min] != 0:  # avoid divide by zero errors
                    D_Major_Minor[i] = radii[daughter_max] / radii[daughter_min]
                if radii[i] != 0:
                    D_min_parent[i] = radii[daughter_min] / radii[i]
                    D_maj_parent[i] = radii[daughter_max] / radii[i]
                if length[daughter_min] != 0:
                    L_Major_Minor[i] = length[daughter_max] / length[daughter_min]
                if length[i] != 0:
                    L_min_parent[i] = length[daughter_min] / length[i]
                    L_maj_parent[i] = length[daughter_max] / length[i]
    return {'Minor_angle': Minor_angle, 'Major_angle': Major_angle, 'D_maj_min': D_Major_Minor, 'D_min_P': D_min_parent,'D_maj_P': D_maj_parent, 'L_maj_min': L_Major_Minor, 'L_min_P': L_min_parent,'L_maj_P': L_maj_parent}
#Unused function, no documentation
#def mapping_fields_from_data(datapoints,rectangular_mesh,field1, field2, field3):
# data_elems = np.zeros(len(datapoints), dtype=int)
# data_fields = np.zeros((len(datapoints),3))
# gr = pg_utilities.samp_gr_for_node_loc(rectangular_mesh)
# for nt in range(0,len(datapoints)):
# data_elems[nt] = pg_utilities.locate_node(gr[0], gr[1], gr[2], gr[3], gr[4], gr[5], gr[6], gr[7], gr[8],
# datapoints[nt][:])
# data_fields[nt,0]= field1[data_elems[nt]]
# data_fields[nt,1] = field2[data_elems[nt]]
# data_fields[nt, 2] = field3[data_elems[nt]]
#
#
# return data_fields
def mapping_mesh_sampl_gr(mesh_node_elems, non_empty_rects, conductivity, porosity, export, exportfile):
    """Map the conductivity and porosity value of mesh node with sampling grid element
    Inputs are:
     - mesh_node_elems: array showing where darcy nodes are located inside the sampling grid
     - non_empty_rects: array of non empty sampling grid element
     - conductivity: conductivity of non-empty sampling grid element
     - porosity: porosity of non-empty sampling grid element
     - export: if True, write "node conductivity porosity" lines to exportfile
     - exportfile: path of the file written when export is True
    Return:
     - mapped_con_por: mapped value of conductivity and porosity of each darcy mesh node"""
    mapped_con_por = np.zeros((len(mesh_node_elems), 3)).astype(object)
    mapped_con_por[:, 0] = mapped_con_por[:, 0].astype(int)
    if export:
        f = open(exportfile, 'w')
    for el in range(0, len(mesh_node_elems)):
        mapped_con_por[el, 0] = el + 1  # 1-based node number
        # Indices (into non_empty_rects) of sampling-grid elements matching this node's element.
        # NOTE: the previous truthiness test `if np.argwhere(...)` was a bug: a single match
        # at index 0 evaluated falsy (node wrongly treated as empty) and multiple matches
        # raised "truth value of an array is ambiguous".
        matches = np.argwhere(non_empty_rects == mesh_node_elems[el][1])
        if len(matches) > 0:
            mapped_con_por[el, 1] = conductivity[matches[0, 0]]
            mapped_con_por[el, 2] = porosity[matches[0, 0]]
        else:  # node sits right on surface, assume empty
            mapped_con_por[el, 1] = 0.52
            mapped_con_por[el, 2] = 1.0
        if export:
            f.write("%s %s %s\n" % (mesh_node_elems[el][0], mapped_con_por[el, 1], mapped_con_por[el, 2]))
    if export:
        f.close()
    return mapped_con_por
##Unused function, no documentation
#def map_mesh_terminals(mesh_nodes, terminal_nodes, branch_nodes, export, exportfile):
# node_info = np.zeros((len(mesh_nodes), 2), dtype=int)
# for nnod in terminal_nodes:
# min_distance = 10000
# for i in range(0, len(mesh_nodes)):
# distance = np.sqrt((mesh_nodes[i][1] - branch_nodes[nnod][1]) ** 2.0 + (
# mesh_nodes[i][2] - branch_nodes[nnod][2]) ** 2.0 + (
# mesh_nodes[i][3] - branch_nodes[nnod][3]) ** 2.0)
# if (distance < min_distance):
# min_distance = distance
# close_node = int(mesh_nodes[i][0])
# node_info[close_node - 1][1] = node_info[close_node - 1][1] + 1
# if (export):
# f = open(exportfile, 'w')
# for i in range(0, len(mesh_nodes)):
# node_info[i][0] = int(mesh_nodes[i][0])
# if (export):
# f.write("%s %s\n" % (node_info[i][0], node_info[i][1]))
# if (export):
# f.close()
def node_in_sampling_grid(rectangular_mesh, mesh_node_loc):
    """Locate where the 3D mesh nodes are located inside the sampling grid mesh
    Inputs are:
     - rectangular_mesh: rectangular sampling grid mesh
     - mesh_node_loc: node locations of mesh (column 0 node number, columns 1-3 coords)
    Return:
     - mesh_node_elems: array pairing each mesh node number with the sampling grid
       element that contains it
    """
    node_elem_map = np.zeros((len(mesh_node_loc), 2), dtype=int)
    grid = pg_utilities.samp_gr_for_node_loc(rectangular_mesh)
    for idx, node_row in enumerate(mesh_node_loc):
        location = node_row[1:4]
        host_elem = pg_utilities.locate_node(grid[0], grid[1], grid[2], grid[3], grid[4],
                                             grid[5], grid[6], grid[7], grid[8], location)
        node_elem_map[idx][0] = int(node_row[0])
        node_elem_map[idx][1] = host_elem  # record which element the darcy node is in
    return node_elem_map
def porosity(vol_frac):
    """ Calculate porosity (the tissue-free fraction)
    Input is:
     - vol_frac: volume fraction of element (scalar or array)
    Return:
     - porosity: porosity of element, i.e. 1 - vol_frac (same shape as input)
    """
    # The previous np.zeros(len(vol_frac)) pre-allocation was dead code (immediately
    # overwritten); returning directly also generalises to scalar inputs.
    return 1 - vol_frac
def summary_statistics(branchGeom, geom, orders, major_minor_results,ordering_system):
    """Print summary tables of branching-tree statistics and return them as one array.

    Inputs:
    - branchGeom: per-branch dict ('radii', 'length', 'euclidean length', 'order',
      'branch_angles', 'length ratio', 'diam ratio')
    - geom: per-segment dict ('radii', 'length', 'euclidean length', 'branch angles',
      'diam_ratio', 'length_ratio')
    - orders: dict with per-segment 'strahler' and 'generation' arrays
    - major_minor_results: output dict of major_minor() (minor/major angles and ratios)
    - ordering_system: label printed above the by-order table
    Returns:
    - array of shape (num_orders + 1, 20): one statistics row per order followed by
      an overall row whose order column is -1
    Side effects: prints several formatted tables and segment statistics to stdout.
    """
    # branch inputs
    branchDiam = 2 * branchGeom['radii']
    branchLen = branchGeom['length']
    branchEucLen = branchGeom['euclidean length']
    branchOrder = branchGeom['order']
    branchAngles = branchGeom['branch_angles']
    branchLenRatio = branchGeom['length ratio']
    branchDiamRatio = branchGeom['diam ratio']
    # statistics by order
    num_orders = int(max(branchOrder))
    values_by_order = np.zeros([num_orders, 20])
    for n_ord in range(0, num_orders):
        # boolean mask of branches belonging to order n_ord + 1
        branch_list = (branchOrder == n_ord + 1)
        diam_list = np.extract(branch_list, branchDiam)
        len_list = np.extract(branch_list, branchLen)
        # account for zero diameters
        diam_bool = diam_list > 0
        len_bool = len_list > 0
        # NOTE(review): 'list' shadows the builtin; left unchanged here
        list = np.logical_and(diam_bool, len_bool)
        diam_list = diam_list[list]
        len_list = len_list[list]
        # assign stats for each order
        values_by_order[n_ord, 0] = n_ord + 1  # order
        values_by_order[n_ord, 1] = len(np.extract(branch_list, branch_list))  # number of branches
        values_by_order[n_ord, 2] = np.mean(np.extract(branch_list, branchLen))  # length
        values_by_order[n_ord, 3] = np.std(np.extract(branch_list, branchLen))  # length std
        values_by_order[n_ord, 4] = np.mean(diam_list)  # diameter
        values_by_order[n_ord, 5] = np.std(diam_list)  # diameter std
        values_by_order[n_ord, 6] = np.mean(np.extract(branch_list, branchEucLen))  # euclidean length
        values_by_order[n_ord, 7] = np.std(np.extract(branch_list, branchEucLen))  # euclidean length std
        values_by_order[n_ord, 8] = np.mean(len_list / diam_list)  # length / diameter
        values_by_order[n_ord, 9] = np.std(len_list / diam_list)  # length / diameter std
        values_by_order[n_ord, 10] = np.mean(
            np.extract(branch_list, branchLen) / np.extract(branch_list, branchEucLen))  # tortuosity
        values_by_order[n_ord, 11] = np.std(
            np.extract(branch_list, branchLen) / np.extract(branch_list, branchEucLen))  # tortuosity
        # angle and parent-ratio statistics are skipped for the highest order
        # (presumably the root, where no parent exists -- confirm)
        if n_ord < num_orders - 1:
            angle_list = np.extract(branch_list, branchAngles)
            angle_list = angle_list[angle_list > 0]
            values_by_order[n_ord, 12] = np.mean(angle_list)  # angles
            values_by_order[n_ord, 13] = np.std(angle_list)  # angles std
            lengthRatio = np.extract(branch_list, branchLenRatio)
            lengthRatio = lengthRatio[lengthRatio > 0]
            values_by_order[n_ord, 14] = np.mean(lengthRatio)  # len ratio
            values_by_order[n_ord, 15] = np.std(lengthRatio)  # len ratio
            diamRatio = np.extract(branch_list, branchDiamRatio)
            diamRatio = diamRatio[diamRatio > 0]
            values_by_order[n_ord, 16] = np.mean(diamRatio)  # diam ratio
            values_by_order[n_ord, 17] = np.std(diamRatio)  # diam std
        # NOTE(review): for n_ord == 0 this reads values_by_order[-1, 1] (the last,
        # still-zero row), so the first order's "bifurcation ratio" is 0/N; it would
        # also divide by zero if an order had no branches -- confirm intended.
        values_by_order[n_ord, 18] = values_by_order[n_ord-1, 1]/values_by_order[n_ord, 1] # Bifurcation ratio
        values_by_order[n_ord, 19] = np.sum(np.square(diam_list)*np.pi/4 )  # Total CSA
    # print table
    # 'header' feeds the commented-out tabulate call below; currently unused
    header = ['LenRatio', 'std', 'DiamRatio', 'std','Bifurcation Ratio','TotalCSA']
    print('\n')
    print('Statistics By Order: ')
    print(ordering_system)
    print('..................')
    print(
        '  Order  |   num   |    L    | L(std)  |    D    | D(std)  |   LEuc  |LEuc(std)|   L_D   | L_D(std)|   Tort  |Tort(std)|   Ang   | Ang(std)|   Rl    | Rl(std) |    Rd   | Rd(std)|   Rb   |   CSA  ')
    for n_ord in range(0, num_orders):
        print(' %7i | %7i | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f |%7.4f | %7.4f | %7.4f |'%(values_by_order[n_ord,0],
                                values_by_order[n_ord, 1],values_by_order[n_ord,2],
                                values_by_order[n_ord, 3],values_by_order[n_ord,4],
                                values_by_order[n_ord, 5], values_by_order[n_ord,6],
                                values_by_order[n_ord, 7], values_by_order[n_ord, 8],
                                values_by_order[n_ord, 9], values_by_order[n_ord, 10],
                                values_by_order[n_ord, 11], values_by_order[n_ord, 12],
                                values_by_order[n_ord, 13], values_by_order[n_ord, 14],
                                values_by_order[n_ord, 15], values_by_order[n_ord, 16],
                                values_by_order[n_ord, 17],values_by_order[n_ord, 18],
                                values_by_order[n_ord, 19]))
    print('-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
    #           ' %7i |  %7.4f | %7.4f
    # print(tabulate(values_by_order, headers=header))
    # statistics independent of order
    values_overall = np.zeros([1, 20])
    branch_list = (branchOrder > 0)
    diam_list = np.extract(branch_list, branchDiam)
    len_list = np.extract(branch_list, branchLen)
    len_list = len_list[diam_list > 0]
    diam_list = diam_list[diam_list > 0]
    angle_list = np.extract(branch_list, branchAngles)
    angle_list = angle_list[angle_list > 0]
    # assign stats for each order
    values_overall[0, 0] = -1
    values_overall[0, 1] = len(np.extract(branch_list, branch_list))  # number of branches
    values_overall[0, 2] = np.mean(len_list)  # length
    values_overall[0, 3] = np.std(len_list)  # length std
    values_overall[0, 4] = np.mean(diam_list)  # diameter
    values_overall[0, 5] = np.std(diam_list)  # diameter std
    values_overall[0, 6] = np.mean(np.extract(branch_list, branchEucLen))  # euclidean length
    values_overall[0, 7] = np.std(np.extract(branch_list, branchEucLen))  # euclidean length std
    values_overall[0, 8] = np.mean(len_list / diam_list)  # length / diameter
    values_overall[0, 9] = np.std(len_list / diam_list)  # length / diameter std
    values_overall[0, 10] = np.mean(
        np.extract(branch_list, branchLen) / np.extract(branch_list, branchEucLen))  # tortuosity
    values_overall[0, 11] = np.std(
        np.extract(branch_list, branchLen) / np.extract(branch_list, branchEucLen))  # tortuosity
    values_overall[0, 12] = np.mean(angle_list)  # angles
    values_overall[0, 13] = np.std(angle_list)  # angles std
    lengthRatio = np.extract(branch_list, branchLenRatio)
    lengthRatio = lengthRatio[lengthRatio > 0]
    values_overall[0, 14] = np.mean(lengthRatio)  # len ratio
    values_overall[0, 15] = np.std(lengthRatio)  # len ratio
    diamRatio = np.extract(branch_list, branchDiamRatio)
    diamRatio = diamRatio[diamRatio > 0]
    values_overall[0, 16] = np.mean(diamRatio)  # diam ratio
    values_overall[0, 17] = np.std(diamRatio)  # diam std
    # overall bifurcation ratio averages the per-order ratios, skipping row 0
    # (whose ratio uses the wrapped index noted above)
    values_overall[0, 18] = np.mean(values_by_order[1:num_orders, 18])  # Bifurcation ratio
    print (
        ' OVERALL | %7i | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f | %7.4f |' % (
            values_overall[0, 1], values_overall[0, 2], values_overall[0, 3],
            values_overall[0, 4], values_overall[0, 5],
            values_overall[0, 6], values_overall[0, 7],
            values_overall[0, 8], values_overall[0, 9],
            values_overall[0, 10], values_overall[0, 11],
            values_overall[0, 12], values_overall[0, 13],
            values_overall[0, 14], values_overall[0, 15],
            values_overall[0, 16],values_overall[0, 17],values_overall[0, 18]))
    print('-------------')
    print('     |||||     ')
    print('    (  )   /   ')
    print('     ---|---   ')
    print('    /   |      ')
    print('        |      ')
    print('       / \     ')
    print('      /   \    ')
    print('-------------')
    # unpack inputs (per-segment arrays)
    strahler = orders['strahler']
    generation = orders['generation']
    diam = 2*geom['radii']
    length = geom['length']
    # length2 is length filtered by the positive-diameter mask (for L/D stats)
    length2 = length[(diam > 0)]
    diam = diam[(diam > 0)]
    euclid_length = geom['euclidean length']
    angles = geom['branch angles']
    angles = angles[angles > 0]  # get rid of first elem
    diam_ratio = geom['diam_ratio']
    diam_ratio = diam_ratio[(diam_ratio > 0)]
    length_ratio = geom['length_ratio']
    length_ratio = length_ratio[(length_ratio > 0)]
    # unpack inputs (major/minor daughter statistics; -1 entries are filtered out)
    Minor_angle = major_minor_results['Minor_angle']
    Minor_angle = Minor_angle[Minor_angle > 0]
    Major_angle = major_minor_results['Major_angle']
    Major_angle = Major_angle[Major_angle > 0]
    D_Major_Minor = major_minor_results['D_maj_min']
    D_Major_Minor = D_Major_Minor[D_Major_Minor > 0]
    D_min_parent = major_minor_results['D_min_P']
    D_min_parent = D_min_parent[(D_min_parent > 0)]
    D_maj_parent = major_minor_results['D_maj_P']
    D_maj_parent = D_maj_parent[(D_maj_parent > 0)]
    L_Major_Minor = major_minor_results['L_maj_min']
    L_Major_Minor = L_Major_Minor[L_Major_Minor > 0]
    L_min_parent = major_minor_results['L_min_P']
    L_min_parent = L_min_parent[(L_min_parent > 0)]
    L_maj_parent = major_minor_results['L_maj_P']
    L_maj_parent = L_maj_parent[(L_maj_parent > 0)]
    # Segment statistics
    print('Segment statistics: ')
    print('..................')
    print('Num Segments = ' + str(len(strahler)))
    print('Total length = ' + str(np.sum(branchGeom['length'])) + ' mm')
    print('Num generations = ' + str(max(generation)))
    print('Num Strahler Orders = ' + str(max(strahler)))
    terminalGen = generation[(strahler == 1)]
    print('Average Terminal generation (std) = ' + str(np.mean(terminalGen)) + ' (' + str(np.std(terminalGen)) + ')')
    print('Segment Tortuosity = ' + str(np.mean(length / euclid_length)) + ' (' + str(
        np.std(length / euclid_length)) + ')')
    print('Average Length (std) = ' + str(np.mean(length)) + ' (' + str(np.std(length)) + ')')
    print('Average Euclidean Length (std) = ' + str(np.mean(euclid_length)) + ' (' + str(np.std(euclid_length)) + ')')
    print('Average Diameter (std) = ' + str(np.mean(diam)) + ' (' + str(np.std(diam)) + ')')
    print('Average L/D (std) = ' + str(np.mean(length2/diam)) + ' (' + str(np.std(length2/diam)) + ')')  ########
    print('Segment Angles = ' + str(np.mean(angles)) + ' (' + str(np.std(angles)) + ')')
    print('    Minor Angle = ' + str(np.mean(Minor_angle)) + ' (' + str(np.std(Minor_angle)) + ')')
    print('    Major Angle = ' + str(np.mean(Major_angle)) + ' (' + str(np.std(Major_angle)) + ')')
    print('D/Dparent = ' + str(np.mean(diam_ratio)) + ' (' + str(np.std(diam_ratio)) + ')')
    print('    Dmin/Dparent = ' + str(np.mean(D_min_parent)) + ' (' + str(np.std(D_min_parent)) + ')')
    print('    Dmaj/Dparent = ' + str(np.mean(D_maj_parent)) + ' (' + str(np.std(D_maj_parent)) + ')')
    print('    Dmaj/Dmin = ' + str(np.mean(D_Major_Minor)) + ' (' + str(np.std(D_Major_Minor)) + ')')
    print('L/Lparent = ' + str(np.mean(length_ratio)) + ' (' + str(np.std(length_ratio)) + ')')
    print('    Lmin/Lparent = ' + str(np.mean(L_min_parent)) + ' (' + str(np.std(L_min_parent)) + ')')
    print('    Lmaj/Lparent = ' + str(np.mean(L_maj_parent)) + ' (' + str(np.std(L_maj_parent)) + ')')
    print('    Lmaj/Lmin = ' + str(np.mean(L_Major_Minor)) + ' (' + str(np.std(L_Major_Minor)) + ')')
    print('\n')
    # Find Strahler Ratios: Rb, Rl, Rd (fitted across orders; r2 is the fit quality)
    Num_Branches = values_by_order[:, 1]
    Diameter_strahler = values_by_order[:, 4]
    Length_strahler = values_by_order[:, 2]
    Orders_strahler = values_by_order[:, 0]
    print('Branching/length/diameter ratios: ')
    print('..................................')
    [Rb, r2] = pg_utilities.find_strahler_ratio(Orders_strahler, Num_Branches)
    print('Rb = ' + str(Rb) + ' Rsq = ' + str(r2))
    [Rd, r2] = pg_utilities.find_strahler_ratio(Orders_strahler, Diameter_strahler)
    print('Rd = ' + str(Rd) + ' Rsq = ' + str(r2))
    [Rl, r2] = pg_utilities.find_strahler_ratio(Orders_strahler, Length_strahler)
    print('Rl = ' + str(Rl) + ' Rsq = ' + str(r2))
    print('-------------')
    print('     |||||     ')
    print('   \  (  )   /  ')
    print('     ---|---   ')
    print('        |      ')
    print('        |      ')
    print('       / \     ')
    print('      /   \    ')
    print('-------------')
    return np.concatenate((values_by_order, values_overall),0)
def smooth_on_sg(rectangular_mesh, non_empties, field):
    """Smooth a per-element field over the sampling grid.

    Element values are first averaged onto the nodes they share, then each
    non-empty element is assigned the mean of its eight node values.
    `field` is modified in place and also returned.
    """
    elems = rectangular_mesh['elems']
    total_nodes = rectangular_mesh['total_nodes']
    # column 0: accumulated field value; column 1: number of contributing elements
    node_field = np.zeros((total_nodes, 2))
    for ne in non_empties:
        for corner in range(1, 9):
            nnod = elems[ne][corner]
            node_field[nnod][0] += field[ne]
            node_field[nnod][1] += 1.0
    # turn accumulated sums into per-node averages
    for nnod in range(0, total_nodes):
        if node_field[nnod][1] != 0.0:
            node_field[nnod][0] /= node_field[nnod][1]
    # map node averages back onto the elements
    for ne in non_empties:
        accum = 0.0
        for corner in range(1, 9):
            accum += node_field[elems[ne][corner]][0]
        field[ne] = accum / 8.0
    return field
def terminal_villous_diameter(num_int_gens, num_convolutes, len_int, rad_int, len_convolute, rad_convolute):
    """ Sums volume-weighted diameter contributions of a terminal villous unit.
    Follows the same branching layout as terminal_villous_volume: branch counts double
    for (num_int_gens + 2) generations, with num_convolutes terminal convolutes hanging
    off every intermediate villous beyond the first generation. Each branch contributes
    (cylinder volume) * (branch diameter); the caller divides the summed result by the
    total tissue volume to obtain a volume-weighted mean diameter.
    Inputs:
     - num_int_gens: Number of generations of intermediate villous per terminal 'stem' villus
     - num_convolutes: Number of terminal convolutes per intermediate villous
     - len_int: Length of a typical intermediate villous
     - rad_int: Radius of a typical intermediate villous
     - len_convolute: Length of a typical terminal convolute
     - rad_convolute: Radius of a typical terminal convolute
    Return:
     - term_vill_diameter: summed volume * diameter of the terminal conduits
       (NOT a diameter itself until normalised by the tissue volume)
    A way you might want to use me is:
    >>> num_int_gens = 3
    >>> num_convolutes = 10
    >>> len_int = 1.5 #mm
    >>> rad_int = 0.03 #mm
    >>> len_convolute = 3.0 #mm
    >>> rad_convolute = 0.025 #mm
    This returns term_vill_diameter ~= 0.1925 (the 0.09 figure quoted previously is
    the downstream volume-weighted diameter after normalisation, cf. the
    term_tissue_diam ~0.0901 used elsewhere in this file -- confirm).
    """
    num_ints = 1
    term_vill_diameter = 0.0
    for i in range(0, num_int_gens + 2):
        num_ints = num_ints * 2.0  # branch count doubles each generation
        # volume of this generation's intermediates, weighted by their diameter
        diameter_ints = num_ints * (np.pi * len_int * rad_int ** 2.0) * 2 * rad_int
        if i > 0:
            # convolutes hang off every intermediate beyond the first generation
            diameter_convolutes = num_ints * num_convolutes * (
                    np.pi * len_convolute * rad_convolute ** 2.0) * 2 * rad_convolute
        else:
            diameter_convolutes = 0.0
        term_vill_diameter = term_vill_diameter + diameter_ints + diameter_convolutes
    return term_vill_diameter
def terminals_in_sampling_grid_fast(rectangular_mesh, terminal_list, node_loc):
    """ Counts terminal branches per sampling grid element; only valid for a
    rectangular mesh created as in generate_shapes.gen_rectangular_mesh.
    Inputs are:
     - rectangular_mesh: the rectangular sampling grid
     - terminal_list: dict with 'total_terminals' and 'terminal_nodes'
     - node_loc: array of coordinates (locations) of tree branch nodes
    Return:
     - terminals_in_grid: per-element count of terminal branches
     - terminal_elems: for each terminal, the sampling grid element containing it
    """
    total_terminals = terminal_list['total_terminals']
    counts = np.zeros(len(rectangular_mesh['elems']), dtype=int)
    host_elems = np.zeros(total_terminals, dtype=int)
    grid = pg_utilities.samp_gr_for_node_loc(rectangular_mesh)
    for term in range(0, total_terminals):
        location = node_loc[terminal_list['terminal_nodes'][term]][1:4]
        host = pg_utilities.locate_node(grid[0], grid[1], grid[2], grid[3], grid[4],
                                        grid[5], grid[6], grid[7], grid[8], location)
        counts[host] += 1
        host_elems[term] = host  # record what element the terminal is in
    return {'terminals_in_grid': counts, 'terminal_elems': host_elems}
def terminals_in_sampling_grid(rectangular_mesh, placenta_list, terminal_list, node_loc):
    """ Counts the number of terminals in each sampling grid element for a general mesh.
    Inputs are:
     - rectangular_mesh: the rectangular sampling grid
     - placenta_list: sampling grid elements that lie inside the ellipsoid
     - terminal_list: dict with 'total_terminals' and 'terminal_nodes'
     - node_loc: array of coordinates (locations) of tree branch nodes
    Return:
     - terminals_in_grid: per-element count of terminal branches
     - terminal_elems: for each terminal, the sampling grid element containing it
       (0 if never mapped)
    """
    num_terminals = terminal_list['total_terminals']
    terminals_in_grid = np.zeros(len(rectangular_mesh['elems']), dtype=int)
    terminal_mapped = np.zeros(num_terminals, dtype=int)
    terminal_elems = np.zeros(num_terminals, dtype=int)
    for ne in placenta_list:
        if ne <= 0:  # assume no placental tissue in element 0
            continue
        # First node of the element holds min x,y,z; last node holds max x,y,z
        elem_nodes = rectangular_mesh['elems'][ne]
        min_coords = rectangular_mesh['nodes'][elem_nodes[1]][0:3]
        max_coords = rectangular_mesh['nodes'][elem_nodes[8]][0:3]
        for nt in range(0, num_terminals):
            if terminal_mapped[nt]:
                continue  # each terminal is assigned to at most one element
            coord = node_loc[terminal_list['terminal_nodes'][nt]][1:4]
            # half-open bounding-box test: min <= coord < max on every axis
            if all(min_coords[k] <= coord[k] < max_coords[k] for k in range(3)):
                terminals_in_grid[ne] += 1
                terminal_mapped[nt] = 1
                terminal_elems[nt] = ne
    return {'terminals_in_grid': terminals_in_grid, 'terminal_elems': terminal_elems}
def terminal_volume_to_grid(rectangular_mesh, terminal_list, node_loc, volume, thickness, ellipticity, term_total_vol,
                            term_tissue_vol, term_tissue_diam):
    """ Calculates the volume of terminal unit associated with each sampling grid element
    by depositing a cloud of sample points around each terminal node and binning the
    points that fall inside the placental ellipsoid into the sampling grid.
    Inputs are:
     - rectangular_mesh: the rectangular sampling grid
     - terminal_list: a list of terminal branches
     - node_loc: array of coordinates (locations) of nodes of tree branches
     - volume: volume of placental ellipsoid
     - thickness: thickness of placental ellipsoid
     - ellipticity: ellipticity of placental ellipsoid
     - term_total_vol: total volume of terminal villus (sets the spatial spread of points)
     - term_tissue_vol: volume of terminal conduits (distributed over the points)
     - term_tissue_diam: weighted diameter of terminal conduits
    Return:
     - term_vol_in_grid: terminal tissue volume deposited in each sampling grid element
     - term_diameter_in_grid: volume * 2 * diameter accumulated per element
    A way you might want to use me is:
    >>> node_loc =np.array([[ 0.,0.,0.,-1.,2.,0.,0.], [1.,0.,0.,-0.5,2.,0.,0.],[2.,0.,-0.5,0.,1.31578947,0.,0.],[3.,0.,0.5,0.,0.,0.,0.]])
    >>> rectangular_mesh = {}
    >>> rectangular_mesh['nodes'] = np.array([[-2., -2. ,-2.],[ 0. ,-2. ,-2.],[ 2. ,-2. ,-2.],[-2. , 0., -2.],[ 0. , 0. ,-2.],[ 2. , 0. ,-2.],[-2. ,-2. , 0.],[ 0. ,-2. , 0.],[ 2. ,-2. , 0.],[-2. , 0. ,0.],[ 0. , 0., 0.],[ 2., 0. , 0.],[-2. ,-2. , 2.],[ 0. ,-2. , 2.],[ 2., -2., 2.],[-2. , 0. , 2.],[ 0., 0. , 2.],[ 2. , 0., 2.]])
    >>> rectangular_mesh['elems'] = [[ 0,0,1,3,4,6,7,9,10],[ 1, 1,2,4,5,7,8,10,11],[2,6,7,9,10,12,13,15,16],[4,7,8,10,11,13,14,16,17]]
    >>> rectangular_mesh['total_elems'] = 4
    >>> terminal_list={}
    >>> terminal_list['total_terminals']=1
    >>> terminal_list['terminal_nodes']=[2]
    >>> volume=5
    >>> thickness=2.1
    >>> ellipticity=1
    >>> term_total_vol=0.04#artificial value to match with a smaller ellipsoid
    >>> term_tissue_vol=1.77657064561
    >>> term_tissue_diam=0.090100877305
    >>> terminal_volume_to_grid(rectangular_mesh, terminal_list, node_loc,volume, thickness, ellipticity,term_total_vol, term_tissue_vol, term_tissue_diam)
    This will return:
    >>> term_vol_in_grid[0]: 0.44414266
    >>> term_diameter_in_grid[0]: 0.08003529"""
    # Define the resolution of block for analysis (points per axis)
    num_points_xyz = 8
    # number of terminals to assess
    num_terminals = terminal_list['total_terminals']
    # Define information about sampling grid required to place data points in correct locations
    total_sample_elems = rectangular_mesh['total_elems']
    gr = pg_utilities.samp_gr_for_node_loc(rectangular_mesh)
    # Array for total volume and diameter of sampling grid in each element
    total_vol_samp_gr = np.zeros(total_sample_elems)
    total_diameter_samp_gr = np.zeros(total_sample_elems)
    # Define the placental ellipsoid
    radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)  # calculate radii of ellipsoid
    z_radius = radii['z_radius']
    x_radius = radii['x_radius']
    y_radius = radii['y_radius']
    term_vol_points = np.zeros((num_points_xyz * num_points_xyz * num_points_xyz, 3))
    # Define a regular lattice of points spanning the cube [-1, 1]^3
    # (original comment said "cylinder", but the triple loop below fills a cube)
    x = np.linspace(-1, 1, num_points_xyz)
    y = np.linspace(-1, 1, num_points_xyz)
    zlist = np.linspace(-1, 1, num_points_xyz)
    num_accepted = 0
    for k in range(0, num_points_xyz):
        for i in range(0, num_points_xyz):
            for j in range(0, num_points_xyz):
                new_z = zlist[k]
                term_vol_points[num_accepted][0] = x[i]
                term_vol_points[num_accepted][1] = y[j]
                term_vol_points[num_accepted][2] = new_z
                num_accepted = num_accepted + 1
    # num_accepted always equals num_points_xyz**3 here, so this resize is a no-op;
    # presumably kept from a version that rejected some points -- confirm
    term_vol_points.resize(num_accepted, 3, refcheck=False)
    # scale the lattice by the cube root of the terminal volume so the point cloud
    # spans a cube of comparable volume around each terminal
    term_vol_points = term_vol_points * term_total_vol ** (1.0 / 3.0)
    # each sample point carries an equal share of the terminal tissue volume
    vol_per_point = term_tissue_vol / (num_points_xyz * num_points_xyz * num_points_xyz)
    total_volume = 0.0
    for nt in range(0, num_terminals):
        coord_terminal = node_loc[terminal_list['terminal_nodes'][nt]][1:4]
        # translate the reference point cloud to be centred on this terminal
        local_term_points = np.copy(term_vol_points)
        local_term_points[:, 0] = local_term_points[:, 0] + coord_terminal[0]
        local_term_points[:, 1] = local_term_points[:, 1] + coord_terminal[1]
        local_term_points[:, 2] = local_term_points[:, 2] + coord_terminal[2]
        # Array for vol distribution of individual branch (not total)
        vol_distribution_each_br = np.zeros(total_sample_elems, dtype=float)
        for npoint in range(0, num_accepted):
            coord_point = local_term_points[npoint][0:3]
            # only deposit volume for points that lie within the placental ellipsoid
            inside = pg_utilities.check_in_on_ellipsoid(coord_point[0], coord_point[1], coord_point[2], x_radius,
                                                        y_radius, z_radius)
            if inside:
                nelem = pg_utilities.locate_node(gr[0], gr[1], gr[2], gr[3], gr[4], gr[5], gr[6], gr[7], gr[8],
                                                coord_point)
                total_vol_samp_gr[nelem] = total_vol_samp_gr[nelem] + vol_per_point
                # NOTE(review): total_volume is accumulated but never returned or read
                total_volume = total_volume + vol_per_point
                vol_distribution_each_br[nelem] = vol_distribution_each_br[nelem] + vol_per_point
        total_diameter_samp_gr = total_diameter_samp_gr + vol_distribution_each_br * 2 * term_tissue_diam
    return {'term_vol_in_grid': total_vol_samp_gr, 'term_diameter_in_grid': total_diameter_samp_gr}
def terminal_villous_volume(num_int_gens, num_convolutes, len_int, rad_int, len_convolute, rad_convolute,
                            smallest_radius):
    """Calculate the average tissue volume of a terminal villous from
    structural characteristics measured in the literature.

    Inputs:
    - num_int_gens: Number of generations of intermediate villi per terminal 'stem' villous
    - num_convolutes: Number of terminal convolutes per intermediate villous
    - len_int: Length of a typical intermediate villous
    - rad_int: Radius of a typical intermediate villous
    - len_convolute: Length of a typical terminal convolute
    - rad_convolute: Radius of a typical terminal convolute
    - smallest_radius: Minimum radius of a branch in your villous tree
    Returns a dictionary with:
    - 'volume': typical tissue volume of a terminal villous
    - 'diameter': accumulated volume-weighted diameter of the same structures
    - 'propterm': proportion of the volume contributed by terminal convolutes
    A way you might want to use me is:
    >>> terminal_villous_volume(3, 10, 1.5, 0.03, 3.0, 0.025, 0.03)
    using the normal average data from Leiser et al (1990, IBBN:3805554680).
    NOTE(review): with those example inputs the code as written returns a
    volume of ~3.80 mm^3 (the loop spans num_int_gens + 2 branching levels);
    earlier documentation quoted ~1.77 mm^3 — confirm the intended number of
    generations against the source data.
    """
    # Each terminal stem villous branches dichotomously: first to immature
    # intermediate villi, then through the mature intermediate generations,
    # giving num_int_gens + 2 levels in total.
    radius_inc = (smallest_radius - rad_int) / num_int_gens
    branch_count = 1
    total_volume = 0.0
    total_diameter = 0.0
    sum_vol_ints = 0.0
    sum_vol_conv = 0.0
    for level in range(num_int_gens + 2):
        branch_count = branch_count * 2.0
        # Radius is linearly interpolated from smallest_radius toward rad_int.
        level_radius = smallest_radius - level * radius_inc
        vol_ints = branch_count * np.pi * len_int * level_radius ** 2.0
        diam_ints = vol_ints * 2 * rad_int
        sum_vol_ints = sum_vol_ints + vol_ints
        if level == 0:
            # The first (immature) generation carries no terminal convolutes.
            vol_conv = 0.0
            diam_conv = 0.0
        else:
            vol_conv = branch_count * num_convolutes * np.pi * len_convolute * rad_convolute ** 2.0
            diam_conv = vol_conv * 2 * rad_convolute
            sum_vol_conv = sum_vol_conv + vol_conv
        total_volume = total_volume + vol_ints + vol_conv
        total_diameter = total_diameter + diam_ints + diam_conv
    proportion_terminal = sum_vol_conv / (sum_vol_conv + sum_vol_ints)
    return {'volume': total_volume, 'diameter': total_diameter, 'propterm': proportion_terminal}
def tissue_vol_in_samp_gr(term_vol_in_grid, br_vol_in_grid):
    """Return the total tissue volume of the tree (branches plus terminal conduits).
    Inputs are:
    - term_vol_in_grid: total volume of terminal conduits
    - br_vol_in_grid: total volume of branches before terminal conduits
    Return:
    - tissue_vol: total tissue volume of the whole tree
    A way you might want to use me is:
    >>> tissue_vol_in_samp_gr(0.444, 0.008)
    This will return:
    >>> 0.452
    """
    return term_vol_in_grid + br_vol_in_grid
def vol_frac_in_samp_gr(tissue_vol, sampling_grid_vol, max_allowed, min_allowed):
    """Calculate the tissue volume fraction of each sampling-grid element in
    which villous branches are located.
    Inputs are:
    - tissue_vol: tissue volume in each sampling grid element
    - sampling_grid_vol: dict with
        'pl_vol_in_grid': placental volume of each sampling grid element
        'non_empty_rects': indices of elements that contain placental tissue
    - max_allowed: upper clamp applied to each computed fraction
    - min_allowed: lower clamp applied to each computed fraction
    Return:
    - vol_frac: volume fraction of each sampling grid element (zero for
      elements that contain no placental tissue)
    A way you might want to use me:
    >>> tissue_vol = [0.453]
    >>> sampling_grid_vol = {}
    >>> sampling_grid_vol['non_empty_rects'] = [0]
    >>> sampling_grid_vol['pl_vol_in_grid'] = [0.625]
    >>> vol_frac_in_samp_gr(tissue_vol, sampling_grid_vol, 1.0, 0.0)
    This will return:
    >>> vol_frac: 0.7248
    """
    volumes = sampling_grid_vol['pl_vol_in_grid']
    non_empties = sampling_grid_vol['non_empty_rects']
    vol_frac = np.zeros(len(volumes))
    # Only elements that contain tissue get a fraction; the rest stay zero.
    for ne in non_empties:
        # Clamp the raw fraction into [min_allowed, max_allowed].
        vol_frac[ne] = min(max(tissue_vol[ne] / volumes[ne], min_allowed), max_allowed)
    return vol_frac
def weighted_diameter_in_samp_gr(term_diameter_in_grid, br_diameter_in_grid, tissue_vol):
    """ Calculate the volume-weighted diameter of each sampling grid element.
    Weighted_diameter each sampling grid = (d1*v1+d2*v2+d3*v3+...+dn*vn)/(v1+v2+v2+...+vn)
    (the numerator has already been accumulated into the two diameter inputs).
    Inputs are:
    - term_diameter_in_grid: volume-weighted diameter contribution of terminal conduits
    - br_diameter_in_grid: volume-weighted diameter contribution of branches in each sampling grid
    - tissue_vol: tissue volume in each sampling grid
    Return:
    - weighted_diameter: weighted diameter of each sampling grid element;
      elements with zero tissue volume produce 0/0 = nan, which is mapped to 0
    """
    tissue_diameter = br_diameter_in_grid + term_diameter_in_grid
    # Use a scoped errstate rather than np.seterr: the original seterr call
    # permanently switched the process-global NumPy error settings to
    # 'ignore', leaking that state to every later caller. errstate restores
    # the previous settings on exit while computing the same result.
    with np.errstate(divide='ignore', invalid='ignore'):
        weighted_diameter = np.nan_to_num(tissue_diameter / tissue_vol)
    return weighted_diameter
| [
"numpy.sum",
"numpy.nan_to_num",
"numpy.resize",
"numpy.maximum",
"numpy.double",
"numpy.ones",
"numpy.mean",
"numpy.linalg.norm",
"numpy.meshgrid",
"numpy.copy",
"numpy.extract",
"numpy.std",
"numpy.int",
"numpy.linspace",
"numpy.log10",
"numpy.arccos",
"numpy.trapz",
"numpy.squar... | [((2484, 2514), 'numpy.zeros', 'np.zeros', (['num_elems'], {'dtype': 'int'}), '(num_elems, dtype=int)\n', (2492, 2514), True, 'import numpy as np\n'), ((3015, 3037), 'numpy.zeros', 'np.zeros', (['num_branches'], {}), '(num_branches)\n', (3023, 3037), True, 'import numpy as np\n'), ((3054, 3076), 'numpy.zeros', 'np.zeros', (['num_branches'], {}), '(num_branches)\n', (3062, 3076), True, 'import numpy as np\n'), ((3096, 3118), 'numpy.zeros', 'np.zeros', (['num_branches'], {}), '(num_branches)\n', (3104, 3118), True, 'import numpy as np\n'), ((4797, 4890), 'numpy.column_stack', 'np.column_stack', (["[geom['radii'], geom['length'], geom['euclidean length'], geom['elems']]"], {}), "([geom['radii'], geom['length'], geom['euclidean length'],\n geom['elems']])\n", (4812, 4890), True, 'import numpy as np\n'), ((4899, 4921), 'numpy.copy', 'np.copy', (["geom['elems']"], {}), "(geom['elems'])\n", (4906, 4921), True, 'import numpy as np\n'), ((5138, 5155), 'numpy.zeros', 'np.zeros', (['[Ne, 6]'], {}), '([Ne, 6])\n', (5146, 5155), True, 'import numpy as np\n'), ((8734, 8765), 'numpy.flip', 'np.flip', (['elem_properties_new', '(0)'], {}), '(elem_properties_new, 0)\n', (8741, 8765), True, 'import numpy as np\n'), ((10335, 10365), 'numpy.zeros', 'np.zeros', (['num_elems'], {'dtype': 'int'}), '(num_elems, dtype=int)\n', (10343, 10365), True, 'import numpy as np\n'), ((10387, 10417), 'numpy.zeros', 'np.zeros', (['num_nodes'], {'dtype': 'int'}), '(num_nodes, dtype=int)\n', (10395, 10417), True, 'import numpy as np\n'), ((10742, 10780), 'numpy.resize', 'np.resize', (['terminal_branches', 'num_term'], {}), '(terminal_branches, num_term)\n', (10751, 10780), True, 'import numpy as np\n'), ((10802, 10837), 'numpy.resize', 'np.resize', (['terminal_nodes', 'num_term'], {}), '(terminal_nodes, num_term)\n', (10811, 10837), True, 'import numpy as np\n'), ((13560, 13619), 'numpy.zeros', 'np.zeros', (['(num_points_xy * num_points_xy * num_points_z, 3)'], {}), '((num_points_xy * 
num_points_xy * num_points_z, 3))\n', (13568, 13619), True, 'import numpy as np\n'), ((13687, 13720), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num_points_xy'], {}), '(-1, 1, num_points_xy)\n', (13698, 13720), True, 'import numpy as np\n'), ((13729, 13762), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num_points_xy'], {}), '(-1, 1, num_points_xy)\n', (13740, 13762), True, 'import numpy as np\n'), ((14332, 14356), 'numpy.copy', 'np.copy', (['unit_cyl_points'], {}), '(unit_cyl_points)\n', (14339, 14356), True, 'import numpy as np\n'), ((14378, 14403), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (14386, 14403), True, 'import numpy as np\n'), ((14622, 14650), 'numpy.zeros', 'np.zeros', (['total_sample_elems'], {}), '(total_sample_elems)\n', (14630, 14650), True, 'import numpy as np\n'), ((14811, 14839), 'numpy.zeros', 'np.zeros', (['total_sample_elems'], {}), '(total_sample_elems)\n', (14819, 14839), True, 'import numpy as np\n'), ((22454, 22475), 'numpy.zeros', 'np.zeros', (['total_elems'], {}), '(total_elems)\n', (22462, 22475), True, 'import numpy as np\n'), ((22496, 22528), 'numpy.zeros', 'np.zeros', (['total_elems'], {'dtype': 'int'}), '(total_elems, dtype=int)\n', (22504, 22528), True, 'import numpy as np\n'), ((27307, 27348), 'numpy.resize', 'np.resize', (['non_empty_loc', 'non_empty_count'], {}), '(non_empty_loc, non_empty_count)\n', (27316, 27348), True, 'import numpy as np\n'), ((31470, 31489), 'numpy.zeros', 'np.zeros', (['num_elems'], {}), '(num_elems)\n', (31478, 31489), True, 'import numpy as np\n'), ((32634, 32653), 'numpy.zeros', 'np.zeros', (['num_elems'], {}), '(num_elems)\n', (32642, 32653), True, 'import numpy as np\n'), ((33909, 33928), 'numpy.zeros', 'np.zeros', (['num_elems'], {}), '(num_elems)\n', (33917, 33928), True, 'import numpy as np\n'), ((41075, 41101), 'numpy.where', 'np.where', (['(elems == Nn_root)'], {}), '(elems == Nn_root)\n', (41083, 41101), True, 'import numpy as np\n'), ((43025, 
43049), 'numpy.zeros', 'np.zeros', (['[num_gens, 34]'], {}), '([num_gens, 34])\n', (43033, 43049), True, 'import numpy as np\n'), ((47985, 48002), 'numpy.zeros', 'np.zeros', (['[1, 34]'], {}), '([1, 34])\n', (47993, 48002), True, 'import numpy as np\n'), ((48056, 48086), 'numpy.extract', 'np.extract', (['element_list', 'diam'], {}), '(element_list, diam)\n', (48066, 48086), True, 'import numpy as np\n'), ((48103, 48135), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (48113, 48135), True, 'import numpy as np\n'), ((48234, 48266), 'numpy.extract', 'np.extract', (['element_list', 'angles'], {}), '(element_list, angles)\n', (48244, 48266), True, 'import numpy as np\n'), ((48335, 48372), 'numpy.extract', 'np.extract', (['element_list', 'Minor_angle'], {}), '(element_list, Minor_angle)\n', (48345, 48372), True, 'import numpy as np\n'), ((48458, 48495), 'numpy.extract', 'np.extract', (['element_list', 'Major_angle'], {}), '(element_list, Major_angle)\n', (48468, 48495), True, 'import numpy as np\n'), ((48582, 48620), 'numpy.extract', 'np.extract', (['element_list', 'L_min_parent'], {}), '(element_list, L_min_parent)\n', (48592, 48620), True, 'import numpy as np\n'), ((48710, 48748), 'numpy.extract', 'np.extract', (['element_list', 'L_maj_parent'], {}), '(element_list, L_maj_parent)\n', (48720, 48748), True, 'import numpy as np\n'), ((48839, 48878), 'numpy.extract', 'np.extract', (['element_list', 'L_Major_Minor'], {}), '(element_list, L_Major_Minor)\n', (48849, 48878), True, 'import numpy as np\n'), ((48971, 49009), 'numpy.extract', 'np.extract', (['element_list', 'D_min_parent'], {}), '(element_list, D_min_parent)\n', (48981, 49009), True, 'import numpy as np\n'), ((49099, 49137), 'numpy.extract', 'np.extract', (['element_list', 'D_maj_parent'], {}), '(element_list, D_maj_parent)\n', (49109, 49137), True, 'import numpy as np\n'), ((49228, 49267), 'numpy.extract', 'np.extract', (['element_list', 'D_Major_Minor'], {}), 
'(element_list, D_Major_Minor)\n', (49238, 49267), True, 'import numpy as np\n'), ((49522, 49539), 'numpy.mean', 'np.mean', (['len_list'], {}), '(len_list)\n', (49529, 49539), True, 'import numpy as np\n'), ((49577, 49593), 'numpy.std', 'np.std', (['len_list'], {}), '(len_list)\n', (49583, 49593), True, 'import numpy as np\n'), ((49636, 49654), 'numpy.mean', 'np.mean', (['diam_list'], {}), '(diam_list)\n', (49643, 49654), True, 'import numpy as np\n'), ((49694, 49711), 'numpy.std', 'np.std', (['diam_list'], {}), '(diam_list)\n', (49700, 49711), True, 'import numpy as np\n'), ((49952, 49981), 'numpy.mean', 'np.mean', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (49959, 49981), True, 'import numpy as np\n'), ((50030, 50058), 'numpy.std', 'np.std', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (50036, 50058), True, 'import numpy as np\n'), ((50383, 50402), 'numpy.mean', 'np.mean', (['angle_list'], {}), '(angle_list)\n', (50390, 50402), True, 'import numpy as np\n'), ((50441, 50459), 'numpy.std', 'np.std', (['angle_list'], {}), '(angle_list)\n', (50447, 50459), True, 'import numpy as np\n'), ((50502, 50527), 'numpy.mean', 'np.mean', (['Minor_angle_list'], {}), '(Minor_angle_list)\n', (50509, 50527), True, 'import numpy as np\n'), ((50572, 50596), 'numpy.std', 'np.std', (['Minor_angle_list'], {}), '(Minor_angle_list)\n', (50578, 50596), True, 'import numpy as np\n'), ((50625, 50650), 'numpy.mean', 'np.mean', (['Major_angle_list'], {}), '(Major_angle_list)\n', (50632, 50650), True, 'import numpy as np\n'), ((50695, 50719), 'numpy.std', 'np.std', (['Major_angle_list'], {}), '(Major_angle_list)\n', (50701, 50719), True, 'import numpy as np\n'), ((50739, 50777), 'numpy.extract', 'np.extract', (['element_list', 'length_ratio'], {}), '(element_list, length_ratio)\n', (50749, 50777), True, 'import numpy as np\n'), ((50853, 50873), 'numpy.mean', 'np.mean', (['lengthRatio'], {}), '(lengthRatio)\n', (50860, 50873), True, 'import numpy as np\n'), 
((50915, 50934), 'numpy.std', 'np.std', (['lengthRatio'], {}), '(lengthRatio)\n', (50921, 50934), True, 'import numpy as np\n'), ((50976, 51002), 'numpy.mean', 'np.mean', (['L_min_parent_list'], {}), '(L_min_parent_list)\n', (50983, 51002), True, 'import numpy as np\n'), ((51031, 51056), 'numpy.std', 'np.std', (['L_min_parent_list'], {}), '(L_min_parent_list)\n', (51037, 51056), True, 'import numpy as np\n'), ((51085, 51111), 'numpy.mean', 'np.mean', (['L_maj_parent_list'], {}), '(L_maj_parent_list)\n', (51092, 51111), True, 'import numpy as np\n'), ((51140, 51165), 'numpy.std', 'np.std', (['L_maj_parent_list'], {}), '(L_maj_parent_list)\n', (51146, 51165), True, 'import numpy as np\n'), ((51194, 51221), 'numpy.mean', 'np.mean', (['L_Major_Minor_list'], {}), '(L_Major_Minor_list)\n', (51201, 51221), True, 'import numpy as np\n'), ((51250, 51276), 'numpy.std', 'np.std', (['L_Major_Minor_list'], {}), '(L_Major_Minor_list)\n', (51256, 51276), True, 'import numpy as np\n'), ((51294, 51330), 'numpy.extract', 'np.extract', (['element_list', 'diam_ratio'], {}), '(element_list, diam_ratio)\n', (51304, 51330), True, 'import numpy as np\n'), ((51400, 51418), 'numpy.mean', 'np.mean', (['diamRatio'], {}), '(diamRatio)\n', (51407, 51418), True, 'import numpy as np\n'), ((51461, 51478), 'numpy.std', 'np.std', (['diamRatio'], {}), '(diamRatio)\n', (51467, 51478), True, 'import numpy as np\n'), ((51519, 51545), 'numpy.mean', 'np.mean', (['D_min_parent_list'], {}), '(D_min_parent_list)\n', (51526, 51545), True, 'import numpy as np\n'), ((51574, 51599), 'numpy.std', 'np.std', (['D_min_parent_list'], {}), '(D_min_parent_list)\n', (51580, 51599), True, 'import numpy as np\n'), ((51628, 51654), 'numpy.mean', 'np.mean', (['D_maj_parent_list'], {}), '(D_maj_parent_list)\n', (51635, 51654), True, 'import numpy as np\n'), ((51683, 51708), 'numpy.std', 'np.std', (['D_maj_parent_list'], {}), '(D_maj_parent_list)\n', (51689, 51708), True, 'import numpy as np\n'), ((51737, 51764), 
'numpy.mean', 'np.mean', (['D_Major_Minor_list'], {}), '(D_Major_Minor_list)\n', (51744, 51764), True, 'import numpy as np\n'), ((51793, 51819), 'numpy.std', 'np.std', (['D_Major_Minor_list'], {}), '(D_Major_Minor_list)\n', (51799, 51819), True, 'import numpy as np\n'), ((56119, 56169), 'numpy.concatenate', 'np.concatenate', (['(values_by_gen, values_overall)', '(0)'], {}), '((values_by_gen, values_overall), 0)\n', (56133, 56169), True, 'import numpy as np\n'), ((63594, 63620), 'numpy.zeros', 'np.zeros', (['[num_orders, 20]'], {}), '([num_orders, 20])\n', (63602, 63620), True, 'import numpy as np\n'), ((67962, 67979), 'numpy.zeros', 'np.zeros', (['[1, 20]'], {}), '([1, 20])\n', (67970, 67979), True, 'import numpy as np\n'), ((68033, 68068), 'numpy.extract', 'np.extract', (['branch_list', 'branchDiam'], {}), '(branch_list, branchDiam)\n', (68043, 68068), True, 'import numpy as np\n'), ((68085, 68119), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (68095, 68119), True, 'import numpy as np\n'), ((68218, 68255), 'numpy.extract', 'np.extract', (['branch_list', 'branchAngles'], {}), '(branch_list, branchAngles)\n', (68228, 68255), True, 'import numpy as np\n'), ((68484, 68501), 'numpy.mean', 'np.mean', (['len_list'], {}), '(len_list)\n', (68491, 68501), True, 'import numpy as np\n'), ((68539, 68555), 'numpy.std', 'np.std', (['len_list'], {}), '(len_list)\n', (68545, 68555), True, 'import numpy as np\n'), ((68598, 68616), 'numpy.mean', 'np.mean', (['diam_list'], {}), '(diam_list)\n', (68605, 68616), True, 'import numpy as np\n'), ((68656, 68673), 'numpy.std', 'np.std', (['diam_list'], {}), '(diam_list)\n', (68662, 68673), True, 'import numpy as np\n'), ((68910, 68939), 'numpy.mean', 'np.mean', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (68917, 68939), True, 'import numpy as np\n'), ((68988, 69016), 'numpy.std', 'np.std', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (68994, 69016), True, 
'import numpy as np\n'), ((69341, 69360), 'numpy.mean', 'np.mean', (['angle_list'], {}), '(angle_list)\n', (69348, 69360), True, 'import numpy as np\n'), ((69399, 69417), 'numpy.std', 'np.std', (['angle_list'], {}), '(angle_list)\n', (69405, 69417), True, 'import numpy as np\n'), ((69451, 69490), 'numpy.extract', 'np.extract', (['branch_list', 'branchLenRatio'], {}), '(branch_list, branchLenRatio)\n', (69461, 69490), True, 'import numpy as np\n'), ((69566, 69586), 'numpy.mean', 'np.mean', (['lengthRatio'], {}), '(lengthRatio)\n', (69573, 69586), True, 'import numpy as np\n'), ((69628, 69647), 'numpy.std', 'np.std', (['lengthRatio'], {}), '(lengthRatio)\n', (69634, 69647), True, 'import numpy as np\n'), ((69678, 69718), 'numpy.extract', 'np.extract', (['branch_list', 'branchDiamRatio'], {}), '(branch_list, branchDiamRatio)\n', (69688, 69718), True, 'import numpy as np\n'), ((69788, 69806), 'numpy.mean', 'np.mean', (['diamRatio'], {}), '(diamRatio)\n', (69795, 69806), True, 'import numpy as np\n'), ((69849, 69866), 'numpy.std', 'np.std', (['diamRatio'], {}), '(diamRatio)\n', (69855, 69866), True, 'import numpy as np\n'), ((69908, 69950), 'numpy.mean', 'np.mean', (['values_by_order[1:num_orders, 18]'], {}), '(values_by_order[1:num_orders, 18])\n', (69915, 69950), True, 'import numpy as np\n'), ((75401, 75453), 'numpy.concatenate', 'np.concatenate', (['(values_by_order, values_overall)', '(0)'], {}), '((values_by_order, values_overall), 0)\n', (75415, 75453), True, 'import numpy as np\n'), ((75528, 75574), 'numpy.zeros', 'np.zeros', (["(rectangular_mesh['total_nodes'], 2)"], {}), "((rectangular_mesh['total_nodes'], 2))\n", (75536, 75574), True, 'import numpy as np\n'), ((79588, 79622), 'numpy.zeros', 'np.zeros', (['num_terminals'], {'dtype': 'int'}), '(num_terminals, dtype=int)\n', (79596, 79622), True, 'import numpy as np\n'), ((81890, 81924), 'numpy.zeros', 'np.zeros', (['num_terminals'], {'dtype': 'int'}), '(num_terminals, dtype=int)\n', (81898, 81924), True, 
'import numpy as np\n'), ((81946, 81980), 'numpy.zeros', 'np.zeros', (['num_terminals'], {'dtype': 'int'}), '(num_terminals, dtype=int)\n', (81954, 81980), True, 'import numpy as np\n'), ((86162, 86190), 'numpy.zeros', 'np.zeros', (['total_sample_elems'], {}), '(total_sample_elems)\n', (86170, 86190), True, 'import numpy as np\n'), ((86220, 86248), 'numpy.zeros', 'np.zeros', (['total_sample_elems'], {}), '(total_sample_elems)\n', (86228, 86248), True, 'import numpy as np\n'), ((86522, 86585), 'numpy.zeros', 'np.zeros', (['(num_points_xyz * num_points_xyz * num_points_xyz, 3)'], {}), '((num_points_xyz * num_points_xyz * num_points_xyz, 3))\n', (86530, 86585), True, 'import numpy as np\n'), ((86673, 86707), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num_points_xyz'], {}), '(-1, 1, num_points_xyz)\n', (86684, 86707), True, 'import numpy as np\n'), ((86716, 86750), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num_points_xyz'], {}), '(-1, 1, num_points_xyz)\n', (86727, 86750), True, 'import numpy as np\n'), ((86763, 86797), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num_points_xyz'], {}), '(-1, 1, num_points_xyz)\n', (86774, 86797), True, 'import numpy as np\n'), ((94145, 94189), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (94154, 94189), True, 'import numpy as np\n'), ((94214, 94257), 'numpy.nan_to_num', 'np.nan_to_num', (['(tissue_diameter / tissue_vol)'], {}), '(tissue_diameter / tissue_vol)\n', (94227, 94257), True, 'import numpy as np\n'), ((3143, 3164), 'numpy.ones', 'np.ones', (['num_branches'], {}), '(num_branches)\n', (3150, 3164), True, 'import numpy as np\n'), ((3228, 3251), 'numpy.where', 'np.where', (['(branches == i)'], {}), '(branches == i)\n', (3236, 3251), True, 'import numpy as np\n'), ((5462, 5479), 'numpy.zeros', 'np.zeros', (['[Ne, 1]'], {}), '([Ne, 1])\n', (5470, 5479), True, 'import numpy as np\n'), ((8196, 8245), 'numpy.column_stack', 
'np.column_stack', (['[terminal_elems, terminal_elems]'], {}), '([terminal_elems, terminal_elems])\n', (8211, 8245), True, 'import numpy as np\n'), ((8338, 8367), 'numpy.where', 'np.where', (['(terminal_elems == 1)'], {}), '(terminal_elems == 1)\n', (8346, 8367), True, 'import numpy as np\n'), ((16217, 16246), 'numpy.linalg.norm', 'np.linalg.norm', (['branch_vector'], {}), '(branch_vector)\n', (16231, 16246), True, 'import numpy as np\n'), ((16579, 16618), 'numpy.cross', 'np.cross', (['desiredvector', 'cylindervector'], {}), '(desiredvector, cylindervector)\n', (16587, 16618), True, 'import numpy as np\n'), ((17309, 17350), 'numpy.zeros', 'np.zeros', (['total_sample_elems'], {'dtype': 'float'}), '(total_sample_elems, dtype=float)\n', (17317, 17350), True, 'import numpy as np\n'), ((18894, 18913), 'numpy.sum', 'np.sum', (['vol_each_br'], {}), '(vol_each_br)\n', (18900, 18913), True, 'import numpy as np\n'), ((22668, 22690), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'int'}), '(8, dtype=int)\n', (22676, 22690), True, 'import numpy as np\n'), ((28833, 28867), 'numpy.maximum', 'np.maximum', (['maxgen', 'generation[ne]'], {}), '(maxgen, generation[ne])\n', (28843, 28867), True, 'import numpy as np\n'), ((28993, 29021), 'numpy.maximum', 'np.maximum', (['horsfield[ne]', '(1)'], {}), '(horsfield[ne], 1)\n', (29003, 29021), True, 'import numpy as np\n'), ((34114, 34145), 'numpy.linalg.norm', 'np.linalg.norm', (['(point1 - point2)'], {}), '(point1 - point2)\n', (34128, 34145), True, 'import numpy as np\n'), ((36005, 36026), 'numpy.ones', 'np.ones', (['num_branches'], {}), '(num_branches)\n', (36012, 36026), True, 'import numpy as np\n'), ((36088, 36109), 'numpy.ones', 'np.ones', (['num_branches'], {}), '(num_branches)\n', (36095, 36109), True, 'import numpy as np\n'), ((36142, 36163), 'numpy.ones', 'np.ones', (['num_branches'], {}), '(num_branches)\n', (36149, 36163), True, 'import numpy as np\n'), ((36188, 36206), 'numpy.ones', 'np.ones', (['num_elems'], {}), 
'(num_elems)\n', (36195, 36206), True, 'import numpy as np\n'), ((36257, 36275), 'numpy.ones', 'np.ones', (['num_elems'], {}), '(num_elems)\n', (36264, 36275), True, 'import numpy as np\n'), ((36299, 36317), 'numpy.ones', 'np.ones', (['num_elems'], {}), '(num_elems)\n', (36306, 36317), True, 'import numpy as np\n'), ((40708, 40741), 'numpy.squeeze', 'np.squeeze', (['nodes[maxRadInd, 1:4]'], {}), '(nodes[maxRadInd, 1:4])\n', (40718, 40741), True, 'import numpy as np\n'), ((41573, 41596), 'numpy.squeeze', 'np.squeeze', (['Ne_place[1]'], {}), '(Ne_place[1])\n', (41583, 41596), True, 'import numpy as np\n'), ((43160, 43190), 'numpy.extract', 'np.extract', (['element_list', 'diam'], {}), '(element_list, diam)\n', (43170, 43190), True, 'import numpy as np\n'), ((43210, 43242), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (43220, 43242), True, 'import numpy as np\n'), ((43362, 43397), 'numpy.logical_and', 'np.logical_and', (['diam_bool', 'len_bool'], {}), '(diam_bool, len_bool)\n', (43376, 43397), True, 'import numpy as np\n'), ((43871, 43889), 'numpy.mean', 'np.mean', (['diam_list'], {}), '(diam_list)\n', (43878, 43889), True, 'import numpy as np\n'), ((43936, 43953), 'numpy.std', 'np.std', (['diam_list'], {}), '(diam_list)\n', (43942, 43953), True, 'import numpy as np\n'), ((44215, 44244), 'numpy.mean', 'np.mean', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (44222, 44244), True, 'import numpy as np\n'), ((44300, 44328), 'numpy.std', 'np.std', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (44306, 44328), True, 'import numpy as np\n'), ((49432, 49470), 'numpy.extract', 'np.extract', (['element_list', 'element_list'], {}), '(element_list, element_list)\n', (49442, 49470), True, 'import numpy as np\n'), ((49764, 49803), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (49774, 49803), True, 'import numpy as np\n'), ((49859, 49898), 
'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (49869, 49898), True, 'import numpy as np\n'), ((56692, 56703), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56699, 56703), True, 'import numpy as np\n'), ((56725, 56736), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56732, 56736), True, 'import numpy as np\n'), ((56763, 56774), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56770, 56774), True, 'import numpy as np\n'), ((56799, 56810), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56806, 56810), True, 'import numpy as np\n'), ((56835, 56846), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56842, 56846), True, 'import numpy as np\n'), ((56873, 56884), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56880, 56884), True, 'import numpy as np\n'), ((56909, 56920), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56916, 56920), True, 'import numpy as np\n'), ((56945, 56956), 'numpy.ones', 'np.ones', (['Ne'], {}), '(Ne)\n', (56952, 56956), True, 'import numpy as np\n'), ((60219, 60273), 'numpy.argwhere', 'np.argwhere', (['(non_empty_rects == mesh_node_elems[el, 1])'], {}), '(non_empty_rects == mesh_node_elems[el, 1])\n', (60230, 60273), True, 'import numpy as np\n'), ((63732, 63767), 'numpy.extract', 'np.extract', (['branch_list', 'branchDiam'], {}), '(branch_list, branchDiam)\n', (63742, 63767), True, 'import numpy as np\n'), ((63787, 63821), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (63797, 63821), True, 'import numpy as np\n'), ((63941, 63976), 'numpy.logical_and', 'np.logical_and', (['diam_bool', 'len_bool'], {}), '(diam_bool, len_bool)\n', (63955, 63976), True, 'import numpy as np\n'), ((64462, 64480), 'numpy.mean', 'np.mean', (['diam_list'], {}), '(diam_list)\n', (64469, 64480), True, 'import numpy as np\n'), ((64529, 64546), 'numpy.std', 'np.std', (['diam_list'], {}), '(diam_list)\n', (64535, 64546), True, 'import numpy as np\n'), 
((64810, 64839), 'numpy.mean', 'np.mean', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (64817, 64839), True, 'import numpy as np\n'), ((64897, 64925), 'numpy.std', 'np.std', (['(len_list / diam_list)'], {}), '(len_list / diam_list)\n', (64903, 64925), True, 'import numpy as np\n'), ((68396, 68432), 'numpy.extract', 'np.extract', (['branch_list', 'branch_list'], {}), '(branch_list, branch_list)\n', (68406, 68432), True, 'import numpy as np\n'), ((68726, 68763), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (68736, 68763), True, 'import numpy as np\n'), ((68819, 68856), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (68829, 68856), True, 'import numpy as np\n'), ((87583, 87607), 'numpy.copy', 'np.copy', (['term_vol_points'], {}), '(term_vol_points)\n', (87590, 87607), True, 'import numpy as np\n'), ((87949, 87990), 'numpy.zeros', 'np.zeros', (['total_sample_elems'], {'dtype': 'float'}), '(total_sample_elems, dtype=float)\n', (87957, 87990), True, 'import numpy as np\n'), ((5622, 5642), 'numpy.where', 'np.where', (['(elems == i)'], {}), '(elems == i)\n', (5630, 5642), True, 'import numpy as np\n'), ((16342, 16365), 'numpy.double', 'np.double', (['num_accepted'], {}), '(num_accepted)\n', (16351, 16365), True, 'import numpy as np\n'), ((16524, 16553), 'numpy.linalg.norm', 'np.linalg.norm', (['branch_vector'], {}), '(branch_vector)\n', (16538, 16553), True, 'import numpy as np\n'), ((16631, 16660), 'numpy.linalg.norm', 'np.linalg.norm', (['rotation_axis'], {}), '(rotation_axis)\n', (16645, 16660), True, 'import numpy as np\n'), ((18758, 18783), 'numpy.sum', 'np.sum', (['total_vol_samp_gr'], {}), '(total_vol_samp_gr)\n', (18764, 18783), True, 'import numpy as np\n'), ((18840, 18865), 'numpy.sum', 'np.sum', (['total_vol_samp_gr'], {}), '(total_vol_samp_gr)\n', (18846, 18865), True, 'import numpy as np\n'), ((37253, 37281), 'numpy.dot', 'np.dot', 
(['v_parent', 'v_daughter'], {}), '(v_parent, v_daughter)\n', (37259, 37281), True, 'import numpy as np\n'), ((40301, 40321), 'numpy.where', 'np.where', (['(elems == i)'], {}), '(elems == i)\n', (40309, 40321), True, 'import numpy as np\n'), ((41651, 41674), 'numpy.squeeze', 'np.squeeze', (['elems[0, :]'], {}), '(elems[0, :])\n', (41661, 41674), True, 'import numpy as np\n'), ((41741, 41776), 'numpy.squeeze', 'np.squeeze', (['elem_properties[0, 4:6]'], {}), '(elem_properties[0, 4:6])\n', (41751, 41776), True, 'import numpy as np\n'), ((43598, 43636), 'numpy.extract', 'np.extract', (['element_list', 'element_list'], {}), '(element_list, element_list)\n', (43608, 43636), True, 'import numpy as np\n'), ((43703, 43735), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (43713, 43735), True, 'import numpy as np\n'), ((43788, 43820), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (43798, 43820), True, 'import numpy as np\n'), ((44013, 44052), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (44023, 44052), True, 'import numpy as np\n'), ((44115, 44154), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (44125, 44154), True, 'import numpy as np\n'), ((44696, 44728), 'numpy.extract', 'np.extract', (['element_list', 'angles'], {}), '(element_list, angles)\n', (44706, 44728), True, 'import numpy as np\n'), ((44996, 45033), 'numpy.extract', 'np.extract', (['element_list', 'Minor_angle'], {}), '(element_list, Minor_angle)\n', (45006, 45033), True, 'import numpy as np\n'), ((45135, 45172), 'numpy.extract', 'np.extract', (['element_list', 'Major_angle'], {}), '(element_list, Major_angle)\n', (45145, 45172), True, 'import numpy as np\n'), ((45618, 45656), 'numpy.extract', 'np.extract', (['element_list', 'length_ratio'], {}), '(element_list, length_ratio)\n', (45628, 45656), True, 
'import numpy as np\n'), ((45745, 45783), 'numpy.extract', 'np.extract', (['element_list', 'L_min_parent'], {}), '(element_list, L_min_parent)\n', (45755, 45783), True, 'import numpy as np\n'), ((45889, 45927), 'numpy.extract', 'np.extract', (['element_list', 'L_maj_parent'], {}), '(element_list, L_maj_parent)\n', (45899, 45927), True, 'import numpy as np\n'), ((46034, 46073), 'numpy.extract', 'np.extract', (['element_list', 'L_Major_Minor'], {}), '(element_list, L_Major_Minor)\n', (46044, 46073), True, 'import numpy as np\n'), ((46790, 46826), 'numpy.extract', 'np.extract', (['element_list', 'diam_ratio'], {}), '(element_list, diam_ratio)\n', (46800, 46826), True, 'import numpy as np\n'), ((46908, 46946), 'numpy.extract', 'np.extract', (['element_list', 'D_min_parent'], {}), '(element_list, D_min_parent)\n', (46918, 46946), True, 'import numpy as np\n'), ((47052, 47090), 'numpy.extract', 'np.extract', (['element_list', 'D_maj_parent'], {}), '(element_list, D_maj_parent)\n', (47062, 47090), True, 'import numpy as np\n'), ((47197, 47236), 'numpy.extract', 'np.extract', (['element_list', 'D_Major_Minor'], {}), '(element_list, D_Major_Minor)\n', (47207, 47236), True, 'import numpy as np\n'), ((50130, 50162), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (50140, 50162), True, 'import numpy as np\n'), ((50165, 50204), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (50175, 50204), True, 'import numpy as np\n'), ((50264, 50296), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (50274, 50296), True, 'import numpy as np\n'), ((50299, 50338), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (50309, 50338), True, 'import numpy as np\n'), ((64181, 64217), 'numpy.extract', 'np.extract', (['branch_list', 'branch_list'], {}), '(branch_list, branch_list)\n', (64191, 64217), 
True, 'import numpy as np\n'), ((64286, 64320), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (64296, 64320), True, 'import numpy as np\n'), ((64375, 64409), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (64385, 64409), True, 'import numpy as np\n'), ((64608, 64645), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (64618, 64645), True, 'import numpy as np\n'), ((64710, 64747), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (64720, 64747), True, 'import numpy as np\n'), ((65311, 65348), 'numpy.extract', 'np.extract', (['branch_list', 'branchAngles'], {}), '(branch_list, branchAngles)\n', (65321, 65348), True, 'import numpy as np\n'), ((65443, 65462), 'numpy.mean', 'np.mean', (['angle_list'], {}), '(angle_list)\n', (65450, 65462), True, 'import numpy as np\n'), ((65514, 65532), 'numpy.std', 'np.std', (['angle_list'], {}), '(angle_list)\n', (65520, 65532), True, 'import numpy as np\n'), ((65574, 65613), 'numpy.extract', 'np.extract', (['branch_list', 'branchLenRatio'], {}), '(branch_list, branchLenRatio)\n', (65584, 65613), True, 'import numpy as np\n'), ((65711, 65731), 'numpy.mean', 'np.mean', (['lengthRatio'], {}), '(lengthRatio)\n', (65718, 65731), True, 'import numpy as np\n'), ((65786, 65805), 'numpy.std', 'np.std', (['lengthRatio'], {}), '(lengthRatio)\n', (65792, 65805), True, 'import numpy as np\n'), ((65844, 65884), 'numpy.extract', 'np.extract', (['branch_list', 'branchDiamRatio'], {}), '(branch_list, branchDiamRatio)\n', (65854, 65884), True, 'import numpy as np\n'), ((65976, 65994), 'numpy.mean', 'np.mean', (['diamRatio'], {}), '(diamRatio)\n', (65983, 65994), True, 'import numpy as np\n'), ((66050, 66067), 'numpy.std', 'np.std', (['diamRatio'], {}), '(diamRatio)\n', (66056, 66067), True, 'import numpy as np\n'), ((69088, 69122), 'numpy.extract', 
'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (69098, 69122), True, 'import numpy as np\n'), ((69125, 69162), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (69135, 69162), True, 'import numpy as np\n'), ((69222, 69256), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (69232, 69256), True, 'import numpy as np\n'), ((69259, 69296), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (69269, 69296), True, 'import numpy as np\n'), ((3861, 3901), 'numpy.square', 'np.square', (['(startNode[1:4] - endNode[1:4])'], {}), '(startNode[1:4] - endNode[1:4])\n', (3870, 3901), True, 'import numpy as np\n'), ((6605, 6634), 'numpy.where', 'np.where', (['(elems == nodeNumNew)'], {}), '(elems == nodeNumNew)\n', (6613, 6634), True, 'import numpy as np\n'), ((25091, 25133), 'numpy.linspace', 'np.linspace', (['startx', 'endx', 'num_test_points'], {}), '(startx, endx, num_test_points)\n', (25102, 25133), True, 'import numpy as np\n'), ((25156, 25198), 'numpy.linspace', 'np.linspace', (['starty', 'endy', 'num_test_points'], {}), '(starty, endy, num_test_points)\n', (25167, 25198), True, 'import numpy as np\n'), ((25220, 25249), 'numpy.meshgrid', 'np.meshgrid', (['xVector', 'yVector'], {}), '(xVector, yVector)\n', (25231, 25249), True, 'import numpy as np\n'), ((25764, 25789), 'numpy.zeros', 'np.zeros', (['num_test_points'], {}), '(num_test_points)\n', (25772, 25789), True, 'import numpy as np\n'), ((25921, 25952), 'numpy.trapz', 'np.trapz', (['intermediate', 'yVector'], {}), '(intermediate, yVector)\n', (25929, 25952), True, 'import numpy as np\n'), ((31833, 31855), 'numpy.log10', 'np.log10', (['inlet_radius'], {}), '(inlet_radius)\n', (31841, 31855), True, 'import numpy as np\n'), ((36785, 36809), 'numpy.linalg.norm', 'np.linalg.norm', (['v_parent'], {}), '(v_parent)\n', (36799, 36809), True, 'import 
numpy as np\n'), ((37096, 37122), 'numpy.linalg.norm', 'np.linalg.norm', (['v_daughter'], {}), '(v_daughter)\n', (37110, 37122), True, 'import numpy as np\n'), ((37338, 37356), 'numpy.arccos', 'np.arccos', (['dotProd'], {}), '(dotProd)\n', (37347, 37356), True, 'import numpy as np\n'), ((44411, 44443), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (44421, 44443), True, 'import numpy as np\n'), ((44446, 44485), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (44456, 44485), True, 'import numpy as np\n'), ((44556, 44588), 'numpy.extract', 'np.extract', (['element_list', 'length'], {}), '(element_list, length)\n', (44566, 44588), True, 'import numpy as np\n'), ((44591, 44630), 'numpy.extract', 'np.extract', (['element_list', 'euclid_length'], {}), '(element_list, euclid_length)\n', (44601, 44630), True, 'import numpy as np\n'), ((44858, 44877), 'numpy.mean', 'np.mean', (['angle_list'], {}), '(angle_list)\n', (44865, 44877), True, 'import numpy as np\n'), ((44931, 44949), 'numpy.std', 'np.std', (['angle_list'], {}), '(angle_list)\n', (44937, 44949), True, 'import numpy as np\n'), ((45328, 45353), 'numpy.mean', 'np.mean', (['Minor_angle_list'], {}), '(Minor_angle_list)\n', (45335, 45353), True, 'import numpy as np\n'), ((45413, 45437), 'numpy.std', 'np.std', (['Minor_angle_list'], {}), '(Minor_angle_list)\n', (45419, 45437), True, 'import numpy as np\n'), ((45481, 45506), 'numpy.mean', 'np.mean', (['Major_angle_list'], {}), '(Major_angle_list)\n', (45488, 45506), True, 'import numpy as np\n'), ((45566, 45590), 'numpy.std', 'np.std', (['Major_angle_list'], {}), '(Major_angle_list)\n', (45572, 45590), True, 'import numpy as np\n'), ((46236, 46256), 'numpy.mean', 'np.mean', (['lengthRatio'], {}), '(lengthRatio)\n', (46243, 46256), True, 'import numpy as np\n'), ((46313, 46332), 'numpy.std', 'np.std', (['lengthRatio'], {}), '(lengthRatio)\n', (46319, 46332), True, 
'import numpy as np\n'), ((46389, 46415), 'numpy.mean', 'np.mean', (['L_min_parent_list'], {}), '(L_min_parent_list)\n', (46396, 46415), True, 'import numpy as np\n'), ((46459, 46484), 'numpy.std', 'np.std', (['L_min_parent_list'], {}), '(L_min_parent_list)\n', (46465, 46484), True, 'import numpy as np\n'), ((46528, 46554), 'numpy.mean', 'np.mean', (['L_maj_parent_list'], {}), '(L_maj_parent_list)\n', (46535, 46554), True, 'import numpy as np\n'), ((46598, 46623), 'numpy.std', 'np.std', (['L_maj_parent_list'], {}), '(L_maj_parent_list)\n', (46604, 46623), True, 'import numpy as np\n'), ((46667, 46694), 'numpy.mean', 'np.mean', (['L_Major_Minor_list'], {}), '(L_Major_Minor_list)\n', (46674, 46694), True, 'import numpy as np\n'), ((46738, 46764), 'numpy.std', 'np.std', (['L_Major_Minor_list'], {}), '(L_Major_Minor_list)\n', (46744, 46764), True, 'import numpy as np\n'), ((47399, 47417), 'numpy.mean', 'np.mean', (['diamRatio'], {}), '(diamRatio)\n', (47406, 47417), True, 'import numpy as np\n'), ((47475, 47492), 'numpy.std', 'np.std', (['diamRatio'], {}), '(diamRatio)\n', (47481, 47492), True, 'import numpy as np\n'), ((47548, 47574), 'numpy.mean', 'np.mean', (['D_min_parent_list'], {}), '(D_min_parent_list)\n', (47555, 47574), True, 'import numpy as np\n'), ((47618, 47643), 'numpy.std', 'np.std', (['D_min_parent_list'], {}), '(D_min_parent_list)\n', (47624, 47643), True, 'import numpy as np\n'), ((47687, 47713), 'numpy.mean', 'np.mean', (['D_maj_parent_list'], {}), '(D_maj_parent_list)\n', (47694, 47713), True, 'import numpy as np\n'), ((47757, 47782), 'numpy.std', 'np.std', (['D_maj_parent_list'], {}), '(D_maj_parent_list)\n', (47763, 47782), True, 'import numpy as np\n'), ((47826, 47853), 'numpy.mean', 'np.mean', (['D_Major_Minor_list'], {}), '(D_Major_Minor_list)\n', (47833, 47853), True, 'import numpy as np\n'), ((47897, 47923), 'numpy.std', 'np.std', (['D_Major_Minor_list'], {}), '(D_Major_Minor_list)\n', (47903, 47923), True, 'import numpy as np\n'), ((57271, 
57294), 'numpy.int', 'np.int', (['elem_down[i, j]'], {}), '(elem_down[i, j])\n', (57277, 57294), True, 'import numpy as np\n'), ((65010, 65044), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (65020, 65044), True, 'import numpy as np\n'), ((65047, 65084), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (65057, 65084), True, 'import numpy as np\n'), ((65157, 65191), 'numpy.extract', 'np.extract', (['branch_list', 'branchLen'], {}), '(branch_list, branchLen)\n', (65167, 65191), True, 'import numpy as np\n'), ((65194, 65231), 'numpy.extract', 'np.extract', (['branch_list', 'branchEucLen'], {}), '(branch_list, branchEucLen)\n', (65204, 65231), True, 'import numpy as np\n'), ((7877, 7906), 'numpy.where', 'np.where', (['(elems == nodeNumNew)'], {}), '(elems == nodeNumNew)\n', (7885, 7906), True, 'import numpy as np\n'), ((16988, 17009), 'numpy.matrix', 'np.matrix', (['cyl_points'], {}), '(cyl_points)\n', (16997, 17009), True, 'import numpy as np\n'), ((17012, 17035), 'numpy.matrix', 'np.matrix', (['rotation_mat'], {}), '(rotation_mat)\n', (17021, 17035), True, 'import numpy as np\n'), ((25872, 25899), 'numpy.trapz', 'np.trapz', (['zv[:, i]', 'xVector'], {}), '(zv[:, i], xVector)\n', (25880, 25899), True, 'import numpy as np\n'), ((26089, 26131), 'numpy.linspace', 'np.linspace', (['startx', 'endx', 'num_test_points'], {}), '(startx, endx, num_test_points)\n', (26100, 26131), True, 'import numpy as np\n'), ((26158, 26200), 'numpy.linspace', 'np.linspace', (['starty', 'endy', 'num_test_points'], {}), '(starty, endy, num_test_points)\n', (26169, 26200), True, 'import numpy as np\n'), ((26226, 26255), 'numpy.meshgrid', 'np.meshgrid', (['xVector', 'yVector'], {}), '(xVector, yVector)\n', (26237, 26255), True, 'import numpy as np\n'), ((26826, 26851), 'numpy.zeros', 'np.zeros', (['num_test_points'], {}), '(num_test_points)\n', (26834, 26851), True, 'import numpy as np\n'), 
((26995, 27026), 'numpy.trapz', 'np.trapz', (['intermediate', 'yVector'], {}), '(intermediate, yVector)\n', (27003, 27026), True, 'import numpy as np\n'), ((31777, 31799), 'numpy.log10', 'np.log10', (['radius_ratio'], {}), '(radius_ratio)\n', (31785, 31799), True, 'import numpy as np\n'), ((33457, 33479), 'numpy.log10', 'np.log10', (['inlet_radius'], {}), '(inlet_radius)\n', (33465, 33479), True, 'import numpy as np\n'), ((60324, 60378), 'numpy.argwhere', 'np.argwhere', (['(non_empty_rects == mesh_node_elems[el][1])'], {}), '(non_empty_rects == mesh_node_elems[el][1])\n', (60335, 60378), True, 'import numpy as np\n'), ((60431, 60482), 'numpy.where', 'np.where', (['(non_empty_rects == mesh_node_elems[el][1])'], {}), '(non_empty_rects == mesh_node_elems[el][1])\n', (60439, 60482), True, 'import numpy as np\n'), ((66237, 66257), 'numpy.square', 'np.square', (['diam_list'], {}), '(diam_list)\n', (66246, 66257), True, 'import numpy as np\n'), ((72454, 72482), 'numpy.sum', 'np.sum', (["branchGeom['length']"], {}), "(branchGeom['length'])\n", (72460, 72482), True, 'import numpy as np\n'), ((72741, 72760), 'numpy.std', 'np.std', (['terminalGen'], {}), '(terminalGen)\n', (72747, 72760), True, 'import numpy as np\n'), ((72864, 72894), 'numpy.std', 'np.std', (['(length / euclid_length)'], {}), '(length / euclid_length)\n', (72870, 72894), True, 'import numpy as np\n'), ((72975, 72989), 'numpy.std', 'np.std', (['length'], {}), '(length)\n', (72981, 72989), True, 'import numpy as np\n'), ((73087, 73108), 'numpy.std', 'np.std', (['euclid_length'], {}), '(euclid_length)\n', (73093, 73108), True, 'import numpy as np\n'), ((73189, 73201), 'numpy.std', 'np.std', (['diam'], {}), '(diam)\n', (73195, 73201), True, 'import numpy as np\n'), ((73285, 73307), 'numpy.std', 'np.std', (['(length2 / diam)'], {}), '(length2 / diam)\n', (73291, 73307), True, 'import numpy as np\n'), ((73390, 73404), 'numpy.std', 'np.std', (['angles'], {}), '(angles)\n', (73396, 73404), True, 'import numpy as 
np\n'), ((73485, 73504), 'numpy.std', 'np.std', (['Minor_angle'], {}), '(Minor_angle)\n', (73491, 73504), True, 'import numpy as np\n'), ((73585, 73604), 'numpy.std', 'np.std', (['Major_angle'], {}), '(Major_angle)\n', (73591, 73604), True, 'import numpy as np\n'), ((73678, 73696), 'numpy.std', 'np.std', (['diam_ratio'], {}), '(diam_ratio)\n', (73684, 73696), True, 'import numpy as np\n'), ((73779, 73799), 'numpy.std', 'np.std', (['D_min_parent'], {}), '(D_min_parent)\n', (73785, 73799), True, 'import numpy as np\n'), ((73882, 73902), 'numpy.std', 'np.std', (['D_maj_parent'], {}), '(D_maj_parent)\n', (73888, 73902), True, 'import numpy as np\n'), ((73983, 74004), 'numpy.std', 'np.std', (['D_Major_Minor'], {}), '(D_Major_Minor)\n', (73989, 74004), True, 'import numpy as np\n'), ((74080, 74100), 'numpy.std', 'np.std', (['length_ratio'], {}), '(length_ratio)\n', (74086, 74100), True, 'import numpy as np\n'), ((74183, 74203), 'numpy.std', 'np.std', (['L_min_parent'], {}), '(L_min_parent)\n', (74189, 74203), True, 'import numpy as np\n'), ((74286, 74306), 'numpy.std', 'np.std', (['L_maj_parent'], {}), '(L_maj_parent)\n', (74292, 74306), True, 'import numpy as np\n'), ((74387, 74408), 'numpy.std', 'np.std', (['L_Major_Minor'], {}), '(L_Major_Minor)\n', (74393, 74408), True, 'import numpy as np\n'), ((6023, 6049), 'numpy.squeeze', 'np.squeeze', (['elems[ind1, :]'], {}), '(elems[ind1, :])\n', (6033, 6049), True, 'import numpy as np\n'), ((6118, 6156), 'numpy.squeeze', 'np.squeeze', (['elem_properties[ind1, 4:6]'], {}), '(elem_properties[ind1, 4:6])\n', (6128, 6156), True, 'import numpy as np\n'), ((25553, 25570), 'numpy.sqrt', 'np.sqrt', (['zv[i, j]'], {}), '(zv[i, j])\n', (25560, 25570), True, 'import numpy as np\n'), ((26942, 26969), 'numpy.trapz', 'np.trapz', (['zv[:, i]', 'xVector'], {}), '(zv[:, i], xVector)\n', (26950, 26969), True, 'import numpy as np\n'), ((33401, 33423), 'numpy.log10', 'np.log10', (['radius_ratio'], {}), '(radius_ratio)\n', (33409, 33423), True, 
'import numpy as np\n'), ((7381, 7410), 'numpy.squeeze', 'np.squeeze', (['elems[ind1[k], :]'], {}), '(elems[ind1[k], :])\n', (7391, 7410), True, 'import numpy as np\n'), ((7486, 7527), 'numpy.squeeze', 'np.squeeze', (['elem_properties[ind1[k], 4:6]'], {}), '(elem_properties[ind1[k], 4:6])\n', (7496, 7527), True, 'import numpy as np\n'), ((13994, 14017), 'numpy.double', 'np.double', (['num_points_z'], {}), '(num_points_z)\n', (14003, 14017), True, 'import numpy as np\n'), ((26587, 26604), 'numpy.sqrt', 'np.sqrt', (['zv[i, j]'], {}), '(zv[i, j])\n', (26594, 26604), True, 'import numpy as np\n'), ((72706, 72726), 'numpy.mean', 'np.mean', (['terminalGen'], {}), '(terminalGen)\n', (72713, 72726), True, 'import numpy as np\n'), ((72809, 72840), 'numpy.mean', 'np.mean', (['(length / euclid_length)'], {}), '(length / euclid_length)\n', (72816, 72840), True, 'import numpy as np\n'), ((72945, 72960), 'numpy.mean', 'np.mean', (['length'], {}), '(length)\n', (72952, 72960), True, 'import numpy as np\n'), ((73050, 73072), 'numpy.mean', 'np.mean', (['euclid_length'], {}), '(euclid_length)\n', (73057, 73072), True, 'import numpy as np\n'), ((73161, 73174), 'numpy.mean', 'np.mean', (['diam'], {}), '(diam)\n', (73168, 73174), True, 'import numpy as np\n'), ((73249, 73272), 'numpy.mean', 'np.mean', (['(length2 / diam)'], {}), '(length2 / diam)\n', (73256, 73272), True, 'import numpy as np\n'), ((73360, 73375), 'numpy.mean', 'np.mean', (['angles'], {}), '(angles)\n', (73367, 73375), True, 'import numpy as np\n'), ((73450, 73470), 'numpy.mean', 'np.mean', (['Minor_angle'], {}), '(Minor_angle)\n', (73457, 73470), True, 'import numpy as np\n'), ((73550, 73570), 'numpy.mean', 'np.mean', (['Major_angle'], {}), '(Major_angle)\n', (73557, 73570), True, 'import numpy as np\n'), ((73644, 73663), 'numpy.mean', 'np.mean', (['diam_ratio'], {}), '(diam_ratio)\n', (73651, 73663), True, 'import numpy as np\n'), ((73743, 73764), 'numpy.mean', 'np.mean', (['D_min_parent'], {}), '(D_min_parent)\n', 
(73750, 73764), True, 'import numpy as np\n'), ((73846, 73867), 'numpy.mean', 'np.mean', (['D_maj_parent'], {}), '(D_maj_parent)\n', (73853, 73867), True, 'import numpy as np\n'), ((73946, 73968), 'numpy.mean', 'np.mean', (['D_Major_Minor'], {}), '(D_Major_Minor)\n', (73953, 73968), True, 'import numpy as np\n'), ((74044, 74065), 'numpy.mean', 'np.mean', (['length_ratio'], {}), '(length_ratio)\n', (74051, 74065), True, 'import numpy as np\n'), ((74147, 74168), 'numpy.mean', 'np.mean', (['L_min_parent'], {}), '(L_min_parent)\n', (74154, 74168), True, 'import numpy as np\n'), ((74250, 74271), 'numpy.mean', 'np.mean', (['L_maj_parent'], {}), '(L_maj_parent)\n', (74257, 74271), True, 'import numpy as np\n'), ((74350, 74372), 'numpy.mean', 'np.mean', (['L_Major_Minor'], {}), '(L_Major_Minor)\n', (74357, 74372), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
from common_variables import user_column, item_colum, rating_column, threshold, k
from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank
from utils import change_relevance
from statistics import mean
def count_positive_hits(data, column, threshold):
    """Count rows of *data* whose value in *column* is a rating strictly above *threshold*."""
    positive_ratings = [r for r in get_ratings_set(data) if r > threshold]
    return data[data[column].isin(positive_ratings)][column].count()
def count_negative_hits(data, column, threshold):
    """Count rows of *data* whose value in *column* is a rating at or below *threshold*."""
    negative_ratings = [r for r in get_ratings_set(data) if r <= threshold]
    return data[data[column].isin(negative_ratings)][column].count()
def get_ratings_set(data):
    """Return the distinct rating values present in *data*."""
    ratings = data[rating_column]
    return ratings.unique()
def get_test_ratings_rec(recommendation, testdata):
    """Align ground-truth ratings from *testdata* onto *recommendation*.

    Both frames are re-indexed (in place) on a 'user_item' composite key so
    the rating column can be copied row-for-row; returns the recommendation
    frame carrying the matched test ratings (NaN where no match exists).
    """
    for frame in (recommendation, testdata):
        set_new_index(frame, user_column, item_colum)
    recommendation[rating_column] = testdata[rating_column]
    return recommendation
def get_topk(data):
    """Keep only the first k rows of each user's group (data assumed already ranked)."""
    per_user = data.groupby(user_column)
    return per_user.head(k).reset_index(drop=True)
def set_new_index(df, col1, col2):
    """Re-index *df* in place with a '<col1>_<col2>' composite string key and return it."""
    composite_key = df[col1].map(str) + '_' + df[col2].map(str)
    df.set_index(composite_key, inplace=True)
    return df
def get_number_users(data):
    """Return the distinct user ids in *data* (despite the name, an array of ids, not a count)."""
    users = data[user_column]
    return users.unique()
def find_user_data(data, u):
    """Return the rows of *data* belonging to user *u*."""
    is_user = data[user_column] == u
    return data.loc[is_user]
def metrics_one_user(rec_user, user_test, iscondensed):
    """Compute (recall, precision, fallout, antiprecision) for one user.

    For a full list (iscondensed False) precision/antiprecision are taken
    at the fixed cutoff k; for a condensed list they are taken over the
    number of recommendations actually scored (tp + fp).
    """
    tp_user = count_positive_hits(rec_user, rating_column, threshold)
    fp_user = count_negative_hits(rec_user, rating_column, threshold)
    relevants_user = count_positive_hits(user_test, rating_column, threshold)
    nonrelevants_user = count_negative_hits(user_test, rating_column, threshold)
    # Only the precision denominator differs between the two modes.
    if iscondensed == False:
        denominator = k
    else:
        denominator = tp_user + fp_user
    return (recall(tp_user, relevants_user),
            precision(tp_user, denominator),
            fallout(fp_user, nonrelevants_user),
            antiprecision(fp_user, denominator))
def metrics_all_users(recommendation, testdata, iscondensed):
    """Average the per-user metrics over every user appearing in *testdata*.

    Returns [mean_recall, mean_precision, mean_fallout, mean_antiprecision].

    Fix: the original bound its loop results to the names ``recall``,
    ``precision``, ``fallout`` and ``antiprecision``, shadowing the metric
    functions imported from ``metrics`` inside this function's scope; the
    locals are renamed so the imports remain reachable.
    """
    recall_list = []
    fallout_list = []
    precision_list = []
    antiprecision_list = []
    for u in get_number_users(testdata):
        recommendation_user = find_user_data(recommendation, u)
        testdata_user = find_user_data(testdata, u)
        user_recall, user_precision, user_fallout, user_antiprecision = \
            metrics_one_user(recommendation_user, testdata_user, iscondensed)
        recall_list.append(user_recall)
        fallout_list.append(user_fallout)
        precision_list.append(user_precision)
        antiprecision_list.append(user_antiprecision)
    # Same return order as before: recall, precision, fallout, antiprecision.
    return [np.mean(recall_list), np.mean(precision_list),
            np.mean(fallout_list), np.mean(antiprecision_list)]
def full_metrics(recommendation, testdata):
    """Full-list evaluation: align the test ratings onto the recommendations, then average per-user metrics."""
    aligned = get_test_ratings_rec(recommendation, testdata)  # match ratings test with rec totaldatset
    return metrics_all_users(aligned, testdata, False)
def condensed_metrics(recommendation, testdata):
    """Condensed-list evaluation: keep only recommended items with a test rating, cut at top-k, then average."""
    aligned = get_test_ratings_rec(recommendation, testdata)  # match ratings test with rec totaldatset
    scored_only = aligned[aligned.rating.notnull()]
    return metrics_all_users(get_topk(scored_only), testdata, True)
def ndcg_ndcl(dataset, recommendation, testdata, k):
    """Mean NDCG@k and NDCL@k over every user present in *testdata*.

    Both frames are mutated in place: a 'useritem' composite index is
    built, test ratings are copied onto the recommendations (0 where
    unmatched), then change_relevance() adds the 'rating_changed' and
    'ideal_rank_p'/'ideal_rank_n' columns consumed below.

    NOTE(review): column names are hard-coded here ('user_id', 'item_id',
    'rating') instead of using the common_variables constants the rest of
    this module relies on — verify they always agree.
    """
    usersin_test = testdata['user_id'].unique()
    total_ndcg = []
    total_ndcl = []
    # Re-index both frames on the same user_item key so ratings line up.
    recommendation['useritem'] = (recommendation['user_id'].map(str) + '_' + recommendation['item_id'].map(str))
    recommendation.set_index('useritem', inplace=True)
    testdata['useritem'] = (testdata['user_id'].map(str) + '_' + testdata['item_id'].map(str))
    testdata.set_index('useritem', inplace=True)
    recommendation['ratings'] = testdata['rating']
    # Items never seen in the test set count as rating 0.
    recommendation['ratings'].fillna(0, inplace=True)
    recommendation, testdata = change_relevance(dataset, recommendation, testdata)
    for u in usersin_test:
        userInfo = recommendation.loc[recommendation['user_id'] == u]
        userRecArray = userInfo['rating_changed'].values
        if userInfo.size:
            userTest = testdata.loc[testdata['user_id'] == u]
            userTestArray_p = userTest['ideal_rank_p'].values
            userTestArray_n = userTest['ideal_rank_n'].values
            total_ndcg.append(ndcg_k(userRecArray, userTestArray_p, k))
            total_ndcl.append(ndcl_k(userRecArray, userTestArray_n, k))
        else:
            # Test user with no recommendations at all; flagged but skipped.
            print(u)
    return [mean(total_ndcg), mean(total_ndcl)]
def compute_mrrs(recommendation, testdata):
    """Compute MRR and anti-MRR over all test users.

    Test ratings are aligned onto the recommendation list, unmatched items
    are filled with -1, and ratings are remapped to relevance labels:
    0 -> 0, 1 or 2 -> 1, anything else -> -1.
    """
    mrr_list = []
    recommendation = get_test_ratings_rec(recommendation, testdata)
    recommendation[rating_column].fillna(-1, inplace=True) # shouldn't do this for condensed lists
    # NOTE(review): the remap below assumes the 0/1/2 rating scale; it
    # silently mislabels any other scale (e.g. 1-5 stars).
    recommendation[rating_column] = recommendation.apply(
        lambda row: 0 if row[rating_column] == 0 else 1 if row[rating_column] == 2 or row[rating_column] == 1 \
        else -1, axis=1
    ) # check this depends on test data ratings 012, wont work for 543
    users_test = get_number_users(testdata)
    for u in users_test:
        recommendation_user = find_user_data(recommendation, u)
        if recommendation_user[rating_column].values.size: # empty means users from test not in rec?
            mrr_list.append(recommendation_user[rating_column].values)
    mrr = mean_reciprocal_rank(mrr_list)
    anti_mrr = anti_mean_reciprocal_rank(mrr_list)
return [mrr, anti_mrr] | [
"metrics.mean_reciprocal_rank",
"metrics.anti_mean_reciprocal_rank",
"numpy.mean",
"metrics.fallout",
"statistics.mean",
"utils.change_relevance",
"metrics.ndcg_k",
"metrics.precision",
"metrics.antiprecision",
"metrics.ndcl_k",
"metrics.recall"
] | [((2765, 2785), 'numpy.mean', 'np.mean', (['recall_list'], {}), '(recall_list)\n', (2772, 2785), True, 'import numpy as np\n'), ((2800, 2821), 'numpy.mean', 'np.mean', (['fallout_list'], {}), '(fallout_list)\n', (2807, 2821), True, 'import numpy as np\n'), ((2838, 2861), 'numpy.mean', 'np.mean', (['precision_list'], {}), '(precision_list)\n', (2845, 2861), True, 'import numpy as np\n'), ((2882, 2909), 'numpy.mean', 'np.mean', (['antiprecision_list'], {}), '(antiprecision_list)\n', (2889, 2909), True, 'import numpy as np\n'), ((4127, 4178), 'utils.change_relevance', 'change_relevance', (['dataset', 'recommendation', 'testdata'], {}), '(dataset, recommendation, testdata)\n', (4143, 4178), False, 'from utils import change_relevance\n'), ((5599, 5629), 'metrics.mean_reciprocal_rank', 'mean_reciprocal_rank', (['mrr_list'], {}), '(mrr_list)\n', (5619, 5629), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((5645, 5680), 'metrics.anti_mean_reciprocal_rank', 'anti_mean_reciprocal_rank', (['mrr_list'], {}), '(mrr_list)\n', (5670, 5680), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((4742, 4758), 'statistics.mean', 'mean', (['total_ndcg'], {}), '(total_ndcg)\n', (4746, 4758), False, 'from statistics import mean\n'), ((4760, 4776), 'statistics.mean', 'mean', (['total_ndcl'], {}), '(total_ndcl)\n', (4764, 4776), False, 'from statistics import mean\n'), ((1793, 1824), 'metrics.recall', 'recall', (['tp_user', 'relevants_user'], {}), '(tp_user, relevants_user)\n', (1799, 1824), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((1826, 1847), 'metrics.precision', 'precision', (['tp_user', 'k'], {}), '(tp_user, k)\n', (1835, 1847), False, 'from metrics import precision, antiprecision, recall, fallout, 
ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((1866, 1901), 'metrics.fallout', 'fallout', (['fp_user', 'nonrelevants_user'], {}), '(fp_user, nonrelevants_user)\n', (1873, 1901), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((1903, 1928), 'metrics.antiprecision', 'antiprecision', (['fp_user', 'k'], {}), '(fp_user, k)\n', (1916, 1928), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((1955, 1986), 'metrics.recall', 'recall', (['tp_user', 'relevants_user'], {}), '(tp_user, relevants_user)\n', (1961, 1986), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((1988, 2025), 'metrics.precision', 'precision', (['tp_user', '(tp_user + fp_user)'], {}), '(tp_user, tp_user + fp_user)\n', (1997, 2025), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((2044, 2079), 'metrics.fallout', 'fallout', (['fp_user', 'nonrelevants_user'], {}), '(fp_user, nonrelevants_user)\n', (2051, 2079), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((2081, 2122), 'metrics.antiprecision', 'antiprecision', (['fp_user', '(tp_user + fp_user)'], {}), '(fp_user, tp_user + fp_user)\n', (2094, 2122), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((4580, 4620), 'metrics.ndcg_k', 'ndcg_k', (['userRecArray', 'userTestArray_p', 'k'], {}), '(userRecArray, userTestArray_p, k)\n', (4586, 4620), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n'), ((4652, 4692), 
'metrics.ndcl_k', 'ndcl_k', (['userRecArray', 'userTestArray_n', 'k'], {}), '(userRecArray, userTestArray_n, k)\n', (4658, 4692), False, 'from metrics import precision, antiprecision, recall, fallout, ndcg_k, ndcl_k, mean_reciprocal_rank, anti_mean_reciprocal_rank\n')] |
import csv
import sys
import numpy
import math
from numpy import genfromtxt
from numpy.linalg import inv
import random
from random import randint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
#import time
#start_time = time.time()
PrintEnabled = 0
X = genfromtxt(sys.argv[1], delimiter=',')
# Number of Iterations
NbIterations = 10
# Number of clusters = 5 = K
NbClusters = 5
if (len(sys.argv) > 2):
PrintEnabled = 1
NbClusters = int(sys.argv[2])
# N is the number of input vectors
N = X.shape[0]
# d is the number of element per vector
d = X.shape[1]
###############################
## K-MEANS ALGORITHM ##
###############################
# Ci stores the number of the clusters to which the ith input vector belongs to
Ci = numpy.zeros(shape=(N,1))
# Centroids
Centroids = numpy.zeros(shape=(NbClusters,d))
# I want to go find the min & max of each input
# XminAndMax[0][i] being the min
# XminAndMax[1][i] being the max of ith element of each vector
XminAndMax = numpy.zeros(shape=(2,d))
# Ni is an array that will keep track of how many vectors belong to each clusters
# Needed in UpdateEachCentroids(): it is more efficient to update it during UpdateEachCi():
Ni = []
for e in range(NbClusters):
Ni.append(0)
indexOverNbClusters = 0
while indexOverNbClusters < NbClusters :
Centroids[indexOverNbClusters] = X[randint(0, N-1)]
indexOverNbClusters += 1
# K-means++ algorithm
# K-means++ chooses better than random initial centroids by trying to place
# them as far as possible from one another.
Centroids[0] = X[randint(0, N-1)]
C = 1
def GetNextCentroid(C):
    """Return the index of the next k-means++ centroid.

    Given that centroids 0..C-1 are already chosen (module globals
    Centroids, X, N), each input vector is sampled with probability
    proportional to its squared distance to the nearest existing
    centroid (the standard k-means++ D^2 weighting).

    Bug fixed: the original loop ``range(0, N-1)`` skipped the last
    input vector, so X[N-1] could never be selected as a centroid.
    """
    Dist = []
    for n in range(N):  # was range(0, N-1): off-by-one excluded X[N-1]
        Dists = []
        for c in range(0, C):
            diff = X[n] - Centroids[c]
            Dists.append(numpy.sum(numpy.multiply(diff, diff)))
        # Squared distance to the closest already-chosen centroid.
        Dist.append(min(Dists))
    # Normalize into a probability distribution and sample via its CDF.
    ProbabilityDist = numpy.asarray(Dist) / sum(Dist)
    CumulativeProbability = ProbabilityDist.cumsum()
    MyRandom = random.random()
    index = 0
    result = 0
    while index < len(CumulativeProbability):
        if MyRandom < CumulativeProbability[index]:
            result = index
            break
        index += 1
    return result
# Seed the remaining centroids one at a time with the k-means++ D^2 sampling.
indexOverNbClusters = 1
while indexOverNbClusters < NbClusters:
    Centroids[indexOverNbClusters] = X[GetNextCentroid(indexOverNbClusters)]
    indexOverNbClusters += 1
if PrintEnabled :
    print("K-means++ Centroids Initialization:")
    print(Centroids)
    print("")
def ZeroTheArray(arr):
    """Reset every entry of *arr* to 0, in place."""
    for position in range(len(arr)):
        arr[position] = 0
def InitAnArrayOfSize(size):
    """Return a fresh list of *size* float zeros (empty when size <= 0)."""
    zeros = []
    remaining = size
    while remaining > 0:
        zeros.append(0.0)
        remaining -= 1
    return zeros
def WriteToFile(nameOfTheFile_Prefix, nameOfTheFile_extension, OutputMatrix):
    """Dump each row of *OutputMatrix* to '<prefix><extension>.csv'.

    NOTE(review): the 'wb' open mode is the Python 2 csv idiom; under
    Python 3 csv.writer needs a text-mode handle — confirm the target
    interpreter before changing it.
    """
    nameOfTheFile = str(nameOfTheFile_Prefix) + str(nameOfTheFile_extension) + ".csv"
    with open(nameOfTheFile, 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for row_index in range(OutputMatrix.shape[0]):
            writer.writerow(OutputMatrix[row_index])
def WriteArrayToFile(nameOfTheFile_Prefix, nameOfTheFile_extension, OutputArray):
    """Write the first NbClusters entries of *OutputArray* to
    '<prefix><extension>.csv', one per row, echoing each to stdout.

    NOTE(review): relies on the module-level NbClusters; 'wb' mode is the
    Python 2 csv idiom (see WriteToFile).
    """
    nameOfTheFile = str(nameOfTheFile_Prefix) + str(nameOfTheFile_extension) + ".csv"
    with open(nameOfTheFile, 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for cluster_index in range(NbClusters):
            print(OutputArray[cluster_index])
            writer.writerow(OutputArray[cluster_index])
def UpdateEachCi():
    """Assign every input vector to its nearest centroid (k-means E-step).

    Overwrites the global assignment vector Ci and rebuilds the per-cluster
    member counts Ni (zeroed first, incremented as vectors are assigned),
    which UpdateEachCentroids() consumes afterwards.
    """
    ZeroTheArray(Ni)
    for vector_index in range(N):
        # Squared euclidean distance from this vector to every centroid.
        Distances = []
        for cluster_index in range(NbClusters):
            squared_dist = 0
            for component in range(d):
                delta = X[vector_index][component] - Centroids[cluster_index][component]
                squared_dist += delta ** 2
            Distances.append(squared_dist)
        assert(len(Distances) == NbClusters)
        # The cluster id is the index of the smallest distance (0-based).
        nearest = Distances.index(min(Distances))
        Ci[vector_index] = nearest
        Ni[nearest] += 1
    print("Ni : ")
    print(Ni)
def UpdateEachCentroids():
    """Recompute each centroid as the mean of its assigned vectors (k-means M-step).

    Requires Ci and the member counts Ni to be current (see UpdateEachCi()).
    Rebinds the global Centroids matrix from scratch.
    """
    global Centroids
    Centroids = numpy.zeros(shape=(NbClusters, d))
    # Accumulate each member vector pre-scaled by 1/Ni so the final sum is the mean.
    for vector_index in range(N):
        cluster = int(Ci[vector_index])
        Centroids[cluster] += X[vector_index] / Ni[cluster]
# Debug guard: the diagnostic dump inside UpdateEachPhiAndPi runs only once.
printOnlyOnce = 0
def UpdateEachPhiAndPi():
    """EM-GMM E-step: recompute the responsibilities Phi and mixture weights Pi.

    For each input vector n and component k, evaluates the (unnormalized)
    Gaussian density Pi[k] * |Sigma_k|^-1/2 * exp(-1/2 (x-mu)^T Sigma_k^-1 (x-mu)),
    normalizes over k so each row of Phi sums to 1, then sets Pi[k] to the
    average responsibility over all N vectors.

    Reads globals X, N, d, NbClusters, MatrixOfSigmas, Centroids_EM_GMM;
    rebinds Phi and updates Pi in place.
    NOTE(review): Pi[0] is accumulated on top of whatever it already holds
    without being zeroed first — verify the caller resets Pi each iteration.
    """
    global Pi
    global Phi
    global printOnlyOnce
    Phi = numpy.zeros(shape=(N, NbClusters))
    # First while to iterate over every input vector
    # E-step -- Phi
    indexOverN = 0
    while indexOverN < N :
        indexOverNbClusters = 0
        SumPhiForThisCluster = 0.0
        while indexOverNbClusters < NbClusters :
            # Sigma_k is stored as the k-th (d x d) slice of the stacked MatrixOfSigmas.
            SigmaK = numpy.zeros(shape=(d, d))
            SigmaK = MatrixOfSigmas[indexOverNbClusters*d:(indexOverNbClusters+1)*d]
            Determinant = numpy.linalg.det(SigmaK)
            XminusMu = X[indexOverN] - Centroids_EM_GMM[indexOverNbClusters]
            TransposeXminusMu = numpy.transpose(XminusMu[numpy.newaxis])
            SigmaInverse = inv(SigmaK)
            # Weight * |Sigma|^-1/2 (normal-density constant, up to (2*pi)^(-d/2)).
            PiDet = Pi[0][indexOverNbClusters] * Determinant**(-0.5)
            # Mahalanobis quadratic form (x-mu)^T Sigma^-1 (x-mu).
            MatrixMul = XminusMu[numpy.newaxis].dot(SigmaInverse).dot(TransposeXminusMu)
            Exp = math.exp(-0.5 * MatrixMul)
            # One-time diagnostic dump of every intermediate quantity.
            if printOnlyOnce == 0 :
                print("SigmaK")
                print(SigmaK)
                print("XminusMu")
                print(XminusMu)
                print("TransposeXminusMu")
                print(TransposeXminusMu)
                print("SigmaInverse")
                print(SigmaInverse)
                print("Pi[0][indexOverNbClusters]")
                print(Pi[0][indexOverNbClusters])
                print("PiDet")
                print(PiDet)
                print("Determinant")
                print(Determinant)
                print("MatrixMul")
                print(MatrixMul)
                print("Exp")
                print(Exp)
                printOnlyOnce += 1
            # assert(Exp > 0)
            Phi[indexOverN][indexOverNbClusters] = PiDet * Exp
            # assert(Phi[indexOverN][indexOverNbClusters] > 0)
            #Pi[0][indexOverNbClusters] += Phi[indexOverN][indexOverNbClusters]
            SumPhiForThisCluster += Phi[indexOverN][indexOverNbClusters]
            #print("Phi of N = " + str(indexOverN) + " for cluster = " + str(indexOverNbClusters))
            #print(Phi[indexOverN][indexOverNbClusters])
            indexOverNbClusters += 1
        # Normalize this row so the responsibilities over clusters sum to 1,
        # then fold it into the (unnormalized) Pi accumulator.
        Phi[indexOverN] = Phi[indexOverN]/SumPhiForThisCluster
        Pi[0] += Phi[indexOverN]
        indexOverN += 1
    # Pi[k] is the average responsibility: divide the accumulated sums by N.
    Pi[0] /= float(N)
def UpdateEachMuAndSigma(indexOverNbIterations):
# The number of vector that belong to each cluster is already calculated and stored in Ni
# Reset the matrix Centroids
global Centroids_EM_GMM
global MatrixOfSigmas
Centroids_EM_GMM = numpy.zeros(shape=(NbClusters,d))
# print("Pi")
# print(Pi)
# print("MatrixOfSigmas")
# print(MatrixOfSigmas)
indexOverN = 0
while indexOverN < N :
indexOverNbClusters = 0
while indexOverNbClusters < NbClusters:
# print("Centroids_EM_GMM[indexOverNbClusters]")
# print(Centroids_EM_GMM[indexOverNbClusters].shape)
# print("X[indexOverN]")
# print(X[indexOverN].shape)
# print("Phi[indexOverNbClusters]")
# print(Phi[indexOverNbClusters].shape)
Centroids_EM_GMM[indexOverNbClusters] += (X[indexOverN]*Phi[indexOverN][indexOverNbClusters])/(Pi[0][indexOverNbClusters]*N)
indexOverNbClusters += 1
indexOverN += 1
# print("Centroids_EM_GMM")
# print(Centroids_EM_GMM)
indexOverNbClusters = 0
while indexOverNbClusters < NbClusters:
# New matrix Sigma for each k
Sigma = numpy.zeros(shape=(d, d))
indexOverN = 0
while indexOverN < N :
SigmaN = numpy.zeros(shape=(d, d))
# Phi[k] * ( x[i] - Centroids_EM_GMM[k]) * transpose ( x[i] - Centroids_EM_GMM[k]) / (Pi[0][k] * N)
# print("X[i].shape")
# print(X[indexOverN].shape)
# [numpy.newaxis] allow me to convert a 1D array into a 2D array. From there only I can transpose it
XtoCentroid = X[indexOverN]-Centroids_EM_GMM[indexOverNbClusters]
XtoCentroid = XtoCentroid[numpy.newaxis]
TransposeXtoCentroid = XtoCentroid.T
SigmaN = (TransposeXtoCentroid).dot(XtoCentroid)
SigmaN *= Phi[indexOverN][indexOverNbClusters]
Sigma += SigmaN
indexOverN += 1
#print(Sigma)
Sigma /= (Pi[0][indexOverNbClusters] * N)
MatrixOfSigmas[indexOverNbClusters*d:(indexOverNbClusters+1)*d] = Sigma
WriteToFile("Sigma-"+str(indexOverNbClusters+1)+"-", indexOverNbIterations+1, Sigma)
indexOverNbClusters += 1
# print("MatrixOfSigmas")
# print(MatrixOfSigmas)
def InitSigmaToIdentityMatrix():
global MatrixOfSigmas
k = 0
indexOverN = 0
while indexOverN < N:
# I retrieve the cluster the data point belongs to
y = Ci[indexOverN][0]
XtoCentroid = X[indexOverN] - Centroids[k]
XtoCentroid = XtoCentroid[numpy.newaxis]
TransposeXtoCentroid = XtoCentroid.T
MatrixOfSigmas[y*d:(y+1)*d] += (XtoCentroid).dot(TransposeXtoCentroid)
indexOverN += 1
indexOverNbClusters = 0
while indexOverNbClusters < NbClusters:
SigmaK = MatrixOfSigmas[indexOverNbClusters*d:(indexOverNbClusters+1)*d]
SigmaK /= Ni[indexOverNbClusters]
MatrixOfSigmas[indexOverNbClusters*d:(indexOverNbClusters+1)*d] = SigmaK
indexOverNbClusters += 1
indexOverDCluster = 0
indexOverd = 0
while indexOverDCluster < d*NbClusters:
MatrixOfSigmas[indexOverDCluster][indexOverd] += 1
indexOverd += 1
if indexOverd >= d:
indexOverd = 0
indexOverDCluster += 1
print(MatrixOfSigmas)
###############################
## MAIN ##
###############################
###############################
## K-MEANS ALGORITHM ##
###############################
indexOverNbIterations = 0
while indexOverNbIterations < NbIterations:
# Expectation Step
UpdateEachCi()
# Maximization Step
UpdateEachCentroids()
WriteToFile("centroids-", indexOverNbIterations+1, Centroids)
indexOverNbIterations += 1
###############################
## GMM ALGORITHM ##
###############################
# Probability of each data point to belong to each cluster
Phi = numpy.zeros(shape=(N, NbClusters))
# Pi[0][k] is a K-dimensional probability distribution.
# Basically the weight of each Gaussian.
# Initialized to be the uniform distribution
Pi = numpy.zeros(shape=(1,NbClusters))
indexOverNbClusters = 0
while indexOverNbClusters < NbClusters:
Pi[0][indexOverNbClusters] = (1/float(NbClusters))
indexOverNbClusters += 1
print("Pi")
print(Pi)
# Stores verticaly every Sigma
MatrixOfSigmas = numpy.zeros(shape=(d*NbClusters, d))
InitSigmaToIdentityMatrix()
# Initialization of the centroids to be the result of the K-means algo
Centroids_EM_GMM = Centroids
indexOverNbIterations = 0
while indexOverNbIterations < NbIterations:
# Expectation Step
UpdateEachPhiAndPi()
# Maximization Step
UpdateEachMuAndSigma(indexOverNbIterations)
# PiMatrix = numpy.mat(Pi)
# print(PiMatrix)
# PiMatrix = numpy.transpose(PiMatrix[numpy.newaxis])
# print(PiMatrix)
WriteToFile("pi-", indexOverNbIterations+1, numpy.transpose(Pi))
WriteToFile("mu-", indexOverNbIterations+1, Centroids_EM_GMM)
indexOverNbIterations += 1
print("")
print("Finishes OK.")
print("")
# print("--- %s seconds ---" % (time.time() - start_time))
# Print a 3D graph.
# Each color represents the belonging to a certain cluster.
if PrintEnabled:
color=['red','green','blue', 'yellow', 'brown']
fig=plt.figure()
ax3D = Axes3D(fig)
for e in range(0,N):
ax3D.scatter(X[e][0], X[e][1], X[e][2], color=color[int(Ci[e])])
plt.show()
| [
"math.exp",
"matplotlib.pyplot.show",
"random.randint",
"mpl_toolkits.mplot3d.Axes3D",
"csv.writer",
"numpy.multiply",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.transpose",
"random.random",
"matplotlib.pyplot.figure",
"numpy.linalg.inv",
"numpy.linalg.det"
] | [((289, 327), 'numpy.genfromtxt', 'genfromtxt', (['sys.argv[1]'], {'delimiter': '""","""'}), "(sys.argv[1], delimiter=',')\n", (299, 327), False, 'from numpy import genfromtxt\n'), ((783, 808), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(N, 1)'}), '(shape=(N, 1))\n', (794, 808), False, 'import numpy\n'), ((833, 867), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(NbClusters, d)'}), '(shape=(NbClusters, d))\n', (844, 867), False, 'import numpy\n'), ((1025, 1050), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(2, d)'}), '(shape=(2, d))\n', (1036, 1050), False, 'import numpy\n'), ((12237, 12271), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(N, NbClusters)'}), '(shape=(N, NbClusters))\n', (12248, 12271), False, 'import numpy\n'), ((12438, 12472), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(1, NbClusters)'}), '(shape=(1, NbClusters))\n', (12449, 12472), False, 'import numpy\n'), ((12698, 12736), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(d * NbClusters, d)'}), '(shape=(d * NbClusters, d))\n', (12709, 12736), False, 'import numpy\n'), ((1609, 1626), 'random.randint', 'randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (1616, 1626), False, 'from random import randint\n'), ((1972, 1987), 'random.random', 'random.random', ([], {}), '()\n', (1985, 1987), False, 'import random\n'), ((4951, 4985), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(NbClusters, d)'}), '(shape=(NbClusters, d))\n', (4962, 4985), False, 'import numpy\n'), ((5260, 5294), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(N, NbClusters)'}), '(shape=(N, NbClusters))\n', (5271, 5294), False, 'import numpy\n'), ((8486, 8520), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(NbClusters, d)'}), '(shape=(NbClusters, d))\n', (8497, 8520), False, 'import numpy\n'), ((13624, 13636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13634, 13636), True, 'import matplotlib.pyplot as plt\n'), ((13648, 13659), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (13654, 13659), 
False, 'from mpl_toolkits.mplot3d import axes3d, Axes3D\n'), ((13762, 13772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13770, 13772), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1401), 'random.randint', 'randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (1391, 1401), False, 'from random import randint\n'), ((2897, 2958), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n", (2907, 2958), False, 'import csv\n'), ((3292, 3353), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n", (3302, 3353), False, 'import csv\n'), ((9432, 9457), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(d, d)'}), '(shape=(d, d))\n', (9443, 9457), False, 'import numpy\n'), ((13243, 13262), 'numpy.transpose', 'numpy.transpose', (['Pi'], {}), '(Pi)\n', (13258, 13262), False, 'import numpy\n'), ((5561, 5586), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(d, d)'}), '(shape=(d, d))\n', (5572, 5586), False, 'import numpy\n'), ((5699, 5723), 'numpy.linalg.det', 'numpy.linalg.det', (['SigmaK'], {}), '(SigmaK)\n', (5715, 5723), False, 'import numpy\n'), ((5834, 5874), 'numpy.transpose', 'numpy.transpose', (['XminusMu[numpy.newaxis]'], {}), '(XminusMu[numpy.newaxis])\n', (5849, 5874), False, 'import numpy\n'), ((5903, 5914), 'numpy.linalg.inv', 'inv', (['SigmaK'], {}), '(SigmaK)\n', (5906, 5914), False, 'from numpy.linalg import inv\n'), ((6093, 6119), 'math.exp', 'math.exp', (['(-0.5 * MatrixMul)'], {}), '(-0.5 * MatrixMul)\n', (6101, 6119), False, 'import math\n'), ((9533, 9558), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(d, d)'}), '(shape=(d, d))\n', (9544, 9558), False, 'import numpy\n'), ((1780, 1836), 'numpy.multiply', 'numpy.multiply', (['(X[n] - Centroids[c])', '(X[n] - Centroids[c])'], {}), '(X[n] - Centroids[c], X[n] - Centroids[c])\n', (1794, 1836), False, 
'import numpy\n')] |
from albumentations import Compose, ElasticTransform, Flip, CoarseDropout, RandomCrop, pytorch, Normalize, Resize, \
HorizontalFlip, Rotate, PadIfNeeded, CenterCrop, Cutout
import numpy as np
class CustomCompose:
def __init__(self,transforms):
self.transforms = transforms
def __call__(self, img):
img = np.array(img)
img = self.transforms(image=img)["image"]
return img
| [
"numpy.array"
] | [((335, 348), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (343, 348), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from typing import Tuple, List, Iterable
from sklearn import linear_model as lm
from skultrafast.base_funcs.base_functions_np import _fold_exp, _coh_gaussian
def _make_base(tup, taus, w=0.1, add_coh=True, add_const=False, norm=False):
if add_const:
taus = np.hstack((taus, 10000))
out = _fold_exp(tup.t.T[:, None], w, 0, taus[None, :]).squeeze()
if add_const:
print(out.shape)
out[:, -1] *= 1000
if add_coh:
out = np.hstack(
(out, _coh_gaussian(tup.t.T[:, None], w, 0).squeeze())) * 10
if norm:
out = out / np.abs(out).max(0)
return out.squeeze()
def start_ltm(tup,
taus,
w=0.1,
add_coh=False,
use_cv=False,
add_const=False,
verbose=False,
**kwargs):
"""Calculate the lifetime density map for given data.
Parameters
----------
tup : datatuple
tuple with wl, t, data
taus : list of floats
Used to build the basis vectors.
w : float, optional
Used sigma for calculating the , by default 0.1.
add_coh : bool, optional
If true, coherent contributions are added to the basis.
By default False.
use_cv : bool, optional
Whether to use cross-validation, by default False
add_const : bool, optional
Whether to add an explict constant, by default False
verbose : bool, optional
Wheater to be verobse, by default False
Returns
-------
tuple of (linear_model, coefs, fit, alphas)
The linear model is the used sklearn model. Coefs is the arrary
of the coefficents, fit contains the resulting fit and alphas
is an array of the applied alpha value when using cv.
"""
X = _make_base(tup, taus, w=w, add_const=add_const, add_coh=add_coh)
if not use_cv:
mod = lm.ElasticNet(**kwargs, l1_ratio=0.98)
else:
mod = lm.ElasticNetCV(**kwargs, l1_ratio=0.98)
mod.fit_intercept = not add_const
mod.warm_start = 1
coefs = np.empty((X.shape[1], tup.data.shape[1]))
fit = np.empty_like(tup.data)
alphas = np.empty(tup.data.shape[1])
for i in range(tup.data.shape[1]):
if verbose:
print(i, 'ha', end=';')
mod.fit(X, tup.data[:, i])
coefs[:, i] = mod.coef_.copy()
fit[:, i] = mod.predict(X)
if hasattr(mod, 'alpha_'):
alphas[i] = mod.alpha_
return mod, coefs, fit, alphas
def start_ltm_multi(tup, taus, w=0.1, alpha=0.001, **kwargs):
X = _make_base(tup, taus, w=w)
mod = lm.MultiTaskElasticNet(alpha=alpha, **kwargs)
mod.max_iter = 5e4
mod.verbose = 0
mod.fit_intercept = 0
mod.normalize = 1
mod.fit(X, tup.data)
fit = mod.predict(X)
coefs = mod.coef_
return mod, coefs, fit, None
| [
"sklearn.linear_model.ElasticNetCV",
"skultrafast.base_funcs.base_functions_np._coh_gaussian",
"sklearn.linear_model.MultiTaskElasticNet",
"numpy.abs",
"sklearn.linear_model.ElasticNet",
"numpy.empty",
"numpy.empty_like",
"skultrafast.base_funcs.base_functions_np._fold_exp",
"numpy.hstack"
] | [((2215, 2256), 'numpy.empty', 'np.empty', (['(X.shape[1], tup.data.shape[1])'], {}), '((X.shape[1], tup.data.shape[1]))\n', (2223, 2256), True, 'import numpy as np\n'), ((2268, 2291), 'numpy.empty_like', 'np.empty_like', (['tup.data'], {}), '(tup.data)\n', (2281, 2291), True, 'import numpy as np\n'), ((2306, 2333), 'numpy.empty', 'np.empty', (['tup.data.shape[1]'], {}), '(tup.data.shape[1])\n', (2314, 2333), True, 'import numpy as np\n'), ((2768, 2813), 'sklearn.linear_model.MultiTaskElasticNet', 'lm.MultiTaskElasticNet', ([], {'alpha': 'alpha'}), '(alpha=alpha, **kwargs)\n', (2790, 2813), True, 'from sklearn import linear_model as lm\n'), ((361, 385), 'numpy.hstack', 'np.hstack', (['(taus, 10000)'], {}), '((taus, 10000))\n', (370, 385), True, 'import numpy as np\n'), ((2027, 2065), 'sklearn.linear_model.ElasticNet', 'lm.ElasticNet', ([], {'l1_ratio': '(0.98)'}), '(**kwargs, l1_ratio=0.98)\n', (2040, 2065), True, 'from sklearn import linear_model as lm\n'), ((2094, 2134), 'sklearn.linear_model.ElasticNetCV', 'lm.ElasticNetCV', ([], {'l1_ratio': '(0.98)'}), '(**kwargs, l1_ratio=0.98)\n', (2109, 2134), True, 'from sklearn import linear_model as lm\n'), ((397, 445), 'skultrafast.base_funcs.base_functions_np._fold_exp', '_fold_exp', (['tup.t.T[:, None]', 'w', '(0)', 'taus[None, :]'], {}), '(tup.t.T[:, None], w, 0, taus[None, :])\n', (406, 445), False, 'from skultrafast.base_funcs.base_functions_np import _fold_exp, _coh_gaussian\n'), ((681, 692), 'numpy.abs', 'np.abs', (['out'], {}), '(out)\n', (687, 692), True, 'import numpy as np\n'), ((591, 628), 'skultrafast.base_funcs.base_functions_np._coh_gaussian', '_coh_gaussian', (['tup.t.T[:, None]', 'w', '(0)'], {}), '(tup.t.T[:, None], w, 0)\n', (604, 628), False, 'from skultrafast.base_funcs.base_functions_np import _fold_exp, _coh_gaussian\n')] |
from keras.losses import binary_crossentropy
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D
from keras import losses, Input, Model
from keras.callbacks import EarlyStopping
from keras.regularizers import l2, l1_l2
from keras.optimizers import SGD, Adam
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from collections import OrderedDict
import numpy as np
import copy
import sklearn
import keras
from keras.metrics import binary_accuracy
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import f1_score
from sklearn.linear_model.logistic import LogisticRegression
from scipy.stats import spearmanr
from off_sample_utils import resize_image
def create_cnn(input_shape=(64, 64, 1), opt=None,
l1_a=0, l2_a=0.01,
init_filters=8,
dropout_p=0.5,
dense_fn=(256, 256),
act_f='relu',
kernel_initializer='glorot_uniform',
metrics=None):
model = Sequential()
strides = 1
pool_size = (2, 2)
# model.add(Conv2D(3, (1, 1), strides=strides, padding='same', input_shape=input_shape,
# kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
# model.add(BatchNormalization())
# model.add(Activation(act_f))
model.add(Conv2D(init_filters, (3, 3), strides=strides, padding='same', input_shape=input_shape,
kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(BatchNormalization())
model.add(Activation(act_f))
model.add(MaxPool2D(pool_size=pool_size))
model.add(Conv2D(init_filters, (3, 3), strides=strides, padding='same',
kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(BatchNormalization())
model.add(Activation(act_f))
model.add(MaxPool2D(pool_size=pool_size))
model.add(Conv2D(init_filters * 2, (3, 3), strides=strides, padding='same',
kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(BatchNormalization())
model.add(Activation(act_f))
model.add(MaxPool2D(pool_size=pool_size))
model.add(Conv2D(init_filters * 4, (3, 3), strides=strides, padding='same',
kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(BatchNormalization())
model.add(Activation(act_f))
model.add(MaxPool2D(pool_size=pool_size))
model.add(Conv2D(init_filters * 8, (3, 3), strides=strides, padding='same',
kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(BatchNormalization())
model.add(Activation(act_f))
model.add(MaxPool2D(pool_size=pool_size))
model.add(Conv2D(init_filters * 16, (2, 2), strides=strides, padding='valid',
kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(BatchNormalization())
model.add(Activation(act_f))
model.add(Flatten())
model.add(Dense(dense_fn[0], kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(Activation(act_f))
model.add(Dropout(dropout_p))
model.add(Dense(dense_fn[1], kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(Activation(act_f))
model.add(Dropout(dropout_p))
model.add(Dense(1, kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_a, l2_a)))
model.add(Activation('sigmoid'))
if not metrics:
metrics = [binary_accuracy]
model.compile(loss=binary_crossentropy, optimizer=opt, metrics=metrics)
return model
class OffSampleImageDataGenerator(ImageDataGenerator):
def standardize(self, x):
return super().standardize(x.copy())
class OffSampleKerasClassifier(KerasClassifier):
def __init__(self, **sk_params):
super().__init__(**sk_params)
self.classes_ = np.arange(2)
self.model = None
self.data_gen = None
self.mask_data_gen = None
def check_params(self, params):
pass
def fit(self, x, y, **kwargs):
self.set_params(**kwargs)
print('create_model args: {}'.format(self.filter_sk_params(create_cnn)))
self.model = create_cnn(**self.filter_sk_params(create_cnn))
data_gen_args = dict(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
rescale=0.3)
seed = 13
self.data_gen = OffSampleImageDataGenerator(**data_gen_args)
# self.mask_data_gen = OffSampleImageDataGenerator(**data_gen_args)
self.data_gen.fit(x, augment=True, seed=seed)
# self.mask_data_gen.fit(x, augment=True, seed=seed)
print('fit_generator args: {}'.format(
{k: v for k, v in self.filter_sk_params(self.model.fit_generator).items() if k != 'validation_data'}))
fit_args = copy.deepcopy(self.filter_sk_params(self.model.fit_generator))
fit_args.update(kwargs)
print('flow args: {}'.format(self.filter_sk_params(ImageDataGenerator.flow)))
flow_args = copy.deepcopy(self.filter_sk_params(ImageDataGenerator.flow))
image_gen = self.data_gen.flow(x, y, seed=seed, **flow_args)
# mask_gen = self.mask_data_gen.flow(masks, y, seed=seed, **flow_args)
history = self.model.fit_generator(image_gen, steps_per_epoch=len(x) / flow_args['batch_size'],
**fit_args)
return history
def _target_class_f1_score(self, x, y, **kwargs):
x = self.data_gen.standardize(x)
y_pred = self.model.predict(x)
y_pred_lab = np.around(y_pred)
return f1_score(y[:, 1], y_pred_lab[:, 1]) # 0 - on, 1 - off
def score(self, x, y, **kwargs):
return self._target_class_f1_score(x, y, **kwargs)
def predict_proba(self, x, **kwargs):
x = self.data_gen.standardize(x)
return KerasClassifier.predict_proba(self, x, **kwargs)
def tta_predict(model, X_test):
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
tta_list = []
for i, img in enumerate(X_test):
tta_list.extend([img, flip_axis(img, 0),
flip_axis(img, 1), flip_axis(flip_axis(img, 1), 0)])
X_test_tta = np.stack(tta_list, axis=0)
y_test_pred_cnn_tta = model.predict(X_test_tta)
if y_test_pred_cnn_tta.ndim > 1:
y_test_pred_cnn_tta = y_test_pred_cnn_tta[:,-1] # handles case when second dim size = 1 or 2
return y_test_pred_cnn_tta.reshape(-1, 4).mean(axis=1)
class KerasCNN(object):
data_gen_args = dict(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=15,
width_shift_range=0.15,
height_shift_range=0.15,
horizontal_flip=True,
vertical_flip=True,
shear_range=0.2,
zoom_range=0.2)
def __init__(self, image_shape, save_path='custom-cnn-weights.hdf5'):
self.save_path = save_path
self.data_gen = OffSampleImageDataGenerator(**self.data_gen_args)
self.args = dict(input_shape=(image_shape + (1,)),
opt=keras.optimizers.Adam(lr=5e-4),
l2_a=0.01,
init_filters=8,
dropout_p=0.5,
dense_fn=(256, 256),
act_f='relu',
kernel_initializer='glorot_uniform',
metrics=[keras.metrics.binary_accuracy])
self.model = create_cnn(**self.args)
def fit(self, X_train, y_train, X_valid=None, y_valid=None,
epochs=20, batch_size=32, seed=13):
callbacks = []
self.data_gen.fit(X_train)
if X_valid is not None:
validation_data = (self.data_gen.standardize(X_valid), y_valid)
checkpointer = keras.callbacks.ModelCheckpoint(filepath=self.save_path,
monitor='val_binary_accuracy',
verbose=1, save_best_only=True)
callbacks.append(checkpointer)
else:
validation_data = None
return self.model.fit_generator(self.data_gen.flow(X_train, y_train, batch_size=batch_size, seed=seed),
epochs=epochs, validation_data=validation_data,
steps_per_epoch=len(X_train) / batch_size,
callbacks=callbacks)
def predict(self, X_test, load_best=False):
if load_best:
self.model = create_cnn(**self.args)
self.model.load_weights(self.save_path)
X_test = self.data_gen.standardize(X_test)
return tta_predict(self.model, X_test)
class KerasNN(object):
@staticmethod
def build_model(feature_n=None, l1_a=None, l2_a=None, lr=None):
kernel_regularizer = l1_l2(l1_a, l2_a)
model_in = Input(shape=(feature_n,))
out = Dense(256, kernel_regularizer=kernel_regularizer)(model_in)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(0.5)(out)
out = Dense(32, kernel_regularizer=kernel_regularizer)(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(0.5)(out)
out = Dense(16, kernel_regularizer=kernel_regularizer)(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(0.5)(out)
out = Dense(1, activation='sigmoid', kernel_regularizer=kernel_regularizer)(out)
model = Model(model_in, out)
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=lr),
metrics=['binary_accuracy'])
return model
def __init__(self, feature_n, save_path='custom-dense-nn-weights.hdf5'):
self.save_path = save_path
self.model = None
self.args = dict(
feature_n=feature_n,
lr=0.001,
l1_a=0,
l2_a=0.01)
def fit(self, X_train, y_train, X_valid=None, y_valid=None,
epochs=5, batch_size=64):
callbacks = []
if X_valid is not None:
validation_data = (X_valid, y_valid)
checkpointer = keras.callbacks.ModelCheckpoint(filepath=self.save_path,
monitor='val_binary_accuracy',
verbose=1, save_best_only=True)
callbacks.append(checkpointer)
else:
validation_data = None
self.model = self.build_model(**self.args)
history = self.model.fit(X_train, y_train,
validation_data=validation_data,
batch_size=batch_size, epochs=epochs, verbose=1,
callbacks=callbacks)
return history
def predict(self, X_test, load_best=False):
if load_best:
self.model = self.build_model(**self.args)
self.model.load_weights(self.save_path)
# X_test = self.data_gen.standardize(X_test)
return self.model.predict(X_test)[:, 0]
class SKLogisticRegression(object):
def __init__(self, n_components=50):
self.pca = sklearn.decomposition.TruncatedSVD(n_components=n_components)
self.model = sklearn.linear_model.logistic.LogisticRegression(solver='lbfgs', max_iter=300, verbose=1)
def fit(self, X_train, y_train):
X_train_pca = self.pca.fit_transform(X_train)
self.model.fit(X_train_pca, y_train)
def predict(self, X_test):
X_test_pca = self.pca.transform(X_test)
y_pred = self.model.predict_proba(X_test_pca)[:, 1]
y_pred[np.isnan(y_pred)] = y_pred[~np.isnan(y_pred)].mean()
return y_pred
def pixel_corr_predict(y_p_pred, groups_p_test, X_test, groups_test,
masks, image_shape):
y_pred = []
for group in np.unique(groups_p_test):
pred_mask = y_p_pred[groups_p_test == group].reshape(masks[group].shape)
pred_mask = resize_image(pred_mask, image_shape)
for img in X_test[groups_test == group]:
sp_corr = spearmanr(img[:, :, 0].flatten(), pred_mask.flatten()).correlation
sp_corr = (sp_corr + 1) / 2 # normalising
y_pred.append(sp_corr)
return np.asarray(y_pred)
class Blender(object):
def __init__(self, cnn, nn, lr, image_shape):
self.cnn, self.nn, self.lr = cnn, nn, lr
self.image_shape = image_shape
self.model = None
self.standard_scaler = None
self.X_blend_test = None
def first_level_pred(self, X_test, groups_test,
X_p_test, groups_p_test, masks):
y_test_pred_cnn = self.cnn.predict(X_test)
y_p_test_pred = self.nn.predict(X_p_test)
y_test_pred_nn = pixel_corr_predict(y_p_test_pred, groups_p_test, X_test, groups_test,
masks, self.image_shape)
y_p_test_pred = self.lr.predict(X_p_test)
y_test_pred_lr = pixel_corr_predict(y_p_test_pred, groups_p_test, X_test, groups_test,
masks, self.image_shape)
return np.stack([y_test_pred_cnn,
y_test_pred_nn,
y_test_pred_lr], axis=1)
def fit(self, X_valid, y_valid, groups_valid, X_p_valid, groups_p_valid, masks):
X_blend_train = self.first_level_pred(X_valid, groups_valid,
X_p_valid, groups_p_valid, masks)
y_blend_train = y_valid
self.standard_scaler = sklearn.preprocessing.StandardScaler()
X_blend_train_scaled = self.standard_scaler.fit_transform(X_blend_train)
self.model = sklearn.linear_model.logistic.LogisticRegressionCV(cv=10, solver='liblinear')
self.model.fit(X_blend_train_scaled, y_blend_train)
def predict(self, X_test, groups_test, X_p_test, groups_p_test, masks):
self.X_blend_test = self.first_level_pred(X_test, groups_test,
X_p_test, groups_p_test, masks)
X_blend_test_scaled = self.standard_scaler.transform(self.X_blend_test)
y_blend_test_pred = self.model.predict_proba(X_blend_test_scaled)[:, 1]
return y_blend_test_pred
| [
"sklearn.preprocessing.StandardScaler",
"keras.layers.MaxPool2D",
"numpy.isnan",
"off_sample_utils.resize_image",
"numpy.around",
"sklearn.metrics.f1_score",
"numpy.arange",
"keras.regularizers.l1_l2",
"numpy.unique",
"sklearn.decomposition.TruncatedSVD",
"keras.layers.Flatten",
"keras.layers.... | [((1161, 1173), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1171, 1173), False, 'from keras.models import Sequential\n'), ((6788, 6814), 'numpy.stack', 'np.stack', (['tta_list'], {'axis': '(0)'}), '(tta_list, axis=0)\n', (6796, 6814), True, 'import numpy as np\n'), ((12584, 12608), 'numpy.unique', 'np.unique', (['groups_p_test'], {}), '(groups_p_test)\n', (12593, 12608), True, 'import numpy as np\n'), ((12987, 13005), 'numpy.asarray', 'np.asarray', (['y_pred'], {}), '(y_pred)\n', (12997, 13005), True, 'import numpy as np\n'), ((1696, 1716), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1714, 1716), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1732, 1749), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (1742, 1749), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((1765, 1795), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': 'pool_size'}), '(pool_size=pool_size)\n', (1774, 1795), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((1987, 2007), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2005, 2007), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2023, 2040), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (2033, 2040), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2056, 2086), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': 'pool_size'}), '(pool_size=pool_size)\n', (2065, 2086), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2282, 2302), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2300, 2302), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2318, 2335), 
'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (2328, 2335), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2351, 2381), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': 'pool_size'}), '(pool_size=pool_size)\n', (2360, 2381), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2577, 2597), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2595, 2597), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2613, 2630), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (2623, 2630), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2646, 2676), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': 'pool_size'}), '(pool_size=pool_size)\n', (2655, 2676), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2872, 2892), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2890, 2892), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2908, 2925), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (2918, 2925), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((2941, 2971), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': 'pool_size'}), '(pool_size=pool_size)\n', (2950, 2971), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3169, 3189), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3187, 3189), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3205, 3222), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (3215, 3222), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3239, 
3248), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3246, 3248), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3376, 3393), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (3386, 3393), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3409, 3427), 'keras.layers.Dropout', 'Dropout', (['dropout_p'], {}), '(dropout_p)\n', (3416, 3427), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3555, 3572), 'keras.layers.Activation', 'Activation', (['act_f'], {}), '(act_f)\n', (3565, 3572), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3588, 3606), 'keras.layers.Dropout', 'Dropout', (['dropout_p'], {}), '(dropout_p)\n', (3595, 3606), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((3724, 3745), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (3734, 3745), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((4181, 4193), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (4190, 4193), True, 'import numpy as np\n'), ((6075, 6092), 'numpy.around', 'np.around', (['y_pred'], {}), '(y_pred)\n', (6084, 6092), True, 'import numpy as np\n'), ((6108, 6143), 'sklearn.metrics.f1_score', 'f1_score', (['y[:, 1]', 'y_pred_lab[:, 1]'], {}), '(y[:, 1], y_pred_lab[:, 1])\n', (6116, 6143), False, 'from sklearn.metrics import f1_score\n'), ((6359, 6407), 'keras.wrappers.scikit_learn.KerasClassifier.predict_proba', 'KerasClassifier.predict_proba', (['self', 'x'], {}), '(self, x, **kwargs)\n', (6388, 6407), False, 'from keras.wrappers.scikit_learn import KerasClassifier\n'), ((9467, 9484), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (9472, 9484), False, 'from keras.regularizers import l2, l1_l2\n'), ((9504, 9529), 
'keras.Input', 'Input', ([], {'shape': '(feature_n,)'}), '(shape=(feature_n,))\n', (9509, 9529), False, 'from keras import losses, Input, Model\n'), ((10175, 10195), 'keras.Model', 'Model', (['model_in', 'out'], {}), '(model_in, out)\n', (10180, 10195), False, 'from keras import losses, Input, Model\n'), ((11896, 11957), 'sklearn.decomposition.TruncatedSVD', 'sklearn.decomposition.TruncatedSVD', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (11930, 11957), False, 'import sklearn\n'), ((11979, 12073), 'sklearn.linear_model.logistic.LogisticRegression', 'sklearn.linear_model.logistic.LogisticRegression', ([], {'solver': '"""lbfgs"""', 'max_iter': '(300)', 'verbose': '(1)'}), "(solver='lbfgs', max_iter=\n 300, verbose=1)\n", (12027, 12073), False, 'import sklearn\n'), ((12711, 12747), 'off_sample_utils.resize_image', 'resize_image', (['pred_mask', 'image_shape'], {}), '(pred_mask, image_shape)\n', (12723, 12747), False, 'from off_sample_utils import resize_image\n'), ((13873, 13940), 'numpy.stack', 'np.stack', (['[y_test_pred_cnn, y_test_pred_nn, y_test_pred_lr]'], {'axis': '(1)'}), '([y_test_pred_cnn, y_test_pred_nn, y_test_pred_lr], axis=1)\n', (13881, 13940), True, 'import numpy as np\n'), ((14290, 14328), 'sklearn.preprocessing.StandardScaler', 'sklearn.preprocessing.StandardScaler', ([], {}), '()\n', (14326, 14328), False, 'import sklearn\n'), ((14432, 14509), 'sklearn.linear_model.logistic.LogisticRegressionCV', 'sklearn.linear_model.logistic.LogisticRegressionCV', ([], {'cv': '(10)', 'solver': '"""liblinear"""'}), "(cv=10, solver='liblinear')\n", (14482, 14509), False, 'import sklearn\n'), ((8379, 8503), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'self.save_path', 'monitor': '"""val_binary_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=self.save_path, monitor=\n 'val_binary_accuracy', verbose=1, save_best_only=True)\n", (8410, 8503), False, 'import keras\n'), 
((9544, 9593), 'keras.layers.Dense', 'Dense', (['(256)'], {'kernel_regularizer': 'kernel_regularizer'}), '(256, kernel_regularizer=kernel_regularizer)\n', (9549, 9593), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9618, 9638), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9636, 9638), False, 'from keras.layers.normalization import BatchNormalization\n'), ((9658, 9676), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9668, 9676), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9696, 9708), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (9703, 9708), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9728, 9776), 'keras.layers.Dense', 'Dense', (['(32)'], {'kernel_regularizer': 'kernel_regularizer'}), '(32, kernel_regularizer=kernel_regularizer)\n', (9733, 9776), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9796, 9816), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9814, 9816), False, 'from keras.layers.normalization import BatchNormalization\n'), ((9836, 9854), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9846, 9854), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9874, 9886), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (9881, 9886), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9906, 9954), 'keras.layers.Dense', 'Dense', (['(16)'], {'kernel_regularizer': 'kernel_regularizer'}), '(16, kernel_regularizer=kernel_regularizer)\n', (9911, 9954), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((9974, 9994), 'keras.layers.normalization.BatchNormalization', 
'BatchNormalization', ([], {}), '()\n', (9992, 9994), False, 'from keras.layers.normalization import BatchNormalization\n'), ((10014, 10032), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10024, 10032), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((10052, 10064), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (10059, 10064), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((10084, 10153), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'kernel_regularizer': 'kernel_regularizer'}), "(1, activation='sigmoid', kernel_regularizer=kernel_regularizer)\n", (10089, 10153), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPool2D\n'), ((10860, 10984), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'self.save_path', 'monitor': '"""val_binary_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=self.save_path, monitor=\n 'val_binary_accuracy', verbose=1, save_best_only=True)\n", (10891, 10984), False, 'import keras\n'), ((12361, 12377), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (12369, 12377), True, 'import numpy as np\n'), ((1662, 1679), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (1667, 1679), False, 'from keras.regularizers import l2, l1_l2\n'), ((1953, 1970), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (1958, 1970), False, 'from keras.regularizers import l2, l1_l2\n'), ((2248, 2265), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (2253, 2265), False, 'from keras.regularizers import l2, l1_l2\n'), ((2543, 2560), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (2548, 2560), False, 'from keras.regularizers import l2, l1_l2\n'), ((2838, 2855), 'keras.regularizers.l1_l2', 
'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (2843, 2855), False, 'from keras.regularizers import l2, l1_l2\n'), ((3135, 3152), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (3140, 3152), False, 'from keras.regularizers import l2, l1_l2\n'), ((3342, 3359), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (3347, 3359), False, 'from keras.regularizers import l2, l1_l2\n'), ((3521, 3538), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (3526, 3538), False, 'from keras.regularizers import l2, l1_l2\n'), ((3690, 3707), 'keras.regularizers.l1_l2', 'l1_l2', (['l1_a', 'l2_a'], {}), '(l1_a, l2_a)\n', (3695, 3707), False, 'from keras.regularizers import l2, l1_l2\n'), ((6482, 6495), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (6492, 6495), True, 'import numpy as np\n'), ((7666, 7698), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.0005)'}), '(lr=0.0005)\n', (7687, 7698), False, 'import keras\n'), ((10278, 10289), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (10282, 10289), False, 'from keras.optimizers import SGD, Adam\n'), ((12389, 12405), 'numpy.isnan', 'np.isnan', (['y_pred'], {}), '(y_pred)\n', (12397, 12405), True, 'import numpy as np\n')] |
import fast_ffts
import numpy as np
def shift(data, deltax, deltay, phase=0, nthreads=1, use_numpy_fft=False,
        return_abs=False, return_real=True):
    """Shift a 2D array by a (possibly fractional) number of pixels.

    Uses the Fourier shift theorem: multiply the spectrum by a linear phase
    ramp, then transform back.  Based on
    http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html
    Will turn NaNs into zeros
    """
    fftn, ifftn = fast_ffts.get_ffts(nthreads=nthreads,
                                     use_numpy_fft=use_numpy_fft)
    # NaNs would contaminate the whole transform; zero them out first.
    if np.any(np.isnan(data)):
        data = np.nan_to_num(data)
    ny, nx = data.shape
    # Frequency indices for each axis, in FFT (wrapped) ordering.
    xfreq = np.fft.ifftshift(np.linspace(-np.fix(nx / 2), np.ceil(nx / 2) - 1, nx))
    yfreq = np.fft.ifftshift(np.linspace(-np.fix(ny / 2), np.ceil(ny / 2) - 1, ny))
    xgrid, ygrid = np.meshgrid(xfreq, yfreq)
    # Linear phase ramp implementing the (deltax, deltay) translation.
    ramp = np.exp(1j * 2 * np.pi * (-deltax * xgrid / nx - deltay * ygrid / ny))
    result = ifftn(fftn(data) * ramp * np.exp(-1j * phase))
    if return_real:
        return np.real(result)
    if return_abs:
        return np.abs(result)
    return result
def shift1d(data, deltax, phase=0, nthreads=1, use_numpy_fft=False,
        return_abs=False, return_real=True):
    """Shift a 1D array by a (possibly fractional) number of samples.

    Uses the Fourier shift theorem; see
    http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html
    Will turn NaNs into zeros
    """
    fftn, ifftn = fast_ffts.get_ffts(nthreads=nthreads,
                                     use_numpy_fft=use_numpy_fft)
    # NaNs would contaminate the whole transform; zero them out first.
    if np.any(np.isnan(data)):
        data = np.nan_to_num(data)
    nx = data.size
    # Frequency indices in FFT (wrapped) ordering.
    freqs = np.fft.ifftshift(np.linspace(-np.fix(nx / 2), np.ceil(nx / 2) - 1, nx))
    ramp = np.exp(1j * 2 * np.pi * (-deltax * freqs / nx))
    result = ifftn(fftn(data) * ramp * np.exp(-1j * phase))
    if return_real:
        return np.real(result)
    if return_abs:
        return np.abs(result)
    return result
| [
"numpy.meshgrid",
"numpy.abs",
"numpy.nan_to_num",
"numpy.ceil",
"numpy.fix",
"numpy.isnan",
"fast_ffts.get_ffts",
"numpy.exp",
"numpy.real"
] | [((428, 494), 'fast_ffts.get_ffts', 'fast_ffts.get_ffts', ([], {'nthreads': 'nthreads', 'use_numpy_fft': 'use_numpy_fft'}), '(nthreads=nthreads, use_numpy_fft=use_numpy_fft)\n', (446, 494), False, 'import fast_ffts\n'), ((743, 762), 'numpy.meshgrid', 'np.meshgrid', (['Nx', 'Ny'], {}), '(Nx, Ny)\n', (754, 762), True, 'import numpy as np\n'), ((1368, 1434), 'fast_ffts.get_ffts', 'fast_ffts.get_ffts', ([], {'nthreads': 'nthreads', 'use_numpy_fft': 'use_numpy_fft'}), '(nthreads=nthreads, use_numpy_fft=use_numpy_fft)\n', (1386, 1434), False, 'import fast_ffts\n'), ((510, 524), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (518, 524), True, 'import numpy as np\n'), ((542, 561), 'numpy.nan_to_num', 'np.nan_to_num', (['data'], {}), '(data)\n', (555, 561), True, 'import numpy as np\n'), ((895, 906), 'numpy.real', 'np.real', (['gg'], {}), '(gg)\n', (902, 906), True, 'import numpy as np\n'), ((1450, 1464), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (1458, 1464), True, 'import numpy as np\n'), ((1482, 1501), 'numpy.nan_to_num', 'np.nan_to_num', (['data'], {}), '(data)\n', (1495, 1501), True, 'import numpy as np\n'), ((1714, 1725), 'numpy.real', 'np.real', (['gg'], {}), '(gg)\n', (1721, 1725), True, 'import numpy as np\n'), ((840, 861), 'numpy.exp', 'np.exp', (['(-1.0j * phase)'], {}), '(-1.0j * phase)\n', (846, 861), True, 'import numpy as np\n'), ((943, 953), 'numpy.abs', 'np.abs', (['gg'], {}), '(gg)\n', (949, 953), True, 'import numpy as np\n'), ((1659, 1680), 'numpy.exp', 'np.exp', (['(-1.0j * phase)'], {}), '(-1.0j * phase)\n', (1665, 1680), True, 'import numpy as np\n'), ((1762, 1772), 'numpy.abs', 'np.abs', (['gg'], {}), '(gg)\n', (1768, 1772), True, 'import numpy as np\n'), ((624, 638), 'numpy.fix', 'np.fix', (['(nx / 2)'], {}), '(nx / 2)\n', (630, 638), True, 'import numpy as np\n'), ((637, 652), 'numpy.ceil', 'np.ceil', (['(nx / 2)'], {}), '(nx / 2)\n', (644, 652), True, 'import numpy as np\n'), ((697, 711), 'numpy.fix', 'np.fix', (['(ny / 
2)'], {}), '(ny / 2)\n', (703, 711), True, 'import numpy as np\n'), ((710, 725), 'numpy.ceil', 'np.ceil', (['(ny / 2)'], {}), '(ny / 2)\n', (717, 725), True, 'import numpy as np\n'), ((790, 855), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * (-deltax * Nx / nx - deltay * Ny / ny))'], {}), '(1.0j * 2 * np.pi * (-deltax * Nx / nx - deltay * Ny / ny))\n', (796, 855), True, 'import numpy as np\n'), ((1560, 1574), 'numpy.fix', 'np.fix', (['(nx / 2)'], {}), '(nx / 2)\n', (1566, 1574), True, 'import numpy as np\n'), ((1573, 1588), 'numpy.ceil', 'np.ceil', (['(nx / 2)'], {}), '(nx / 2)\n', (1580, 1588), True, 'import numpy as np\n'), ((1622, 1668), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * (-deltax * Nx / nx))'], {}), '(1.0j * 2 * np.pi * (-deltax * Nx / nx))\n', (1628, 1668), True, 'import numpy as np\n')] |
########################################################################
# Author(s): <NAME>
# Date: 21 September 2021
# Desc: Code to apply SP3 corrections to satellite states
########################################################################
from datetime import datetime, timedelta
from io import BytesIO
import pandas as pd
import numpy as np
from collections import defaultdict
from scipy import interpolate
import constants
def datetime_to_tow(t):
    """Convert a datetime to GPS time of week (seconds since week start)."""
    # Reference epoch: start of GPS week 1780 (Sunday 2014-02-16 00:00).
    ref_epoch = datetime(2014, 2, 16, 0, 0, 0, 0, None)
    ref_week = 1780
    elapsed = t - ref_epoch
    week = elapsed.days // 7 + ref_week
    # Seconds into the current GPS week.
    return (elapsed - timedelta((week - ref_week) * 7.0)).total_seconds()
class PreciseNav(object):
    """One precise (SP3) ephemeris record: epoch, position and clock."""

    def __init__(self, date, sat_position):
        self.date = date
        self.tow = datetime_to_tow(date)
        # Position as [km, km, km] plus clock offset in microseconds.
        self.xyzt = np.array([float(v) for v in sat_position])

    def eph2pos(self):
        """Return the satellite position in meters."""
        return self.xyzt[:3] * 1e3

    def time_offset(self):
        """Return the satellite clock offset in seconds."""
        return self.xyzt[3] / 1e6
# Read SP3 precise ephemerides
def parse_sp3(path):
    """Parse an SP3 precise-ephemeris file.

    Args:
        path (string): full path to the SP3 file.

    Returns:
        defaultdict(list): satellite id (e.g. 'G05') -> list of PreciseNav
        records in file order.
    """
    print("\nParsing %s:" % path)
    with open(path) as fd:
        data = fd.readlines()
    nav_dict = defaultdict(list)
    # Bug fix: 'date' was unbound until the first '*' epoch line, so a
    # position ('P') record appearing before any epoch raised NameError.
    # Such records are now skipped instead.
    date = None
    for line in data:
        if line[0] == '*':
            # Epoch header: '*  yyyy mm dd HH MM SS.ssssssss'
            fields = line.split()[1:]
            y, m, d, H, M = (int(i) for i in fields[:-1])
            s = int(float(fields[-1]))
            date = datetime(y, m, d, H, M, s)
        elif line[0] == 'P' and date:
            # Position record: constellation letter, prn, then x y z clock.
            prn, x, y, z, t = line[2:].split()[:5]
            nav_dict[line[1] + "%02d" % int(prn)] += [PreciseNav(date, (x, y, z, t))]
    return nav_dict
# Rotate ECEF satellite positions to compensate for Earth rotation
# during the signal flight time.
def flight_time_correct(X, Y, Z, flight_time):
    # Rotation angle about the z-axis; flight_time is in microseconds.
    angle = constants.WE * flight_time / 1e6
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    rotation = np.array([[cos_a, sin_a, 0.],
                         [-sin_a, cos_a, 0.],
                         [0., 0., 1.]])
    rotated = rotation @ np.expand_dims(np.array([X, Y, Z]), axis=-1)
    return rotated[0], rotated[1], rotated[2]
# Interpolate satellite position and correction for time t and prn
def interpol_sp3(sp3, prn, t):
    """Interpolate position (m) and clock correction (m) of GPS satellite
    ``prn`` at time-of-week ``t`` from the SP3 records in ``sp3``.

    Args:
        sp3 (dict): output of parse_sp3, satellite id -> list of PreciseNav.
        prn (int): GPS satellite number (only 'Gxx' entries are used).
        t (float): time of week in seconds.

    Returns:
        tuple: (x, y, z, c*clock_offset); all zeros when t is not bracketed
        by the available epochs.
    """
    # number of records on each side of t used for the interpolation window
    inter_rad = 3
    subar = sp3['G'+"%02d" % prn]
    low_i, high_i = 0, 0
    # find the first record later than t and take a window around it;
    # assumes records are sorted by time -- TODO confirm for all SP3 inputs
    for i, ephem in enumerate(subar):
        if ephem.tow > t:
            low_i = max(0, i-inter_rad)
            high_i = min(i+inter_rad, len(subar))
            break
    if high_i-low_i<1:
        return 0., 0., 0., 0.
    _t = np.zeros(high_i-low_i)
    _X = np.zeros(high_i-low_i)
    _Y = np.zeros(high_i-low_i)
    _Z = np.zeros(high_i-low_i)
    _B = np.zeros(high_i-low_i)
    # gather epoch times, ECEF positions (m) and clock offsets (s)
    for i in range(low_i, high_i):
        _t[i-low_i] = subar[i].tow
        xyz = subar[i].eph2pos()
        _X[i-low_i] = xyz[0]
        _Y[i-low_i] = xyz[1]
        _Z[i-low_i] = xyz[2]
        _B[i-low_i] = subar[i].time_offset()
    # piecewise-linear interpolators (scipy default) over the window
    X = interpolate.interp1d(_t, _X)
    Y = interpolate.interp1d(_t, _Y)
    Z = interpolate.interp1d(_t, _Z)
    B = interpolate.interp1d(_t, _B)
    # print( np.linalg.norm(np.array([X,Y,Z]) - gt_ecef) - c*B)
    return X(t),Y(t),Z(t),constants.c*B(t) | [
"numpy.zeros",
"numpy.expand_dims",
"datetime.datetime",
"collections.defaultdict",
"numpy.sin",
"numpy.array",
"datetime.timedelta",
"numpy.cos",
"scipy.interpolate.interp1d"
] | [((528, 567), 'datetime.datetime', 'datetime', (['(2014)', '(2)', '(16)', '(0)', '(0)', '(0)', '(0)', 'None'], {}), '(2014, 2, 16, 0, 0, 0, 0, None)\n', (536, 567), False, 'from datetime import datetime, timedelta\n'), ((1191, 1208), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1202, 1208), False, 'from collections import defaultdict\n'), ((1931, 1950), 'numpy.array', 'np.array', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (1939, 1950), True, 'import numpy as np\n'), ((2423, 2447), 'numpy.zeros', 'np.zeros', (['(high_i - low_i)'], {}), '(high_i - low_i)\n', (2431, 2447), True, 'import numpy as np\n'), ((2453, 2477), 'numpy.zeros', 'np.zeros', (['(high_i - low_i)'], {}), '(high_i - low_i)\n', (2461, 2477), True, 'import numpy as np\n'), ((2483, 2507), 'numpy.zeros', 'np.zeros', (['(high_i - low_i)'], {}), '(high_i - low_i)\n', (2491, 2507), True, 'import numpy as np\n'), ((2513, 2537), 'numpy.zeros', 'np.zeros', (['(high_i - low_i)'], {}), '(high_i - low_i)\n', (2521, 2537), True, 'import numpy as np\n'), ((2543, 2567), 'numpy.zeros', 'np.zeros', (['(high_i - low_i)'], {}), '(high_i - low_i)\n', (2551, 2567), True, 'import numpy as np\n'), ((2784, 2812), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['_t', '_X'], {}), '(_t, _X)\n', (2804, 2812), False, 'from scipy import interpolate\n'), ((2819, 2847), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['_t', '_Y'], {}), '(_t, _Y)\n', (2839, 2847), False, 'from scipy import interpolate\n'), ((2854, 2882), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['_t', '_Z'], {}), '(_t, _Z)\n', (2874, 2882), False, 'from scipy import interpolate\n'), ((2889, 2917), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['_t', '_B'], {}), '(_t, _B)\n', (2909, 2917), False, 'from scipy import interpolate\n'), ((1970, 1998), 'numpy.expand_dims', 'np.expand_dims', (['XYZ'], {'axis': '(-1)'}), '(XYZ, axis=-1)\n', (1984, 1998), True, 'import numpy as np\n'), ((1412, 1438), 
'datetime.datetime', 'datetime', (['y', 'm', 'd', 'H', 'M', 's'], {}), '(y, m, d, H, M, s)\n', (1420, 1438), False, 'from datetime import datetime, timedelta\n'), ((651, 680), 'datetime.timedelta', 'timedelta', (['((wk - refwk) * 7.0)'], {}), '((wk - refwk) * 7.0)\n', (660, 680), False, 'from datetime import datetime, timedelta\n'), ((1833, 1846), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1839, 1846), True, 'import numpy as np\n'), ((1848, 1861), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1854, 1861), True, 'import numpy as np\n'), ((1885, 1898), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1891, 1898), True, 'import numpy as np\n'), ((1870, 1883), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1876, 1883), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Extract TEC values from an IONEX file given a specific time and geographic coordinate.
Created on Tue Apr 24 11:46:57 2018
@author: mevius
"""
import numpy as np
import datetime
import scipy.ndimage.filters as myfilter
import logging
import os
import ftplib
import socket
logging.basicConfig(level=logging.ERROR)
def _read_ionex_header(filep):
    """reads header from ionex file. returns data shape and scaling info.
    Args:
        filep (filepointer) : pointer to opened ionex file.
    Returns:
        Tuple[float, np.array, np.array, np.array, list, list]:
            multiplication factor (10**EXPONENT), lonarray, latarray,
            timearray (hours of day), satellite DCB list, station DCB list
    """
    filep.seek(0)
    dcb_list = []
    station_dcb_list = []
    # IONEX header records are identified by a fixed label at the line end
    for line in filep:
        if "END OF HEADER" in line:
            break
        stripped = line.strip()
        if stripped.endswith("EPOCH OF FIRST MAP"):
            starttime = datetime.datetime(
                *(int(i) for i in
                  stripped.replace("EPOCH OF FIRST MAP","").split()))
        if stripped.endswith("EPOCH OF LAST MAP"):
            endtime = datetime.datetime(
                *(int(i) for i in
                  stripped.replace("EPOCH OF LAST MAP","").split()))
        if stripped.endswith("INTERVAL"):
            # map interval is given in seconds; convert to hours
            timestep = float(stripped.split()[0]) / 3600.
        if stripped.endswith("EXPONENT"):
            # TEC values in the body must be multiplied by 10**EXPONENT
            exponent = pow(10, float(stripped.split()[0]))
        if stripped.endswith("DLON"):
            start_lon, end_lon, step_lon = \
                (float(i) for i in stripped.split()[:3])
        if stripped.endswith("DLAT"):
            start_lat, end_lat, step_lat = \
                (float(i) for i in stripped.split()[:3])
        if stripped.endswith("OF MAPS IN FILE"):
            ntimes = int(stripped.split()[0])
        if stripped.endswith("PRN / BIAS / RMS"):
            dcb_list.append(stripped.split()[0: 3])
        if stripped.endswith("STATION / BIAS / RMS"):
            st_dcb = stripped.split()
            # NOTE(review): 9 fields presumably means the station name was
            # split into two tokens; the extra one is dropped -- confirm
            # against the files actually used.
            if len(st_dcb) == 9:
                del st_dcb[1]
            station_dcb_list.append(st_dcb[0: 3])
    lonarray = np.arange(start_lon, end_lon + step_lon, step_lon)
    latarray = np.arange(start_lat, end_lat + step_lat, step_lat)
    dtime = endtime - starttime
    dtimef = dtime.days * 24. + dtime.seconds / 3600.
    logging.debug("timerange %f hours. step = %f ", dtimef, timestep)
    timearray = np.arange(0,
                          dtimef + timestep,
                          timestep)
    if timearray.shape[0] < ntimes:
        # bug in ILTF files,last time in header is incorrect
        extratimes = np.arange(timearray[-1] + timestep,
                               timearray[-1]
                               + (ntimes -
                                  timearray.shape[0] + 0.5) * timestep,
                               timestep)
        timearray = np.concatenate((timearray, extratimes))
    # shift time axis so it holds hours of day of the first epoch
    timearray += starttime.hour\
        + starttime.minute/60.\
        + starttime.second/3600.
    return exponent, lonarray, latarray, timearray, dcb_list, station_dcb_list
def read_tec(filename, _use_filter=None):
    """ returns TEC, RMS, longitude, lattitude, time and DCB info read from
    an IONEX file.
    Args:
        filename (string) : the full path to the IONEXfile
        _use_filter (float) : optional filter the data in space and time
                              with a gaussian filter with sigma use_filter.
                              calls scipy.ndimage.filter.gaussian_filter(tec,
                                                                use_filter)
    Returns:
        Tuple[np.array, np.array, np.array, np.array, np.array, list, list]:
            3D-arrays (time,lat,lon) of (optionally filtered) TEC and RMS +
            longitude, latitude and time array + satellite and station DCB
            lists from the header
    """
    # Bug fix: the file handle was opened but never closed; "with"
    # guarantees it is released even when parsing fails.
    with open(filename, "r") as ionex_file:
        exponent, lonarray, latarray, timearray, dcb_list, station_dcb_list = _read_ionex_header(ionex_file)
        logging.info("reading data with shapes %d x %d x %d",
                     timearray.shape[0],
                     latarray.shape[0],
                     lonarray.shape[0])
        tecarray = np.zeros(timearray.shape
                            + latarray.shape + lonarray.shape, dtype=float)
        rmsarray = np.zeros_like(tecarray)
        timeidx = 0
        lonidx = 0
        latidx = 0
        tecdata = False
        rmsdata = False
        readdata = False
        # header parsing left the file position at the first data record
        for line in ionex_file:
            if "START OF TEC MAP" in line:
                tecdata = True
                rmsdata = False
                # map numbers are 1-based in the file
                timeidx = int(line.strip().split()[0]) - 1
                continue
            if "START OF RMS MAP" in line:
                rmsdata = True
                tecdata = False
                timeidx = int(line.strip().split()[0]) - 1
                continue
            if "LAT/LON1/LON2/DLON/H" in line:
                readdata = True
                latstr = line.strip().replace("LAT/LON1/LON2/DLON/H","")
                # reinsert separators so fused negative numbers ("-5.0-10.0")
                # survive np.fromstring
                lat = np.fromstring(" -".join(latstr.split("-")), sep=" ")
                latidx = np.argmin(np.abs(latarray - lat[0]))
                lonidx = 0
                continue
            if tecdata and ("END OF TEC MAP" in line):
                readdata = False
                continue
            if rmsdata and ("END OF RMS MAP" in line):
                readdata = False
                continue
            if readdata:
                data = np.fromstring(" -".join(line.strip().split("-")),
                                     sep=" ") * exponent
                if tecdata:
                    tecarray[timeidx, latidx, lonidx:lonidx + data.shape[0]] = data
                elif rmsdata:
                    rmsarray[timeidx, latidx, lonidx:lonidx + data.shape[0]] = data
                lonidx += data.shape[0]
    if _use_filter is not None:
        tecarray = myfilter.gaussian_filter(
            tecarray, _use_filter, mode='nearest')
    return tecarray, rmsarray, lonarray, latarray, timearray, dcb_list, station_dcb_list
def readTEC(filename, use_filter=None):
    """Deprecated old name for read_tec, kept for backward compatibility."""
    logging.warning("function readTEC obsolete, use read_tec instead")
    return read_tec(filename, _use_filter=use_filter)
def _compute_index_and_weights(maparray, mapvalues):
'''helper function to get indices and weights for interpolating tecmaps
Args:
maparray (np.array) : array to get indices in
mapvalues (Union[float,np.array]) : values to get indices for
Returns:
Tuple[np.array, np.array, np.array]: idx1,idx2 and weights for idx2,
idx2 is always >= idx1
'''
is_reverse = maparray[1] < maparray[0]
idx1 = np.argmin(np.absolute(maparray[np.newaxis]
- mapvalues[:, np.newaxis]), axis=1)
idx2 = idx1.copy()
if not is_reverse:
idx1[maparray[idx1] > mapvalues] -= 1
idx2[maparray[idx2] < mapvalues] += 1
else:
idx1[maparray[idx1] < mapvalues] -= 1
idx2[maparray[idx2] > mapvalues] += 1
idx1[idx1 < 0] = 0
idx2[idx2 < 0] = 0
idx1[idx1 >= maparray.shape[0]] = maparray.shape[0] - 1
idx2[idx2 >= maparray.shape[0]] = maparray.shape[0] - 1
_steps = np.absolute(maparray[idx2] - maparray[idx1])
weights = np.absolute(mapvalues - maparray[idx1])
_steps[_steps == 0] = weights[_steps == 0]
weights = weights / _steps
return idx1, idx2, weights
def compute_tec_interpol(times, lats, lons, tecinfo, apply_earth_rotation=0):
    '''Get interpolated TEC for array of times/lats and lons
    Derive interpolated (4 point in lon,lat,2 point in time) vTEC values,
    optionally correcting for earth rotation.
    Args:
        lats (np.array) : angles in degrees between -90 and 90
        lons (np.array) : angles in degrees between -180,180
        times (np.array) : times in decimal hour of the day (eg. 23.5
                           for half past 11PM)
        tecinfo (tuple) : tuple with the return values of read_tec function
        apply_earth_rotation(float) : specify (with a number between 0 and 1)
            how much of the earth rotation is taken into account in the
            interpolation step.
            This is assuming that the TEC maps move according to the rotation
            of the Earth (following method 3 of interpolation described in the
            IONEX document). Experiments with high time resolution ROB data
            show that this is not really the case, resulting in strange
            wavelike structures when applying this smart interpolation.
            Negative values of this parameter would result in an ionosphere
            that moves with the rotation of the earth
    Returns:
        np.array : interpolated tecvalues
    '''
    assert times.shape == lats.shape,\
        "times and lats should be array with same shape"
    assert times.shape == lons.shape, \
        "times and lons should be array with same shape"
    tecdata = tecinfo[0]  # TEC in TECU
    lonarray = tecinfo[2]  # longitude in degrees from West to East (- to +)
    latarray = tecinfo[3]  # lattitude in degrees
    # times in hour of the day (eg. 23.5 for half past 11PM)
    maptimes = tecinfo[4]
    # get indices of nearest 2 time frames + inverse distance weights
    # assume time is sorted from early to late
    timeidx1, timeidx2, time_weights = _compute_index_and_weights(
        maptimes, times)
    # latarray is sorted small to large
    latidx1, latidx2, lat_weights = _compute_index_and_weights(
        latarray, lats)
    # for getting lon idx take into account earth_rotation
    # if longitudes cover all range between -180 and 180 you can modulate the
    # indices, otherwise we have to take the edge of the map.
    lonstep = lonarray[1] - lonarray[0]
    full_circle = np.remainder(lonarray[0] - lonarray[-1], 360.)\
        <= 1.1 * lonstep
    # rot1/rot2: how far (degrees) the map has rotated between the request
    # time and the two bracketing map epochs (15 deg per hour, scaled).
    rot1 = ((times - maptimes[timeidx1]) * 360. / 24.) * apply_earth_rotation
    rot2 = ((times - maptimes[timeidx2]) * 360. / 24.) * apply_earth_rotation
    if not full_circle:
        # partial longitude coverage: plain clipped bracketing
        lonidx11, lonidx12, lon_weights1 = _compute_index_and_weights(
            lonarray, lons + rot1)
        lonidx21, lonidx22, lon_weights2 = _compute_index_and_weights(
            lonarray, lons + rot2)
    else:
        # wrap-around grid: do all index arithmetic modulo 360 degrees
        lonidx11 = np.argmin(np.absolute(np.remainder(lonarray[np.newaxis]
                                                      - rot1[:, np.newaxis]
                                                      - lons[:, np.newaxis]
                                                      + 180., 360.)
                                         - 180.), axis=1)
        lonidx12 = lonidx11.copy()
        lonidx11[np.remainder(lonarray[lonidx11] - rot1 - lons + 180., 360.)
                 - 180. > 0] -= 1
        lonidx12[np.remainder(lonarray[lonidx12] - rot1 - lons + 180., 360.)
                 - 180. < 0] += 1
        lonidx11[lonidx11 < 0] += lonarray.shape[0]
        lonidx12[lonidx12 < 0] += lonarray.shape[0]
        lonidx11[lonidx11 >= lonarray.shape[0]] -= lonarray.shape[0]
        lonidx12[lonidx12 >= lonarray.shape[0]] -= lonarray.shape[0]
        lon_weights1 = np.absolute(np.remainder(lonarray[lonidx11]
                                                - rot1
                                                - lons + 180., 360.)
                                   - 180.) / lonstep
        lonidx21 = np.argmin(np.absolute(np.remainder(lonarray[np.newaxis]
                                                      - rot2[:, np.newaxis]
                                                      - lons[:, np.newaxis]
                                                      + 180., 360.)
                                         - 180.), axis=1)
        lonidx22 = lonidx21.copy()
        lonidx21[np.remainder(lonarray[lonidx21] - rot2 - lons + 180., 360.)
                 - 180. > 0] -= 1
        lonidx22[np.remainder(lonarray[lonidx22] - rot2 - lons + 180., 360.)
                 - 180. < 0] += 1
        lonidx21[lonidx21 < 0] += lonarray.shape[0]
        lonidx22[lonidx22 < 0] += lonarray.shape[0]
        lonidx21[lonidx21 >= lonarray.shape[0]] -= lonarray.shape[0]
        lonidx22[lonidx22 >= lonarray.shape[0]] -= lonarray.shape[0]
        lon_weights2 = np.absolute(np.remainder(lonarray[lonidx21]
                                                - rot2
                                                - lons + 180., 360.)
                                   - 180.) / lonstep
    logging.debug("inidces time %d %d indices lat %d %d indices \
lon %d %d %d %d", timeidx1[0], timeidx2[0],
                  latidx1[0], latidx2[0],
                  lonidx11[0], lonidx12[0],
                  lonidx21[0], lonidx22[0])
    logging.debug("weights time %f lat %f lon %f %f",
                  time_weights[0],
                  lat_weights[0],
                  lon_weights1[0],
                  lon_weights2[0])
    # trilinear blend: first lon and time at the lower latitude row ...
    tecs = (tecdata[timeidx1, latidx1, lonidx11] * (1. - lon_weights1)
            + tecdata[timeidx1, latidx1, lonidx12] * lon_weights1) \
        * (1. - time_weights)
    tecs += (tecdata[timeidx2, latidx1, lonidx21] * (1. - lon_weights2)
             + tecdata[timeidx2, latidx1, lonidx22] * lon_weights2) \
        * (time_weights)
    tecs *= 1. - lat_weights
    # ... then the same combination at the upper latitude row
    tecs += lat_weights \
        * (tecdata[timeidx1, latidx2, lonidx11] * (1. - lon_weights1)
           + tecdata[timeidx1, latidx2, lonidx12] * lon_weights1) \
        * (1. - time_weights)
    tecs += lat_weights \
        * (tecdata[timeidx2, latidx2, lonidx21] * (1. - lon_weights2)
           + tecdata[timeidx2, latidx2, lonidx22] * lon_weights2) \
        * (time_weights)
    return tecs
def getTECinterpol(time, lat, lon, tecinfo, apply_earth_rotation=0):
    """Deprecated alias for compute_tec_interpol, kept for old callers.

    Scalar inputs are wrapped into length-1 arrays before delegating.
    """
    if np.isscalar(time):
        time, lat, lon = [time], [lat], [lon]
    return compute_tec_interpol(np.array(time),
                                np.array(lat),
                                np.array(lon),
                                tecinfo,
                                apply_earth_rotation)
def _combine_ionex(outpath, filenames, newfilename):
    """Concatenate several single-map IONEX files into one multi-map file
    (needed for 15min ROBR data, which ships one map per file).

    Args:
        outpath (string): directory the combined file is written to.
        filenames (list): paths of the single-map input files.
        newfilename (string): name of the combined output file.

    Returns:
        string: path of the combined file (returned unchanged if it exists).
    """
    if os.path.isfile(outpath + newfilename):
        logging.info("FILE exists: " + outpath + newfilename)
        return outpath + newfilename
    newf = open(outpath + newfilename, 'w')
    filenames = sorted(filenames)
    firstfile = open(filenames[0])
    lastfile = open(filenames[-1])
    # grab the final epoch from the last file; it replaces the first file's
    # "EPOCH OF LAST MAP" header line below
    for line in lastfile:
        if "EPOCH OF LAST MAP" in line:
            lastepoch = line
            lastfile.close()
            break
    # write header + tec map
    # NOTE(review): replace('1', ...) substitutes EVERY '1' in the line;
    # this only works while the map-number field is the sole '1' present --
    # verify against the IONEX files actually used.
    for line in firstfile:
        if "END OF TEC MAP" in line:
            newf.write(line)
            break
        if "EPOCH OF LAST MAP" not in line:
            if "OF MAPS IN FILE" in line:
                newf.write(line.replace('1', str(len(filenames))))
            else:
                newf.write(line)
        else:
            newf.write(lastepoch)
    # append the TEC map of every remaining file, renumbering the maps
    tecmapnr = 2
    for myfname in filenames[1:]:
        myf = open(myfname)
        end_of_header = False
        for line in myf:
            if not end_of_header and "END OF HEADER" in line:
                end_of_header = True
            else:
                if end_of_header:
                    if "END OF TEC MAP" in line:
                        newf.write(line.replace('1', str(tecmapnr)))
                        break
                    if "START OF TEC MAP" in line:
                        newf.write(line.replace('1', str(tecmapnr)))
                    else:
                        newf.write(line)
        tecmapnr += 1
    newf.write("END OF FILE\n")
    return os.path.join(outpath, newfilename)
# ignore RMS map for now, since it is filled with zeros anyway
def _gunzip_some_file(compressed_file,
uncompressed_file,
delete_file=1):
command = "gunzip -dc %s > %s" % (compressed_file, uncompressed_file)
retcode = os.system(command)
if retcode:
raise RuntimeError("Could not run '%s'" % command)
if delete_file:
os.remove(compressed_file)
return uncompressed_file
def _store_files(ftp, filenames, outpath, overwrite=False):
    """Download files from an ftp server into outpath and gunzip .Z files.

    Args:
        ftp (ftplib.FTP): connected and logged-in ftp session, positioned
            in the directory holding the files.
        filenames (list): remote file names to fetch.
        outpath (string): local directory the files are stored in.
        overwrite (bool): when False, existing local copies are reused.

    Returns:
        list: local paths of the (decompressed) files.
    """
    nfilenames = []
    for myf in filenames:
        #make sure filename is always stored uppercase
        mypath = os.path.join(outpath, myf.upper())
        if not overwrite and os.path.isfile(mypath):
            nfilenames.append(mypath)
            logging.info("file %s exists", mypath)
        # the decompressed version may already exist from an earlier run
        elif not overwrite and\
                os.path.isfile(mypath.replace(".Z","")):
            nfilenames.append(mypath.replace(".Z",""))
            logging.info("file %s exists",
                         mypath.replace(".Z",""))
        else:
            myp = open(mypath, "wb")
            ftp.retrbinary("RETR " + myf, myp.write)
            myp.close()
            nfilenames.append(mypath)
    # second pass: decompress any .Z files that were just downloaded
    nfilenames_copy = nfilenames[:]
    nfilenames = []
    for myf in nfilenames_copy:
        if myf.endswith(".Z"):
            nfilenames.append(_gunzip_some_file(
                myf,
                myf.replace(".Z","")))
        else:
            nfilenames.append(myf)
    return nfilenames
def _get_IONEX_file(time="2012/03/23/02:20:10.01",
                    server="ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/",
                    prefix="codg",
                    outpath='./',
                    overwrite=False,
                    backupserver="ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/"):
    """Get IONEX file with prefix from server for a given day

    Downloads files with given prefix from the ftp server, unzips and stores
    the data. For prefix ROBR the data is stored on the server in a separate
    IONEX file every 15 minutes, these are automatically combined for
    compatibility.

    Args:
        time (string or list) : date of the observation, either a
            "YYYY/MM/DD/..." string or a [year, month, day, ...] sequence
        server (string) : ftp server + path to the ionex directories
        prefix (string) : prefix of the IONEX files (case insensitive)
        outpath (string) : path where the data is stored
        overwrite (bool) : Do (not) overwrite existing data
        backupserver (string) : fallback ftp server tried when `server`
            cannot be reached

    Returns:
        string : path of the downloaded (possibly combined) IONEX file,
            or -1 when no matching files are found on the server
    """
    prefix = prefix.upper()
    if outpath[-1] != "/":
        outpath += "/"
    if not os.path.isdir(outpath):
        try:
            os.makedirs(outpath)
        except OSError:
            logging.error("cannot create output dir for IONEXdata: %s",
                          outpath)
    try:
        # time given as a "YYYY/MM/DD/hh:mm:ss" style string.
        # BUGFIX: yy must be an int -- the raw 2-character string slice made
        # every "%02d" format below raise TypeError.
        yy = int(time[2:4])
        year = int(time[:4])
        month = int(time[5:7])
        day = int(time[8:10])
    except (TypeError, ValueError):
        # time given as a [year, month, day, ...] sequence
        year = time[0]
        yy = year - 2000
        month = time[1]
        day = time[2]
    mydate = datetime.date(year, month, day)
    dayofyear = mydate.timetuple().tm_yday
    #if file exists just return filename
    if not overwrite and os.path.isfile("%s%s%03d0.%02dI"%(outpath,prefix,dayofyear,yy)):
        logging.info("FILE exists: %s%s%03d0.%02dI",outpath,prefix,dayofyear,yy)
        return "%s%s%03d0.%02dI"%(outpath,prefix,dayofyear,yy)
    #check if IGRG (fast files) exist, use those instead (UGLY!!)
    if not overwrite and os.path.isfile("%sIGRG%03d0.%02dI"%(outpath,dayofyear,yy)):
        logging.info("fast FILE exists: %sIGRG%03d0.%02dI",outpath,dayofyear,yy)
        return "%sIGRG%03d0.%02dI"%(outpath,dayofyear,yy)
    tried_backup=False
    serverfound=False
    while not serverfound:
        # split "ftp://host/path/..." into host and path components
        ftpserver = server.replace("ftp:","").strip("/").split("/")[0]
        ftppath = "/".join(server.replace("ftp:","").strip("/").split("/")[1:])
        nr_tries = 0
        try_again = True
        while try_again and nr_tries<10:
            try:
                ftp = ftplib.FTP(ftpserver)
                ftp.login()
                try_again=False
                serverfound=True
            except ftplib.error_perm:
                # NOTE(security): hard-coded credentials for one specific
                # mirror; consider moving these into configuration.
                if "192.168.127.12" in server:
                    ftp.login("data-out", "Qz8803#mhR4z")
                    try_again=False
                    serverfound=True
                else:
                    try_again=True
                    nr_tries += 1
                    if nr_tries>=10:
                        # give up on this server; fall back to backup once
                        if tried_backup or server==backupserver:
                            raise Exception("Could not connect to %s"%ftpserver)
                        else:
                            server=backupserver
                            tried_backup=True
            except socket.gaierror:
                # DNS resolution failed: retry, then fall back to backup
                try_again=True
                nr_tries += 1
                if nr_tries>=10:
                    if tried_backup or server==backupserver:
                        raise Exception("Could not connect to %s"%ftpserver)
                    else:
                        server=backupserver
                        tried_backup=True
    ftp.cwd(ftppath)
    totpath = ftppath
    myl = []
    ftp.retrlines("NLST", myl.append)
    logging.info("Retrieving data for %d or %02d%03d", year, yy, dayofyear)
    # servers lay out the archive either as <year>/<doy>/ or as <yy><doy>/
    if str(year) in myl:
        ftp.cwd(str(year))
        totpath += "/%d" % (year)
    elif "%02d%03d" % (yy, dayofyear) in myl:
        ftp.cwd("%02d%03d" % (yy, dayofyear))
        totpath += "/%02d%03d" % (yy, dayofyear)
    myl = []
    ftp.retrlines("NLST", myl.append)
    if "%03d"%dayofyear in myl:
        ftp.cwd("%03d"%dayofyear)
        totpath += "/%03d" % (dayofyear)
    logging.info("Retrieving data from %s", totpath)
    myl = []
    ftp.retrlines("NLST", myl.append)
    # keep only entries matching the prefix, the day of year and an IONEX
    # extension (plain "...I" or compressed "...I.Z")
    filenames = [i for i in myl if (prefix.lower() in i.lower()) and
                 ("%03d"%dayofyear in i.lower()) and
                 (i.lower().endswith("i.z") or i.lower().endswith("i"))]
    logging.info(" ".join(filenames))
    #assert len(filenames) > 0, "No files found on %s for %s" % (server,prefix)
    if len(filenames) <=0:
        logging.info("No files found on %s for %s",server,prefix)
        return -1
    if prefix.lower() == "robr" and len(filenames) > 1:
        # ROBR data comes in 15-minute chunks: fetch today's files plus the
        # first chunk (A00) of the next day, then merge into one IONEX file.
        filenames = sorted(filenames)
        filenames = _store_files(ftp, filenames, outpath, overwrite)
        # get data for next day
        nextday = mydate + datetime.timedelta(days=1)
        nyear = nextday.year
        ndayofyear = nextday.timetuple().tm_yday
        ftp.cwd("/" + ftppath)
        myl = []
        ftp.retrlines("NLST", myl.append)
        if str(nyear) in myl:
            ftp.cwd(str(nyear))
            myl = ftp.retrlines("NLST")
        if str(ndayofyear) in myl:
            ftp.cwd(str(ndayofyear))
            myl = ftp.retrlines("NLST")
        nfilenames = [i for i in myl if (prefix.lower() in i.lower()) and
                      (i.lower().endswith("i.z")) and "A00" in i.upper()]
        nfilenames = _store_files(ftp, nfilenames, outpath, overwrite)
        filenames += nfilenames
        # BUGFIX: use %02d (was %s) so the produced filename matches the
        # "%02dI" pattern the existence check at the top of this function
        # looks for.
        _combine_ionex(outpath, filenames,
                       prefix + "%03d0.%02dI" % (dayofyear, yy))
        ftp.quit()
        return os.path.join(outpath, prefix + "%03d0.%02dI" % (dayofyear, yy))
    else:
        nfilenames = _store_files(ftp, filenames, outpath, overwrite)
        ftp.quit()
        return nfilenames[0]
#def get_urllib_IONEXfile(time="2012/03/23/02:20:10.01",
# server="ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/",
# prefix="codg",
# outpath='./',
# overwrite=False,
# backupserver="ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/",
# proxy_server=None,
# proxy_type=None,
# proxy_port=None,
# proxy_user=None,
# proxy_pass=None):
# """Get IONEX file with prefix from server for a given day
#
# Downloads files with given prefix from the ftp server, unzips and stores
# the data. Uses urllib2 instead of ftplib to have the option to use a ftp proxy server.
#
# Proxy args are optional.
#
# Args:
# time (string or list) : date of the observation
# server (string) : ftp server + path to the ionex directories
# prefix (string) : prefix of the IONEX files (case insensitive)
# outpath (string) : path where the data is stored
# overwrite (bool) : Do (not) overwrite existing data
# proxy_server (string): address of proxyserver, either url or ip address
# proxy_type (string): socks4 or socks5
# proxy_port (int): port of proxy server
# proxy_user (string): username for proxyserver
# proxy_pass (string): password for proxyserver
# """
# prefix=prefix.upper()
# if outpath[-1] != "/":
# outpath += "/"
# if not os.path.isdir(outpath):
# try:
# os.makedirs(outpath)
# except:
# print("cannot create output dir for IONEXdata: %s",
# outpath)
#
# try:
# yy = int(time[2:4])
# year = int(time[:4])
# month = int(time[5:7])
# day = int(time[8:10])
# except:
# year = time[0]
# yy = year - 2000
# month = time[1]
# day = time[2]
# mydate = datetime.date(year, month, day)
# dayofyear = mydate.timetuple().tm_yday
# if not overwrite and os.path.isfile("%s%s%03d0.%02dI"%(outpath,prefix,dayofyear,yy)):
# logging.info("FILE exists: %s%s%03d0.%02dI",outpath,prefix,dayofyear,yy)
# return "%s%s%03d0.%02dI"%(outpath,prefix,dayofyear,yy)
# #check if IGRG (fast files) exist, use those instead (UGLY!!)
# if not overwrite and os.path.isfile("%sIGRG%03d0.%02dI"%(outpath,dayofyear,yy)):
# logging.info("fast FILE exists: %sIGRG%03d0.%02dI",outpath,dayofyear,yy)
# return "%sIGRG%03d0.%02dI"%(outpath,dayofyear,yy)
#
# tried_backup=False
# serverfound=False
# backupfound=False
# #If proxy url is given, enable proxy using pysocks
# import urllib2
# if proxy_server and ("None" not in proxy_server):
# import socket
# import socks
# s = socks.socksocket()
# if proxy_type=="socks4":
# ProxyType = socks.SOCKS4
# if proxy_type=="socks5":
# ProxyType = socks.SOCKS5
# s.set_proxy(ProxyType, proxy_server, proxy_port, rdns=True, username=proxy_user, password=proxy_pass)
#
# # Url of the primary server has the syntax "ftp://ftp.aiub.unibe.ch/CODE/YYYY/CODGDOY0.YYI.Z" where DOY is the day of the year, padded with leading zero if <100, and YY is the last two digits of year.
# # Url of the backup server has the syntax "ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/YYYY/DOY/codgDOY.YYi.Z where DOY is the day of the year, padded with leading zero if <100, and YY is the last two digits of year.
# #try primary url
#
# try:
# primary = urllib2.urlopen(server,timeout=30)
# serverfound = True
# except:
# try:
# secondary = urllib2.urlopen(backupserver,timeout=30)
# backupfound = True
# server=backupserver
# except:
# logging.error('Primary and Backup Server not responding') #enable in lover environment
# if "ftp://ftp.aiub.unibe.ch" in server:
# url = "ftp://ftp.aiub.unibe.ch/CODE/%4d/%s%03d0.%02dI.Z"%(year,prefix.upper(),dayofyear,yy)
# elif "ftp://cddis.gsfc.nasa.gov" in server:
# url = "ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/%4d/%03d/%s%03d0.%02di.Z"%(year,dayofyear,prefix,dayofyear,yy)
# elif "igsiono.uwm.edu.pl" in server:
# url = "https://igsiono.uwm.edu.pl/data/ilt/%4d/igrg%03d0.%02di"%(year,dayofyear,yy)
#
# # Download IONEX file, make sure it is always uppercase
# fname = outpath+'/'+(url.split('/')[-1]).upper()
# try:
# site = urllib2.urlopen(url,timeout=30)
# except:
# logging.info("No files found on %s for %s",server,fname)
# return -1
#
# output=open(fname,'wb')
# output.write(site.read())
# output.close()
# ###### gunzip files
# if fname[-2:].upper()==".Z":
# command = "gunzip -dc %s > %s" % (fname, fname[:-2])
# retcode = os.system(command)
# if retcode:
# raise RuntimeError("Could not run '%s'" % command)
# else:
# os.remove(fname)
# fname=fname[:-2]
# #returns filename of uncompressed file
# return fname
def getIONEXfile(time="2012/03/23/02:20:10.01",
                 server="ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/",
                 prefix="codg",
                 outpath='./',
                 overwrite=False):
    """Public wrapper around ``_get_IONEX_file``.

    See ``_get_IONEX_file`` for the full parameter documentation.

    BUGFIX: the default ``server`` URL was missing the "/" between
    "products" and "ionex" ("...productsionex/"), pointing at a
    non-existent directory on the server.
    """
    return _get_IONEX_file(time, server, prefix, outpath, overwrite)
def get_TEC_data(times, lonlatpp, server, prefix, outpath, use_filter=None,earth_rot=0.):
    '''Returns vtec for given times and lonlats.
    If times has the same length as the first axis of lonlatpp,
    it is assumed that there is a one to one correspondence.
    Else vTEC is calculated for every combination of lonlatpp and times.
    Args:
        times (np.array) : float of time in MJD seconds
        lonlatpp (np.array) : array of time X 2, longitude latitude of
        piercepoints
        server (string)  : ftp server to get IONEX data from
        prefix (string) : prefix of IONEX data
        outpath (string) : local location of the IONEX files
        use_filter : optional filter passed through to the IONEX reader
        earth_rot (float) : amount of earth-rotation compensation applied
            during the TEC interpolation
    Returns:
        np.array : array with shape times.shape x lonlatpp.shape[0], unless both
        are equal
    '''
    # only the first timestamp is used to pick the (daily) IONEX file
    date_parms = PosTools.obtain_observation_year_month_day_fraction(times[0])
    # NOTE(review): `get_IONEX_file` is not defined in this chunk; the local
    # helpers are `_get_IONEX_file` / `getIONEXfile`. Confirm this name
    # resolves at module level, otherwise this line raises NameError.
    ionexf=get_IONEX_file(time=date_parms,server=server,prefix=prefix,outpath=outpath)
    tecinfo=ionex.readTEC(ionexf,use_filter=use_filter)
    latpp = lonlatpp[:, 1]
    lonpp = lonlatpp[:, 0]
    if latpp.shape == times.shape:
        # one-to-one correspondence between times and piercepoints
        vtec = compute_tec_interpol(times,lat=latpp,lon=lonpp,tecinfo=tecinfo,apply_earth_rotation=earth_rot)
    else:
        # evaluate every piercepoint at every time sample
        vtec=[]
        for itime in range(times.shape[0]):
            vtec.append(compute_tec_interpol(times[itime]*np.ones_like(latpp),
                                             lat=latpp,
                                             lon=lonpp,
                                             tecinfo=tecinfo,
                                             apply_earth_rotation=earth_rot))
    return np.array(vtec)
| [
"numpy.absolute",
"os.remove",
"numpy.abs",
"os.path.isfile",
"numpy.arange",
"os.path.join",
"logging.error",
"numpy.zeros_like",
"logging.warning",
"datetime.timedelta",
"numpy.ones_like",
"numpy.remainder",
"datetime.date",
"os.system",
"numpy.concatenate",
"scipy.ndimage.filters.ga... | [((328, 368), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (347, 368), False, 'import logging\n'), ((2173, 2223), 'numpy.arange', 'np.arange', (['start_lon', '(end_lon + step_lon)', 'step_lon'], {}), '(start_lon, end_lon + step_lon, step_lon)\n', (2182, 2223), True, 'import numpy as np\n'), ((2239, 2289), 'numpy.arange', 'np.arange', (['start_lat', '(end_lat + step_lat)', 'step_lat'], {}), '(start_lat, end_lat + step_lat, step_lat)\n', (2248, 2289), True, 'import numpy as np\n'), ((2380, 2445), 'logging.debug', 'logging.debug', (['"""timerange %f hours. step = %f """', 'dtimef', 'timestep'], {}), "('timerange %f hours. step = %f ', dtimef, timestep)\n", (2393, 2445), False, 'import logging\n'), ((2462, 2503), 'numpy.arange', 'np.arange', (['(0)', '(dtimef + timestep)', 'timestep'], {}), '(0, dtimef + timestep, timestep)\n', (2471, 2503), True, 'import numpy as np\n'), ((3965, 4081), 'logging.info', 'logging.info', (['"""reading data with shapes %d x %d x %d"""', 'timearray.shape[0]', 'latarray.shape[0]', 'lonarray.shape[0]'], {}), "('reading data with shapes %d x %d x %d', timearray.shape[0],\n latarray.shape[0], lonarray.shape[0])\n", (3977, 4081), False, 'import logging\n'), ((4144, 4216), 'numpy.zeros', 'np.zeros', (['(timearray.shape + latarray.shape + lonarray.shape)'], {'dtype': 'float'}), '(timearray.shape + latarray.shape + lonarray.shape, dtype=float)\n', (4152, 4216), True, 'import numpy as np\n'), ((4256, 4279), 'numpy.zeros_like', 'np.zeros_like', (['tecarray'], {}), '(tecarray)\n', (4269, 4279), True, 'import numpy as np\n'), ((5983, 6049), 'logging.warning', 'logging.warning', (['"""function readTEC obsolete, use read_tec instead"""'], {}), "('function readTEC obsolete, use read_tec instead')\n", (5998, 6049), False, 'import logging\n'), ((7126, 7170), 'numpy.absolute', 'np.absolute', (['(maparray[idx2] - maparray[idx1])'], {}), '(maparray[idx2] - 
maparray[idx1])\n', (7137, 7170), True, 'import numpy as np\n'), ((7185, 7224), 'numpy.absolute', 'np.absolute', (['(mapvalues - maparray[idx1])'], {}), '(mapvalues - maparray[idx1])\n', (7196, 7224), True, 'import numpy as np\n'), ((12333, 12544), 'logging.debug', 'logging.debug', (['"""inidces time %d %d indices lat %d %d indices lon %d %d %d %d"""', 'timeidx1[0]', 'timeidx2[0]', 'latidx1[0]', 'latidx2[0]', 'lonidx11[0]', 'lonidx12[0]', 'lonidx21[0]', 'lonidx22[0]'], {}), "(\n 'inidces time %d %d indices lat %d %d indices lon %d %d %d %d'\n , timeidx1[0], timeidx2[0], latidx1[0], latidx2[0], lonidx11[0],\n lonidx12[0], lonidx21[0], lonidx22[0])\n", (12346, 12544), False, 'import logging\n'), ((12591, 12711), 'logging.debug', 'logging.debug', (['"""weights time %f lat %f lon %f %f"""', 'time_weights[0]', 'lat_weights[0]', 'lon_weights1[0]', 'lon_weights2[0]'], {}), "('weights time %f lat %f lon %f %f', time_weights[0],\n lat_weights[0], lon_weights1[0], lon_weights2[0])\n", (12604, 12711), False, 'import logging\n'), ((13775, 13792), 'numpy.isscalar', 'np.isscalar', (['time'], {}), '(time)\n', (13786, 13792), True, 'import numpy as np\n'), ((14201, 14238), 'os.path.isfile', 'os.path.isfile', (['(outpath + newfilename)'], {}), '(outpath + newfilename)\n', (14215, 14238), False, 'import os\n'), ((15706, 15740), 'os.path.join', 'os.path.join', (['outpath', 'newfilename'], {}), '(outpath, newfilename)\n', (15718, 15740), False, 'import os\n'), ((16016, 16034), 'os.system', 'os.system', (['command'], {}), '(command)\n', (16025, 16034), False, 'import os\n'), ((18794, 18825), 'datetime.date', 'datetime.date', (['year', 'month', 'day'], {}), '(year, month, day)\n', (18807, 18825), False, 'import datetime\n'), ((20995, 21066), 'logging.info', 'logging.info', (['"""Retrieving data for %d or %02d%03d"""', 'year', 'yy', 'dayofyear'], {}), "('Retrieving data for %d or %02d%03d', year, yy, dayofyear)\n", (21007, 21066), False, 'import logging\n'), ((21456, 21504), 
'logging.info', 'logging.info', (['"""Retrieving data from %s"""', 'totpath'], {}), "('Retrieving data from %s', totpath)\n", (21468, 21504), False, 'import logging\n'), ((30061, 30075), 'numpy.array', 'np.array', (['vtec'], {}), '(vtec)\n', (30069, 30075), True, 'import numpy as np\n'), ((2674, 2788), 'numpy.arange', 'np.arange', (['(timearray[-1] + timestep)', '(timearray[-1] + (ntimes - timearray.shape[0] + 0.5) * timestep)', 'timestep'], {}), '(timearray[-1] + timestep, timearray[-1] + (ntimes - timearray.\n shape[0] + 0.5) * timestep, timestep)\n', (2683, 2788), True, 'import numpy as np\n'), ((2931, 2970), 'numpy.concatenate', 'np.concatenate', (['(timearray, extratimes)'], {}), '((timearray, extratimes))\n', (2945, 2970), True, 'import numpy as np\n'), ((5710, 5773), 'scipy.ndimage.filters.gaussian_filter', 'myfilter.gaussian_filter', (['tecarray', '_use_filter'], {'mode': '"""nearest"""'}), "(tecarray, _use_filter, mode='nearest')\n", (5734, 5773), True, 'import scipy.ndimage.filters as myfilter\n'), ((6604, 6664), 'numpy.absolute', 'np.absolute', (['(maparray[np.newaxis] - mapvalues[:, np.newaxis])'], {}), '(maparray[np.newaxis] - mapvalues[:, np.newaxis])\n', (6615, 6664), True, 'import numpy as np\n'), ((9659, 9706), 'numpy.remainder', 'np.remainder', (['(lonarray[0] - lonarray[-1])', '(360.0)'], {}), '(lonarray[0] - lonarray[-1], 360.0)\n', (9671, 9706), True, 'import numpy as np\n'), ((13888, 13902), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (13896, 13902), True, 'import numpy as np\n'), ((13904, 13917), 'numpy.array', 'np.array', (['lat'], {}), '(lat)\n', (13912, 13917), True, 'import numpy as np\n'), ((13919, 13932), 'numpy.array', 'np.array', (['lon'], {}), '(lon)\n', (13927, 13932), True, 'import numpy as np\n'), ((14248, 14301), 'logging.info', 'logging.info', (["('FILE exists: ' + outpath + newfilename)"], {}), "('FILE exists: ' + outpath + newfilename)\n", (14260, 14301), False, 'import logging\n'), ((16138, 16164), 'os.remove', 
'os.remove', (['compressed_file'], {}), '(compressed_file)\n', (16147, 16164), False, 'import os\n'), ((18359, 18381), 'os.path.isdir', 'os.path.isdir', (['outpath'], {}), '(outpath)\n', (18372, 18381), False, 'import os\n'), ((18935, 19003), 'os.path.isfile', 'os.path.isfile', (["('%s%s%03d0.%02dI' % (outpath, prefix, dayofyear, yy))"], {}), "('%s%s%03d0.%02dI' % (outpath, prefix, dayofyear, yy))\n", (18949, 19003), False, 'import os\n'), ((19008, 19084), 'logging.info', 'logging.info', (['"""FILE exists: %s%s%03d0.%02dI"""', 'outpath', 'prefix', 'dayofyear', 'yy'], {}), "('FILE exists: %s%s%03d0.%02dI', outpath, prefix, dayofyear, yy)\n", (19020, 19084), False, 'import logging\n'), ((19235, 19297), 'os.path.isfile', 'os.path.isfile', (["('%sIGRG%03d0.%02dI' % (outpath, dayofyear, yy))"], {}), "('%sIGRG%03d0.%02dI' % (outpath, dayofyear, yy))\n", (19249, 19297), False, 'import os\n'), ((19303, 19378), 'logging.info', 'logging.info', (['"""fast FILE exists: %sIGRG%03d0.%02dI"""', 'outpath', 'dayofyear', 'yy'], {}), "('fast FILE exists: %sIGRG%03d0.%02dI', outpath, dayofyear, yy)\n", (19315, 19378), False, 'import logging\n'), ((21905, 21964), 'logging.info', 'logging.info', (['"""No files found on %s for %s"""', 'server', 'prefix'], {}), "('No files found on %s for %s', server, prefix)\n", (21917, 21964), False, 'import logging\n'), ((23004, 23065), 'os.path.join', 'os.path.join', (['outpath', "(prefix + '%03d0.%sI' % (dayofyear, yy))"], {}), "(outpath, prefix + '%03d0.%sI' % (dayofyear, yy))\n", (23016, 23065), False, 'import os\n'), ((16507, 16529), 'os.path.isfile', 'os.path.isfile', (['mypath'], {}), '(mypath)\n', (16521, 16529), False, 'import os\n'), ((16581, 16619), 'logging.info', 'logging.info', (['"""file %s exists"""', 'mypath'], {}), "('file %s exists', mypath)\n", (16593, 16619), False, 'import logging\n'), ((18408, 18428), 'os.makedirs', 'os.makedirs', (['outpath'], {}), '(outpath)\n', (18419, 18428), False, 'import os\n'), ((22212, 22238), 
'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (22230, 22238), False, 'import datetime\n'), ((4997, 5022), 'numpy.abs', 'np.abs', (['(latarray - lat[0])'], {}), '(latarray - lat[0])\n', (5003, 5022), True, 'import numpy as np\n'), ((18457, 18525), 'logging.error', 'logging.error', (['"""cannot create output dir for IONEXdata: %s"""', 'outpath'], {}), "('cannot create output dir for IONEXdata: %s', outpath)\n", (18470, 18525), False, 'import logging\n'), ((19788, 19809), 'ftplib.FTP', 'ftplib.FTP', (['ftpserver'], {}), '(ftpserver)\n', (19798, 19809), False, 'import ftplib\n'), ((10177, 10275), 'numpy.remainder', 'np.remainder', (['(lonarray[np.newaxis] - rot1[:, np.newaxis] - lons[:, np.newaxis] + 180.0)', '(360.0)'], {}), '(lonarray[np.newaxis] - rot1[:, np.newaxis] - lons[:, np.\n newaxis] + 180.0, 360.0)\n', (10189, 10275), True, 'import numpy as np\n'), ((10541, 10602), 'numpy.remainder', 'np.remainder', (['(lonarray[lonidx11] - rot1 - lons + 180.0)', '(360.0)'], {}), '(lonarray[lonidx11] - rot1 - lons + 180.0, 360.0)\n', (10553, 10602), True, 'import numpy as np\n'), ((10652, 10713), 'numpy.remainder', 'np.remainder', (['(lonarray[lonidx12] - rot1 - lons + 180.0)', '(360.0)'], {}), '(lonarray[lonidx12] - rot1 - lons + 180.0, 360.0)\n', (10664, 10713), True, 'import numpy as np\n'), ((11023, 11084), 'numpy.remainder', 'np.remainder', (['(lonarray[lonidx11] - rot1 - lons + 180.0)', '(360.0)'], {}), '(lonarray[lonidx11] - rot1 - lons + 180.0, 360.0)\n', (11035, 11084), True, 'import numpy as np\n'), ((11274, 11372), 'numpy.remainder', 'np.remainder', (['(lonarray[np.newaxis] - rot2[:, np.newaxis] - lons[:, np.newaxis] + 180.0)', '(360.0)'], {}), '(lonarray[np.newaxis] - rot2[:, np.newaxis] - lons[:, np.\n newaxis] + 180.0, 360.0)\n', (11286, 11372), True, 'import numpy as np\n'), ((11638, 11699), 'numpy.remainder', 'np.remainder', (['(lonarray[lonidx21] - rot2 - lons + 180.0)', '(360.0)'], {}), '(lonarray[lonidx21] - rot2 - lons 
+ 180.0, 360.0)\n', (11650, 11699), True, 'import numpy as np\n'), ((11749, 11810), 'numpy.remainder', 'np.remainder', (['(lonarray[lonidx22] - rot2 - lons + 180.0)', '(360.0)'], {}), '(lonarray[lonidx22] - rot2 - lons + 180.0, 360.0)\n', (11761, 11810), True, 'import numpy as np\n'), ((12120, 12181), 'numpy.remainder', 'np.remainder', (['(lonarray[lonidx21] - rot2 - lons + 180.0)', '(360.0)'], {}), '(lonarray[lonidx21] - rot2 - lons + 180.0, 360.0)\n', (12132, 12181), True, 'import numpy as np\n'), ((29777, 29796), 'numpy.ones_like', 'np.ones_like', (['latpp'], {}), '(latpp)\n', (29789, 29796), True, 'import numpy as np\n')] |
import numpy as np
import luibeal as lb
import torch
import os
# Output directory for the generated random-exponential training deck.
DECK_PATH = os.path.join('.', 'data', 'rand_exp')
def exponential(p0, exp, t):
    """Evaluate the exponential curve p(t) = p0 * e**(exp * t).

    Works for scalar t as well as numpy arrays of time points.
    """
    growth = np.exp(exp * t)
    return growth * p0
def random_exponential_decline(tlim, lseq, p0_mean, exp_mean=-0.1):
    """Sample one noisy exponential decline curve.

    Draws `lseq` evenly spaced time points over `tlim`, evaluates the mean
    exponential curve there and perturbs it with multiplicative Gaussian
    noise (std 0.1 around 1.0). Returns the curve as a torch tensor.
    """
    noise_std = 0.1
    timesteps = np.linspace(*tlim, lseq)
    mean_curve = exponential(p0_mean, exp_mean, timesteps)
    noisy_curve = mean_curve * np.random.normal(1.0, noise_std, timesteps.shape)
    return torch.from_numpy(noisy_curve)
if __name__ == '__main__':
    # Deterministic deck generation.
    np.random.seed(41)
    nseq, lseq = 100, 50   # number of sequences / samples per sequence
    p0_mean = 50.0         # mean initial value of each decline curve
    tlim = (0.0, 50.0)     # time span covered by every sequence
    # Renamed from `input` to avoid shadowing the builtin of the same name.
    deck_input = lb.input.Input(lseq)
    training_deck = lb.deck.Deck(deck_input, nseq)
    for i in range(nseq):
        training_deck.set_sequence(i, random_exponential_decline(tlim, lseq, p0_mean))
    lb.util.save_as(training_deck, DECK_PATH)
| [
"numpy.random.seed",
"luibeal.input.Input",
"luibeal.deck.Deck",
"luibeal.util.save_as",
"numpy.linspace",
"numpy.exp",
"numpy.random.normal",
"os.path.join",
"torch.from_numpy"
] | [((76, 113), 'os.path.join', 'os.path.join', (['"""."""', '"""data"""', '"""rand_exp"""'], {}), "('.', 'data', 'rand_exp')\n", (88, 113), False, 'import os\n'), ((271, 295), 'numpy.linspace', 'np.linspace', (['*tlim', 'lseq'], {}), '(*tlim, lseq)\n', (282, 295), True, 'import numpy as np\n'), ((389, 408), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (405, 408), False, 'import torch\n'), ((442, 460), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (456, 460), True, 'import numpy as np\n'), ((540, 560), 'luibeal.input.Input', 'lb.input.Input', (['lseq'], {}), '(lseq)\n', (554, 560), True, 'import luibeal as lb\n'), ((581, 606), 'luibeal.deck.Deck', 'lb.deck.Deck', (['input', 'nseq'], {}), '(input, nseq)\n', (593, 606), True, 'import luibeal as lb\n'), ((724, 765), 'luibeal.util.save_as', 'lb.util.save_as', (['training_deck', 'DECK_PATH'], {}), '(training_deck, DECK_PATH)\n', (739, 765), True, 'import luibeal as lb\n'), ((161, 176), 'numpy.exp', 'np.exp', (['(exp * t)'], {}), '(exp * t)\n', (167, 176), True, 'import numpy as np\n'), ((340, 377), 'numpy.random.normal', 'np.random.normal', (['(1.0)', 'noise', 't.shape'], {}), '(1.0, noise, t.shape)\n', (356, 377), True, 'import numpy as np\n')] |
from psenet_ctw import PSENET_CTW
import torch
import numpy as np
import cv2
import random
import os
# Fix all RNG seeds (torch CPU/GPU, numpy, python) so visualisation runs
# are reproducible.
torch.manual_seed(123456)
torch.cuda.manual_seed(123456)
np.random.seed(123456)
random.seed(123456)
def to_rgb(img):
    """Expand a 2-D mask (values in [0, 1]) to an HxWx3 image scaled by 255."""
    channel = img.reshape(img.shape[0], img.shape[1], 1)
    rgb = np.concatenate((channel, channel, channel), axis=2)
    return rgb * 255
def save(img_path, imgs):
    """Frame each image with a 3px blue border, tile them horizontally and
    write the result under vis/ (the directory is created on demand).

    Note: replaces the entries of `imgs` with their bordered versions, i.e.
    the caller's list is mutated in place.
    """
    if not os.path.exists('vis/'):
        os.makedirs('vis/')
    # In-place update is intentional: callers see the bordered images.
    for idx, im in enumerate(imgs):
        imgs[idx] = cv2.copyMakeBorder(im, 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=[255, 0, 0])
    tiled = np.concatenate(imgs, axis=1)
    # img_path may be a plain path string or a sequence whose first entry is one
    if type(img_path) is str:
        img_name = img_path.split('/')[-1]
    else:
        img_name = img_path[0].split('/')[-1]
    print('saved %s.' % img_name)
    cv2.imwrite('vis/' + img_name, tiled)
# data_loader = SynthLoader(split='train', is_transform=True, img_size=640, kernel_scale=0.5, short_size=640,
# for_rec=True)
# data_loader = IC15Loader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
# data_loader = CombineLoader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
# data_loader = TTLoader(split='train', is_transform=True, img_size=640, kernel_scale=0.8, short_size=640,
# for_rec=True, read_type='pil')
# data_loader = CombineAllLoader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
# Build the CTW test split; images are transformed/resized to 736x736.
data_loader = PSENET_CTW(split='test', is_transform=True, img_size=736)
# data_loader = MSRALoader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
#                          for_rec=True)
# data_loader = CTWv2Loader(split='train', is_transform=True, img_size=640, kernel_scale=0.7, short_size=640,
#                           for_rec=True)
# data_loader = IC15(split='train', is_transform=True, img_size=640,)
# Single-sample, single-worker loader: this script only visualises data.
train_loader = torch.utils.data.DataLoader(
    data_loader,
    batch_size=1,
    shuffle=False,
    num_workers=0,
    drop_last=True)
for batch_idx, imgs in enumerate(train_loader):
    # Only visualise the first ~100 samples.
    if batch_idx > 100:
        break
    # image_name = data_loader.img_paths[batch_idx].split('/')[-1].split('.')[0]
    # print('%d/%d %s'%(batch_idx, len(train_loader), data_loader.img_paths[batch_idx]))
    print('%d/%d' % (batch_idx, len(train_loader)))
    img = imgs[0].numpy()
    # Undo ImageNet mean/std normalisation and scale back to 0-255 uint8.
    img = ((img * np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1) +
            np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)) * 255).astype(np.uint8)
    # CHW -> HWC and RGB -> BGR for OpenCV.
    img = np.transpose(img, (1, 2, 0))[:, :, ::-1].copy()
    # gt_text = to_rgb(gt_texts[0].numpy())
    # gt_kernel_0 = to_rgb(gt_kernels[0, 0].numpy())
    # gt_kernel_1 = to_rgb(gt_kernels[0, 1].numpy())
    # gt_kernel_2 = to_rgb(gt_kernels[0, 2].numpy())
    # gt_kernel_3 = to_rgb(gt_kernels[0, 3].numpy())
    # gt_kernel_4 = to_rgb(gt_kernels[0, 4].numpy())
    # gt_kernel_5 = to_rgb(gt_kernels[0, 5].numpy())
    # gt_text_mask = to_rgb(training_masks[0].numpy().astype(np.uint8))
    # save('%d.png' % batch_idx, [img, gt_text, gt_kernel_0, gt_kernel_1, gt_kernel_2, gt_kernel_3, gt_kernel_4, gt_kernel_5, gt_text_mask])
    save('%d_test.png' % batch_idx, [img])
"numpy.random.seed",
"os.makedirs",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"cv2.imwrite",
"torch.cuda.manual_seed",
"os.path.exists",
"cv2.copyMakeBorder",
"numpy.transpose",
"random.seed",
"numpy.array",
"psenet_ctw.PSENET_CTW",
"numpy.concatenate"
] | [((102, 127), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (119, 127), False, 'import torch\n'), ((128, 158), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(123456)'], {}), '(123456)\n', (150, 158), False, 'import torch\n'), ((159, 181), 'numpy.random.seed', 'np.random.seed', (['(123456)'], {}), '(123456)\n', (173, 181), True, 'import numpy as np\n'), ((182, 201), 'random.seed', 'random.seed', (['(123456)'], {}), '(123456)\n', (193, 201), False, 'import random\n'), ((1608, 1665), 'psenet_ctw.PSENET_CTW', 'PSENET_CTW', ([], {'split': '"""test"""', 'is_transform': '(True)', 'img_size': '(736)'}), "(split='test', is_transform=True, img_size=736)\n", (1618, 1665), False, 'from psenet_ctw import PSENET_CTW\n'), ((2054, 2158), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data_loader'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(data_loader, batch_size=1, shuffle=False,\n num_workers=0, drop_last=True)\n', (2081, 2158), False, 'import torch\n'), ((576, 604), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(1)'}), '(imgs, axis=1)\n', (590, 604), True, 'import numpy as np\n'), ((772, 807), 'cv2.imwrite', 'cv2.imwrite', (["('vis/' + img_name)", 'res'], {}), "('vis/' + img_name, res)\n", (783, 807), False, 'import cv2\n'), ((284, 323), 'numpy.concatenate', 'np.concatenate', (['(img, img, img)'], {'axis': '(2)'}), '((img, img, img), axis=2)\n', (298, 323), True, 'import numpy as np\n'), ((384, 406), 'os.path.exists', 'os.path.exists', (['"""vis/"""'], {}), "('vis/')\n", (398, 406), False, 'import os\n'), ((416, 435), 'os.makedirs', 'os.makedirs', (['"""vis/"""'], {}), "('vis/')\n", (427, 435), False, 'import os\n'), ((486, 565), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['imgs[i]', '(3)', '(3)', '(3)', '(3)', 'cv2.BORDER_CONSTANT'], {'value': '[255, 0, 0]'}), '(imgs[i], 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=[255, 0, 0])\n', (504, 565), False, 
'import cv2\n'), ((2678, 2706), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (2690, 2706), True, 'import numpy as np\n'), ((2594, 2625), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2602, 2625), True, 'import numpy as np\n'), ((2531, 2562), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2539, 2562), True, 'import numpy as np\n')] |
import argparse
import random
import warnings
import numpy as np
import torch
import torch.backends.cudnn
import torch.distributed as dist
import torch.nn.parallel as parallel
import torch.optim as optim
import torch.utils.data as data
import engine
from datasets import AugVocTrainDataset, AugVocValDataset, CLASSES
from model import InstanceTransformer, HungarianMatcher, DetCriterion
from utils.distributed_logger import DistributedLogger
# Suppress all warnings globally (third-party libraries are noisy in training).
warnings.filterwarnings('ignore')
def _str2bool(value):
    """Parse a command-line boolean ('true'/'false', 'yes'/'no', '1'/'0')."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def get_args_parser():
    """Build the training argument parser and parse sys.argv.

    Returns:
        argparse.Namespace with all training hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    # BUGFIX: `type=bool` is an argparse pitfall -- bool('False') is True,
    # so any explicit value used to parse as True. Use a real converter.
    parser.add_argument('--is_train', default=True, type=_str2bool)
    parser.add_argument('--dataset_root', type=str)
    parser.add_argument('--name', type=str)
    parser.add_argument('--random_seed', default=970423, type=int)
    parser.add_argument('--input_size', default=448, type=int)
    parser.add_argument('--num_instances', default=40, type=int)
    parser.add_argument('--d_model', default=512, type=int)
    # Weights for the matching/loss cost terms.
    parser.add_argument('--class_weight', default=1., type=float)
    parser.add_argument('--giou_weight', default=2., type=float)
    parser.add_argument('--l1_weight', default=5., type=float)
    parser.add_argument('--no_instance_coef', default=0.1, type=float)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--step_size', default=200, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--lr', default=2e-4, type=float)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    # Distributed-training ranks (normally set by the launcher).
    parser.add_argument('--local_rank', default=0, type=int)
    parser.add_argument('--master_rank', default=0, type=int)
    return parser.parse_args()
def set_random_seed(seed):
    """Seed the Python, NumPy and torch RNGs for reproducible runs."""
    for seed_fn in (random.seed, np.random.seed, torch.random.manual_seed):
        seed_fn(seed)
def __main__():
    """Distributed training entry point: set up DDP, data and run all epochs."""
    args = get_args_parser()
    # One process per GPU; the NCCL backend requires CUDA devices.
    dist.init_process_group(backend='nccl')
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # Offset the seed by rank so each worker draws a different random stream.
    set_random_seed(args.random_seed + dist.get_rank())
    torch.cuda.set_device(torch.device('cuda:{}'.format(dist.get_rank())))
    dist_logger = DistributedLogger(args.name, args.master_rank, use_tensorboard=True)
    model = InstanceTransformer(args.num_instances, len(CLASSES), args.d_model).cuda()
    model = parallel.DistributedDataParallel(model, device_ids=[dist.get_rank()])
    matcher = HungarianMatcher(args.class_weight, args.giou_weight, args.l1_weight)
    criterion = DetCriterion(args.no_instance_coef)
    # Backbone parameters train with a 10x smaller learning rate.
    optimized_parameters = [
        {'params': [p for n, p in model.module.named_parameters() if 'backbone' not in n and p.requires_grad]},
        {'params': [p for n, p in model.module.named_parameters() if 'backbone' in n and p.requires_grad], 'lr': args.lr / 10}
    ]
    optimizer = optim.AdamW(optimized_parameters, lr=args.lr, weight_decay=args.weight_decay)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size)
    train_dataset = AugVocTrainDataset(args.dataset_root, args.input_size, args.input_size, args.num_instances)
    train_sampler = data.distributed.DistributedSampler(train_dataset)
    train_dataloader = data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, sampler=train_sampler, pin_memory=True, drop_last=True)
    val_dataset = AugVocValDataset(args.dataset_root, args.input_size, args.input_size)
    val_sampler = data.distributed.DistributedSampler(val_dataset)
    val_dataloader = data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=True, sampler=val_sampler)
    for epoch_idx in range(args.epochs):
        # set_epoch reshuffles the distributed sampler every epoch.
        train_sampler.set_epoch(epoch_idx)
        engine.train_one_epoch(model, optimizer, matcher, criterion, lr_scheduler, train_dataloader, dist_logger, epoch_idx)
        val_sampler.set_epoch(epoch_idx)
        engine.val_one_epoch(model, val_dataloader, val_dataset.coco, dist_logger, epoch_idx)
if __name__ == '__main__':
    # Launch distributed training when executed as a script.
    __main__()
| [
"engine.train_one_epoch",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.random.manual_seed",
"warnings.filterwarnings",
"torch.distributed.init_process_group",
"model.HungarianMatcher",
"model.DetCriterion",
"torch.optim.AdamW",
"utils.distributed_logger.DistributedLogger",
"torch.optim... | [((445, 478), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (468, 478), False, 'import warnings\n'), ((517, 542), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (540, 542), False, 'import argparse\n'), ((1788, 1805), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1799, 1805), False, 'import random\n'), ((1810, 1830), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1824, 1830), True, 'import numpy as np\n'), ((1835, 1865), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seed'], {}), '(seed)\n', (1859, 1865), False, 'import torch\n'), ((1917, 1956), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (1940, 1956), True, 'import torch.distributed as dist\n'), ((2188, 2256), 'utils.distributed_logger.DistributedLogger', 'DistributedLogger', (['args.name', 'args.master_rank'], {'use_tensorboard': '(True)'}), '(args.name, args.master_rank, use_tensorboard=True)\n', (2205, 2256), False, 'from utils.distributed_logger import DistributedLogger\n'), ((2441, 2510), 'model.HungarianMatcher', 'HungarianMatcher', (['args.class_weight', 'args.giou_weight', 'args.l1_weight'], {}), '(args.class_weight, args.giou_weight, args.l1_weight)\n', (2457, 2510), False, 'from model import InstanceTransformer, HungarianMatcher, DetCriterion\n'), ((2527, 2562), 'model.DetCriterion', 'DetCriterion', (['args.no_instance_coef'], {}), '(args.no_instance_coef)\n', (2539, 2562), False, 'from model import InstanceTransformer, HungarianMatcher, DetCriterion\n'), ((2854, 2931), 'torch.optim.AdamW', 'optim.AdamW', (['optimized_parameters'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(optimized_parameters, lr=args.lr, weight_decay=args.weight_decay)\n', (2865, 2931), True, 'import torch.optim as optim\n'), ((2951, 3013), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', 
(['optimizer'], {'step_size': 'args.step_size'}), '(optimizer, step_size=args.step_size)\n', (2976, 3013), True, 'import torch.optim as optim\n'), ((3035, 3130), 'datasets.AugVocTrainDataset', 'AugVocTrainDataset', (['args.dataset_root', 'args.input_size', 'args.input_size', 'args.num_instances'], {}), '(args.dataset_root, args.input_size, args.input_size,\n args.num_instances)\n', (3053, 3130), False, 'from datasets import AugVocTrainDataset, AugVocValDataset, CLASSES\n'), ((3147, 3197), 'torch.utils.data.distributed.DistributedSampler', 'data.distributed.DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (3182, 3197), True, 'import torch.utils.data as data\n'), ((3221, 3370), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'sampler': 'train_sampler', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, num_workers=args\n .num_workers, sampler=train_sampler, pin_memory=True, drop_last=True)\n', (3236, 3370), True, 'import torch.utils.data as data\n'), ((3385, 3454), 'datasets.AugVocValDataset', 'AugVocValDataset', (['args.dataset_root', 'args.input_size', 'args.input_size'], {}), '(args.dataset_root, args.input_size, args.input_size)\n', (3401, 3454), False, 'from datasets import AugVocTrainDataset, AugVocValDataset, CLASSES\n'), ((3473, 3521), 'torch.utils.data.distributed.DistributedSampler', 'data.distributed.DistributedSampler', (['val_dataset'], {}), '(val_dataset)\n', (3508, 3521), True, 'import torch.utils.data as data\n'), ((3543, 3672), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'sampler': 'val_sampler'}), '(val_dataset, batch_size=args.batch_size, num_workers=args.\n num_workers, pin_memory=True, sampler=val_sampler)\n', (3558, 3672), True, 'import torch.utils.data as data\n'), ((3761, 3881), 
'engine.train_one_epoch', 'engine.train_one_epoch', (['model', 'optimizer', 'matcher', 'criterion', 'lr_scheduler', 'train_dataloader', 'dist_logger', 'epoch_idx'], {}), '(model, optimizer, matcher, criterion, lr_scheduler,\n train_dataloader, dist_logger, epoch_idx)\n', (3783, 3881), False, 'import engine\n'), ((3928, 4017), 'engine.val_one_epoch', 'engine.val_one_epoch', (['model', 'val_dataloader', 'val_dataset.coco', 'dist_logger', 'epoch_idx'], {}), '(model, val_dataloader, val_dataset.coco, dist_logger,\n epoch_idx)\n', (3948, 4017), False, 'import engine\n'), ((2078, 2093), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2091, 2093), True, 'import torch.distributed as dist\n'), ((2151, 2166), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2164, 2166), True, 'import torch.distributed as dist\n'), ((2409, 2424), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2422, 2424), True, 'import torch.distributed as dist\n')] |
# !/usr/bin/python
# -*-coding:utf-8-*-
from keras.layers import Input, SpatialDropout1D, Dense
from keras.layers import Bidirectional, GRU, Flatten, Dropout, Embedding
from keras.preprocessing import text, sequence
from keras.models import Model
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd
import jieba
import _pickle as pickle
import numpy as np
import keras.backend as K
K.clear_session()
# Hyper-parameters for the BiGRU classifier defined in get_model() below.
gru_len = 128
Routings = 5
dropout_p = 0.25
rate_drop_dense = 0.28
# not enable in windows
# jieba.enable_parallel(4)
# NOTE(review): duplicate of the clear_session() call above -- likely a leftover.
K.clear_session()
remove_stop_words = True
train_file = '../../data/train.csv'
test_file = '../../data/test_public.csv'
# load stopwords
with open('../../data/stop_words.txt', encoding='utf-8') as f:
    stop_words = set([l.strip() for l in f])
# load Glove Vectors
embeddings_index = {}
EMBEDDING_DIM = 300
# load data
train_df = pd.read_csv(train_file, encoding='utf-8')
test_df = pd.read_csv(test_file, encoding='utf-8')
# Combined label = subject string concatenated with the sentiment value,
# e.g. '动力' + '-1' -> '动力-1' (matches the encoding in make_multilabel()).
train_df['label'] = train_df['subject'].str.cat(
    train_df['sentiment_value'].astype(str))
if remove_stop_words:
    # Drop stop words, then re-join tokens into a single string per row.
    train_df['content'] = train_df.content.map(
        lambda x: ''.join([e for e in x.strip().split() if e not in stop_words]))
    test_df['content'] = test_df.content.map(
        lambda x: ''.join([e for e in x.strip().split() if e not in stop_words]))
else:
    # Only collapse whitespace.
    train_df['content'] = train_df.content.map(
        lambda x: ''.join(x.strip().split()))
    test_df['content'] = test_df.content.map(
        lambda x: ''.join(x.strip().split()))
# Group labels by content: one text may carry several (subject, sentiment) labels.
train_dict = {}
for ind, row in train_df.iterrows():
    content, label = row['content'], row['label']
    if train_dict.get(content) is None:
        train_dict[content] = set([label])
    else:
        train_dict[content].add(label)
conts = []   # unique contents
labels = []  # matching sets of label strings
flag = -1
for k, v in train_dict.items():
    conts.append(k)
    labels.append(v)
def make_multilabel(labels):
    """Encode (topic, sentiment) label sets as 30-dim multi-hot rows.

    Each label string is a topic name concatenated with a sentiment value
    ('-1', '0' or '1'), e.g. '动力-1'.  Columns are topic-major:
    column index = topic_index * 3 + sentiment_index.

    Args:
        labels: iterable of collections of label strings, one per sample.

    Returns:
        A single-element list wrapping the (n_samples, 30) multi-hot matrix
        as nested Python lists (shape kept for the downstream Keras target).
    """
    topic = ['动力', '价格', '内饰', '配置', '安全性',
             '外观', '操控', '油耗', '空间', '舒适性']
    sentiment = [-1, 0, 1]
    # Map every 'topic+sentiment' combination to its fixed column index.
    cmb = [str(t) + str(s) for t in topic for s in sentiment]
    d = {label: idx for idx, label in enumerate(cmb)}
    res = np.zeros((len(labels), 30), dtype=np.int32)
    # enumerate() replaces the original manual `i += 1` row counter.
    for row, lst in enumerate(labels):
        for item in lst:
            res[row][d[item]] = 1
    return [res.tolist()]
# Multi-hot targets for the 30 (topic, sentiment) classes.
y_train = make_multilabel(labels)
# mlb = MultiLabelBinarizer()
# y_train = mlb.fit_transform(labels)
# with open('mlb.pickle', 'wb') as handle:
#     pickle.dump(mlb, handle)
# Word-segment each text with jieba before fitting the tokenizer.
content_list = [jieba.lcut(str(c)) for c in conts]
test_content_list = [jieba.lcut(c) for c in test_df.content.astype(str).values]
max_feature = 30000
tokenizer = text.Tokenizer(num_words=max_feature)
tokenizer.fit_on_texts(list(content_list) + list(test_content_list))
# saving tokenizer model
with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle)
seqs = tokenizer.texts_to_sequences(content_list)
seqs_dev = tokenizer.texts_to_sequences(test_content_list)
# Pre-computed word-vector matrix, pickled offline.
embedding_matrix = pickle.load(
    open('../../data/word2vec_model/embedding_matrix', 'rb'))
def get_padding_data(maxlen=100):
    """Pad the train/dev token-id sequences to a fixed length.

    Returns a ``(x_train, x_dev)`` tuple, each padded/truncated to
    ``maxlen`` time steps.  Reads the module-level ``seqs``/``seqs_dev``.
    """
    padded_train = sequence.pad_sequences(seqs, maxlen=maxlen)
    padded_dev = sequence.pad_sequences(seqs_dev, maxlen=maxlen)
    return padded_train, padded_dev
def get_model():
    """Build the BiGRU multi-label classifier (Keras functional API).

    Frozen pretrained embeddings -> spatial dropout -> bidirectional GRU ->
    flatten -> dropout -> 30-way sigmoid output, compiled with binary
    cross-entropy and Adam.  Reads the module-level ``maxlen``,
    ``embedding_matrix`` and dropout hyper-parameters.
    """
    inputs = Input(shape=(maxlen,))
    embedding = Embedding(len(embedding_matrix),
                          EMBEDDING_DIM,
                          weights=[embedding_matrix],
                          input_length=maxlen,
                          trainable=False)
    features = embedding(inputs)
    features = SpatialDropout1D(rate_drop_dense)(features)
    recurrent = Bidirectional(
        GRU(gru_len, activation='relu', dropout=dropout_p,
            recurrent_dropout=dropout_p, return_sequences=True))
    features = recurrent(features)
    features = Flatten()(features)
    features = Dropout(dropout_p)(features)
    outputs = Dense(30, activation='sigmoid')(features)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=['accuracy'])
    return model
maxlen = 100
X_train, X_dev = get_padding_data(maxlen)
# print(X_train.shape, X_dev.shape, y_train.shape)
# Train 5 independent models and average their test-set probabilities
# (a simple seed ensemble).
first_model_results = []
for i in range(5):
    model = get_model()
    model.fit(X_train, y_train, batch_size=64, epochs=15)  # 15
    first_model_results.append(model.predict(X_dev, batch_size=1024))
pred4 = np.average(first_model_results, axis=0)
model.save('model_7.h5')
# Threshold the averaged probabilities: if no class reaches 0.5, keep only
# the argmax class; otherwise round each probability to 0/1.
tmp = [[i for i in row] for row in pred4]
for i, v in enumerate(tmp):
    if max(v) < 0.5:
        max_val = max(v)
        tmp[i] = [1 if j == max_val else 0 for j in v]
    else:
        tmp[i] = [int(round(j)) for j in v]
tmp = np.asanyarray(tmp)
def decode_multilabel(labels):
    """Decode a (n_samples, 30) multi-hot matrix back to label strings.

    Inverse of ``make_multilabel``: column index = topic_index * 3 +
    sentiment_index, so column 5 maps to '价格1', etc.

    Args:
        labels: 2-D array-like with a ``.shape`` attribute; entries are 0/1.

    Returns:
        A list (one entry per row) of lists of label strings such as
        '动力-1', in ascending column order.
    """
    topic = ['动力', '价格', '内饰', '配置', '安全性',
             '外观', '操控', '油耗', '空间', '舒适性']
    sentiment = [-1, 0, 1]
    cmb = [str(t) + str(s) for t in topic for s in sentiment]
    d2 = dict(enumerate(cmb))
    # Leftover debug print of the matrix shape removed.
    rows, cols = labels.shape
    return [[d2[j] for j in range(cols) if labels[i][j] == 1]
            for i in range(rows)]
# res = mlb.inverse_transform(tmp)
res = decode_multilabel(tmp)
# Explode each (content, [labels]) pair into one submission row per label.
cids = []
subjs = []
sent_vals = []
for c, r in zip(test_df.content_id, res):
    for t in r:
        if '-' in t:
            # e.g. '动力-1' -> subject '动力', sentiment -1
            sent_val = -1
            subj = t[:-2]
        else:
            # e.g. '动力0' / '动力1' -> last char is the sentiment digit
            sent_val = int(t[-1])
            subj = t[:-1]
        cids.append(c)
        subjs.append(subj)
        sent_vals.append(sent_val)
# NOTE(review): sentiment_word is filled with a constant placeholder ('便宜').
res_df = pd.DataFrame({'content_id': cids, 'subject': subjs, 'sentiment_value': sent_vals,
                   'sentiment_word': ['便宜' for i in range(len(cids))]})
columns = ['content_id', 'subject', 'sentiment_value', 'sentiment_word']
res_df = res_df.reindex(columns=columns)
res_df.to_csv('submit_word.csv', encoding='utf-8', index=False)
| [
"numpy.average",
"pandas.read_csv",
"_pickle.dump",
"keras.preprocessing.sequence.pad_sequences",
"numpy.asanyarray",
"keras.layers.Flatten",
"keras.layers.Dropout",
"keras.models.Model",
"keras.layers.SpatialDropout1D",
"keras.layers.GRU",
"keras.preprocessing.text.Tokenizer",
"keras.layers.D... | [((404, 421), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (419, 421), True, 'import keras.backend as K\n'), ((542, 559), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (557, 559), True, 'import keras.backend as K\n'), ((878, 919), 'pandas.read_csv', 'pd.read_csv', (['train_file'], {'encoding': '"""utf-8"""'}), "(train_file, encoding='utf-8')\n", (889, 919), True, 'import pandas as pd\n'), ((930, 970), 'pandas.read_csv', 'pd.read_csv', (['test_file'], {'encoding': '"""utf-8"""'}), "(test_file, encoding='utf-8')\n", (941, 970), True, 'import pandas as pd\n'), ((2687, 2724), 'keras.preprocessing.text.Tokenizer', 'text.Tokenizer', ([], {'num_words': 'max_feature'}), '(num_words=max_feature)\n', (2701, 2724), False, 'from keras.preprocessing import text, sequence\n'), ((4413, 4452), 'numpy.average', 'np.average', (['first_model_results'], {'axis': '(0)'}), '(first_model_results, axis=0)\n', (4423, 4452), True, 'import numpy as np\n'), ((4713, 4731), 'numpy.asanyarray', 'np.asanyarray', (['tmp'], {}), '(tmp)\n', (4726, 4731), True, 'import numpy as np\n'), ((2595, 2608), 'jieba.lcut', 'jieba.lcut', (['c'], {}), '(c)\n', (2605, 2608), False, 'import jieba\n'), ((2871, 2901), '_pickle.dump', 'pickle.dump', (['tokenizer', 'handle'], {}), '(tokenizer, handle)\n', (2882, 2901), True, 'import _pickle as pickle\n'), ((3157, 3200), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['seqs'], {'maxlen': 'maxlen'}), '(seqs, maxlen=maxlen)\n', (3179, 3200), False, 'from keras.preprocessing import text, sequence\n'), ((3213, 3260), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['seqs_dev'], {'maxlen': 'maxlen'}), '(seqs_dev, maxlen=maxlen)\n', (3235, 3260), False, 'from keras.preprocessing import text, sequence\n'), ((3319, 3341), 'keras.layers.Input', 'Input', ([], {'shape': '(maxlen,)'}), '(shape=(maxlen,))\n', (3324, 3341), False, 'from keras.layers import Input, 
SpatialDropout1D, Dense\n'), ((3928, 3964), 'keras.models.Model', 'Model', ([], {'inputs': 'input1', 'outputs': 'output'}), '(inputs=input1, outputs=output)\n', (3933, 3964), False, 'from keras.models import Model\n'), ((3612, 3645), 'keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['rate_drop_dense'], {}), '(rate_drop_dense)\n', (3628, 3645), False, 'from keras.layers import Input, SpatialDropout1D, Dense\n'), ((3825, 3834), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3832, 3834), False, 'from keras.layers import Bidirectional, GRU, Flatten, Dropout, Embedding\n'), ((3846, 3864), 'keras.layers.Dropout', 'Dropout', (['dropout_p'], {}), '(dropout_p)\n', (3853, 3864), False, 'from keras.layers import Bidirectional, GRU, Flatten, Dropout, Embedding\n'), ((3881, 3912), 'keras.layers.Dense', 'Dense', (['(30)'], {'activation': '"""sigmoid"""'}), "(30, activation='sigmoid')\n", (3886, 3912), False, 'from keras.layers import Input, SpatialDropout1D, Dense\n'), ((3691, 3798), 'keras.layers.GRU', 'GRU', (['gru_len'], {'activation': '"""relu"""', 'dropout': 'dropout_p', 'recurrent_dropout': 'dropout_p', 'return_sequences': '(True)'}), "(gru_len, activation='relu', dropout=dropout_p, recurrent_dropout=\n dropout_p, return_sequences=True)\n", (3694, 3798), False, 'from keras.layers import Bidirectional, GRU, Flatten, Dropout, Embedding\n')] |
import os
import uuid
import time
import asyncio
import base64
import numpy as np
import tensorflow as tf
from aiohttp import web
from checkers_ai.model import Policy
from checkers_ai.state import State
class PolicyServer:
    """aiohttp service that batches checkers policy-network inference.

    Requests are queued by ``requestHandler``; the ``stateTracker`` loop
    flushes the queue through one batched TensorFlow forward pass once it
    is full or the wait window expires, and results are handed back to the
    waiting handlers keyed by a per-request UUID.
    """

    # Upper bound on states evaluated in a single forward pass.
    maxBatchSz = 512
    # Max seconds a queued request waits before a partial batch is run.
    maxTOQ = 1e-5
    # Sleep interval for the polling loops below.
    clockSpeed = 1e-6

    def __init__(self, model_path:str, selection:str, url:str, port:int, device:str, gpu_idx=None):
        """Load the policy model and run the HTTP server.

        NOTE(review): ``web.run_app`` blocks, so this constructor does not
        return while the server is live.
        """
        self.tasks = []  # pending (key, state) pairs; newest inserted at index 0
        self.resps = {}  # request key -> result dict published by process()
        if device == 'gpu':
            assert gpu_idx is not None
            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_idx)
        TF_CONFIG = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False
        )
        graph = tf.Graph()
        with graph.as_default():
            session = tf.Session(config=TF_CONFIG, graph=graph)
            self.policy = Policy(
                session=session,
                load_dir=model_path,
                selection=selection,
                trainable=False,
                device=device
            )
        # NOTE(review): startServer() is a coroutine, so self.app holds the
        # coroutine object (web.run_app accepts one), not the Application.
        # __del__ below then calls .close() on that coroutine -- confirm
        # this is intended.
        self.app = self.startServer()
        web.run_app(self.app, host=url, port=port)

    def __del__(self):
        # Best-effort shutdown of the stored app/coroutine handle.
        self.app.close()

    async def stateTracker(self):
        """Background loop that triggers batched processing.

        Fires when the queue is full (maxBatchSz) or queued items have
        waited longer than maxTOQ seconds; otherwise yields briefly.
        """
        t = time.time()
        while True:
            qSize = len(self.tasks)
            qFull = qSize >= self.maxBatchSz
            qWait = (time.time() - t) < self.maxTOQ  # still inside the wait window
            if qSize and (qFull or not qWait):
                if not qFull:
                    # Timed-out partial batch: restart the wait window.
                    t = time.time()
                await self.process()
            else:
                await asyncio.sleep(self.clockSpeed)

    async def enqueue(self, key:str, state:State):
        """Queue one board state for batched evaluation under *key*."""
        self.tasks.insert(0, (key, state))

    async def dequeue(self, key):
        """Poll (with short sleeps) until process() publishes *key*'s result."""
        result = None
        while not result:
            result = self.resps.pop(key, None)
            if result:
                return result
            else:
                await asyncio.sleep(self.clockSpeed)

    async def requestHandler(self, request):
        """HTTP entry point: enqueue the posted 'state' and await its result.

        Returns ``{"action": None, "prob": None}`` when no state is given
        or any exception occurs.
        """
        response = {
            "action": None,
            "prob": None
        }
        try:
            request = await request.json()
            state = request.get('state')
            if state:
                key = str(uuid.uuid4())
                _ = asyncio.ensure_future(self.enqueue(key, state))
                response = await self.dequeue(key=key)
            return web.json_response(response)
        except Exception as e:
            print("Web handler caught the following exception: {}".format(e))
            return web.json_response(response)

    async def process(self):
        """Run one batched forward pass and publish results per key.

        Action/prob arrays are shipped as base64-encoded ndarray pickles
        (``.dumps()``) so they survive JSON transport.
        """
        batchSize = min(len(self.tasks), self.maxBatchSz)
        # pop() takes from the end: oldest requests are served first.
        tasks = [self.tasks.pop() for _ in range(batchSize)]
        keys = [task[0] for task in tasks]
        states = np.stack([task[1] for task in tasks], axis=0).reshape(-1, 8, 4).astype(np.int32)
        fetches = [self.policy.probs, self.policy.actions]
        feed_dict = {self.policy.state: states}
        probs, actions = self.policy.session.run(fetches, feed_dict)
        for i in range(batchSize):
            result = {
                "action": base64.encodebytes(actions[i].dumps()).decode("utf-8"),
                "prob": base64.encodebytes(probs[i].dumps()).decode("utf-8"),
            }
            self.resps.update({keys[i]: result})

    async def startServer(self):
        """Build the aiohttp app, register '/' and schedule stateTracker."""
        app = web.Application()
        app.add_routes([web.get("/", self.requestHandler)])
        _ = asyncio.ensure_future(self.stateTracker())
        return app
| [
"numpy.stack",
"uuid.uuid4",
"asyncio.sleep",
"tensorflow.Session",
"time.time",
"aiohttp.web.json_response",
"tensorflow.ConfigProto",
"aiohttp.web.get",
"aiohttp.web.run_app",
"tensorflow.Graph",
"aiohttp.web.Application",
"checkers_ai.model.Policy"
] | [((587, 656), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (601, 656), True, 'import tensorflow as tf\n'), ((707, 717), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (715, 717), True, 'import tensorflow as tf\n'), ((1079, 1121), 'aiohttp.web.run_app', 'web.run_app', (['self.app'], {'host': 'url', 'port': 'port'}), '(self.app, host=url, port=port)\n', (1090, 1121), False, 'from aiohttp import web\n'), ((1218, 1229), 'time.time', 'time.time', ([], {}), '()\n', (1227, 1229), False, 'import time\n'), ((3364, 3381), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (3379, 3381), False, 'from aiohttp import web\n'), ((773, 814), 'tensorflow.Session', 'tf.Session', ([], {'config': 'TF_CONFIG', 'graph': 'graph'}), '(config=TF_CONFIG, graph=graph)\n', (783, 814), True, 'import tensorflow as tf\n'), ((841, 943), 'checkers_ai.model.Policy', 'Policy', ([], {'session': 'session', 'load_dir': 'model_path', 'selection': 'selection', 'trainable': '(False)', 'device': 'device'}), '(session=session, load_dir=model_path, selection=selection, trainable\n =False, device=device)\n', (847, 943), False, 'from checkers_ai.model import Policy\n'), ((2384, 2411), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (2401, 2411), False, 'from aiohttp import web\n'), ((2540, 2567), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (2557, 2567), False, 'from aiohttp import web\n'), ((3406, 3439), 'aiohttp.web.get', 'web.get', (['"""/"""', 'self.requestHandler'], {}), "('/', self.requestHandler)\n", (3413, 3439), False, 'from aiohttp import web\n'), ((1352, 1363), 'time.time', 'time.time', ([], {}), '()\n', (1361, 1363), False, 'import time\n'), ((1484, 1495), 'time.time', 'time.time', ([], {}), '()\n', (1493, 1495), False, 'import time\n'), ((1573, 1603), 
'asyncio.sleep', 'asyncio.sleep', (['self.clockSpeed'], {}), '(self.clockSpeed)\n', (1586, 1603), False, 'import asyncio\n'), ((1922, 1952), 'asyncio.sleep', 'asyncio.sleep', (['self.clockSpeed'], {}), '(self.clockSpeed)\n', (1935, 1952), False, 'import asyncio\n'), ((2228, 2240), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2238, 2240), False, 'import uuid\n'), ((2777, 2822), 'numpy.stack', 'np.stack', (['[task[1] for task in tasks]'], {'axis': '(0)'}), '([task[1] for task in tasks], axis=0)\n', (2785, 2822), True, 'import numpy as np\n')] |
#%%
import fnmatch, os, re
import glob
import math
import cv2
import numpy as np
import operator
from matplotlib import pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
# File-naming pieces used when writing augmented images.
point = '.'
extension = 'jpg'
point_extension = '.'+ extension
tag = '_data_au{}_'
import random
#%%
def preview(image):
    """Show *image* in grayscale with the axis ticks hidden."""
    plt.imshow(image, cmap='gray')
    plt.xticks([])
    plt.yticks([])
    plt.show()
#%%
def insensitive_glob(pattern):
    """Case-insensitive ``glob.glob``.

    Every alphabetic character in *pattern* is expanded to a two-letter
    character class (e.g. ``a`` -> ``[aA]``); other characters pass through.
    """
    def char_class(ch):
        if ch.isalpha():
            return '[%s%s]' % (ch.lower(), ch.upper())
        return ch
    return glob.glob(''.join(char_class(ch) for ch in pattern))
#%%
def build_model(input_shape):
    """Assemble and compile a small CNN binary classifier.

    Four Conv(relu)/MaxPool stages followed by two 512-unit dense layers
    with dropout and a single sigmoid output, compiled with binary
    cross-entropy and RMSprop (lr=1e-4).

    Args:
        input_shape: (height, width, channels) of the input images.

    Returns:
        The compiled Keras ``Sequential`` model.
    """
    from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
    from keras.models import Sequential
    from keras import optimizers

    model = Sequential([
        Conv2D(16, kernel_size=(3, 3), activation='relu',
               input_shape=input_shape),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(512, activation='relu'),
        Dropout(0.3),
        Dense(512, activation='relu'),
        Dropout(0.3),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['accuracy'])
    return model
#%%
def main():
    """Load images, preview a few augmentations, then (attempt to) train."""
    list_images = []
    IMG_DIM = (640, 480)
    # Collect plain .jpg files under images/<class>/; skip macOS metadata
    # and anything already tagged with '_' (previously augmented output).
    for filename in insensitive_glob(os.path.join('images','*','*.{}').format(extension)):
        if '.DS_Store' in filename or '_' in filename:
            continue
        list_images.append(filename)
    train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,
                        width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
                        horizontal_flip=True, fill_mode='nearest')
    # print(list_images)
    # val_datagen = ImageDataGenerator(rescale=1./255)
    # validation_imgs_scaled = validation_imgs.astype('float32')
    # validation_imgs_scaled /= 255
    train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in list_images]
    train_imgs = np.array(train_imgs)
    # NOTE(review): train_imgs_scaled is computed but never used -- the
    # flow() call below feeds the unscaled train_imgs (the generator's
    # rescale handles normalization).  Confirm the intent.
    train_imgs_scaled = train_imgs.astype('float32')
    train_imgs_scaled /= 255
    # NOTE(review): labels are random placeholders, not real classes.
    train_labels =[random.choice(['100','50','10','20']) for i in train_imgs]
    bill_generator = train_datagen.flow(train_imgs, train_labels, batch_size=1)
    # Preview five augmented samples.
    bill = [next(bill_generator) for i in range(0,5)]
    fig, ax = plt.subplots(1,5, figsize=(15, 6))
    for i in range(0,5):
        ax[i].imshow(bill[i][0][0])
    plt.show()
    # ax.set_yticks([])
    # ax.set_xticks([])
    model = build_model((640, 480,3))
    # NOTE(review): train_generator and val_generator are undefined in this
    # file -- this fit_generator call raises NameError as written.
    # Presumably bill_generator (and a validation flow) were intended.
    history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,
                                  validation_data=val_generator, validation_steps=50,
                                  verbose=1)
#%%
# Script entry point.
if __name__ == '__main__':
    main()
    print('Data Augmentation has been generated!!!')
#%% | [
"keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.show",
"os.path.join",
"matplotlib.pyplot.imshow",
"keras.layers.Dropout",
"matplotlib.pyplot.yticks",
"keras.layers.Flatten",
"random.choice",
"keras.preprocessing.image.load_img",
"keras.layers.Dense",
"numpy.array",
"keras.l... | [((422, 432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (430, 432), True, 'from matplotlib import pyplot as plt\n'), ((807, 819), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (817, 819), False, 'from keras.models import Sequential\n'), ((1979, 2166), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'zoom_range': '(0.3)', 'rotation_range': '(50)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.2)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, zoom_range=0.3, rotation_range=50,\n width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,\n horizontal_flip=True, fill_mode='nearest')\n", (1997, 2166), False, 'from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\n'), ((2523, 2543), 'numpy.array', 'np.array', (['train_imgs'], {}), '(train_imgs)\n', (2531, 2543), True, 'import numpy as np\n'), ((2855, 2890), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '(15, 6)'}), '(1, 5, figsize=(15, 6))\n', (2867, 2890), True, 'from matplotlib import pyplot as plt\n'), ((355, 385), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (365, 385), True, 'from matplotlib import pyplot as plt\n'), ((387, 401), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (397, 401), True, 'from matplotlib import pyplot as plt\n'), ((403, 417), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (413, 417), True, 'from matplotlib import pyplot as plt\n'), ((835, 909), 'keras.layers.Conv2D', 'Conv2D', (['(16)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(16, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (841, 909), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((946, 976), 
'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (958, 976), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((993, 1042), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (999, 1042), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1058, 1088), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1070, 1088), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1105, 1155), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (1111, 1155), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1171, 1201), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1183, 1201), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1218, 1268), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (1224, 1268), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1284, 1314), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1296, 1314), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1331, 1340), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1338, 1340), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1356, 1385), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (1361, 1385), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, 
Dropout\n'), ((1401, 1413), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1408, 1413), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1429, 1458), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (1434, 1458), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1474, 1486), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1481, 1486), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1502, 1532), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1507, 1532), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n'), ((2646, 2686), 'random.choice', 'random.choice', (["['100', '50', '10', '20']"], {}), "(['100', '50', '10', '20'])\n", (2659, 2686), False, 'import random\n'), ((2959, 2969), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2967, 2969), True, 'from matplotlib import pyplot as plt\n'), ((1607, 1636), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (1625, 1636), False, 'from keras import optimizers\n'), ((2446, 2480), 'keras.preprocessing.image.load_img', 'load_img', (['img'], {'target_size': 'IMG_DIM'}), '(img, target_size=IMG_DIM)\n', (2454, 2480), False, 'from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\n'), ((1791, 1826), 'os.path.join', 'os.path.join', (['"""images"""', '"""*"""', '"""*.{}"""'], {}), "('images', '*', '*.{}')\n", (1803, 1826), False, 'import fnmatch, os, re\n')] |
import keras
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense
from keras.optimizers import Adam
import numpy as np
import random
from collections import deque
class Agent:
    """Deep Q-learning trading agent over a 5-action space.

    Actions: buy_1, sell_1, do nothing, buy_2, sell_2.  In training mode
    the agent acts epsilon-greedily and learns from an experience-replay
    buffer; in eval mode it loads a saved model and acts greedily.
    """

    def __init__(self, state_size, is_eval=False, model_name=""):
        self.state_size = state_size  # normalized previous days
        self.action_size = 5  # buy_1, sell_1, do nothing, buy_2, sell_2
        self.memory = deque(maxlen=2000)  # experience-replay buffer
        self.inventory1 = []
        self.inventory2 = []
        self.model_name = model_name
        self.is_eval = is_eval
        self.gamma = 0.95  # discount factor: weight of future rewards
        self.epsilon = 1.0  # exploration rate (epsilon-greedy)
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.model = load_model("models/" + model_name) if is_eval else self._model()

    def _model(self):
        """Build the Q-network: three ReLU hidden layers -> linear Q-values."""
        model = Sequential()
        model.add(Dense(units=64, input_dim=self.state_size, activation="relu"))
        model.add(Dense(units=32, activation="relu"))
        model.add(Dense(units=8, activation="relu"))
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=0.0001))
        return model

    def act(self, state):
        """Return an action index: epsilon-greedy in training, greedy in eval."""
        if not self.is_eval and random.random() <= self.epsilon:
            return random.randrange(self.action_size)
        options = self.model.predict(state)
        return np.argmax(options[0])

    def expReplay(self, batch_size):
        """Train on a random minibatch drawn from the replay buffer.

        Bug fix: the original sampled into ``minibatch`` but iterated the
        always-empty ``mini_batch`` list, so no replay training ever ran.
        """
        mini_batch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in mini_batch:
            # Bellman target: immediate reward, plus discounted max future
            # Q-value for non-terminal transitions.
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        # Anneal exploration towards epsilon_min.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
| [
"keras.models.load_model",
"numpy.argmax",
"random.sample",
"keras.optimizers.Adam",
"random.random",
"keras.layers.Dense",
"random.randrange",
"keras.models.Sequential",
"collections.deque"
] | [((452, 470), 'collections.deque', 'deque', ([], {'maxlen': '(2000)'}), '(maxlen=2000)\n', (457, 470), False, 'from collections import deque\n'), ((979, 991), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (989, 991), False, 'from keras.models import Sequential\n'), ((1638, 1659), 'numpy.argmax', 'np.argmax', (['options[0]'], {}), '(options[0])\n', (1647, 1659), True, 'import numpy as np\n'), ((1780, 1818), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (1793, 1818), False, 'import random\n'), ((875, 909), 'keras.models.load_model', 'load_model', (["('models/' + model_name)"], {}), "('models/' + model_name)\n", (885, 909), False, 'from keras.models import load_model\n'), ((1010, 1071), 'keras.layers.Dense', 'Dense', ([], {'units': '(64)', 'input_dim': 'self.state_size', 'activation': '"""relu"""'}), "(units=64, input_dim=self.state_size, activation='relu')\n", (1015, 1071), False, 'from keras.layers import Dense\n'), ((1091, 1125), 'keras.layers.Dense', 'Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (1096, 1125), False, 'from keras.layers import Dense\n'), ((1145, 1178), 'keras.layers.Dense', 'Dense', ([], {'units': '(8)', 'activation': '"""relu"""'}), "(units=8, activation='relu')\n", (1150, 1178), False, 'from keras.layers import Dense\n'), ((1198, 1242), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""linear"""'}), "(self.action_size, activation='linear')\n", (1203, 1242), False, 'from keras.layers import Dense\n'), ((1473, 1507), 'random.randrange', 'random.randrange', (['self.action_size'], {}), '(self.action_size)\n', (1489, 1507), False, 'import random\n'), ((1288, 1303), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (1292, 1303), False, 'from keras.optimizers import Adam\n'), ((1385, 1400), 'random.random', 'random.random', ([], {}), '()\n', (1398, 1400), False, 'import random\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import platform
import paddle
import sklearn
from sklearn.model_selection import KFold
from sklearn.decomposition import PCA
from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger
def fuse_features_with_norm(stacked_embeddings, stacked_norms):
    """Norm-weighted fusion of several embeddings per sample.

    Each feature set is re-weighted by its norm, the weighted embeddings
    are summed over the fusion axis, and the sum is L2-renormalized.

    Args:
        stacked_embeddings: (n_features_to_fuse, batch_size, channel) tensor.
        stacked_norms: (n_features_to_fuse, batch_size, 1) tensor.

    Returns:
        Tuple of (fused unit-norm embeddings, their pre-normalization norms).
    """
    assert stacked_embeddings.ndim == 3
    assert stacked_norms.ndim == 3
    weighted = stacked_embeddings * stacked_norms
    summed = weighted.sum(axis=0)
    fused_norm = paddle.norm(summed, 2, 1, True)
    return paddle.divide(summed, fused_norm), fused_norm
def adaface_eval(engine, epoch_id=0):
output_info = dict()
time_info = {
"batch_cost": AverageMeter(
"batch_cost", '.5f', postfix=" s,"),
"reader_cost": AverageMeter(
"reader_cost", ".5f", postfix=" s,"),
}
print_batch_step = engine.config["Global"]["print_batch_step"]
metric_key = None
tic = time.time()
unique_dict = {}
for iter_id, batch in enumerate(engine.eval_dataloader):
images, labels, dataname, image_index = batch
if iter_id == 5:
for key in time_info:
time_info[key].reset()
time_info["reader_cost"].update(time.time() - tic)
batch_size = images.shape[0]
batch[0] = paddle.to_tensor(images)
embeddings = engine.model(images, labels)['features']
norms = paddle.divide(embeddings, paddle.norm(embeddings, 2, 1, True))
embeddings = paddle.divide(embeddings, norms)
fliped_images = paddle.flip(images, axis=[3])
flipped_embeddings = engine.model(fliped_images, labels)['features']
flipped_norms = paddle.divide(
flipped_embeddings, paddle.norm(flipped_embeddings, 2, 1, True))
flipped_embeddings = paddle.divide(flipped_embeddings, flipped_norms)
stacked_embeddings = paddle.stack(
[embeddings, flipped_embeddings], axis=0)
stacked_norms = paddle.stack([norms, flipped_norms], axis=0)
embeddings, norms = fuse_features_with_norm(stacked_embeddings,
stacked_norms)
for out, nor, label, data, idx in zip(embeddings, norms, labels,
dataname, image_index):
unique_dict[int(idx.numpy())] = {
'output': out,
'norm': nor,
'target': label,
'dataname': data
}
# calc metric
time_info["batch_cost"].update(time.time() - tic)
if iter_id % print_batch_step == 0:
time_msg = "s, ".join([
"{}: {:.5f}".format(key, time_info[key].avg)
for key in time_info
])
ips_msg = "ips: {:.5f} images/sec".format(
batch_size / time_info["batch_cost"].avg)
metric_msg = ", ".join([
"{}: {:.5f}".format(key, output_info[key].val)
for key in output_info
])
logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
epoch_id, iter_id,
len(engine.eval_dataloader), metric_msg, time_msg, ips_msg))
tic = time.time()
unique_keys = sorted(unique_dict.keys())
all_output_tensor = paddle.stack(
[unique_dict[key]['output'] for key in unique_keys], axis=0)
all_norm_tensor = paddle.stack(
[unique_dict[key]['norm'] for key in unique_keys], axis=0)
all_target_tensor = paddle.stack(
[unique_dict[key]['target'] for key in unique_keys], axis=0)
all_dataname_tensor = paddle.stack(
[unique_dict[key]['dataname'] for key in unique_keys], axis=0)
eval_result = cal_metric(all_output_tensor, all_norm_tensor,
all_target_tensor, all_dataname_tensor)
metric_msg = ", ".join([
"{}: {:.5f}".format(key, output_info[key].avg) for key in output_info
])
face_msg = ", ".join([
"{}: {:.5f}".format(key, eval_result[key])
for key in eval_result.keys()
])
logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg + ", " +
face_msg))
# return 1st metric in the dict
return eval_result['all_test_acc']
def cal_metric(all_output_tensor, all_norm_tensor, all_target_tensor,
all_dataname_tensor):
all_target_tensor = all_target_tensor.reshape([-1])
all_dataname_tensor = all_dataname_tensor.reshape([-1])
dataname_to_idx = {
"agedb_30": 0,
"cfp_fp": 1,
"lfw": 2,
"cplfw": 3,
"calfw": 4
}
idx_to_dataname = {val: key for key, val in dataname_to_idx.items()}
test_logs = {}
# _, indices = paddle.unique(all_dataname_tensor, return_index=True, return_inverse=False, return_counts=False)
for dataname_idx in all_dataname_tensor.unique():
dataname = idx_to_dataname[dataname_idx.item()]
# per dataset evaluation
embeddings = all_output_tensor[all_dataname_tensor ==
dataname_idx].numpy()
labels = all_target_tensor[all_dataname_tensor == dataname_idx].numpy()
issame = labels[0::2]
tpr, fpr, accuracy, best_thresholds = evaluate_face(
embeddings, issame, nrof_folds=10)
acc, best_threshold = accuracy.mean(), best_thresholds.mean()
num_test_samples = len(embeddings)
test_logs[f'{dataname}_test_acc'] = acc
test_logs[f'{dataname}_test_best_threshold'] = best_threshold
test_logs[f'{dataname}_num_test_samples'] = num_test_samples
test_acc = np.mean([
test_logs[f'{dataname}_test_acc']
for dataname in dataname_to_idx.keys()
if f'{dataname}_test_acc' in test_logs
])
test_logs['all_test_acc'] = test_acc
return test_logs
def evaluate_face(embeddings, actual_issame, nrof_folds=10, pca=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, best_thresholds = calculate_roc(
thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
pca=pca)
return tpr, fpr, accuracy, best_thresholds
def calculate_roc(thresholds,
embeddings1,
embeddings2,
actual_issame,
nrof_folds=10,
pca=0):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
best_thresholds = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
# print('pca', pca)
dist = None
if pca == 0:
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# print('train_set', train_set)
# print('test_set', test_set)
if pca > 0:
print('doing pca on', fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
# print(_embed_train.shape)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
# print(embed1.shape, embed2.shape)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
best_thresholds[fold_idx] = thresholds[best_threshold_index]
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[
fold_idx, threshold_idx], _ = calculate_accuracy(
threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy, best_thresholds
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(
np.logical_and(
np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
| [
"numpy.argmax",
"paddle.stack",
"numpy.mean",
"numpy.arange",
"numpy.logical_not",
"ppcls.utils.misc.AverageMeter",
"paddle.flip",
"paddle.norm",
"numpy.less",
"numpy.asarray",
"numpy.square",
"sklearn.preprocessing.normalize",
"paddle.divide",
"numpy.concatenate",
"numpy.subtract",
"n... | [((1290, 1320), 'paddle.norm', 'paddle.norm', (['fused', '(2)', '(1)', '(True)'], {}), '(fused, 2, 1, True)\n', (1301, 1320), False, 'import paddle\n'), ((1333, 1359), 'paddle.divide', 'paddle.divide', (['fused', 'norm'], {}), '(fused, norm)\n', (1346, 1359), False, 'import paddle\n'), ((1744, 1755), 'time.time', 'time.time', ([], {}), '()\n', (1753, 1755), False, 'import time\n'), ((4115, 4188), 'paddle.stack', 'paddle.stack', (["[unique_dict[key]['output'] for key in unique_keys]"], {'axis': '(0)'}), "([unique_dict[key]['output'] for key in unique_keys], axis=0)\n", (4127, 4188), False, 'import paddle\n'), ((4220, 4291), 'paddle.stack', 'paddle.stack', (["[unique_dict[key]['norm'] for key in unique_keys]"], {'axis': '(0)'}), "([unique_dict[key]['norm'] for key in unique_keys], axis=0)\n", (4232, 4291), False, 'import paddle\n'), ((4325, 4398), 'paddle.stack', 'paddle.stack', (["[unique_dict[key]['target'] for key in unique_keys]"], {'axis': '(0)'}), "([unique_dict[key]['target'] for key in unique_keys], axis=0)\n", (4337, 4398), False, 'import paddle\n'), ((4434, 4509), 'paddle.stack', 'paddle.stack', (["[unique_dict[key]['dataname'] for key in unique_keys]"], {'axis': '(0)'}), "([unique_dict[key]['dataname'] for key in unique_keys], axis=0)\n", (4446, 4509), False, 'import paddle\n'), ((6810, 6831), 'numpy.arange', 'np.arange', (['(0)', '(4)', '(0.01)'], {}), '(0, 4, 0.01)\n', (6819, 6831), True, 'import numpy as np\n'), ((7567, 7608), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nrof_folds', 'shuffle': '(False)'}), '(n_splits=nrof_folds, shuffle=False)\n', (7572, 7608), False, 'from sklearn.model_selection import KFold\n'), ((7621, 7660), 'numpy.zeros', 'np.zeros', (['(nrof_folds, nrof_thresholds)'], {}), '((nrof_folds, nrof_thresholds))\n', (7629, 7660), True, 'import numpy as np\n'), ((7672, 7711), 'numpy.zeros', 'np.zeros', (['(nrof_folds, nrof_thresholds)'], {}), '((nrof_folds, nrof_thresholds))\n', (7680, 7711), True, 'import numpy 
as np\n'), ((7727, 7747), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (7735, 7747), True, 'import numpy as np\n'), ((7772, 7792), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (7780, 7792), True, 'import numpy as np\n'), ((7809, 7830), 'numpy.arange', 'np.arange', (['nrof_pairs'], {}), '(nrof_pairs)\n', (7818, 7830), True, 'import numpy as np\n'), ((9711, 9727), 'numpy.mean', 'np.mean', (['tprs', '(0)'], {}), '(tprs, 0)\n', (9718, 9727), True, 'import numpy as np\n'), ((9738, 9754), 'numpy.mean', 'np.mean', (['fprs', '(0)'], {}), '(fprs, 0)\n', (9745, 9754), True, 'import numpy as np\n'), ((9881, 9905), 'numpy.less', 'np.less', (['dist', 'threshold'], {}), '(dist, threshold)\n', (9888, 9905), True, 'import numpy as np\n'), ((1488, 1536), 'ppcls.utils.misc.AverageMeter', 'AverageMeter', (['"""batch_cost"""', '""".5f"""'], {'postfix': '""" s,"""'}), "('batch_cost', '.5f', postfix=' s,')\n", (1500, 1536), False, 'from ppcls.utils.misc import AverageMeter\n'), ((1574, 1623), 'ppcls.utils.misc.AverageMeter', 'AverageMeter', (['"""reader_cost"""', '""".5f"""'], {'postfix': '""" s,"""'}), "('reader_cost', '.5f', postfix=' s,')\n", (1586, 1623), False, 'from ppcls.utils.misc import AverageMeter\n'), ((2105, 2129), 'paddle.to_tensor', 'paddle.to_tensor', (['images'], {}), '(images)\n', (2121, 2129), False, 'import paddle\n'), ((2292, 2324), 'paddle.divide', 'paddle.divide', (['embeddings', 'norms'], {}), '(embeddings, norms)\n', (2305, 2324), False, 'import paddle\n'), ((2349, 2378), 'paddle.flip', 'paddle.flip', (['images'], {'axis': '[3]'}), '(images, axis=[3])\n', (2360, 2378), False, 'import paddle\n'), ((2601, 2649), 'paddle.divide', 'paddle.divide', (['flipped_embeddings', 'flipped_norms'], {}), '(flipped_embeddings, flipped_norms)\n', (2614, 2649), False, 'import paddle\n'), ((2679, 2733), 'paddle.stack', 'paddle.stack', (['[embeddings, flipped_embeddings]'], {'axis': '(0)'}), '([embeddings, flipped_embeddings], axis=0)\n', 
(2691, 2733), False, 'import paddle\n'), ((2771, 2815), 'paddle.stack', 'paddle.stack', (['[norms, flipped_norms]'], {'axis': '(0)'}), '([norms, flipped_norms], axis=0)\n', (2783, 2815), False, 'import paddle\n'), ((4033, 4044), 'time.time', 'time.time', ([], {}), '()\n', (4042, 4044), False, 'import time\n'), ((7029, 7054), 'numpy.asarray', 'np.asarray', (['actual_issame'], {}), '(actual_issame)\n', (7039, 7054), True, 'import numpy as np\n'), ((7904, 7941), 'numpy.subtract', 'np.subtract', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (7915, 7941), True, 'import numpy as np\n'), ((8949, 8974), 'numpy.zeros', 'np.zeros', (['nrof_thresholds'], {}), '(nrof_thresholds)\n', (8957, 8974), True, 'import numpy as np\n'), ((9206, 9226), 'numpy.argmax', 'np.argmax', (['acc_train'], {}), '(acc_train)\n', (9215, 9226), True, 'import numpy as np\n'), ((9922, 9967), 'numpy.logical_and', 'np.logical_and', (['predict_issame', 'actual_issame'], {}), '(predict_issame, actual_issame)\n', (9936, 9967), True, 'import numpy as np\n'), ((2234, 2269), 'paddle.norm', 'paddle.norm', (['embeddings', '(2)', '(1)', '(True)'], {}), '(embeddings, 2, 1, True)\n', (2245, 2269), False, 'import paddle\n'), ((2527, 2570), 'paddle.norm', 'paddle.norm', (['flipped_embeddings', '(2)', '(1)', '(True)'], {}), '(flipped_embeddings, 2, 1, True)\n', (2538, 2570), False, 'import paddle\n'), ((7964, 7979), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (7973, 7979), True, 'import numpy as np\n'), ((8331, 8383), 'numpy.concatenate', 'np.concatenate', (['(embed1_train, embed2_train)'], {'axis': '(0)'}), '((embed1_train, embed2_train), axis=0)\n', (8345, 8383), True, 'import numpy as np\n'), ((8448, 8469), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'pca'}), '(n_components=pca)\n', (8451, 8469), False, 'from sklearn.decomposition import PCA\n'), ((8639, 8678), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['embed1'], {}), '(embed1)\n', 
(8670, 8678), False, 'import sklearn\n'), ((8700, 8739), 'sklearn.preprocessing.normalize', 'sklearn.preprocessing.normalize', (['embed2'], {}), '(embed2)\n', (8731, 8739), False, 'import sklearn\n'), ((8807, 8834), 'numpy.subtract', 'np.subtract', (['embed1', 'embed2'], {}), '(embed1, embed2)\n', (8818, 8834), True, 'import numpy as np\n'), ((10016, 10045), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (10030, 10045), True, 'import numpy as np\n'), ((10101, 10131), 'numpy.logical_not', 'np.logical_not', (['predict_issame'], {}), '(predict_issame)\n', (10115, 10131), True, 'import numpy as np\n'), ((10133, 10162), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (10147, 10162), True, 'import numpy as np\n'), ((10196, 10226), 'numpy.logical_not', 'np.logical_not', (['predict_issame'], {}), '(predict_issame)\n', (10210, 10226), True, 'import numpy as np\n'), ((2030, 2041), 'time.time', 'time.time', ([], {}), '()\n', (2039, 2041), False, 'import time\n'), ((3351, 3362), 'time.time', 'time.time', ([], {}), '()\n', (3360, 3362), False, 'import time\n'), ((8861, 8876), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (8870, 8876), True, 'import numpy as np\n')] |
from torch import Tensor
from scipy.spatial.transform import Rotation
import torch
import numpy as np
from oil.utils.utils import export
import random
import torch
@export
class FixedSeedAll(object):
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.np_rng_state = np.random.get_state()
np.random.seed(self.seed)
self.rand_rng_state = random.getstate()
random.seed(self.seed)
self.pt_rng_state = torch.random.get_rng_state()
torch.manual_seed(self.seed)
def __exit__(self, *args):
np.random.set_state(self.np_rng_state)
random.setstate(self.rand_rng_state)
torch.random.set_rng_state(self.pt_rng_state)
def rel_err(x: Tensor, y: Tensor) -> Tensor:
return (((x - y) ** 2).sum() / ((x + y) ** 2).sum()).sqrt()
def cross_matrix(k):
"""Application of hodge star on R3, mapping Λ^1 R3 -> Λ^2 R3"""
K = torch.zeros(*k.shape[:-1],3,3,device=k.device,dtype=k.dtype)
K[...,0,1] = -k[...,2]
K[...,0,2] = k[...,1]
K[...,1,0] = k[...,2]
K[...,1,2] = -k[...,0]
K[...,2,0] = -k[...,1]
K[...,2,1] = k[...,0]
return K
def uncross_matrix(K):
"""Application of hodge star on R3, mapping Λ^2 R3 -> Λ^1 R3"""
k = torch.zeros(*K.shape[:-1],device=K.device,dtype=K.dtype)
k[...,0] = (K[...,2,1] - K[...,1,2])/2
k[...,1] = (K[...,0,2] - K[...,2,0])/2
k[...,2] = (K[...,1,0] - K[...,0,1])/2
return k
def eulerdot2omega(euler):
"""(bs,3) -> (bs,3,3) matrix"""
bs,_ = euler.shape
M = torch.zeros(bs,3,3,device=euler.device,dtype=euler.dtype)
phi,theta,psi = euler.T
M[:,0,0] = theta.sin()*psi.sin()
M[:,0,1] = psi.cos()
M[:,1,0] = theta.sin()*psi.cos()
M[:,1,1] = -psi.sin()
M[:,2,0] = theta.cos()
M[:,2,2] = 1
return M
@export
def euler2frame(euler_and_dot):
""" input: (bs,2,3)
output: (bs,2,3,3)"""
euler,eulerdot = euler_and_dot.permute(1,0,2)
omega = (eulerdot2omega(euler)@eulerdot.unsqueeze(-1)).squeeze(-1)
# omega = (angular velocity in the body frame)
RT_Rdot = cross_matrix(omega)
R = torch.from_numpy(Rotation.from_euler('ZXZ',euler.data.cpu().numpy()).as_matrix()).to(euler.device,euler.dtype)
Rdot = R@RT_Rdot
return torch.stack([R,Rdot],dim=1).permute(0,1,3,2) # (bs,2,d,n->bs,2,n,d)
@export
def frame2euler(frame_pos_vel):
""" input: (bs,2,3,3)
output: (bs,2,3)"""
R,Rdot = frame_pos_vel.permute(1,0,3,2)#frame_pos_vel[:,0,1:].permute(0,2,1)-frame_pos_vel[:,0,0].unsqueeze(-1) #(bs,3,3)
#Rdot = frame_pos_vel[:,1,1:].permute(0,2,1)-frame_pos_vel[:,1,0].unsqueeze(-1) #(bs,3,3)
omega = uncross_matrix(R.permute(0,2,1)@Rdot) #angular velocity in body frame Omega = RTRdot
angles = torch.from_numpy(np.ascontiguousarray(Rotation.from_matrix(R.data.cpu().numpy()).as_euler('ZXZ'))).to(R.device,R.dtype)
eulerdot = torch.solve(omega.unsqueeze(-1),eulerdot2omega(angles))[0].squeeze(-1)
return torch.stack([angles,eulerdot],dim=1)
@export
def bodyX2comEuler(X):
""" input: (bs,2,4,3) output: (bs,2,6)"""
xcom = X[:,:,0] #(bs,2,3)
euler = frame2euler(X[:,:,1:]-xcom[:,:,None,:])
return torch.cat([xcom,euler],dim=-1)
@export
def comEuler2bodyX(com_euler):
""" output: (bs,2,6) input: (bs,2,4,3) """
xcom = com_euler[:,:,:3] #(bs,2,3)
frame = euler2frame(com_euler[:,:,3:]) #(bs,2,3,3)
shifted_frame = frame+xcom[:,:,None,:] # (bs,2,3,3)
return torch.cat([xcom[:,:,None,:],shifted_frame],dim=-2)
@export
def read_obj(filename):
import pywavefront
scene = pywavefront.Wavefront(filename,collect_faces=True)
return np.roll(np.array(scene.vertices),1,axis=1), np.array(np.concatenate([mesh.faces for mesh in scene.mesh_list]))
# def read_obj(filename):
# triangles = []
# vertices = []
# with open(filename) as file:
# for line in file:
# components = line.strip(' \n').split(' ')
# if components[0] == "f": # face data
# # e.g. "f 1/1/1/ 2/2/2 3/3/3 4/4/4 ..."
# indices = list(map(lambda c: int(c.split('/')[0]) - 1, components[1:]))
# for i in range(0, len(indices) - 2):
# triangles.append(indices[i: i+3])
# elif components[0] == "v": # vertex data
# # e.g. "v 30.2180 89.5757 -76.8089"
# #print(components)
# vertex = list(map(lambda c: float(c), components[1:]))
# vertices.append(vertex)
# return np.roll(np.array(vertices),1,axis=1), np.array(triangles)
def Vols(mesh_verts):
""" computes the volume of an obj from vertices of the boundary mesh"""
#(num verts, verts per triangle, xyz)
return mesh_verts.det()/6
def Coms(mesh_verts):
""" (bs,n,d) -> (bs,d)"""
return mesh_verts.sum(1)/4
def ExxT(V,mu):
""" (bs,n,d), (bs,d) -> (bs,d,d)"""
return (V.permute(0,2,1)@V)/20+(4/5)*mu[:,None]*mu[:,:,None]
@export
def compute_moments(mesh_verts):
with torch.no_grad():
vols = Vols(mesh_verts)
Vol = vols.sum()
weights = vols/Vol
coms = Coms(mesh_verts)
Com = (coms*weights[:,None]).sum(0)
xxT = (ExxT(mesh_verts,coms)*weights[:,None,None]).sum(0)
covar = xxT-Com[None,:]*Com[:,None]
return Vol,Com,covar | [
"pywavefront.Wavefront",
"numpy.random.seed",
"torch.stack",
"random.setstate",
"numpy.random.get_state",
"torch.manual_seed",
"torch.cat",
"torch.random.set_rng_state",
"numpy.random.set_state",
"random.seed",
"numpy.array",
"torch.random.get_rng_state",
"torch.zeros",
"torch.no_grad",
... | [((952, 1016), 'torch.zeros', 'torch.zeros', (['*k.shape[:-1]', '(3)', '(3)'], {'device': 'k.device', 'dtype': 'k.dtype'}), '(*k.shape[:-1], 3, 3, device=k.device, dtype=k.dtype)\n', (963, 1016), False, 'import torch\n'), ((1296, 1354), 'torch.zeros', 'torch.zeros', (['*K.shape[:-1]'], {'device': 'K.device', 'dtype': 'K.dtype'}), '(*K.shape[:-1], device=K.device, dtype=K.dtype)\n', (1307, 1354), False, 'import torch\n'), ((1599, 1660), 'torch.zeros', 'torch.zeros', (['bs', '(3)', '(3)'], {'device': 'euler.device', 'dtype': 'euler.dtype'}), '(bs, 3, 3, device=euler.device, dtype=euler.dtype)\n', (1610, 1660), False, 'import torch\n'), ((3061, 3099), 'torch.stack', 'torch.stack', (['[angles, eulerdot]'], {'dim': '(1)'}), '([angles, eulerdot], dim=1)\n', (3072, 3099), False, 'import torch\n'), ((3276, 3308), 'torch.cat', 'torch.cat', (['[xcom, euler]'], {'dim': '(-1)'}), '([xcom, euler], dim=-1)\n', (3285, 3308), False, 'import torch\n'), ((3565, 3620), 'torch.cat', 'torch.cat', (['[xcom[:, :, None, :], shifted_frame]'], {'dim': '(-2)'}), '([xcom[:, :, None, :], shifted_frame], dim=-2)\n', (3574, 3620), False, 'import torch\n'), ((3689, 3740), 'pywavefront.Wavefront', 'pywavefront.Wavefront', (['filename'], {'collect_faces': '(True)'}), '(filename, collect_faces=True)\n', (3710, 3740), False, 'import pywavefront\n'), ((322, 343), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (341, 343), True, 'import numpy as np\n'), ((353, 378), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (367, 378), True, 'import numpy as np\n'), ((410, 427), 'random.getstate', 'random.getstate', ([], {}), '()\n', (425, 427), False, 'import random\n'), ((437, 459), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (448, 459), False, 'import random\n'), ((489, 517), 'torch.random.get_rng_state', 'torch.random.get_rng_state', ([], {}), '()\n', (515, 517), False, 'import torch\n'), ((527, 555), 'torch.manual_seed', 
'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (544, 555), False, 'import torch\n'), ((597, 635), 'numpy.random.set_state', 'np.random.set_state', (['self.np_rng_state'], {}), '(self.np_rng_state)\n', (616, 635), True, 'import numpy as np\n'), ((645, 681), 'random.setstate', 'random.setstate', (['self.rand_rng_state'], {}), '(self.rand_rng_state)\n', (660, 681), False, 'import random\n'), ((691, 736), 'torch.random.set_rng_state', 'torch.random.set_rng_state', (['self.pt_rng_state'], {}), '(self.pt_rng_state)\n', (717, 736), False, 'import torch\n'), ((5160, 5175), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5173, 5175), False, 'import torch\n'), ((2340, 2369), 'torch.stack', 'torch.stack', (['[R, Rdot]'], {'dim': '(1)'}), '([R, Rdot], dim=1)\n', (2351, 2369), False, 'import torch\n'), ((3760, 3784), 'numpy.array', 'np.array', (['scene.vertices'], {}), '(scene.vertices)\n', (3768, 3784), True, 'import numpy as np\n'), ((3805, 3861), 'numpy.concatenate', 'np.concatenate', (['[mesh.faces for mesh in scene.mesh_list]'], {}), '([mesh.faces for mesh in scene.mesh_list])\n', (3819, 3861), True, 'import numpy as np\n')] |
# The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import unittest
import os
import bandicoot as bc
import numpy as np
class TestChurn(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestChurn._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
os.chdir(abspath)
TestChurn._dir_changed = True
self.user = bc.io.read_csv("churn_user", "samples",
describe=False, warnings=False)
def test_churn(self):
distribution = bc.spatial.churn_rate(self.user, summary=None)
v1 = [1 / 3, 1 / 3, 1 / 3, 0]
v2 = v1
v3 = [1 / 4, 3 / 4, 0, 0]
v4 = [0, 0, 1 / 2, 1 / 2]
cos_1 = 1 - np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
cos_2 = 1 - np.dot(v2, v3) / (np.linalg.norm(v2) * np.linalg.norm(v3))
cos_3 = 1 - np.dot(v3, v4) / (np.linalg.norm(v3) * np.linalg.norm(v4))
np.testing.assert_almost_equal(distribution, [cos_1, cos_2, cos_3])
churn_rate = bc.spatial.churn_rate(self.user)
np.testing.assert_almost_equal(churn_rate['mean'],
np.mean([cos_1, cos_2, cos_3]))
np.testing.assert_almost_equal(churn_rate['std'],
np.std([cos_1, cos_2, cos_3]))
| [
"os.path.abspath",
"os.path.basename",
"numpy.std",
"numpy.testing.assert_almost_equal",
"bandicoot.io.read_csv",
"numpy.mean",
"numpy.linalg.norm",
"numpy.dot",
"os.chdir",
"bandicoot.spatial.churn_rate"
] | [((1664, 1735), 'bandicoot.io.read_csv', 'bc.io.read_csv', (['"""churn_user"""', '"""samples"""'], {'describe': '(False)', 'warnings': '(False)'}), "('churn_user', 'samples', describe=False, warnings=False)\n", (1678, 1735), True, 'import bandicoot as bc\n'), ((1821, 1867), 'bandicoot.spatial.churn_rate', 'bc.spatial.churn_rate', (['self.user'], {'summary': 'None'}), '(self.user, summary=None)\n', (1842, 1867), True, 'import bandicoot as bc\n'), ((2238, 2305), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['distribution', '[cos_1, cos_2, cos_3]'], {}), '(distribution, [cos_1, cos_2, cos_3])\n', (2268, 2305), True, 'import numpy as np\n'), ((2328, 2360), 'bandicoot.spatial.churn_rate', 'bc.spatial.churn_rate', (['self.user'], {}), '(self.user)\n', (2349, 2360), True, 'import bandicoot as bc\n'), ((1447, 1472), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1462, 1472), False, 'import os\n'), ((1583, 1600), 'os.chdir', 'os.chdir', (['abspath'], {}), '(abspath)\n', (1591, 1600), False, 'import os\n'), ((2459, 2489), 'numpy.mean', 'np.mean', (['[cos_1, cos_2, cos_3]'], {}), '([cos_1, cos_2, cos_3])\n', (2466, 2489), True, 'import numpy as np\n'), ((2588, 2617), 'numpy.std', 'np.std', (['[cos_1, cos_2, cos_3]'], {}), '([cos_1, cos_2, cos_3])\n', (2594, 2617), True, 'import numpy as np\n'), ((1506, 1532), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1522, 1532), False, 'import os\n'), ((2012, 2026), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (2018, 2026), True, 'import numpy as np\n'), ((2091, 2105), 'numpy.dot', 'np.dot', (['v2', 'v3'], {}), '(v2, v3)\n', (2097, 2105), True, 'import numpy as np\n'), ((2170, 2184), 'numpy.dot', 'np.dot', (['v3', 'v4'], {}), '(v3, v4)\n', (2176, 2184), True, 'import numpy as np\n'), ((2030, 2048), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2044, 2048), True, 'import numpy as np\n'), ((2051, 2069), 
'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2065, 2069), True, 'import numpy as np\n'), ((2109, 2127), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2123, 2127), True, 'import numpy as np\n'), ((2130, 2148), 'numpy.linalg.norm', 'np.linalg.norm', (['v3'], {}), '(v3)\n', (2144, 2148), True, 'import numpy as np\n'), ((2188, 2206), 'numpy.linalg.norm', 'np.linalg.norm', (['v3'], {}), '(v3)\n', (2202, 2206), True, 'import numpy as np\n'), ((2209, 2227), 'numpy.linalg.norm', 'np.linalg.norm', (['v4'], {}), '(v4)\n', (2223, 2227), True, 'import numpy as np\n')] |
import copy
import numpy as np
import numpy.random as npr
import matplotlib
import matplotlib.pyplot as plt
def SubPlotData(K, data, labels2, means2):
if data.shape[0] == 2:
proj = "rectilinear"
elif data.shape[0] == 3:
proj = "3d"
else:
return -1
"Generate plots with more than 10 colors"
"Deepcopy or else you change the colors matplotlib calls"
colors = copy.deepcopy(matplotlib.colors.get_named_colors_mapping())
[colors.popitem() for i in range(10)] # Remove starting colors that aren't as good
fig, axs = plt.subplots(1, 2, sharey=True, sharex=True, tight_layout=True,
subplot_kw={'projection': proj})
for k in range(K):
if data.shape[0] == 2:
c = colors.popitem()[1]
[axs[a].plot(data[0, np.where(labels2[a] == k)].squeeze(),
data[1, np.where(labels2[a] == k)].squeeze(),
'.',
alpha=0.50,
color=c)
for a in range(2)]
c = colors.popitem()[1]
[axs[a].plot(means2[a, k, 0],
means2[a, k, 1],
'.',
ms=10,
zorder=K+1,
color=c) for a in range(2)]
elif data.shape[0] == 3:
c = colors.popitem()[1]
[axs[a].plot(data[0, np.where(labels2[a] == k)].squeeze(),
data[1, np.where(labels2[a] == k)].squeeze(),
data[2, np.where(labels2[a] == k)].squeeze(),
'.',
alpha=0.50,
color=c)
for a in range(2)]
c = colors.popitem()[1]
[axs[a].plot(means2[a, k, 0],
means2[a, k, 1],
means2[a, k, 2],
'.',
ms=10,
zorder=K+1,
color=c) for a in range(2)]
[axs[a].legend(sum([["L"+str(l)+" Data","L"+str(l)+" Mean"] for l in range(K)], [])) for a in range(2)]
axs[0].set_title("True Data Distributions")
axs[1].set_title("Predicted Distributions")
plt.draw()
| [
"matplotlib.pyplot.draw",
"matplotlib.colors.get_named_colors_mapping",
"numpy.where",
"matplotlib.pyplot.subplots"
] | [((570, 671), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'sharex': '(True)', 'tight_layout': '(True)', 'subplot_kw': "{'projection': proj}"}), "(1, 2, sharey=True, sharex=True, tight_layout=True, subplot_kw=\n {'projection': proj})\n", (582, 671), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2292), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2290, 2292), True, 'import matplotlib.pyplot as plt\n'), ((421, 465), 'matplotlib.colors.get_named_colors_mapping', 'matplotlib.colors.get_named_colors_mapping', ([], {}), '()\n', (463, 465), False, 'import matplotlib\n'), ((818, 843), 'numpy.where', 'np.where', (['(labels2[a] == k)'], {}), '(labels2[a] == k)\n', (826, 843), True, 'import numpy as np\n'), ((888, 913), 'numpy.where', 'np.where', (['(labels2[a] == k)'], {}), '(labels2[a] == k)\n', (896, 913), True, 'import numpy as np\n'), ((1441, 1466), 'numpy.where', 'np.where', (['(labels2[a] == k)'], {}), '(labels2[a] == k)\n', (1449, 1466), True, 'import numpy as np\n'), ((1511, 1536), 'numpy.where', 'np.where', (['(labels2[a] == k)'], {}), '(labels2[a] == k)\n', (1519, 1536), True, 'import numpy as np\n'), ((1581, 1606), 'numpy.where', 'np.where', (['(labels2[a] == k)'], {}), '(labels2[a] == k)\n', (1589, 1606), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Set of isotropic filters to use in calculations
import numpy as np
import typeutils as tu
def Wtophatkspace(kR, R = None ) :
"""Returns the Fourier Transform of the real space top hat window
function which is 1 when r < R, and 0 otherwise.
args:
kR: unit less quantity
returns:
array like, window function in k space
"""
filt = 3*(np.sin(kR) - (kR)*np.cos(kR))/(kR)**3
return filt
def Wtophatkspacesq (kR , R = None):
filt = Wtophatkspace(kR, R )
return filt*filt
def dWtophatkspacedR ( kR ,R ) :
"""
Returns the derivative of the Fourier Transform of the real space
top hat window function which is 1 when r < R, and 0 otherwise with
respect to R.
args:
kR : unit less quantity.
R : Radius
returns:
array like derivative of Wtophatkspace with respect to R
"""
filt = 3.0 *( np.sin(kR) / kR - Wtophatkspace (kR ))
if tu.isiterable(R):
x = filt.transpose()/R
#returns an array whose elesements are k values for each R
return x.transpose()
else:
return filt /R
def dWtophatkspacedlnR(kR):
    """
    Returns the derivative of the Fourier Transform of the real space
    top hat window function (1 when r < R, 0 otherwise) with respect
    to ln R.  Written in terms of kR the result no longer depends on R
    explicitly (d(kR)/dlnR = kR), which is why no R argument is needed.
    args:
        kR : unit less quantity.
    returns:
        array like, derivative of Wtophatkspace with respect to ln R
    """
    # Dead commented-out R-division code removed: it belonged to
    # dWtophatkspacedR, not to the logarithmic derivative.
    return 3.0 * (np.sin(kR) / kR - Wtophatkspace(kR))
def dWtophatkspacesqdR(kR, R):
    """Derivative of the squared k-space top-hat window with respect to R,
    via the chain rule: d(W**2)/dR = 2 * W * dW/dR."""
    return 2.0 * dWtophatkspacedR(kR, R) * Wtophatkspace(kR, R)
def dWtophatkspacesqdlnR(kR, R=None):
    """Derivative of the squared k-space top-hat window with respect to
    ln R: d(W**2)/dlnR = 2 * W * dW/dlnR.  R is unused (signature kept
    uniform with the other filters)."""
    return 2.0 * dWtophatkspacedlnR(kR) * Wtophatkspace(kR)
if __name__ == "__main__":
    # Quick visual sanity check: squared window versus k for a few radii.
    import matplotlib.pyplot as plt
    import numpy as np
    k = np.logspace(-6, 5, 100)
    for radius, tag in ((2.0, "R= 2.0"), (5.0, "R =5.0"), (8.0, "R= 8.0")):
        plt.plot(k, Wtophatkspace(k * radius) ** 2.0, label=tag)
    plt.legend(loc="best")
    plt.ylabel("Wtophatkspace(k*R)")
    plt.xlabel(r'$k h^{-1} Mpc$')
    plt.xscale('log')
    plt.show()
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show",
"numpy.logspace",
"matplotlib.pyplot.legend",
"typeutils.isiterable",
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((893, 909), 'typeutils.isiterable', 'tu.isiterable', (['R'], {}), '(R)\n', (906, 909), True, 'import typeutils as tu\n'), ((1977, 2000), 'numpy.logspace', 'np.logspace', (['(-6)', '(5)', '(100)'], {}), '(-6, 5, 100)\n', (1988, 2000), True, 'import numpy as np\n'), ((2226, 2248), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2236, 2248), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2283), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Wtophatkspace(k*R)"""'], {}), "('Wtophatkspace(k*R)')\n", (2261, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2313), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k h^{-1} Mpc$"""'], {}), "('$k h^{-1} Mpc$')\n", (2295, 2313), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2333), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2326, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2343, 2345), True, 'import matplotlib.pyplot as plt\n'), ((371, 381), 'numpy.sin', 'np.sin', (['kR'], {}), '(kR)\n', (377, 381), True, 'import numpy as np\n'), ((848, 858), 'numpy.sin', 'np.sin', (['kR'], {}), '(kR)\n', (854, 858), True, 'import numpy as np\n'), ((1374, 1384), 'numpy.sin', 'np.sin', (['kR'], {}), '(kR)\n', (1380, 1384), True, 'import numpy as np\n'), ((389, 399), 'numpy.cos', 'np.cos', (['kR'], {}), '(kR)\n', (395, 399), True, 'import numpy as np\n')] |
###############################################################################
###############################################################################
# Name: set_adc.py
# Coder: <NAME>
# Description:
###############################################################################
###############################################################################
# Libraries and Modules
###############################################################################
import struct
import time
import argparse
import higgs
import socket as sc
import numpy as np
import matplotlib.pyplot as plt
from log import logger
###############################################################################
# Constants
###############################################################################
VGA_GAIN = 0 # Corresponds to 26 dB gain
GAIN_ADDR = 0x00000200  # address bits OR'd into the VGA gain command word
VGA_CHANNEL = {'A':0x00010000, 'B':0x00000000}  # channel-select bit for command words
RX_CHANNEL = 'B'  # default receive channel for the CLI
PACKET_DELAY = 0.005  # seconds to pause after sending each command packet
MAX_ATTEN_CHANNEL_A = 31  # DSA upper limit on channel A (1 dB steps, per main())
MAX_ATTEN_CHANNEL_B = 30  # DSA upper limit on channel B (2 dB steps, per main())
MIN_ATTEN = 0  # DSA lower limit (dB)
BLOCK_SIZE = 6  # encoded block size; 2**(BLOCK_SIZE+4) = 1024 samples (see create_block_size_map)
###############################################################################
# Class Definitions
###############################################################################
class SetADC:
    """Configure the receive-chain attenuators (VGA / DSA) and collect ADC
    saturation statistics through a HiggsController.

    Commands are bit-packed words OR'd together from constants in the
    ``higgs`` module plus channel/attenuation fields, then sent over the
    network; replies come back via ``get_cmd``.
    """
    def __init__(self, host=higgs.HOST, our_host=higgs.OUR_HOST,
                 rx_cmd_port=higgs.RX_CMD_PORT, tx_cmd_port=higgs.TX_CMD_PORT,
                 connect_type=sc.SOCK_DGRAM):
        """Open a controller connection; defaults come from the higgs module."""
        self.higgs_controller = higgs.HiggsController(host=host,
                                                    our_host=our_host,
                                                    rx_cmd_port=rx_cmd_port,
                                                    tx_cmd_port=tx_cmd_port,
                                                    connect_type=connect_type)
        # Attenuation sweep used by the mapping helpers: 0, 2, ..., 30 dB.
        self.attenuation_value = np.arange(0, 32, 2)
    def set_vga_attenuation(self, attenuation, channel):
        """Send a VGA gain command for ``channel`` ('A' or 'B')."""
        cmd_packet = higgs.VGA_GAIN_CMD|\
                     VGA_CHANNEL[channel]|\
                     GAIN_ADDR|\
                     attenuation
        self.higgs_controller.send_cmd('eth', cmd_packet, PACKET_DELAY)
        logger.info('Setting attenuation of Variable Gain Attenuator (VGA) ' +\
                    'to %d at channel %s', attenuation, channel)
    def set_dsa_attenuation(self, attenuation, channel):
        """Send a Digital Step Attenuator command for ``channel``."""
        attenuation = self._get_attenuation(attenuation, channel)
        cmd_packet = higgs.DSA_GAIN_CMD|VGA_CHANNEL[channel]|attenuation
        self.higgs_controller.send_cmd('eth', cmd_packet, PACKET_DELAY)
        logger.info('Setting attenuation of Digital Step Attenuator (DSA) ' +\
                    'to %d at channel %s', attenuation, channel)
    def measure_saturation(self, fpga, attenuation, block_size, channel):
        """Ask ``fpga`` for the saturated-sample count at a given attenuation.

        The 32-bit reply packs the total sample count in the low 16 bits and
        the saturated count in the high 16 bits.
        Returns (saturated_ratio, total_sample).
        """
        attenuation = self._get_attenuation(attenuation, channel)
        cmd_packet = higgs.SATURATION_RATIO_CMD|attenuation|block_size
        self.higgs_controller.send_cmd(fpga, cmd_packet, PACKET_DELAY)
        saturated_samples = self.higgs_controller.get_cmd(8)
        total_sample = saturated_samples[1]&0xffff
        saturated_count = saturated_samples[1]>>16
        # NOTE(review): divides by total_sample — raises ZeroDivisionError if
        # the FPGA reports zero samples; confirm that cannot happen.
        saturated_ratio = float(saturated_count)/float(total_sample)
        logger.info('Saturated samples: %d, ' +\
                    'Total samples %d, ' +\
                    'Saturated ratio %f',
                    saturated_count, total_sample, saturated_ratio)
        return saturated_ratio, total_sample
    def create_sat_map(self, channel):
        """Plot clipping ratio and estimated power versus attenuation."""
        power = []
        saturation_ratio = self._get_clipping_curve(BLOCK_SIZE, channel)
        for attenuation in self.attenuation_value:
            power.append(self.estimate_power('cs00', attenuation, channel))
        logger.info(saturation_ratio)
        logger.info(power)
        # Twin y-axes: clipping ratio (left, green) and power (right, blue).
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax2 = ax1.twinx()
        ax1.plot(self.attenuation_value, saturation_ratio, 'g-', marker='o')
        ax2.plot(self.attenuation_value, power, 'b-', marker='x')
        ax1.set_xlabel('Attenuation (dB)')
        ax1.set_ylabel('Clipping Ratio', color='g')
        ax2.set_ylabel('Power', color='b')
        plt.show()
    def create_block_size_map(self, channel):
        """Overlay clipping curves for block sizes 2**(4+4)..2**(6+4)."""
        block_size_fig = plt.figure(figsize=(16, 10))
        block_size_axes = block_size_fig.add_subplot(111)
        for block_size in range(4, 7):
            saturation_ratio = self._get_clipping_curve(block_size, channel)
            block_size_axes.plot(self.attenuation_value,
                                 saturation_ratio,
                                 marker='o',
                                 label=str(2**(block_size+4)) + ' Block Size')
        handles, labels = block_size_axes.get_legend_handles_labels()
        block_size_axes.legend(handles, labels)
        block_size_axes.set_xlabel('Attenuation (dB)')
        block_size_axes.set_ylabel('Clipping Ratio', color='g')
        plt.show()
    def create_sat_variation_map(self, iteration, channel):
        """Box-plot the spread of clipping ratios over repeated runs for
        several block sizes (larger block size -> fewer samples per ratio)."""
        end_block = 1
        handles = []
        # Horizontal positions spread so boxes for different block sizes
        # do not overlap at the same attenuation index.
        start_pos = np.arange(0, (6-end_block)*16, 6-end_block)
        boxplot_data = np.empty((0, 16))
        clipping_var_fig = plt.figure(figsize=(30, 16))
        clipping_var_axes = clipping_var_fig.add_subplot(111)
        for block_size in range(6, end_block, -1):
            for each_run in range(iteration):
                saturation_ratio = self._get_clipping_curve(block_size, channel)
                boxplot_data = np.append(boxplot_data, [saturation_ratio],
                                         axis=0)
            boxes = clipping_var_axes.boxplot(boxplot_data,
                                              patch_artist=True,
                                              positions=start_pos+block_size)
            self._set_box_color(boxes, block_size)
            handles.append(boxes['boxes'][0])
        clipping_var_axes.legend(handles, [1024, 512, 256, 128, 64, 32])
        clipping_var_axes.set_ylabel('Clipping Ratio', color='g')
        plt.show()
    def estimate_power(self, fpga, attenuation, channel):
        """Request a power estimate from ``fpga`` at the given attenuation."""
        attenuation = self._get_attenuation(attenuation, channel)
        cmd_packet = higgs.POWER_ESTIMATION_CMD|attenuation
        self.higgs_controller.send_cmd(fpga, cmd_packet, PACKET_DELAY)
        power = self.higgs_controller.get_cmd(8)[1];
        logger.info(power)
        return power
    def test_agc(self):
        """Trigger an AGC test and log the attenuation it converged to."""
        cmd_packet = higgs.AGC_TEST_CMD;
        self.higgs_controller.send_cmd('cs00', cmd_packet, PACKET_DELAY)
        attenuation = self.higgs_controller.get_cmd(8)[1];
        # Decode: drop the low 6 bits, each remaining step is 2 dB.
        logger.info((attenuation>>6)*2)
    def _get_attenuation(self, attenuation, channel):
        """Encode an attenuation (dB) into the channel-specific bit field."""
        if channel == 'A':
            return (attenuation*4)<<8
        else:
            return (attenuation*4)<<3
    def _get_clipping_curve(self, block_size, channel):
        """Measure the clipping ratio across the full attenuation sweep."""
        saturation_ratio = []
        for attenuation in self.attenuation_value:
            sat_ratio = self.measure_saturation('cs00',
                                                attenuation,
                                                block_size,
                                                channel)
            saturation_ratio.append(sat_ratio[0])
        return saturation_ratio
    def _set_box_color(self, boxes, block_size):
        """Color every box in a boxplot group by its block-size index."""
        box_color = ['indigo', 'violet', 'red',
                     'blue', 'green', 'orange', 'yellow']
        for box in boxes['boxes']:
            box.set(facecolor=box_color[block_size])
###############################################################################
# Method Definitions
###############################################################################
def main():
    """Command-line entry point: parse attenuator/measurement options and
    drive a SetADC instance accordingly.  Each flag maps to one SetADC
    method; measurement flags first bind the receive command socket."""
    parser = higgs.create_parser()
    parser.add_argument('-vga', '--attenuate_vga',
                        help='set attenuation value of VGA',
                        nargs='?',
                        type=int)
    parser.add_argument('-dsa', '--attenuate_dsa',
                        help='set attenuation value of DSA',
                        nargs='?',
                        type=int)
    parser.add_argument('-sat', '--saturation',
                        help='return a ratio of saturated samples. ' +\
                             'Set attenuation value',
                        nargs='?',
                        type=int)
    parser.add_argument('-var', '--variation_map',
                        help='create a graph highlighting the ' +\
                             'variations in saturation vs gain',
                        nargs='?',
                        type=int)
    parser.add_argument('-map', '--saturation_map',
                        help='create a graph of saturation ratio ' +\
                             'versus gain',
                        action='store_true')
    parser.add_argument('-agc', '--test_agc',
                        help='test if AGC is properly working',
                        action='store_true')
    parser.add_argument('-bs', '--block_size',
                        help='create a saturation graph using ' +\
                             'different block size',
                        action='store_true')
    parser.add_argument('-p', '--power_estimation',
                        help='estimate power of signal',
                        nargs='?',
                        type=int)
    parser.add_argument('-c', '--channel',
                        help='set receive channel',
                        nargs='?',
                        type=str,
                        choices=['A', 'B'],
                        default=RX_CHANNEL)
    args = parser.parse_args()
    set_adc = SetADC(host=args.host, our_host=args.our_host,
                     rx_cmd_port=args.rx_cmd_port, tx_cmd_port=args.tx_cmd_port)
    if args.attenuate_vga != None:
        set_adc.set_vga_attenuation(args.attenuate_vga, args.channel)
    if args.attenuate_dsa != None:
        # DSA limits differ per channel: A is 0-31 dB in 1 dB steps,
        # B is 0-30 dB in 2 dB steps.
        atten_val = args.attenuate_dsa
        if args.channel == 'A':
            correct_range = (MAX_ATTEN_CHANNEL_A >= atten_val >= MIN_ATTEN)
            # NOTE(review): atten_val % 1 is always 0 for an int, so the
            # "steps of 1dB" check never rejects anything — confirm intent.
            if correct_range and not (atten_val%1):
                set_adc.set_dsa_attenuation(atten_val, args.channel)
            else:
                message = 'Attenuation must be between 0 - 31dB in steps '
                message += 'of 1dB for Channel A'
                logger.info(message)
        elif args.channel == 'B':
            correct_range = (MAX_ATTEN_CHANNEL_B >= atten_val >= MIN_ATTEN)
            if correct_range and not(atten_val%2):
                set_adc.set_dsa_attenuation(atten_val, args.channel)
            else:
                message = 'Attenuation must be between 0 - 30dB in steps '
                message += 'of 2dB for Channel B'
                logger.info(message)
    if args.saturation != None:
        # NOTE(review): MAX_ATTEN is not defined in this module (only
        # MAX_ATTEN_CHANNEL_A/B are) — this line likely raises NameError;
        # verify whether MAX_ATTEN_CHANNEL_B was intended.
        correct_range = (MAX_ATTEN >= args.saturation >= MIN_ATTEN)
        if correct_range and not(args.saturation%2):
            set_adc.higgs_controller.bind_rx_cmd_soc(1.0)
            set_adc.measure_saturation('cs00',
                                       args.saturation,
                                       BLOCK_SIZE,
                                       args.channel)
        else:
            logger.info('Attenuation must be between 0 - 30dB in steps of 2dB')
    if args.saturation_map:
        set_adc.higgs_controller.bind_rx_cmd_soc(1.0)
        set_adc.create_sat_map(args.channel)
    if args.block_size:
        set_adc.higgs_controller.bind_rx_cmd_soc(1.0)
        set_adc.create_block_size_map(args.channel)
    if args.test_agc:
        set_adc.higgs_controller.bind_rx_cmd_soc(1.0)
        set_adc.test_agc()
    if args.variation_map != None:
        set_adc.higgs_controller.bind_rx_cmd_soc(1.0)
        set_adc.create_sat_variation_map(args.variation_map, args.channel)
    if args.power_estimation != None:
        # NOTE(review): same undefined MAX_ATTEN as the -sat branch above.
        correct_range = (MAX_ATTEN >= args.power_estimation >= MIN_ATTEN)
        if correct_range and not(args.power_estimation%2):
            set_adc.higgs_controller.bind_rx_cmd_soc(1.0)
            set_adc.estimate_power('cs00', args.power_estimation, args.channel)
###############################################################################
# Main Script
###############################################################################
if __name__ == "__main__":
main() | [
"higgs.create_parser",
"matplotlib.pyplot.show",
"log.logger.info",
"numpy.empty",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.arange",
"higgs.HiggsController"
] | [((7718, 7739), 'higgs.create_parser', 'higgs.create_parser', ([], {}), '()\n', (7737, 7739), False, 'import higgs\n'), ((1440, 1572), 'higgs.HiggsController', 'higgs.HiggsController', ([], {'host': 'host', 'our_host': 'our_host', 'rx_cmd_port': 'rx_cmd_port', 'tx_cmd_port': 'tx_cmd_port', 'connect_type': 'connect_type'}), '(host=host, our_host=our_host, rx_cmd_port=rx_cmd_port,\n tx_cmd_port=tx_cmd_port, connect_type=connect_type)\n', (1461, 1572), False, 'import higgs\n'), ((1818, 1837), 'numpy.arange', 'np.arange', (['(0)', '(32)', '(2)'], {}), '(0, 32, 2)\n', (1827, 1837), True, 'import numpy as np\n'), ((2146, 2265), 'log.logger.info', 'logger.info', (["('Setting attenuation of Variable Gain Attenuator (VGA) ' +\n 'to %d at channel %s')", 'attenuation', 'channel'], {}), "('Setting attenuation of Variable Gain Attenuator (VGA) ' +\n 'to %d at channel %s', attenuation, channel)\n", (2157, 2265), False, 'from log import logger\n'), ((2560, 2678), 'log.logger.info', 'logger.info', (["('Setting attenuation of Digital Step Attenuator (DSA) ' +\n 'to %d at channel %s')", 'attenuation', 'channel'], {}), "('Setting attenuation of Digital Step Attenuator (DSA) ' +\n 'to %d at channel %s', attenuation, channel)\n", (2571, 2678), False, 'from log import logger\n'), ((3219, 3355), 'log.logger.info', 'logger.info', (["('Saturated samples: %d, ' + 'Total samples %d, ' + 'Saturated ratio %f')", 'saturated_count', 'total_sample', 'saturated_ratio'], {}), "('Saturated samples: %d, ' + 'Total samples %d, ' +\n 'Saturated ratio %f', saturated_count, total_sample, saturated_ratio)\n", (3230, 3355), False, 'from log import logger\n'), ((3729, 3758), 'log.logger.info', 'logger.info', (['saturation_ratio'], {}), '(saturation_ratio)\n', (3740, 3758), False, 'from log import logger\n'), ((3767, 3785), 'log.logger.info', 'logger.info', (['power'], {}), '(power)\n', (3778, 3785), False, 'from log import logger\n'), ((3800, 3812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), 
'()\n', (3810, 3812), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4173, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4248, 4276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (4258, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4928, 4938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4936, 4938), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5112), 'numpy.arange', 'np.arange', (['(0)', '((6 - end_block) * 16)', '(6 - end_block)'], {}), '(0, (6 - end_block) * 16, 6 - end_block)\n', (5072, 5112), True, 'import numpy as np\n'), ((5130, 5147), 'numpy.empty', 'np.empty', (['(0, 16)'], {}), '((0, 16))\n', (5138, 5147), True, 'import numpy as np\n'), ((5175, 5203), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 16)'}), '(figsize=(30, 16))\n', (5185, 5203), True, 'import matplotlib.pyplot as plt\n'), ((6029, 6039), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6037, 6039), True, 'import matplotlib.pyplot as plt\n'), ((6357, 6375), 'log.logger.info', 'logger.info', (['power'], {}), '(power)\n', (6368, 6375), False, 'from log import logger\n'), ((6604, 6639), 'log.logger.info', 'logger.info', (['((attenuation >> 6) * 2)'], {}), '((attenuation >> 6) * 2)\n', (6615, 6639), False, 'from log import logger\n'), ((11439, 11506), 'log.logger.info', 'logger.info', (['"""Attenuation must be between 0 - 30dB in steps of 2dB"""'], {}), "('Attenuation must be between 0 - 30dB in steps of 2dB')\n", (11450, 11506), False, 'from log import logger\n'), ((5475, 5526), 'numpy.append', 'np.append', (['boxplot_data', '[saturation_ratio]'], {'axis': '(0)'}), '(boxplot_data, [saturation_ratio], axis=0)\n', (5484, 5526), True, 'import numpy as np\n'), ((10564, 10584), 'log.logger.info', 'logger.info', (['message'], {}), '(message)\n', (10575, 10584), False, 'from log import logger\n'), ((10974, 10994), 
'log.logger.info', 'logger.info', (['message'], {}), '(message)\n', (10985, 10994), False, 'from log import logger\n')] |
import argparse
import biggie
import time
import numpy as np
import matplotlib
import os
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
import seaborn
import models
import datatools
# Module-level figure/axes reused by render() for every animation frame.
FIG = plt.figure(figsize=(10, 10))
AX = FIG.gca()
def render(z_out, y_true, fps, output_file, title='', dpi=300):
    """Render a sequence of 2-D embeddings as a scatter-plot movie.

    z_out: sequence of per-frame coordinate arrays; each frame unpacks as
        (x, y) in the loop below — assumes a 2-row layout per frame, and the
        stacked array indexes as z[:, 0]/z[:, 1] — TODO confirm shapes.
    y_true: integer class labels (0-9) used to color the points.
    fps: frames per second of the output movie.
    output_file: path the ffmpeg writer saves to.
    title: movie metadata title.
    dpi: resolution passed to the writer.
    """
    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title=title,
                    artist='Matplotlib',
                    comment='Learntest')
    writer = FFMpegWriter(fps=fps, metadata=metadata)
    # Create string highlighters
    palette = seaborn.color_palette("Set3", 10)
    colors = np.asarray([palette[y] for y in y_true])
    dot_params = dict(marker='o', s=50, alpha=0.75, c=colors)
    z = np.asarray(z_out)
    # Fix square axis limits once, centered on the data with a small margin,
    # so points don't jump as limits rescale between frames.
    width = z[:, 0].max() - z[:, 0].min()
    height = z[:, 1].max() - z[:, 1].min()
    span = max([width, height])
    x_min, x_max = z[:, 0].mean() - span / 1.75, z[:, 0].mean() + span / 1.75
    y_min, y_max = z[:, 1].mean() - span / 1.75, z[:, 1].mean() + span / 1.75
    AX.set_xlim(x_min, x_max)
    AX.set_ylim(y_min, y_max)
    # print(AX.set_xlim(-max_val, max_val))
    # print(AX.set_ylim(-max_val, max_val))
    # handle.set_visible(True)
    with writer.saving(FIG, output_file, dpi):
        for frame_num, (x, y) in enumerate(z_out):
            # handle.set_offsets([x, y])
            # Clear and redraw, re-applying the fixed limits clear() resets.
            AX.clear()
            AX.scatter(x, y, **dot_params)
            AX.set_xlim(x_min, x_max)
            AX.set_ylim(y_min, y_max)
            plt.draw()
            writer.grab_frame(pad_inches=0)
            if (frame_num % fps) == 0:
                print("[{}] Finished {} seconds."
                      "".format(time.asctime(), frame_num/float(fps)))
def main(mnist_file, param_file, fps, output_file):
    """Replay a stash of model checkpoints over a fixed validation batch and
    render the trajectory of the resulting 2-D embeddings as a movie.

    mnist_file: path to an MNIST npz file understood by datatools.
    param_file: biggie Stash of parameter checkpoints; replayed in sorted
        key order.
    fps: frames per second forwarded to render().
    output_file: movie path forwarded to render().
    """
    train, valid, test = datatools.load_mnist_npz(mnist_file)
    trainer, predictor = models.pwrank()
    params = biggie.Stash(param_file)
    # Fixed random subset of 500 validation points, reused for every frame.
    idx = np.random.permutation(len(valid[0]))[:500]
    x_in = valid[0][idx]
    y_true = valid[1][idx]
    z_out = []
    for pkey in sorted(params.keys()):
        print("[{}] Processing - {}".format(time.asctime(), pkey))
        # Load this checkpoint's weights, then embed the batch.
        predictor.param_values = params.get(pkey)
        z_out += [predictor(x_in)['z_out'].T]
    render(z_out, y_true, fps, output_file)
if __name__ == "__main__":
    # CLI wrapper: point the script at an MNIST npz dump and a parameter
    # stash, then render the embedding animation to a movie file.
    parser = argparse.ArgumentParser(
        description="")
    parser.add_argument("mnist_file",
                        metavar="mnist_file", type=str,
                        help="Filepath to the mnist data.")
    parser.add_argument("param_file",
                        metavar="param_file", type=str,
                        help="")
    parser.add_argument("output_file",
                        metavar="output_file", type=str,
                        help="File path to save output movie.")
    parser.add_argument("--fps",
                        metavar="--fps", type=float, default=10.0,
                        help="Framerate for the fretmap.")
    # parser.add_argument("title",
    #                     metavar="title", type=str,
    #                     help="Title for the resulting video.")
    args = parser.parse_args()
    main(args.mnist_file, args.param_file, args.fps, args.output_file)
| [
"time.asctime",
"datatools.load_mnist_npz",
"argparse.ArgumentParser",
"biggie.Stash",
"numpy.asarray",
"models.pwrank",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"matplotlib.use",
"seaborn.color_palette"
] | [((90, 111), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (104, 111), False, 'import matplotlib\n'), ((241, 269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (251, 269), True, 'import matplotlib.pyplot as plt\n'), ((616, 649), 'seaborn.color_palette', 'seaborn.color_palette', (['"""Set3"""', '(10)'], {}), "('Set3', 10)\n", (637, 649), False, 'import seaborn\n'), ((663, 703), 'numpy.asarray', 'np.asarray', (['[palette[y] for y in y_true]'], {}), '([palette[y] for y in y_true])\n', (673, 703), True, 'import numpy as np\n'), ((775, 792), 'numpy.asarray', 'np.asarray', (['z_out'], {}), '(z_out)\n', (785, 792), True, 'import numpy as np\n'), ((1833, 1869), 'datatools.load_mnist_npz', 'datatools.load_mnist_npz', (['mnist_file'], {}), '(mnist_file)\n', (1857, 1869), False, 'import datatools\n'), ((1895, 1910), 'models.pwrank', 'models.pwrank', ([], {}), '()\n', (1908, 1910), False, 'import models\n'), ((1924, 1948), 'biggie.Stash', 'biggie.Stash', (['param_file'], {}), '(param_file)\n', (1936, 1948), False, 'import biggie\n'), ((2360, 2399), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (2383, 2399), False, 'import argparse\n'), ((1539, 1549), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1547, 1549), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2168), 'time.asctime', 'time.asctime', ([], {}), '()\n', (2166, 2168), False, 'import time\n'), ((1715, 1729), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1727, 1729), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019/10/22 21:42
# @Author : Esbiya
# @Email : <EMAIL>
# @File : geetest.py
# @Software: PyCharm
import os
import random
import requests
import time
import json
from PIL import Image
import cv2
import numpy as np
# Shared HTTP session for all captcha requests; the headers mimic a desktop
# Chrome browser coming from the ximalaya passport page.
session = requests.session()
session.headers = {
    'Content-Type': 'application/json',
    'Origin': 'https://passport.ximalaya.com',
    'Referer': 'https://passport.ximalaya.com/page/web/forget',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
}
def _pic_download(url, type):
    """
    Download a captcha image into the local ``images`` folder.
    :param url: image url
    :param type: basename of the saved file (e.g. 'slider', 'captcha');
                 the name shadows the builtin ``type`` but is kept for
                 interface compatibility
    :return: path of the saved image file
    """
    # NOTE(review): abspath('...') resolves to a directory literally named
    # '...' under the CWD — looks like a typo for '.'; kept as-is because
    # _get_distance builds the same base path.
    save_path = os.path.join(os.path.abspath('...'), 'images')
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair, and
    # os.path.join replaces the hard-coded '\\' separators that only worked
    # on Windows.
    os.makedirs(save_path, exist_ok=True)
    img_path = os.path.join(save_path, '{}.jpg'.format(type))
    img_data = session.get(url).content
    with open(img_path, 'wb') as f:
        f.write(img_data)
    return img_path
def _cut_slider(path):
    """
    Crop the slider image to the bounding box of its non-background
    (non-255) pixels and write the result back over the same file.
    :param path: file path of the slider image
    :return: (width, height) of the cropped slider
    """
    image = Image.open(path)
    # Hoisted out of the loops: the original called image.load() once per
    # pixel, doing O(width*height) redundant loads.
    pixels = image.load()
    xs = []
    ys = []
    for i in range(image.size[0]):
        for j in range(image.size[1]):
            if pixels[i, j] != 255:
                xs.append(i)
                ys.append(j)
    box = (np.min(xs), np.min(ys), np.max(xs), np.max(ys))
    result = image.crop(box)
    result.convert('RGB').save(path)
    # result.show()
    return result.size[0], result.size[1]
def _get_distance(slider_url, captcha_url):
    """
    Locate the captcha gap: template-match the slider piece against the
    background image and return the horizontal offset of the best match.
    :param slider_url: slider-piece image url
    :param captcha_url: background captcha image url
    :return: x offset in pixels (best-match column + 15)
    """
    save_path = os.path.abspath('...') + '\\' + 'images'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    # download the slider piece via the helper above
    slider_path = _pic_download(slider_url, 'slider')
    # download the background captcha via the helper above
    captcha_path = _pic_download(captcha_url, 'captcha')
    # compute the puzzle restore distance by template matching
    target = cv2.imread(slider_path, 0)
    template = cv2.imread(captcha_path, 0)
    temp = save_path + '\\' + 'temp.jpg'
    targ = save_path + '\\' + 'targ.jpg'
    cv2.imwrite(targ, target)
    w, h = _cut_slider(slider_path)
    cv2.imwrite(temp, template)
    target = cv2.imread(targ)
    target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
    # invert the slider piece so it matches the dark gap in the background
    target = abs(255 - target)
    cv2.imwrite(targ, target)
    target = cv2.imread(targ)
    template = cv2.imread(temp)
    result = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
    # best-match location; here x is the row index and y the column index
    x, y = np.unravel_index(result.argmax(), result.shape)
    # use PIL Image for a visual check of the located gap
    image = Image.open(captcha_path)
    xy = (y + 15, x, y + w, x + h)
    # crop the matched region
    imagecrop = image.crop(xy)
    # save the cropped gap
    imagecrop.save(save_path + '\\' + "new_image.jpg")
    # NOTE(review): show() opens an external image viewer — side effect in a
    # helper function; presumably debugging leftover, confirm before removal.
    imagecrop.show()
    return int(y + 15)
def process_distance(distance):
    """
    Map the measured gap offset (pixels) onto the slider-track coordinate
    expected by the verification endpoint.
    :param distance: raw gap offset in pixels
    :return: integer track position
    """
    # Linear rescale of the offset; kept in the original evaluation order so
    # the floating-point result is bit-identical.
    numerator = (distance + 10) * (384 - 85.64800000000001 + 24 * 0.808)
    x = -12 * 0.808 + numerator / (384 - 40)
    return int(round(x / 0.808 + 44))
def _init_slider():
    """
    Fetch a fresh slider-captcha challenge.
    :return: dict with 'captcha_url' (background) and 'slider_url' (piece),
             or None if the service did not return a challenge
    """
    url = 'https://mobile.ximalaya.com/captcha-web/check/slide/get?bpId=139&sessionId=xm_k21uo8e150s7pt'
    resp = session.get(url).json()
    if resp['result'] != 'true':
        return None
    data = resp['data']
    return {
        'captcha_url': data['bgUrl'],
        'slider_url': data['fgUrl']
    }
def _slider_verify(distance):
    """
    Submit the slide trajectory to the verification endpoint.
    :param distance: gap offset (pixels) to be mapped via process_distance
    :return: the verification token string, or None on failure
    """
    url_ = 'https://mobile.ximalaya.com/captcha-web/valid/slider'
    # Randomized start point and timing to look like a human drag.
    start_x = random.randint(795, 810)
    start_y = random.randint(325, 340)
    start_time = int(time.time() * 1000)
    time.sleep(random.randint(1, 2))
    # separators=(',', ':') produces compact JSON directly; the previous
    # .replace(' ', '') post-processing would have corrupted any value that
    # legitimately contained a space.
    payload = json.dumps({
        'bpId': 139,
        'sessionId': "xm_k21uo8e150s7pt",
        'type': "slider",
        'captchaText': f"{process_distance(distance)},{random.randint(-5, 5)}",
        'startX': start_x,
        'startY': start_y,
        'startTime': start_time
    }, separators=(',', ':'))
    resp = session.post(url_, data=payload).json()
    print(resp)
    # Membership test directly on the dict instead of building a throwaway
    # set of its keys.
    if 'token' in resp:
        return resp['token']
    return None
def crack():
    """
    Run the full slider-captcha flow: fetch a challenge, measure the gap,
    rescale to the on-screen coordinate system, and submit for verification.
    :return: dict with 'success' (1/0), 'message', and 'data' (token or None)
    """
    # fetch a fresh challenge
    init_data = _init_slider()
    # measure the gap offset on the full-size image
    gap = _get_distance(init_data['slider_url'], init_data['captcha_url'])
    # rescale to the on-screen captcha size (404 px vs the 500 px original)
    gap = int(round(gap * (404 / 500)))
    # submit and collect the token
    token = _slider_verify(gap)
    if not token:
        return {
            'success': 0,
            'message': '校验失败! ',
            'data': None
        }
    return {
        'success': 1,
        'message': '校验通过! ',
        'data': {
            'token': token
        }
    }
if __name__ == '__main__':
    # Manual smoke test: run one full crack attempt and print the outcome.
    x = crack()
    print(x)
| [
"requests.session",
"os.mkdir",
"os.path.abspath",
"random.randint",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"PIL.Image.open",
"time.time",
"cv2.imread",
"numpy.min",
"numpy.max",
"cv2.matchTemplate"
] | [((269, 287), 'requests.session', 'requests.session', ([], {}), '()\n', (285, 287), False, 'import requests\n'), ((1076, 1092), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1086, 1092), False, 'from PIL import Image\n'), ((1939, 1965), 'cv2.imread', 'cv2.imread', (['slider_path', '(0)'], {}), '(slider_path, 0)\n', (1949, 1965), False, 'import cv2\n'), ((1981, 2008), 'cv2.imread', 'cv2.imread', (['captcha_path', '(0)'], {}), '(captcha_path, 0)\n', (1991, 2008), False, 'import cv2\n'), ((2095, 2120), 'cv2.imwrite', 'cv2.imwrite', (['targ', 'target'], {}), '(targ, target)\n', (2106, 2120), False, 'import cv2\n'), ((2161, 2188), 'cv2.imwrite', 'cv2.imwrite', (['temp', 'template'], {}), '(temp, template)\n', (2172, 2188), False, 'import cv2\n'), ((2202, 2218), 'cv2.imread', 'cv2.imread', (['targ'], {}), '(targ)\n', (2212, 2218), False, 'import cv2\n'), ((2232, 2272), 'cv2.cvtColor', 'cv2.cvtColor', (['target', 'cv2.COLOR_BGR2GRAY'], {}), '(target, cv2.COLOR_BGR2GRAY)\n', (2244, 2272), False, 'import cv2\n'), ((2308, 2333), 'cv2.imwrite', 'cv2.imwrite', (['targ', 'target'], {}), '(targ, target)\n', (2319, 2333), False, 'import cv2\n'), ((2347, 2363), 'cv2.imread', 'cv2.imread', (['targ'], {}), '(targ)\n', (2357, 2363), False, 'import cv2\n'), ((2379, 2395), 'cv2.imread', 'cv2.imread', (['temp'], {}), '(temp)\n', (2389, 2395), False, 'import cv2\n'), ((2409, 2466), 'cv2.matchTemplate', 'cv2.matchTemplate', (['target', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(target, template, cv2.TM_CCOEFF_NORMED)\n', (2426, 2466), False, 'import cv2\n'), ((2561, 2585), 'PIL.Image.open', 'Image.open', (['captcha_path'], {}), '(captcha_path)\n', (2571, 2585), False, 'from PIL import Image\n'), ((3548, 3572), 'random.randint', 'random.randint', (['(795)', '(810)'], {}), '(795, 810)\n', (3562, 3572), False, 'import random\n'), ((3587, 3611), 'random.randint', 'random.randint', (['(325)', '(340)'], {}), '(325, 340)\n', (3601, 3611), False, 'import random\n'), ((767, 
792), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (781, 792), False, 'import os\n'), ((802, 821), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (810, 821), False, 'import os\n'), ((1318, 1327), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1324, 1327), True, 'import numpy as np\n'), ((1329, 1338), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1335, 1338), True, 'import numpy as np\n'), ((1340, 1349), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1346, 1349), True, 'import numpy as np\n'), ((1351, 1360), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1357, 1360), True, 'import numpy as np\n'), ((1708, 1733), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (1722, 1733), False, 'import os\n'), ((1743, 1762), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (1751, 1762), False, 'import os\n'), ((3668, 3688), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (3682, 3688), False, 'import random\n'), ((715, 737), 'os.path.abspath', 'os.path.abspath', (['"""..."""'], {}), "('...')\n", (730, 737), False, 'import os\n'), ((1656, 1678), 'os.path.abspath', 'os.path.abspath', (['"""..."""'], {}), "('...')\n", (1671, 1678), False, 'import os\n'), ((3633, 3644), 'time.time', 'time.time', ([], {}), '()\n', (3642, 3644), False, 'import time\n'), ((3861, 3882), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (3875, 3882), False, 'import random\n')] |
"""
Active Fairness Run through questions
"""
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.calibration import _SigmoidCalibration
from sklearn.isotonic import IsotonicRegression
from joblib import Parallel, delayed
import pathos.multiprocessing as multiprocessing
from sklearn.model_selection import train_test_split
from numpy import genfromtxt
import numpy as np
from collections import Counter
import numpy as np
import pandas as pd
import time
import random
from copy import deepcopy
class TreeNode:
    '''
    A node in the "featured tree"
    '''
    def __init__(self, threshold, dummy = False):
        '''
        threshold: The threshold of this node
        dummy: whether it's a fake node or not (The fake node can only be the root node of the tree)
        '''
        self.threshold = threshold
        self.dummy = dummy
        # lower-level nodes hanging off each side of this threshold
        self.children_left = []
        self.children_right = []
        # node_set[0]: leaf ids on the left; node_set[1]: leaf ids on the right
        self.node_set = [set(), set()]
class TreeProcess:
def __init__(self, tree, all_features):
'''
tree: the tree trained by random forest
all_features: all possible features in this tree
'''
rootNode = 0
node_trace = []
self.children_left = tree.children_left
self.children_right = tree.children_right
child_left_dict = {}
child_right_dict = {}
for i in range(len(self.children_left)):
child_left_dict[i] = self.children_left[i]
for i in range(len(self.children_right)):
child_right_dict[i] = self.children_right[i]
self.threshold = tree.threshold
self.feature = tree.feature
self.values = tree.value
self.weighted_samples = tree.weighted_n_node_samples
# children_left, children_right, threshold, feature, values, weighted_samples used as a dict. Can provide corresponding value given an index of that node.
self.total_leaf_id = set() # ids of all leaves in this tree
self.feature2nodes = {} # dict, key is the name of features, value is the TreeNode object of the root for that 'feature tree'
self.nodeid2TreeNode = {} # dict, key is the id of nodes in original tree, value is the TreeNode object corresponds to that node
self.feature2threshold_list = {} # dict, key is name of features, value is a list of all thresholds for that feature
self.featureAndthreshold2delete_set = {} # dict, key is name of features, value is another dict, with key as threshold value, and value as a set of leaf node ids to be delted
self.tree_single_value_shape = np.shape(self.values[0]) # imitate the shape of 'self.values[0]'
self.unique_feature = set() # total features exist in this tree (different from self.feature, which are features)
if self.feature[rootNode] == -2:
assert False, "The root of a tree is a leaf, please verify"
for feature in all_features:
# construct feature tree for all features
queue = [rootNode]
if feature == self.feature[rootNode]:
# if the root node of original tree is of this feature, there is no need for a dummy
queue = []
self.nodeid2TreeNode[rootNode] = TreeNode(self.threshold[rootNode])
self.feature2nodes[feature] = self.nodeid2TreeNode[rootNode]
result_list = []
left_node = self.children_left[rootNode]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.nodeid2TreeNode[rootNode].children_left = result_list
result_list = []
right_node = self.children_right[rootNode]
self.node_traverse(result_list, right_node, feature) # get all non-leaf nodes of this feature in the right sub-tree
self.nodeid2TreeNode[rootNode].children_right = result_list
result_set = set()
self.node_traverse_leaf(result_set, left_node) # get all leaf nodes it can reach in the left sub-tree
self.nodeid2TreeNode[rootNode].node_set[0] = result_set
result_set = set()
self.node_traverse_leaf(result_set, right_node) # get all leaf nodes it can reach in the right sub-tree
self.nodeid2TreeNode[rootNode].node_set[1] = result_set
queue.append(left_node)
queue.append(right_node)
else:
# if the root node of original tree is not of this feature, we need to have a dummy root for this feature tree
self.feature2nodes[feature] = TreeNode(-1, True) # add a dummy root
result_list = []
left_node = self.children_left[rootNode]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.feature2nodes[feature].children_left = result_list
result_list = []
right_node = self.children_right[rootNode]
self.node_traverse(result_list, right_node, feature)# get all non-leaf nodes of this feature in the right sub-tree
self.feature2nodes[feature].children_right = result_list
while queue:
current_node = queue.pop(0)
if feature == self.feature[current_node]:
# find a node of given feature
self.nodeid2TreeNode[current_node] = TreeNode(self.threshold[current_node])
result_list = []
left_node = self.children_left[current_node]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.nodeid2TreeNode[current_node].children_left = result_list
result_list = []
right_node = self.children_right[current_node]
self.node_traverse(result_list, right_node, feature) # get all non-leaf nodes of this feature in the right sub-tree
self.nodeid2TreeNode[current_node].children_right = result_list
result_set = set()
self.node_traverse_leaf(result_set, left_node)
self.nodeid2TreeNode[current_node].node_set[0] = result_set # get all leaf nodes it can reach in the left sub-tree
result_set = set()
self.node_traverse_leaf(result_set, right_node)
self.nodeid2TreeNode[current_node].node_set[1] = result_set # get all leaf nodes it can reach in the right sub-tree
if self.feature[current_node] != -2:
# if not the leaf
queue.append(self.children_left[current_node])
queue.append(self.children_right[current_node])
for feature in all_features:
threshold_set = set()
queue = [self.feature2nodes[feature]] # get the root in feature tree
while queue:
currentNode = queue.pop(0)
if currentNode.dummy != True:
threshold_set.add(currentNode.threshold)
for node in currentNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentNode.children_right:
queue.append(self.nodeid2TreeNode[node])
threshold_list = sorted(list(threshold_set)) # rank the list in increasing threshold
self.feature2threshold_list[feature] = threshold_list
self.featureAndthreshold2delete_set[feature] = {}
for feature in self.feature2threshold_list.keys():
l = len(self.feature2threshold_list[feature])
if l == 0:
continue
for i in range(l):
threshold = self.feature2threshold_list[feature][i]
delete_set_equal_or_less = set() # the nodes to be deleted if equal or less than the threshold
queue = [self.feature2nodes[feature]] # the root of feature tree
while queue:
currentTreeNode = queue.pop(0)
if currentTreeNode.dummy == True:
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
else:
if threshold <= currentTreeNode.threshold:
# current value (threshold) is equal or less than threshold for this node, go to the left sub-tree for this node
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
delete_set_equal_or_less |= currentTreeNode.node_set[1] # delete all leaf-nodes can be reached in the right sub-tree
else:
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
delete_set_equal_or_less |= currentTreeNode.node_set[0]
self.featureAndthreshold2delete_set[feature][threshold] = delete_set_equal_or_less
delete_set_larger = set() # the nodes to be deleted if larger than the threshold
queue = [self.feature2nodes[feature]] # the root of feature tree
while queue:
currentTreeNode = queue.pop(0)
if currentTreeNode.dummy == True:
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
else:
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
delete_set_larger |= currentTreeNode.node_set[0]
self.featureAndthreshold2delete_set[feature][np.inf] = delete_set_larger
for feature in self.feature2threshold_list.keys():
if len(self.feature2threshold_list[feature]) > 0:
self.unique_feature.add(feature)
def node_traverse_leaf(self,
result_set,
currentNode):
# get all leaf nodes which can be reached starting from one node
nodeFeature = self.feature[currentNode]
if nodeFeature == -2:
result_set.add(currentNode)
self.total_leaf_id.add(currentNode)
return
self.node_traverse_leaf(result_set, self.children_left[currentNode])
self.node_traverse_leaf(result_set, self.children_right[currentNode])
def node_traverse(self,
result_list,
currentNode,
feature_target):
nodeFeature = self.feature[currentNode]
if nodeFeature == feature_target:
result_list.append(currentNode)
return
if nodeFeature == -2:
return
self.node_traverse(result_list, self.children_left[currentNode], feature_target)
self.node_traverse(result_list, self.children_right[currentNode], feature_target)
class ActiveFairness(object):
def __init__(self,
dataset_train, dataset_test,
clf,
sensitive_features = [],
target_label = []):
'''
dataset_train: training dataset, type: MexicoDataset()
dataset_test: testing dataset, type: MexicoDataset()
clf: trained randomforest classifier
sensitive_features: a list of sensitive features which should be removed when doing prediction
target_label: a list of features whose values are to be predicted
'''
assert len(target_label) == 1, print("Error in ActiveFairness, length of target_label not defined")
train = dataset_train.features
complete_data = dataset_train.metadata['previous'][0]
self.feature2columnmap = {}
test = dataset_test.features
feature_name = pd.DataFrame(complete_data.feature_names)
y_column_index = ~(feature_name.isin(sensitive_features + target_label).iloc[:, 0])
y_column_index_inverse = (feature_name.isin(sensitive_features + target_label).iloc[:, 0])
index = 0
for i in range(len(y_column_index_inverse)):
if y_column_index_inverse.iloc[i] == True:
self.feature2columnmap[complete_data.feature_names[i]] = index
index += 1
self.target_label = target_label
self.sensitive_features = sensitive_features
self.dataset_train = dataset_train
self.dataset_test = dataset_test
self.X_tr_sensitiveAtarget = pd.DataFrame(train[:, y_column_index_inverse]) # the dataframe of all samples in training dataset which only keeps the non-sensitive and target features
self.X_tr = pd.DataFrame(train[:, y_column_index])
self.y_tr = pd.DataFrame(self.dataset_train.labels[:, 0]).iloc[:, 0]
self.X_te_sensitiveAtarget = pd.DataFrame(test[:, y_column_index_inverse]) # the dataframe of all samples in testing dataset which only keeps the non-sensitive and target features
self.X_te = pd.DataFrame(test[:, y_column_index])
self.y_te = pd.DataFrame(self.dataset_test.labels[:, 0]).iloc[:, 0]
self.clf = clf
self.trees = []
def fit(self):
# This is a temporary implementation
self.clf = self.clf.fit(self.X_tr, self.y_tr)
self.features_by_importance = self.clf.feature_importances_.argsort()[::-1] # get the importance of features based on trained RF
self.all_features = list(range(self.X_te.shape[1]))
def predict(self, train):
if train == True:
Y_tr_predict = self.clf.predict(self.X_tr)
re_dataset_train = deepcopy(self.dataset_train)
re_dataset_train.labels = Y_tr_predict
return re_dataset_train
else:
Y_te_predict = self.clf.predict(self.X_te)
re_dataset_test = deepcopy(self.dataset_test)
re_dataset_test.labels = Y_te_predict
return re_dataset_test
# choose the appropriate number of features to ask for Group A and B
def choose_appropriate_num_of_feature(self, privilige_feature, privilige_value, unprivilige_value, \
total_budget, feat_method = 'feat-imp', run_on_training = False):
num_of_priviledge = 0
num_of_unpriviledge = 0
dataset = self.X_te_sensitiveAtarget if run_on_training == False else self.X_tr_sensitiveAtarget
featured_dataset = self.X_te if run_on_training == False else self.X_tr
for i in range(len(dataset)):
if dataset.iloc[i, self.feature2columnmap[privilige_feature]] == privilige_value:
# priviledge class
num_of_priviledge += 1
else:
assert dataset.iloc[i, self.feature2columnmap[privilige_feature]] == unprivilige_value, "Value incorrect!"
num_of_unpriviledge += 1
total_num = num_of_priviledge + num_of_unpriviledge
current_num_of_feature_for_priviledge = 0
current_num_of_feature_for_unpriviledge = 0
budget_used = 0
# batch_size = 500
# nr_of_batches = total_num // batch_size + 2
dataset_orig = self.dataset_test if run_on_training == False else self.dataset_train
self.trees = [TreeProcess(value.tree_, self.all_features) for value in self.clf.estimators_]
features_by_importance = self.features_by_importance
last_add_privi = True
result = np.zeros([len(dataset)], dtype = np.float32)
priviledge_index = []
unprivilege_index = []
for i in range(len(dataset)):
if dataset.iloc[i, self.feature2columnmap[privilige_feature]] == privilige_value:
priviledge_index.append(i)
else:
unprivilege_index.append(i)
less_than_pri = np.array(dataset_orig.labels[priviledge_index] <= 0.5, dtype = bool)[:, 0]
less_than_unpri = np.array(dataset_orig.labels[unprivilege_index] <= 0.5, dtype = bool)[:, 0]
previous_answers = [[tree.total_leaf_id.copy() for tree in self.trees] for i in range(len(dataset))]
print("Start the process")
while budget_used < total_budget:
# FP_pri = 0
# TN_pri = 0
# FP_unpri = 0
# TN_unpri = 0
if current_num_of_feature_for_priviledge == 0:
FP_pri = 1
TN_pri = 0
else:
privi_predict_result = np.array(result[priviledge_index] > 0.5, dtype = bool)
FP_pri = np.sum(privi_predict_result * less_than_pri)
TN_pri = np.sum((1 - privi_predict_result) * less_than_pri)
if current_num_of_feature_for_unpriviledge == 0:
FP_unpri = 1
TN_unpri = 0
else:
unprivi_predict_result = np.array(result[unprivilege_index] > 0.5, dtype = bool)
FP_unpri = np.sum(unprivi_predict_result * less_than_unpri)
TN_unpri = np.sum((1 - unprivi_predict_result) * less_than_unpri)
# for i in range(len(dataset)):
# if dataset.iloc[i, self.feature2columnmap[privilige_feature]] == privilige_value:
# # priviledge class
# if dataset_orig.labels[i] <= 0.5:
# # actual negative
# if current_num_of_feature_for_priviledge == 0:
# FP_pri += 1
# else:
# if result[i] > 0.5:
# FP_pri += 1
# else:
# TN_pri += 1
# else:
# if dataset_orig.labels[i] <= 0.5:
# # actual negative
# if current_num_of_feature_for_unpriviledge == 0:
# FP_unpri += 1
# else:
# if result[i] > 0.5:
# FP_unpri += 1
# else:
# TN_unpri += 1
FPR_pri = FP_pri * 1.0 / (FP_pri + TN_pri)
FPR_unpri = FP_unpri * 1.0 / (FP_unpri + TN_unpri)
result[:] = 0
if FPR_pri > FPR_unpri:
current_num_of_feature_for_priviledge += 1
last_add_privi = True
budget_used += (num_of_priviledge* 1.0 / total_num)
else:
current_num_of_feature_for_unpriviledge += 1
last_add_privi = False
budget_used += (num_of_unpriviledge * 1.0 / total_num)
print("budget_used", budget_used)
print("FPR_pri", FPR_pri)
print("FPR_unpri", FPR_unpri)
print("FP_pri", FP_pri)
print("TN_pri", TN_pri)
print("FP_unpri", FP_unpri)
print("TN_unpri", TN_unpri)
features = deepcopy(self.all_features)
for j in range(len(dataset)):
test_example_full = featured_dataset.iloc[j, :].values.astype(float)
if dataset.iloc[j, self.feature2columnmap[privilige_feature]] == privilige_value and last_add_privi == True:
# priviledge class
if feat_method == 'random':
new_feature = random.sample(features,1)[0]
features.remove(new_feature)
elif feat_method == 'feat-imp':
new_feature = features_by_importance[current_num_of_feature_for_priviledge]
elif feat_method == 'ask-town':
assert False, "Error 385, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur, absolutes_on=False)
features.remove(new_feature)
elif feat_method == 'abs-agg':
assert False, "Error 389, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur)
features.remove(new_feature)
else:
raise Exception('mode has not been implemented')
p_dict, p_cur = calcPValuesPerTree(test_example_full, self.trees, previous_answers[j], new_feature)
result[j] = 1 - p_cur # somehow inversed
elif dataset.iloc[j, self.feature2columnmap[privilige_feature]] != privilige_value and last_add_privi == False:
if feat_method == 'random':
new_feature = random.sample(features,1)[0]
features.remove(new_feature)
elif feat_method == 'feat-imp':
new_feature = features_by_importance[current_num_of_feature_for_unpriviledge]
elif feat_method == 'ask-town':
assert False, "Error 385, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur, absolutes_on=False)
features.remove(new_feature)
elif feat_method == 'abs-agg':
assert False, "Error 389, not supported yet"
new_feature = getTheNextBestFeature(self.trees, features, test_example, previous_answers, p_cur)
features.remove(new_feature)
else:
raise Exception('mode has not been implemented')
p_dict, p_cur = calcPValuesPerTree(test_example_full, self.trees, previous_answers[j], new_feature)
result[j] = 1 - p_cur # somehow inversed
return current_num_of_feature_for_priviledge, current_num_of_feature_for_unpriviledge
def run_algo_in_parallel(self, new_feat_mode,
sensitive_name,
privilige_variable_value,
unprivilige_variable_value,
pri_num_feature_fetched,
un_pri_num_feature_fetched,
verbose=1,
plot_any=False,
batch_size=512,
nr_of_batches=100,
save_to_file=True,
run_on_training=False,
save_folder='',
show_no_words = False):
assert (len(save_folder) == 0) or (save_to_file)
X_tr = self.X_tr
y_tr = self.y_tr
if run_on_training:
X_te = self.X_tr
y_te = self.y_tr
X_sensi_te = self.X_tr_sensitiveAtarget
else:
X_te = self.X_te
y_te = self.y_te
X_sensi_te = self.X_te_sensitiveAtarget
clf = self.clf
self.trees = [TreeProcess(value.tree_, self.all_features) for value in clf.estimators_]
all_features = self.all_features
features_by_importance = self.features_by_importance
start_time2 = time.time()
results = []
for ii in range(nr_of_batches):
start = ii*batch_size
end = min(X_te.shape[0] ,(ii+1) * batch_size)
if start >= X_te.shape[0]:
break
if show_no_words == False:
print('START',start, 'END', end)
results_one = [run_per_test_case(i, X_tr, y_tr, X_te, y_te, X_sensi_te, sensitive_name, privilige_variable_value, \
unprivilige_variable_value, pri_num_feature_fetched, un_pri_num_feature_fetched, self.feature2columnmap, \
verbose, new_feat_mode, clf, start_time2, all_features, features_by_importance, self.trees) for i in np.arange(start,end)]
results.extend(results_one)
l = len(results)
ser_p = [pd.Series(results[i]['p_list'], name=results[i]['index']) for i in range(l)]
df_p = pd.concat(ser_p,axis=1).transpose()
df_p = (1-df_p) #correcting because somehow the p's are inversed
ser_qa = [pd.Series(results[i]['qa'], name=results[i]['index']) for i in range(l)]
df_qa = pd.concat(ser_qa,axis=1).transpose()
ser_y = [pd.Series(results[i]['y_te'], name=results[i]['index']) for i in range(l)]
df_y = pd.concat(ser_y,axis=1).transpose()
ser_mc = [pd.Series(results[i]['max_conf'], name=results[i]['index']) for i in range(l)]
df_mc = pd.concat(ser_mc,axis=1).transpose()
df_X = pd.concat([results[i]['X_te'] for i in range(l)],axis=1).transpose()
if save_to_file:
df_p.to_csv('{}/{}_dataframe_p_{}.csv'.format(save_folder,new_feat_mode,ii))
df_qa.to_csv('{}/{}_dataframe_qa_{}.csv'.format(save_folder,new_feat_mode,ii))
df_y.to_csv('{}/{}_dataframe_y_test_{}.csv'.format(save_folder,new_feat_mode,ii))
df_mc.to_csv('{}/{}_dataframe_mc_{}.csv'.format(save_folder,new_feat_mode,ii))
df_X.to_csv('{}/{}_dataframe_X_test_{}.csv'.format(save_folder,new_feat_mode,ii))
return df_p, df_qa, df_y, df_mc, df_X # What does this part mean?
def run_per_test_case(test_case_id, X_tr, y_tr, X_te, y_te, X_sensi_te, sensitive_name, privilige_variable_value, \
unprivilige_variable_value, pri_num_feature_fetched, un_pri_num_feature_fetched,feature2columnmap, \
verbose, new_feat_mode, clf, start_time2, all_features, features_by_importance, forestProcess):
# start_time_00 = time.time()
if verbose >= 1:
if test_case_id % 1 == 0:
print('Test case', test_case_id,"of",len(y_te))
print('Time passed', time.time()-start_time2)
print('Mode', new_feat_mode)
print()
if X_sensi_te.iloc[test_case_id,feature2columnmap[sensitive_name]] == privilige_variable_value:
# privilege group
budget = pri_num_feature_fetched
else:
budget = un_pri_num_feature_fetched
features = deepcopy(all_features)
test_example_full = X_te.iloc[test_case_id, :].values.astype(float)
test_example = test_example_full * np.nan # Initialized as all nan
# time_stop1 = time.time()
max_conf = clf.predict_proba(test_example_full.reshape(1,-1))[0][0]
p = []
question_asked = []
if new_feat_mode == 'feat-imp':
upper_bound = min(budget, len(features))
else:
upper_bound = len(features)
# time_stop2 = time.time()
previous_answers = [tree.total_leaf_id.copy() for tree in forestProcess]
p_cur = -1
for question_i in range(upper_bound):
if new_feat_mode == 'random':
new_feature = random.sample(features,1)[0]
features.remove(new_feature)
elif new_feat_mode == 'feat-imp':
new_feature = features_by_importance[question_i]
elif new_feat_mode == 'ask-town':
new_feature = getTheNextBestFeature(forestProcess, features, test_example, previous_answers, p_cur, absolutes_on=False)
features.remove(new_feature)
elif new_feat_mode == 'abs-agg':
new_feature = getTheNextBestFeature(forestProcess, features, test_example, previous_answers, p_cur)
features.remove(new_feature)
else:
raise Exception('mode has not been implemented')
p_dict, p_cur = calcPValuesPerTree(test_example_full, forestProcess, previous_answers, new_feature)
p.append(p_cur)
question_asked.append(new_feature)
# test_example[new_feature] = test_example_full[new_feature]
if verbose >= 3:
print()
print('Test case', test_case_id,"of",len(y_tr), 'index', X_tr.index[test_case_id])
print('Time passed', time.time()-start_time2)
print("Nr of questions asked : ", question_i)
print("P before classification : ", p[-1])
print("feature asked : ", list(X_tr)[new_feature])
print("feature number asked : ", new_feature)
print("max conf", max_conf)
# time_stop3 = time.time()
if verbose >= 2:
print("Test example's true label: ", y_te.iloc[test_case_id])
print("Prevalence of class 0 in y_test: ", y_te.mean())
# plot per test case
if False:
fig, ax1 = plt.subplots()
ax1.set_title(str(test_case_id) + "|" + str(y_te.iloc[test_case_id]))
ax1.plot(p, "gd-", label='probability of class 0')
ax1.set_ylabel('probability of class 0')
ax1.set_ylim([0, 1])
ax2 = ax1.twinx()
# ax2.plot(times, 'bd-', label='computation time')
ax2.set_ylabel('time')
ax1.set_xlabel("Questions Asked")
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=1)
ax2.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2)
plt.show()
# used to debug
# print("T-lap 0", time_stop1 - start_time_00)
# print("T-lap 1", time_stop2 - time_stop1)
# print("T-lap 2", time_stop3 - time_stop2)
# return_time.append(time_stop3 - time_stop2)
return {'X_te':X_te.iloc[test_case_id,:],'y_te':y_te.iloc[test_case_id],'max_conf':max_conf,'index':X_te.index[test_case_id],'p_list':p,'qa':question_asked}
def ClassifyWithPartialFeatures(sampleData,tree, previous_answers, new_feature, only_norm_prob = False):
# only_norm_prob is for accelerating
value = sampleData[new_feature]
l = len(tree.feature2threshold_list[new_feature])
if l > 0:
if value > tree.feature2threshold_list[new_feature][l - 1]:
previous_answers -= tree.featureAndthreshold2delete_set[new_feature][np.inf]
else:
for i in range(l):
if value <= tree.feature2threshold_list[new_feature][i]:
previous_answers -= tree.featureAndthreshold2delete_set[new_feature][tree.feature2threshold_list[new_feature][i]]
break
# time1 = time.time()
total_w = 0
norm_w = 1
total_probs = np.zeros(tree.tree_single_value_shape, dtype = np.float32)
if only_norm_prob == False:
for node in previous_answers:
total_probs += tree.values[node]
total_w += tree.weighted_samples[node]
norm_w = total_w / tree.weighted_samples[tree.feature == -2].sum()
norm_probs = total_probs[0]/total_probs[0].sum()
else:
for node in previous_answers:
total_probs += tree.values[node]
norm_probs = total_probs[0]/total_probs[0].sum()
# time2 = time.time()
# print("time 4", time1 - start_time)
# print("time 5", time2 - time1)
return norm_probs[0], norm_w # also return the weight
def getTheNextBestFeature(forest, features, test_example, previous_answers, p_cur = -1, absolutes_on=True):
number_of_trees = len(forest)
next_feature_array = np.zeros(len(features))
for feat_i, feature in enumerate(features):
for tree_id in range(number_of_trees):
tree = forest[tree_id]
if feature in tree.unique_feature:
l = tree.feature2threshold_list[feature]
test_values = [l[0]-1e-3] + [(a + b) / 2. for a, b in zip(l, l[1:])] + [l[-1]+1e-3]
conf_list = []
w_list = []
for value in test_values:
input_answer = previous_answers[tree_id].copy()
test_example_temp = test_example.copy()
test_example_temp[feature] = value
p,w = ClassifyWithPartialFeatures(test_example_temp, tree, input_answer, feature)
if absolutes_on and p_cur != -1:
conf_list.append(np.abs(p-p_cur))
else:
conf_list.append(p)
w_list.append(w)
next_feature_array[feat_i] += sum([x*y for x,y in zip(conf_list,w_list)]) / sum(w_list)
else:
if (not absolutes_on):
next_feature_array[feat_i] += p_cur
if absolutes_on:
return features[np.argmax(next_feature_array)]
else:
next_feature_array = next_feature_array / number_of_trees
return features[np.argmax(np.abs(next_feature_array-p_cur))]
def calcPValuesPerTree(test_example, forest, previous_answers, new_feature):
# p_list = [ClassifyWithPartialFeatures(test_example,tree, previous_answers[i], new_feature, only_norm_prob = True)[0] for i, tree in enumerate(forest)]
p_list = []
# sampleData,tree, previous_answers, new_feature, only_norm_prob = False
for i, tree in enumerate(forest):
# only_norm_prob is for accelerating
value = test_example[new_feature]
# assert np.isnan(value) == False, "Value error in ClassifyWithPartialFeatures"
l = len(tree.feature2threshold_list[new_feature])
if l > 0:
if value > tree.feature2threshold_list[new_feature][l - 1]:
previous_answers[i] -= tree.featureAndthreshold2delete_set[new_feature][np.inf]
else:
for j in range(l):
if value <= tree.feature2threshold_list[new_feature][j]:
previous_answers[i] -= tree.featureAndthreshold2delete_set[new_feature][tree.feature2threshold_list[new_feature][j]]
break
# time1 = time.time()
total_probs = np.zeros(tree.tree_single_value_shape, dtype = np.float32)
list_previous = list(previous_answers[i])
total_probs = np.sum(tree.values[list_previous], axis = 0)
# for node in previous_answers[i]:
# total_probs += tree.values[node]
norm_probs = total_probs[0]/total_probs[0].sum()
# time2 = time.time()
# print("time 4", time1 - start_time)
# print("time 5", time2 - time1)
p_list.append(norm_probs[0])
return p_list, np.mean(p_list)
class calibration(object):
def __init__(self,method= 'sigmoid'):
self.method = method
def fit(self, p_input, y):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
calibrator.fit(p_input, y)
if self.method == 'sigmoid':
self.a = calibrator.a_
self.b = calibrator.b_
self.calibrator = calibrator
return self
def predict(self, p_input):
return self.calibrator.predict(p_input)
def calibrate_probs(df_p_train, df_p_test, y_train, y_test, group_train, group_test, cal_mode = 'sigmoid', calibration_type='per_group', print_random=True):
'''
df_p_train: the probability when having certain amount of features in queries(training data), shape = [Number of examples, Number of features asked]
df_p_test: the probability when having certain amount of features in queries(testing data), shape = [Number of examples, Number of features asked]
y_train: the actual label in the training dataset, shape = [Number of examples, 1]
y_test: the actual label in the testing dataset, shape = [Number of examples, 1]
group_train: the sensitive value in training dataset, type: Series
group_test: the sensitive value in testing dataset, type: Series
cal_mode: the model for calibration
calibration_type: the type for calibration (Only group level calibration supported so far)
print_random: print signal
'''
df_p_test.columns = [str(i) for i in df_p_test.columns]
df_p_train.columns = [str(i) for i in df_p_train.columns]
df_p_cal_test = df_p_test.copy()
if calibration_type == 'per_group':
for q_i in range(df_p_train.shape[1]):
calibrator_per_group = {}
for group in group_train.unique():
X = df_p_train.loc[group_train == group,str(q_i)]
Y = y_train.loc[group_train == group,0]
calibrator_per_group[group] = calibration(cal_mode).fit(
X,Y) if len(Y) != 0 else None
for ii, (p_old, group, y_value) in enumerate(zip(df_p_test[str(q_i)],group_test,y_test.loc[:,0])):
df_p_cal_test[str(q_i)].iloc[ii] = calibrator_per_group[group].predict(pd.Series(p_old))[0] if calibrator_per_group[group] != None else p_old
else:
raise ValueError('Calibration type {} is not yet supported'.format(calibration_type))
return df_p_cal_test
| [
"pandas.DataFrame",
"copy.deepcopy",
"sklearn.calibration._SigmoidCalibration",
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"random.sample",
"numpy.zeros",
"time.time",
"numpy.shape",
"sklearn.isotonic.IsotonicRegression",
"numpy.mean",
"numpy.array",
"pandas.Series",
"numpy.arange",
"pa... | [((27455, 27477), 'copy.deepcopy', 'deepcopy', (['all_features'], {}), '(all_features)\n', (27463, 27477), False, 'from copy import deepcopy\n'), ((31491, 31547), 'numpy.zeros', 'np.zeros', (['tree.tree_single_value_shape'], {'dtype': 'np.float32'}), '(tree.tree_single_value_shape, dtype=np.float32)\n', (31499, 31547), True, 'import numpy as np\n'), ((3121, 3145), 'numpy.shape', 'np.shape', (['self.values[0]'], {}), '(self.values[0])\n', (3129, 3145), True, 'import numpy as np\n'), ((13058, 13099), 'pandas.DataFrame', 'pd.DataFrame', (['complete_data.feature_names'], {}), '(complete_data.feature_names)\n', (13070, 13099), True, 'import pandas as pd\n'), ((13739, 13785), 'pandas.DataFrame', 'pd.DataFrame', (['train[:, y_column_index_inverse]'], {}), '(train[:, y_column_index_inverse])\n', (13751, 13785), True, 'import pandas as pd\n'), ((13912, 13950), 'pandas.DataFrame', 'pd.DataFrame', (['train[:, y_column_index]'], {}), '(train[:, y_column_index])\n', (13924, 13950), True, 'import pandas as pd\n'), ((14065, 14110), 'pandas.DataFrame', 'pd.DataFrame', (['test[:, y_column_index_inverse]'], {}), '(test[:, y_column_index_inverse])\n', (14077, 14110), True, 'import pandas as pd\n'), ((14236, 14273), 'pandas.DataFrame', 'pd.DataFrame', (['test[:, y_column_index]'], {}), '(test[:, y_column_index])\n', (14248, 14273), True, 'import pandas as pd\n'), ((24550, 24561), 'time.time', 'time.time', ([], {}), '()\n', (24559, 24561), False, 'import time\n'), ((34887, 34943), 'numpy.zeros', 'np.zeros', (['tree.tree_single_value_shape'], {'dtype': 'np.float32'}), '(tree.tree_single_value_shape, dtype=np.float32)\n', (34895, 34943), True, 'import numpy as np\n'), ((35018, 35060), 'numpy.sum', 'np.sum', (['tree.values[list_previous]'], {'axis': '(0)'}), '(tree.values[list_previous], axis=0)\n', (35024, 35060), True, 'import numpy as np\n'), ((35384, 35399), 'numpy.mean', 'np.mean', (['p_list'], {}), '(p_list)\n', (35391, 35399), True, 'import numpy as np\n'), ((14857, 
14885), 'copy.deepcopy', 'deepcopy', (['self.dataset_train'], {}), '(self.dataset_train)\n', (14865, 14885), False, 'from copy import deepcopy\n'), ((15072, 15099), 'copy.deepcopy', 'deepcopy', (['self.dataset_test'], {}), '(self.dataset_test)\n', (15080, 15099), False, 'from copy import deepcopy\n'), ((17055, 17121), 'numpy.array', 'np.array', (['(dataset_orig.labels[priviledge_index] <= 0.5)'], {'dtype': 'bool'}), '(dataset_orig.labels[priviledge_index] <= 0.5, dtype=bool)\n', (17063, 17121), True, 'import numpy as np\n'), ((17156, 17223), 'numpy.array', 'np.array', (['(dataset_orig.labels[unprivilege_index] <= 0.5)'], {'dtype': 'bool'}), '(dataset_orig.labels[unprivilege_index] <= 0.5, dtype=bool)\n', (17164, 17223), True, 'import numpy as np\n'), ((20216, 20243), 'copy.deepcopy', 'deepcopy', (['self.all_features'], {}), '(self.all_features)\n', (20224, 20243), False, 'from copy import deepcopy\n'), ((25339, 25396), 'pandas.Series', 'pd.Series', (["results[i]['p_list']"], {'name': "results[i]['index']"}), "(results[i]['p_list'], name=results[i]['index'])\n", (25348, 25396), True, 'import pandas as pd\n'), ((25559, 25612), 'pandas.Series', 'pd.Series', (["results[i]['qa']"], {'name': "results[i]['index']"}), "(results[i]['qa'], name=results[i]['index'])\n", (25568, 25612), True, 'import pandas as pd\n'), ((25703, 25758), 'pandas.Series', 'pd.Series', (["results[i]['y_te']"], {'name': "results[i]['index']"}), "(results[i]['y_te'], name=results[i]['index'])\n", (25712, 25758), True, 'import pandas as pd\n'), ((25856, 25915), 'pandas.Series', 'pd.Series', (["results[i]['max_conf']"], {'name': "results[i]['index']"}), "(results[i]['max_conf'], name=results[i]['index'])\n", (25865, 25915), True, 'import pandas as pd\n'), ((33568, 33597), 'numpy.argmax', 'np.argmax', (['next_feature_array'], {}), '(next_feature_array)\n', (33577, 33597), True, 'import numpy as np\n'), ((35602, 35642), 'sklearn.isotonic.IsotonicRegression', 'IsotonicRegression', ([], {'out_of_bounds': 
'"""clip"""'}), "(out_of_bounds='clip')\n", (35620, 35642), False, 'from sklearn.isotonic import IsotonicRegression\n'), ((13971, 14016), 'pandas.DataFrame', 'pd.DataFrame', (['self.dataset_train.labels[:, 0]'], {}), '(self.dataset_train.labels[:, 0])\n', (13983, 14016), True, 'import pandas as pd\n'), ((14294, 14338), 'pandas.DataFrame', 'pd.DataFrame', (['self.dataset_test.labels[:, 0]'], {}), '(self.dataset_test.labels[:, 0])\n', (14306, 14338), True, 'import pandas as pd\n'), ((17708, 17760), 'numpy.array', 'np.array', (['(result[priviledge_index] > 0.5)'], {'dtype': 'bool'}), '(result[priviledge_index] > 0.5, dtype=bool)\n', (17716, 17760), True, 'import numpy as np\n'), ((17788, 17832), 'numpy.sum', 'np.sum', (['(privi_predict_result * less_than_pri)'], {}), '(privi_predict_result * less_than_pri)\n', (17794, 17832), True, 'import numpy as np\n'), ((17858, 17908), 'numpy.sum', 'np.sum', (['((1 - privi_predict_result) * less_than_pri)'], {}), '((1 - privi_predict_result) * less_than_pri)\n', (17864, 17908), True, 'import numpy as np\n'), ((18088, 18141), 'numpy.array', 'np.array', (['(result[unprivilege_index] > 0.5)'], {'dtype': 'bool'}), '(result[unprivilege_index] > 0.5, dtype=bool)\n', (18096, 18141), True, 'import numpy as np\n'), ((18171, 18219), 'numpy.sum', 'np.sum', (['(unprivi_predict_result * less_than_unpri)'], {}), '(unprivi_predict_result * less_than_unpri)\n', (18177, 18219), True, 'import numpy as np\n'), ((18247, 18301), 'numpy.sum', 'np.sum', (['((1 - unprivi_predict_result) * less_than_unpri)'], {}), '((1 - unprivi_predict_result) * less_than_unpri)\n', (18253, 18301), True, 'import numpy as np\n'), ((25431, 25455), 'pandas.concat', 'pd.concat', (['ser_p'], {'axis': '(1)'}), '(ser_p, axis=1)\n', (25440, 25455), True, 'import pandas as pd\n'), ((25648, 25673), 'pandas.concat', 'pd.concat', (['ser_qa'], {'axis': '(1)'}), '(ser_qa, axis=1)\n', (25657, 25673), True, 'import pandas as pd\n'), ((25793, 25817), 'pandas.concat', 'pd.concat', 
(['ser_y'], {'axis': '(1)'}), '(ser_y, axis=1)\n', (25802, 25817), True, 'import pandas as pd\n'), ((25951, 25976), 'pandas.concat', 'pd.concat', (['ser_mc'], {'axis': '(1)'}), '(ser_mc, axis=1)\n', (25960, 25976), True, 'import pandas as pd\n'), ((28121, 28147), 'random.sample', 'random.sample', (['features', '(1)'], {}), '(features, 1)\n', (28134, 28147), False, 'import random\n'), ((33710, 33744), 'numpy.abs', 'np.abs', (['(next_feature_array - p_cur)'], {}), '(next_feature_array - p_cur)\n', (33716, 33744), True, 'import numpy as np\n'), ((35707, 35728), 'sklearn.calibration._SigmoidCalibration', '_SigmoidCalibration', ([], {}), '()\n', (35726, 35728), False, 'from sklearn.calibration import _SigmoidCalibration\n'), ((25234, 25255), 'numpy.arange', 'np.arange', (['start', 'end'], {}), '(start, end)\n', (25243, 25255), True, 'import numpy as np\n'), ((27131, 27142), 'time.time', 'time.time', ([], {}), '()\n', (27140, 27142), False, 'import time\n'), ((29208, 29219), 'time.time', 'time.time', ([], {}), '()\n', (29217, 29219), False, 'import time\n'), ((20634, 20660), 'random.sample', 'random.sample', (['features', '(1)'], {}), '(features, 1)\n', (20647, 20660), False, 'import random\n'), ((33175, 33192), 'numpy.abs', 'np.abs', (['(p - p_cur)'], {}), '(p - p_cur)\n', (33181, 33192), True, 'import numpy as np\n'), ((37760, 37776), 'pandas.Series', 'pd.Series', (['p_old'], {}), '(p_old)\n', (37769, 37776), True, 'import pandas as pd\n'), ((21992, 22018), 'random.sample', 'random.sample', (['features', '(1)'], {}), '(features, 1)\n', (22005, 22018), False, 'import random\n')] |
import unittest
import numpy as np
from PyANN.utils import add_col, remove_col
class TestUtils(unittest.TestCase):
    """Unit tests for the PyANN matrix column helpers."""

    def test_add_col(self):
        """add_col must append exactly one column, keeping the row count."""
        original: np.ndarray = np.ones((5, 5))
        augmented: np.ndarray = add_col(original)
        self.assertTrue(original.shape[0] == augmented.shape[0])
        self.assertTrue(original.shape[1] == augmented.shape[1] - 1)

    def test_remove_col(self):
        """remove_col must drop exactly one column, keeping the row count."""
        original: np.ndarray = np.ones((5, 5))
        reduced: np.ndarray = remove_col(original)
        self.assertTrue(original.shape[0] == reduced.shape[0])
        self.assertTrue(original.shape[1] - 1 == reduced.shape[1])


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"PyANN.utils.add_col",
"PyANN.utils.remove_col",
"numpy.ones"
] | [((710, 725), 'unittest.main', 'unittest.main', ([], {}), '()\n', (723, 725), False, 'import unittest\n'), ((176, 191), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (183, 191), True, 'import numpy as np\n'), ((231, 246), 'PyANN.utils.add_col', 'add_col', (['matrix'], {}), '(matrix)\n', (238, 246), False, 'from PyANN.utils import add_col, remove_col\n'), ((451, 466), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (458, 466), True, 'import numpy as np\n'), ((509, 527), 'PyANN.utils.remove_col', 'remove_col', (['matrix'], {}), '(matrix)\n', (519, 527), False, 'from PyANN.utils import add_col, remove_col\n')] |
#!/usr/bin/env python
# adapted from
# https://github.com/opencv/opencv/blob/master/samples/python/lk_track.py
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames.
'''
import numpy as np
import cv2 as cv
import time
from umucv.stream import autoStream
from umucv.util import putText
# Parameters for the pyramidal Lucas-Kanade tracker (cv.calcOpticalFlowPyrLK):
# integration window, pyramid depth, and iteration stop criteria.
lk_params = dict( winSize  = (15, 15),
                  maxLevel = 2,
                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))

# Parameters for the Shi-Tomasi corner detector (cv.goodFeaturesToTrack).
feature_params = dict( maxCorners = 500,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )

track_len = 20        # maximum number of points kept per track
detect_interval = 5   # re-detect new corners every N frames
tracks = []           # each track is a list of (x, y) points, newest last
frame_idx = 0

for key, frame in autoStream():
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    vis = frame.copy()
    if len(tracks) > 0:
        img0, img1 = prev_gray, frame_gray
        # Last known position of every track, shaped for calcOpticalFlowPyrLK.
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        t0 = time.time()
        # Forward flow, then backward flow for the back-tracking check.
        p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
        p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
        t1 = time.time()
        # A match is kept only if the round trip returns within 1 pixel.
        d = abs(p0-p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []
        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            if len(tr) > track_len:
                del tr[0]
            new_tracks.append(tr)
            cv.circle(vis, (x, y), 2, (0, 255, 0), -1)
        tracks = new_tracks
        cv.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
        putText(vis, 'tracks: {}, {:.0f}ms'.format(len(tracks), 1000*(t1-t0)) )
        #for t in tracks:
        #    print( t[0],t[-1] )
    if frame_idx % detect_interval == 0:
        # Mask out the neighbourhood of existing points so re-detection
        # only proposes fresh corners.
        mask = np.zeros_like(frame_gray)
        mask[:] = 255
        for x, y in [np.int32(tr[-1]) for tr in tracks]:
            cv.circle(mask, (x, y), 5, 0, -1)
        p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
        if p is not None:
            for x, y in np.float32(p).reshape(-1, 2):
                tracks.append([(x, y)])
    frame_idx += 1
    prev_gray = frame_gray
    cv.imshow('lk_track', vis)

cv.destroyAllWindows()
| [
"umucv.stream.autoStream",
"numpy.zeros_like",
"cv2.circle",
"cv2.cvtColor",
"numpy.float32",
"cv2.imshow",
"time.time",
"cv2.goodFeaturesToTrack",
"numpy.int32",
"cv2.calcOpticalFlowPyrLK",
"cv2.destroyAllWindows"
] | [((835, 847), 'umucv.stream.autoStream', 'autoStream', ([], {}), '()\n', (845, 847), False, 'from umucv.stream import autoStream\n'), ((2425, 2447), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2445, 2447), True, 'import cv2 as cv\n'), ((866, 903), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (877, 903), True, 'import cv2 as cv\n'), ((2397, 2423), 'cv2.imshow', 'cv.imshow', (['"""lk_track"""', 'vis'], {}), "('lk_track', vis)\n", (2406, 2423), True, 'import cv2 as cv\n'), ((1077, 1088), 'time.time', 'time.time', ([], {}), '()\n', (1086, 1088), False, 'import time\n'), ((1114, 1172), 'cv2.calcOpticalFlowPyrLK', 'cv.calcOpticalFlowPyrLK', (['img0', 'img1', 'p0', 'None'], {}), '(img0, img1, p0, None, **lk_params)\n', (1137, 1172), True, 'import cv2 as cv\n'), ((1198, 1256), 'cv2.calcOpticalFlowPyrLK', 'cv.calcOpticalFlowPyrLK', (['img1', 'img0', 'p1', 'None'], {}), '(img1, img0, p1, None, **lk_params)\n', (1221, 1256), True, 'import cv2 as cv\n'), ((1270, 1281), 'time.time', 'time.time', ([], {}), '()\n', (1279, 1281), False, 'import time\n'), ((1997, 2022), 'numpy.zeros_like', 'np.zeros_like', (['frame_gray'], {}), '(frame_gray)\n', (2010, 2022), True, 'import numpy as np\n'), ((2160, 2223), 'cv2.goodFeaturesToTrack', 'cv.goodFeaturesToTrack', (['frame_gray'], {'mask': 'mask'}), '(frame_gray, mask=mask, **feature_params)\n', (2182, 2223), True, 'import cv2 as cv\n'), ((1642, 1684), 'cv2.circle', 'cv.circle', (['vis', '(x, y)', '(2)', '(0, 255, 0)', '(-1)'], {}), '(vis, (x, y), 2, (0, 255, 0), -1)\n', (1651, 1684), True, 'import cv2 as cv\n'), ((2066, 2082), 'numpy.int32', 'np.int32', (['tr[-1]'], {}), '(tr[-1])\n', (2074, 2082), True, 'import numpy as np\n'), ((2114, 2147), 'cv2.circle', 'cv.circle', (['mask', '(x, y)', '(5)', '(0)', '(-1)'], {}), '(mask, (x, y), 5, 0, -1)\n', (2123, 2147), True, 'import cv2 as cv\n'), ((1008, 1045), 'numpy.float32', 'np.float32', (['[tr[-1] for tr in 
tracks]'], {}), '([tr[-1] for tr in tracks])\n', (1018, 1045), True, 'import numpy as np\n'), ((1748, 1760), 'numpy.int32', 'np.int32', (['tr'], {}), '(tr)\n', (1756, 1760), True, 'import numpy as np\n'), ((2276, 2289), 'numpy.float32', 'np.float32', (['p'], {}), '(p)\n', (2286, 2289), True, 'import numpy as np\n')] |
"""
Tutorials / horn antenna
Description at:
http://openems.de/index.php/Tutorial:_Horn_Antenna
(C) 2011,2012,2013 <NAME> <<EMAIL>>
Python Adaptation : ESIR Project 2015
"""
from pylayers.em.openems.openems import *
import scipy.constants as cst
import numpy as np
# setup the simulation
unit = 1e-3 # all length in mm
class HornAntenna(object):
    """Geometry of a pyramidal horn antenna (all lengths in mm).

    Keyword parameters (with defaults): unit (1e-3), width (20),
    height (30), length (50), feed_length (50), thickness (2) and
    angle, the flare angles in radians (20 degrees in both planes).
    """
    def __init__(self, **kwargs):
        params = {'unit': 1e-3,
                  'width': 20,
                  'height': 30,
                  'length': 50,
                  'feed_length': 50,
                  'thickness': 2,
                  'angle': np.array([20, 20]) * np.pi / 180.}
        # Explicit keyword arguments take precedence over the defaults.
        params.update(kwargs)
        self.unit = params['unit']
        self.width = params['width']
        self.height = params['height']
        self.length = params['length']
        self.feed_length = params['feed_length']
        self.thickness = params['thickness']
        self.angle = params['angle']
HA = HornAntenna()
# size of the simulation box
SimBox = np.r_[200,200,200]
# frequency range of interest
f_start = 10e9
f_stop = 20e9
# frequency of interest
f0 = 15e9
#waveguide TE-mode definition
TE_mode = 'TE10'
# waveguide aperture: a along x, b along y (mm)
a = HA.width
b = HA.height
# setup FDTD parameter & excitation function
F = FDTD(EndCriteria="1e-4")
F.add(Exc(typ='Gaussian',f0=0.5*(f_start+f_stop),fc=0.5*(f_stop-f_start)))
# absorbing (PML) boundaries on all six faces
F.add(BoundaryCond(['PML 8','PML 8','PML 8','PML 8','PML 8','PML 8']))
# setup CSXCAD geometry & mesh
# currently, openEMS cannot automatically generate a mesh
max_res = ((cst.c/f_stop)/unit)/15. # cell size: lambda/15
C = CSX()
#
# Warning : It is not the same thing to add a new properties (add) and to add
# a new primitive to an existing property (primitive)
#
C.add(Matter('horn',
p=Box(
P1=[-a/2.-HA.thickness,-b/2.,0],
P2=[-a/2.,-b/2.,0],Pr=10)
))
#
# Define Mesh
#
linex = [-SimBox[0]/2.,-a/2., a/2., SimBox[0]/2.]
meshx = SmoothMeshLine( linex, max_res, 1.4)
liney = [-SimBox[1]/2., -b/2., b/2., SimBox[1]/2.]
meshy = SmoothMeshLine( liney, max_res, 1.4 )
linez = [-HA.feed_length, 0 ,SimBox[2]-HA.feed_length ]
meshz = SmoothMeshLine( linez, max_res, 1.4 )
C.add(RectilinearGrid(meshx,meshy,meshz))
#
# Waveguide
#
# Four metallic walls of the rectangular feed waveguide (z < 0).
C.primitive('horn',Box(
P1=[-a/2.-HA.thickness,-b/2.,meshz[0]],
P2=[-a/2.,b/2.,0],Pr=10)
)
C.primitive('horn',Box(
P1=[a/2.+HA.thickness,-b/2.,meshz[0]],
P2=[a/2.,b/2.,0],Pr=10)
)
C.primitive('horn', Box(
P1=[-a/2.-HA.thickness,b/2.+HA.thickness,meshz[0]],
P2=[a/2.+HA.thickness,b/2.,0],Pr=10)
)
C.primitive('horn', Box(
P1=[-a/2.-HA.thickness,-b/2.-HA.thickness,meshz[0]],
P2=[a/2.+HA.thickness,-b/2.,0],Pr=10)
)
#
# horn opening 4 metallic plates
#
# Flared-plate outlines as 2D polygons before rotation/translation.
horn_opening1 = np.array([[0, HA.length, HA.length, 0],
[a/2.,
a/2 + np.sin(HA.angle[0])*HA.length,
-a/2 - np.sin(HA.angle[0])*HA.length,
-a/2.]])
horn_opening2 = np.array([[b/2+HA.thickness,
b/2+HA.thickness + np.sin(HA.angle[1])*HA.length,
-b/2-HA.thickness - np.sin(HA.angle[1])*HA.length,
-b/2-HA.thickness],
[ 0, HA.length, HA.length, 0]])
L1 = LinPoly(lp=horn_opening1.T,Pr=10)
L2 = LinPoly(lp=horn_opening1.T,Pr=10)
L3 = LinPoly(lp=horn_opening2.T,Pr=10,normdir=0)
L4 = LinPoly(lp=horn_opening2.T,Pr=10,normdir=0)
T1 = Transformation()
T2 = Transformation()
T3 = Transformation()
T4 = Transformation()
# y translate
Tr1 = Translate([0,-b/2-HA.thickness/2,0])
Tr2 = Translate([0,b/2+HA.thickness/2,0])
# x translate
Tr3 = Translate([-a/2-HA.thickness/2,0,0])
Tr4 = Translate([a/2+HA.thickness/2,0,0])
# Tilt each plate by its flare angle, then shift it onto the aperture wall.
# NOTE(review): all four rotations use HA.angle[1]; the Rotate_Y pair would
# presumably use HA.angle[0] to match horn_opening1 -- confirm intent.
Rx1 = Rotate_X(HA.angle[1])
Rx2 = Rotate_X(-HA.angle[1])
Rx3 = Rotate_Y(-HA.angle[1])
Rx4 = Rotate_Y(HA.angle[1])
# Rotation first, then translation, for every plate.
T1.append(Rx1)
T1.append(Tr1)
T2.append(Rx2)
T2.append(Tr2)
T3.append(Rx3)
T3.append(Tr3)
T4.append(Rx4)
T4.append(Tr4)
L1.append(T1)
L2.append(T2)
L3.append(T3)
L4.append(T4)
C.primitive('horn',L1)
C.primitive('horn',L2)
C.primitive('horn',L3)
C.primitive('horn',L4)
## first ProbeBox
#C.add(ProbeBox(name='port_ut1', Type='wv', Weight='1'),
# a=Attributes([(0*cos(0.15708*(x--10))*sin(0*(y--15))),
# (-0.05*sin(0.15708*(x--10))*cos(0*(y--15))),0]),
# p=Box(P1=[-10,-15,-25],P2=[10,15,-25],Pr=0)
#
## second ProbeBox
#
#C.add(ProbeBox(name='port_it1', Type='wc', Weight='1'), a=Attributes([(0.05*sin(0.15708*(x--10))*cos(0*(y--15))),0*cos(0.15708*(x--10))*sin(0*(y--15))),0]), p=Box(P1=[-10,-15,-25],P2=[10,15,-25],Pr=0)
#
#
##
# Physical aperture area of the opened horn, converted to meters^2.
A = (a + 2*np.sin(HA.angle[0])*HA.length)*unit * (b + 2*np.sin(HA.angle[1])*HA.length)*unit;
##
## apply the excitation
# Excitation volume inside the feed waveguide (E-field source).
start=[-a/2, -b/2 ,meshz[7] ];
stop =[ a/2, b/2 ,meshz[0]+HA.feed_length/2. ];
C.add(Excitation('port_excite_1',typ="Es",excite="1,1,0"))
# AddRectWaveGuidePort( CSX, 0, 1, start, stop, 2, a*unit, b*unit, TE_mode, 1);
##
##%% nf2ff calc
##start = [mesh.x(9) mesh.y(9) mesh.z(9)];
##stop = [mesh.x(end-8) mesh.y(end-8) mesh.z(end-8)];
##[CSX nf2ff] = CreateNF2FFBox(CSX, 'nf2ff', start, stop, 'Directions', [1 1 1 1 0 1]);
##
##%% prepare simulation folder
##Sim_Path = 'tmp_Horn_Antenna';
##Sim_CSX = 'horn_ant.xml';
##
##[status, message, messageid] = rmdir( Sim_Path, 's' ); % clear previous directory
##[status, message, messageid] = mkdir( Sim_Path ); % create empty simulation folder
##
##%% write openEMS compatible xml-file
##WriteOpenEMS([Sim_Path '/' Sim_CSX], FDTD, CSX);
##
##%% show the structure
##CSXGeomPlot([Sim_Path '/' Sim_CSX]);
##
##%% run openEMS
##RunOpenEMS(Sim_Path, Sim_CSX);
##
##%% postprocessing & do the plots
##freq = linspace(f_start,f_stop,201);
##
##port = calcPort(port, Sim_Path, freq);
##
##Zin = port.uf.tot ./ port.if.tot;
##s11 = port.uf.ref ./ port.uf.inc;
##
##plot( freq/1e9, 20*log10(abs(s11)), 'k-', 'Linewidth', 2 );
##ylim([-60 0]);
##grid on
##title( 'reflection coefficient S_{11}' );
##xlabel( 'frequency f / GHz' );
##ylabel( 'reflection coefficient |S_{11}|' );
##
##drawnow
##
##%% NFFF contour plots %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##
##% calculate the far field at phi=0 degrees and at phi=90 degrees
##thetaRange = (0:2:359) - 180;
##disp( 'calculating far field at phi=[0 90] deg...' );
##nf2ff = CalcNF2FF(nf2ff, Sim_Path, f0, thetaRange*pi/180, [0 90]*pi/180);
##
##Dlog=10*log10(nf2ff.Dmax);
##G_a = 4*pi*A/(c0/f0)^2;
##e_a = nf2ff.Dmax/G_a;
##
##% display some antenna parameter
##disp( ['radiated power: Prad = ' num2str(nf2ff.Prad) ' Watt']);
##disp( ['directivity: Dmax = ' num2str(Dlog) ' dBi'] );
##disp( ['aperture efficiency: e_a = ' num2str(e_a*100) '%'] );
##
##%%
##% normalized directivity
##figure
##plotFFdB(nf2ff,'xaxis','theta','param',[1 2]);
##drawnow
##% D_log = 20*log10(nf2ff.E_norm{1}/max(max(nf2ff.E_norm{1})));
##% D_log = D_log + 10*log10(nf2ff.Dmax);
##% plot( nf2ff.theta, D_log(:,1) ,'k-', nf2ff.theta, D_log(:,2) ,'r-' );
##
##% polar plot
##figure
##polarFF(nf2ff,'xaxis','theta','param',[1 2],'logscale',[-40 20], 'xtics', 12);
##drawnow
##% polar( nf2ff.theta, nf2ff.E_norm{1}(:,1) )
##
##%% calculate 3D pattern
##phiRange = sort( unique( [-180:5:-100 -100:2.5:-50 -50:1:50 50:2.5:100 100:5:180] ) );
##thetaRange = sort( unique([ 0:1:50 50:2.:100 100:5:180 ]));
##
##disp( 'calculating 3D far field...' );
##nf2ff = CalcNF2FF(nf2ff, Sim_Path, f0, thetaRange*pi/180, phiRange*pi/180, 'Verbose',2,'Outfile','nf2ff_3D.h5');
##
##figure
##plotFF3D(nf2ff);
##
##%%
##E_far_normalized = nf2ff.E_norm{1}/max(nf2ff.E_norm{1}(:));
##DumpFF2VTK([Sim_Path '/Horn_Pattern.vtk'],E_far_normalized,thetaRange,phiRange,'scale',1e-3);
# Assemble the openEMS simulation from the FDTD setup and the geometry,
# then write the complete simulation description to disk.
S = OpenEMS(F,C)
#
S.save(filename='HornAntenna.xml')
| [
"numpy.sin",
"numpy.array"
] | [((637, 655), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (645, 655), True, 'import numpy as np\n'), ((3087, 3106), 'numpy.sin', 'np.sin', (['HA.angle[0]'], {}), '(HA.angle[0])\n', (3093, 3106), True, 'import numpy as np\n'), ((3135, 3154), 'numpy.sin', 'np.sin', (['HA.angle[0]'], {}), '(HA.angle[0])\n', (3141, 3154), True, 'import numpy as np\n'), ((3260, 3279), 'numpy.sin', 'np.sin', (['HA.angle[1]'], {}), '(HA.angle[1])\n', (3266, 3279), True, 'import numpy as np\n'), ((3321, 3340), 'numpy.sin', 'np.sin', (['HA.angle[1]'], {}), '(HA.angle[1])\n', (3327, 3340), True, 'import numpy as np\n'), ((4843, 4862), 'numpy.sin', 'np.sin', (['HA.angle[1]'], {}), '(HA.angle[1])\n', (4849, 4862), True, 'import numpy as np\n'), ((4798, 4817), 'numpy.sin', 'np.sin', (['HA.angle[0]'], {}), '(HA.angle[0])\n', (4804, 4817), True, 'import numpy as np\n')] |
# various analytic mass profiles: Hernquist, NFW, Plummer, Isothermal, Miyamoto-Nagai (for disks)
import numpy as np
import astropy.units as u
from astropy import constants
from .cosmo_tools import *
G = constants.G.to(u.kpc * u.km**2. / u.Msun/ u.s**2.)
class NFW:
    """Navarro-Frenk-White dark-matter halo profile."""

    def __init__(self, Mvir, r, cvir):
        """
        Inputs: Mvir (solar mass)
                r: radius (kpc)
                cvir: r_vir/r_scale (dimensionless)
        """
        self.Mvir = Mvir
        self.r = r
        self.cvir = cvir
        # Scale radius from the virial radius at (Omega_m, h) = (0.3, 0.7).
        self.rs = r_vir(0.3, 0.7, Mvir)/cvir
        self.x = r/self.rs

    def density(self):
        """Local density rho_s / (x (1+x)^2) at self.r."""
        rhos = self.Mvir/ (4*np.pi*f(self.cvir)*self.rs**3)
        return rhos/(self.x*(1+self.x)**2)

    def mass(self):
        """Mass enclosed within self.r."""
        return self.Mvir*f(self.x)/f(self.cvir)

    def potential(self):
        """Gravitational potential at self.r."""
        phi = -G*self.Mvir/f(self.cvir) *np.log(1+self.x)/self.r
        return phi

    def v_esc(self):
        """Escape velocity at self.r."""
        phi = self.potential()
        return np.sqrt(-2*phi)

    def v_rot(self):
        """Circular velocity at self.r."""
        m = self.mass()
        return np.sqrt(G*m/self.r)

    def acc(self, position, i):
        """Acceleration component for coordinate value i at `position`."""
        x,y,z = position
        rr = np.sqrt(x**2. + y**2. + z**2.)
        # BUG FIX: the original divided by self.rr, an attribute that does
        # not exist (AttributeError); the local radius rr computed from
        # `position` is the intended quantity.
        # NOTE(review): the enclosed-mass factor still uses f(self.x),
        # i.e. the radius fixed at construction, not rr -- confirm whether
        # f(rr/self.rs) was intended.
        return -G*self.Mvir*f(self.x)*i/(f(self.cvir)*rr**3.)
class Isothermal:
    """Singular isothermal sphere with constant circular velocity."""

    def __init__(self, r, vc):
        """
        Inputs: r: radius (kpc)
                vc: circular velocity at a given position (i.e. solar circle) [km/s]
        """
        self.r = r
        self.vc = vc

    def potential(self):
        """Potential at self.r (up to an additive constant)."""
        return - self.vc**2. * np.log(self.r)

    def density(self):
        """Local density vc^2 / (4 pi G r^2)."""
        return self.vc**2./ (4.*np.pi*G*self.r**2.)

    def mass(self):
        """Mass enclosed within self.r: vc^2 r / G."""
        return self.vc**2.*self.r/G

    def v_esc(self):
        """Escape velocity at self.r."""
        return np.sqrt(-2.*self.potential())

    def acc(self, position, i):
        """Acceleration component for coordinate value i at `position`."""
        px, py, pz = position
        radius = np.sqrt(px**2 + py**2 + pz**2)
        return i * self.vc**2 / radius**2
class MN:
    """Miyamoto-Nagai disk model evaluated at a fixed (r, z)."""

    def __init__(self, Mdisk, a, b, r, z):
        """
        Inputs: Mass of disk (solar mass)
                a: disk scale length (kpc)
                b: disk scale height (kpc)
                r: radius (kpc)
                z: galactocentric height (kpc)
        """
        self.Mdisk = Mdisk
        self.a = a
        self.b = b
        self.z = z
        # Convenience term sqrt(z^2 + b^2), reused by several methods.
        self.B = np.sqrt(self.z**2 + self.b**2)
        self.r = r
    def potential(self):
        # NOTE(review): the standard Miyamoto-Nagai potential is
        # -G M / sqrt(r^2 + (a + B)^2); the term here is (a**2 + B**2)**2
        # instead -- confirm whether this deviation is intentional.
        Mdisk = self.Mdisk
        r = self.r
        a = self.a
        B = self.B
        return -G*Mdisk / np.sqrt(r**2 + (a**2 + B**2)**2)
#   def mass(self):
#       b = self.b
#       a = self.a
#       r = self.r
#       z = self.z
#       Mdisk = self.Mdisk
#       K = a + np.sqrt(z**2 + b**2)
#       num = r**2.
#       den = (r**2 + K**2)**(1.5)
#       t1 = num/den
#       num = z * K
#       den = np.sqrt(z**2 + b**2) * (K**2 + r**2)**(1.5)
#       t2 = num/den
#       return Mdisk * ((r *t1) + (t2 * z))
    def v_rot(self):
        # taken from Bullock 05 paper
        # Combines the radial and vertical restoring terms in quadrature.
        b = self.b
        a = self.a
        r = self.r
        z = self.z
        Mdisk = self.Mdisk
        K = a + np.sqrt(z**2 + b**2)
        num = G * Mdisk * r**2
        den = (r**2 + K**2)**(1.5)
        t1 = num/den
        num = G * Mdisk * z**2 * K
        den = np.sqrt(z**2 + b**2) * (K**2 + r**2)**(1.5)
        t2 = num/den
        return np.sqrt(t1+t2)
    def density(self):
        # Closed-form Miyamoto-Nagai density at (r, z).
        Mdisk = self.Mdisk
        r = self.r
        a = self.a
        B = self.B
        b = self.b
        k = b**2 * Mdisk/ (4*np.pi)
        num = a*r**2 + ((a+3*B)*(a+B)**2)
        den = (r**2 + (a+B)**2)**(2.5) * B**3
        return k * num / den
    def v_esc(self):
        # Escape velocity from the (possibly non-standard) potential above.
        phi = self.potential()
        return np.sqrt(-2*phi)
    def acc(self, position, i):
        # NOTE(review): this method looks broken as written and is left
        # untouched pending clarification of the intended contract:
        #  - self.G does not exist (G is a module-level constant), so the
        #    first line raises AttributeError;
        #  - `i == 'x' or 'y'` is always truthy ('y' is returned whenever
        #    i != 'x'), so the first branch runs for every i;
        #  - `relz` in the z-branch is undefined;
        #  - sibling classes pass i as a numeric coordinate value, which
        #    conflicts with comparing it to the strings 'x'/'z'.
        G = self.G
        bdisk = self.b
        adisk = self.a
        rdisk = self.r
        zdisk = self.z
        Mdisk = self.Mdisk
        x,y,z = position
        rr = np.sqrt(x**2 + y**2 + z**2)
        if i == 'x' or 'y':
            acc = -G*Mdisk*i/( (rdisk**2.) + (adisk + np.sqrt(z**2. + bdisk**2.))**2.)**(1.5)
        if i == 'z':
            acc = -G*Mdisk*i*(adisk+np.sqrt(z**2.+bdisk**2.))/(((adisk+np.sqrt(z**2. + bdisk**2.))**2. + rdisk**2.)**(1.5) * np.sqrt(relz**2. + bdisk**2.))
        return acc
class Hernquist:
    """Hernquist (1990) spherical density profile."""

    def __init__(self, Mvir, r, a):
        """
        Inputs: Mvir: total mass (solar mass)
                a: Hernquist length scale (kpc)
                r: radius (kpc)
        """
        self.Mvir = Mvir
        self.a = a
        self.r = r

    def density(self):
        """rho(r) = M a / (2 pi r (r+a)^3)."""
        return self.Mvir*self.a / (2.*np.pi*self.r*(self.r+self.a)**3.)

    def potential(self):
        """phi(r) = -G M / (r + a)."""
        return -G*self.Mvir /(self.r+self.a)

    def mass(self):
        """Mass enclosed within r: M r^2 / (r+a)^2."""
        return self.Mvir*self.r**2. / (self.r+self.a)**2.

    def v_esc(self):
        """Escape velocity at r."""
        return np.sqrt(-2.*self.potential())

    def v_rot(self):
        """Circular velocity at r."""
        return np.sqrt(G*self.mass()/self.r)

    def acc(self, position, i):
        """Acceleration component for coordinate value i at `position`."""
        px, py, pz = position
        radius = np.sqrt(px**2 + py**2 + pz**2.)
        return -G*self.Mvir*i/((radius**2. + self.a**2.) * radius)
class Plummer:
    """Plummer sphere density profile."""

    def __init__(self, Mtot, r, a):
        """
        Inputs: Mtot: total mass (solar mass)
                a: Plummer length scale (kpc)
                r: radius (kpc)
        """
        self.Mtot = Mtot
        self.a = a
        self.r = r

    def density(self):
        """rho(r) = 3M/(4 pi a^3) (1 + (r/a)^2)^(-5/2)."""
        central = 3*self.Mtot/(4*np.pi*self.a**3)
        return central * (1+(self.r/self.a)**2)**(-2.5)

    def potential(self):
        """phi(r) = -G M / sqrt(r^2 + a^2)."""
        return - G*self.Mtot/ np.sqrt(self.r**2 + self.a**2)

    def v_esc(self):
        """Escape velocity at r."""
        return np.sqrt(-2*self.potential())

    def mass(self):
        """Mass enclosed within r: M r^3 / (r^2 + a^2)^(3/2)."""
        return self.Mtot*self.r**3/ (self.r**2 + self.a**2)**(1.5)

    def v_rot(self):
        """Circular velocity at r."""
        return np.sqrt(G*self.mass()/self.r)

    def acc(self, position, i):
        """Acceleration component for coordinate value i at `position`."""
        px, py, pz = position
        radius2 = px**2 + py**2 + pz**2
        return - (G * self.Mtot * i)/(radius2 + self.a**2)**(1.5)
| [
"numpy.sqrt",
"numpy.log",
"astropy.constants.G.to"
] | [((205, 262), 'astropy.constants.G.to', 'constants.G.to', (['(u.kpc * u.km ** 2.0 / u.Msun / u.s ** 2.0)'], {}), '(u.kpc * u.km ** 2.0 / u.Msun / u.s ** 2.0)\n', (219, 262), False, 'from astropy import constants\n'), ((945, 962), 'numpy.sqrt', 'np.sqrt', (['(-2 * phi)'], {}), '(-2 * phi)\n', (952, 962), True, 'import numpy as np\n'), ((1022, 1045), 'numpy.sqrt', 'np.sqrt', (['(G * m / self.r)'], {}), '(G * m / self.r)\n', (1029, 1045), True, 'import numpy as np\n'), ((1113, 1152), 'numpy.sqrt', 'np.sqrt', (['(x ** 2.0 + y ** 2.0 + z ** 2.0)'], {}), '(x ** 2.0 + y ** 2.0 + z ** 2.0)\n', (1120, 1152), True, 'import numpy as np\n'), ((1724, 1743), 'numpy.sqrt', 'np.sqrt', (['(-2.0 * phi)'], {}), '(-2.0 * phi)\n', (1731, 1743), True, 'import numpy as np\n'), ((1852, 1885), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (1859, 1885), True, 'import numpy as np\n'), ((2294, 2328), 'numpy.sqrt', 'np.sqrt', (['(self.z ** 2 + self.b ** 2)'], {}), '(self.z ** 2 + self.b ** 2)\n', (2301, 2328), True, 'import numpy as np\n'), ((3339, 3355), 'numpy.sqrt', 'np.sqrt', (['(t1 + t2)'], {}), '(t1 + t2)\n', (3346, 3355), True, 'import numpy as np\n'), ((3711, 3728), 'numpy.sqrt', 'np.sqrt', (['(-2 * phi)'], {}), '(-2 * phi)\n', (3718, 3728), True, 'import numpy as np\n'), ((3936, 3969), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (3943, 3969), True, 'import numpy as np\n'), ((4970, 4989), 'numpy.sqrt', 'np.sqrt', (['(-2.0 * phi)'], {}), '(-2.0 * phi)\n', (4977, 4989), True, 'import numpy as np\n'), ((5067, 5085), 'numpy.sqrt', 'np.sqrt', (['(G * M / r)'], {}), '(G * M / r)\n', (5074, 5085), True, 'import numpy as np\n'), ((5213, 5248), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2.0)'], {}), '(x ** 2 + y ** 2 + z ** 2.0)\n', (5220, 5248), True, 'import numpy as np\n'), ((5896, 5913), 'numpy.sqrt', 'np.sqrt', (['(-2 * phi)'], {}), '(-2 * phi)\n', (5903, 5913), True, 'import 
numpy as np\n'), ((6153, 6171), 'numpy.sqrt', 'np.sqrt', (['(G * M / r)'], {}), '(G * M / r)\n', (6160, 6171), True, 'import numpy as np\n'), ((6288, 6321), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (6295, 6321), True, 'import numpy as np\n'), ((1500, 1514), 'numpy.log', 'np.log', (['self.r'], {}), '(self.r)\n', (1506, 1514), True, 'import numpy as np\n'), ((2480, 2520), 'numpy.sqrt', 'np.sqrt', (['(r ** 2 + (a ** 2 + B ** 2) ** 2)'], {}), '(r ** 2 + (a ** 2 + B ** 2) ** 2)\n', (2487, 2520), True, 'import numpy as np\n'), ((3101, 3125), 'numpy.sqrt', 'np.sqrt', (['(z ** 2 + b ** 2)'], {}), '(z ** 2 + b ** 2)\n', (3108, 3125), True, 'import numpy as np\n'), ((3258, 3282), 'numpy.sqrt', 'np.sqrt', (['(z ** 2 + b ** 2)'], {}), '(z ** 2 + b ** 2)\n', (3265, 3282), True, 'import numpy as np\n'), ((5787, 5811), 'numpy.sqrt', 'np.sqrt', (['(r ** 2 + a ** 2)'], {}), '(r ** 2 + a ** 2)\n', (5794, 5811), True, 'import numpy as np\n'), ((834, 852), 'numpy.log', 'np.log', (['(1 + self.x)'], {}), '(1 + self.x)\n', (840, 852), True, 'import numpy as np\n'), ((4234, 4269), 'numpy.sqrt', 'np.sqrt', (['(relz ** 2.0 + bdisk ** 2.0)'], {}), '(relz ** 2.0 + bdisk ** 2.0)\n', (4241, 4269), True, 'import numpy as np\n'), ((4145, 4177), 'numpy.sqrt', 'np.sqrt', (['(z ** 2.0 + bdisk ** 2.0)'], {}), '(z ** 2.0 + bdisk ** 2.0)\n', (4152, 4177), True, 'import numpy as np\n'), ((4047, 4079), 'numpy.sqrt', 'np.sqrt', (['(z ** 2.0 + bdisk ** 2.0)'], {}), '(z ** 2.0 + bdisk ** 2.0)\n', (4054, 4079), True, 'import numpy as np\n'), ((4180, 4212), 'numpy.sqrt', 'np.sqrt', (['(z ** 2.0 + bdisk ** 2.0)'], {}), '(z ** 2.0 + bdisk ** 2.0)\n', (4187, 4212), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import csv
import math
def sigmoid(x):
    """Logistic function 1/(1+e^-x); element-wise on arrays."""
    neg_exp = np.exp(-x)
    return 1. / (1. + neg_exp)
def sigmoid2(x):
    """Scalar logistic function; echoes its argument to stdout."""
    print(x)
    exp_term = math.exp(-x)
    return 1. / (1. + exp_term)
def main():
    """Plot the logistic curve over 10000 sorted samples of U(-5, 5)."""
    samples = np.random.uniform(-5., 5., 10000)
    samples = np.sort(samples)
    curve = sigmoid(samples)
    plt.plot(samples, curve, 'r')
    plt.show()


if __name__ == '__main__':
    main()
"numpy.random.uniform",
"math.exp",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sort",
"numpy.exp"
] | [((195, 230), 'numpy.random.uniform', 'np.random.uniform', (['(-5.0)', '(5.0)', '(10000)'], {}), '(-5.0, 5.0, 10000)\n', (212, 230), True, 'import numpy as np\n'), ((232, 242), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (239, 242), True, 'import numpy as np\n'), ((312, 331), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""'], {}), "(x, y, 'r')\n", (320, 331), True, 'import matplotlib.pyplot as plt\n'), ((331, 341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (339, 341), True, 'import matplotlib.pyplot as plt\n'), ((107, 117), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (113, 117), True, 'import numpy as np\n'), ((162, 174), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (170, 174), False, 'import math\n')] |
"""@package MuSCADeT
"""
import numpy as np
def asinh_norm(data, Q=10, bands=None, range=None):
    """Normalise three frames of a data cube for RGB display.

    Parameter:
    ----------
    data: 'array'
        Cube of images with size nb x n1 x n2.
    Q: 'int'
        Stretching parameter for the arcsinh function.
    bands: 'array'
        Three indices between 0 and nb-1 selecting the slices used as
        RGB channels (default [0, 1, 2]).
    range: 'float'
        Stretch span.  If None (default), the min-to-max span of the
        selected slices is used; otherwise the given value is honoured.
        (The name shadows the builtin `range`; kept for compatibility.)

    Returns:
    --------
    normimg: 'array'
        uint8 array of shape n1 x n2 x 3, arcsinh-normalised.
    """
    if bands is None:
        bands = [0, 1, 2]
    img = data[bands]
    vmin = np.ma.min(img)
    # BUG FIX: the original condition was inverted (`if range is not
    # None`), which recomputed the span whenever a value WAS supplied
    # (silently discarding it) and crashed on an explicit range=None.
    if range is None:
        range = np.ma.max(img) - vmin
    normimg = np.ma.array(np.arcsinh(Q * (img - vmin) / range) / Q)
    # Rescale to 0..255 and clip before the uint8 conversion.
    normimg /= np.max(normimg)
    normimg *= 255
    normimg[normimg < 0] = 0
    normimg[normimg > 255] = 255
    normimg = normimg.astype(np.uint8)
    # Move the band axis last: (3, n1, n2) -> (n1, n2, 3) for imshow.
    normimg = np.transpose(normimg, axes=(1, 2, 0))
    return normimg
| [
"numpy.ma.min",
"numpy.transpose",
"numpy.max",
"numpy.arcsinh",
"numpy.ma.max"
] | [((683, 697), 'numpy.ma.min', 'np.ma.min', (['img'], {}), '(img)\n', (692, 697), True, 'import numpy as np\n'), ((842, 857), 'numpy.max', 'np.max', (['normimg'], {}), '(normimg)\n', (848, 857), True, 'import numpy as np\n'), ((989, 1026), 'numpy.transpose', 'np.transpose', (['normimg'], {'axes': '(1, 2, 0)'}), '(normimg, axes=(1, 2, 0))\n', (1001, 1026), True, 'import numpy as np\n'), ((740, 754), 'numpy.ma.max', 'np.ma.max', (['img'], {}), '(img)\n', (749, 754), True, 'import numpy as np\n'), ((787, 823), 'numpy.arcsinh', 'np.arcsinh', (['(Q * (img - vmin) / range)'], {}), '(Q * (img - vmin) / range)\n', (797, 823), True, 'import numpy as np\n')] |
import os
import click
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
def sq_sinv(y, y_):
    """Squared scale-invariant log error between two depth maps.

    Computes mean((log y - log y_ + alpha)^2) / 2 with
    alpha = mean(log y_ - log y), which makes the metric invariant to a
    global scale factor on either input.

    Parameters
    ----------
    y, y_ : array_like
        Predicted and reference depth maps.  Zero entries are replaced
        by 1 internally so the logarithm stays finite.

    Returns
    -------
    float
        The error; exactly 0 when y and y_ differ only by a global scale.
    """
    # BUG FIX: work on float copies -- the original replaced zeros in the
    # caller's arrays in place, which leaked into metrics computed later
    # on the same arrays.
    y = np.array(y, dtype=float)
    y_ = np.array(y_, dtype=float)
    # To avoid log(0) = -inf
    y_[y_ == 0] = 1
    y[y == 0] = 1
    alpha = np.mean(np.log(y_) - np.log(y))
    err = (np.log(y) - np.log(y_) + alpha) ** 2
    return np.mean(err) / 2
def pear_coeff(y, y_):
    """Pearson correlation coefficient between two flattened arrays."""
    flat_a = np.ravel(y)
    flat_b = np.ravel(y_)
    coeff, _pvalue = pearsonr(flat_a, flat_b)
    return coeff
@click.command()
@click.option('--gt_path',
              type=click.STRING,
              default='',
              help='Path to the folder containing the ground-truth images')
@click.option('--results_path',
              type=click.STRING,
              default='',
              help='Path to the folder containing the ground-truth images')
def calculate_metrics(gt_path, results_path):
    """Evaluate predicted depth maps against ground truth, pair by pair.

    Prints the squared log scale-invariant error and the Pearson
    coefficient for each image pair, then their means over the folder.
    """
    gt_files = sorted(os.listdir(gt_path))
    pred_files = sorted(os.listdir(results_path))
    score = []
    names = []
    for gt_name, pred_name in zip(gt_files, pred_files):
        # Ground truth is 16-bit; scale it down to 8-bit like the predictions.
        gt_img = (io.imread(os.path.join(gt_path, gt_name)) / 256).astype(np.uint8)
        pred_img = io.imread(os.path.join(results_path, pred_name))
        # Saturated/invalid ground-truth depth values are zeroed in both maps.
        invalid = gt_img >= 255
        gt_img[invalid] = 0
        pred_img[invalid] = 0
        score.append([sq_sinv(gt_img, pred_img), pear_coeff(gt_img, pred_img)])
        print('RMSE Squared log Scale-invariant error:', score[-1][0], ', Pearson Coefficient', score[-1][1])
        names.append(gt_name)
    m = np.mean(np.array(score), axis=0)
    print('Mean RMSE Squared log Scale-invariant error', m[0])
    print('Mean Pearson Coefficient', m[1])


if __name__ == '__main__':
    calculate_metrics()
"numpy.log",
"click.option",
"scipy.stats.pearsonr",
"click.command",
"numpy.mean",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((445, 460), 'click.command', 'click.command', ([], {}), '()\n', (458, 460), False, 'import click\n'), ((462, 585), 'click.option', 'click.option', (['"""--gt_path"""'], {'type': 'click.STRING', 'default': '""""""', 'help': '"""Path to the folder containing the ground-truth images"""'}), "('--gt_path', type=click.STRING, default='', help=\n 'Path to the folder containing the ground-truth images')\n", (474, 585), False, 'import click\n'), ((624, 752), 'click.option', 'click.option', (['"""--results_path"""'], {'type': 'click.STRING', 'default': '""""""', 'help': '"""Path to the folder containing the ground-truth images"""'}), "('--results_path', type=click.STRING, default='', help=\n 'Path to the folder containing the ground-truth images')\n", (636, 752), False, 'import click\n'), ((410, 425), 'scipy.stats.pearsonr', 'pearsonr', (['y', 'y_'], {}), '(y, y_)\n', (418, 425), False, 'from scipy.stats import pearsonr\n'), ((846, 865), 'os.listdir', 'os.listdir', (['gt_path'], {}), '(gt_path)\n', (856, 865), False, 'import os\n'), ((889, 913), 'os.listdir', 'os.listdir', (['results_path'], {}), '(results_path)\n', (899, 913), False, 'import os\n'), ((318, 333), 'numpy.mean', 'np.mean', (['err[:]'], {}), '(err[:])\n', (325, 333), True, 'import numpy as np\n'), ((1575, 1590), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (1583, 1590), True, 'import numpy as np\n'), ((234, 244), 'numpy.log', 'np.log', (['y_'], {}), '(y_)\n', (240, 244), True, 'import numpy as np\n'), ((247, 256), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (253, 256), True, 'import numpy as np\n'), ((1087, 1120), 'os.path.join', 'os.path.join', (['results_path', 'l2[i]'], {}), '(results_path, l2[i])\n', (1099, 1120), False, 'import os\n'), ((269, 278), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (275, 278), True, 'import numpy as np\n'), ((281, 291), 'numpy.log', 'np.log', (['y_'], {}), '(y_)\n', (287, 291), True, 'import numpy as np\n'), ((1012, 1040), 'os.path.join', 'os.path.join', 
(['gt_path', 'l1[i]'], {}), '(gt_path, l1[i])\n', (1024, 1040), False, 'import os\n')] |
import argparse
import glob
from operator import gt
from pathlib import Path
import pickle
import mayavi.mlab as mlab
import numpy as np
import torch
import re
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
from visual_utils import visualize_utils as V
class DemoDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin', **kwargs):
        """Demo dataset serving raw point clouds from disk.

        Args:
            dataset_cfg: DATA_CONFIG section of the model configuration.
            class_names: detection class names.
            training: training-mode flag forwarded to DatasetTemplate.
            root_path: a single point-cloud file or a directory of them.
            logger: logger instance.
            ext: point-cloud file extension, '.bin' or '.npy'.
            **kwargs: extra options kept in self.args (e.g. bifpn settings).
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.args = kwargs
        self.root_path = root_path
        self.ext = ext
        # A directory yields every file with the matching extension; a
        # single file becomes a one-element list.  Sorted for determinism.
        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
        data_file_list.sort()
        self.sample_file_list = data_file_list

    def __len__(self):
        # Number of point-cloud files found at construction time.
        return len(self.sample_file_list)

    def __getitem__(self, index):
        # .bin files are flat float32 records reshaped to (N, 4);
        # presumably (x, y, z, intensity) -- confirm against the data source.
        if self.ext == '.bin':
            points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)
        elif self.ext == '.npy':
            points = np.load(self.sample_file_list[index])
        else:
            raise NotImplementedError
        input_dict = {
            'points': points,
            'frame_id': index,
        }
        # prepare_data (inherited) applies the configured point-cloud
        # processing pipeline to the raw points.
        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
return data_dict
def parse_config():
    """Parse the demo's command-line options and load the model config.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated
        global pcdet config object.
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str,
                            default='cfgs/kitti_models/second.yaml',
                            help='specify the config for demo')
    arg_parser.add_argument('--data_path', type=str,
                            default='demo_data',
                            help='specify the point cloud data file or directory')
    arg_parser.add_argument('--ckpt', type=str, default=None,
                            help='specify the pretrained model')
    arg_parser.add_argument('--ext', type=str, default='.bin',
                            help='specify the extension of your point cloud data file')
    arg_parser.add_argument('--sample_folder', type=str, default='test',
                            help='Take demo sample from {test, train}')
    arg_parser.add_argument('--bifpn', type=int, nargs='*', default=[],
                            help='<Required> Set number of bifpn blocks')
    arg_parser.add_argument('--bifpn_skip', dest='bifpn_skip', action='store_true',
                            help='Use skip connections with BiFPN blocks')
    args = arg_parser.parse_args()
    # Populate the global pcdet config from the chosen YAML file.
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def main():
    """Run the point-cloud detection demo: parse the config, restore the model
    from a checkpoint, and visualize predictions against ground truth."""
    args, cfg = parse_config()
    kitti_infos = include_kitti_data(cfg, args.sample_folder)
    # Look up the ground-truth record whose image index matches the numeric
    # frame id embedded in the data path.
    # NOTE(review): raises IndexError if args.data_path contains no digits.
    gt_instance = list(filter(lambda x :x["image"]["image_idx"] == re.findall(f"[0-9]+", args.data_path)[-1], kitti_infos))
    gt_instance = gt_instance[0]["annos"]["gt_boxes_lidar"] if len(gt_instance) else None
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of CenterPoint----------------------')
    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger, bifpn=args.bifpn, bifpn_skip=args.bifpn_skip
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')
    # Build the detector from the config and load the checkpoint weights.
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()
    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)
            # Drops the first column of 'points' before plotting -- presumably
            # the batch index added by collate_batch; confirm.
            V.draw_scenes(
                points=data_dict['points'][:, 1:], gt_boxes=gt_instance, ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
            )
            # Block on each scene until the viewer window is dismissed.
            mlab.show(stop=True)
    logger.info('Demo done.')
def include_kitti_data(dataset_cfg, mode):
    """Collect the dataset info records (gt boxes, image metadata, ...) for
    *mode* from the configured .pkl info files.

    Info files that do not exist on disk are silently skipped.
    """
    collected = []
    for rel_path in dataset_cfg.DATA_CONFIG["INFO_PATH"][mode]:
        full_path = Path(dataset_cfg.DATA_CONFIG.DATA_PATH) / rel_path
        if full_path.exists():
            with open(full_path, 'rb') as handle:
                collected.extend(pickle.load(handle))
    return collected
if __name__ == '__main__':
    # Script entry point: run the visualization demo.
    main()
| [
"pcdet.models.load_data_to_gpu",
"visual_utils.visualize_utils.draw_scenes",
"numpy.load",
"argparse.ArgumentParser",
"pcdet.config.cfg_from_yaml_file",
"numpy.fromfile",
"mayavi.mlab.show",
"pcdet.utils.common_utils.create_logger",
"pathlib.Path",
"pickle.load",
"re.findall",
"torch.no_grad"
... | [((1719, 1768), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""arg parser"""'}), "(description='arg parser')\n", (1742, 1768), False, 'import argparse\n'), ((2671, 2709), 'pcdet.config.cfg_from_yaml_file', 'cfg_from_yaml_file', (['args.cfg_file', 'cfg'], {}), '(args.cfg_file, cfg)\n', (2689, 2709), False, 'from pcdet.config import cfg, cfg_from_yaml_file\n'), ((3064, 3092), 'pcdet.utils.common_utils.create_logger', 'common_utils.create_logger', ([], {}), '()\n', (3090, 3092), False, 'from pcdet.utils import common_utils\n'), ((3703, 3718), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3716, 3718), False, 'import torch\n'), ((3309, 3329), 'pathlib.Path', 'Path', (['args.data_path'], {}), '(args.data_path)\n', (3313, 3329), False, 'from pathlib import Path\n'), ((3916, 3943), 'pcdet.models.load_data_to_gpu', 'load_data_to_gpu', (['data_dict'], {}), '(data_dict)\n', (3932, 3943), False, 'from pcdet.models import build_network, load_data_to_gpu\n'), ((4009, 4209), 'visual_utils.visualize_utils.draw_scenes', 'V.draw_scenes', ([], {'points': "data_dict['points'][:, 1:]", 'gt_boxes': 'gt_instance', 'ref_boxes': "pred_dicts[0]['pred_boxes']", 'ref_scores': "pred_dicts[0]['pred_scores']", 'ref_labels': "pred_dicts[0]['pred_labels']"}), "(points=data_dict['points'][:, 1:], gt_boxes=gt_instance,\n ref_boxes=pred_dicts[0]['pred_boxes'], ref_scores=pred_dicts[0][\n 'pred_scores'], ref_labels=pred_dicts[0]['pred_labels'])\n", (4022, 4209), True, 'from visual_utils import visualize_utils as V\n'), ((4259, 4279), 'mayavi.mlab.show', 'mlab.show', ([], {'stop': '(True)'}), '(stop=True)\n', (4268, 4279), True, 'import mayavi.mlab as mlab\n'), ((4603, 4642), 'pathlib.Path', 'Path', (['dataset_cfg.DATA_CONFIG.DATA_PATH'], {}), '(dataset_cfg.DATA_CONFIG.DATA_PATH)\n', (4607, 4642), False, 'from pathlib import Path\n'), ((4772, 4786), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4783, 4786), False, 'import pickle\n'), ((1414, 1451), 
'numpy.load', 'np.load', (['self.sample_file_list[index]'], {}), '(self.sample_file_list[index])\n', (1421, 1451), True, 'import numpy as np\n'), ((1285, 1344), 'numpy.fromfile', 'np.fromfile', (['self.sample_file_list[index]'], {'dtype': 'np.float32'}), '(self.sample_file_list[index], dtype=np.float32)\n', (1296, 1344), True, 'import numpy as np\n'), ((2904, 2941), 're.findall', 're.findall', (['f"""[0-9]+"""', 'args.data_path'], {}), "(f'[0-9]+', args.data_path)\n", (2914, 2941), False, 'import re\n')] |
# Copyright (C) 2015, <NAME> <<EMAIL>>
#
# LICENSE: THE SOFTWARE IS PROVIDED "AS IS" UNDER THE
# ACADEMIC FREE LICENSE (AFL) v3.0.
#
import numpy as np
from mpi4py import MPI
from utils import accel
from _layermodels import LayerModel
from utils.decorators import DocInherit
# Alias so the decorator reads as a conventional lowercase name below.
doc_inherit = DocInherit
#------------------------------------------------------------------------------
class Poisson(LayerModel):
    """(FF-)Mixture of Poisson layer.

    Learns the weight matrix ``W`` with an EM-like online update; the
    layer activation is a softmax over the log-activations ``log(W) . y``.
    """
    def __init__(self):
        self.W = None        # weight matrix of the layer
        self.epsilon = None  # learning rate
        self._s = None       # last activation (posterior over units)
        self._dsY = None     # accumulated sufficient statistics s * y
        self._ds = None      # accumulated sufficient statistics s
        self._comm = MPI.COMM_WORLD
    @doc_inherit
    def feed(self, layer, multilayer, input_data, input_label, mode=''):
        #--- TODO: add selection option to config
        #-- select activate function
        I = self._activate_function(input_data)
        #self._lin_activate_function(W, input_data)
        #-- select competition function
        self._s = self._softmax(I)
        #self._max()
        if (mode == 'train'):
            self._accumulate_weight_update(input_data)
        return
    @doc_inherit
    def update(self):
        # Apply the accumulated statistics to W, then reset the accumulators.
        # The grayscale/RGB split is decided by catching the broadcasting
        # error raised when W carries an extra colour-channel axis.
        if (self._comm.Get_size() > 1):
            # multiple-thread parallel computing: sum statistics over all ranks
            DsY = np.zeros_like(self._dsY)
            Ds = np.zeros_like(self._ds)
            self._comm.Allreduce(self._dsY, DsY, op=MPI.SUM)
            self._comm.Allreduce(self._ds, Ds, op=MPI.SUM)
            try:
                # Grayscale Image
                self.W += self.epsilon*(DsY - Ds[:,np.newaxis]*self.W)
            except:
                # RGB Image
                self.W += self.epsilon*(
                    DsY - Ds[:,np.newaxis,np.newaxis]*self.W
                )
            self._dsY = np.zeros_like(self._dsY)
            self._ds = np.zeros_like(self._ds)
        else:
            # single-thread computing
            try:
                # Grayscale Image
                self.W += self.epsilon*(
                    self._dsY - self._ds[:,np.newaxis]*self.W
                )
            except:
                # RGB Image
                self.W += self.epsilon*(
                    self._dsY - self._ds[:,np.newaxis,np.newaxis]*self.W
                )
            self._dsY = np.zeros_like(self._dsY)
            self._ds = np.zeros_like(self._ds)
        return
    @doc_inherit
    def activation(self):
        return self._s
    @doc_inherit
    def set_weights(self, W):
        self.W = W
    @doc_inherit
    def get_weights(self):
        return self.W
    def _accumulate_weight_update(self,input_data):
        # On the first call the accumulators are still None, so the except
        # branch initialises them instead of adding.
        try:
            self._dsY += self._s[:,np.newaxis] * input_data
            self._ds += self._s
        except:
            self._dsY = self._s[:,np.newaxis] * input_data
            self._ds = self._s
        return
    def _activate_function(self, input_data):
        """Log-activation I = log(W) . y (with an RGB fallback path)."""
        try:
            #I = np.dot(np.log(W),Y)
            I = np.dot(accel.log(self.W),input_data)
            #I = np.dot(self._log(W),input_data)
        except:
            I = np.sum(np.sum(
                input_data[np.newaxis,:,:]*accel.log(self.W),
                axis=1),axis=1)
            #TODO: Check This!
        return I.astype('float64')
    def _lin_activate_function(self, W, input_data):
        """Linear activation I = W . y (alternative to the log activation)."""
        return np.dot(W,input_data)
    def _softmax(self, I):
        # softmax-function with overflow fix
        # over/underflow in np.exp(x) for approximately x > 700 or x < -740
        scale = 0
        if (I[np.argmax(I)] > 700):
            scale = I[np.argmax(I)] - 700
        if (I[np.argmin(I)] < -740 + scale):
            I[np.argmin(I)] = -740 + scale
        return np.exp(I-scale) / np.sum(np.exp(I-scale))
    def _max(self, I):
        # Hard winner-take-all competition: one-hot vector at the maximum.
        s = np.zeros_like(I)
        # BUGFIX: previously indexed with np.argmax(self._I); `self._I` is
        # never defined anywhere, so calling _max() raised AttributeError.
        s[np.argmax(I)] = 1.
        return s
#------------------------------------------------------------------------------
class Poisson_Recurrent(LayerModel):
    """Recurrent mixture-of-Poisson layer.

    Like ``Poisson``, but the softmax competition is modulated by top-down
    weights ``M`` taken from another layer of the multilayer network.
    """
    def __init__(self):
        # W: weight matrix; epsilon: learning rate; _s: last activation;
        # _dsY / _ds: accumulated sufficient statistics for update().
        self.W = None
        self.epsilon = None
        self._s = None
        self._dsY = None
        self._ds = None
        self._comm = MPI.COMM_WORLD
    @doc_inherit
    def feed(self, layer, multilayer, input_data, input_label, mode='train'):
        # Select the top-down modulation M from the source layer's weights:
        # sum over all classes for an unknown label (-1), otherwise take the
        # row of the given label.
        # NOTE(review): `get_inputsource()[1][15]` appears to parse a layer
        # index out of a name string -- confirm against the config format.
        if (input_label == -1):
            M = np.sum(
                multilayer.Layer[
                    int(layer.get_inputsource()[1][15])
                ].get_weights(),
                axis=0
            )
        else:
            M = multilayer.Layer[
                int(layer.get_inputsource()[1][15])
            ].get_weights()[input_label,:]
        I = self._activate_function(input_data)
        self._s = self._recurrent_softmax(I,M)
        if (mode == 'train'):
            self._accumulate_weight_update(input_data)
        return
    @doc_inherit
    def update(self):
        # Apply the accumulated statistics to W and reset the accumulators.
        # The grayscale/RGB split is decided by catching the broadcasting
        # error raised when W carries an extra colour-channel axis.
        if (self._comm.Get_size() > 1):
            # multiple-thread parallel computing: sum statistics over all ranks
            DsY = np.zeros_like(self._dsY)
            Ds = np.zeros_like(self._ds)
            self._comm.Allreduce(self._dsY, DsY, op=MPI.SUM)
            self._comm.Allreduce(self._ds, Ds, op=MPI.SUM)
            try:
                # Grayscale Image
                self.W += self.epsilon*(DsY - Ds[:,np.newaxis]*self.W)
            except:
                # RGB Image
                self.W += self.epsilon*(
                    DsY - Ds[:,np.newaxis,np.newaxis]*self.W
                )
            self._dsY = np.zeros_like(self._dsY)
            self._ds = np.zeros_like(self._ds)
        else:
            # single-thread computing
            try:
                # Grayscale Image
                self.W += self.epsilon*(
                    self._dsY - self._ds[:,np.newaxis]*self.W
                )
            except:
                # RGB Image
                self.W += self.epsilon*(
                    self._dsY - self._ds[:,np.newaxis,np.newaxis]*self.W
                )
            self._dsY = np.zeros_like(self._dsY)
            self._ds = np.zeros_like(self._ds)
        return
    @doc_inherit
    def activation(self):
        return self._s
    @doc_inherit
    def set_weights(self, W):
        self.W = W
    @doc_inherit
    def get_weights(self):
        return self.W
    def _accumulate_weight_update(self,input_data):
        # On the first call the accumulators are still None, so the except
        # branch initialises them instead of adding.
        try:
            self._dsY += self._s[:,np.newaxis] * input_data
            self._ds += self._s
        except:
            self._dsY = self._s[:,np.newaxis] * input_data
            self._ds = self._s
        return
    def _activate_function(self, input_data):
        """Log-activation I = log(W) . y."""
        #I = np.dot(np.log(self.W),Y)
        I = np.dot(accel.log(self.W),input_data)
        #I = np.dot(self._log(self.W),input_data)
        return I.astype('float64')
    def _recurrent_softmax(self, I, M):
        # softmax-function with overflow fix, weighted by the top-down term M
        # float64: over/underflow in np.exp(x) for ~ x>700 or x<-740
        scale = 0
        if (I[np.argmax(I)] > 700):
            scale = I[np.argmax(I)] - 700
        if (I[np.argmin(I)] < -740 + scale):
            I[np.argmin(I)] = -740 + scale
        # for float32:
        # if (I[np.argmax(I)] > 86):
        #     scale = I[np.argmax(I)] - 86
        return M*np.exp(I-scale) / np.sum(M*np.exp(I-scale))
"numpy.zeros_like",
"numpy.argmax",
"numpy.argmin",
"numpy.exp",
"numpy.dot",
"utils.accel.log"
] | [((3309, 3330), 'numpy.dot', 'np.dot', (['W', 'input_data'], {}), '(W, input_data)\n', (3315, 3330), True, 'import numpy as np\n'), ((3780, 3796), 'numpy.zeros_like', 'np.zeros_like', (['I'], {}), '(I)\n', (3793, 3796), True, 'import numpy as np\n'), ((1254, 1278), 'numpy.zeros_like', 'np.zeros_like', (['self._dsY'], {}), '(self._dsY)\n', (1267, 1278), True, 'import numpy as np\n'), ((1296, 1319), 'numpy.zeros_like', 'np.zeros_like', (['self._ds'], {}), '(self._ds)\n', (1309, 1319), True, 'import numpy as np\n'), ((1758, 1782), 'numpy.zeros_like', 'np.zeros_like', (['self._dsY'], {}), '(self._dsY)\n', (1771, 1782), True, 'import numpy as np\n'), ((1806, 1829), 'numpy.zeros_like', 'np.zeros_like', (['self._ds'], {}), '(self._ds)\n', (1819, 1829), True, 'import numpy as np\n'), ((2266, 2290), 'numpy.zeros_like', 'np.zeros_like', (['self._dsY'], {}), '(self._dsY)\n', (2279, 2290), True, 'import numpy as np\n'), ((2314, 2337), 'numpy.zeros_like', 'np.zeros_like', (['self._ds'], {}), '(self._ds)\n', (2327, 2337), True, 'import numpy as np\n'), ((3679, 3696), 'numpy.exp', 'np.exp', (['(I - scale)'], {}), '(I - scale)\n', (3685, 3696), True, 'import numpy as np\n'), ((3807, 3825), 'numpy.argmax', 'np.argmax', (['self._I'], {}), '(self._I)\n', (3816, 3825), True, 'import numpy as np\n'), ((4961, 4985), 'numpy.zeros_like', 'np.zeros_like', (['self._dsY'], {}), '(self._dsY)\n', (4974, 4985), True, 'import numpy as np\n'), ((5003, 5026), 'numpy.zeros_like', 'np.zeros_like', (['self._ds'], {}), '(self._ds)\n', (5016, 5026), True, 'import numpy as np\n'), ((5465, 5489), 'numpy.zeros_like', 'np.zeros_like', (['self._dsY'], {}), '(self._dsY)\n', (5478, 5489), True, 'import numpy as np\n'), ((5513, 5536), 'numpy.zeros_like', 'np.zeros_like', (['self._ds'], {}), '(self._ds)\n', (5526, 5536), True, 'import numpy as np\n'), ((5973, 5997), 'numpy.zeros_like', 'np.zeros_like', (['self._dsY'], {}), '(self._dsY)\n', (5986, 5997), True, 'import numpy as np\n'), ((6021, 6044), 
'numpy.zeros_like', 'np.zeros_like', (['self._ds'], {}), '(self._ds)\n', (6034, 6044), True, 'import numpy as np\n'), ((6644, 6661), 'utils.accel.log', 'accel.log', (['self.W'], {}), '(self.W)\n', (6653, 6661), False, 'from utils import accel\n'), ((2953, 2970), 'utils.accel.log', 'accel.log', (['self.W'], {}), '(self.W)\n', (2962, 2970), False, 'from utils import accel\n'), ((3511, 3523), 'numpy.argmax', 'np.argmax', (['I'], {}), '(I)\n', (3520, 3523), True, 'import numpy as np\n'), ((3590, 3602), 'numpy.argmin', 'np.argmin', (['I'], {}), '(I)\n', (3599, 3602), True, 'import numpy as np\n'), ((3635, 3647), 'numpy.argmin', 'np.argmin', (['I'], {}), '(I)\n', (3644, 3647), True, 'import numpy as np\n'), ((3704, 3721), 'numpy.exp', 'np.exp', (['(I - scale)'], {}), '(I - scale)\n', (3710, 3721), True, 'import numpy as np\n'), ((6946, 6958), 'numpy.argmax', 'np.argmax', (['I'], {}), '(I)\n', (6955, 6958), True, 'import numpy as np\n'), ((7025, 7037), 'numpy.argmin', 'np.argmin', (['I'], {}), '(I)\n', (7034, 7037), True, 'import numpy as np\n'), ((7070, 7082), 'numpy.argmin', 'np.argmin', (['I'], {}), '(I)\n', (7079, 7082), True, 'import numpy as np\n'), ((7220, 7237), 'numpy.exp', 'np.exp', (['(I - scale)'], {}), '(I - scale)\n', (7226, 7237), True, 'import numpy as np\n'), ((3556, 3568), 'numpy.argmax', 'np.argmax', (['I'], {}), '(I)\n', (3565, 3568), True, 'import numpy as np\n'), ((6991, 7003), 'numpy.argmax', 'np.argmax', (['I'], {}), '(I)\n', (7000, 7003), True, 'import numpy as np\n'), ((7247, 7264), 'numpy.exp', 'np.exp', (['(I - scale)'], {}), '(I - scale)\n', (7253, 7264), True, 'import numpy as np\n'), ((3122, 3139), 'utils.accel.log', 'accel.log', (['self.W'], {}), '(self.W)\n', (3131, 3139), False, 'from utils import accel\n')] |
# Fix every RNG (Python, NumPy, TensorFlow) for reproducible runs, and
# disable GPUs before TensorFlow is imported.
seedNum=10
import random, os, statistics, argparse, json
random.seed(seedNum)
import numpy
numpy.random.seed(seedNum)
# Must be set before TensorFlow initialises to force CPU-only execution.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
tf.random.set_seed(seedNum)
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn import preprocessing, decomposition, cluster, model_selection
#import keras
from keras import optimizers, regularizers
from keras.layers import Input, Dense
from keras.models import Model
def training(model, X, nfolds=5, bsize=20, epochs=50, fn='testresult'):
    """Cross-validate the autoencoder on X and report the best epoch.

    Retrains *model* from fresh random weights on each of *nfolds* folds,
    writes per-epoch mean/stdev of the validation MAE to the file *fn*, and
    returns ``[best_mean, stdev_at_best, best_epoch_index]``.

    Notes:
        Requires nfolds >= 2 (statistics.stdev needs two samples). Assumes
        the Keras history key 'val_mean_absolute_error' -- TODO confirm
        against the installed Keras version's metric naming.
    """
    kf5 = model_selection.KFold(n_splits=nfolds)
    randInit = tf.keras.initializers.RandomNormal()
    # Remember the layer shapes so every fold restarts from fresh random
    # weights of the same architecture.
    iniw = model.get_weights()
    initShapes = [i.shape for i in iniw]
    eachFoldData = []
    for trainIdx, testIdx in kf5.split(X):
        train, test = X[trainIdx], X[testIdx]
        model.set_weights([randInit(shape=x) for x in initShapes])
        history = model.fit(train, train, epochs=epochs, batch_size=bsize,
                            shuffle=True, validation_data=(test, test), verbose=2)
        eachFoldData.append(history.history)
    bestval = [None, None, None]
    # FIX: use a context manager so the results file is closed even if an
    # epoch's statistics raise (the original never closed the handle), and
    # compare against None explicitly so a best mean of exactly 0.0 is not
    # treated as "unset" by truthiness.
    with open(fn, 'w') as fw:
        for epochid in range(epochs):
            val = [oneFold['val_mean_absolute_error'][epochid] for oneFold in eachFoldData]
            meanv = statistics.mean(val)
            stdv = statistics.stdev(val)
            print(epochid + 1, "val:", meanv, stdv, file=fw)
            if bestval[0] is None or meanv < bestval[0]:
                bestval = [meanv, stdv, epochid]
    return bestval
def produce(model, X, encDim=2, hide1=30, act1='elu', act2='relu', epochs=None, bsize=20):
    """Train the autoencoder on X, then return the bottleneck encoding of X.

    The encoder half is rebuilt via modelEnc() with the trained weights.
    """
    model.fit(X, X, epochs=epochs, batch_size=bsize, shuffle=True, verbose=2)
    trained_encoder = modelEnc(model, len(X[0]), encDim=encDim, hide1=hide1, act1=act1, act2=act2)
    encoded = trained_encoder.predict(X)
    return encoded
def getSmilesRep(fn, fplen=512, radius=3):
    """Read SMILES strings (one per line) from *fn* and return their Morgan
    fingerprints as a float array of shape (n_molecules, fplen).

    Args:
        fn: path to a text file containing one SMILES string per line.
        fplen: number of bits per fingerprint.
        radius: Morgan fingerprint radius.
    """
    data = []
    # FIX: use a context manager so the file handle is closed
    # deterministically (the original left it to the garbage collector).
    with open(fn) as smiles_file:
        for smi in smiles_file:
            bits = AllChem.GetMorganFingerprintAsBitVect(
                AllChem.MolFromSmiles(smi), radius, nBits=fplen
            ).ToBitString()
            data.append([float(x) for x in bits])
    return numpy.array(data)
def makeModel(inputDim, encDim=2, hide1=30, hide2=30, act1='elu', act2='relu'):
    """Build and compile a dense autoencoder:
    inputDim -> hide1 -> encDim (bottleneck) -> hide2 -> inputDim.

    Args:
        inputDim: width of the input/output layer.
        encDim: bottleneck width.
        hide1, hide2: widths of the encoder/decoder hidden layers.
        act1, act2: activations for the outer and inner layers respectively.
    """
    input_img = Input(shape=(inputDim,))
    # Encoder: two dense layers down to the bottleneck.
    encoded1 = Dense(hide1, activation=act1)(input_img)
    encoded = Dense(encDim, activation=act2)(encoded1)
    # Decoder: mirror of the encoder back to the input dimension.
    decoded1 = Dense(hide2, activation=act2)(encoded)
    decoded = Dense(inputDim, activation=act1)(decoded1)
    # FIX: dropped the unused `encoder = Model(input_img, encoded)` local
    # (dead code) and the duplicated comment; the encoder is rebuilt by
    # modelEnc() when it is actually needed.
    autoencoder = Model(input_img, decoded)
    optim = optimizers.Adam()
    autoencoder.compile(optimizer=optim, loss="mean_squared_error", metrics=['mean_absolute_error',] )
    return autoencoder
def modelEnc(model, inputDim, encDim=2, hide1=30, act1='elu', act2='relu'):
    """Build the encoder half of the autoencoder and copy the trained
    encoder weights from *model*.

    The architecture must mirror the first two layers built by makeModel().
    """
    input_img = Input(shape=(inputDim,))
    encoded1 = Dense(hide1, activation=act1)(input_img)
    encoded = Dense(encDim, activation=act2)(encoded1)
    encModel = Model(input_img, encoded)
    # Copy the leading weights of the full autoencoder (its encoder part)
    # into the standalone encoder.
    # FIX: removed the leftover debug prints of the weight shapes and
    # replaced the enumerate/filter weight copy with a plain slice.
    n_enc_weights = len(encModel.get_weights())
    encModel.set_weights(model.get_weights()[:n_enc_weights])
    return encModel
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, type=str)
parser.add_argument('--n1', required=True, type=int)
parser.add_argument('--n2', required=True, type=int)
parser.add_argument('--act1', required=True, type=str)
parser.add_argument('--act2', required=True, type=str)
parser.add_argument('--encdim', required=True, type=int)
parser.add_argument('--radius', required=True, type=int)
parser.add_argument('--fplen', required=True, type=int)
parser.add_argument('--produce', required=False, type=int)
#parser.add_argument('--act3', required=True, type=str)
args = parser.parse_args()
print("ARGS", args)
#raise
data = getSmilesRep(args.input, fplen=args.fplen, radius=args.radius)
print("DATA", data)
if args.produce:
model = makeModel(len(data[0]), encDim=args.encdim, hide1=args.n1, hide2=args.n2, act1=args.act1, act2=args.act2)
numres = produce(model, data, encDim=args.encdim, hide1=args.n1, act1=args.act1, act2=args.act2, epochs=args.produce)
res = dict()
for idx,smi in enumerate(open(args.input)):
smi=smi.strip()
res[smi] = tuple([float(x) for x in numres[idx]])
json.dump(res, open('smiEncoded.json', 'w'))
else:
model = makeModel(len(data[0]), encDim=args.encdim, hide1=args.n1, hide2=args.n2, act1=args.act1, act2=args.act2)
fname =f"wyniki_autoenc_{args.encdim}dims_{args.n1}_{args.n2}_{args.act1}_{args.act2}_fp{args.fplen}_radius{args.radius}"
res= training(model, data, nfolds=5, bsize=20, epochs=70, fn=fname)
print(fname, res) | [
"tensorflow.random.set_seed",
"numpy.random.seed",
"argparse.ArgumentParser",
"rdkit.Chem.AllChem.MolFromSmiles",
"statistics.stdev",
"keras.optimizers.Adam",
"sklearn.model_selection.KFold",
"keras.models.Model",
"keras.layers.Dense",
"random.seed",
"tensorflow.keras.initializers.RandomNormal",... | [((57, 77), 'random.seed', 'random.seed', (['seedNum'], {}), '(seedNum)\n', (68, 77), False, 'import random, os, statistics, argparse, json\n'), ((91, 117), 'numpy.random.seed', 'numpy.random.seed', (['seedNum'], {}), '(seedNum)\n', (108, 117), False, 'import numpy\n'), ((184, 211), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seedNum'], {}), '(seedNum)\n', (202, 211), True, 'import tensorflow as tf\n'), ((550, 588), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': 'nfolds'}), '(n_splits=nfolds)\n', (571, 588), False, 'from sklearn import preprocessing, decomposition, cluster, model_selection\n'), ((631, 667), 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', ([], {}), '()\n', (665, 667), True, 'import tensorflow as tf\n'), ((2218, 2235), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (2229, 2235), False, 'import numpy\n'), ((2500, 2524), 'keras.layers.Input', 'Input', ([], {'shape': '(inputDim,)'}), '(shape=(inputDim,))\n', (2505, 2524), False, 'from keras.layers import Input, Dense\n'), ((2957, 2982), 'keras.models.Model', 'Model', (['input_img', 'encoded'], {}), '(input_img, encoded)\n', (2962, 2982), False, 'from keras.models import Model\n'), ((3118, 3143), 'keras.models.Model', 'Model', (['input_img', 'decoded'], {}), '(input_img, decoded)\n', (3123, 3143), False, 'from keras.models import Model\n'), ((3366, 3383), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (3381, 3383), False, 'from keras import optimizers, regularizers\n'), ((3643, 3667), 'keras.layers.Input', 'Input', ([], {'shape': '(inputDim,)'}), '(shape=(inputDim,))\n', (3648, 3667), False, 'from keras.layers import Input, Dense\n'), ((3894, 3919), 'keras.models.Model', 'Model', (['input_img', 'encoded'], {}), '(input_img, encoded)\n', (3899, 3919), False, 'from keras.models import Model\n'), ((4367, 4392), 'argparse.ArgumentParser', 
'argparse.ArgumentParser', ([], {}), '()\n', (4390, 4392), False, 'import random, os, statistics, argparse, json\n'), ((1351, 1371), 'statistics.mean', 'statistics.mean', (['val'], {}), '(val)\n', (1366, 1371), False, 'import random, os, statistics, argparse, json\n'), ((1388, 1409), 'statistics.stdev', 'statistics.stdev', (['val'], {}), '(val)\n', (1404, 1409), False, 'import random, os, statistics, argparse, json\n'), ((2599, 2628), 'keras.layers.Dense', 'Dense', (['hide1'], {'activation': 'act1'}), '(hide1, activation=act1)\n', (2604, 2628), False, 'from keras.layers import Input, Dense\n'), ((2694, 2724), 'keras.layers.Dense', 'Dense', (['encDim'], {'activation': 'act2'}), '(encDim, activation=act2)\n', (2699, 2724), False, 'from keras.layers import Input, Dense\n'), ((2807, 2836), 'keras.layers.Dense', 'Dense', (['hide2'], {'activation': 'act2'}), '(hide2, activation=act2)\n', (2812, 2836), False, 'from keras.layers import Input, Dense\n'), ((2900, 2932), 'keras.layers.Dense', 'Dense', (['inputDim'], {'activation': 'act1'}), '(inputDim, activation=act1)\n', (2905, 2932), False, 'from keras.layers import Input, Dense\n'), ((3742, 3771), 'keras.layers.Dense', 'Dense', (['hide1'], {'activation': 'act1'}), '(hide1, activation=act1)\n', (3747, 3771), False, 'from keras.layers import Input, Dense\n'), ((3838, 3868), 'keras.layers.Dense', 'Dense', (['encDim'], {'activation': 'act2'}), '(encDim, activation=act2)\n', (3843, 3868), False, 'from keras.layers import Input, Dense\n'), ((2119, 2145), 'rdkit.Chem.AllChem.MolFromSmiles', 'AllChem.MolFromSmiles', (['smi'], {}), '(smi)\n', (2140, 2145), False, 'from rdkit.Chem import AllChem\n')] |
from models.transformers.bert import BERT
from tests.entities.embedding_configuration import EmbeddingConfiguration
from typing import List
from models.model_base import ModelBase
from services.vocabulary_service import VocabularyService
import numpy as np
import pickle
from models.simple.skip_gram import SkipGram
from models.simple.cbow import CBOW
from tests.fakes.log_service_fake import LogServiceFake
from enums.language import Language
from enums.configuration import Configuration
from enums.challenge import Challenge
from enums.ocr_output_type import OCROutputType
from entities.cache.cache_options import CacheOptions
import os
from tests.fakes.non_context_service_fake import NonContextServiceFake
from dependency_injection.ioc_container import IocContainer
import dependency_injector.providers as providers
import torch
import unittest
from sklearn.metrics.pairwise import cosine_distances, cosine_similarity
from scipy import spatial
import csv
import pandas as pd
import tests.constants.embedding_models as embedding_models
def initialize_container(
        configuration: Configuration,
        ocr_output_type: OCROutputType,
        language: Language,
        seed: int = 13,
        override_args: dict = None) -> IocContainer:
    """Create an IoC container wired with fake services for tests.

    Args:
        override_args: optional mapping that replaces individual entries of
            the default test arguments.
    """
    arguments = {
        'learning_rate': 1e-3,
        'data_folder': os.path.join('tests', 'data'),
        'challenge': Challenge.OCREvaluation,
        'configuration': configuration,
        'language': language,
        'output_folder': os.path.join('tests', 'results'),
        'ocr_output_type': ocr_output_type,
        'seed': seed
    }
    if override_args is not None:
        arguments.update(override_args)

    container = IocContainer()
    # Replace the real services with deterministic fakes.
    container.arguments_service.override(
        providers.Factory(
            NonContextServiceFake,
            arguments))
    container.log_service.override(providers.Factory(LogServiceFake))
    return container
def calculate_context_words(
        configuration: Configuration,
        vocabulary_service: VocabularyService,
        model: ModelBase,
        target_word: str,
        neighbourhood_set_size: int = 50) -> List[str]:
    """Return the `neighbourhood_set_size` vocabulary words closest (by
    cosine similarity) to *target_word* in the model's embedding space,
    most similar first; the target word itself is included."""
    target_id = vocabulary_service.string_to_id(target_word)
    target_embeddings = model.get_embeddings([target_word])
    # Pull the full embedding matrix out of the model; the attribute holding
    # it depends on the architecture.
    # NOTE(review): for any other configuration all_embeddings stays None and
    # the len() below raises TypeError.
    all_embeddings = None
    if configuration == Configuration.SkipGram:
        all_embeddings = list(model._embedding_layer._embeddings_target.parameters())[0].detach().cpu().tolist()
    elif configuration == Configuration.CBOW:
        all_embeddings = list(model._embeddings.parameters())[0].detach().cpu().tolist()
    similarities = []
    for j in range(len(all_embeddings)):
        print(f'Processing {target_word} - {j}/{len(all_embeddings)} \r', end='')
        # Sanity check: the matrix row of the target id must equal the
        # embedding returned by get_embeddings.
        if j == target_id:
            assert all_embeddings[j] == target_embeddings
        # similarity = cosine_similarity(target_embeddings, all_embeddings[j])
        similarity = 1 - spatial.distance.cosine(target_embeddings, all_embeddings[j])
        similarities.append(similarity)
    # Sort indices by descending similarity.
    indices = np.argsort(similarities)[::-1]
    sorted_similarities = [similarities[x] for x in indices]
    assert sorted_similarities[-2] < sorted_similarities[1]
    sorted_words = vocabulary_service.ids_to_strings(indices)
    return sorted_words[:neighbourhood_set_size]
def initialize_model(
        arguments_service,
        vocabulary_service,
        data_service,
        log_service,
        tokenize_service,
        ocr_output_type: OCROutputType,
        language: Language,
        configuration: Configuration,
        initialize_randomly: bool,
        learning_rate: float):
    """Create the model for *configuration* and load its BEST checkpoint
    from the ocr-evaluation results folder."""
    loaded_model = create_model(
        configuration,
        arguments_service,
        vocabulary_service,
        data_service,
        log_service,
        tokenize_service,
        ocr_output_type=ocr_output_type)

    checkpoint_dir = os.path.join('results', 'ocr-evaluation', configuration.value, language.value)
    arg_overrides = {
        'initialize_randomly': initialize_randomly,
        'configuration': configuration.value,
        'learning_rate': learning_rate,
        # BERT tunes the full vocabulary, so the occurrence cut-off only
        # applies to the word2vec-style models.
        'minimal_occurrence_limit': 5 if configuration != Configuration.BERT else None,
        # 'checkpoint_name': 'local-test-pre',
    }
    loaded_model.load(
        path=checkpoint_dir,
        name_prefix='BEST',
        name_suffix=None,
        load_model_dict=True,
        use_checkpoint_name=True,
        checkpoint_name=None,
        overwrite_args=arg_overrides)
    return loaded_model
def create_model(
        configuration: Configuration,
        arguments_service,
        vocabulary_service,
        data_service,
        log_service,
        tokenize_service,
        ocr_output_type: OCROutputType,
        pretrained_matrix = None):
    """Instantiate the embedding model matching *configuration*.

    Raises:
        ValueError: if the configuration is not one of SkipGram/CBOW/BERT.
    """
    if configuration == Configuration.SkipGram:
        result = SkipGram(
            arguments_service=arguments_service,
            vocabulary_service=vocabulary_service,
            data_service=data_service,
            log_service=log_service,
            pretrained_matrix=pretrained_matrix,
            ocr_output_type=ocr_output_type)
    elif configuration == Configuration.CBOW:
        result = CBOW(
            arguments_service=arguments_service,
            vocabulary_service=vocabulary_service,
            data_service=data_service,
            log_service=log_service,
            pretrained_matrix=pretrained_matrix,
            ocr_output_type=ocr_output_type)
    elif configuration == Configuration.BERT:
        # BUGFIX: this branch previously repeated the CBOW comparison, which
        # made the BERT construction unreachable and left `result` unbound
        # for BERT configurations.
        result = BERT(
            arguments_service=arguments_service,
            data_service=data_service,
            log_service=log_service,
            tokenize_service=tokenize_service,
            overwrite_initialization=False)
    else:
        # Previously fell through with `result` unbound (NameError).
        raise ValueError(f'Unsupported configuration: {configuration}')
    return result
# Probe words whose embedding neighbourhoods are logged for each language.
target_words = {
    Language.English: ['man', 'new', 'time', 'day', 'good', 'old', 'little', 'one', 'two', 'three'],
    Language.Dutch: ['man', 'jaar', 'tijd', 'mensen', 'dag', 'huis', 'dier', 'afbeelding', 'werk', 'naam', 'groot', 'kleine', 'twee', 'drie', 'vier', 'vijf']
}
def log_neighbourhoods(
        vocabulary_service: VocabularyService,
        model: ModelBase,
        embedding_configuration: EmbeddingConfiguration,
        output_folder: str):
    """Compute and persist the embedding neighbourhood of every probe word
    of this configuration's language."""
    for word in target_words[embedding_configuration.language]:
        neighbours = calculate_context_words(
            embedding_configuration.configuration, vocabulary_service, model, word)
        save_context_words(
            'skip-gram-base', word, neighbours, output_folder, embedding_configuration)
def save_context_words(
        model_name: str,
        target_word: str,
        context_words: List[str],
        output_folder: str,
        embedding_configuration: EmbeddingConfiguration):
    """Append one row with *target_word* and its nearest neighbours to the
    context-words CSV in *output_folder*, creating the header if needed.

    NOTE(review): `model_name` is currently unused -- it is neither written
    to the CSV nor part of the file path; confirm whether it should become
    a CSV column.
    """
    csv_fieldnames = ['language', 'configuration', 'randomly_initialized', 'ocr_type', 'learning_rate', 'target_word', 'context_words']
    file_path = os.path.join(output_folder, 'context-words.csv')
    # Write the header only when the file is being created.
    init_header = not os.path.exists(file_path)
    with open(file_path, 'a', encoding='utf-8') as csv_file:
        csv_writer = csv.DictWriter(csv_file, fieldnames=csv_fieldnames)
        if init_header:
            csv_writer.writeheader()
        csv_writer.writerow({
            'language': embedding_configuration.language,
            'configuration': embedding_configuration.configuration,
            'randomly_initialized': embedding_configuration.initialize_randomly,
            'learning_rate': str(embedding_configuration.lr),
            # 'configuration': embedding_configuration.lr,
            'ocr_type': embedding_configuration.ocr_output_type,
            'target_word': target_word,
            'context_words': ', '.join(context_words)
        })
def log_embedding_layers(model):
    """Print the mean weight of the model's context and target embeddings."""
    embedding_layer = model._embedding_layer
    print(f'Base Context mean: {embedding_layer._embeddings_context.weight.mean()}')
    print(f'Base Input mean: {embedding_layer._embeddings_target.weight.mean()}')
class TestBaselineSkipGram(unittest.TestCase):
    """Grid evaluation: logs embedding neighbourhoods for every trained
    model configuration into a context-words CSV."""
    def test_baseline_convergence(self):
        # Start from a clean context-words CSV so each run logs fresh rows.
        output_folder = os.path.join('tests', 'results')
        file_path = os.path.join(output_folder, 'context-words.csv')
        if os.path.exists(file_path):
            os.remove(file_path)
        # Iterate the full grid of (language, configuration, learning rate,
        # random-vs-pretrained initialisation, OCR output type).
        for language, configurations in embedding_models.configurations.items():
            for configuration, lrs in configurations.items():
                for lr, initialize_randomly_to_output_types in lrs.items():
                    for initialize_randomly, output_types in initialize_randomly_to_output_types.items():
                        for ocr_output_type in output_types:
                            # if configuration != Configuration.SkipGram:
                            #     continue
                            container_base = initialize_container(configuration, ocr_output_type, language)
                            vocabulary_service = container_base.vocabulary_service()
                            arguments_service = container_base.arguments_service()
                            # Load the BEST checkpoint for this grid point.
                            skip_gram_base = initialize_model(
                                arguments_service,
                                vocabulary_service,
                                container_base.data_service(),
                                container_base.log_service(),
                                container_base.tokenize_service(),
                                ocr_output_type=ocr_output_type,
                                language=language,
                                configuration=configuration,
                                initialize_randomly=initialize_randomly,
                                learning_rate=lr)
                            # log_embedding_layers(skip_gram_base)
                            embedding_configuration = EmbeddingConfiguration(language, configuration, lr, initialize_randomly, ocr_output_type)
                            log_neighbourhoods(vocabulary_service, skip_gram_base, embedding_configuration, output_folder=output_folder)
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"unittest.main",
"scipy.spatial.distance.cosine",
"os.remove",
"models.transformers.bert.BERT",
"os.path.exists",
"numpy.argsort",
"dependency_injector.providers.Factory",
"tests.entities.embedding_configuration.EmbeddingConfiguration",
"models.simple.cbow.CBOW",
"dependency_injection.ioc_containe... | [((1720, 1734), 'dependency_injection.ioc_container.IocContainer', 'IocContainer', ([], {}), '()\n', (1732, 1734), False, 'from dependency_injection.ioc_container import IocContainer\n'), ((6733, 6781), 'os.path.join', 'os.path.join', (['output_folder', '"""context-words.csv"""'], {}), "(output_folder, 'context-words.csv')\n", (6745, 6781), False, 'import os\n'), ((9880, 9895), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9893, 9895), False, 'import unittest\n'), ((1305, 1334), 'os.path.join', 'os.path.join', (['"""tests"""', '"""data"""'], {}), "('tests', 'data')\n", (1317, 1334), False, 'import os\n'), ((1477, 1509), 'os.path.join', 'os.path.join', (['"""tests"""', '"""results"""'], {}), "('tests', 'results')\n", (1489, 1509), False, 'import os\n'), ((1785, 1838), 'dependency_injector.providers.Factory', 'providers.Factory', (['NonContextServiceFake', 'custom_args'], {}), '(NonContextServiceFake, custom_args)\n', (1802, 1838), True, 'import dependency_injector.providers as providers\n'), ((1901, 1934), 'dependency_injector.providers.Factory', 'providers.Factory', (['LogServiceFake'], {}), '(LogServiceFake)\n', (1918, 1934), True, 'import dependency_injector.providers as providers\n'), ((3070, 3094), 'numpy.argsort', 'np.argsort', (['similarities'], {}), '(similarities)\n', (3080, 3094), True, 'import numpy as np\n'), ((4723, 4938), 'models.simple.skip_gram.SkipGram', 'SkipGram', ([], {'arguments_service': 'arguments_service', 'vocabulary_service': 'vocabulary_service', 'data_service': 'data_service', 'log_service': 'log_service', 'pretrained_matrix': 'pretrained_matrix', 'ocr_output_type': 'ocr_output_type'}), '(arguments_service=arguments_service, vocabulary_service=\n vocabulary_service, data_service=data_service, log_service=log_service,\n pretrained_matrix=pretrained_matrix, ocr_output_type=ocr_output_type)\n', (4731, 4938), False, 'from models.simple.skip_gram import SkipGram\n'), ((6805, 6830), 
'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (6819, 6830), False, 'import os\n'), ((6914, 6965), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'csv_fieldnames'}), '(csv_file, fieldnames=csv_fieldnames)\n', (6928, 6965), False, 'import csv\n'), ((7885, 7917), 'os.path.join', 'os.path.join', (['"""tests"""', '"""results"""'], {}), "('tests', 'results')\n", (7897, 7917), False, 'import os\n'), ((7938, 7986), 'os.path.join', 'os.path.join', (['output_folder', '"""context-words.csv"""'], {}), "(output_folder, 'context-words.csv')\n", (7950, 7986), False, 'import os\n'), ((7998, 8023), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (8012, 8023), False, 'import os\n'), ((8099, 8138), 'tests.constants.embedding_models.configurations.items', 'embedding_models.configurations.items', ([], {}), '()\n', (8136, 8138), True, 'import tests.constants.embedding_models as embedding_models\n'), ((2953, 3014), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['target_embeddings', 'all_embeddings[j]'], {}), '(target_embeddings, all_embeddings[j])\n', (2976, 3014), False, 'from scipy import spatial\n'), ((3859, 3937), 'os.path.join', 'os.path.join', (['"""results"""', '"""ocr-evaluation"""', 'configuration.value', 'language.value'], {}), "('results', 'ocr-evaluation', configuration.value, language.value)\n", (3871, 3937), False, 'import os\n'), ((5066, 5277), 'models.simple.cbow.CBOW', 'CBOW', ([], {'arguments_service': 'arguments_service', 'vocabulary_service': 'vocabulary_service', 'data_service': 'data_service', 'log_service': 'log_service', 'pretrained_matrix': 'pretrained_matrix', 'ocr_output_type': 'ocr_output_type'}), '(arguments_service=arguments_service, vocabulary_service=\n vocabulary_service, data_service=data_service, log_service=log_service,\n pretrained_matrix=pretrained_matrix, ocr_output_type=ocr_output_type)\n', (5070, 5277), False, 'from models.simple.cbow import CBOW\n'), ((8037, 
8057), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (8046, 8057), False, 'import os\n'), ((5405, 5573), 'models.transformers.bert.BERT', 'BERT', ([], {'arguments_service': 'arguments_service', 'data_service': 'data_service', 'log_service': 'log_service', 'tokenize_service': 'tokenize_service', 'overwrite_initialization': '(False)'}), '(arguments_service=arguments_service, data_service=data_service,\n log_service=log_service, tokenize_service=tokenize_service,\n overwrite_initialization=False)\n', (5409, 5573), False, 'from models.transformers.bert import BERT\n'), ((9621, 9714), 'tests.entities.embedding_configuration.EmbeddingConfiguration', 'EmbeddingConfiguration', (['language', 'configuration', 'lr', 'initialize_randomly', 'ocr_output_type'], {}), '(language, configuration, lr, initialize_randomly,\n ocr_output_type)\n', (9643, 9714), False, 'from tests.entities.embedding_configuration import EmbeddingConfiguration\n')] |
import numpy as np
import pandas as pd
import cv2
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
from pdb import *
from pathlib import Path
from skimage.io import imread
#export
from fastai.torch_basics import *
from fastai.data.all import *
from fastai.vision.all import *
# from PIL import Images
VAL_IMAGE_IDS = ['8f6e49e474ebb649a1e99662243d51a46cc9ba0c9c8f1efe2e2b662a81b48de1',
'<KEY>',
'<KEY>',
'a4c729efb5059893a8b62c7abeba171cb516836f8a20468f6b176dfe2f6f84d1',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'0b2e702f90aee4fff2bc6e4326308d50cf04701082e718d4f831c8959fbcda93',
'<KEY>',
'b6edad733399c83c8eb7a59c0d37b54e10cc0d59894e39ff843884d84f61dee1',
'bf566e75d5cb0196de4139573f8bbbda0fa38d5048edf7267fe8793dcc094a66',
'e52960d31f8bddf85400259beb4521383f5ceface1080be3429f2f926cc9b5c2',
'ddf1bf458312de2895dd9cc5ce7ec9d334ad54c35edc96ad6001d20b1d8588d8',
'<KEY>',
'dbbfe08a52688d0ac8de9161cbb17cb201e3991aacab8ab8a77fe0e203a69481',
'3b0709483b1e86449cc355bb797e841117ba178c6ae1ed955384f4da6486aa20',
'<KEY>',
'c0f172831b8017c769ff0e80f85b096ac939e79de3d524e0826fbb95221365da',
'ebc18868864ad075548cc1784f4f9a237bb98335f9645ee727dac8332a3e3716',
'66236902b874b7e4b3891db63a69f6d56f6edcec6aca7ba3c6871d73e7b4c34f',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'd4d88391bc399a3715440d4da9f8b7a973e010dc1edd9551df2e5a538685add5',
'<KEY>',
'dad607a203483439fcbc2acecd0a39fb5e5a94a32a94348f5c802c79cfeb6e7c',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'b76ff33ae9da28f9cd8bdce465d45f1eca399db3ffa83847535708e0d511fe38',
'57bd029b19c1b382bef9db3ac14f13ea85e36a6053b92e46caedee95c05847ab',
'1db1cddf28e305c9478519cfac144eee2242183fe59061f1f15487e925e8f5b5',
'6bd18a218d25247dc456aed124c066a6397fb93086e860e4d04014bfa9c9555d',
'd7d12a2acc47a94961aeb56fd56e8a0873016af75f5dd10915de9db8af8e4f5e',
'<KEY>',
'7798ca1ddb3133563e290c36228bc8f8f3c9f224e096f442ef0653856662d121',
'<KEY>',
'33a5b0ff232b425796ee6a9dd5b516ff9aad54ca723b4ec490bf5cd9b2e2a731',
'c3bec1066aae20f48b82975e7e8b684cd67635a8baf211e4d9e3e13bc54c5d06',
'<KEY>',
'f26f4c2c70c38fe12e00d5a814d5116691f2ca548908126923fd76ddd665ed24',
'<KEY>',
'<KEY>',
'<KEY>',
'a102535b0e88374bea4a1cfd9ee7cb3822ff54f4ab2a9845d428ec22f9ee2288',
'8d05fb18ee0cda107d56735cafa6197a31884e0a5092dc6d41760fb92ae23ab4',
'6f8197baf738986a1ec3b6ba92b567863d897a739376b7cec5599ad6cecafdfc',
'<KEY>',
'3d0ca3498d97edebd28dbc7035eced40baa4af199af09cbb7251792accaa69fe',
'<KEY>',
'1a75e9f15481d11084fe66bc2a5afac6dc5bec20ed56a7351a6d65ef0fe8762b',
'<KEY>',
'5488e8df5440ee5161fdfae3aeccd2ee396636430065c90e3f1f73870a975991',
'8cdbdda8b3a64c97409c0160bcfb06eb8e876cedc3691aa63ca16dbafae6f948',
'c2a646a819f59a4e816e0ee8ea00ba10d5de9ac20b5a435c41192637790dabee',
'<KEY>',
'<KEY>',
'<KEY>',
'2dd3356f2dcf470aec4003800744dfec6490e75d88011e1d835f4f3d60f88e7a',
'2ab91a4408860ae8339689ed9f87aa9359de1bdd4ca5c2eab7fff7724dbd6707',
'1e61ecf354cb93a62a9561db87a53985fb54e001444f98112ed0fc623fad793e',
'2ad489c11ed8b77a9d8a2339ac64ffc38e79281c03a2507db4688fd3186c0fe5',
'be771d6831e3f8f1af4696bc08a582f163735db5baf9906e4729acc6a05e1187']
# bounding boxes
def bounding_boxes(img):
ret, thresh = cv2.threshold(img, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = sorted(contours, key=lambda x: len(x))[-1]
return cv2.boundingRect(cnt) # x, y, w, h
def bbox2center(bbox):
bbox = np.array(bbox)
x = ((bbox[:, 0] + bbox[:, 2]//2))
y = ((bbox[:, 1] + bbox[:, 3]//2))
return np.stack((x,y),axis=-1)
def get_pt_annotations(DATA_PATH):
pts_raw_path = DATA_PATH/'pt_annotations.pkl'
if pts_raw_path.exists():
pt_annotations = pts_raw_path.load()
else:
pt_annotations = []
for tid in progress_bar(train_ids):
path = TRAIN_PATH/tid
img_path = Path(tid)/'images'/f'{tid}.png'
bboxes = []
for mask_file in (path/'masks').glob('*'):
mask = imread(str(mask_file))
bbox = bounding_boxes(mask)
bboxes.append(bbox)
pt_annotations.append((img_path, bbox2center(bboxes)))
(DATA_PATH/'pt_annotations.pkl').save(pt_annotations)
return pt_annotations
# Gaussian
@patch
def affine_coord(x: TensorMask, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Reflection, align_corners=True):
add_dim = (x.ndim==3)
if add_dim: x = x[:,None]
res = TensorImage.affine_coord(x.float(), mat, coord_tfm, sz, mode, pad_mode, align_corners)#.long() - We use gaussian kernels. Mask must be float
if add_dim: res = res[:,0]
return TensorMask(res)
@IntToFloatTensor
def encodes(self, o:TensorMask ): return o
# taken from https://github.com/xingyizhou/CenterNet/blob/819e0d0dde02f7b8cb0644987a8d3a370aa8206a/src/lib/utils/image.py
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
# Calculations
import cv2
from functools import partial
def to_pts(box): # x,y,w,h -> x1,y1,x2,y2
x,y,w,h = box
return x,y,x+w,y+h
def score(box, pred): # get prediction score
x1,y1,x2,y2 = box
return pred[:, y1:y2,x1:x2].max()
# https://gist.github.com/meyerjo/dd3533edc97c81258898f60d8978eddc
def bb_iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
def compute_ap(precision, recall):
"Compute the average precision for `precision` and `recall` curve."
recall = np.concatenate(([0.], list(recall), [1.]))
precision = np.concatenate(([0.], list(precision), [0.]))
for i in range(len(precision) - 1, 0, -1):
precision[i - 1] = np.maximum(precision[i - 1], precision[i])
idx = np.where(recall[1:] != recall[:-1])[0]
ap = np.sum((recall[idx + 1] - recall[idx]) * precision[idx + 1])
return ap
def euclidean_dist(boxA, boxB):
def midpt(box):
x1,y1,x2,y2 = box
return (x1+x2)/2, (y1+y2)/2
(xA,yA), (xB,yB) = midpt(boxA), midpt(boxB)
return ((xB-xA)**2 + (yB-yA)**2)**0.5
def calc_pr(lbls, preds, pmaxs, min_sz=1, max_dist=5, iou_thresh=0.1, score_thresh=0.3):
tps = []
fps = []
scores = []
n_gts = []
for lbl,pred,pmax in zip(lbls,preds,pmaxs):
contours,hierarchy = cv2.findContours(pmax.max(0).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pboxes = [cv2.boundingRect(cnt) for cnt in contours]
pboxes = [to_pts(pbox) for pbox in pboxes if pbox[2] >= min_sz and pbox[3] >= min_sz] # only if width and height are greater than min_sz
pboxes = [pbox for pbox in pboxes if score(pbox, pred) >= score_thresh]
contours,hierarchy = cv2.findContours((lbl>=0.9).max(0).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
lboxes = [to_pts(cv2.boundingRect(cnt)) for cnt in contours]
# detected = []
# ious = []
# for pb in pboxes:
# calc = [(bb_iou(lb, pb), lb) for lb in lboxes if lb not in detected]
# if len(calc) == 0: ious.append(0)
# else:
# iou, lb = max(calc)
# detected.append(lb)
# ious.append(iou)
# tp = (np.array(ious) >= iou_thresh)
# fp = ~tp
# s = np.array([score(pb, pred) for pb in pboxes])
detected = []
dists = []
for pb in pboxes:
calc = [(euclidean_dist(lb, pb), lb) for lb in lboxes if lb not in detected]
if len(calc) == 0:
dists.append(1e10)
else:
dist, lb = min(calc)
detected.append(lb)
dists.append(dist)
tp = (np.array(dists) < max_dist)
fp = ~tp
s = np.array([score(pb, pred) for pb in pboxes])
n_gts.append(len(lboxes))
tps.extend(tp.astype(np.uint8).tolist())
fps.extend(fp.astype(np.uint8).tolist())
scores.extend(s.tolist())
res = sorted(zip(scores, tps, fps), key=lambda x: x[0], reverse=True)
res = np.array(res)
if len(res) == 0: res = np.zeros((1, 3))
tp = res[:,1].cumsum(0)
fp = res[:,2].cumsum(0)
precision = tp / (tp + fp + 1e-8)
recall = tp / sum(n_gts)
return precision, recall
# misc
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# https://www.kaggle.com/hocop1/centernet-baseline
class FocalLoss2(nn.Module):
def __init__(self):
super().__init__()
def forward(self, out, target):
# Binary mask loss
pred_mask = torch.sigmoid(out)
target = target[:, None]
# mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
mask_loss = target * torch.log(pred_mask + 1e-12) + (1 - target) * torch.log(1 - pred_mask + 1e-12)
mask_loss = -mask_loss.mean(0).sum()
return mask_loss
| [
"numpy.stack",
"numpy.sum",
"numpy.maximum",
"cv2.threshold",
"numpy.zeros",
"numpy.finfo",
"pathlib.Path",
"numpy.where",
"numpy.array",
"numpy.exp",
"cv2.boundingRect"
] | [((3190, 3221), 'cv2.threshold', 'cv2.threshold', (['img', '(127)', '(255)', '(0)'], {}), '(img, 127, 255, 0)\n', (3203, 3221), False, 'import cv2\n'), ((3394, 3415), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (3410, 3415), False, 'import cv2\n'), ((3464, 3478), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (3472, 3478), True, 'import numpy as np\n'), ((3568, 3593), 'numpy.stack', 'np.stack', (['(x, y)'], {'axis': '(-1)'}), '((x, y), axis=-1)\n', (3576, 3593), True, 'import numpy as np\n'), ((5034, 5080), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2 * sigma * sigma))\n', (5040, 5080), True, 'import numpy as np\n'), ((7394, 7454), 'numpy.sum', 'np.sum', (['((recall[idx + 1] - recall[idx]) * precision[idx + 1])'], {}), '((recall[idx + 1] - recall[idx]) * precision[idx + 1])\n', (7400, 7454), True, 'import numpy as np\n'), ((9696, 9709), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (9704, 9709), True, 'import numpy as np\n'), ((5758, 5825), 'numpy.maximum', 'np.maximum', (['masked_heatmap', '(masked_gaussian * k)'], {'out': 'masked_heatmap'}), '(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n', (5768, 5825), True, 'import numpy as np\n'), ((7293, 7335), 'numpy.maximum', 'np.maximum', (['precision[i - 1]', 'precision[i]'], {}), '(precision[i - 1], precision[i])\n', (7303, 7335), True, 'import numpy as np\n'), ((7346, 7381), 'numpy.where', 'np.where', (['(recall[1:] != recall[:-1])'], {}), '(recall[1:] != recall[:-1])\n', (7354, 7381), True, 'import numpy as np\n'), ((9738, 9754), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (9746, 9754), True, 'import numpy as np\n'), ((8005, 8026), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (8021, 8026), False, 'import cv2\n'), ((9313, 9328), 'numpy.array', 'np.array', (['dists'], {}), '(dists)\n', (9321, 9328), True, 'import numpy as np\n'), ((8423, 8444), 'cv2.boundingRect', 
'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (8439, 8444), False, 'import cv2\n'), ((3893, 3902), 'pathlib.Path', 'Path', (['tid'], {}), '(tid)\n', (3897, 3902), False, 'from pathlib import Path\n'), ((5091, 5108), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (5099, 5108), True, 'import numpy as np\n')] |
import cv2
import numpy as np
cap = cv2.VideoCapture(2)
_, prev = cap.read()
prev = cv2.flip(prev, 1)
_, new = cap.read()
new = cv2.flip(new, 1)
while True:
diff = cv2.absdiff(prev, new)
diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
diff = cv2.blur(diff, (5,5))
_,thresh = cv2.threshold(diff, 10, 255, cv2.THRESH_BINARY)
threh = cv2.dilate(thresh, None, 3)
thresh = cv2.erode(thresh, np.ones((4,4)), 1)
contor,_ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.circle(prev, (20,200), 5, (0,0,255), -1)
for contors in contor:
if cv2.contourArea(contors) > 3000:
(x,y,w,h) = cv2.boundingRect(contors)
(x1,y1),rad = cv2.minEnclosingCircle(contors)
x1 = int(x1)
y1 = int(y1)
cv2.line(prev, (20,200), (x1, y1), (255,0,0), 4)
#dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
cv2.putText(prev, "{}".format(int(np.sqrt((x1 - 20)**2 + (y1 - 200)**2))), (100,100),cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,0), 3)
cv2.rectangle(prev, (x,y), (x+w,y+h), (0,255,0), 2)
cv2.circle(prev, (x1,y1), 5, (0,0,255), -1)
cv2.imshow("orig", prev)
prev = new
_, new = cap.read()
new = cv2.flip(new, 1)
if cv2.waitKey(1) == 27:
break
cap.release()
cv2.destroyAllWindows()
| [
"numpy.ones",
"cv2.rectangle",
"cv2.absdiff",
"cv2.imshow",
"cv2.line",
"cv2.contourArea",
"cv2.dilate",
"cv2.cvtColor",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"cv2.circle",
"cv2.minEnclosingCircle",
"cv2.waitKey",
"cv2.flip",
"cv2.threshold",
"cv2.blur",
"cv2.VideoCapture",
... | [((38, 57), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(2)'], {}), '(2)\n', (54, 57), False, 'import cv2\n'), ((87, 104), 'cv2.flip', 'cv2.flip', (['prev', '(1)'], {}), '(prev, 1)\n', (95, 104), False, 'import cv2\n'), ((131, 147), 'cv2.flip', 'cv2.flip', (['new', '(1)'], {}), '(new, 1)\n', (139, 147), False, 'import cv2\n'), ((1206, 1229), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1227, 1229), False, 'import cv2\n'), ((170, 192), 'cv2.absdiff', 'cv2.absdiff', (['prev', 'new'], {}), '(prev, new)\n', (181, 192), False, 'import cv2\n'), ((201, 239), 'cv2.cvtColor', 'cv2.cvtColor', (['diff', 'cv2.COLOR_BGR2GRAY'], {}), '(diff, cv2.COLOR_BGR2GRAY)\n', (213, 239), False, 'import cv2\n'), ((248, 270), 'cv2.blur', 'cv2.blur', (['diff', '(5, 5)'], {}), '(diff, (5, 5))\n', (256, 270), False, 'import cv2\n'), ((282, 329), 'cv2.threshold', 'cv2.threshold', (['diff', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(diff, 10, 255, cv2.THRESH_BINARY)\n', (295, 329), False, 'import cv2\n'), ((339, 366), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None', '(3)'], {}), '(thresh, None, 3)\n', (349, 366), False, 'import cv2\n'), ((426, 490), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (442, 490), False, 'import cv2\n'), ((492, 539), 'cv2.circle', 'cv2.circle', (['prev', '(20, 200)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(prev, (20, 200), 5, (0, 0, 255), -1)\n', (502, 539), False, 'import cv2\n'), ((1072, 1096), 'cv2.imshow', 'cv2.imshow', (['"""orig"""', 'prev'], {}), "('orig', prev)\n", (1082, 1096), False, 'import cv2\n'), ((1139, 1155), 'cv2.flip', 'cv2.flip', (['new', '(1)'], {}), '(new, 1)\n', (1147, 1155), False, 'import cv2\n'), ((395, 410), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (402, 410), True, 'import numpy as np\n'), ((1161, 1175), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1172, 1175), False, 'import 
cv2\n'), ((570, 594), 'cv2.contourArea', 'cv2.contourArea', (['contors'], {}), '(contors)\n', (585, 594), False, 'import cv2\n'), ((618, 643), 'cv2.boundingRect', 'cv2.boundingRect', (['contors'], {}), '(contors)\n', (634, 643), False, 'import cv2\n'), ((661, 692), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['contors'], {}), '(contors)\n', (683, 692), False, 'import cv2\n'), ((728, 779), 'cv2.line', 'cv2.line', (['prev', '(20, 200)', '(x1, y1)', '(255, 0, 0)', '(4)'], {}), '(prev, (20, 200), (x1, y1), (255, 0, 0), 4)\n', (736, 779), False, 'import cv2\n'), ((961, 1020), 'cv2.rectangle', 'cv2.rectangle', (['prev', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(prev, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (974, 1020), False, 'import cv2\n'), ((1016, 1062), 'cv2.circle', 'cv2.circle', (['prev', '(x1, y1)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(prev, (x1, y1), 5, (0, 0, 255), -1)\n', (1026, 1062), False, 'import cv2\n'), ((864, 905), 'numpy.sqrt', 'np.sqrt', (['((x1 - 20) ** 2 + (y1 - 200) ** 2)'], {}), '((x1 - 20) ** 2 + (y1 - 200) ** 2)\n', (871, 905), True, 'import numpy as np\n')] |
# Copyright 2020 The PEGASUS Authors..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pegasus.ops.text_encoder_utils."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pegasus.ops.python import text_encoder_utils
import tensorflow as tf
_SUBWORD_VOCAB = "pegasus/ops/testdata/subwords"
_SPM_VOCAB = "pegasus/ops/testdata/sp_test.model"
class TextEncoderUtilsTest(parameterized.TestCase, tf.test.TestCase):
def test_sentencepiece(self):
e = text_encoder_utils.create_text_encoder("sentencepiece", _SPM_VOCAB)
in_text = "the quick brown fox jumps over the lazy dog"
self.assertEqual(in_text, e.decode(e.encode(in_text)))
def test_sentencepiece_offset(self):
e = text_encoder_utils.create_text_encoder("sentencepiece_newline",
_SPM_VOCAB)
in_text = "the quick brown fox jumps over the lazy dog"
ids = [25] + e.encode(in_text)
self.assertEqual(in_text, e.decode(ids))
def test_subword_decode(self):
encoder = text_encoder_utils.create_text_encoder("subword", _SUBWORD_VOCAB)
self.assertEqual(encoder.decode([9, 10, 11, 12, 1, 0]), "quick brown fox")
def test_subword_decode_numpy_int32(self):
encoder = text_encoder_utils.create_text_encoder("subword", _SUBWORD_VOCAB)
ids = np.array([9, 10, 11, 12, 1, 0], dtype=np.int32)
# Without tolist(), the test will not pass for any other np array types
# other than int64.
self.assertEqual(encoder.decode(ids.tolist()), "quick brown fox")
def test_subword_decode_numpy_int64(self):
encoder = text_encoder_utils.create_text_encoder("subword", _SUBWORD_VOCAB)
ids = np.array([9, 10, 11, 12, 1, 0], dtype=np.int64)
# Without tolist(), the test will not pass for python3
self.assertEqual(encoder.decode(ids.tolist()), "quick brown fox")
if __name__ == "__main__":
absltest.main()
| [
"absl.testing.absltest.main",
"numpy.array",
"pegasus.ops.python.text_encoder_utils.create_text_encoder"
] | [((2403, 2418), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2416, 2418), False, 'from absl.testing import absltest\n'), ((1014, 1081), 'pegasus.ops.python.text_encoder_utils.create_text_encoder', 'text_encoder_utils.create_text_encoder', (['"""sentencepiece"""', '_SPM_VOCAB'], {}), "('sentencepiece', _SPM_VOCAB)\n", (1052, 1081), False, 'from pegasus.ops.python import text_encoder_utils\n'), ((1249, 1324), 'pegasus.ops.python.text_encoder_utils.create_text_encoder', 'text_encoder_utils.create_text_encoder', (['"""sentencepiece_newline"""', '_SPM_VOCAB'], {}), "('sentencepiece_newline', _SPM_VOCAB)\n", (1287, 1324), False, 'from pegasus.ops.python import text_encoder_utils\n'), ((1560, 1625), 'pegasus.ops.python.text_encoder_utils.create_text_encoder', 'text_encoder_utils.create_text_encoder', (['"""subword"""', '_SUBWORD_VOCAB'], {}), "('subword', _SUBWORD_VOCAB)\n", (1598, 1625), False, 'from pegasus.ops.python import text_encoder_utils\n'), ((1765, 1830), 'pegasus.ops.python.text_encoder_utils.create_text_encoder', 'text_encoder_utils.create_text_encoder', (['"""subword"""', '_SUBWORD_VOCAB'], {}), "('subword', _SUBWORD_VOCAB)\n", (1803, 1830), False, 'from pegasus.ops.python import text_encoder_utils\n'), ((1841, 1888), 'numpy.array', 'np.array', (['[9, 10, 11, 12, 1, 0]'], {'dtype': 'np.int32'}), '([9, 10, 11, 12, 1, 0], dtype=np.int32)\n', (1849, 1888), True, 'import numpy as np\n'), ((2119, 2184), 'pegasus.ops.python.text_encoder_utils.create_text_encoder', 'text_encoder_utils.create_text_encoder', (['"""subword"""', '_SUBWORD_VOCAB'], {}), "('subword', _SUBWORD_VOCAB)\n", (2157, 2184), False, 'from pegasus.ops.python import text_encoder_utils\n'), ((2195, 2242), 'numpy.array', 'np.array', (['[9, 10, 11, 12, 1, 0]'], {'dtype': 'np.int64'}), '([9, 10, 11, 12, 1, 0], dtype=np.int64)\n', (2203, 2242), True, 'import numpy as np\n')] |
"""
## Author: <NAME>, <NAME>
"""
import numpy as np
from project.size import size
from project.zeros import zeros
from project.dfimdalpha import dfimdalpha
from project.d2fimdalpha2 import d2fimdalpha2
from project.trace_matrix import trace_matrix
from project.log_prior_pdf import log_prior_pdf
def hesskalpha2(alpha,model_switch,groupsize,ni,xtoptn,xoptn,aoptn,bpopdescr,ddescr,covd,sigma,docc,poped_db,ha,Engine):
#D2KALPHA2 calculates the hessian of k with respect to alpha
# Detailed explanation goes here
returnArgs = log_prior_pdf(alpha, bpopdescr, ddescr, return_gradient=True, return_hessian=True)
p = returnArgs[0]
gradp = returnArgs[1]
hessp = returnArgs[2]
#get dF/dAlpha and fim
returnArgs = dfimdalpha(alpha, model_switch, groupsize, ni, xtoptn, xoptn, aoptn, bpopdescr, ddescr, covd, sigma, docc, poped_db, ha)
d_fim = returnArgs[0]
fim = returnArgs[1]
ifim = np.linalg.inv(fim)
tigi = zeros(size(d_fim)[2])
for i in range(0, size(d_fim)[2]):
for j in range(0, i):
tigi[i,j] = trace_matrix(ifim*d_fim[:,:,i]*ifim*d_fim[:,:,j])
tigi[j,i] = tigi[i,j]
d2 = d2fimdalpha2(alpha, model_switch, groupsize, ni, xtoptn, xoptn, aoptn, bpopdescr, ddescr, covd, sigma, docc, poped_db, 1e-4)["hess"].reshape(fim.size, hessp.size)
d2logdfim = np.matmul(np.transpose(d2), np.asarray(ifim)).reshape(size(hessp)[0] ,size(hessp)[1])
hess = -(hessp + d2logdfim - tigi)
# try({
# L=chol(fim)
# # calc inverse
# iL=solve(L,diag_matlab(length(L)))
# ifim=t(iL)%*%iL
# # calc trace of iF*dF/dAlpha(i)*iF*dF/dAlpha(j)
# for(i in 1:size(d_fim,3)){
# for(j in 1:i){
# tigi[i,j]=trace_matrix(ifim*d_fim[,,i]*ifim*d_fim[,,j])
# tigi[j,i]=tigi[i,j]
# }
# }
# d2=d2fimdalpha2(alpha,model_switch,groupsize,ni,xtoptn,xoptn,aoptn,bpopdescr,ddescr,covd,sigma,docc,poped_db,1e-4)
# d2Fim=reshape_matlab(d2,length(fim)^2,length(hessp)^2)
# d2logdfim=t(d2Fim)%*%ifim
# hess=-(hessp+reshape_matlab(d2logdfim,length(hessp),length(hessp))-tigi)
# })
# catch
# if((Engine$Type==1)){
# exception = lasterror
# if(exception$identifier=='MATLAB:posdef'){
# hess=zeros(length(alpha))
# } else {
# rethrow(exception)
# }
# } else {
# exception = lasterr
# if(exception$identifier==''){
# hess=zeros(length(alpha))
# } else {
# stop(sprintf(exception))
# }
# }
return hess
| [
"project.d2fimdalpha2.d2fimdalpha2",
"project.dfimdalpha.dfimdalpha",
"numpy.asarray",
"numpy.transpose",
"project.log_prior_pdf.log_prior_pdf",
"numpy.linalg.inv",
"project.size.size",
"project.trace_matrix.trace_matrix"
] | [((564, 650), 'project.log_prior_pdf.log_prior_pdf', 'log_prior_pdf', (['alpha', 'bpopdescr', 'ddescr'], {'return_gradient': '(True)', 'return_hessian': '(True)'}), '(alpha, bpopdescr, ddescr, return_gradient=True,\n return_hessian=True)\n', (577, 650), False, 'from project.log_prior_pdf import log_prior_pdf\n'), ((771, 895), 'project.dfimdalpha.dfimdalpha', 'dfimdalpha', (['alpha', 'model_switch', 'groupsize', 'ni', 'xtoptn', 'xoptn', 'aoptn', 'bpopdescr', 'ddescr', 'covd', 'sigma', 'docc', 'poped_db', 'ha'], {}), '(alpha, model_switch, groupsize, ni, xtoptn, xoptn, aoptn,\n bpopdescr, ddescr, covd, sigma, docc, poped_db, ha)\n', (781, 895), False, 'from project.dfimdalpha import dfimdalpha\n'), ((957, 975), 'numpy.linalg.inv', 'np.linalg.inv', (['fim'], {}), '(fim)\n', (970, 975), True, 'import numpy as np\n'), ((994, 1005), 'project.size.size', 'size', (['d_fim'], {}), '(d_fim)\n', (998, 1005), False, 'from project.size import size\n'), ((1033, 1044), 'project.size.size', 'size', (['d_fim'], {}), '(d_fim)\n', (1037, 1044), False, 'from project.size import size\n'), ((1106, 1165), 'project.trace_matrix.trace_matrix', 'trace_matrix', (['(ifim * d_fim[:, :, i] * ifim * d_fim[:, :, j])'], {}), '(ifim * d_fim[:, :, i] * ifim * d_fim[:, :, j])\n', (1118, 1165), False, 'from project.trace_matrix import trace_matrix\n'), ((1445, 1456), 'project.size.size', 'size', (['hessp'], {}), '(hessp)\n', (1449, 1456), False, 'from project.size import size\n'), ((1461, 1472), 'project.size.size', 'size', (['hessp'], {}), '(hessp)\n', (1465, 1472), False, 'from project.size import size\n'), ((1211, 1341), 'project.d2fimdalpha2.d2fimdalpha2', 'd2fimdalpha2', (['alpha', 'model_switch', 'groupsize', 'ni', 'xtoptn', 'xoptn', 'aoptn', 'bpopdescr', 'ddescr', 'covd', 'sigma', 'docc', 'poped_db', '(0.0001)'], {}), '(alpha, model_switch, groupsize, ni, xtoptn, xoptn, aoptn,\n bpopdescr, ddescr, covd, sigma, docc, poped_db, 0.0001)\n', (1223, 1341), False, 'from project.d2fimdalpha2 
import d2fimdalpha2\n'), ((1401, 1417), 'numpy.transpose', 'np.transpose', (['d2'], {}), '(d2)\n', (1413, 1417), True, 'import numpy as np\n'), ((1419, 1435), 'numpy.asarray', 'np.asarray', (['ifim'], {}), '(ifim)\n', (1429, 1435), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib as mpl
from datetime import datetime, timedelta
from floodsystem.stationdata import build_station_list
from floodsystem.datafetcher import fetch_measure_levels
import matplotlib.pyplot as plt
def polyfit(dates, levels, p):
days = mpl.dates.date2num(dates)
d0 = np.min(days)
x = days - d0
y = levels
# Find coefficients of best-fit polynomial f(x) of degree 4
p_coeff = np.polyfit(x, y, p)
# Convert coefficient into a polynomial that can be evaluated,
# e.g. poly(0.3)
poly = np.poly1d(p_coeff)
return d0, poly | [
"matplotlib.dates.date2num",
"numpy.poly1d",
"numpy.min",
"numpy.polyfit"
] | [((277, 302), 'matplotlib.dates.date2num', 'mpl.dates.date2num', (['dates'], {}), '(dates)\n', (295, 302), True, 'import matplotlib as mpl\n'), ((312, 324), 'numpy.min', 'np.min', (['days'], {}), '(days)\n', (318, 324), True, 'import numpy as np\n'), ((437, 456), 'numpy.polyfit', 'np.polyfit', (['x', 'y', 'p'], {}), '(x, y, p)\n', (447, 456), True, 'import numpy as np\n'), ((556, 574), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (565, 574), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import backend as K
import os
import numpy as np
import matplotlib.pyplot as pl
import argparse
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
parser = argparse.ArgumentParser('Combines trained top model with full MobileNetV2 model')
parser.add_argument('--image_dir',
help='directory for images')
parser.add_argument('--target',
help='where to save the final model in h5 format')
args = parser.parse_args()
IMAGE_SIZE = 224
BATCH_SIZE = 64
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=0.2)
train_generator = datagen.flow_from_directory(
args.image_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='training')
val_generator = datagen.flow_from_directory(
args.image_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='validation')
for image_batch, label_batch in train_generator:
break
print (train_generator.class_indices)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('labels.txt', 'w') as f:
f.write(labels)
IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
base_model.trainable = False
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(2, activation='softmax')
])
def prediction_min(y_true, y_pred):
final = K.min(y_pred)
return final
def prediction_max(y_true, y_pred):
final = K.max(y_pred)
return final
def prediction_variance(y_true, y_pred):
final = K.var(y_pred)
return final
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
epochs = 2
history = model.fit_generator(train_generator,
epochs=epochs,
validation_data=val_generator)
model.save(args.target) | [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.backend.min",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.max",
"tensorflow.set_random_seed",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.backend.var"... | [((256, 263), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (260, 263), False, 'from numpy.random import seed\n'), ((303, 321), 'tensorflow.set_random_seed', 'set_random_seed', (['(2)'], {}), '(2)\n', (318, 321), False, 'from tensorflow import set_random_seed\n'), ((332, 418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Combines trained top model with full MobileNetV2 model"""'], {}), "(\n 'Combines trained top model with full MobileNetV2 model')\n", (355, 418), False, 'import argparse\n'), ((676, 768), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'tf.keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': '(0.2)'}), '(rescale=1.0 / 255,\n validation_split=0.2)\n', (723, 768), True, 'import tensorflow as tf\n'), ((1430, 1529), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': 'IMG_SHAPE', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=IMG_SHAPE, include_top=False,\n weights='imagenet')\n", (1463, 1529), True, 'import tensorflow as tf\n'), ((1839, 1852), 'tensorflow.keras.backend.min', 'K.min', (['y_pred'], {}), '(y_pred)\n', (1844, 1852), True, 'from tensorflow.keras import backend as K\n'), ((1919, 1932), 'tensorflow.keras.backend.max', 'K.max', (['y_pred'], {}), '(y_pred)\n', (1924, 1932), True, 'from tensorflow.keras import backend as K\n'), ((2004, 2017), 'tensorflow.keras.backend.var', 'K.var', (['y_pred'], {}), '(y_pred)\n', (2009, 2017), True, 'from tensorflow.keras import backend as K\n'), ((1696, 1736), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (1734, 1736), True, 'import tensorflow as tf\n'), ((1740, 1786), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (1761, 1786), True, 'import tensorflow as tf\n'), ((2060, 
2086), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (2084, 2086), True, 'import tensorflow as tf\n')] |
#! /usr/bin/env python
#
# Description:
#
#
# Usage:
# python
#
import sys
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")  # apply seaborn's "darkgrid" style to all figures in this script
def kozeny_carman(x, x_c, gamma, C):
    """Base-10 logarithm of a Kozeny-Carman-type permeability model.

    Evaluates ``log10(C) + gamma*log10(x - x_c) + 2*log10(x / (1 - x))``,
    i.e. ``k = C * (x - x_c)**gamma * (x / (1 - x))**2`` in log space.

    :param x: porosity value(s); scalar or numpy array
    :param x_c: critical (percolation) porosity offset
    :param gamma: exponent of the excess porosity term
    :param C: multiplicative prefactor
    :return: log10 of the modelled permeability
    """
    excess = x - x_c
    porosity_term = 2.0 * np.log10(x / (1 - x))
    return np.log10(C) + gamma * np.log10(excess) + porosity_term
def mse(x, logk, x_c, gamma, C):
    """Mean squared error between measured log-permeabilities and the
    Kozeny-Carman model evaluated at the same porosities.

    :param x: sequence of porosity values
    :param logk: sequence of measured log10 permeabilities (same length as x)
    :param x_c: critical porosity passed to :func:`kozeny_carman`
    :param gamma: exponent passed to :func:`kozeny_carman`
    :param C: prefactor passed to :func:`kozeny_carman`
    :return: mean of the squared residuals
    """
    squared_residuals = [
        (logk[i] - kozeny_carman(x[i], x_c, gamma, C)) ** 2
        for i in range(len(x))
    ]
    return sum(squared_residuals) / len(x)
# Load the data file given on the command line; the four columns are
# unpacked as phi, perc, k, t -- presumably porosity, percolation flag,
# permeability and tortuosity (TODO confirm against the data generator).
data = np.loadtxt(sys.argv[1])
phi, perc, k, t = data.T
# Keep only percolating samples (perc == 1).
indcs = perc == 1
phi = phi[indcs]
k = k[indcs]
# Discard porosities above 0.95, where the x/(1-x) term of the model diverges.
indcs = phi <= 0.95
phi = phi[indcs]
k = k[indcs]
log10k = np.log10(k)
# Fit the model in log space; bound x_c to [0, 0.6], leave gamma and C free.
popt, pcov = curve_fit(kozeny_carman, phi, log10k, bounds=(0, [0.6, np.inf, np.inf]))
# Evaluate the fitted curve from just above x_c up to the largest porosity.
x = np.linspace(popt[0] + 0.01, np.max(phi), 100)
log_k_fit = kozeny_carman(x, popt[0], popt[1], popt[2])
k_fit = 10 ** log_k_fit
print("MSE=", mse(phi, log10k, popt[0], popt[1], popt[2]))
# Plot the data and the fit with a logarithmic permeability axis.
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(7, 7))
# NOTE(review): "nonposy" was renamed "nonpositive" in matplotlib 3.3.
ax.set_yscale("log", nonposy="clip")
plt.plot(phi, k, "+", color="green")
plt.plot(x, k_fit, "--", color="darkblue", lw=3)
plt.xlabel("Porosity", fontsize=20)
plt.ylabel(r"Permeability", fontsize=20, labelpad=0)
plt.tick_params(axis="both", which="major", labelsize=15)
plt.tick_params(axis="both", which="minor", labelsize=12)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"scipy.optimize.curve_fit",
"numpy.max",
"numpy.loadtxt",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"seaborn.set"
] | [((188, 213), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (195, 213), True, 'import seaborn as sns\n'), ((624, 647), 'numpy.loadtxt', 'np.loadtxt', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (634, 647), True, 'import numpy as np\n'), ((783, 794), 'numpy.log10', 'np.log10', (['k'], {}), '(k)\n', (791, 794), True, 'import numpy as np\n'), ((809, 881), 'scipy.optimize.curve_fit', 'curve_fit', (['kozeny_carman', 'phi', 'log10k'], {'bounds': '(0, [0.6, np.inf, np.inf])'}), '(kozeny_carman, phi, log10k, bounds=(0, [0.6, np.inf, np.inf]))\n', (818, 881), False, 'from scipy.optimize import curve_fit\n'), ((1084, 1131), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '(True)', 'figsize': '(7, 7)'}), '(1, 1, sharey=True, figsize=(7, 7))\n', (1096, 1131), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1206), 'matplotlib.pyplot.plot', 'plt.plot', (['phi', 'k', '"""+"""'], {'color': '"""green"""'}), "(phi, k, '+', color='green')\n", (1178, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1255), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'k_fit', '"""--"""'], {'color': '"""darkblue"""', 'lw': '(3)'}), "(x, k_fit, '--', color='darkblue', lw=3)\n", (1215, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1291), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Porosity"""'], {'fontsize': '(20)'}), "('Porosity', fontsize=20)\n", (1266, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1292, 1343), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Permeability"""'], {'fontsize': '(20)', 'labelpad': '(0)'}), "('Permeability', fontsize=20, labelpad=0)\n", (1302, 1343), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1402), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (1360, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1460), 
'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'labelsize': '(12)'}), "(axis='both', which='minor', labelsize=12)\n", (1418, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1470, 1472), True, 'import matplotlib.pyplot as plt\n'), ((915, 926), 'numpy.max', 'np.max', (['phi'], {}), '(phi)\n', (921, 926), True, 'import numpy as np\n'), ((295, 306), 'numpy.log10', 'np.log10', (['C'], {}), '(C)\n', (303, 306), True, 'import numpy as np\n'), ((338, 359), 'numpy.log10', 'np.log10', (['(x / (1 - x))'], {}), '(x / (1 - x))\n', (346, 359), True, 'import numpy as np\n'), ((317, 329), 'numpy.log10', 'np.log10', (['dx'], {}), '(dx)\n', (325, 329), True, 'import numpy as np\n')] |
import pickle
import re
import string
import pkg_resources
from gensim.models import KeyedVectors
import numpy as np
class Preprocessor(object):
    """Normalise job-title strings.

    Disallowed characters are replaced by spaces, runs of spaces are
    collapsed, the text is tokenised, lower-cased, and one-character
    tokens are dropped.
    """

    # Whitelist: space, apostrophe, + , - . (U+002B-U+002E), digits, A-Z, a-z.
    char_search = re.compile(r"[^\u0020\u0027\u002b-\u002e\u0030-\u0039\u0041-\u005a\u0061-\u007a]")
    strip_multi_ws = re.compile(r"( {2,})")
    word_re = re.compile(r"([\w|-]+)")
    punc = set(string.punctuation)

    def __init__(self):
        self.kp = self._load_kp()
        self.gram_counts = self._load_gram_counts()

    def __call__(self, x):
        """Return the normalised form of the title string *x*."""
        cleaned = self.char_search.sub(" ", x)
        cleaned = self.strip_multi_ws.sub(" ", cleaned)
        tokens = self.word_re.findall(cleaned)
        kept = [token.lower() for token in tokens
                if len(token) > 1 and token not in self.punc]
        # trimmed_x = self._trim_title(" ".join(kept))
        return " ".join(kept)

    def _load_kp(self):
        """Load the pickled keyword processor bundled with the package."""
        data_path = pkg_resources.resource_filename('title_graph', 'data/kp.pkl')
        with open(data_path, "rb") as handle:
            return pickle.load(handle)

    def _load_gram_counts(self):
        """Load the pickled n-gram count table bundled with the package."""
        data_path = pkg_resources.resource_filename('title_graph', 'data/gram_counts.pkl')
        with open(data_path, "rb") as handle:
            return pickle.load(handle)

    def _trim_title(self, x):
        """Reduce *x* to its most frequent extracted keyword, if any."""
        matches = self.kp.extract_keywords(x)
        if not matches:
            return x
        if len(matches) == 1:
            return matches[0]
        # Several keywords matched: keep the one with the highest corpus count.
        return max(matches, key=lambda keyword: self.gram_counts.get(keyword, 0))
class TitleGraph(object):
    """Directed graph of job-title transitions combined with a title
    embedding (gensim :class:`KeyedVectors`).

    Queries aggregate the weight-scaled vectors of a title's successors
    (next titles) or predecessors (previous titles) and return the titles
    most similar to the aggregate vector.
    """

    def __init__(self, preprocessor=Preprocessor):
        self.graph = self._load_graph()
        self.model = self._load_model()
        self.preprocessor = preprocessor()

    def _load_graph(self):
        """Load the pickled transition graph bundled with the package."""
        data_path = pkg_resources.resource_filename('title_graph', 'data/graph.pkl')
        with open(data_path, "rb") as pfile:
            return pickle.load(pfile)

    def _load_model(self):
        """Load the gensim KeyedVectors title embedding (memory-mapped)."""
        data_path = pkg_resources.resource_filename('title_graph', 'data/title_model.kv')
        return KeyedVectors.load(data_path, mmap='r')

    def _aggregate_neighbour_vector(self, neighbours, min_weight):
        """Average the weight-scaled vectors of the given graph neighbours.

        This is the common core of :meth:`query_forward` and
        :meth:`query_backwards`, which previously duplicated it verbatim.

        :param neighbours: iterable of ``(title, edge_data)`` pairs
        :param min_weight: edges with ``weight`` below this are ignored
        :return: mean vector, or None if no neighbour passes the filter
        """
        vecs = [self.model.wv.get_vector(title) * data['weight']
                for title, data in neighbours
                if data['weight'] >= min_weight]
        if not vecs:
            return None
        return np.mean(vecs, axis=0)

    def query_forward(self, title, min_weight=2, topn=25):
        """
        Given a Job Title, find the most likely Job Title to occur next
        :param title: str, a Job Title
        :param min_weight: int, the minimum weight to consider from the graph. Setting this higher will reduce the number of matches returned
        :param topn: int, The number of results to return
        :return: results if title in self.graph else None; [] if no edge passes min_weight
        """
        key = self.preprocessor(title)
        if key not in self.graph:
            return None
        vec = self._aggregate_neighbour_vector(
            self.graph.succ.get(key).items(), min_weight)
        if vec is None:
            return []
        return self.model.wv.similar_by_vector(vec, topn=topn)

    def query_backwards(self, title, min_weight=2, topn=25):
        """
        Given a Job Title, find the most likely previous Job Title
        :param title: str, a Job Title
        :param min_weight: int, the minimum weight to consider from the graph
        :param topn: int, The number of results to return
        :return: results if title in self.graph else None; [] if no edge passes min_weight
        """
        key = self.preprocessor(title)
        if key not in self.graph:
            return None
        vec = self._aggregate_neighbour_vector(
            self.graph.pred.get(key).items(), min_weight)
        if vec is None:
            return []
        return self.model.wv.similar_by_vector(vec, topn=topn)

    def query_similar_semantic(self, title, topn=25, as_tokens=False):
        """
        Given a Job Title, use FastText via Gensim and return topn similar titles
        :param title: str, a Job Title
        :param topn: int, The number of results to return
        :param as_tokens: bool, Whether to split the string. This should only effect Job Title queries with 2+ words.
                          If the order of the words is important, leave as False.
        :return: results
        """
        key = self.preprocessor(title)
        if as_tokens:
            key = key.split()
        return self.model.most_similar(key, topn=topn)
| [
"gensim.models.KeyedVectors.load",
"pkg_resources.resource_filename",
"numpy.mean",
"pickle.load",
"re.compile"
] | [((167, 268), 're.compile', 're.compile', (['"""[^\\\\u0020\\\\u0027\\\\u002b-\\\\u002e\\\\u0030-\\\\u0039\\\\u0041-\\\\u005a\\\\u0061-\\\\u007a]"""'], {}), "(\n '[^\\\\u0020\\\\u0027\\\\u002b-\\\\u002e\\\\u0030-\\\\u0039\\\\u0041-\\\\u005a\\\\u0061-\\\\u007a]'\n )\n", (177, 268), False, 'import re\n'), ((271, 292), 're.compile', 're.compile', (['"""( {2,})"""'], {}), "('( {2,})')\n", (281, 292), False, 'import re\n'), ((308, 332), 're.compile', 're.compile', (['"""([\\\\w|-]+)"""'], {}), "('([\\\\w|-]+)')\n", (318, 332), False, 'import re\n'), ((824, 885), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""title_graph"""', '"""data/kp.pkl"""'], {}), "('title_graph', 'data/kp.pkl')\n", (855, 885), False, 'import pkg_resources\n'), ((1023, 1093), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""title_graph"""', '"""data/gram_counts.pkl"""'], {}), "('title_graph', 'data/gram_counts.pkl')\n", (1054, 1093), False, 'import pkg_resources\n'), ((1725, 1789), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""title_graph"""', '"""data/graph.pkl"""'], {}), "('title_graph', 'data/graph.pkl')\n", (1756, 1789), False, 'import pkg_resources\n'), ((1922, 1991), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""title_graph"""', '"""data/title_model.kv"""'], {}), "('title_graph', 'data/title_model.kv')\n", (1953, 1991), False, 'import pkg_resources\n'), ((2007, 2045), 'gensim.models.KeyedVectors.load', 'KeyedVectors.load', (['data_path'], {'mmap': '"""r"""'}), "(data_path, mmap='r')\n", (2024, 2045), False, 'from gensim.models import KeyedVectors\n'), ((2968, 3016), 'numpy.mean', 'np.mean', (["[x['vec'] for x in result_vecs]"], {'axis': '(0)'}), "([x['vec'] for x in result_vecs], axis=0)\n", (2975, 3016), True, 'import numpy as np\n'), ((3952, 4000), 'numpy.mean', 'np.mean', (["[x['vec'] for x in result_vecs]"], {'axis': '(0)'}), "([x['vec'] for x in result_vecs], axis=0)\n", 
(3959, 4000), True, 'import numpy as np\n'), ((950, 968), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (961, 968), False, 'import pickle\n'), ((1158, 1176), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (1169, 1176), False, 'import pickle\n'), ((1854, 1872), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (1865, 1872), False, 'import pickle\n')] |
import os
import sys
import numpy as np
from numpy import savez_compressed
from PIL import Image
from tqdm import tqdm
import multiprocessing
from util_functions import get_normals_from_depth
# Toggle between the production and the development data folders; both are
# resolved relative to the parent directory of this script.
PRODUCTION = False
if PRODUCTION:
    FOLDER_NAME = 'depth_production'
    TARGET_FOLDER = 'normals_production'
else:
    FOLDER_NAME = 'depth'
    TARGET_FOLDER = 'normals'
def create_normals_from_img(img_name: str, current_idx: int, n_total: int) -> None:
    """Compute a normal map from one depth image and store it compressed.

    Reads ``../FOLDER_NAME/<img_name>``, converts it to float32, derives
    surface normals via :func:`get_normals_from_depth`, and writes the
    result as ``../TARGET_FOLDER/<img_name without .png>.npz``.

    :param img_name: file name of the depth image (expected to end in .png)
    :param current_idx: zero-based index of this image, used for progress logs
    :param n_total: total number of images, used for progress logs
    """
    print(f'[Starting] [{current_idx + 1} / {n_total}] creating normals from: {img_name}')
    source_path = os.path.join('..', FOLDER_NAME, img_name)
    depth = np.array(Image.open(source_path), dtype=np.float32)
    normals = np.array(get_normals_from_depth(depth_img_arr=depth), dtype=np.float32)
    stem = img_name.replace('.png', '')
    savez_compressed(f'../{TARGET_FOLDER}/{stem}', normals)
    print(f'[Finished] [{current_idx + 1} / {n_total}] creating normals from: {img_name}')
def main():
    """Create normal maps for a slice of the depth images.

    Optional command line arguments select the half-open index range
    ``[iter_from, iter_to)`` of the (sorted) depth image listing:
    ``python <script> [iter_from] [iter_to]``.
    """
    depth_images = sorted(os.listdir(f'../{FOLDER_NAME}'))

    iter_from, iter_to = 0, len(depth_images)
    if len(sys.argv) > 1:
        iter_from = int(sys.argv[1])
    if len(sys.argv) > 2:
        iter_to = int(sys.argv[2])

    print(f'Creating normals. Iterating from: {iter_from} to: {iter_to} ...')
    # range(iter_from, iter_to) yields iter_to - iter_from items; the
    # original passed iter_to - iter_from + 1, so the progress display
    # never reached "[n / n]".
    n_total = iter_to - iter_from
    for i in tqdm(range(iter_from, iter_to), desc='Creating normals...'):
        file_name = depth_images[i]
        create_normals_from_img(
            img_name=file_name,
            current_idx=i - iter_from,
            n_total=n_total
        )


if __name__ == '__main__':
    main()
"PIL.Image.open",
"numpy.savez_compressed",
"util_functions.get_normals_from_depth",
"os.path.join",
"os.listdir"
] | [((553, 594), 'os.path.join', 'os.path.join', (['""".."""', 'FOLDER_NAME', 'img_name'], {}), "('..', FOLDER_NAME, img_name)\n", (565, 594), False, 'import os\n'), ((803, 869), 'numpy.savez_compressed', 'savez_compressed', (['f"""../{TARGET_FOLDER}/{outfile}"""', 'img_arr_normals'], {}), "(f'../{TARGET_FOLDER}/{outfile}', img_arr_normals)\n", (819, 869), False, 'from numpy import savez_compressed\n'), ((619, 639), 'PIL.Image.open', 'Image.open', (['IMG_PATH'], {}), '(IMG_PATH)\n', (629, 639), False, 'from PIL import Image\n'), ((690, 735), 'util_functions.get_normals_from_depth', 'get_normals_from_depth', ([], {'depth_img_arr': 'img_arr'}), '(depth_img_arr=img_arr)\n', (712, 735), False, 'from util_functions import get_normals_from_depth\n'), ((1000, 1031), 'os.listdir', 'os.listdir', (['f"""../{FOLDER_NAME}"""'], {}), "(f'../{FOLDER_NAME}')\n", (1010, 1031), False, 'import os\n')] |
from bfmplot import pl
import numpy as np
# Sample the surface z = 20*cos(x)*sin(y) on a regular grid over [0, 2*pi).
x = np.arange(0, 2*np.pi, 0.01)
y = np.arange(0, 2*np.pi, 0.01)
X, Y = np.meshgrid(x,y)
Z = np.cos(X) * np.sin(Y) * 20
# Render it as an image with a colour bar; `pl` appears to proxy
# matplotlib.pyplot (TODO confirm against bfmplot).
pl.imshow(Z)
pl.colorbar()
pl.show()
| [
"bfmplot.pl.colorbar",
"numpy.meshgrid",
"bfmplot.pl.imshow",
"numpy.sin",
"numpy.arange",
"bfmplot.pl.show",
"numpy.cos"
] | [((47, 76), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.01)'], {}), '(0, 2 * np.pi, 0.01)\n', (56, 76), True, 'import numpy as np\n'), ((79, 108), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.01)'], {}), '(0, 2 * np.pi, 0.01)\n', (88, 108), True, 'import numpy as np\n'), ((114, 131), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (125, 131), True, 'import numpy as np\n'), ((163, 175), 'bfmplot.pl.imshow', 'pl.imshow', (['Z'], {}), '(Z)\n', (172, 175), False, 'from bfmplot import pl\n'), ((176, 189), 'bfmplot.pl.colorbar', 'pl.colorbar', ([], {}), '()\n', (187, 189), False, 'from bfmplot import pl\n'), ((191, 200), 'bfmplot.pl.show', 'pl.show', ([], {}), '()\n', (198, 200), False, 'from bfmplot import pl\n'), ((135, 144), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (141, 144), True, 'import numpy as np\n'), ((147, 156), 'numpy.sin', 'np.sin', (['Y'], {}), '(Y)\n', (153, 156), True, 'import numpy as np\n')] |
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""This module provides a h5py-like API to access FioFile data.
API description
+++++++++++++++
Fiofile data structure exposed by this API:
::
/
n.1/
title = "…"
start_time = "…"
instrument/
fiofile/
comments = "…"
parameter = "…"
comment = "…"
parameter/
parameter_name = value
measurement/
colname0 = …
colname1 = …
…
The top level scan number ``n.1`` is determined from the filename as in
``prefix_n.fio``. (e.g. ``eh1_sixc_00045.fio`` would give ``45.1``)
If no number is available, will use the filename instead.
``comments`` and ``parameter`` in group ``fiofile`` are the raw headers as they
appear in the original file, as a string of lines separated by newline
(``\\n``) characters. ``comment`` are the remaining comments,
which were not parsed.
The title is the content of the first comment header line
(e.g ``"ascan ss1vo -4.55687 -0.556875 40 0.2"``).
The start_time is parsed from the second comment line.
Datasets are stored in the data format specified in the fio file header.
Scan data (e.g. ``/1.1/measurement/colname0``) is accessed by column,
the dataset name ``colname0`` being the column label as defined in the
``Col …`` header line.
If a ``/`` character is present in a column label or in a motor name in the
original FIO file, it will be substituted with a ``%`` character in the
corresponding dataset name.
MCA data is not yet supported.
This reader requires a fio file as defined in
src/sardana/macroserver/recorders/storage.py of the Sardana project
(https://github.com/sardana-org/sardana).
Accessing data
++++++++++++++
Data and groups are accessed in :mod:`h5py` fashion::
from silx.io.fioh5 import FioH5
# Open a FioFile
fiofh5 = FioH5("test_00056.fio")
# using FioH5 as a regular group to access scans
scan1group = fiofh5["56.1"]
instrument_group = scan1group["instrument"]
# alternative: full path access
measurement_group = fiofh5["/56.1/measurement"]
# accessing a scan data column by name as a 1D numpy array
data_array = measurement_group["Pslit HGap"]
:class:`FioH5` files and groups provide a :meth:`keys` method::
>>> fiofh5.keys()
['96.1', '97.1', '98.1']
>>> fiofh5['96.1'].keys()
['title', 'start_time', 'instrument', 'measurement']
They can also be treated as iterators:
.. code-block:: python
from silx.io import is_dataset
for scan_group in FioH5("test_00056.fio"):
dataset_names = [item.name in scan_group["measurement"] if
is_dataset(item)]
print("Found data columns in scan " + scan_group.name)
print(", ".join(dataset_names))
You can test for existence of data or groups::
>>> "/1.1/measurement/Pslit HGap" in fiofh5
True
>>> "positioners" in fiofh5["/2.1/instrument"]
True
>>> "spam" in fiofh5["1.1"]
False
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "09/04/2021"
import os
import datetime
import logging
import io
import h5py
import numpy
from silx import version as silx_version
from . import commonh5
from .spech5 import to_h5py_utf8
# Module-level logger used for non-fatal parse warnings.
logger1 = logging.getLogger(__name__)

# h5py < 3 needs special_dtype() to express variable-length strings; newer
# h5py accepts the plain object ('O') dtype directly.
if h5py.version.version_tuple[0] < 3:
    text_dtype = h5py.special_dtype(vlen=str)  # old API
else:
    text_dtype = 'O'  # variable-length string (supported as of h5py > 3.0)
# Give up probing for header/data markers after this many non-matching lines
# (used by is_fiofile and FioFile).
ABORTLINENO = 5

# Maps the type names used in FIO "%d" column definitions to numpy/h5py dtypes.
dtypeConverter = {'STRING': text_dtype,
                  'DOUBLE': 'f8',
                  'FLOAT': 'f4',
                  'INTEGER': 'i8',
                  'BOOLEAN': '?'}
def is_fiofile(filename):
    """Test if a file is a FIO file, by checking if three consecutive lines
    start with *!*. Tests up to ABORTLINENO lines at the start of the file.

    :param str filename: File path
    :return: *True* if file is a FIO file, *False* if it is not a FIO file
    :rtype: bool
    """
    if not os.path.isfile(filename):
        return False
    # Only the first 2.5 kB of the file are inspected.
    with open(filename, "rb") as handle:
        head = handle.read(2500)
    streak = 0
    for lineno, raw_line in enumerate(head.split(b"\n")):
        if raw_line.startswith(b"!"):
            streak += 1
            if streak >= 3:
                return True
        else:
            streak = 0
        if lineno >= ABORTLINENO:
            break
    return False
class FioFile(object):
    """This class opens a FIO file and reads the data.

    Parsed attributes: ``scanno`` (from the file name), the raw
    ``commentsection`` / ``parameterssection`` header blocks, the column
    ``names`` / ``dtypes`` / ``datacols``, the numeric ``data`` block, the
    ``parameter`` dict, and -- for Sardana-style comment headers --
    ``title``, ``user``, ``start_time`` and the remaining ``comments``.
    """

    def __init__(self, filepath):
        """
        :param str filepath: Path of the FIO file to parse
        :raises IOError: if no "%d" data section is found within
            ABORTLINENO consecutive unrecognised lines
        """
        # The scan number is encoded in the file name as "prefix_NNNNN.fio".
        filename = os.path.basename(filepath)
        fnowithsuffix = filename.split('_')[-1]
        try:
            self.scanno = int(fnowithsuffix.split('.')[0])
        except Exception:
            self.scanno = None
            logger1.warning("Cannot parse scan number of file %s", filename)

        with open(filepath, 'r') as fiof:
            # `prev` tracks the position of the last consumed header line so
            # the file can be rewound before handing it to numpy.loadtxt.
            prev = 0
            line_counter = 0
            while(True):
                line = fiof.readline()

                if line.startswith('!'):  # skip comments
                    prev = fiof.tell()
                    line_counter = 0
                    continue
                if line.startswith('%c'):  # comment section
                    line_counter = 0
                    self.commentsection = ''
                    line = fiof.readline()
                    while(not line.startswith('%')
                          and not line.startswith('!')):
                        self.commentsection += line
                        prev = fiof.tell()
                        line = fiof.readline()
                if line.startswith('%p'):  # parameter section
                    line_counter = 0
                    self.parameterssection = ''
                    line = fiof.readline()
                    while(not line.startswith('%')
                          and not line.startswith('!')):
                        self.parameterssection += line
                        prev = fiof.tell()
                        line = fiof.readline()
                if line.startswith('%d'):  # data type definitions
                    line_counter = 0
                    self.datacols = []
                    self.names = []
                    self.dtypes = []
                    line = fiof.readline()
                    while(line.startswith(' Col')):
                        # e.g. " Col 1 Pslit_HGap DOUBLE"
                        splitline = line.split()
                        name = splitline[-2]
                        self.names.append(name)
                        dtype = dtypeConverter[splitline[-1]]
                        self.dtypes.append(dtype)
                        self.datacols.append((name, dtype))
                        prev = fiof.tell()
                        line = fiof.readline()
                    # Rewind to the first non-header line before loadtxt.
                    fiof.seek(prev)
                    break
                line_counter += 1
                if line_counter > ABORTLINENO:
                    raise IOError("Invalid fio file: Found no data "
                                  "after %s lines" % ABORTLINENO)

            self.data = numpy.loadtxt(fiof,
                                      dtype={'names': tuple(self.names),
                                             'formats': tuple(self.dtypes)},
                                      comments="!")

        # ToDo: read only last line of file,
        # which sometimes contains the end of acquisition timestamp.

        self.parameter = {}
        # parse parameter section (one "name = value" pair per line); a
        # single malformed line aborts the whole loop and only logs a warning
        try:
            for line in self.parameterssection.splitlines():
                param, value = line.split(' = ')
                self.parameter[param] = value
        except Exception:
            logger1.warning("Cannot parse parameter section")

        # parse default sardana comments: username and start time
        try:
            acquiMarker = "acquisition started at"  # indicates timestamp
            commentlines = self.commentsection.splitlines()
            if len(commentlines) >= 2:
                self.title = commentlines[0]
                l2 = commentlines[1]
                acqpos = l2.lower().find(acquiMarker)
                if acqpos < 0:
                    raise Exception("acquisition str not found")
                self.user = l2[:acqpos][4:].strip()
                self.start_time = l2[acqpos+len(acquiMarker):].strip()
                # Everything after the two parsed header lines is kept as
                # free comments. (Bug fix: the original additionally
                # reassigned commentlines = commentlines[2:] before this
                # slice, silently dropping two more comment lines.)

            self.comments = "\n".join(commentlines[2:])
        except Exception:
            logger1.warning("Cannot parse default comment section")
            self.comments = self.commentsection
            self.user = ""
            self.start_time = ""
            self.title = ""
class FioH5NodeDataset(commonh5.Dataset):
    """This class inherits :class:`commonh5.Dataset`, to which it adds
    little extra functionality. The main additional functionality is the
    proxy behavior that allows to mimic the numpy array stored in this
    class.
    """

    def __init__(self, name, data, parent=None, attrs=None):
        # Coerce `data` to a type with proper numpy attributes
        # (dtype, shape, size) before handing it to commonh5.Dataset.
        if isinstance(data, str):
            # unicode, stored as utf-8 when written to HDF5
            value = to_h5py_utf8(data)
        elif isinstance(data, float):
            # float scalars are stored as 32-bit
            value = numpy.float32(data)
        elif isinstance(data, int):
            value = numpy.int_(data)
        else:
            array = numpy.array(data)
            if array.dtype.kind in ("S", "U"):
                # string arrays get the variable-length text dtype
                value = numpy.asarray(array, dtype=text_dtype)
            else:
                # numerical data is already the correct datatype
                value = array
        commonh5.Dataset.__init__(self, name, value, parent, attrs)

    def __getattr__(self, item):
        """Proxy to underlying numpy array methods.
        """
        proxied = self[()]
        if hasattr(proxied, item):
            return getattr(proxied, item)

        raise AttributeError("FioH5NodeDataset has no attribute %s" % item)
class FioH5(commonh5.File):
    """This class reads a FIO file and exposes it as a *h5py.File*.

    It inherits :class:`silx.io.commonh5.Group` (via :class:`commonh5.File`),
    which implements most of its API.
    """

    def __init__(self, filename, order=1):
        """
        :param filename: Path to FioFile in filesystem
        :type filename: str
        :param order: Repetition number appended to the scan number to form
            the scan key (e.g. ``order=1`` yields ``"45.1"``)
        :raises IOError: if the file is not a FIO file or cannot be parsed
        """
        if isinstance(filename, io.IOBase):
            # see https://github.com/silx-kit/silx/issues/858
            filename = filename.name

        if not is_fiofile(filename):
            raise IOError("File %s is not a FIO file." % filename)
        try:
            fiof = FioFile(filename)  # reads complete file
        except Exception as e:
            # Bug fix: the original message contained a "%s" placeholder
            # that was never interpolated with the file name.
            raise IOError("FIO file %s cannot be read." % filename) from e

        attrs = {"NX_class": to_h5py_utf8("NXroot"),
                 "file_time": to_h5py_utf8(
                     datetime.datetime.now().isoformat()),
                 "file_name": to_h5py_utf8(filename),
                 "creator": to_h5py_utf8("silx fioh5 %s" % silx_version)}
        commonh5.File.__init__(self, filename, attrs=attrs)

        if fiof.scanno is not None:
            scan_key = "%s.%s" % (fiof.scanno, int(order))
        else:
            # No scan number in the file name: fall back to the bare name.
            scan_key = os.path.splitext(os.path.basename(filename))[0]

        scan_group = FioScanGroup(scan_key, parent=self, scan=fiof)
        self.add_node(scan_group)
class FioScanGroup(commonh5.Group):
    # NXentry group for one scan: holds title/start_time/comments datasets
    # plus the instrument and measurement subgroups.
    def __init__(self, scan_key, parent, scan):
        """
        :param parent: parent Group
        :param str scan_key: Scan key (e.g. "1.1")
        :param scan: FioFile object
        """
        if hasattr(scan, 'user'):
            userattr = to_h5py_utf8(scan.user)
        else:
            userattr = to_h5py_utf8('')
        commonh5.Group.__init__(self, scan_key, parent=parent,
                                attrs={"NX_class": to_h5py_utf8("NXentry"),
                                       "user": userattr})
        # 'title', 'start_time' and 'user' are defaults
        # in Sardana created files:
        if hasattr(scan, 'title'):
            title = scan.title
        else:
            title = scan_key  # use scan number as default title
        self.add_node(FioH5NodeDataset(name="title",
                                       data=to_h5py_utf8(title),
                                       parent=self))
        if hasattr(scan, 'start_time'):
            start_time = scan.start_time
            self.add_node(FioH5NodeDataset(name="start_time",
                                           data=to_h5py_utf8(start_time),
                                           parent=self))
            # NOTE(review): the "comments" dataset is only added when the
            # scan has a start_time attribute -- possibly unintended
            # nesting; confirm before changing.
            self.add_node(FioH5NodeDataset(name="comments",
                                           data=to_h5py_utf8(scan.comments),
                                           parent=self))
        self.add_node(FioInstrumentGroup(parent=self, scan=scan))
        self.add_node(FioMeasurementGroup(parent=self, scan=scan))
class FioMeasurementGroup(commonh5.Group):
    """``measurement`` group: exposes each data column of the scan as a
    dataset named after its column label."""

    def __init__(self, parent, scan):
        """
        :param parent: parent Group
        :param scan: FioFile object
        """
        commonh5.Group.__init__(self, name="measurement", parent=parent,
                                attrs={"NX_class": to_h5py_utf8("NXcollection")})
        for column in scan.names:
            # "/" is not allowed in HDF5 node names; substitute "%".
            self.add_node(FioH5NodeDataset(name=column.replace("/", "%"),
                                           data=scan.data[column],
                                           parent=self))
class FioInstrumentGroup(commonh5.Group):
    """``instrument`` group: contains the parsed parameter group, the raw
    fiofile group and the unparsed comment text."""

    def __init__(self, parent, scan):
        """
        :param parent: parent Group
        :param scan: FioFile object
        """
        commonh5.Group.__init__(self, name="instrument", parent=parent,
                                attrs={"NX_class": to_h5py_utf8("NXinstrument")})
        children = (FioParameterGroup(parent=self, scan=scan),
                    FioFileGroup(parent=self, scan=scan),
                    FioH5NodeDataset(name="comment",
                                     data=to_h5py_utf8(scan.comments),
                                     parent=self))
        for child in children:
            self.add_node(child)
class FioFileGroup(commonh5.Group):
    """``fiofile`` group: exposes the raw comment and parameter header
    sections exactly as they appear in the original file."""

    def __init__(self, parent, scan):
        """
        :param parent: parent Group
        :param scan: FioFile object
        """
        commonh5.Group.__init__(self, name="fiofile", parent=parent,
                                attrs={"NX_class": to_h5py_utf8("NXcollection")})
        raw_sections = (("comments", scan.commentsection),
                        ("parameter", scan.parameterssection))
        for node_name, raw_text in raw_sections:
            self.add_node(FioH5NodeDataset(name=node_name,
                                           data=to_h5py_utf8(raw_text),
                                           parent=self))
class FioParameterGroup(commonh5.Group):
    """``parameter`` group: one dataset per parsed ``%p`` header entry."""

    def __init__(self, parent, scan):
        """
        :param parent: parent Group
        :param scan: FioFile object
        """
        commonh5.Group.__init__(self, name="parameter", parent=parent,
                                attrs={"NX_class": to_h5py_utf8("NXcollection")})
        for label in scan.parameter:
            # "/" is not allowed in HDF5 node names; substitute "%".
            self.add_node(FioH5NodeDataset(name=label.replace("/", "%"),
                                           data=to_h5py_utf8(scan.parameter[label]),
                                           parent=self))
| [
"numpy.int_",
"os.path.basename",
"h5py.special_dtype",
"numpy.float32",
"numpy.asarray",
"os.path.isfile",
"numpy.array",
"datetime.datetime.now",
"logging.getLogger"
] | [((4544, 4571), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4561, 4571), False, 'import logging\n'), ((4628, 4656), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (4646, 4656), False, 'import h5py\n'), ((5271, 5295), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (5285, 5295), False, 'import os\n'), ((5877, 5903), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (5893, 5903), False, 'import os\n'), ((10667, 10686), 'numpy.float32', 'numpy.float32', (['data'], {}), '(data)\n', (10680, 10686), False, 'import numpy\n'), ((10743, 10759), 'numpy.int_', 'numpy.int_', (['data'], {}), '(data)\n', (10753, 10759), False, 'import numpy\n'), ((10828, 10845), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (10839, 10845), False, 'import numpy\n'), ((12739, 12765), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (12755, 12765), False, 'import os\n'), ((10952, 10990), 'numpy.asarray', 'numpy.asarray', (['array'], {'dtype': 'text_dtype'}), '(array, dtype=text_dtype)\n', (10965, 10990), False, 'import numpy\n'), ((12363, 12386), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12384, 12386), False, 'import datetime\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use("Agg")  # select the non-interactive backend; must run before pyplot is imported
import matplotlib.cm
import matplotlib.font_manager as fm
def mpl_setdefaults():
    """Apply the project's default matplotlib styling: STIX serif fonts,
    LaTeX text rendering, and enlarged label/tick/legend font sizes."""
    mpl.rc('font', **{'family': 'serif', 'serif': ['stix']})
    mpl.rcParams["text.usetex"] = True
    # "text.latex.unicode" was removed in matplotlib 3.0; assigning to a
    # removed rcParam raises KeyError, so only set it where it still exists.
    if "text.latex.unicode" in mpl.rcParams:
        mpl.rcParams["text.latex.unicode"] = True
    mpl.rcParams['font.family'] = 'STIXGeneral'
    mpl.rcParams['mathtext.fontset'] = 'stix'
    mpl.rcParams["axes.labelsize"] = 20.
    mpl.rcParams["xtick.labelsize"] = 16.
    mpl.rcParams["ytick.labelsize"] = 16.
    mpl.rcParams["legend.fontsize"] = 14.


mpl_setdefaults()
# prop = fm.FontProperties(fname='/tmp/Serif/cmunrm.ttf')
from matplotlib import collections
import matplotlib.ticker as plticker
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pyplot as plt

import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.optimize
import scipy.stats

import collections
#from collections Iterable
def brighten(rgb, factor=1.0):
    """Move an RGB colour towards white.

    Mirrors :func:`darken`: ``factor=1`` returns the colour unchanged,
    ``factor=0`` returns pure white.

    Bug fix: the original signature lacked the ``factor`` parameter, so
    every call raised NameError. The default of 1.0 (identity) keeps the
    old one-argument call form working.

    :param rgb: numpy array of three channel values in [0, 1]
    :param factor: brightness scale in [0, 1]; smaller means brighter
    :return: brightened colour as a numpy array
    """
    assert 0 <= factor <= 1
    inverted = np.ones(3) - rgb
    return np.ones(3) - inverted * factor
def darken(rgb, factor):
    """Scale an RGB colour towards black: ``factor=1`` leaves it unchanged,
    ``factor=0`` yields pure black.

    :param rgb: numpy array of channel values
    :param factor: darkness scale in [0, 1]
    :return: darkened colour
    """
    assert 0 <= factor <= 1
    return factor * rgb
def get_dual_linear_colour_map(n_sweeps, reversed=False):
    """Build a ``(2, n_sweeps)`` object array of colours: row 0 holds shades
    of orange ("se"), row 1 shades of blue ("diff"); within each row the
    colours run from bright (index 0) to dark.

    :param n_sweeps: number of colours per row
    :param reversed: if True, flip the sweep ordering of both rows (the
        name shadows the ``reversed`` builtin; kept for API compatibility)
    :return: numpy object array of shape (2, n_sweeps) holding RGBA tuples
    """
    n_circuits = 2

    # NOTE(review): `brightnessAdjust` is not defined in this module; the
    # sibling helpers brighten/darken suggest `darken` was intended (factor
    # 1.0 is the identity there) -- confirm before relying on this function.
    col_se = np.array([.95, .43, .0])
    col_se1 = brightnessAdjust(col_se, 1.)
    col_se2 = brightnessAdjust(col_se, .6)

    col_diff = np.array([.275, .443, .835])
    col_diff1 = brightnessAdjust(col_diff, 1.)
    col_diff2 = brightnessAdjust(col_diff, .6)

    cm_se = mpl.colors.LinearSegmentedColormap.from_list("se_cm", [col_se2, col_se1])
    cm_diff = mpl.colors.LinearSegmentedColormap.from_list("diff_cm", [col_diff2, col_diff1])

    # Bug fix: numpy.object was removed from numpy's namespace; use the builtin.
    colours = np.empty((n_circuits, n_sweeps), dtype=object)
    if n_sweeps == 1:
        colours[0, 0] = cm_se(0.)
        colours[1, 0] = cm_diff(0.)
    else:
        for sweep_idx in range(n_sweeps):
            colours[0, n_sweeps - sweep_idx - 1] = cm_se(sweep_idx / float(n_sweeps - 1))
            colours[1, n_sweeps - sweep_idx - 1] = cm_diff(sweep_idx / float(n_sweeps - 1))
    if reversed:
        colours = colours[:, ::-1]
    return colours
def log_interp(zz, xx, yy):
    """Linear interpolation between points on a log-log axis (wrapper for
    scipy interp1d).

    :param zz: abscissa value(s) at which to evaluate the interpolant
    :param xx: strictly increasing abscissa samples
    :param yy: ordinate samples corresponding to xx
    :return: interpolated value(s) in linear space
    """
    # xx must be strictly increasing for a well-defined interpolant.
    assert np.all(np.diff(xx) > 0)
    interpolator = sp.interpolate.interp1d(np.log10(xx), np.log10(yy),
                                           kind="linear")
    return np.power(10., interpolator(np.log10(zz)))
def find_x(y, xx, yy, epsilon=1E-6):
    """Binary search: given pairs (xx, yy) with yy = f(xx), find for each
    target value in *y* the x such that f(x) = y, using bisection on the
    log-log interpolant of the samples.

    :param y: scalar or array of target ordinate values
    :param xx: abscissa samples (strictly increasing)
    :param yy: ordinate samples corresponding to xx
    :param epsilon: absolute tolerance passed to the bisection
    :return: numpy array of solutions; entries are NaN where the target lies
        outside [min(yy), max(yy)]
    """
    if np.array(y).size == 1:
        y = np.array([y])
    # Use a float result array so NaN can be stored even for integer targets.
    x = np.zeros_like(y, dtype=float)
    for i, _y in enumerate(y):
        if _y < np.amin(yy) or _y > np.amax(yy):
            x[i] = np.nan
        elif _y == yy[0]:
            x[i] = xx[0]
        elif _y == yy[-1]:
            x[i] = xx[-1]
        else:
            def f(val):
                return log_interp(val, xx, yy) - _y
            # Bug fix: the original assigned the bisection result to the
            # whole array `x`, clobbering previously computed entries and
            # changing the return type to a scalar.
            x[i] = sp.optimize.bisect(f, xx[0], xx[-1], xtol=epsilon)
    return x
def create_2d_colour_map(dim1, dim2):
    """Build per-(circuit, sweep) RGB colours from up to three linear colormaps.

    Colormap per row: 0 = orange (single-ended), 1 = blue (differential),
    2 = green (counter-steering). Along each row the colormap is sampled at
    ``sweep_idx / max(1, dim2 - 1)``.

    Parameters
    ----------
    dim1 : int, number of circuits (at most 3)
    dim2 : int, number of sweeps per circuit

    Returns
    -------
    (colour_maps, colours) : list of 3 LinearSegmentedColormap objects, and a
        float array of shape (dim1, dim2, 3) with RGB values.
    """
    assert dim1 <= 3, "more than 3 not yet implemented"
    col_se = np.array([.95, .43, .0])
    col_se1 = brightnessAdjust(col_se, 1.)
    col_se2 = brightnessAdjust(col_se, .6)
    cm_se = mpl.colors.LinearSegmentedColormap.from_list("se_cm", [col_se2, col_se1])
    col_diff = np.array([.275, .443, .835])
    col_diff1 = brightnessAdjust(col_diff, 1.)
    col_diff2 = brightnessAdjust(col_diff, .6)
    cm_diff = mpl.colors.LinearSegmentedColormap.from_list("diff_cm", [col_diff2, col_diff1])
    col_diff_cs = np.array([.275, .835, .443])
    # NOTE(review): here the bright variant is *_cs2 and the dark one *_cs1,
    # the opposite of the two maps above, so "diff_cs_cm" runs bright -> dark
    # while the others run dark -> bright. Confirm this is intentional.
    col_diff_cs2 = brightnessAdjust(col_diff_cs, 1.)
    col_diff_cs1 = brightnessAdjust(col_diff_cs, .6)
    cm_diff_cs = mpl.colors.LinearSegmentedColormap.from_list("diff_cs_cm", [col_diff_cs2, col_diff_cs1])
    colour_maps = [cm_se, cm_diff, cm_diff_cs]
    # BUG FIX: ``np.float`` was removed in NumPy 1.24; builtin ``float`` is
    # the documented equivalent.
    colours = np.empty((dim1, dim2, 3), dtype=float)
    for sweep_idx in range(dim2):
        for cm_idx in range(dim1):
            # Drop the alpha channel; keep RGB only.
            colours[cm_idx, sweep_idx, :] = colour_maps[cm_idx](sweep_idx / float(max(1, dim2 - 1)))[:3]
    return colour_maps, colours
def plot_bode(fn, f, mag, ang, ckt_ids=None, labels=None, colours=None, markers=None, figsize=(8, 5), ylim_mag=None, ylim_ang=None, colourmap=mpl.cm.jet, mag_ax_ylabel="Magnitude", ang_ax_ylabel="Phase", title=None, log_scale=20., markers_f=None, markers_mag=None, markers_ang=None, marker_h_colour=np.array([0., 0., 0.]), marker_colours=np.array([0., 0., 0.]), markers_size=40., intersect_markers="o", log_y_axis=True, ang_tick_multiples=60.):
    """Plot a Bode diagram (magnitude on top, phase below) and save it to *fn*.

    Parameters
    ----------
    fn : str
        Output file name passed to ``fig.savefig``.
    f : numpy array, shape (n_freqs,)
        Frequency axis [Hz].
    mag, ang : numpy arrays of identical shape: (n_freqs,),
        (n_circuits, n_freqs) or (n_circuits, n_sweeps, n_freqs)
        Magnitude (linear; y tick labels are converted via
        ``log_scale * log10``) and phase data.
    labels, markers : None, list (per sweep) or 2-D numpy array
        (per circuit and sweep) of legend labels / marker styles.
    colours : None or array indexable as [circuit_idx, sweep_idx, :];
        built via ``create_2d_colour_map`` when None.
    markers_f, markers_mag, markers_ang : optional per-marker annotation
        points (frequency / magnitude / angle) drawn with dashed guide lines.
    log_scale : float
        Factor applied to log10 of the magnitude ticks (20 -> voltage dB).
    ang_tick_multiples : float
        Spacing of the phase-axis major ticks.
    """
    assert np.all(mag.shape == ang.shape), "Magnitude and angle should be in the same shape"
    fig = plt.figure(figsize=figsize)
    # Magnitude axis 3x taller than the phase axis.
    gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    ax = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1])
    ax.set_xlim(np.amin(f), np.amax(f))
    ax2.set_xlim(np.amin(f), np.amax(f))
    if ylim_mag is None:
        ylim_mag = [np.amin(mag), np.amax(mag)]
    if ylim_ang is None:
        ylim_ang = [np.amin(ang), np.amax(ang)]
    ax.set_ylim(ylim_mag)
    ax2.set_ylim(ylim_ang)
    # Normalise the data to 3-D shape (n_circuits, n_sweeps, n_freqs).
    if len(mag.shape) == 1:
        mag = mag[np.newaxis, np.newaxis, :]
        ang = ang[np.newaxis, np.newaxis, :]
    elif len(mag.shape) == 2:
        mag = mag[:, np.newaxis, :]
        ang = ang[:, np.newaxis, :]
    n_circuits, n_sweeps, n_freqs = mag.shape
    if colours is None:
        _, colours = create_2d_colour_map(n_circuits, n_sweeps)
    for circuit_idx in range(n_circuits):
        for sweep_idx in range(n_sweeps):
            if labels is None:
                _label = None
            else:
                if type(labels) is np.ndarray and len(labels.shape) == 2:
                    _label = labels[circuit_idx, sweep_idx]
                else:
                    assert type(labels) is list \
                        or (type(labels) is np.ndarray and len(labels.shape) == 1)
                    _label = labels[sweep_idx]
            if markers is None:
                _marker = None
            else:
                # BUG FIX: the original referenced undefined names ``_markers``
                # and ``_labels`` here (NameError whenever ``markers`` was
                # passed); mirror the ``labels`` handling above.
                if type(markers) is np.ndarray and len(markers.shape) == 2:
                    _marker = markers[circuit_idx, sweep_idx]
                else:
                    assert type(markers) is list \
                        or (type(markers) is np.ndarray and len(markers.shape) == 1)
                    _marker = markers[sweep_idx]
            ax.plot(f, mag[circuit_idx, sweep_idx, :], linestyle="-", marker=_marker, markersize=1.2*5.5, color=colours[circuit_idx, sweep_idx, :], label=_label, linewidth=2, zorder=99)#, edgecolor="#ffffff", hatch="-", linewidth=10.0)
            ax2.plot(f, ang[circuit_idx, sweep_idx, :], marker=_marker, markersize=1.2*5.5, linestyle="-", color=colours[circuit_idx, sweep_idx, :], label=_label, linewidth=2, zorder=99, alpha=1)#, edgecolor="#ffffff", hatch="-", linewidth=10.0)
    if log_y_axis:
        ax.set_yscale('log')
    ax.set_xscale('log')
    ax2.set_xscale('log')
    loc = plticker.MultipleLocator(base=ang_tick_multiples) # this locator puts ticks at regular intervals
    ax2.yaxis.set_major_locator(loc)
    if not markers_f is None:
        # Normalise per-marker style inputs to one entry per marker frequency.
        if not marker_colours is None:
            if type(marker_colours) is np.ndarray and len(marker_colours.shape) == 1:
                marker_colours = np.tile(marker_colours, (len(markers_f), 1))
            marker_colours = np.array(marker_colours)
        if not intersect_markers is None:
            if not type(intersect_markers) in [list, np.ndarray]:
                intersect_markers = np.tile(intersect_markers, len(markers_f))
            else:
                assert len(intersect_markers) == len(markers_f)
        if not type(markers_size) in [list, np.ndarray]:
            markers_size = np.tile(markers_size, len(markers_f))
        else:
            assert len(markers_size) == len(markers_f)
        if not markers_mag is None:
            assert len(markers_f) == len(markers_mag)
        if not markers_ang is None:
            assert len(markers_f) == len(markers_ang)
        for i, _f in enumerate(markers_f):
            if not markers_mag is None:
                ax.scatter(_f, markers_mag[i], marker=intersect_markers[i], s=markers_size[i], zorder=999, facecolor=marker_colours[i, :])
                ax.plot([_f, _f], [np.amin(ax.get_ylim()), markers_mag[i]], linestyle="--", linewidth=2., color=marker_colours[i, :]) # vertical marker line
                ax.plot([np.amin(ax.get_xlim()), _f], [markers_mag[i], markers_mag[i]], linestyle="--", color=marker_h_colour, linewidth=2) # horizontal black marker line
                ax2.plot([_f, _f], [markers_ang[i], ax2.get_ylim()[1]], linestyle="--", linewidth=2., color=marker_colours[i, :]) # vertical marker line
            if not markers_ang is None:
                ax2.scatter(_f, markers_ang[i], marker=intersect_markers[i], s=markers_size[i], zorder=999, facecolor=marker_colours[i, :])
                ax2.plot([0, _f], [markers_ang[i], markers_ang[i]], linestyle="--", linewidth=2., color=marker_colours[i, :]) # horizontal coloured marker line
    fig.canvas.draw()
    ax.set_xticklabels([])
    # Shared axis cosmetics for both subplots.
    for _ax in [ax, ax2]:
        _ax.grid(b=True, zorder=0) # , which='both'
        _ax.spines['left'].set_linewidth(1.5)
        # _ax.spines['right'].set_visible(False)
        # _ax.spines['top'].set_visible(False)
        _ax.spines['bottom'].set_linewidth(1.5)
        _ax.xaxis.set_ticks_position("bottom")
        _ax.yaxis.set_ticks_position("left")
        for tick in _ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(16)
        for tick in _ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(16)
    # Re-label linear magnitude ticks in log units (e.g. dB for log_scale=20).
    lbl = []
    for t in ax.get_yticks():
        lbl.append(log_scale*np.log10(t))
    ax.set_yticklabels(lbl)
    ax.set_ylabel(mag_ax_ylabel)
    ax2.set_ylabel(ang_ax_ylabel)
    ax2.set_xlabel("Frequency [Hz]")
    leg = ax.legend(loc="upper right", handlelength=1.8, fontsize=16., fancybox=True, framealpha=1)
    if not title is None:
        fig.suptitle(title)
    fig.savefig(fn)
    plt.close(fig)
def brightnessAdjust(rgb, value):
    """Return *rgb* with its HSV value (brightness) channel replaced by *value*.

    Hue and saturation are preserved; only the V channel changes.
    """
    as_hsv = mpl.colors.rgb_to_hsv(rgb)
    as_hsv[2] = value
    return mpl.colors.hsv_to_rgb(as_hsv)
if __name__ == "__main__":
    # Measured amplifier frequency response (demo data).
    freqs = np.array([1E3, 2E3, 3E3, 4E3, 5E3, 10E3, 20E3, 30E3, 40E3, 50E3, 100E3, 200E3, 300E3, 400E3, 500E3, 1E6]) # [Hz]
    amp_se = np.array([20.6, 20.4, 20.3, 20.5, 20.3, 20.0, 18.1, 16.8, 15.5, 13.5, 7.9, 4.2, 2.7, 2.0, 1.5, 0.7])
    phase_se = np.array([172, 172, 172, 172, 174, 160, 153, 134, 127, 127, 102, 90, 75, 73, 72, 36])
    amp_diff = np.array([20.4, 20.9, 20.5, 21.1, 20.9, 20.9, 20.4, 20., 19.1, 17.9, 13., 7.6, 5.1, 3.84, 3.1, 1.4])
    phase_diff = np.array([7, 10, 10, 16., 13, 16, 21., 23., 30., 38, 55, 82., 88., 92., 95., 115])
    # Reference both phase traces to their first sample.
    phase_se -= phase_se[0]
    phase_diff -= phase_diff[0]
    # phase_se = -phase_se
    phase_diff = -phase_diff
    # Normalise amplitudes to the known low-frequency gains.
    gain_se = 0.0101
    gain_diff = 0.01
    load_R = 10E3  # kept for reference; not used in the plot itself
    amp_se /= amp_se[0] / gain_se
    amp_diff /= amp_diff[0] / gain_diff
    # BUG FIX: the original called ``plot_bode(f, mag, ang, label=...)`` with
    # undefined ``mag``/``ang``, without the required output-file argument and
    # with a keyword (``label``) that plot_bode does not accept. Stack the two
    # measured traces into (n_circuits, n_freqs) arrays and use the real
    # signature; a 2-D labels array gives one legend entry per circuit.
    mag = np.array([amp_se, amp_diff])
    ang = np.array([phase_se, phase_diff])
    plot_bode("bode.png", freqs, mag, ang,
              labels=np.array([["Single-ended"], ["Differential"]]))
def plot_pm_vs_gbw(max_pm, max_pm_f, best_c_fb, c_fb_list, fn, circuit_names, c_load_list=None, gbw_list=None, figsize=(2.5, 6), MARKER_SIZE=5):
    """Plot phase-margin results vs gain-bandwidth product and save to *fn*.

    Produces a 3-row figure:
      row 0: best obtained phase margin [deg] vs GBW
      row 1: frequency of the best phase margin [Hz] vs GBW (log y axis)
      row 2: optimal C_FB vs GBW (log y axis; scaled by 1E12 for display,
             so best_c_fb is presumably given in Farads -- TODO confirm)

    Parameters
    ----------
    max_pm, max_pm_f, best_c_fb : numpy arrays indexed [circuit_idx, gbw_idx]
    c_fb_list : accepted but not used inside this function
    fn : output file name
    circuit_names : list containing "single-ended", "differential" and
        "differential-counter-steering" (each is looked up by name below)
    gbw_list : GBW values for the x axis (converted to MHz for display)
    """
    def _plot_phase_margins(ax, data, circuit_name, legend=False, ylabel=None, log_y_axis=False, ylim=None, labels=None, plot_xlabels=True):
        # Draw data[circuit_idx, :] vs GBW [MHz] for one circuit on *ax*.
        marker, marker_size = get_marker_style(circuit_name, MARKER_SIZE)
        circuit_idx = circuit_names.index(circuit_name)
        n_circuits = len(circuit_names)
        _, colours = create_2d_colour_map(n_circuits, 1)
        linescollection = ax.semilogx(1E-6 * np.array(gbw_list), data[circuit_idx, :], marker=marker, markersize=marker_size, color=colours[circuit_idx, 0], linewidth=2.)
        ax.set_xlim(1E-6 * np.amin(gbw_list), 1E-6 * np.amax(gbw_list))
        if plot_xlabels:
            ax.set_xlabel("$GBW$ [MHz]")
        # NOTE(review): tick labels are cleared unconditionally, even when
        # plot_xlabels=True (an axis label with no tick labels) -- confirm
        # this is the intended look.
        ax.set_xticklabels([])
        if not ylabel is None:
            ax.set_ylabel(ylabel)
        if log_y_axis:
            ax.set_yscale("log")
        if not ylim is None:
            ax.set_ylim(ylim)
        # ax.legend(linescollection_se + linescollection_diff, tuple(labels_se) + tuple(labels_diff), loc="best")
        if legend:
            ax.legend(linescollection, tuple(labels), bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        ax.grid(b=True, zorder=0) # , which='both'
        ax.spines['left'].set_linewidth(1.5)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['bottom'].set_linewidth(1.5)
        ax.xaxis.set_ticks_position("bottom")
        ax.yaxis.set_ticks_position("left")
        return linescollection
    n_gbw_vals = len(gbw_list)
    fig = plt.figure(figsize=figsize)
    gs = mpl.gridspec.GridSpec(3, 1)
    # One call per circuit per row; only the last call of each row supplies the
    # y label so it is set exactly once per subplot.
    linescollection_se_pm = _plot_phase_margins(plt.subplot(gs[0]), data=max_pm, circuit_name="single-ended", legend=False, ylim=(0., 161.), plot_xlabels=False)
    linescollection_diff_pm = _plot_phase_margins(plt.subplot(gs[0]), data=max_pm, circuit_name="differential", legend=False, ylim=(0., 161.), plot_xlabels=False)
    linescollection_diff_cs_pm = _plot_phase_margins(plt.subplot(gs[0]), data=max_pm, circuit_name="differential-counter-steering", legend=False, ylabel="Best obtained\nphase margin [deg]", ylim=(0., 161.), plot_xlabels=False)
    linescollection_se_pm_f = _plot_phase_margins(plt.subplot(gs[1]), data=max_pm_f, circuit_name="single-ended", legend=False, ylim=(np.amin(max_pm_f), np.amax(max_pm_f)), log_y_axis=True, plot_xlabels=False)
    linescollection_diff_pm_f = _plot_phase_margins(plt.subplot(gs[1]), data=max_pm_f, circuit_name="differential", legend=False, ylim=(np.amin(max_pm_f), np.amax(max_pm_f)), log_y_axis=True, plot_xlabels=False)
    linescollection_diff_cs_pm_f = _plot_phase_margins(plt.subplot(gs[1]), data=max_pm_f, circuit_name="differential-counter-steering", legend=False, ylim=(np.amin(max_pm_f), np.amax(max_pm_f)), ylabel="Frequency of best\nphase margin [Hz]", log_y_axis=True, plot_xlabels=False)
    linescollection_se_pm = _plot_phase_margins(plt.subplot(gs[2]), data=1E12*best_c_fb, circuit_name="single-ended", legend=False, ylim=(np.amin(1E12*best_c_fb), np.amax(1E12*best_c_fb)), plot_xlabels=True, log_y_axis=True)
    linescollection_diff_pm = _plot_phase_margins(plt.subplot(gs[2]), data=1E12*best_c_fb, circuit_name="differential", legend=False, ylim=(np.amin(1E12*best_c_fb), np.amax(1E12*best_c_fb)), ylabel="Optimal\n$C_{FB}$ [pF]", plot_xlabels=True, log_y_axis=True)
    linescollection_diff_cs_pm = _plot_phase_margins(plt.subplot(gs[2]), data=1E12*best_c_fb, circuit_name="differential-counter-steering", legend=False, ylim=(np.amin(1E12*best_c_fb), np.amax(1E12*best_c_fb)), ylabel="Optimal\n$C_{FB}$ [pF]", plot_xlabels=True, log_y_axis=True)
    fig.savefig(fn)
    plt.close(fig)
def plot_pm_vs_gbw2(max_pm, max_pm_f, best_c_fb, c_fb_list, circuit_names, fn, c_load_list=None, gbw_list=None, labels_diff="Differential", labels_se="Single ended", figsize=(8, 6.5), MARKER_SIZE=5):
    """Plot phase-margin results vs GBW in a 3x3 grid and save to *fn*.

    Columns = circuit topologies, rows = best phase margin / frequency of the
    best phase margin / optimal C_FB. One curve per load capacitance in
    *c_load_list*. For non-single-ended columns, the single-ended curves are
    re-drawn first with alpha=.25 as a faded visual reference.

    Parameters
    ----------
    max_pm, max_pm_f, best_c_fb : numpy arrays indexed
        [circuit_idx, gbw_idx, c_load_idx]
    c_fb_list, labels_diff, labels_se : accepted but not used inside this
        function (kept for interface compatibility)
    circuit_names : list of topologies; must contain "single-ended"
    """
    def _plot_phase_margins(ax, data, colour, legend=False, ylabel=None, log_y_axis=False, ylim=None, labels=None, plot_xlabels=True, alpha=1., plot_ylabels=True):
        # Draw one 1-D curve `data` vs GBW [MHz] on *ax*.
        # NOTE(review): `circuit_name` is not a parameter -- it is resolved
        # from the enclosing for-loop variable at call time (late binding);
        # the function must only be called from inside that loop.
        marker, marker_size = get_marker_style(circuit_name, MARKER_SIZE)
        linescollection = ax.semilogx(1E-6 * np.array(gbw_list), data, marker=marker, markersize=marker_size, color=colour, linewidth=2., alpha=alpha)#), markeredgecolor=colours[circuit_idx, c_load_idx])
        ax.set_xlim(1E-6 * np.amin(gbw_list), 1E-6 * np.amax(gbw_list))
        if plot_xlabels:
            ax.set_xlabel("$GBW$ [MHz]")
        else:
            ax.set_xticklabels([])
        ax.grid(True)
        if log_y_axis:
            ax.set_yscale("log")
        if not ylim is None:
            ax.set_ylim(ylim)
        # ax.legend(linescollection_se + linescollection_diff, tuple(labels_se) + tuple(labels_diff), loc="best")
        if plot_ylabels:
            ax.set_ylabel(ylabel)
        else:
            ax.set_yticklabels([])
        return linescollection
    n_c_load_vals = len(c_load_list)
    n_circuits = len(circuit_names)
    _, colours = create_2d_colour_map(n_circuits, n_c_load_vals)
    fig = plt.figure(figsize=figsize)
    gs = mpl.gridspec.GridSpec(3, 3)
    for circuit_idx, circuit_name in enumerate(circuit_names):
        linescollection = []
        labels = []
        for c_load_idx, C_L in enumerate(c_load_list):
            # faded background single-ended
            if not circuit_name == "single-ended":
                alpha = .25
                _circuit_idx = circuit_names.index("single-ended")
                _plot_phase_margins(plt.subplot(gs[0, circuit_idx]), data=max_pm[_circuit_idx, :, c_load_idx], colour=colours[_circuit_idx, c_load_idx], legend=True, ylim=(55.-1E-12, 160.+1E-12), plot_xlabels=False, ylabel="Best obtained\nphase margin [deg]", alpha=alpha, plot_ylabels=False)
                _plot_phase_margins(plt.subplot(gs[1, circuit_idx]), data=max_pm_f[_circuit_idx, :, c_load_idx], colour=colours[_circuit_idx, c_load_idx], legend=True, ylim=(np.amin(max_pm_f), np.amax(max_pm_f)), plot_xlabels=False, log_y_axis=True, ylabel="Frequency of best\nphase margin [Hz]", alpha=alpha, plot_ylabels=False)
                _plot_phase_margins(plt.subplot(gs[2, circuit_idx]), data=1E12*best_c_fb[_circuit_idx, :, c_load_idx], colour=colours[_circuit_idx, c_load_idx], legend=True, labels=["$C_L = " + str(c_load_list[i]) + "$" for i in range(n_c_load_vals)], ylabel="Optimal\n$C_{FB}$ [pF]", plot_xlabels=True, log_y_axis=True, alpha=alpha, plot_ylabels=False)#, ylim=(1.-1E-12, 100.+1E-12))
            # Full-opacity curves for this column's own circuit; only the first
            # column (circuit_idx == 0) draws the y labels.
            alpha = 1.
            _plot_phase_margins(plt.subplot(gs[0, circuit_idx]), data=max_pm[circuit_idx, :, c_load_idx], colour=colours[circuit_idx, c_load_idx], legend=True, ylim=(55.-1E-12, 160.+1E-12), plot_xlabels=False, ylabel="Best obtained\nphase margin [deg]", alpha=alpha, plot_ylabels=circuit_idx == 0)
            _plot_phase_margins(plt.subplot(gs[1, circuit_idx]), data=max_pm_f[circuit_idx, :, c_load_idx], colour=colours[circuit_idx, c_load_idx], legend=True, ylim=(np.amin(max_pm_f), np.amax(max_pm_f)), plot_xlabels=False, log_y_axis=True, ylabel="Frequency of best\nphase margin [Hz]", alpha=alpha, plot_ylabels=circuit_idx == 0)
            linescollection += _plot_phase_margins(plt.subplot(gs[2, circuit_idx]), data=1E12*best_c_fb[circuit_idx, :, c_load_idx], colour=colours[circuit_idx, c_load_idx], legend=True, labels=["$C_L = " + str(c_load_list[i]) + "$" for i in range(n_c_load_vals)], ylabel="Optimal\n$C_{FB}$ [pF]", plot_xlabels=True, log_y_axis=True, alpha=alpha, plot_ylabels=circuit_idx == 0)#, ylim=(1.-1E-12, 100.+1E-12))
            labels += ["$C_L = " + str(C_L) + "$"]
        # plot legend on top row
        # NOTE(review): the subplot addressed here is gs[circuit_idx, 0]
        # (column 0, row circuit_idx), which does not match the "top row"
        # comment above -- gs[0, circuit_idx] may have been intended; confirm.
        ax = plt.subplot(gs[circuit_idx, 0])
        ax.legend(linescollection, tuple(labels), bbox_to_anchor=(1.05, 1), loc=9, borderaxespad=0.)
    fig.subplots_adjust(right=.8)
    fig.savefig(fn)
    plt.close(fig)
def plot_phase_margins_vs_cfb(pm, c_fb_list, fn, circuit_names, c_load_list=None, figsize=(5, 6), MARKER_SIZE=5, ang_ylim=(59., 161.), title=""):
    """Plot phase margin vs feedback capacitance, one subplot per circuit.

    Each subplot shows one curve per load capacitance, plus a faded
    (alpha=.25) reference curve from another topology underneath.
    The figure is saved to *fn*.

    Parameters
    ----------
    pm : numpy array indexed [circuit_idx, c_fb_idx, c_load_idx]
        Phase margins [deg].
    c_fb_list : feedback capacitances; scaled by 1E12 for the x axis, so
        presumably given in Farads -- TODO confirm.
    circuit_names : list of at most 3 topology names (circuit_idx in 0..2).
    ang_ylim : y-axis limits for every subplot, or None to autoscale.
    """
    def _plot_phase_margins(ax, circuit_idx, circuit_name, labels=[], legend=False):
        # Draw one circuit's curves plus a faded reference on *ax*.
        # NOTE(review): mutable default `labels=[]` -- harmless here (it is
        # never mutated) but worth replacing with None in a follow-up.
        assert 0 <= circuit_idx <= 2
        marker, marker_size = get_marker_style(circuit_name, MARKER_SIZE)
        # Marker style for the faded reference curve; the middle subplot
        # (circuit_idx == 2) also carries the shared axis labels.
        if circuit_idx == 1:
            other_marker = "o"
            other_marker_size = MARKER_SIZE
        elif circuit_idx == 2:
            other_marker = "s"
            other_marker_size = MARKER_SIZE
            ax.set_xlabel("$C_{FB}$ [pF]")
            ax.set_ylabel("Phase margin [deg]")
        else:
            other_marker = "d"
            other_marker_size = MARKER_SIZE * 1.25
        # NOTE(review): for circuit_idx == 2 this is -1, i.e. the *last*
        # circuit via negative indexing -- confirm that is the intended
        # reference curve.
        other_circuit_idx = 1 - circuit_idx
        n_circuits = len(circuit_names)
        # `n_c_load_vals` is read from the enclosing function's scope.
        _, colours = create_2d_colour_map(n_circuits, n_c_load_vals)
        linescollection = None
        for c_load_idx in range(n_c_load_vals):
            # Faded reference curve first, then this circuit's curve on top;
            # only the second `_l` (the real curve) is collected for the legend.
            _l = ax.semilogx(1E12 * c_fb_list, pm[other_circuit_idx, :, c_load_idx], marker=other_marker, markersize=other_marker_size, color=colours[other_circuit_idx, c_load_idx], linewidth=2., alpha=.25)#), markeredgecolor=colours[other_circuit_idx, c_load_idx])
            _l = ax.semilogx(1E12 * c_fb_list, pm[circuit_idx, :, c_load_idx], marker=marker, markersize=marker_size, color=colours[circuit_idx, c_load_idx], linewidth=2.)#, markeredgecolor=colours[circuit_idx, c_load_idx])
            if linescollection is None:
                linescollection = _l
            else:
                linescollection += _l
        if circuit_idx == 0:
            ax.set_xticklabels([])
        ax.set_xlim(1E12 * np.amin(c_fb_list), 1E12 * np.amax(c_fb_list))
        if not ang_ylim is None:
            ax.set_ylim(ang_ylim)
        ax.grid(True)
        if legend:
            leg = ax.legend(linescollection, tuple(labels), loc="best")
            leg.get_frame().set_alpha(.6)
        return linescollection
    n_circuits = len(circuit_names)
    n_c_fb_vals = len(c_fb_list)
    n_c_load_vals = len(c_load_list)
    # pm = np.inf * np.ones((n_circuits, n_c_fb_vals, n_c_load_vals))
    fig = plt.figure(figsize=figsize)
    gs = mpl.gridspec.GridSpec(n_circuits, 1)
    for circuit_idx, circuit_name in enumerate(circuit_names):
        _plot_phase_margins(plt.subplot(gs[circuit_idx]), circuit_idx=circuit_idx, circuit_name=circuit_name, labels=["$C_L = " + "{:.2E}".format(c_load_list[i]) + "$" + " (" + circuit_name + ")"for i in range(n_c_load_vals)], legend=True)
    fig.suptitle(title)
    fig.savefig(fn)
    plt.close(fig)
def get_marker_style(circuit_name, _marker_size=40.):
    """Map a circuit name (or its short alias) to a (marker, size) pair.

    Accepts the aliases "se", "sym" and "ctst" for "single-ended",
    "differential" and "differential-counter-steering" respectively.
    The differential marker is drawn 25% larger than the others.
    """
    aliases = {
        "se": "single-ended",
        "sym": "differential",
        "ctst": "differential-counter-steering",
    }
    full_name = aliases.get(circuit_name, circuit_name)
    assert full_name in ["single-ended", "differential", "differential-counter-steering"]
    if full_name == "differential":
        return "d", _marker_size * 1.25
    if full_name == "differential-counter-steering":
        return "s", _marker_size
    return "o", _marker_size
| [
"matplotlib.rc",
"numpy.amin",
"numpy.empty",
"numpy.logspace",
"numpy.ones",
"matplotlib.pyplot.figure",
"scipy.interpolate.interp1d",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"matplotlib.ticker.MultipleLocator",
"numpy.log10",
"m... | [((73, 87), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (80, 87), True, 'import matplotlib as mpl\n'), ((171, 227), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['stix']})\n", (177, 227), True, 'import matplotlib as mpl\n'), ((1474, 1501), 'numpy.array', 'np.array', (['[0.95, 0.43, 0.0]'], {}), '([0.95, 0.43, 0.0])\n', (1482, 1501), True, 'import numpy as np\n'), ((1591, 1622), 'numpy.array', 'np.array', (['[0.275, 0.443, 0.835]'], {}), '([0.275, 0.443, 0.835])\n', (1599, 1622), True, 'import numpy as np\n'), ((1717, 1790), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""se_cm"""', '[col_se2, col_se1]'], {}), "('se_cm', [col_se2, col_se1])\n", (1761, 1790), True, 'import matplotlib as mpl\n'), ((1802, 1881), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""diff_cm"""', '[col_diff2, col_diff1]'], {}), "('diff_cm', [col_diff2, col_diff1])\n", (1846, 1881), True, 'import matplotlib as mpl\n'), ((1895, 1944), 'numpy.empty', 'np.empty', (['(n_circuits, n_sweeps)'], {'dtype': 'np.object'}), '((n_circuits, n_sweeps), dtype=np.object)\n', (1903, 1944), True, 'import numpy as np\n'), ((2483, 2495), 'numpy.log10', 'np.log10', (['zz'], {}), '(zz)\n', (2491, 2495), True, 'import numpy as np\n'), ((2504, 2516), 'numpy.log10', 'np.log10', (['xx'], {}), '(xx)\n', (2512, 2516), True, 'import numpy as np\n'), ((2525, 2537), 'numpy.log10', 'np.log10', (['yy'], {}), '(yy)\n', (2533, 2537), True, 'import numpy as np\n'), ((2549, 2599), 'scipy.interpolate.interp1d', 'sp.interpolate.interp1d', (['logx', 'logy'], {'kind': '"""linear"""'}), "(logx, logy, kind='linear')\n", (2572, 2599), True, 'import scipy as sp\n'), ((2867, 2883), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (2880, 2883), True, 'import numpy as np\n'), ((3279, 3306), 'numpy.array', 'np.array', (['[0.95, 0.43, 0.0]'], 
{}), '([0.95, 0.43, 0.0])\n', (3287, 3306), True, 'import numpy as np\n'), ((3393, 3466), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""se_cm"""', '[col_se2, col_se1]'], {}), "('se_cm', [col_se2, col_se1])\n", (3437, 3466), True, 'import matplotlib as mpl\n'), ((3480, 3511), 'numpy.array', 'np.array', (['[0.275, 0.443, 0.835]'], {}), '([0.275, 0.443, 0.835])\n', (3488, 3511), True, 'import numpy as np\n'), ((3608, 3687), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""diff_cm"""', '[col_diff2, col_diff1]'], {}), "('diff_cm', [col_diff2, col_diff1])\n", (3652, 3687), True, 'import matplotlib as mpl\n'), ((3705, 3736), 'numpy.array', 'np.array', (['[0.275, 0.835, 0.443]'], {}), '([0.275, 0.835, 0.443])\n', (3713, 3736), True, 'import numpy as np\n'), ((3848, 3940), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""diff_cs_cm"""', '[col_diff_cs2, col_diff_cs1]'], {}), "('diff_cs_cm', [col_diff_cs2,\n col_diff_cs1])\n", (3892, 3940), True, 'import matplotlib as mpl\n'), ((3995, 4036), 'numpy.empty', 'np.empty', (['(dim1, dim2, 3)'], {'dtype': 'np.float'}), '((dim1, dim2, 3), dtype=np.float)\n', (4003, 4036), True, 'import numpy as np\n'), ((4525, 4550), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4533, 4550), True, 'import numpy as np\n'), ((4564, 4589), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4572, 4589), True, 'import numpy as np\n'), ((4800, 4830), 'numpy.all', 'np.all', (['(mag.shape == ang.shape)'], {}), '(mag.shape == ang.shape)\n', (4806, 4830), True, 'import numpy as np\n'), ((4890, 4917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4900, 4917), True, 'import matplotlib.pyplot as plt\n'), ((4924, 4973), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', 
(['(2)', '(1)'], {'height_ratios': '[3, 1]'}), '(2, 1, height_ratios=[3, 1])\n', (4945, 4973), True, 'import matplotlib as mpl\n'), ((4981, 4999), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (4992, 4999), True, 'import matplotlib.pyplot as plt\n'), ((5007, 5025), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (5018, 5025), True, 'import matplotlib.pyplot as plt\n'), ((6737, 6786), 'matplotlib.ticker.MultipleLocator', 'plticker.MultipleLocator', ([], {'base': 'ang_tick_multiples'}), '(base=ang_tick_multiples)\n', (6761, 6786), True, 'import matplotlib.ticker as plticker\n'), ((9478, 9492), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9487, 9492), True, 'import matplotlib.pyplot as plt\n'), ((9537, 9563), 'matplotlib.colors.rgb_to_hsv', 'mpl.colors.rgb_to_hsv', (['rgb'], {}), '(rgb)\n', (9558, 9563), True, 'import matplotlib as mpl\n'), ((9587, 9613), 'matplotlib.colors.hsv_to_rgb', 'mpl.colors.hsv_to_rgb', (['hsv'], {}), '(hsv)\n', (9608, 9613), True, 'import matplotlib as mpl\n'), ((9668, 9833), 'numpy.array', 'np.array', (['[1000.0, 2000.0, 3000.0, 4000.0, 5000.0, 10000.0, 20000.0, 30000.0, 40000.0,\n 50000.0, 100000.0, 200000.0, 300000.0, 400000.0, 500000.0, 1000000.0]'], {}), '([1000.0, 2000.0, 3000.0, 4000.0, 5000.0, 10000.0, 20000.0, 30000.0,\n 40000.0, 50000.0, 100000.0, 200000.0, 300000.0, 400000.0, 500000.0, \n 1000000.0])\n', (9676, 9833), True, 'import numpy as np\n'), ((9792, 9897), 'numpy.array', 'np.array', (['[20.6, 20.4, 20.3, 20.5, 20.3, 20.0, 18.1, 16.8, 15.5, 13.5, 7.9, 4.2, 2.7,\n 2.0, 1.5, 0.7]'], {}), '([20.6, 20.4, 20.3, 20.5, 20.3, 20.0, 18.1, 16.8, 15.5, 13.5, 7.9, \n 4.2, 2.7, 2.0, 1.5, 0.7])\n', (9800, 9897), True, 'import numpy as np\n'), ((9905, 9994), 'numpy.array', 'np.array', (['[172, 172, 172, 172, 174, 160, 153, 134, 127, 127, 102, 90, 75, 73, 72, 36]'], {}), '([172, 172, 172, 172, 174, 160, 153, 134, 127, 127, 102, 90, 75, 73,\n 72, 36])\n', (9913, 
9994), True, 'import numpy as np\n'), ((10007, 10113), 'numpy.array', 'np.array', (['[20.4, 20.9, 20.5, 21.1, 20.9, 20.9, 20.4, 20.0, 19.1, 17.9, 13.0, 7.6, 5.1,\n 3.84, 3.1, 1.4]'], {}), '([20.4, 20.9, 20.5, 21.1, 20.9, 20.9, 20.4, 20.0, 19.1, 17.9, 13.0,\n 7.6, 5.1, 3.84, 3.1, 1.4])\n', (10015, 10113), True, 'import numpy as np\n'), ((10122, 10217), 'numpy.array', 'np.array', (['[7, 10, 10, 16.0, 13, 16, 21.0, 23.0, 30.0, 38, 55, 82.0, 88.0, 92.0, 95.0, 115\n ]'], {}), '([7, 10, 10, 16.0, 13, 16, 21.0, 23.0, 30.0, 38, 55, 82.0, 88.0, \n 92.0, 95.0, 115])\n', (10130, 10217), True, 'import numpy as np\n'), ((10445, 10466), 'numpy.logspace', 'np.logspace', (['(3)', '(6)', '(10)'], {}), '(3, 6, 10)\n', (10456, 10466), True, 'import numpy as np\n'), ((12046, 12073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (12056, 12073), True, 'import matplotlib.pyplot as plt\n'), ((12080, 12107), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (12101, 12107), True, 'import matplotlib as mpl\n'), ((14122, 14136), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14131, 14136), True, 'import matplotlib.pyplot as plt\n'), ((15374, 15401), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (15384, 15401), True, 'import matplotlib.pyplot as plt\n'), ((15408, 15435), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(3)', '(3)'], {}), '(3, 3)\n', (15429, 15435), True, 'import matplotlib as mpl\n'), ((17991, 18005), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (18000, 18005), True, 'import matplotlib.pyplot as plt\n'), ((19920, 19947), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (19930, 19947), True, 'import matplotlib.pyplot as plt\n'), ((19954, 19990), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['n_circuits', '(1)'], {}), '(n_circuits, 
1)\n', (19975, 19990), True, 'import matplotlib as mpl\n'), ((20324, 20338), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (20333, 20338), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1271), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1268, 1271), True, 'import numpy as np\n'), ((1286, 1296), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1293, 1296), True, 'import numpy as np\n'), ((2848, 2861), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (2856, 2861), True, 'import numpy as np\n'), ((5039, 5049), 'numpy.amin', 'np.amin', (['f'], {}), '(f)\n', (5046, 5049), True, 'import numpy as np\n'), ((5051, 5061), 'numpy.amax', 'np.amax', (['f'], {}), '(f)\n', (5058, 5061), True, 'import numpy as np\n'), ((5077, 5087), 'numpy.amin', 'np.amin', (['f'], {}), '(f)\n', (5084, 5087), True, 'import numpy as np\n'), ((5089, 5099), 'numpy.amax', 'np.amax', (['f'], {}), '(f)\n', (5096, 5099), True, 'import numpy as np\n'), ((12154, 12172), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (12165, 12172), True, 'import matplotlib.pyplot as plt\n'), ((12314, 12332), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (12325, 12332), True, 'import matplotlib.pyplot as plt\n'), ((12477, 12495), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (12488, 12495), True, 'import matplotlib.pyplot as plt\n'), ((12700, 12718), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (12711, 12718), True, 'import matplotlib.pyplot as plt\n'), ((12909, 12927), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (12920, 12927), True, 'import matplotlib.pyplot as plt\n'), ((13121, 13139), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (13132, 13139), True, 'import matplotlib.pyplot as plt\n'), ((13392, 13410), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (13403, 13410), True, 'import 
matplotlib.pyplot as plt\n'), ((13616, 13634), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (13627, 13634), True, 'import matplotlib.pyplot as plt\n'), ((13876, 13894), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (13887, 13894), True, 'import matplotlib.pyplot as plt\n'), ((17814, 17845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[circuit_idx, 0]'], {}), '(gs[circuit_idx, 0])\n', (17825, 17845), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2468), 'numpy.diff', 'np.diff', (['xx'], {}), '(xx)\n', (2464, 2468), True, 'import numpy as np\n'), ((2819, 2830), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2827, 2830), True, 'import numpy as np\n'), ((5137, 5149), 'numpy.amin', 'np.amin', (['mag'], {}), '(mag)\n', (5144, 5149), True, 'import numpy as np\n'), ((5151, 5163), 'numpy.amax', 'np.amax', (['mag'], {}), '(mag)\n', (5158, 5163), True, 'import numpy as np\n'), ((5201, 5213), 'numpy.amin', 'np.amin', (['ang'], {}), '(ang)\n', (5208, 5213), True, 'import numpy as np\n'), ((5215, 5227), 'numpy.amax', 'np.amax', (['ang'], {}), '(ang)\n', (5222, 5227), True, 'import numpy as np\n'), ((7092, 7116), 'numpy.array', 'np.array', (['marker_colours'], {}), '(marker_colours)\n', (7100, 7116), True, 'import numpy as np\n'), ((20073, 20101), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[circuit_idx]'], {}), '(gs[circuit_idx])\n', (20084, 20101), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2933), 'numpy.amin', 'np.amin', (['yy'], {}), '(yy)\n', (2929, 2933), True, 'import numpy as np\n'), ((2942, 2953), 'numpy.amax', 'np.amax', (['yy'], {}), '(yy)\n', (2949, 2953), True, 'import numpy as np\n'), ((9178, 9189), 'numpy.log10', 'np.log10', (['t'], {}), '(t)\n', (9186, 9189), True, 'import numpy as np\n'), ((11077, 11095), 'numpy.array', 'np.array', (['gbw_list'], {}), '(gbw_list)\n', (11085, 11095), True, 'import numpy as np\n'), ((11225, 11242), 'numpy.amin', 'np.amin', (['gbw_list'], {}), 
'(gbw_list)\n', (11232, 11242), True, 'import numpy as np\n'), ((11251, 11268), 'numpy.amax', 'np.amax', (['gbw_list'], {}), '(gbw_list)\n', (11258, 11268), True, 'import numpy as np\n'), ((12784, 12801), 'numpy.amin', 'np.amin', (['max_pm_f'], {}), '(max_pm_f)\n', (12791, 12801), True, 'import numpy as np\n'), ((12803, 12820), 'numpy.amax', 'np.amax', (['max_pm_f'], {}), '(max_pm_f)\n', (12810, 12820), True, 'import numpy as np\n'), ((12993, 13010), 'numpy.amin', 'np.amin', (['max_pm_f'], {}), '(max_pm_f)\n', (13000, 13010), True, 'import numpy as np\n'), ((13012, 13029), 'numpy.amax', 'np.amax', (['max_pm_f'], {}), '(max_pm_f)\n', (13019, 13029), True, 'import numpy as np\n'), ((13222, 13239), 'numpy.amin', 'np.amin', (['max_pm_f'], {}), '(max_pm_f)\n', (13229, 13239), True, 'import numpy as np\n'), ((13241, 13258), 'numpy.amax', 'np.amax', (['max_pm_f'], {}), '(max_pm_f)\n', (13248, 13258), True, 'import numpy as np\n'), ((13482, 13518), 'numpy.amin', 'np.amin', (['(1000000000000.0 * best_c_fb)'], {}), '(1000000000000.0 * best_c_fb)\n', (13489, 13518), True, 'import numpy as np\n'), ((13507, 13543), 'numpy.amax', 'np.amax', (['(1000000000000.0 * best_c_fb)'], {}), '(1000000000000.0 * best_c_fb)\n', (13514, 13543), True, 'import numpy as np\n'), ((13706, 13742), 'numpy.amin', 'np.amin', (['(1000000000000.0 * best_c_fb)'], {}), '(1000000000000.0 * best_c_fb)\n', (13713, 13742), True, 'import numpy as np\n'), ((13731, 13767), 'numpy.amax', 'np.amax', (['(1000000000000.0 * best_c_fb)'], {}), '(1000000000000.0 * best_c_fb)\n', (13738, 13767), True, 'import numpy as np\n'), ((13983, 14019), 'numpy.amin', 'np.amin', (['(1000000000000.0 * best_c_fb)'], {}), '(1000000000000.0 * best_c_fb)\n', (13990, 14019), True, 'import numpy as np\n'), ((14008, 14044), 'numpy.amax', 'np.amax', (['(1000000000000.0 * best_c_fb)'], {}), '(1000000000000.0 * best_c_fb)\n', (14015, 14044), True, 'import numpy as np\n'), ((14611, 14629), 'numpy.array', 'np.array', (['gbw_list'], {}), 
'(gbw_list)\n', (14619, 14629), True, 'import numpy as np\n'), ((14792, 14809), 'numpy.amin', 'np.amin', (['gbw_list'], {}), '(gbw_list)\n', (14799, 14809), True, 'import numpy as np\n'), ((14818, 14835), 'numpy.amax', 'np.amax', (['gbw_list'], {}), '(gbw_list)\n', (14825, 14835), True, 'import numpy as np\n'), ((16745, 16776), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, circuit_idx]'], {}), '(gs[0, circuit_idx])\n', (16756, 16776), True, 'import matplotlib.pyplot as plt\n'), ((17034, 17065), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, circuit_idx]'], {}), '(gs[1, circuit_idx])\n', (17045, 17065), True, 'import matplotlib.pyplot as plt\n'), ((17379, 17410), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2, circuit_idx]'], {}), '(gs[2, circuit_idx])\n', (17390, 17410), True, 'import matplotlib.pyplot as plt\n'), ((19495, 19513), 'numpy.amin', 'np.amin', (['c_fb_list'], {}), '(c_fb_list)\n', (19502, 19513), True, 'import numpy as np\n'), ((19522, 19540), 'numpy.amax', 'np.amax', (['c_fb_list'], {}), '(c_fb_list)\n', (19529, 19540), True, 'import numpy as np\n'), ((3112, 3162), 'scipy.optimize.bisect', 'sp.optimize.bisect', (['f', 'xx[0]', 'xx[-1]'], {'xtol': 'epsilon'}), '(f, xx[0], xx[-1], xtol=epsilon)\n', (3130, 3162), True, 'import scipy as sp\n'), ((15759, 15790), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, circuit_idx]'], {}), '(gs[0, circuit_idx])\n', (15770, 15790), True, 'import matplotlib.pyplot as plt\n'), ((16040, 16071), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, circuit_idx]'], {}), '(gs[1, circuit_idx])\n', (16051, 16071), True, 'import matplotlib.pyplot as plt\n'), ((16358, 16389), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2, circuit_idx]'], {}), '(gs[2, circuit_idx])\n', (16369, 16389), True, 'import matplotlib.pyplot as plt\n'), ((17170, 17187), 'numpy.amin', 'np.amin', (['max_pm_f'], {}), '(max_pm_f)\n', (17177, 17187), True, 'import numpy as np\n'), ((17189, 17206), 'numpy.amax', 'np.amax', 
(['max_pm_f'], {}), '(max_pm_f)\n', (17196, 17206), True, 'import numpy as np\n'), ((16178, 16195), 'numpy.amin', 'np.amin', (['max_pm_f'], {}), '(max_pm_f)\n', (16185, 16195), True, 'import numpy as np\n'), ((16197, 16214), 'numpy.amax', 'np.amax', (['max_pm_f'], {}), '(max_pm_f)\n', (16204, 16214), True, 'import numpy as np\n')] |
#debug
import os
from scipy import ndimage
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
# Load raw speckle images in .dat format
# Convert .dat to .tiff
# Load .tiff image
# calculate speckle contrast (Dynamic Imaging of Cerebral Blood Flow Using Laser Speckle)
# Add dimensions to image (e.g. 5x4 [mm))
#function for lasca contrast calculation
def lasca(imarray, wsize = 10):
    """Compute a LASCA speckle-contrast map of a raw speckle image.

    The local speckle contrast is K = sigma / mean evaluated over a square
    sliding window, computed here via the identity
    K^2 = <I^2> / <I>^2 - 1 using two uniform (box) filters.

    Parameters
    ----------
    imarray : ndarray of float
        Raw speckle intensity image. Should be float to avoid integer
        truncation inside the filters.
    wsize : int, optional
        Side length of the square averaging window in pixels (default 10).

    Returns
    -------
    ndarray
        Contrast map with the same shape as ``imarray``; values are >= 0.
    """
    immean = ndimage.uniform_filter(imarray, size=wsize)
    im2mean = ndimage.uniform_filter(np.square(imarray), size=wsize)
    # Floating-point round-off can push <I^2>/<I>^2 marginally below 1
    # (e.g. in perfectly flat regions), which would make sqrt return NaN.
    # Clamp the variance ratio at zero so the contrast map stays finite.
    variance_ratio = np.maximum(im2mean / np.square(immean) - 1.0, 0.0)
    return np.sqrt(variance_ratio)
# Sanity checks: confirm the expected data file is reachable from here.
print(os.listdir())
print(os.getcwd())
############ Main
# Load the raw speckle frame and promote it to float so the box filters
# and contrast math are not truncated to integers.
speckle_frame = Image.open("./../data/interim/datauint16.tiff")
imarray = np.array(speckle_frame).astype(float)
# Contrast maps at two window sizes (5 px and 10 px).
imcontrast05 = lasca(imarray, 5)
imcontrast10 = lasca(imarray, 10)
# Three-panel figure: raw frame (gray) plus both contrast maps (jet),
# each with a horizontal colorbar.
panels = [
    (imarray, {"cmap": plt.get_cmap("gray")},
     np.linspace(500, 2000, 4, endpoint=True)),
    (imcontrast05, {"vmin": 0.1, "vmax": 0.4, "cmap": plt.get_cmap("jet")},
     [0.1, 0.2, 0.3, 0.4]),
    (imcontrast10, {"vmin": 0.1, "vmax": 0.4, "cmap": plt.get_cmap("jet")},
     [0.1, 0.2, 0.3, 0.4]),
]
plt.figure(figsize=(12, 5))
for panel_idx, (image, imshow_kwargs, tick_marks) in enumerate(panels, start=1):
    plt.subplot(1, 3, panel_idx)
    plt.imshow(image, **imshow_kwargs)
    plt.colorbar(orientation='horizontal', ticks=tick_marks)
# Persist the figure, then display it interactively.
plt.savefig('./../data/final/result.jpg', bbox_inches=0, pad_inches=0, dpi=120)
plt.show()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"os.getcwd",
"numpy.square",
"matplotlib.pyplot.colorbar",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"numpy.array",
"scipy.ndimage.uniform_filter",
"numpy.linspace",
"os.listdir",
"matplotlib.pyplot.save... | [((785, 832), 'PIL.Image.open', 'Image.open', (['"""./../data/interim/datauint16.tiff"""'], {}), "('./../data/interim/datauint16.tiff')\n", (795, 832), False, 'from PIL import Image\n'), ((1128, 1155), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (1138, 1155), True, 'from matplotlib import pyplot as plt\n'), ((1155, 1175), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1166, 1175), True, 'from matplotlib import pyplot as plt\n'), ((1316, 1336), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1327, 1336), True, 'from matplotlib import pyplot as plt\n'), ((1408, 1474), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""', 'ticks': '[0.1, 0.2, 0.3, 0.4]'}), "(orientation='horizontal', ticks=[0.1, 0.2, 0.3, 0.4])\n", (1420, 1474), True, 'from matplotlib import pyplot as plt\n'), ((1480, 1500), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1491, 1500), True, 'from matplotlib import pyplot as plt\n'), ((1572, 1638), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""', 'ticks': '[0.1, 0.2, 0.3, 0.4]'}), "(orientation='horizontal', ticks=[0.1, 0.2, 0.3, 0.4])\n", (1584, 1638), True, 'from matplotlib import pyplot as plt\n'), ((1658, 1737), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./../data/final/result.jpg"""'], {'bbox_inches': '(0)', 'pad_inches': '(0)', 'dpi': '(120)'}), "('./../data/final/result.jpg', bbox_inches=0, pad_inches=0, dpi=120)\n", (1669, 1737), True, 'from matplotlib import pyplot as plt\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'from matplotlib import pyplot as plt\n'), ((436, 479), 'scipy.ndimage.uniform_filter', 'ndimage.uniform_filter', (['imarray'], {'size': 'wsize'}), '(imarray, size=wsize)\n', (458, 479), False, 'from 
scipy import ndimage\n'), ((661, 673), 'os.listdir', 'os.listdir', ([], {}), '()\n', (671, 673), False, 'import os\n'), ((729, 740), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (738, 740), False, 'import os\n'), ((518, 536), 'numpy.square', 'np.square', (['imarray'], {}), '(imarray)\n', (527, 536), True, 'import numpy as np\n'), ((918, 930), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (926, 930), True, 'import numpy as np\n'), ((1201, 1221), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (1213, 1221), True, 'from matplotlib import pyplot as plt\n'), ((1273, 1313), 'numpy.linspace', 'np.linspace', (['(500)', '(2000)', '(4)'], {'endpoint': '(True)'}), '(500, 2000, 4, endpoint=True)\n', (1284, 1313), True, 'import numpy as np\n'), ((1387, 1406), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (1399, 1406), True, 'from matplotlib import pyplot as plt\n'), ((1551, 1570), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (1563, 1570), True, 'from matplotlib import pyplot as plt\n'), ((585, 602), 'numpy.square', 'np.square', (['immean'], {}), '(immean)\n', (594, 602), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.