hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73629463525a2647b03c609ca5952eb27adc3d4 | 7,419 | py | Python | Scripts/Plotting/Posteriors_cosmo_model1/Posteriors_cosmo_model1_alternative_dust.py | LBJ-Wade/GALLUMI_public | dbef3ff1ae6934c9551a44cbbe0270e2f17f5527 | [
"MIT"
] | 1 | 2021-12-15T00:17:15.000Z | 2021-12-15T00:17:15.000Z | Scripts/Plotting/Posteriors_cosmo_model1/Posteriors_cosmo_model1_alternative_dust.py | NNSSA/GALLUMI_public | 4529ab32ccfc281e5976f482fe556b672b8f464f | [
"MIT"
] | null | null | null | Scripts/Plotting/Posteriors_cosmo_model1/Posteriors_cosmo_model1_alternative_dust.py | NNSSA/GALLUMI_public | 4529ab32ccfc281e5976f482fe556b672b8f464f | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
import glob
from matplotlib import patches as mpatches
import scipy.ndimage
from scipy.interpolate import PchipInterpolator
plt.style.use("../template.mplstyle")
# purple - green - darkgoldenrod - blue - red
colors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']
linestyles = [(0, (1,1.05)), (0, (3, 1, 1, 1)), (0, (1,3)), (0, (3,3.65)), (0, (3,2.772)), (0, (3, 1, 1, 1, 1, 1))]
#########################################################################################
def ctr_level2d(histogram2d, lvl, infinite=False):
    """Return contour levels enclosing the cumulative fractions in ``lvl``.

    Bins are ranked from highest to lowest; for each fraction the bin value
    at which the normalised descending cumulative sum reaches that fraction
    becomes a contour level (largest fraction first). A leading 0 level is
    kept only when ``infinite`` is True.
    """
    values = np.sort(histogram2d.flatten() * 1.)
    # Cumulative mass from the highest bin downwards, normalised to 1.
    descending_cum = np.cumsum(values[::-1])
    descending_cum /= descending_cum[-1]
    cut_idx = np.searchsorted(descending_cum, lvl)[::-1]
    levels = [0] + [values[-i] for i in cut_idx] + [values.max()]
    return levels if infinite else levels[1:]
def get_hist2d(datax, datay, num_bins=40, weights=[None]):
    """Build a weighted 2D histogram plus its bin edges and bin centres.

    Parameters
    ----------
    datax, datay : array-like
        Sample coordinates.
    num_bins : int
        Number of bins per axis.
    weights : array-like
        Per-sample weights; the ``[None]`` sentinel (or any all-falsy value)
        means "uniform weights". NOTE(review): the mutable-list default is
        kept for interface compatibility but is never mutated here.

    Returns
    -------
    hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy
    """
    # any([None]) is False, so the default falls back to uniform weights.
    if weights is None or not any(weights):
        weights = np.ones(len(datax))
    hist, bin_edgesx, bin_edgesy = np.histogram2d(datax, datay, bins=num_bins,
                                                  weights=weights)
    # Bin centres are the midpoints of consecutive edges.
    bin_centresx = 0.5 * (bin_edgesx[1:] + bin_edgesx[:-1])
    bin_centresy = 0.5 * (bin_edgesy[1:] + bin_edgesy[:-1])
    return hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy
def adjust_lightness(color, amount=0.5):
    """Return *color* with its HLS lightness scaled by *amount*.

    amount < 1 darkens, amount > 1 lightens; the lightness is clipped to
    [0, 1]. Accepts any matplotlib colour spec (named colour, hex, RGB).
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    # Was a bare `except:`, which also swallows KeyboardInterrupt/SystemExit.
    # KeyError: not a named colour; TypeError: unhashable spec (e.g. a list).
    except (KeyError, TypeError):
        c = color
    h, l, s = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(h, max(0, min(1, amount * l)), s)
def plot_hist2d(datax, datay, ax, num_bins=30, weights=[None], color=None, zorder=0):
    """Draw smoothed 68%/95% credible-region contours of a weighted 2D histogram.

    The histogram is upsampled (zoom factor 3) and Gaussian-smoothed before
    contouring so the regions appear smooth; filled regions are drawn first,
    then their outlines on top.
    """
    # any([None]) is False, so the default falls back to uniform weights.
    if weights is None or not any(weights):
        weights = np.ones(len(datax))
    if color is None:  # was `color == None`; identity test is the idiom
        color = "black"
    hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy = get_hist2d(
        datax, datay, num_bins=num_bins, weights=weights)
    interpolation_smoothing = 3.
    gaussian_smoothing = 0.5
    sigma = interpolation_smoothing * gaussian_smoothing
    interp_y_centers = scipy.ndimage.zoom(bin_centresy, interpolation_smoothing, mode='reflect')
    interp_x_centers = scipy.ndimage.zoom(bin_centresx, interpolation_smoothing, mode='reflect')
    interp_hist = scipy.ndimage.zoom(hist, interpolation_smoothing, mode='reflect')
    # scipy.ndimage.filters is a deprecated alias namespace (removed in
    # modern SciPy); call gaussian_filter from the public namespace.
    interp_smoothed_hist = scipy.ndimage.gaussian_filter(interp_hist, [sigma, sigma], mode='reflect')
    ax.contourf(interp_x_centers, interp_y_centers, np.transpose(interp_smoothed_hist),
                colors=[adjust_lightness(color, 1.4), adjust_lightness(color, 0.8)],
                levels=ctr_level2d(interp_smoothed_hist.copy(), [0.68, 0.95]),
                zorder=zorder, alpha=0.45)
    ax.contour(interp_x_centers, interp_y_centers, np.transpose(interp_smoothed_hist),
               colors=[color, adjust_lightness(color, 0.8)], linewidths=2.,
               levels=ctr_level2d(interp_smoothed_hist.copy(), [0.68, 0.95]),
               zorder=zorder)
##################################################################################################
# Load the MCMC chains for each dust-attenuation calibration; every
# *__*.txt file in a run directory is one chain of the same analysis.
UVLF_Overzier = []
UVLF_Bouwens = []
UVLF_Casey = []
for filepath in glob.iglob('../../Data/UVLF_HST_ST_model1/*__*.txt'):
    data = np.loadtxt(filepath)
    UVLF_Overzier.append(data)
for filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_Bouwens2016/*__*.txt'):
    data = np.loadtxt(filepath)
    UVLF_Bouwens.append(data)
for filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_Casey2014/*__*.txt'):
    data = np.loadtxt(filepath)
    UVLF_Casey.append(data)
# Stack each run's chains into one (n_samples, n_columns) array.
UVLF_Overzier = np.vstack(np.array(UVLF_Overzier))
UVLF_Bouwens = np.vstack(np.array(UVLF_Bouwens))
UVLF_Casey = np.vstack(np.array(UVLF_Casey))
# Monotone interpolators over redshift built from Beta_parameters.txt.
# NOTE(review): columns assumed to be (z, beta at pivot MUV=-19.5,
# dbeta/dMUV), inferred from their use in betaAverage -- confirm.
betadata = np.loadtxt("Beta_parameters.txt", unpack=True)
betainterp = PchipInterpolator(betadata[0], betadata[1])
dbetadMUVinterp = PchipInterpolator(betadata[0], betadata[2])
def betaAverage(z, MUV):
    """Mean UV continuum slope <beta> at redshift ``z`` and magnitude ``MUV``.

    Linear in MUV brighter than the pivot magnitude -19.5; an exponential
    form faintward of the pivot keeps <beta> above the -2.33 floor.
    """
    slope = dbetadMUVinterp(z)
    beta_pivot = betainterp(z)
    offset = MUV + 19.5
    if MUV < -19.5:
        return slope * offset + beta_pivot
    amplitude = beta_pivot + 2.33
    return amplitude * np.exp(slope * offset / amplitude) - 2.33
@np.vectorize
def AUV(z, MUV, index):
    """Dust attenuation A_UV from one IRX-beta calibration.

    ``index`` selects the calibration: 0 = Overzier 2011, 1 = Casey 2014,
    2 = Bouwens 2016. Outside 2.5 <= z <= 8 the attenuation is zero.
    """
    if z < 2.5 or z > 8:
        return 0.
    sigmabeta = 0.34
    # A_UV = c0 + 0.2 ln(10) c1^2 sigma_beta^2 + c1 * <beta>, floored at 0.
    calibrations = {0: (4.54, 2.07),  # Overzier 2011
                    1: (3.36, 2.04),  # Casey 2014
                    2: (2.45, 1.1)}   # Bouwens 2016
    if index in calibrations:
        c0, c1 = calibrations[index]
        scatter_term = 0.2 * np.log(10) * (c1**2) * (sigmabeta**2)
        return max(0., c0 + scatter_term + c1 * betaAverage(z, MUV))
# Three side-by-side panels: A_UV vs MUV, (Omega_m, sigma_8), (n_s, sigma_8).
plt.figure(figsize=(24.,6.))
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
ax1.tick_params(axis='x', which='major', pad=6)
ax2.tick_params(axis='x', which='major', pad=6)
ax3.tick_params(axis='x', which='major', pad=6)
ax1.tick_params(axis='both', which='major', labelsize=26)
ax1.tick_params(axis='both', which='minor', labelsize=26)
ax2.tick_params(axis='both', which='major', labelsize=26)
ax2.tick_params(axis='both', which='minor', labelsize=26)
ax3.tick_params(axis='both', which='major', labelsize=26)
ax3.tick_params(axis='both', which='minor', labelsize=26)
# Thicken all panel borders uniformly.
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(2.2)
    ax2.spines[axis].set_linewidth(2.2)
    ax3.spines[axis].set_linewidth(2.2)
###############
# Panel 1: attenuation-magnitude relation at z = 6 for the three
# calibrations (AUV index 0/1/2).
ax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 0), color=colors[3], lw=2.5)
ax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 1), linestyle=linestyles[2], color=colors[1], lw=3.)
ax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 2), linestyle=linestyles[3], color=colors[-1], lw=2.5)
ax1.set_xlabel(r'$M_\mathrm{UV}$', labelpad=10, fontsize=30)
ax1.set_ylabel(r'$A_\mathrm{UV}$', labelpad=12, fontsize=30)
ax1.set_xlim(-23, -16)
ax1.set_ylim(0., 1.3)
# Proxy patches so the legend shows one entry per calibration.
patch_blue = mpatches.Patch(color=colors[3], lw=1.5, label=r"$\mathrm{Overzier\ 2011}$")
patch_green = mpatches.Patch(color=colors[1], lw=1.5, label=r"$\mathrm{Casey\ 2014}$")
patch_yellow = mpatches.Patch(color=colors[-1], lw=1.5, label=r"$\mathrm{Bouwens\ 2016}$")
leg = ax1.legend(handles=[patch_blue, patch_green,patch_yellow], loc="upper right", frameon=False, markerfirst=False, prop={'size': 21}, handlelength=1.9, handletextpad=0.5)
###############
# Panel 2: joint posterior in (Omega_m, sigma_8). Chain columns: 0 =
# sample weight, 2 = sigma_8, -7 = Omega_m (as labelled on the axes below).
plot_hist2d(datax=UVLF_Overzier[:,-7], datay=UVLF_Overzier[:,2], ax=ax2, num_bins=20, weights=UVLF_Overzier[:,0], color=colors[3], zorder=3)
plot_hist2d(datax=UVLF_Bouwens[:,-7], datay=UVLF_Bouwens[:,2], ax=ax2, num_bins=20, weights=UVLF_Bouwens[:,0], color=colors[-1], zorder=2)
plot_hist2d(datax=UVLF_Casey[:,-7], datay=UVLF_Casey[:,2], ax=ax2, num_bins=20, weights=UVLF_Casey[:,0], color=colors[1], zorder=1)
ax2.set_xlabel(r'$\Omega_\mathrm{m}$', labelpad=10, fontsize=30)
ax2.set_ylabel(r'$\sigma_8$', labelpad=8, fontsize=30)
ax2.set_xlim(0.2, 0.4)
ax2.set_ylim(0.3, 1.3)
###############
# Panel 3: joint posterior in (n_s, sigma_8); column 5 = n_s.
plot_hist2d(datax=UVLF_Overzier[:,5], datay=UVLF_Overzier[:,2], ax=ax3, num_bins=20, weights=UVLF_Overzier[:,0], color=colors[3], zorder=3)
plot_hist2d(datax=UVLF_Bouwens[:,5], datay=UVLF_Bouwens[:,2], ax=ax3, num_bins=20, weights=UVLF_Bouwens[:,0], color=colors[-1], zorder=2)
plot_hist2d(datax=UVLF_Casey[:,5], datay=UVLF_Casey[:,2], ax=ax3, num_bins=20, weights=UVLF_Casey[:,0], color=colors[1], zorder=1)
ax3.set_ylabel(r'$\sigma_8$', labelpad=8, fontsize=30)
ax3.set_xlabel(r'$n_\mathrm{s}$', labelpad=10, fontsize=30)
ax3.set_xlim(0.7, 1.3)
ax3.set_ylim(0.3, 1.3)
plt.savefig("Posteriors_cosmo_model1_alternative_dust.pdf")
| 45.515337 | 244 | 0.669632 | import numpy as np
from matplotlib import pyplot as plt
import glob
from matplotlib import patches as mpatches
import scipy.ndimage
from scipy.interpolate import PchipInterpolator
plt.style.use("../template.mplstyle")
colors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']
linestyles = [(0, (1,1.05)), (0, (3, 1, 1, 1)), (0, (1,3)), (0, (3,3.65)), (0, (3,2.772)), (0, (3, 1, 1, 1, 1, 1))]
| true | true |
f7362a6ca3d104844807c89068693be7e4cdaf8b | 346 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py | jalauzon-msft/azure-sdk-for-python | 15967f5c6d3376f2334a382486ba86339786e028 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "27.1.0"
| 38.444444 | 76 | 0.416185 |
VERSION = "27.1.0"
| true | true |
f7362b15d038c90e57d13b0b966653c28ef74792 | 3,507 | py | Python | train.py | Yugeeth/chat-bot | 3198fb160f743c7be1f377d2febb889423da8c06 | [
"MIT"
] | null | null | null | train.py | Yugeeth/chat-bot | 3198fb160f743c7be1f377d2febb889423da8c06 | [
"MIT"
] | null | null | null | train.py | Yugeeth/chat-bot | 3198fb160f743c7be1f377d2febb889423da8c06 | [
"MIT"
] | null | null | null | import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
# Build the bag-of-words training set from the intents file.
with open('intents.json', 'r') as f:
    intents = json.load(f)
all_words = []
tags = []
xy = []
# loop through each sentence in our intents patterns
for intent in intents['intents']:
    tag = intent['tag']
    # add to tag list
    tags.append(tag)
    for pattern in intent['patterns']:
        # tokenize each word in the sentence
        w = tokenize(pattern)
        # add to our words list
        all_words.extend(w)
        # add to xy pair (token list, intent tag)
        xy.append((w, tag))
# stem and lower each word, dropping bare punctuation tokens
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
# remove duplicates and sort (sorted order fixes the bag-of-words layout)
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data: one bag-of-words vector + class index per pattern
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    # X: bag of words for each pattern_sentence
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)
    # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
    label = tags.index(tag)
    y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])  # vocabulary size
hidden_size = 8
output_size = len(tags)  # number of intent classes
print(input_size, output_size)
class ChatDataset(Dataset):
    """Wraps the module-level bag-of-words arrays for use with a DataLoader."""
    def __init__(self):
        # Snapshot the training arrays built above.
        self.x_data = X_train
        self.y_data = y_train
        self.n_samples = len(X_train)
    def __getitem__(self, index):
        # dataset[i] -> (feature vector, class label) pair
        return self.x_data[index], self.y_data[index]
    def __len__(self):
        # len(dataset) -> number of training samples
        return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
# Use the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        # CrossEntropyLoss expects int64 class indices.
        labels = labels.to(dtype=torch.long).to(device)
        # Forward pass
        outputs = model(words)
        # if y would be one-hot, we must apply
        # labels = torch.max(labels, 1)[1]
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Report the last mini-batch loss every 100 epochs.
    if (epoch+1) % 100 == 0:
        print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
# Persist the weights plus everything needed to rebuild the model and
# re-encode user input at inference time.
data = {
    "model_state": model.state_dict(),
    "input_size": input_size,
    "hidden_size": hidden_size,
    "output_size": output_size,
    "all_words": all_words,
    "tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}') | 27.186047 | 75 | 0.633875 | import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
with open('intents.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
for intent in intents['intents']:
tag = intent['tag']
tags.append(tag)
for pattern in intent['patterns']:
w = tokenize(pattern)
all_words.extend(w)
xy.append((w, tag))
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
bag = bag_of_words(pattern_sentence, all_words)
X_train.append(bag)
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
outputs = model(words)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 100 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}') | true | true |
f7362b1d3b7934ae1c2a4ac2d3bcb7a2674d48d6 | 1,310 | py | Python | para_averaging.py | TurkuNLP/paraphrase-classification | 625f0cf5223ecff9d25c2a4f558ca39fa5ecc794 | [
"Apache-2.0"
] | null | null | null | para_averaging.py | TurkuNLP/paraphrase-classification | 625f0cf5223ecff9d25c2a4f558ca39fa5ecc794 | [
"Apache-2.0"
] | null | null | null | para_averaging.py | TurkuNLP/paraphrase-classification | 625f0cf5223ecff9d25c2a4f558ca39fa5ecc794 | [
"Apache-2.0"
] | null | null | null | import torch.nn.functional as F
import torch
import para_model
class ParaAvgModel(para_model.PARAModel):
    """Paraphrase classifier head pooling five BERT summary vectors.

    The classification layer sees the concatenation of the [CLS] vector,
    the two [SEP] vectors, and the mean-pooled encodings of the left and
    right sentences (hence hidden_size * 5 inputs).
    """
    def __init__(self, **args):
        super().__init__(**args)
        # self.drop_layer=torch.nn.Dropout(p=0.2)
        # 5 pooled vectors of hidden_size each are concatenated in forward().
        self.cls_layer=torch.nn.Linear(self.bert.config.hidden_size*5, args['num_classes'])
    def forward(self, batch):
        """Return class logits for a batch of encoded sentence pairs.

        ``batch`` must provide the BERT inputs plus 0/1 masks selecting the
        [CLS]/[SEP] positions and the left/right sentence tokens.
        NOTE(review): masks are assumed to be float tensors of shape
        (batch, seq_len) -- confirm against the data collator.
        """
        input_ids = batch['input_ids']
        token_type_ids = batch['token_type_ids']
        attention_mask = batch['attention_mask']
        cls_mask = batch['cls_mask']
        sep1_mask = batch['sep1_mask']
        sep2_mask = batch['sep2_mask']
        left_mask = batch['left_mask']
        right_mask = batch['right_mask']
        enc = self.bert(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)[0] #BxS_LENxSIZE; BxSIZE
        # Masked sums pick out single positions ([CLS]/[SEP] masks select one
        # token each); left/right are divided by mask totals -> mean pooling.
        cls = (enc*cls_mask.unsqueeze(-1)).sum(1) # enc.pooler_output
        sep1 = (enc*sep1_mask.unsqueeze(-1)).sum(1)
        sep2 = (enc*sep2_mask.unsqueeze(-1)).sum(1)
        left = (enc*left_mask.unsqueeze(-1)).sum(1) / left_mask.sum(-1).unsqueeze(-1)
        right = (enc*right_mask.unsqueeze(-1)).sum(1) / right_mask.sum(-1).unsqueeze(-1)
        catenated = torch.cat((cls, sep1, sep2, left, right), -1)
        # dropped = self.drop_layer(catenated)
        # NOTE(review): the trailing "| 43.666667 | ..." below is dataset-dump
        # contamination fused onto the source line, not code.
        return self.cls_layer(catenated) | 43.666667 | 129 | 0.655725 | import torch.nn.functional as F
import torch
import para_model
class ParaAvgModel(para_model.PARAModel):
def __init__(self, **args):
super().__init__(**args)
self.cls_layer=torch.nn.Linear(self.bert.config.hidden_size*5, args['num_classes'])
def forward(self, batch):
input_ids = batch['input_ids']
token_type_ids = batch['token_type_ids']
attention_mask = batch['attention_mask']
cls_mask = batch['cls_mask']
sep1_mask = batch['sep1_mask']
sep2_mask = batch['sep2_mask']
left_mask = batch['left_mask']
right_mask = batch['right_mask']
enc = self.bert(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)[0]
cls = (enc*cls_mask.unsqueeze(-1)).sum(1)
sep1 = (enc*sep1_mask.unsqueeze(-1)).sum(1)
sep2 = (enc*sep2_mask.unsqueeze(-1)).sum(1)
left = (enc*left_mask.unsqueeze(-1)).sum(1) / left_mask.sum(-1).unsqueeze(-1)
right = (enc*right_mask.unsqueeze(-1)).sum(1) / right_mask.sum(-1).unsqueeze(-1)
catenated = torch.cat((cls, sep1, sep2, left, right), -1)
return self.cls_layer(catenated) | true | true |
f7362b4bfc9f1a5b48252b4883e71f75146af047 | 15,417 | py | Python | mne/viz/circle.py | joewalter/mne-python | b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc | [
"BSD-3-Clause"
] | null | null | null | mne/viz/circle.py | joewalter/mne-python | b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc | [
"BSD-3-Clause"
] | null | null | null | mne/viz/circle.py | joewalter/mne-python | b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc | [
"BSD-3-Clause"
] | null | null | null | """Functions to plot on circle as for connectivity
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
from ..externals.six import string_types
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
                    group_boundaries=None, group_sep=10):
    """Create layout arranging nodes on a circle.

    Parameters
    ----------
    node_names : list of str
        Node names.
    node_order : list of str
        List with node names defining the order in which the nodes are
        arranged. Must have the same elements as node_names but the order
        can be different. The nodes are arranged clockwise starting at
        "start_pos" degrees.
    start_pos : float
        Angle in degrees that defines where the first node is plotted.
    start_between : bool
        If True, the layout starts with the position between the nodes. This is
        the same as adding "180. / len(node_names)" to start_pos.
    group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
        be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
    group_sep : float
        Group separation angle in degrees. See "group_boundaries".

    Returns
    -------
    node_angles : array, shape=(len(node_names,))
        Node angles in degrees.
    """
    n_nodes = len(node_names)

    if len(node_order) != n_nodes:
        raise ValueError('node_order has to be the same length as node_names')

    if group_boundaries is not None:
        # np.int was removed from NumPy (deprecated 1.20, removed 1.24);
        # the builtin int is the documented replacement.
        boundaries = np.array(group_boundaries, dtype=int)
        if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
            raise ValueError('"group_boundaries" has to be between 0 and '
                             'n_nodes - 1.')
        if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
            raise ValueError('"group_boundaries" must have non-decreasing '
                             'values.')
        n_group_sep = len(group_boundaries)
    else:
        n_group_sep = 0
        boundaries = None

    # convert it to a list with indices
    node_order = [node_order.index(name) for name in node_names]
    node_order = np.array(node_order)
    if len(np.unique(node_order)) != n_nodes:
        raise ValueError('node_order has repeated entries')

    node_sep = (360. - n_group_sep * group_sep) / n_nodes

    if start_between:
        start_pos += node_sep / 2

        if boundaries is not None and boundaries[0] == 0:
            # special case when a group separator is at the start
            start_pos += group_sep / 2
            boundaries = boundaries[1:] if n_group_sep > 1 else None

    # np.float alias likewise removed; use the builtin float.
    node_angles = np.ones(n_nodes, dtype=float) * node_sep
    node_angles[0] = start_pos
    if boundaries is not None:
        node_angles[boundaries] += group_sep

    node_angles = np.cumsum(node_angles)[node_order]

    return node_angles
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
                                     n_nodes=0, node_angles=None,
                                     ylim=[9, 10]):
    """Isolate connections around a single node when the user left-clicks it.

    On right click, all connections are restored. Intended as a
    partial-bound matplotlib ``button_press_event`` callback (see
    plot_connectivity_circle). ``ylim`` is the radial band of the node ring
    in which clicks are accepted; the default list is never mutated.
    """
    if event.inaxes != axes:
        return
    if event.button == 1:  # left click
        # click must be near node radius
        if not ylim[0] <= event.ydata <= ylim[1]:
            return
        # all angles in range [0, 2*pi]
        node_angles = node_angles % (np.pi * 2)
        # nearest node (in angle) to the click position
        node = np.argmin(np.abs(event.xdata - node_angles))
        patches = event.inaxes.patches
        # show only connection patches that touch the clicked node;
        # patch ii corresponds to connection (indices[0][ii], indices[1][ii])
        for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
            patches[ii].set_visible(node in [x, y])
        fig.canvas.draw()
    elif event.button == 3:  # right click
        patches = event.inaxes.patches
        for ii in range(np.size(indices, axis=1)):
            patches[ii].set_visible(True)
        fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
                             node_angles=None, node_width=None,
                             node_colors=None, facecolor='black',
                             textcolor='white', node_edgecolor='black',
                             linewidth=1.5, colormap='hot', vmin=None,
                             vmax=None, colorbar=True, title=None,
                             colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
                             fontsize_title=12, fontsize_names=8,
                             fontsize_colorbar=8, padding=6.,
                             fig=None, subplot=111, interactive=True,
                             node_linewidth=2., show=True):
    """Visualize connectivity as a circular graph.

    Note: This code is based on the circle graph example by Nicolas P. Rougier
    http://www.labri.fr/perso/nrougier/coding/.

    Parameters
    ----------
    con : array
        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
        array is provided, "indices" has to be used to define the connection
        indices.
    node_names : list of str
        Node names. The order corresponds to the order in con.
    indices : tuple of arrays | None
        Two arrays with indices of connections for which the connections
        strenghts are defined in con. Only needed if con is a 1D array.
    n_lines : int | None
        If not None, only the n_lines strongest connections (strength=abs(con))
        are drawn.
    node_angles : array, shape=(len(node_names,)) | None
        Array with node positions in degrees. If None, the nodes are equally
        spaced on the circle. See mne.viz.circular_layout.
    node_width : float | None
        Width of each node in degrees. If None, the minimum angle between any
        two nodes is used as the width.
    node_colors : list of tuples | list of str
        List with the color to use for each node. If fewer colors than nodes
        are provided, the colors will be repeated. Any color supported by
        matplotlib can be used, e.g., RGBA tuples, named colors.
    facecolor : str
        Color to use for background. See matplotlib.colors.
    textcolor : str
        Color to use for text. See matplotlib.colors.
    node_edgecolor : str
        Color to use for lines around nodes. See matplotlib.colors.
    linewidth : float
        Line width to use for connections.
    colormap : str
        Colormap to use for coloring the connections.
    vmin : float | None
        Minimum value for colormap. If None, it is determined automatically.
    vmax : float | None
        Maximum value for colormap. If None, it is determined automatically.
    colorbar : bool
        Display a colorbar or not.
    title : str
        The figure title.
    colorbar_size : float
        Size of the colorbar.
    colorbar_pos : 2-tuple
        Position of the colorbar.
    fontsize_title : int
        Font size to use for title.
    fontsize_names : int
        Font size to use for node names.
    fontsize_colorbar : int
        Font size to use for colorbar.
    padding : float
        Space to add around figure to accommodate long labels.
    fig : None | instance of matplotlib.pyplot.Figure
        The figure to use. If None, a new figure with the specified background
        color will be created.
    subplot : int | 3-tuple
        Location of the subplot when creating figures with multiple plots. E.g.
        121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
        matplotlib.pyplot.subplot.
    interactive : bool
        When enabled, left-click on a node to show only connections to that
        node. Right-click shows all connections.
    node_linewidth : float
        Line with for nodes.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of matplotlib.pyplot.Figure
        The figure handle.
    axes : instance of matplotlib.axes.PolarAxesSubplot
        The subplot handle.
    """
    import matplotlib.pyplot as plt
    import matplotlib.path as m_path
    import matplotlib.patches as m_patches

    n_nodes = len(node_names)

    if node_angles is not None:
        if len(node_angles) != n_nodes:
            raise ValueError('node_angles has to be the same length '
                             'as node_names')
        # convert it to radians
        node_angles = node_angles * np.pi / 180
    else:
        # uniform layout on unit circle
        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)

    if node_width is None:
        # widths correspond to the minimum angle between two nodes
        dist_mat = node_angles[None, :] - node_angles[:, None]
        dist_mat[np.diag_indices(n_nodes)] = 1e9
        node_width = np.min(np.abs(dist_mat))
    else:
        node_width = node_width * np.pi / 180

    if node_colors is not None:
        if len(node_colors) < n_nodes:
            node_colors = cycle(node_colors)
    else:
        # assign colors using colormap; 'spectral' was removed from
        # matplotlib (2.2), 'nipy_spectral' is its successor
        node_colors = [plt.cm.nipy_spectral(i / float(n_nodes))
                       for i in range(n_nodes)]

    # handle 1D and 2D connectivity information
    if con.ndim == 1:
        if indices is None:
            raise ValueError('indices has to be provided if con.ndim == 1')
    elif con.ndim == 2:
        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
            raise ValueError('con has to be 1D or a square matrix')
        # we use the lower-triangular part
        indices = np.tril_indices(n_nodes, -1)
        con = con[indices]
    else:
        raise ValueError('con has to be 1D or a square matrix')

    # get the colormap
    if isinstance(colormap, string_types):
        colormap = plt.get_cmap(colormap)

    # Make figure background the same colors as axes
    if fig is None:
        fig = plt.figure(figsize=(8, 8), facecolor=facecolor)

    # Use a polar axes; the 'axisbg' kwarg was removed in matplotlib 2.0,
    # 'facecolor' is the replacement.
    if not isinstance(subplot, tuple):
        subplot = (subplot,)
    axes = plt.subplot(*subplot, polar=True, facecolor=facecolor)

    # No ticks, we'll put our own
    plt.xticks([])
    plt.yticks([])

    # Set y axes limit, add additional space if requested
    plt.ylim(0, 10 + padding)

    # Remove the black axes border which may obscure the labels
    axes.spines['polar'].set_visible(False)

    # Draw lines between connected nodes, only draw the strongest connections
    if n_lines is not None and len(con) > n_lines:
        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
    else:
        con_thresh = 0.

    # get the connections which we are drawing and sort by connection strength
    # this will allow us to draw the strongest connections first
    con_abs = np.abs(con)
    con_draw_idx = np.where(con_abs >= con_thresh)[0]

    con = con[con_draw_idx]
    con_abs = con_abs[con_draw_idx]
    indices = [ind[con_draw_idx] for ind in indices]

    # now sort them
    sort_idx = np.argsort(con_abs)
    con_abs = con_abs[sort_idx]
    con = con[sort_idx]
    indices = [ind[sort_idx] for ind in indices]

    # Get vmin vmax for color scaling
    if vmin is None:
        vmin = np.min(con[np.abs(con) >= con_thresh])
    if vmax is None:
        vmax = np.max(con)
    vrange = vmax - vmin

    # We want to add some "noise" to the start and end position of the
    # edges: We modulate the noise with the number of connections of the
    # node and the connection strength, such that the strongest connections
    # are closer to the node center
    # (np.int alias was removed from NumPy; builtin int is equivalent)
    nodes_n_con = np.zeros((n_nodes), dtype=int)
    for i, j in zip(indices[0], indices[1]):
        nodes_n_con[i] += 1
        nodes_n_con[j] += 1

    # initialize random number generator so plot is reproducible
    rng = np.random.RandomState(seed=0)

    n_con = len(indices[0])
    noise_max = 0.25 * node_width
    start_noise = rng.uniform(-noise_max, noise_max, n_con)
    end_noise = rng.uniform(-noise_max, noise_max, n_con)

    nodes_n_con_seen = np.zeros_like(nodes_n_con)
    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
        nodes_n_con_seen[start] += 1
        nodes_n_con_seen[end] += 1

        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
                           float(nodes_n_con[start]))
        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
                         float(nodes_n_con[end]))

    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
    con_val_scaled = (con - vmin) / vrange

    # Finally, we draw the connections
    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
        # Start point
        t0, r0 = node_angles[i], 10

        # End point
        t1, r1 = node_angles[j], 10

        # Some noise in start and end point
        t0 += start_noise[pos]
        t1 += end_noise[pos]

        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
                 m_path.Path.LINETO]
        path = m_path.Path(verts, codes)

        color = colormap(con_val_scaled[pos])

        # Actual line
        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
                                    linewidth=linewidth, alpha=1.)
        axes.add_patch(patch)

    # Draw ring with colored nodes
    height = np.ones(n_nodes) * 1.0
    bars = axes.bar(node_angles, height, width=node_width, bottom=9,
                    edgecolor=node_edgecolor, lw=node_linewidth,
                    facecolor='.9', align='center')

    for bar, color in zip(bars, node_colors):
        bar.set_facecolor(color)

    # Draw node labels
    angles_deg = 180 * node_angles / np.pi
    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
        if angle_deg >= 270:
            ha = 'left'
        else:
            # Flip the label, so text is always upright
            angle_deg += 180
            ha = 'right'

        axes.text(angle_rad, 10.4, name, size=fontsize_names,
                  rotation=angle_deg, rotation_mode='anchor',
                  horizontalalignment=ha, verticalalignment='center',
                  color=textcolor)

    if title is not None:
        # plt.title(..., axes=axes) relied on a non-API kwarg; set the
        # title on the axes directly.
        axes.set_title(title, color=textcolor, fontsize=fontsize_title)

    if colorbar:
        sm = plt.cm.ScalarMappable(cmap=colormap,
                                   norm=plt.Normalize(vmin, vmax))
        sm.set_array(np.linspace(vmin, vmax))
        cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
                          shrink=colorbar_size,
                          anchor=colorbar_pos)
        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
        cb.ax.tick_params(labelsize=fontsize_colorbar)
        plt.setp(cb_yticks, color=textcolor)

    # Add callback for interaction
    if interactive:
        callback = partial(_plot_connectivity_circle_onpick, fig=fig,
                           axes=axes, indices=indices, n_nodes=n_nodes,
                           node_angles=node_angles)

        fig.canvas.mpl_connect('button_press_event', callback)

    plt_show(show)
    return fig, axes
| 37.23913 | 79 | 0.621846 | from __future__ import print_function
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
from ..externals.six import string_types
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
                    group_boundaries=None, group_sep=10):
    """Create a layout arranging nodes on a circle.

    Parameters
    ----------
    node_names : list of str
        Node names.
    node_order : list of str
        List with node names defining the order in which the nodes are
        arranged. Must have the same entries as ``node_names``.
    start_pos : float
        Angle of the first node in degrees.
    start_between : bool
        If True, the layout starts with the position between the nodes.
        This is the same as adding ``180. / len(node_names)`` to
        ``start_pos``.
    group_boundaries : None | array-like
        List of boundaries between groups; at each boundary a ``group_sep``
        gap is inserted. Values must be strictly increasing and within
        ``[0, n_nodes - 1]``.
    group_sep : float
        Group separation angle in degrees.

    Returns
    -------
    node_angles : ndarray, shape (len(node_names),)
        Node angles in degrees, ordered to match ``node_names``.

    Raises
    ------
    ValueError
        If ``node_order`` has the wrong length or repeated entries, or if
        ``group_boundaries`` is out of range or not increasing.
    """
    n_nodes = len(node_names)

    if len(node_order) != n_nodes:
        raise ValueError('node_order has to be the same length as node_names')

    if group_boundaries is not None:
        # `np.int` / `np.float` aliases were removed in NumPy 1.24; the
        # builtin types are the correct spelling on all NumPy versions.
        boundaries = np.array(group_boundaries, dtype=int)
        if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
            raise ValueError('"group_boundaries" has to be between 0 and '
                             'n_nodes - 1.')
        if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
            raise ValueError('"group_boundaries" must have non-decreasing '
                             'values.')
        n_group_sep = len(group_boundaries)
    else:
        n_group_sep = 0
        boundaries = None

    # Convert node_order into index positions: node_order[i] is the position
    # on the circle at which node_names[i] should appear.
    node_order = [node_order.index(name) for name in node_names]
    node_order = np.array(node_order)
    if len(np.unique(node_order)) != n_nodes:
        raise ValueError('node_order has repeated entries')

    # Angular step between adjacent nodes, after reserving room for the gaps.
    node_sep = (360. - n_group_sep * group_sep) / n_nodes

    if start_between:
        start_pos += node_sep / 2
        if boundaries is not None and boundaries[0] == 0:
            # Special case: a group separator sits right at the start.
            start_pos += group_sep / 2
            boundaries = boundaries[1:] if n_group_sep > 1 else None

    node_angles = np.ones(n_nodes, dtype=float) * node_sep
    node_angles[0] = start_pos
    if boundaries is not None:
        node_angles[boundaries] += group_sep

    # Cumulative sum turns per-node steps into absolute angles; reindex so
    # the result lines up with node_names.
    node_angles = np.cumsum(node_angles)[node_order]

    return node_angles
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
                                     n_nodes=0, node_angles=None,
                                     ylim=[9, 10]):
    """Mouse handler: left-click a node to isolate its connections, right-click to restore all."""
    if event.inaxes != axes:
        return

    if event.button == 1:  # left click
        # Ignore clicks that are not on the node ring.
        if not ylim[0] <= event.ydata <= ylim[1]:
            return

        # Find the node whose (wrapped) angle is closest to the click.
        wrapped_angles = node_angles % (np.pi * 2)
        picked = np.argmin(np.abs(event.xdata - wrapped_angles))

        # Keep only the connection patches that touch the picked node.
        connections = zip(indices[0], indices[1])
        for patch, (start, end) in zip(event.inaxes.patches, connections):
            patch.set_visible(picked in (start, end))
        fig.canvas.draw()
    elif event.button == 3:  # right click: make every connection visible again
        n_con = np.size(indices, axis=1)
        for patch in event.inaxes.patches[:n_con]:
            patch.set_visible(True)
        fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
                             node_angles=None, node_width=None,
                             node_colors=None, facecolor='black',
                             textcolor='white', node_edgecolor='black',
                             linewidth=1.5, colormap='hot', vmin=None,
                             vmax=None, colorbar=True, title=None,
                             colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
                             fontsize_title=12, fontsize_names=8,
                             fontsize_colorbar=8, padding=6.,
                             fig=None, subplot=111, interactive=True,
                             node_linewidth=2., show=True):
    """Visualize connectivity as a circular graph.

    Nodes are drawn as colored bars on a ring (radius 9-10 in axes units)
    and connections as Bezier curves bent through the circle center, colored
    by connection strength.

    Parameter groups:

    * ``con`` / ``indices`` / ``n_lines`` -- connectivity values. ``con`` may
      be a square (n_nodes x n_nodes) matrix, in which case the lower
      triangle is used, or a 1D array with ``indices`` giving the (row, col)
      node pair for each entry. When ``n_lines`` is given, only the
      ``n_lines`` strongest connections (by absolute value) are drawn.
    * ``node_angles`` / ``node_width`` / ``node_colors`` / ``node_edgecolor``
      / ``node_linewidth`` -- layout and appearance of the node ring
      (angles and widths in degrees).
    * ``colormap`` / ``vmin`` / ``vmax`` / ``colorbar*`` -- mapping of
      connection strength to color.
    * ``fig`` / ``subplot`` -- existing figure / subplot spec to draw into;
      a new 8x8 inch figure is created when ``fig`` is None.
    * ``interactive`` -- when True, left-clicking a node hides all
      connections not touching it, right-clicking restores them (see
      ``_plot_connectivity_circle_onpick``).

    Returns
    -------
    fig : matplotlib Figure
        The figure handle.
    axes : polar Axes
        The polar axes the graph was drawn into.
    """
    import matplotlib.pyplot as plt
    import matplotlib.path as m_path
    import matplotlib.patches as m_patches
    n_nodes = len(node_names)
    if node_angles is not None:
        if len(node_angles) != n_nodes:
            raise ValueError('node_angles has to be the same length '
                             'as node_names')
        # Convert from degrees (public API) to radians (matplotlib polar).
        node_angles = node_angles * np.pi / 180
    else:
        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
    if node_width is None:
        # Default width: the smallest angular distance between any two nodes.
        dist_mat = node_angles[None, :] - node_angles[:, None]
        dist_mat[np.diag_indices(n_nodes)] = 1e9
        node_width = np.min(np.abs(dist_mat))
    else:
        node_width = node_width * np.pi / 180
    if node_colors is not None:
        if len(node_colors) < n_nodes:
            # Repeat the given colors as often as needed.
            node_colors = cycle(node_colors)
    else:
        # NOTE(review): the 'spectral' colormap was removed from matplotlib
        # ('nipy_spectral' is the modern name) -- confirm the supported
        # matplotlib version range for this file.
        node_colors = [plt.cm.spectral(i / float(n_nodes))
                       for i in range(n_nodes)]
    if con.ndim == 1:
        if indices is None:
            raise ValueError('indices has to be provided if con.ndim == 1')
    elif con.ndim == 2:
        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
            raise ValueError('con has to be 1D or a square matrix')
        # Square matrix: keep only the lower triangle (no self-connections).
        indices = np.tril_indices(n_nodes, -1)
        con = con[indices]
    else:
        raise ValueError('con has to be 1D or a square matrix')
    if isinstance(colormap, string_types):
        colormap = plt.get_cmap(colormap)
    if fig is None:
        fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
    if not isinstance(subplot, tuple):
        subplot = (subplot,)
    # NOTE(review): the 'axisbg' keyword was removed in matplotlib 2.2
    # (replaced by 'facecolor') -- this call fails on modern matplotlib.
    axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
    plt.xticks([])
    plt.yticks([])
    # Set y axes limit, add additional space if requested
    plt.ylim(0, 10 + padding)
    # Remove the black axes border which may obscure the labels
    axes.spines['polar'].set_visible(False)
    # Draw lines between connected nodes, only draw the strongest connections
    if n_lines is not None and len(con) > n_lines:
        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
    else:
        con_thresh = 0.
    # get the connections which we are drawing and sort by connection strength
    # this will allow us to draw the strongest connections first
    con_abs = np.abs(con)
    con_draw_idx = np.where(con_abs >= con_thresh)[0]
    con = con[con_draw_idx]
    con_abs = con_abs[con_draw_idx]
    indices = [ind[con_draw_idx] for ind in indices]
    # now sort them
    sort_idx = np.argsort(con_abs)
    con_abs = con_abs[sort_idx]
    con = con[sort_idx]
    indices = [ind[sort_idx] for ind in indices]
    # Get vmin vmax for color scaling
    if vmin is None:
        vmin = np.min(con[np.abs(con) >= con_thresh])
    if vmax is None:
        vmax = np.max(con)
    vrange = vmax - vmin
    # We want to add some "noise" to the start and end position of the
    # edges: We modulate the noise with the number of connections of the
    # node and the connection strength, such that the strongest connections
    # are closer to the node center
    # NOTE(review): np.int was removed in NumPy 1.24; the builtin int is the
    # safe spelling here.
    nodes_n_con = np.zeros((n_nodes), dtype=np.int)
    for i, j in zip(indices[0], indices[1]):
        nodes_n_con[i] += 1
        nodes_n_con[j] += 1
    # initialize random number generator so plot is reproducible
    rng = np.random.mtrand.RandomState(seed=0)
    n_con = len(indices[0])
    noise_max = 0.25 * node_width
    start_noise = rng.uniform(-noise_max, noise_max, n_con)
    end_noise = rng.uniform(-noise_max, noise_max, n_con)
    nodes_n_con_seen = np.zeros_like(nodes_n_con)
    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
        nodes_n_con_seen[start] += 1
        nodes_n_con_seen[end] += 1
        # Scale the noise down as more of a node's connections are drawn, so
        # later (stronger, since sorted ascending) edges sit closer to the
        # node center.
        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
                           float(nodes_n_con[start]))
        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
                         float(nodes_n_con[end]))
    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
    con_val_scaled = (con - vmin) / vrange
    # Finally, we draw the connections
    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
        # Start point
        t0, r0 = node_angles[i], 10
        # End point
        t1, r1 = node_angles[j], 10
        # Some noise in start and end point
        t0 += start_noise[pos]
        t1 += end_noise[pos]
        # Cubic Bezier: ring -> mid-radius control points -> ring.
        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
                 m_path.Path.LINETO]
        path = m_path.Path(verts, codes)
        color = colormap(con_val_scaled[pos])
        # Actual line
        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
                                    linewidth=linewidth, alpha=1.)
        axes.add_patch(patch)
    # Draw ring with colored nodes
    height = np.ones(n_nodes) * 1.0
    bars = axes.bar(node_angles, height, width=node_width, bottom=9,
                    edgecolor=node_edgecolor, lw=node_linewidth,
                    facecolor='.9', align='center')
    for bar, color in zip(bars, node_colors):
        bar.set_facecolor(color)
    # Draw node labels
    angles_deg = 180 * node_angles / np.pi
    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
        if angle_deg >= 270:
            ha = 'left'
        else:
            # Flip the label, so text is always upright
            angle_deg += 180
            ha = 'right'
        axes.text(angle_rad, 10.4, name, size=fontsize_names,
                  rotation=angle_deg, rotation_mode='anchor',
                  horizontalalignment=ha, verticalalignment='center',
                  color=textcolor)
    if title is not None:
        plt.title(title, color=textcolor, fontsize=fontsize_title,
                  axes=axes)
    if colorbar:
        sm = plt.cm.ScalarMappable(cmap=colormap,
                                   norm=plt.Normalize(vmin, vmax))
        sm.set_array(np.linspace(vmin, vmax))
        cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
                          shrink=colorbar_size,
                          anchor=colorbar_pos)
        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
        cb.ax.tick_params(labelsize=fontsize_colorbar)
        plt.setp(cb_yticks, color=textcolor)
    # Add callback for interaction
    if interactive:
        callback = partial(_plot_connectivity_circle_onpick, fig=fig,
                           axes=axes, indices=indices, n_nodes=n_nodes,
                           node_angles=node_angles)
        fig.canvas.mpl_connect('button_press_event', callback)
    plt_show(show)
    return fig, axes
| true | true |
f7362b4e7693e7a3993c8f04eaae12595182ff80 | 34 | py | Python | turkishsuffix/__init__.py | ugur-zongur/turkishsuffix | f013e34f3094b5357244b1fd426ce9594c9c36ab | [
"MIT"
] | 8 | 2018-09-12T03:45:49.000Z | 2021-03-05T15:55:11.000Z | turkishsuffix/__init__.py | ugur-zongur/turkishsuffix | f013e34f3094b5357244b1fd426ce9594c9c36ab | [
"MIT"
] | 1 | 2020-04-07T16:06:12.000Z | 2020-04-07T16:06:12.000Z | turkishsuffix/__init__.py | ugur-zongur/turkishsuffix | f013e34f3094b5357244b1fd426ce9594c9c36ab | [
"MIT"
] | 2 | 2018-03-02T11:08:33.000Z | 2020-03-12T22:10:00.000Z | from .suffix import turkishSuffix
| 17 | 33 | 0.852941 | from .suffix import turkishSuffix
| true | true |
f7362c07e55a424db3a7dda53b5b90643a1ec703 | 9,664 | py | Python | lib/airflow/airflow/operators/subdag.py | SteNicholas/ai-flow | 2c70547981f1516f0e37bbe6936a1b7cccd31822 | [
"Apache-2.0"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | lib/airflow/airflow/operators/subdag.py | SteNicholas/ai-flow | 2c70547981f1516f0e37bbe6936a1b7cccd31822 | [
"Apache-2.0"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | lib/airflow/airflow/operators/subdag.py | SteNicholas/ai-flow | 2c70547981f1516f0e37bbe6936a1b7cccd31822 | [
"Apache-2.0"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The module which provides a way to nest your DAGs and so your levels of complexity."""
from enum import Enum
from typing import Dict, Optional
from sqlalchemy.orm.session import Session
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException, TaskInstanceNotFound
from airflow.models import DagRun
from airflow.models.dag import DAG, DagContext
from airflow.models.pool import Pool
from airflow.models.taskinstance import TaskInstance
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
class SkippedStatePropagationOptions(Enum):
    """Available options for skipped state propagation of subdag's tasks to parent dag tasks."""

    # Propagate only if *every* leaf task of the subdag ended up skipped.
    ALL_LEAVES = 'all_leaves'
    # Propagate if *at least one* leaf task of the subdag ended up skipped.
    ANY_LEAF = 'any_leaf'
class SubDagOperator(BaseSensorOperator):
    """
    This runs a sub dag. By convention, a sub dag's dag_id
    should be prefixed by its parent and a dot. As in `parent.child`.
    Although SubDagOperator can occupy a pool/concurrency slot,
    user can specify the mode=reschedule so that the slot will be
    released periodically to avoid potential deadlock.
    :param subdag: the DAG object to run as a subdag of the current DAG.
    :param session: sqlalchemy session
    :param conf: Configuration for the subdag
    :type conf: dict
    :param propagate_skipped_state: by setting this argument you can define
        whether the skipped state of leaf task(s) should be propagated to the
        parent dag's downstream task.
    """

    ui_color = '#555'
    ui_fgcolor = '#fff'

    @provide_session
    @apply_defaults
    def __init__(
        self,
        *,
        subdag: DAG,
        session: Optional[Session] = None,
        conf: Optional[Dict] = None,
        propagate_skipped_state: Optional[SkippedStatePropagationOptions] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.subdag = subdag
        self.conf = conf
        self.propagate_skipped_state = propagate_skipped_state
        # Fail fast at DAG-definition time rather than at run time.
        self._validate_dag(kwargs)
        self._validate_pool(session)

    def _validate_dag(self, kwargs):
        """Check that a parent DAG exists and the subdag id is '{parent_dag_id}.{task_id}'."""
        dag = kwargs.get('dag') or DagContext.get_current_dag()
        if not dag:
            raise AirflowException('Please pass in the `dag` param or call within a DAG context manager')
        if dag.dag_id + '.' + kwargs['task_id'] != self.subdag.dag_id:
            raise AirflowException(
                "The subdag's dag_id should have the form '{{parent_dag_id}}.{{this_task_id}}'. "
                "Expected '{d}.{t}'; received '{rcvd}'.".format(
                    d=dag.dag_id, t=kwargs['task_id'], rcvd=self.subdag.dag_id
                )
            )

    def _validate_pool(self, session):
        """Reject configurations where this operator and a subdag task share a 1-slot pool.

        The operator would occupy the only slot while waiting, so the subdag
        task could never start (deadlock).
        """
        if self.pool:
            conflicts = [t for t in self.subdag.tasks if t.pool == self.pool]
            if conflicts:
                # only query for pool conflicts if one may exist
                pool = session.query(Pool).filter(Pool.slots == 1).filter(Pool.pool == self.pool).first()
                if pool and any(t.pool == self.pool for t in self.subdag.tasks):
                    # NOTE(review): `plural` is a bool here, so the message
                    # renders as "taskTrue"/"taskFalse"; it presumably was
                    # meant to be "'s' if len(conflicts) > 1 else ''" -- fix
                    # the format argument.
                    raise AirflowException(
                        'SubDagOperator {sd} and subdag task{plural} {t} both '
                        'use pool {p}, but the pool only has 1 slot. The '
                        'subdag tasks will never run.'.format(
                            sd=self.task_id,
                            plural=len(conflicts) > 1,
                            t=', '.join(t.task_id for t in conflicts),
                            p=self.pool,
                        )
                    )

    def _get_dagrun(self, execution_date):
        """Return the subdag's DagRun for ``execution_date``, or None if there is none yet."""
        dag_runs = DagRun.find(
            dag_id=self.subdag.dag_id,
            execution_date=execution_date,
        )
        return dag_runs[0] if dag_runs else None

    def _reset_dag_run_and_task_instances(self, dag_run, execution_date):
        """
        Set the DagRun state to RUNNING and set the failed TaskInstances to None state
        for scheduler to pick up.
        :param dag_run: DAG run
        :param execution_date: Execution date
        :return: None
        """
        with create_session() as session:
            dag_run.state = State.RUNNING
            session.merge(dag_run)
            failed_task_instances = (
                session.query(TaskInstance)
                .filter(TaskInstance.dag_id == self.subdag.dag_id)
                .filter(TaskInstance.execution_date == execution_date)
                .filter(TaskInstance.state.in_([State.FAILED, State.UPSTREAM_FAILED]))
            )
            for task_instance in failed_task_instances:
                task_instance.state = State.NONE
                session.merge(task_instance)
            session.commit()

    def pre_execute(self, context):
        """Create the subdag's DagRun if missing; if a failed one exists, reset it for retry."""
        execution_date = context['execution_date']
        dag_run = self._get_dagrun(execution_date)
        if dag_run is None:
            dag_run = self.subdag.create_dagrun(
                run_type=DagRunType.SCHEDULED,
                execution_date=execution_date,
                state=State.RUNNING,
                conf=self.conf,
                external_trigger=True,
            )
            self.log.info("Created DagRun: %s", dag_run.run_id)
            # Notify an event-based scheduler (if configured) that the
            # subdag run was created.
            if 'notification_server_uri' in context and context['notification_server_uri']:
                from airflow.events.scheduler_events import DagRunCreatedEvent
                from notification_service.client import NotificationClient
                dag_run_created_event = DagRunCreatedEvent(
                    dag_id=self.subdag.dag_id,
                    execution_date=dag_run.execution_date
                ).to_event()
                try:
                    # NOTE(review): if the NotificationClient constructor
                    # raises, `client` is unbound when the finally block
                    # runs, turning the original error into a NameError --
                    # consider assigning client = None before the try.
                    client = NotificationClient(server_uri=context['notification_server_uri'],
                                                default_namespace=dag_run_created_event.namespace,
                                                sender=dag_run_created_event.sender)
                    self.log.info("SubDagOperator sending event: {}".format(dag_run_created_event))
                    client.send_event(dag_run_created_event)
                finally:
                    client.close()
        else:
            self.log.info("Found existing DagRun: %s", dag_run.run_id)
            if dag_run.state == State.FAILED:
                self._reset_dag_run_and_task_instances(dag_run, execution_date)

    def poke(self, context):
        """Sensor hook: succeed once the subdag's DagRun is no longer RUNNING."""
        execution_date = context['execution_date']
        dag_run = self._get_dagrun(execution_date=execution_date)
        return dag_run.state != State.RUNNING

    def post_execute(self, context, result=None):
        """Fail unless the subdag run succeeded; optionally propagate skipped leaf state."""
        execution_date = context['execution_date']
        dag_run = self._get_dagrun(execution_date=execution_date)
        self.log.info("Execution finished. State is %s", dag_run.state)
        if dag_run.state != State.SUCCESS:
            raise AirflowException(f"Expected state: SUCCESS. Actual state: {dag_run.state}")
        if self.propagate_skipped_state and self._check_skipped_states(context):
            self._skip_downstream_tasks(context)

    def _check_skipped_states(self, context):
        """Return True if the subdag's leaf states match the configured propagation rule."""
        leaves_tis = self._get_leaves_tis(context['execution_date'])
        if self.propagate_skipped_state == SkippedStatePropagationOptions.ANY_LEAF:
            return any(ti.state == State.SKIPPED for ti in leaves_tis)
        if self.propagate_skipped_state == SkippedStatePropagationOptions.ALL_LEAVES:
            return all(ti.state == State.SKIPPED for ti in leaves_tis)
        raise AirflowException(
            f'Unimplemented SkippedStatePropagationOptions {self.propagate_skipped_state} used.'
        )

    def _get_leaves_tis(self, execution_date):
        """Collect the TaskInstances of the subdag's leaf tasks, skipping ones never run."""
        leaves_tis = []
        for leaf in self.subdag.leaves:
            try:
                ti = get_task_instance(
                    dag_id=self.subdag.dag_id, task_id=leaf.task_id, execution_date=execution_date
                )
                leaves_tis.append(ti)
            except TaskInstanceNotFound:
                continue
        return leaves_tis

    def _skip_downstream_tasks(self, context):
        """Mark all tasks directly downstream of this operator as skipped."""
        self.log.info(
            'Skipping downstream tasks because propagate_skipped_state is set to %s '
            'and skipped task(s) were found.',
            self.propagate_skipped_state,
        )
        downstream_tasks = context['task'].downstream_list
        self.log.debug('Downstream task_ids %s', downstream_tasks)
        if downstream_tasks:
            self.skip(context['dag_run'], context['execution_date'], downstream_tasks)
        self.log.info('Done.')
| 41.835498 | 105 | 0.640625 |
from enum import Enum
from typing import Dict, Optional
from sqlalchemy.orm.session import Session
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException, TaskInstanceNotFound
from airflow.models import DagRun
from airflow.models.dag import DAG, DagContext
from airflow.models.pool import Pool
from airflow.models.taskinstance import TaskInstance
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
class SkippedStatePropagationOptions(Enum):
ALL_LEAVES = 'all_leaves'
ANY_LEAF = 'any_leaf'
class SubDagOperator(BaseSensorOperator):
ui_color = '#555'
ui_fgcolor = '#fff'
@provide_session
@apply_defaults
def __init__(
self,
*,
subdag: DAG,
session: Optional[Session] = None,
conf: Optional[Dict] = None,
propagate_skipped_state: Optional[SkippedStatePropagationOptions] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.subdag = subdag
self.conf = conf
self.propagate_skipped_state = propagate_skipped_state
self._validate_dag(kwargs)
self._validate_pool(session)
def _validate_dag(self, kwargs):
dag = kwargs.get('dag') or DagContext.get_current_dag()
if not dag:
raise AirflowException('Please pass in the `dag` param or call within a DAG context manager')
if dag.dag_id + '.' + kwargs['task_id'] != self.subdag.dag_id:
raise AirflowException(
"The subdag's dag_id should have the form '{{parent_dag_id}}.{{this_task_id}}'. "
"Expected '{d}.{t}'; received '{rcvd}'.".format(
d=dag.dag_id, t=kwargs['task_id'], rcvd=self.subdag.dag_id
)
)
def _validate_pool(self, session):
if self.pool:
conflicts = [t for t in self.subdag.tasks if t.pool == self.pool]
if conflicts:
# only query for pool conflicts if one may exist
pool = session.query(Pool).filter(Pool.slots == 1).filter(Pool.pool == self.pool).first()
if pool and any(t.pool == self.pool for t in self.subdag.tasks):
raise AirflowException(
'SubDagOperator {sd} and subdag task{plural} {t} both '
'use pool {p}, but the pool only has 1 slot. The '
'subdag tasks will never run.'.format(
sd=self.task_id,
plural=len(conflicts) > 1,
t=', '.join(t.task_id for t in conflicts),
p=self.pool,
)
)
def _get_dagrun(self, execution_date):
dag_runs = DagRun.find(
dag_id=self.subdag.dag_id,
execution_date=execution_date,
)
return dag_runs[0] if dag_runs else None
def _reset_dag_run_and_task_instances(self, dag_run, execution_date):
with create_session() as session:
dag_run.state = State.RUNNING
session.merge(dag_run)
failed_task_instances = (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == self.subdag.dag_id)
.filter(TaskInstance.execution_date == execution_date)
.filter(TaskInstance.state.in_([State.FAILED, State.UPSTREAM_FAILED]))
)
for task_instance in failed_task_instances:
task_instance.state = State.NONE
session.merge(task_instance)
session.commit()
def pre_execute(self, context):
execution_date = context['execution_date']
dag_run = self._get_dagrun(execution_date)
if dag_run is None:
dag_run = self.subdag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=execution_date,
state=State.RUNNING,
conf=self.conf,
external_trigger=True,
)
self.log.info("Created DagRun: %s", dag_run.run_id)
if 'notification_server_uri' in context and context['notification_server_uri']:
from airflow.events.scheduler_events import DagRunCreatedEvent
from notification_service.client import NotificationClient
dag_run_created_event = DagRunCreatedEvent(
dag_id=self.subdag.dag_id,
execution_date=dag_run.execution_date
).to_event()
try:
client = NotificationClient(server_uri=context['notification_server_uri'],
default_namespace=dag_run_created_event.namespace,
sender=dag_run_created_event.sender)
self.log.info("SubDagOperator sending event: {}".format(dag_run_created_event))
client.send_event(dag_run_created_event)
finally:
client.close()
else:
self.log.info("Found existing DagRun: %s", dag_run.run_id)
if dag_run.state == State.FAILED:
self._reset_dag_run_and_task_instances(dag_run, execution_date)
def poke(self, context):
execution_date = context['execution_date']
dag_run = self._get_dagrun(execution_date=execution_date)
return dag_run.state != State.RUNNING
def post_execute(self, context, result=None):
execution_date = context['execution_date']
dag_run = self._get_dagrun(execution_date=execution_date)
self.log.info("Execution finished. State is %s", dag_run.state)
if dag_run.state != State.SUCCESS:
raise AirflowException(f"Expected state: SUCCESS. Actual state: {dag_run.state}")
if self.propagate_skipped_state and self._check_skipped_states(context):
self._skip_downstream_tasks(context)
def _check_skipped_states(self, context):
leaves_tis = self._get_leaves_tis(context['execution_date'])
if self.propagate_skipped_state == SkippedStatePropagationOptions.ANY_LEAF:
return any(ti.state == State.SKIPPED for ti in leaves_tis)
if self.propagate_skipped_state == SkippedStatePropagationOptions.ALL_LEAVES:
return all(ti.state == State.SKIPPED for ti in leaves_tis)
raise AirflowException(
f'Unimplemented SkippedStatePropagationOptions {self.propagate_skipped_state} used.'
)
def _get_leaves_tis(self, execution_date):
leaves_tis = []
for leaf in self.subdag.leaves:
try:
ti = get_task_instance(
dag_id=self.subdag.dag_id, task_id=leaf.task_id, execution_date=execution_date
)
leaves_tis.append(ti)
except TaskInstanceNotFound:
continue
return leaves_tis
def _skip_downstream_tasks(self, context):
self.log.info(
'Skipping downstream tasks because propagate_skipped_state is set to %s '
'and skipped task(s) were found.',
self.propagate_skipped_state,
)
downstream_tasks = context['task'].downstream_list
self.log.debug('Downstream task_ids %s', downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['execution_date'], downstream_tasks)
self.log.info('Done.')
| true | true |
f7362d25cfa413edc1cc0f2e3479ec9ea1277b43 | 367 | py | Python | 1_The_Basics/1_1_Welcome/main.py | Aurora-College-SDD-2022/Python4SDD | 068b74bafd0c5fda2d9a99dc694911419d67b28a | [
"CC0-1.0"
] | null | null | null | 1_The_Basics/1_1_Welcome/main.py | Aurora-College-SDD-2022/Python4SDD | 068b74bafd0c5fda2d9a99dc694911419d67b28a | [
"CC0-1.0"
] | 3 | 2021-01-03T11:02:03.000Z | 2021-01-03T11:03:15.000Z | 1_The_Basics/1_1_Welcome/main.py | Aurora-College-SDD-2022/Python4SDD | 068b74bafd0c5fda2d9a99dc694911419d67b28a | [
"CC0-1.0"
] | 1 | 2021-02-18T22:05:16.000Z | 2021-02-18T22:05:16.000Z | """ your first python program.
Replace the words 'look at me I am coding' with your own message
to let the world know you are now an apprentice code monkey.
"""
def print_message():
"""prints a simple message."""
print("look at me I am coding")
#Ignore everything below this line...we will talk about it later
if __name__ == "__main__":
print_message() | 28.230769 | 64 | 0.708447 |
def print_message():
print("look at me I am coding")
if __name__ == "__main__":
print_message() | true | true |
f7362d59a01649e753aafc6708a779ef76437b4c | 8,296 | py | Python | python/GafferSceneUI/OutputsUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferSceneUI/OutputsUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferSceneUI/OutputsUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferScene.Outputs,
"description",
"""
Defines the image outputs to be created by the renderer. Arbitrary
outputs can be defined within the UI and also via the
`Outputs::addOutput()` API. Commonly used outputs may also
be predefined at startup via a config file - see
$GAFFER_ROOT/startup/gui/outputs.py for an example.
""",
plugs = {
"outputs" : [
"description",
"""
The outputs defined by this node.
""",
"plugValueWidget:type", "GafferSceneUI.OutputsUI.OutputsPlugValueWidget",
],
"outputs.*" : [
"plugValueWidget:type", "GafferSceneUI.OutputsUI.ChildPlugValueWidget",
],
"outputs.*.parameters.quantize.value" : [
"description",
"""
The bit depth of the image.
""",
"preset:8 bit", IECore.IntVectorData( [ 0, 255, 0, 255 ] ),
"preset:16 bit", IECore.IntVectorData( [ 0, 65535, 0, 65535 ] ),
"preset:Float", IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"outputs.*.fileName" : [
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:bookmarks", "image",
"path:leaf", True,
],
"outputs.*.active" : [
"boolPlugValueWidget:displayMode", "switch",
],
}
)
##########################################################################
# Custom PlugValueWidgets for listing outputs
##########################################################################
class OutputsPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""Widget for the Outputs node's `outputs` plug.

	Lays out the existing outputs (one ChildPlugValueWidget per child plug)
	and provides a "+" menu button for adding registered or blank outputs.
	"""

	def __init__( self, plug ) :

		column = GafferUI.ListContainer( spacing = 6 )
		GafferUI.PlugValueWidget.__init__( self, column, plug )

		with column :
			# this will take care of laying out our list of outputs, as
			# each output is represented as a child plug of the main plug.
			GafferUI.PlugLayout( plug )
			# now we just need a little footer with a button for adding new outputs
			with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
				GafferUI.MenuButton(
					image="plus.png", hasFrame=False, menu = GafferUI.Menu( Gaffer.WeakMethod( self.__addMenuDefinition ) )
				)
				GafferUI.Spacer( imath.V2i( 1 ), maximumSize = imath.V2i( 100000, 1 ), parenting = { "expand" : True } )

	def hasLabel( self ) :
		# Suppress the automatic label; this widget provides its own layout.
		return True

	def _updateFromPlug( self ) :
		# Nothing to update; child widgets track their own plugs.
		pass

	def __addMenuDefinition( self ) :
		"""Build the "+" button menu: one entry per registered output, plus a blank one."""
		node = self.getPlug().node()
		currentNames = set( [ output["name"].getValue() for output in node["outputs"].children() ] )
		m = IECore.MenuDefinition()
		registeredOutputs = node.registeredOutputs()
		for name in registeredOutputs :
			menuPath = name
			if not menuPath.startswith( "/" ) :
				menuPath = "/" + menuPath
			# Outputs already present are greyed out rather than hidden.
			m.append(
				menuPath,
				{
					"command" : functools.partial( node.addOutput, name ),
					"active" : name not in currentNames
				}
			)
		if len( registeredOutputs ) :
			m.append( "/BlankDivider", { "divider" : True } )
		m.append( "/Blank", { "command" : functools.partial( node.addOutput, "", IECoreScene.Output( "", "", "" ) ) } )
		return m
# A widget for representing an individual output.
class ChildPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, childPlug ) :
column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=4 )
GafferUI.PlugValueWidget.__init__( self, column, childPlug )
with column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=4 ) as header :
collapseButton = GafferUI.Button( image = "collapsibleArrowRight.png", hasFrame=False )
collapseButton.clickedSignal().connect( Gaffer.WeakMethod( self.__collapseButtonClicked ), scoped = False )
GafferUI.PlugValueWidget.create( childPlug["active"] )
self.__label = GafferUI.Label( self.__namePlug().getValue() )
GafferUI.Spacer( imath.V2i( 1 ), maximumSize = imath.V2i( 100000, 1 ), parenting = { "expand" : True } )
self.__deleteButton = GafferUI.Button( image = "delete.png", hasFrame=False )
self.__deleteButton.clickedSignal().connect( Gaffer.WeakMethod( self.__deleteButtonClicked ), scoped = False )
self.__deleteButton.setVisible( False )
self.__detailsColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 )
self.__detailsColumn.setVisible( False )
header.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
header.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
with self.getContext() :
enabled = self.getPlug()["active"].getValue()
self.__label.setEnabled( enabled )
self.__detailsColumn.setEnabled( enabled )
self.__label.setText( self.__namePlug().getValue() )
def __namePlug( self ) :
plug = self.getPlug()
# backwards compatibility with old plug layout
return plug.getChild( "label" ) or plug.getChild( "name" )
def __fileNamePlug( self ) :
plug = self.getPlug()
# backwards compatibility with old plug layout
return plug.getChild( "fileName" ) or plug.getChild( "name" )
def __enter( self, widget ) :
self.__deleteButton.setVisible( True )
def __leave( self, widget ) :
self.__deleteButton.setVisible( False )
def __collapseButtonClicked( self, button ) :
visible = not self.__detailsColumn.getVisible()
if visible and not len( self.__detailsColumn ) :
# Build details section the first time it is shown,
# to avoid excessive overhead in the initial UI build.
with self.__detailsColumn :
GafferUI.PlugWidget( self.__namePlug() )
GafferUI.PlugWidget( self.__fileNamePlug() )
GafferUI.PlugWidget( self.getPlug()["type"] )
GafferUI.PlugWidget( self.getPlug()["data"] )
GafferUI.CompoundDataPlugValueWidget( self.getPlug()["parameters"] )
GafferUI.Divider( GafferUI.Divider.Orientation.Horizontal )
self.__detailsColumn.setVisible( visible )
button.setImage( "collapsibleArrowDown.png" if visible else "collapsibleArrowRight.png" )
def __deleteButtonClicked( self, button ) :

	"""Remove this child plug from its parent, as a single undoable action."""

	plug = self.getPlug()
	with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
		plug.parent().removeChild( plug )
| 31.30566 | 114 | 0.674301 | true | true | |
f7362d686b031b37aec567847d3c0a6b7d8027e9 | 1,864 | py | Python | tradebook/users/models.py | isuryanarayanan/tradebook-backend | 86c9e3c5adccb13b95945604c3d1e5c43066532c | [
"MIT"
] | null | null | null | tradebook/users/models.py | isuryanarayanan/tradebook-backend | 86c9e3c5adccb13b95945604c3d1e5c43066532c | [
"MIT"
] | null | null | null | tradebook/users/models.py | isuryanarayanan/tradebook-backend | 86c9e3c5adccb13b95945604c3d1e5c43066532c | [
"MIT"
] | null | null | null | """ User account models """
from django.db import models
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
    """Manager that builds regular users and superusers for the custom User model."""

    use_in_migrations = True

    def _create_user(self, username, email, password, **extra_fields):
        """
        Create and save a user with the given username, email, and password.
        """
        if not username:
            raise ValueError('The given username must be set')
        user = self.model(
            username=self.model.normalize_username(username),
            email=self.normalize_email(email),
            **extra_fields,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email=None, password=None, **extra_fields):
        """Create an ordinary account (neither staff nor superuser by default)."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(username, email, password, **extra_fields)

    def create_superuser(self, username, email=None, password=None, **extra_fields):
        """Create an account with both staff and superuser privileges."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(username, email, password, **extra_fields)
class User(AbstractUser):
    # E-mail is the login identifier, so it must be unique.
    email = models.EmailField(unique=True)
    objects = UserManager()
    # NOTE(review): UserManager._create_user still requires `username` as its
    # first argument, but it is not listed in REQUIRED_FIELDS here — confirm
    # that callers (e.g. createsuperuser) supply it.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = "user"
        verbose_name_plural = "users"
        app_label = "users"
| 35.846154 | 84 | 0.688841 | from django.db import models
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
class User(AbstractUser):
email = models.EmailField(unique=True)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
verbose_name = "user"
verbose_name_plural = "users"
app_label = "users"
| true | true |
f7362de671c7775a19073b3d25d77d55bee0b8d2 | 1,602 | py | Python | test/test_instructions/test_invocations.py | ronyhe/pyjvm | bb15bed8719335c09779f95d4bc16947b7bb7b98 | [
"MIT"
] | 15 | 2018-07-31T11:25:18.000Z | 2021-07-28T09:13:21.000Z | test/test_instructions/test_invocations.py | ronyhe/pyjvm | bb15bed8719335c09779f95d4bc16947b7bb7b98 | [
"MIT"
] | null | null | null | test/test_instructions/test_invocations.py | ronyhe/pyjvm | bb15bed8719335c09779f95d4bc16947b7bb7b98 | [
"MIT"
] | 1 | 2021-07-22T07:36:23.000Z | 2021-07-22T07:36:23.000Z | from jawa.constants import ConstantPool
from jawa.util.bytecode import Instruction
from pyjvm.core.actions import Pop, Invoke
from pyjvm.core.class_loaders import FixedClassLoader
from pyjvm.core.jvm_class import JvmClass, BytecodeMethod, MethodKey
from pyjvm.core.jvm_types import Integer, RootObjectType
from test.utils import constant_instruction, assert_instruction, SOME_INT
def test_invoke_v():
    """invokevirtual should pop the receiver and arguments and dispatch the method."""
    method_name = 'method_name'
    class_name = 'class_name'
    descriptor = '(II)V'
    constant_pool = ConstantPool()
    method_key = MethodKey(method_name, descriptor)

    nop = Instruction.create('nop')
    target_method = BytecodeMethod(
        name='method_name',
        descriptor='(II)V',
        max_locals=5,
        max_stack=5,
        instructions=[nop, nop],
        args=[Integer, Integer],
    )
    test_class = JvmClass(
        class_name,
        RootObjectType.refers_to,
        constant_pool,
        methods={method_key: target_method}
    )

    ref = constant_pool.create_method_ref(class_name, method_name, descriptor)
    invoke_instruction = constant_instruction('invokevirtual', ref)
    class_loader = FixedClassLoader({class_name: test_class})

    receiver = class_loader.default_instance(class_name)
    call_arguments = [receiver, SOME_INT, SOME_INT]

    # The operand stack holds the arguments in reverse order (top of stack first).
    assert_instruction(
        constants=constant_pool,
        loader=class_loader,
        instruction=invoke_instruction,
        op_stack=list(reversed(call_arguments)),
        expected=[
            Pop(3),
            Invoke(class_name, method_key, call_arguments)
        ]
    )
| 27.62069 | 78 | 0.673533 | from jawa.constants import ConstantPool
from jawa.util.bytecode import Instruction
from pyjvm.core.actions import Pop, Invoke
from pyjvm.core.class_loaders import FixedClassLoader
from pyjvm.core.jvm_class import JvmClass, BytecodeMethod, MethodKey
from pyjvm.core.jvm_types import Integer, RootObjectType
from test.utils import constant_instruction, assert_instruction, SOME_INT
def test_invoke_v():
method_name = 'method_name'
class_name = 'class_name'
consts = ConstantPool()
descriptor = '(II)V'
key = MethodKey(method_name, descriptor)
no_op = Instruction.create('nop')
method = BytecodeMethod(
name='method_name',
descriptor='(II)V',
max_locals=5,
max_stack=5,
instructions=[no_op, no_op],
args=[Integer, Integer],
)
jvm_class = JvmClass(
class_name,
RootObjectType.refers_to,
consts,
methods={
key: method
}
)
method_ref = consts.create_method_ref(class_name, method_name, descriptor)
instruction = constant_instruction('invokevirtual', method_ref)
loader = FixedClassLoader({
class_name: jvm_class
})
instance = loader.default_instance(class_name)
arg_value = SOME_INT
arguments = [instance, arg_value, arg_value]
reversed_arguments = list(reversed(arguments))
assert_instruction(
constants=consts,
loader=loader,
instruction=instruction,
op_stack=reversed_arguments,
expected=[
Pop(3),
Invoke(class_name, key, arguments)
]
)
| true | true |
f7362deaf881659f32749a483965edb06eda5a8f | 33,921 | py | Python | src/qtt/instrument_drivers/virtual_awg.py | jsaez8/qtt | fa6497ace86a255f33a2192ba01d063d07d6895e | [
"MIT"
] | null | null | null | src/qtt/instrument_drivers/virtual_awg.py | jsaez8/qtt | fa6497ace86a255f33a2192ba01d063d07d6895e | [
"MIT"
] | null | null | null | src/qtt/instrument_drivers/virtual_awg.py | jsaez8/qtt | fa6497ace86a255f33a2192ba01d063d07d6895e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 31 13:04:09 2016
@author: diepencjv
"""
# %%
import numpy as np
import scipy.signal
import logging
import warnings
import qcodes
from qcodes import Instrument
from qcodes.plots.pyqtgraph import QtPlot
from qcodes.data.data_array import DataArray
import qtt
import qtt.utilities.tools
logger = logging.getLogger(__name__)
# %%
class virtual_awg(Instrument):
"""
Attributes:
_awgs (list): handles to instruments
awg_map (dict)
hardware (Instrument): contains AWG to plunger values
corr (float): unknown
delay_FPGA (float): time delay of signals going through fridge
"""
def __init__(self, name, instruments=[], awg_map=None, hardware=None, verbose=1, **kwargs):
super().__init__(name, **kwargs)
logger.info('initialize virtual_awg %s' % name)
self._awgs = instruments
self.awg_map = awg_map
self.hardware = hardware
self.verbose = verbose
self.delay_FPGA = 2.0e-6 # should depend on filterboxes
self.corr = .0 # legacy code, specific for FPGA board not used any more
self.maxdatapts = 16e6 # This used to be set to the fpga maximum, but that maximum should not be handled here
self.awg_seq = None
if len(self._awgs) == 0 and self.verbose:
print('no physical AWGs connected')
elif len(self._awgs) == 1:
self.awg_cont = self._awgs[0]
self.awg_cont.set('run_mode', 'CONT')
elif len(self._awgs) == 2 and 'awg_mk' in self.awg_map:
self.awg_cont = self._awgs[self.awg_map['awg_mk'][0]]
self.awg_cont.set('run_mode', 'CONT')
self.awg_seq = self._awgs[(self.awg_map['awg_mk'][0] + 1) % 2]
self._set_seq_mode(self.awg_seq)
self.delay_AWG = self.hardware.parameters['delay_AWG'].get()
else:
raise Exception(
'Configuration of AWGs not supported by virtual_awg instrument')
self.AWG_clock = 1e8
self.ch_amp = 4.0
for awg in self._awgs:
awg.set('clock_freq', self.AWG_clock)
awg.delete_all_waveforms_from_list()
for i in range(1, 5):
awg.set('ch%s_amp' % i, self.ch_amp)
    def _set_seq_mode(self, a):
        """ Put AWG `a` in sequence mode with a single element that does not wait for a trigger. """
        a.set('run_mode', 'SEQ')
        a.sequence_length.set(1)
        # element 1, trigger wait disabled (0)
        a.set_sqel_trigger_wait(1, 0)
def get_idn(self):
''' Overrule because the default VISA command does not work '''
IDN = {'vendor': 'QuTech', 'model': 'virtual_awg',
'serial': None, 'firmware': None}
return IDN
def awg_gate(self, gate):
""" Return true of the gate can be controlled by the awg
Args:
gate ()
"""
if gate is None:
return False
if isinstance(gate, dict):
# vector scan, assume we can do it fast if all components are fast
return np.all([self.awg_gate(g) for g in gate])
if self.awg_map is None:
return False
if gate in self.awg_map:
return True
else:
return False
    def stop(self, verbose=0):
        ''' Stops all AWGs and turns off all channels '''
        for awg in self._awgs:
            awg.stop()
            # disable outputs on all four channels
            for i in range(1, 5):
                awg.set('ch%d_state' % i, 0)
        if verbose:
            print('Stopped AWGs')
    def sweep_init(self, waveforms, period=1e-3, delete=True, samp_freq=None):
        ''' Send waveform(s) to gate(s)

        Arguments:
            waveforms (dict): the waveforms with the gates as keys
            period (float): period of the waveform in seconds
            delete (bool): if True, clear the AWG waveform lists and upload
                the new waveforms
            samp_freq (float or None): sampling frequency of the measurement
                instrument; used to compensate the m4i pretrigger

        Returns:
            sweep_info (dict): the keys are tuples of the awgs and channels to activate

        Example:
        --------
        >> sweep_info = sweep_init(waveforms)
        '''
        sweepgates = [g for g in waveforms]
        if delete:
            for awg in self._awgs:
                awg.delete_all_waveforms_from_list()
        awgs = [self._awgs[self.awg_map[g][0]] for g in sweepgates]
        # select the marker channel of the measurement instrument;
        # NOTE: if neither 'fpga_mk' nor 'm4i_mk' is in awg_map, marker_info
        # is undefined and the code below raises a NameError
        if 'fpga_mk' in self.awg_map:
            marker_info = self.awg_map['fpga_mk']
            marker_delay = self.delay_FPGA
            marker_name = 'fpga_mk'
        elif 'm4i_mk' in self.awg_map:
            marker_info = self.awg_map['m4i_mk']
            if samp_freq is not None:
                # the m4i digitizer acquires 16 pretrigger samples
                pretrigger_period = 16 / samp_freq
            else:
                pretrigger_period = 0
            marker_delay = self.delay_FPGA + pretrigger_period
            marker_name = 'm4i_mk'
        awgs.append(self._awgs[marker_info[0]])
        sweep_info = dict()
        wave_len = len(waveforms[sweepgates[0]]['wave'])
        # collect waveform + (empty) marker arrays per (awg, channel) pair
        for g in sweepgates:
            sweep_info[self.awg_map[g]] = dict()
            sweep_info[self.awg_map[g]]['waveform'] = waveforms[g]['wave']
            sweep_info[self.awg_map[g]]['marker1'] = np.zeros(wave_len)
            sweep_info[self.awg_map[g]]['marker2'] = np.zeros(wave_len)
            if 'name' in waveforms[g]:
                sweep_info[self.awg_map[g]]['name'] = waveforms[g]['name']
            else:
                sweep_info[self.awg_map[g]]['name'] = 'waveform_%s' % g
            if marker_info[:2] == self.awg_map[g]:
                sweep_info[marker_info[:2]]['delay'] = marker_delay
        # marker points: a pulse of wave_len//20 samples, delayed by marker_delay
        marker_points = np.zeros(wave_len)
        marker_points[int(marker_delay * self.AWG_clock):(int(marker_delay * self.AWG_clock) + wave_len // 20)] = 1.0
        # the marker channel may live on a channel with no sweep waveform
        if marker_info[:2] not in sweep_info:
            sweep_info[marker_info[:2]] = dict()
            sweep_info[marker_info[:2]]['waveform'] = np.zeros(wave_len)
            sweep_info[marker_info[:2]]['marker1'] = np.zeros(wave_len)
            sweep_info[marker_info[:2]]['marker2'] = np.zeros(wave_len)
            for g in sweepgates:
                marker_name += '_%s' % g
            sweep_info[marker_info[:2]]['name'] = marker_name
            sweep_info[marker_info[:2]]['delay'] = marker_delay
        sweep_info[marker_info[:2]]['marker%d' % marker_info[2]] = marker_points
        self._awgs[marker_info[0]].set(
            'ch%i_m%i_low' % (marker_info[1], marker_info[2]), 0)
        self._awgs[marker_info[0]].set(
            'ch%i_m%i_high' % (marker_info[1], marker_info[2]), 2.6)
        # awg marker: triggers the sequenced slave AWG from the master
        if getattr(self, 'awg_seq', None) is not None:
            awg_info = self.awg_map['awg_mk']
            if awg_info[:2] not in sweep_info:
                awgs.append(self._awgs[awg_info[0]])
                sweep_info[awg_info[:2]] = dict()
                sweep_info[awg_info[:2]]['waveform'] = np.zeros(wave_len)
                sweep_info[awg_info[:2]]['marker1'] = np.zeros(wave_len)
                sweep_info[awg_info[:2]]['marker2'] = np.zeros(wave_len)
                sweep_info[awg_info[:2]]['name'] = 'awg_mk'
            # pulse at the start of the period, rolled back by the AWG delay
            awg_marker = np.zeros(wave_len)
            awg_marker[0:wave_len // 20] = 1
            awg_marker = np.roll(
                awg_marker, wave_len - int(self.delay_AWG * self.AWG_clock))
            sweep_info[awg_info[:2]]['marker%d' %
                                     self.awg_map['awg_mk'][2]] = awg_marker
            self._awgs[awg_info[0]].set(
                'ch%i_m%i_low' % (awg_info[1], awg_info[2]), 0)
            self._awgs[awg_info[0]].set(
                'ch%i_m%i_high' % (awg_info[1], awg_info[2]), 2.6)
        # send waveforms
        if delete:
            for sweep in sweep_info:
                try:
                    self._awgs[sweep[0]].send_waveform_to_list(sweep_info[sweep]['waveform'], sweep_info[
                        sweep]['marker1'], sweep_info[sweep]['marker2'], sweep_info[sweep]['name'])
                except Exception as ex:
                    # best-effort diagnostics; the upload of other waveforms continues
                    print(ex)
                    print('sweep_info[sweep][waveform] %s' % (sweep_info[sweep]['waveform'].shape,))
                    print('sweep_info[sweep][marker1] %s' % (sweep_info[sweep]['marker1'].shape,))
                    print('sweep_info[sweep][marker2] %s' % (sweep_info[sweep]['marker2'].shape,))
        return sweep_info
    def sweep_run(self, sweep_info):
        ''' Activate AWG(s) and channel(s) for the sweep(s).

        Arguments:
            sweep_info (dict): the keys are tuples of the awgs and channels to activate
        '''
        for sweep in sweep_info:
            if hasattr(self, 'awg_seq') and self._awgs[sweep[0]] == self.awg_seq:
                # sequenced slave AWG: install the waveform as sequence element 1
                # and loop it indefinitely
                self._awgs[sweep[0]].set_sqel_waveform(
                    sweep_info[sweep]['name'], sweep[1], 1)
                self._awgs[sweep[0]].set_sqel_loopcnt_to_inf(1)
                self._awgs[sweep[0]].set_sqel_event_jump_target_index(
                    sweep[1], 1)
                self._awgs[sweep[0]].set_sqel_event_jump_type(1, 'IND')
            else:
                # continuous-mode AWG: just select the waveform on the channel
                self._awgs[sweep[0]].set(
                    'ch%i_waveform' % sweep[1], sweep_info[sweep]['name'])
        # enable all involved channels before starting the AWGs
        for sweep in sweep_info:
            self._awgs[sweep[0]].set('ch%i_state' % sweep[1], 1)
        awgnrs = set([sweep[0] for sweep in sweep_info])
        for nr in awgnrs:
            self._awgs[nr].run()
def make_sawtooth(self, sweeprange, period, width=.95, repetitionnr=1, start_zero=False):
'''Make a sawtooth with a decline width determined by width. Not yet scaled with
awg_to_plunger value.
Arguments:
sweeprange (float): the range of voltages to sweep over
period (float): the period of the triangular signal
Returns:
wave_raw (array): raw data which represents the waveform
'''
samplerate = 1. / self.AWG_clock
tt = np.arange(0, period * repetitionnr + samplerate, samplerate)
v_wave = float(sweeprange / ((self.ch_amp / 2.0)))
wave_raw = (v_wave / 2) * scipy.signal.sawtooth(2 * np.pi * tt / period, width=width)
# idx_zero = np.argmin(np.abs(wave_raw))
# wave_raw = np.roll(wave_raw, wave_raw.size-idx_zero)
if start_zero:
o = int((wave_raw.size) * (1 - width) / 2)
wave_raw = np.roll(wave_raw, o)
return wave_raw
def make_pulses(self, voltages, waittimes, reps=1, filtercutoff=None, mvrange=None):
"""Make a pulse sequence with custom voltage levels and wait times at each level.
Arguments:
voltages (list of floats): voltage levels to be applied in the sequence
waittimes (list of floats): duration of each pulse in the sequence
reps (int): number of times to repeat the pulse sequence in the waveform
filtercutoff (float): cutoff frequency of a 1st order butterworth filter to make the pulse steps smoother
Returns:
wave_raw (array): raw data which represents the waveform
"""
if len(waittimes) != len(voltages):
raise Exception('Number of voltage levels must be equal to the number of wait times')
samples = [int(x * self.AWG_clock) for x in waittimes]
if mvrange is None:
mvrange = [max(voltages), min(voltages)]
v_wave = float((mvrange[0] - mvrange[1]) / self.ch_amp)
v_prop = [2 * ((x - mvrange[1]) / (mvrange[0] - mvrange[1])) - 1 for x in voltages]
wave_raw = np.concatenate([x * v_wave * np.ones(y) for x, y in zip(v_prop, samples)])
if filtercutoff is not None:
b, a = scipy.signal.butter(1, 0.5 * filtercutoff / self.AWG_clock, btype='low', analog=False, output='ba')
wave_raw = scipy.signal.filtfilt(b, a, wave_raw)
wave_raw = np.tile(wave_raw, reps)
return wave_raw
def check_frequency_waveform(self, period, width):
""" Check whether a sawtooth waveform with specified period can be generated """
old_sr = self.AWG_clock
new_sr = 5 / (period * (1 - width))
if (new_sr) > old_sr:
warnings.warn('awg sampling frequency %.1f MHz is too low for signal requested (sr %.1f [MHz], period %.1f [ms])' % (
old_sr / 1e6, new_sr / 1e6, 1e3 * period), UserWarning)
return new_sr
    def sweep_gate(self, gate, sweeprange, period, width=.95, wave_name=None, delete=True):
        ''' Send a sawtooth signal with the AWG to a gate to sweep. Also
        send a marker to the measurement instrument.

        Args:
            gate (string): the name of the gate to sweep
            sweeprange (float): the range of voltages to sweep over
            period (float): the period of the triangular signal
            width (float): rising-edge fraction of the sawtooth
            wave_name (str or None): name for the waveform on the AWG
            delete (bool): if True, re-upload the waveforms to the AWG

        Returns:
            waveform (dict): The waveform being send with the AWG.
            sweep_info (dict): the keys are tuples of the awgs and channels to activate

        Example:
        >>> waveform, sweep_info = sweep_gate('P1',sweeprange=60,period=1e-3)
        '''
        # validate clock rate and output amplitude before generating anything
        self.check_frequency_waveform(period, width)
        self.check_amplitude(gate, sweeprange)
        start_zero = True
        waveform = dict()
        wave_raw = self.make_sawtooth(sweeprange, period, width, start_zero=start_zero)
        # scale from AWG output volts to effective gate millivolts
        awg_to_plunger = self.hardware.parameters['awg_to_%s' % gate].get()
        wave = wave_raw / awg_to_plunger
        waveform[gate] = dict()
        waveform[gate]['wave'] = wave
        if wave_name is None:
            waveform[gate]['name'] = 'sweep_%s' % gate
        else:
            waveform[gate]['name'] = wave_name
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # attach metadata used later by sweep_process
        waveform['width'] = width
        waveform['start_zero'] = start_zero
        waveform['sweeprange'] = sweeprange
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
    def sweep_gate_virt(self, gate_comb, sweeprange, period, width=.95, delete=True):
        ''' Send a sawtooth signal with the AWG to a linear combination of
        gates to sweep. Also send a marker to the measurement instrument.

        Arguments:
            gate_comb (dict): the gates to sweep and the coefficients as values
            sweeprange (float): the range of voltages to sweep over
            period (float): the period of the triangular signal
            width (float): rising-edge fraction of the sawtooth
            delete (bool): if True, re-upload the waveforms to the AWG

        Returns:
            waveform (dict): The waveform being send with the AWG.
            sweep_info (dict): the keys are tuples of the awgs and channels to activate
        '''
        self.check_frequency_waveform(period, width)
        waveform = dict()
        # each physical gate sweeps its share of the range; check all first
        for g in gate_comb:
            self.check_amplitude(g, gate_comb[g] * sweeprange)
        for g in gate_comb:
            wave_raw = self.make_sawtooth(sweeprange, period, width)
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            # scale by the virtual-gate coefficient and the attenuation factor
            wave = wave_raw * gate_comb[g] / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            waveform[g]['name'] = 'sweep_%s' % g
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # attach metadata used later by sweep_process
        waveform['width'] = width
        waveform['sweeprange'] = sweeprange
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
    def sweepandpulse_gate(self, sweepdata, pulsedata, wave_name=None, delete=True, shift_zero=True):
        ''' Makes and outputs a waveform which overlays a sawtooth signal to sweep
        a gate, with a pulse sequence. A marker is sent to the measurement instrument
        at the start of the waveform.

        IMPORTANT: The function offsets the voltages values so that the last point is 0 V on all gates (i.e. it centers the pulse sequence on the last point)

        Args:
            sweepdata (dict): inputs for the sawtooth (gate, sweeprange, period, width).
                    See sweep_gate for more info.
            pulsedata (dict): inputs for the pulse sequence (gate_voltages, waittimes).
                    See pulse_gates for more info.
            wave_name (str or None): name for the waveforms on the AWG
            delete (bool): if True, re-upload the waveforms to the AWG
            shift_zero (bool): if True, offset all pulse levels so the last
                    level of every gate is 0 V

        Returns:
            waveform (dict): The waveform being sent with the AWG.
            sweep_info (dict): the keys are tuples of the awgs and channels to activate
        '''
        sweepgate = sweepdata['gate']
        sweeprange = sweepdata['sweeprange']
        period = sweepdata['period']
        width = sweepdata.get('width', 0.95)
        # copy so the caller's dict is not modified by the offsetting below
        gate_voltages = pulsedata['gate_voltages'].copy()
        if shift_zero:
            for g in gate_voltages:
                gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]
        waittimes = pulsedata['waittimes']
        filtercutoff = pulsedata.get('filtercutoff', None)
        pulsesamp = [int(round(x * self.AWG_clock)) for x in waittimes]
        sawsamp = int(round(period * width * self.AWG_clock))
        # enough repetitions of the pulse train to fill the sawtooth rising edge
        pulsereps = int(np.ceil(self.AWG_clock * period * width / sum(pulsesamp)))
        allvoltages = np.concatenate([v for v in gate_voltages.values()])
        mvrange = [max(allvoltages), min(allvoltages)]
        self.check_frequency_waveform(period, width)
        waveform = dict()
        wave_sweep = self.make_sawtooth(sweeprange, period, width)
        # amplitude check: sweep plus full pulse excursion must fit on each gate
        for g in gate_voltages:
            self.check_amplitude(g, sweeprange + (mvrange[0] - mvrange[1]))
        for g in gate_voltages:
            wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=pulsereps,
                                        filtercutoff=filtercutoff, mvrange=mvrange)
            # crop to the rising edge and pad the fly-back with the last value
            wave_raw = wave_raw[:sawsamp]
            wave_raw = np.pad(wave_raw, (0, len(wave_sweep) - len(wave_raw)), 'edge')
            if sweepgate == g:
                wave_raw += wave_sweep
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            if wave_name is None:
                waveform[g]['name'] = 'sweepandpulse_%s' % g
            else:
                waveform[g]['name'] = wave_name
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # attach metadata used later by sweep_process
        waveform['width'] = width
        waveform['sweeprange'] = sweeprange
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period
        waveform['pulse_voltages'] = gate_voltages
        waveform['pulse_waittimes'] = waittimes
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
def sweep_process(self, data, waveform, Naverage=1, direction='forwards', start_offset=1):
""" Process the data returned by reading out based on the shape of
the sawtooth send with the AWG.
Args:
data (list or Nxk array): the data (N is the number of samples)
waveform (dict): contains the wave and the sawtooth width
Naverage (int): number of times the signal was averaged
direction (string): option to use backwards signal i.o. forwards
Returns:
data_processed (array): The data after dropping part of it.
Example:
>> data_processed = sweep_process(data, waveform, 25)
"""
width = waveform['width']
if isinstance(data, list):
data = np.array(data)
if direction == 'forwards':
end = int(np.floor(width * data.shape[0] - 1))
data_processed = data[start_offset:end]
elif direction == 'backwards':
begin = int(np.ceil(width * data.shape[0] + 1))
data_processed = data[begin:]
data_processed = data_processed[::-1]
data_processed = np.array(data_processed) / Naverage
return data_processed
    def sweep_2D(self, samp_freq, sweepgates, sweepranges, resolution, width=.95, comp=None, delete=True):
        ''' Send sawtooth signals to the sweepgates which effectively do a 2D
        scan.

        The first sweepgate is the fast changing gate (on the horizontal axis).

        Arguments:
            samp_freq (float): sampling frequency of the measurement instrument in Hertz.
            sweepgates (list): two strings with names of gates to sweep
            sweepranges (list): two floats for sweepranges in milliVolts
            resolution (list): two ints for numbers of pixels (horizontal, vertical)
            width (float): rising-edge fraction of the sawtooths
            comp (dict or None): per-gate compensation coefficients
                ('horz' and 'vert') for non-sweep gates
            delete (bool): if True, re-upload the waveforms to the AWG

        Returns:
            waveform (dict): The waveforms being send with the AWG.
            sweep_info (dict): the keys are tuples of the awgs and channels to activate
        '''
        # JP: I think FPGA exceptions should not be handled by awg
        # if resolution[0] * resolution[1] > self.maxdatapts:
        #    raise Exception('resolution is set higher than FPGA memory allows')
        if self.corr != 0:
            raise Exception('please do not use the .corr setting any more')
        # legacy: corr is guaranteed 0 here, so error_corr is always 0
        error_corr = resolution[0] * self.corr
        period_horz = resolution[0] / samp_freq + error_corr
        period_vert = resolution[1] * period_horz
        self.check_frequency_waveform(period_horz, width)
        for g, r in zip(sweepgates, sweepranges):
            self.check_amplitude(g, r)
        waveform = dict()
        # horizontal waveform: one sawtooth per scan line
        wave_horz_raw = self.make_sawtooth(
            sweepranges[0], period_horz, repetitionnr=resolution[1])
        awg_to_plunger_horz = self.hardware.parameters[
            'awg_to_%s' % sweepgates[0]].get()
        wave_horz = wave_horz_raw / awg_to_plunger_horz
        waveform[sweepgates[0]] = dict()
        waveform[sweepgates[0]]['wave'] = wave_horz
        waveform[sweepgates[0]]['name'] = 'sweep_2D_horz_%s' % sweepgates[0]
        # vertical waveform: one sawtooth spanning the full frame
        wave_vert_raw = self.make_sawtooth(sweepranges[1], period_vert)
        awg_to_plunger_vert = self.hardware.parameters[
            'awg_to_%s' % sweepgates[1]].get()
        wave_vert = wave_vert_raw / awg_to_plunger_vert
        waveform[sweepgates[1]] = dict()
        waveform[sweepgates[1]]['wave'] = wave_vert
        waveform[sweepgates[1]]['name'] = 'sweep_2D_vert_%s' % sweepgates[1]
        if comp is not None:
            # compensation waveforms on additional (non-sweep) gates
            for g in comp:
                if g not in sweepgates:
                    waveform[g] = dict()
                    waveform[g]['wave'] = comp[g]['vert'] * \
                        wave_vert + comp[g]['horz'] * wave_horz
                    waveform[g]['name'] = 'sweep_2D_comp_%s' % g
                else:
                    raise Exception('Can not compensate a sweepgate')
        sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)
        self.sweep_run(sweep_info)
        # attach metadata used later by sweep_2D_process
        waveform['width_horz'] = width
        waveform['sweeprange_horz'] = sweepranges[0]
        waveform['width_vert'] = width
        waveform['sweeprange_vert'] = sweepranges[1]
        waveform['resolution'] = resolution
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period_vert
        waveform['period_horz'] = period_horz
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
    def sweep_2D_virt(self, samp_freq, gates_horz, gates_vert, sweepranges, resolution, width=.95, delete=True):
        ''' Send sawtooth signals to the linear combinations of gates set by
        gates_horz and gates_vert which effectively do a 2D scan of two virtual
        gates.

        The horizontal direction is the direction where the AWG signal is changing fastest. It is the first element in the resolution and sweepranges.

        Arguments:
            samp_freq (float): sampling frequency of the measurement instrument in Hertz.
            gates_horz (dict): the gates for the horizontal direction and their coefficients
            gates_vert (dict): the gates for the vertical direction and their coefficients
            sweepranges (list): two floats for sweepranges in milliVolts
            resolution (list): two ints for numbers of pixels
            width (float): rising-edge fraction of the sawtooths
            delete (bool): if True, re-upload the waveforms to the AWG

        Returns:
            waveform (dict): The waveforms being send with the AWG.
            sweep_info (dict): the keys are tuples of the awgs and channels to activate
        '''
        # JP: I think FPGA exceptions should not be handled by awg
        # if resolution[0] * resolution[1] > self.maxdatapts:
        #    raise Exception('resolution is set higher than memory allows')
        # legacy: corr is 0 by construction, so error_corr is always 0
        error_corr = resolution[0] * self.corr
        period_horz = resolution[0] / samp_freq + error_corr
        period_vert = resolution[1] * period_horz
        new_sr = self.check_frequency_waveform(period_horz, width)
        # self.reset_AWG(new_sr)
        waveform = dict()
        # horizontal virtual gate: one sawtooth per scan line on each component
        for g in gates_horz:
            self.check_amplitude(g, sweepranges[0] * gates_horz[g])
        for g in gates_horz:
            wave_raw = self.make_sawtooth(sweepranges[0], period_horz, repetitionnr=resolution[1])
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw * gates_horz[g] / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            waveform[g]['name'] = 'sweep_2D_virt_%s' % g
        # vertical virtual gate: frame-long sawtooth, added to any existing
        # horizontal component on the same physical gate
        for g in gates_vert:
            self.check_amplitude(g, sweepranges[1] * gates_vert[g])
        for g in gates_vert:
            wave_raw = self.make_sawtooth(sweepranges[1], period_vert)
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw * gates_vert[g] / awg_to_plunger
            if g in waveform:
                waveform[g]['wave'] = waveform[g]['wave'] + wave
            else:
                waveform[g] = dict()
                waveform[g]['wave'] = wave
                waveform[g]['name'] = 'sweep_2D_virt_%s' % g
        # TODO: Implement compensation of sensing dot plunger
        sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)
        self.sweep_run(sweep_info)
        # attach metadata used later by sweep_2D_process
        waveform['width_horz'] = width
        waveform['sweeprange_horz'] = sweepranges[0]
        waveform['width_vert'] = width
        waveform['sweeprange_vert'] = sweepranges[1]
        waveform['resolution'] = resolution
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period_vert
        waveform['period_horz'] = period_horz
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
def sweep_2D_process(self, data, waveform, diff_dir=None):
''' Process data from sweep_2D
Arguments:
data (list): the raw measured data
waveform (dict): The waveforms that was sent with the AWG.
Returns:
data_processed (list): the processed data
'''
width_horz = waveform['width_horz']
width_vert = waveform['width_vert']
resolution = waveform['resolution']
# split up the fpga data in chunks of horizontal sweeps
chunks_ch1 = [data[x:x + resolution[0]] for x in range(0, len(data), resolution[0])]
chunks_ch1 = [chunks_ch1[i][1:int(width_horz * len(chunks_ch1[i]))] for i in range(0, len(chunks_ch1))]
data_processed = chunks_ch1[:int(width_vert * len(chunks_ch1))]
if diff_dir is not None:
data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)
return data_processed
def pulse_gates(self, gate_voltages, waittimes, reps=1, filtercutoff=None, reset_to_zero=False, delete=True):
''' Send a pulse sequence with the AWG that can span over any gate space.
Sends a marker to measurement instrument at the start of the sequence.
Only works with physical gates.
Arguments:
gate_voltages (dict): keys are gates to apply the sequence to, and values
are arrays with the voltage levels to be applied in the sequence
waittimes (list of floats): duration of each pulse in the sequence
reset_to_zero (bool): if True, the function offsets the voltages values so that the last point is 0V
on all gates (i.e. it centers the pulse sequence on the last point).
Returns:
waveform (dict): The waveform being send with the AWG.
sweep_info (dict): the keys are tuples of the awgs and channels to activate
'''
period = sum(waittimes)
if reset_to_zero:
for g in gate_voltages:
gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]
allvoltages = np.concatenate([v for v in gate_voltages.values()])
mvrange = [max(allvoltages), min(allvoltages)]
waveform = dict()
for g in gate_voltages:
wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=reps,
filtercutoff=filtercutoff, mvrange=mvrange)
awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
wave = wave_raw / awg_to_plunger
waveform[g] = dict()
waveform[g]['wave'] = wave
waveform[g]['name'] = 'pulses_%s' % g
sweep_info = self.sweep_init(waveform, period, delete)
self.sweep_run(sweep_info)
waveform['voltages'] = gate_voltages
waveform['samplerate'] = 1 / self.AWG_clock
waveform['waittimes'] = waittimes
for channels in sweep_info:
if 'delay' in sweep_info[channels]:
waveform['markerdelay'] = sweep_info[channels]['delay']
return waveform, sweep_info
    def reset_AWG(self, clock=1e8):
        """ Reset AWG to videomode and scanfast

        Args:
            clock (float): new AWG clock frequency in Hz
        """
        self.AWG_clock = clock
        for a in self._awgs:
            a.clock_freq.set(clock)
            a.trigger_mode.set('CONT')
            a.trigger_source.set('INT')
            # warn (but do not change) when a channel amplitude deviates
            # from the expected 4.0 V peak-to-peak setting
            for ii in range(1, 5):
                f = getattr(a, 'ch%d_amp' % ii)
                val = f()
                if val != 4.0:
                    warnings.warn('AWG channel %d output not at 4.0 V' % ii)
        if self.awg_seq is not None:
            self._set_seq_mode(self.awg_seq)
def set_amplitude(self, amplitude):
""" Set the AWG peak-to-peak amplitude for all channels
Args:
amplitude (float): peak-to-peak amplitude (V)
"""
if amplitude < 0.02:
warnings.warn('Trying to set AWG amplitude too low, setting it to minimum (20mV)')
amplitude = 0.02
elif amplitude > 4.5:
warnings.warn('Trying to set AWG amplitude too high, setting it to maximum (4.5V)')
amplitude = 4.5
# tektronics 5014 has precision of 1mV
self.ch_amp = round(amplitude, 3)
for awg in self._awgs:
for i in range(1, 5):
awg.set('ch%s_amp' % i, self.ch_amp)
def check_amplitude(self, gate, mvrange):
""" Calculates the lowest allowable AWG peak-to-peak amplitude based on the
ranges to be applied to the gates. If the AWG amplitude is too low, it gives
a warning and increases the amplitude.
Args:
gate (str): name of the gate to check
mvrange (float): voltage range, in mV, that the gate needs to reach
"""
min_amp = mvrange / self.hardware.parameters['awg_to_%s' % gate].get()
if min_amp > 4:
raise(Exception('Sweep range of gate %s is larger than maximum allowed by the AWG' % gate))
if self.ch_amp < min_amp:
min_amp = np.ceil(min_amp * 10) / 10
self.set_amplitude(min_amp)
warnings.warn('AWG amplitude too low for this range, setting to %.1f' % min_amp)
# %%
def plot_wave_raw(wave_raw, samplerate=None, station=None):
    ''' Plot the raw wave

    Arguments:
        wave_raw (array): raw data which represents the waveform
        samplerate (float or None): sample period in seconds; when None it is
            derived from the AWG clock of the provided station
        station (object or None): measurement station used to look up the AWG
            clock when samplerate is not given

    Returns:
        plot (QtPlot): the plot showing the data
    '''
    if samplerate is None:
        if station is None:
            raise Exception('There is no station')
        # bugfix: instruments have no .getattr() method; use the builtin getattr
        samplerate = 1 / getattr(station.awg, 'AWG_clock')
    # index-based arange avoids a stray extra point from float accumulation
    horz_var = np.arange(len(wave_raw)) * samplerate
    x = DataArray(name='time(s)', label='time (s)',
                  preset_data=horz_var, is_setpoint=True)
    y = DataArray(
        label='sweep value (mV)', preset_data=wave_raw, set_arrays=(x,))
    plot = QtPlot(x, y)
    return plot
def sweep_2D_process(data, waveform, diff_dir=None):
    ''' Reshape raw 2D-sweep data into rows and trim the flyback portions.

    Arguments:
        data (list): the raw measured data
        waveform (dict): waveform metadata as returned by sweep_2D
        diff_dir: optional differentiation direction for diffImageSmooth

    Returns:
        data_processed (list): one trimmed row of data per horizontal sweep
    '''
    n_horz = waveform['resolution'][0]
    width_horz = waveform['width_horz']
    width_vert = waveform['width_vert']
    # cut the stream into one chunk per horizontal sweep line
    rows = [data[start:start + n_horz] for start in range(0, len(data), n_horz)]
    # drop the first sample and the flyback tail of every row
    rows = [row[1:int(width_horz * len(row))] for row in rows]
    # drop the vertical flyback rows
    data_processed = rows[:int(width_vert * len(rows))]
    if diff_dir is not None:
        data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)
    return data_processed
| 41.774631 | 157 | 0.60296 |
import numpy as np
import scipy.signal
import logging
import warnings
import qcodes
from qcodes import Instrument
from qcodes.plots.pyqtgraph import QtPlot
from qcodes.data.data_array import DataArray
import qtt
import qtt.utilities.tools
logger = logging.getLogger(__name__)
class virtual_awg(Instrument):
    """Virtual AWG instrument that orchestrates one or two physical AWGs."""

    def __init__(self, name, instruments=None, awg_map=None, hardware=None, verbose=1, **kwargs):
        """Create the virtual AWG.

        Args:
            name (str): name of the (virtual) instrument
            instruments (list or None): the physical AWG instruments to control
            awg_map (dict or None): maps gates/markers to (awg index, channel[, marker])
            hardware: object holding the awg_to_* conversion parameters
            verbose (int): verbosity level
        """
        super().__init__(name, **kwargs)
        logger.info('initialize virtual_awg %s' % name)
        # bugfix: a mutable default argument ([]) is shared between calls; use None
        if instruments is None:
            instruments = []
        self._awgs = instruments
        self.awg_map = awg_map
        self.hardware = hardware
        self.verbose = verbose
        self.delay_FPGA = 2.0e-6  # marker delay towards the measurement hardware (s)
        self.corr = .0
        self.maxdatapts = 16e6
        self.awg_seq = None
        if len(self._awgs) == 0 and self.verbose:
            print('no physical AWGs connected')
        elif len(self._awgs) == 1:
            self.awg_cont = self._awgs[0]
            self.awg_cont.set('run_mode', 'CONT')
        elif len(self._awgs) == 2 and 'awg_mk' in self.awg_map:
            # one AWG runs continuously, the other one in sequence mode
            self.awg_cont = self._awgs[self.awg_map['awg_mk'][0]]
            self.awg_cont.set('run_mode', 'CONT')
            self.awg_seq = self._awgs[(self.awg_map['awg_mk'][0] + 1) % 2]
            self._set_seq_mode(self.awg_seq)
            self.delay_AWG = self.hardware.parameters['delay_AWG'].get()
        else:
            raise Exception(
                'Configuration of AWGs not supported by virtual_awg instrument')
        self.AWG_clock = 1e8
        self.ch_amp = 4.0
        for awg in self._awgs:
            awg.set('clock_freq', self.AWG_clock)
            awg.delete_all_waveforms_from_list()
            for i in range(1, 5):
                awg.set('ch%s_amp' % i, self.ch_amp)
def _set_seq_mode(self, a):
a.set('run_mode', 'SEQ')
a.sequence_length.set(1)
a.set_sqel_trigger_wait(1, 0)
def get_idn(self):
IDN = {'vendor': 'QuTech', 'model': 'virtual_awg',
'serial': None, 'firmware': None}
return IDN
def awg_gate(self, gate):
if gate is None:
return False
if isinstance(gate, dict):
return np.all([self.awg_gate(g) for g in gate])
if self.awg_map is None:
return False
if gate in self.awg_map:
return True
else:
return False
def stop(self, verbose=0):
for awg in self._awgs:
awg.stop()
for i in range(1, 5):
awg.set('ch%d_state' % i, 0)
if verbose:
print('Stopped AWGs')
    def sweep_init(self, waveforms, period=1e-3, delete=True, samp_freq=None):
        """Upload the waveforms (plus the required marker traces) to the AWGs.

        Args:
            waveforms (dict): per-gate dict with a 'wave' entry (and optionally 'name')
            period (float): period of the waveform in seconds
            delete (bool): if True, clear the AWG waveform lists before uploading
            samp_freq (float or None): digitizer sample rate; used to compensate
                the m4i pretrigger in the marker delay

        Returns:
            sweep_info (dict): keyed by (awg index, channel); holds the waveform,
                both marker traces and the waveform name for every used channel
        """
        sweepgates = [g for g in waveforms]
        if delete:
            for awg in self._awgs:
                awg.delete_all_waveforms_from_list()
        awgs = [self._awgs[self.awg_map[g][0]] for g in sweepgates]
        # select the marker channel that triggers the measurement hardware
        if 'fpga_mk' in self.awg_map:
            marker_info = self.awg_map['fpga_mk']
            marker_delay = self.delay_FPGA
            marker_name = 'fpga_mk'
        elif 'm4i_mk' in self.awg_map:
            marker_info = self.awg_map['m4i_mk']
            if samp_freq is not None:
                # the m4i digitizer records 16 pretrigger samples; shift the marker accordingly
                pretrigger_period = 16 / samp_freq
            else:
                pretrigger_period = 0
            marker_delay = self.delay_FPGA + pretrigger_period
            marker_name = 'm4i_mk'
        awgs.append(self._awgs[marker_info[0]])
        sweep_info = dict()
        wave_len = len(waveforms[sweepgates[0]]['wave'])
        for g in sweepgates:
            sweep_info[self.awg_map[g]] = dict()
            sweep_info[self.awg_map[g]]['waveform'] = waveforms[g]['wave']
            sweep_info[self.awg_map[g]]['marker1'] = np.zeros(wave_len)
            sweep_info[self.awg_map[g]]['marker2'] = np.zeros(wave_len)
            if 'name' in waveforms[g]:
                sweep_info[self.awg_map[g]]['name'] = waveforms[g]['name']
            else:
                sweep_info[self.awg_map[g]]['name'] = 'waveform_%s' % g
            if marker_info[:2] == self.awg_map[g]:
                sweep_info[marker_info[:2]]['delay'] = marker_delay
        # build the measurement marker pulse: high for 5% of the period, shifted by marker_delay
        marker_points = np.zeros(wave_len)
        marker_points[int(marker_delay * self.AWG_clock):(int(marker_delay * self.AWG_clock) + wave_len // 20)] = 1.0
        if marker_info[:2] not in sweep_info:
            # the marker lives on a channel that carries no sweep waveform
            sweep_info[marker_info[:2]] = dict()
            sweep_info[marker_info[:2]]['waveform'] = np.zeros(wave_len)
            sweep_info[marker_info[:2]]['marker1'] = np.zeros(wave_len)
            sweep_info[marker_info[:2]]['marker2'] = np.zeros(wave_len)
            for g in sweepgates:
                marker_name += '_%s' % g
            sweep_info[marker_info[:2]]['name'] = marker_name
            sweep_info[marker_info[:2]]['delay'] = marker_delay
        sweep_info[marker_info[:2]]['marker%d' % marker_info[2]] = marker_points
        self._awgs[marker_info[0]].set(
            'ch%i_m%i_low' % (marker_info[1], marker_info[2]), 0)
        self._awgs[marker_info[0]].set(
            'ch%i_m%i_high' % (marker_info[1], marker_info[2]), 2.6)
        # the second AWG (sequence mode) is triggered through its own marker channel
        if getattr(self, 'awg_seq', None) is not None:
            awg_info = self.awg_map['awg_mk']
            if awg_info[:2] not in sweep_info:
                awgs.append(self._awgs[awg_info[0]])
                sweep_info[awg_info[:2]] = dict()
                sweep_info[awg_info[:2]]['waveform'] = np.zeros(wave_len)
                sweep_info[awg_info[:2]]['marker1'] = np.zeros(wave_len)
                sweep_info[awg_info[:2]]['marker2'] = np.zeros(wave_len)
                sweep_info[awg_info[:2]]['name'] = 'awg_mk'
            awg_marker = np.zeros(wave_len)
            awg_marker[0:wave_len // 20] = 1
            awg_marker = np.roll(
                awg_marker, wave_len - int(self.delay_AWG * self.AWG_clock))
            sweep_info[awg_info[:2]]['marker%d' %
                                     self.awg_map['awg_mk'][2]] = awg_marker
            self._awgs[awg_info[0]].set(
                'ch%i_m%i_low' % (awg_info[1], awg_info[2]), 0)
            self._awgs[awg_info[0]].set(
                'ch%i_m%i_high' % (awg_info[1], awg_info[2]), 2.6)
        # upload all waveforms and marker traces to the AWGs
        if delete:
            for sweep in sweep_info:
                try:
                    self._awgs[sweep[0]].send_waveform_to_list(sweep_info[sweep]['waveform'], sweep_info[
                        sweep]['marker1'], sweep_info[sweep]['marker2'], sweep_info[sweep]['name'])
                except Exception as ex:
                    print(ex)
                    print('sweep_info[sweep][waveform] %s' % (sweep_info[sweep]['waveform'].shape,))
                    print('sweep_info[sweep][marker1] %s' % (sweep_info[sweep]['marker1'].shape,))
                    print('sweep_info[sweep][marker2] %s' % (sweep_info[sweep]['marker2'].shape,))
        return sweep_info
    def sweep_run(self, sweep_info):
        """Load the uploaded waveforms into their channels and start the AWGs.

        Args:
            sweep_info (dict): output of sweep_init, keyed by (awg index, channel)
        """
        for sweep in sweep_info:
            if hasattr(self, 'awg_seq') and self._awgs[sweep[0]] == self.awg_seq:
                # sequence-mode AWG: install the waveform as an infinitely looping sequence element
                self._awgs[sweep[0]].set_sqel_waveform(
                    sweep_info[sweep]['name'], sweep[1], 1)
                self._awgs[sweep[0]].set_sqel_loopcnt_to_inf(1)
                self._awgs[sweep[0]].set_sqel_event_jump_target_index(
                    sweep[1], 1)
                self._awgs[sweep[0]].set_sqel_event_jump_type(1, 'IND')
            else:
                self._awgs[sweep[0]].set(
                    'ch%i_waveform' % sweep[1], sweep_info[sweep]['name'])
        # switch on all used channels, then start each involved AWG once
        for sweep in sweep_info:
            self._awgs[sweep[0]].set('ch%i_state' % sweep[1], 1)
        awgnrs = set([sweep[0] for sweep in sweep_info])
        for nr in awgnrs:
            self._awgs[nr].run()
def make_sawtooth(self, sweeprange, period, width=.95, repetitionnr=1, start_zero=False):
samplerate = 1. / self.AWG_clock
tt = np.arange(0, period * repetitionnr + samplerate, samplerate)
v_wave = float(sweeprange / ((self.ch_amp / 2.0)))
wave_raw = (v_wave / 2) * scipy.signal.sawtooth(2 * np.pi * tt / period, width=width)
if start_zero:
o = int((wave_raw.size) * (1 - width) / 2)
wave_raw = np.roll(wave_raw, o)
return wave_raw
def make_pulses(self, voltages, waittimes, reps=1, filtercutoff=None, mvrange=None):
if len(waittimes) != len(voltages):
raise Exception('Number of voltage levels must be equal to the number of wait times')
samples = [int(x * self.AWG_clock) for x in waittimes]
if mvrange is None:
mvrange = [max(voltages), min(voltages)]
v_wave = float((mvrange[0] - mvrange[1]) / self.ch_amp)
v_prop = [2 * ((x - mvrange[1]) / (mvrange[0] - mvrange[1])) - 1 for x in voltages]
wave_raw = np.concatenate([x * v_wave * np.ones(y) for x, y in zip(v_prop, samples)])
if filtercutoff is not None:
b, a = scipy.signal.butter(1, 0.5 * filtercutoff / self.AWG_clock, btype='low', analog=False, output='ba')
wave_raw = scipy.signal.filtfilt(b, a, wave_raw)
wave_raw = np.tile(wave_raw, reps)
return wave_raw
def check_frequency_waveform(self, period, width):
old_sr = self.AWG_clock
new_sr = 5 / (period * (1 - width))
if (new_sr) > old_sr:
warnings.warn('awg sampling frequency %.1f MHz is too low for signal requested (sr %.1f [MHz], period %.1f [ms])' % (
old_sr / 1e6, new_sr / 1e6, 1e3 * period), UserWarning)
return new_sr
    def sweep_gate(self, gate, sweeprange, period, width=.95, wave_name=None, delete=True):
        """Send a sawtooth sweep signal to a single gate with the AWG.

        Args:
            gate (str): name of the gate to sweep
            sweeprange (float): sweep range in millivolt
            period (float): period of the sawtooth in seconds
            width (float): rising fraction of the sawtooth
            wave_name (str or None): name of the waveform on the AWG
            delete (bool): if True, clear the AWG waveform lists first

        Returns:
            tuple: (waveform dict describing the sweep, sweep_info dict)
        """
        self.check_frequency_waveform(period, width)
        self.check_amplitude(gate, sweeprange)
        start_zero = True
        waveform = dict()
        wave_raw = self.make_sawtooth(sweeprange, period, width, start_zero=start_zero)
        # scale by the awg_to_<gate> conversion factor from the hardware settings
        awg_to_plunger = self.hardware.parameters['awg_to_%s' % gate].get()
        wave = wave_raw / awg_to_plunger
        waveform[gate] = dict()
        waveform[gate]['wave'] = wave
        if wave_name is None:
            waveform[gate]['name'] = 'sweep_%s' % gate
        else:
            waveform[gate]['name'] = wave_name
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # metadata used by the processing functions
        waveform['width'] = width
        waveform['start_zero'] = start_zero
        waveform['sweeprange'] = sweeprange
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
    def sweep_gate_virt(self, gate_comb, sweeprange, period, width=.95, delete=True):
        """Send a sawtooth sweep to a virtual (combined) gate with the AWG.

        Args:
            gate_comb (dict): physical gate names mapped to their relative weight
            sweeprange (float): sweep range in millivolt
            period (float): period of the sawtooth in seconds
            width (float): rising fraction of the sawtooth
            delete (bool): if True, clear the AWG waveform lists first

        Returns:
            tuple: (waveform dict, sweep_info dict)
        """
        self.check_frequency_waveform(period, width)
        waveform = dict()
        for g in gate_comb:
            self.check_amplitude(g, gate_comb[g] * sweeprange)
        for g in gate_comb:
            wave_raw = self.make_sawtooth(sweeprange, period, width)
            # scale by the gate weight and the awg_to_<gate> conversion factor
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw * gate_comb[g] / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            waveform[g]['name'] = 'sweep_%s' % g
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # metadata used by the processing functions
        waveform['width'] = width
        waveform['sweeprange'] = sweeprange
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
    def sweepandpulse_gate(self, sweepdata, pulsedata, wave_name=None, delete=True, shift_zero=True):
        """Sweep a gate with a sawtooth while overlaying a pulse sequence on gates.

        Args:
            sweepdata (dict): 'gate', 'sweeprange', 'period' and optionally 'width'
            pulsedata (dict): 'gate_voltages' (dict of voltage-level lists) and
                'waittimes'; optionally 'filtercutoff'
            wave_name (str or None): name for the waveforms on the AWG
            delete (bool): if True, clear the AWG waveform lists first
            shift_zero (bool): if True, offset each pulse train so its last level is 0 V

        Returns:
            tuple: (waveform dict, sweep_info dict)
        """
        sweepgate = sweepdata['gate']
        sweeprange = sweepdata['sweeprange']
        period = sweepdata['period']
        width = sweepdata.get('width', 0.95)
        gate_voltages = pulsedata['gate_voltages'].copy()
        if shift_zero:
            # center the pulse sequence on its final level
            for g in gate_voltages:
                gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]
        waittimes = pulsedata['waittimes']
        filtercutoff = pulsedata.get('filtercutoff', None)
        # repeat the pulse train often enough to fill the rising part of the sawtooth
        pulsesamp = [int(round(x * self.AWG_clock)) for x in waittimes]
        sawsamp = int(round(period * width * self.AWG_clock))
        pulsereps = int(np.ceil(self.AWG_clock * period * width / sum(pulsesamp)))
        # shared voltage range so all gates use the same scaling
        allvoltages = np.concatenate([v for v in gate_voltages.values()])
        mvrange = [max(allvoltages), min(allvoltages)]
        self.check_frequency_waveform(period, width)
        waveform = dict()
        wave_sweep = self.make_sawtooth(sweeprange, period, width)
        for g in gate_voltages:
            self.check_amplitude(g, sweeprange + (mvrange[0] - mvrange[1]))
        for g in gate_voltages:
            wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=pulsereps,
                                        filtercutoff=filtercutoff, mvrange=mvrange)
            # truncate to the sawtooth upslope and pad the flyback with the last level
            wave_raw = wave_raw[:sawsamp]
            wave_raw = np.pad(wave_raw, (0, len(wave_sweep) - len(wave_raw)), 'edge')
            if sweepgate == g:
                wave_raw += wave_sweep
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            if wave_name is None:
                waveform[g]['name'] = 'sweepandpulse_%s' % g
            else:
                waveform[g]['name'] = wave_name
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # metadata used by the processing functions
        waveform['width'] = width
        waveform['sweeprange'] = sweeprange
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period
        waveform['pulse_voltages'] = gate_voltages
        waveform['pulse_waittimes'] = waittimes
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
def sweep_process(self, data, waveform, Naverage=1, direction='forwards', start_offset=1):
width = waveform['width']
if isinstance(data, list):
data = np.array(data)
if direction == 'forwards':
end = int(np.floor(width * data.shape[0] - 1))
data_processed = data[start_offset:end]
elif direction == 'backwards':
begin = int(np.ceil(width * data.shape[0] + 1))
data_processed = data[begin:]
data_processed = data_processed[::-1]
data_processed = np.array(data_processed) / Naverage
return data_processed
    def sweep_2D(self, samp_freq, sweepgates, sweepranges, resolution, width=.95, comp=None, delete=True):
        """Send sawtooth signals to the AWG for a 2D scan over two physical gates.

        Args:
            samp_freq (float): sample rate of the measurement instrument (Hz)
            sweepgates (list): two gate names [horizontal, vertical]
            sweepranges (list): sweep ranges (mV) for the two gates
            resolution (list): number of pixels [horizontal, vertical]
            width (float): rising fraction of the sawtooths
            comp (dict or None): optional compensation weights ('horz'/'vert'
                entries) for additional, non-swept gates
            delete (bool): if True, clear the AWG waveform lists first

        Returns:
            tuple: (waveform dict, sweep_info dict)
        """
        if self.corr != 0:
            raise Exception('please do not use the .corr setting any more')
        error_corr = resolution[0] * self.corr
        # one horizontal sweep line per vertical pixel
        period_horz = resolution[0] / samp_freq + error_corr
        period_vert = resolution[1] * period_horz
        self.check_frequency_waveform(period_horz, width)
        for g, r in zip(sweepgates, sweepranges):
            self.check_amplitude(g, r)
        waveform = dict()
        # fast (horizontal) sawtooth, repeated for every vertical pixel
        wave_horz_raw = self.make_sawtooth(
            sweepranges[0], period_horz, repetitionnr=resolution[1])
        awg_to_plunger_horz = self.hardware.parameters[
            'awg_to_%s' % sweepgates[0]].get()
        wave_horz = wave_horz_raw / awg_to_plunger_horz
        waveform[sweepgates[0]] = dict()
        waveform[sweepgates[0]]['wave'] = wave_horz
        waveform[sweepgates[0]]['name'] = 'sweep_2D_horz_%s' % sweepgates[0]
        # slow (vertical) sawtooth covering the full frame
        wave_vert_raw = self.make_sawtooth(sweepranges[1], period_vert)
        awg_to_plunger_vert = self.hardware.parameters[
            'awg_to_%s' % sweepgates[1]].get()
        wave_vert = wave_vert_raw / awg_to_plunger_vert
        waveform[sweepgates[1]] = dict()
        waveform[sweepgates[1]]['wave'] = wave_vert
        waveform[sweepgates[1]]['name'] = 'sweep_2D_vert_%s' % sweepgates[1]
        if comp is not None:
            for g in comp:
                if g not in sweepgates:
                    waveform[g] = dict()
                    waveform[g]['wave'] = comp[g]['vert'] * \
                        wave_vert + comp[g]['horz'] * wave_horz
                    waveform[g]['name'] = 'sweep_2D_comp_%s' % g
                else:
                    raise Exception('Can not compensate a sweepgate')
        sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)
        self.sweep_run(sweep_info)
        # metadata used by the processing functions
        waveform['width_horz'] = width
        waveform['sweeprange_horz'] = sweepranges[0]
        waveform['width_vert'] = width
        waveform['sweeprange_vert'] = sweepranges[1]
        waveform['resolution'] = resolution
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period_vert
        waveform['period_horz'] = period_horz
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
    def sweep_2D_virt(self, samp_freq, gates_horz, gates_vert, sweepranges, resolution, width=.95, delete=True):
        """Send sawtooth signals to the AWG for a 2D scan over two virtual gates.

        Args:
            samp_freq (float): sample rate of the measurement instrument (Hz)
            gates_horz (dict): physical gates and weights for the horizontal axis
            gates_vert (dict): physical gates and weights for the vertical axis
            sweepranges (list): sweep ranges (mV) for the two axes
            resolution (list): number of pixels [horizontal, vertical]
            width (float): rising fraction of the sawtooths
            delete (bool): if True, clear the AWG waveform lists first

        Returns:
            tuple: (waveform dict, sweep_info dict)
        """
        error_corr = resolution[0] * self.corr
        period_horz = resolution[0] / samp_freq + error_corr
        period_vert = resolution[1] * period_horz
        # note: the required sample rate is computed but not applied here
        new_sr = self.check_frequency_waveform(period_horz, width)
        waveform = dict()
        for g in gates_horz:
            self.check_amplitude(g, sweepranges[0] * gates_horz[g])
        # fast (horizontal) sawtooth on every horizontal virtual-gate member
        for g in gates_horz:
            wave_raw = self.make_sawtooth(sweepranges[0], period_horz, repetitionnr=resolution[1])
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw * gates_horz[g] / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            waveform[g]['name'] = 'sweep_2D_virt_%s' % g
        for g in gates_vert:
            self.check_amplitude(g, sweepranges[1] * gates_vert[g])
        # slow (vertical) sawtooth, added onto the waveform of gates already in use
        for g in gates_vert:
            wave_raw = self.make_sawtooth(sweepranges[1], period_vert)
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw * gates_vert[g] / awg_to_plunger
            if g in waveform:
                waveform[g]['wave'] = waveform[g]['wave'] + wave
            else:
                waveform[g] = dict()
                waveform[g]['wave'] = wave
                waveform[g]['name'] = 'sweep_2D_virt_%s' % g
        sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)
        self.sweep_run(sweep_info)
        # metadata used by the processing functions
        waveform['width_horz'] = width
        waveform['sweeprange_horz'] = sweepranges[0]
        waveform['width_vert'] = width
        waveform['sweeprange_vert'] = sweepranges[1]
        waveform['resolution'] = resolution
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['period'] = period_vert
        waveform['period_horz'] = period_horz
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
def sweep_2D_process(self, data, waveform, diff_dir=None):
width_horz = waveform['width_horz']
width_vert = waveform['width_vert']
resolution = waveform['resolution']
chunks_ch1 = [data[x:x + resolution[0]] for x in range(0, len(data), resolution[0])]
chunks_ch1 = [chunks_ch1[i][1:int(width_horz * len(chunks_ch1[i]))] for i in range(0, len(chunks_ch1))]
data_processed = chunks_ch1[:int(width_vert * len(chunks_ch1))]
if diff_dir is not None:
data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)
return data_processed
    def pulse_gates(self, gate_voltages, waittimes, reps=1, filtercutoff=None, reset_to_zero=False, delete=True):
        """Send a pulse sequence with the AWG that can span over any gate space.

        Sends a marker to the measurement instrument at the start of the
        sequence. Only works with physical gates.

        Args:
            gate_voltages (dict): gates mapped to the voltage levels (mV) of the sequence
            waittimes (list): duration (s) of each pulse in the sequence
            reps (int): number of repetitions of the sequence
            filtercutoff (float or None): optional low-pass cutoff for make_pulses
            reset_to_zero (bool): if True, offset the voltages so the last point is
                0 V on all gates (NOTE: this modifies ``gate_voltages`` in place)
            delete (bool): if True, clear the AWG waveform lists first

        Returns:
            tuple: (waveform dict, sweep_info dict)
        """
        period = sum(waittimes)
        if reset_to_zero:
            for g in gate_voltages:
                gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]
        # shared voltage range so all gates use the same scaling
        allvoltages = np.concatenate([v for v in gate_voltages.values()])
        mvrange = [max(allvoltages), min(allvoltages)]
        waveform = dict()
        for g in gate_voltages:
            wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=reps,
                                        filtercutoff=filtercutoff, mvrange=mvrange)
            awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()
            wave = wave_raw / awg_to_plunger
            waveform[g] = dict()
            waveform[g]['wave'] = wave
            waveform[g]['name'] = 'pulses_%s' % g
        sweep_info = self.sweep_init(waveform, period, delete)
        self.sweep_run(sweep_info)
        # metadata used by the processing functions
        waveform['voltages'] = gate_voltages
        waveform['samplerate'] = 1 / self.AWG_clock
        waveform['waittimes'] = waittimes
        for channels in sweep_info:
            if 'delay' in sweep_info[channels]:
                waveform['markerdelay'] = sweep_info[channels]['delay']
        return waveform, sweep_info
def reset_AWG(self, clock=1e8):
self.AWG_clock = clock
for a in self._awgs:
a.clock_freq.set(clock)
a.trigger_mode.set('CONT')
a.trigger_source.set('INT')
for ii in range(1, 5):
f = getattr(a, 'ch%d_amp' % ii)
val = f()
if val != 4.0:
warnings.warn('AWG channel %d output not at 4.0 V' % ii)
if self.awg_seq is not None:
self._set_seq_mode(self.awg_seq)
def set_amplitude(self, amplitude):
if amplitude < 0.02:
warnings.warn('Trying to set AWG amplitude too low, setting it to minimum (20mV)')
amplitude = 0.02
elif amplitude > 4.5:
warnings.warn('Trying to set AWG amplitude too high, setting it to maximum (4.5V)')
amplitude = 4.5
self.ch_amp = round(amplitude, 3)
for awg in self._awgs:
for i in range(1, 5):
awg.set('ch%s_amp' % i, self.ch_amp)
def check_amplitude(self, gate, mvrange):
min_amp = mvrange / self.hardware.parameters['awg_to_%s' % gate].get()
if min_amp > 4:
raise(Exception('Sweep range of gate %s is larger than maximum allowed by the AWG' % gate))
if self.ch_amp < min_amp:
min_amp = np.ceil(min_amp * 10) / 10
self.set_amplitude(min_amp)
warnings.warn('AWG amplitude too low for this range, setting to %.1f' % min_amp)
def plot_wave_raw(wave_raw, samplerate=None, station=None):
    """Plot the raw wave.

    Args:
        wave_raw (array): raw data which represents the waveform
        samplerate (float or None): sample period in seconds; when None it is
            derived from the AWG clock of the provided station
        station (object or None): measurement station used to look up the AWG
            clock when samplerate is not given

    Returns:
        plot (QtPlot): the plot showing the data
    """
    if samplerate is None:
        if station is None:
            raise Exception('There is no station')
        # bugfix: instruments have no .getattr() method; use the builtin getattr
        samplerate = 1 / getattr(station.awg, 'AWG_clock')
    # index-based arange avoids a stray extra point from float accumulation
    horz_var = np.arange(len(wave_raw)) * samplerate
    x = DataArray(name='time(s)', label='time (s)',
                  preset_data=horz_var, is_setpoint=True)
    y = DataArray(
        label='sweep value (mV)', preset_data=wave_raw, set_arrays=(x,))
    plot = QtPlot(x, y)
    return plot
def sweep_2D_process(data, waveform, diff_dir=None):
    """Reshape raw 2D-sweep data into rows and trim the flyback portions.

    Args:
        data (list): the raw measured data
        waveform (dict): waveform metadata as returned by sweep_2D
        diff_dir: optional differentiation direction for diffImageSmooth

    Returns:
        list: one trimmed row of data per horizontal sweep line
    """
    n_horz = waveform['resolution'][0]
    width_horz = waveform['width_horz']
    width_vert = waveform['width_vert']
    # cut the stream into one chunk per horizontal sweep line
    rows = [data[start:start + n_horz] for start in range(0, len(data), n_horz)]
    # drop the first sample and the flyback tail of every row
    rows = [row[1:int(width_horz * len(row))] for row in rows]
    # drop the vertical flyback rows
    data_processed = rows[:int(width_vert * len(rows))]
    if diff_dir is not None:
        data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)
    return data_processed
| true | true |
f7362e1dc422d449f05e6af959692365df74ce49 | 5,374 | py | Python | artic/deprecated/plot_amplicon_depth.py | MarkusHaak/fieldbioinformatics | 3d291477a3d84968816c8e57e6078fc80135f422 | [
"MIT"
] | 72 | 2018-12-21T22:48:50.000Z | 2022-02-24T17:04:53.000Z | artic/deprecated/plot_amplicon_depth.py | MarkusHaak/fieldbioinformatics | 3d291477a3d84968816c8e57e6078fc80135f422 | [
"MIT"
] | 70 | 2020-02-05T13:39:09.000Z | 2022-03-29T01:47:19.000Z | artic/deprecated/plot_amplicon_depth.py | MarkusHaak/fieldbioinformatics | 3d291477a3d84968816c8e57e6078fc80135f422 | [
"MIT"
] | 54 | 2019-03-11T13:33:21.000Z | 2022-03-21T09:27:50.000Z | #!/usr/bin/env python3
"""
Plot the mean read depth per amplicon.
This has been written for use in the ARTIC pipeline so there are no file checks - it assumes the following:
* the primer scheme is in ARTIC format
* the input depth files are in the format: `chrom\treadgroup\tposition\tdepth
* readgroup equates to primer pool
* the primer pairs in the scheme are sorted by amplicon number (i.e. readgroups are interleaved)
* depth values are provided for all positions (see output of make_depth_mask.py for expected format)
"""
from .vcftagprimersites import read_bed_file
import sys
import pandas as pd
import numpy as np
import argparse
import os
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
import seaborn as sns
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def go(args):
    """Compute and plot the mean read depth per amplicon for one sample.

    Reads the ARTIC primer scheme, bins every depth file's positions by
    amplicon, and writes a bar plot and a box plot of the mean amplicon
    read depths (one depth file per read group).
    """
    # get the primer scheme
    primerScheme = read_bed_file(args.primerScheme)
    # number the amplicons in the scheme and link them to primer start site
    ampliconCounter = 1
    # store the amplicon number and starts by read group dict
    rgAmplicons = {}
    rgStarts = {}
    # process the primers by readgroup
    for primer in primerScheme:
        poolName = primer['PoolName']
        if poolName not in rgAmplicons:
            rgAmplicons[poolName] = []
            rgStarts[poolName] = []
        if primer['direction'] == '+':
            rgAmplicons[poolName].append(ampliconCounter)
            rgStarts[poolName].append(primer['start'])
            ampliconCounter += 1
    # for pandas cut func to create bins, we need to add an extra value to the starts (just use inf)
    for startList in rgStarts.values():
        startList.append(np.inf)
    # process the depth files
    dfs = {}
    for depthFile in args.depthFiles:
        # read in the depth file
        df = pd.read_csv(depthFile, sep='\t', header=None,
                         names=['refName', 'readGroup',
                                'position', 'depth'],
                         dtype={'refName': str, 'readGroup': str,
                                'position': int, 'depth': int},
                         usecols=(0, 1, 2, 3),)
        # check that there aren't too many positions in the depth data for plotting
        # assert len(df.index) < 30000, "error: too many data points to plot"
        # check all ref positions have a depth value
        startPos = df["position"][0]
        endPos = df["position"][df.index[-1]]
        assert len(df.index) == ((endPos - startPos) +
                                 1), "error: depth needs to be reported at all positions"
        # check the primer scheme contains the readgroup
        rgList = df.readGroup.unique()
        assert len(rgList) == 1, "error: depth file has %d readgroups, need 1 (%s)" % (
            len(rgList), depthFile)
        rg = rgList[0]
        assert rg in rgAmplicons, "error: readgroup not found in provided primer scheme (%s)" % (
            rg)
        # get the amplicon starts for this readgroup
        amplicons = sorted(rgAmplicons[rg])
        starts = sorted(rgStarts[rg])
        # bin read depths by amplicon for this readgroup
        df['amplicon'] = pd.cut(
            x=df['position'], bins=starts, labels=amplicons)
        # store the mean of each bin
        bins = (df.groupby(['amplicon'])[
            'depth'].mean()).rename(depthFile.name)
        # add to the pile
        assert rg not in dfs, "error: readgroup present in multiple files (%s)" % (
            rg)
        dfs[rg] = bins
    # combine the series data from each input file
    newDF = pd.concat(dfs, axis=1)
    newDF.sort_index(axis=0, inplace=True)
    newDF.reset_index(inplace=True)
    # melt the DF for seaborn
    newDF = newDF.melt("amplicon", var_name="read group",
                       value_name="mean amplicon read depth")
    newDF = newDF.dropna()
    # plot the bar
    g = sns.catplot(data=newDF,
                    x="amplicon",
                    y="mean amplicon read depth",
                    hue="read group",
                    height=4,
                    aspect=3,
                    kind="bar",
                    dodge=False,
                    legend=False)
    g.set(yscale="log")
    g.fig.suptitle(args.sampleID)
    plt.legend(loc='upper right')
    plt.xticks(rotation=45, size=6)
    plt.savefig(args.outFilePrefix + "-barplot.png")
    plt.close()
    # plot the box
    g = sns.catplot(data=newDF,
                    x="read group",
                    y="mean amplicon read depth",
                    kind="box")
    g.fig.suptitle(args.sampleID)
    plt.savefig(args.outFilePrefix + "-boxplot.png")
    plt.close()
def main():
    """Parse the command line and run the amplicon depth plotting."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--primerScheme', required=True, help='the ARTIC primer scheme')
    parser.add_argument('--sampleID', required=True, help='the sample ID for the provided depth files')
    parser.add_argument('--outFilePrefix', default="./amplicon-depth", help='the prefix to give the output plot file')
    parser.add_argument("depthFiles", type=argparse.FileType('r'), nargs='+',
                        help='the depth files produced by make_depth_mask.py')
    go(parser.parse_args())
if __name__ == "__main__":
    main()
| 34.896104 | 116 | 0.592668 |
from .vcftagprimersites import read_bed_file
import sys
import pandas as pd
import numpy as np
import argparse
import os
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
import seaborn as sns
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def go(args):
    """Compute and plot the mean read depth per amplicon for one sample.

    Reads the ARTIC primer scheme, bins every depth file's positions by
    amplicon, and writes a bar plot and a box plot of the mean amplicon
    read depths (one depth file per read group).
    """
    # get the primer scheme
    primerScheme = read_bed_file(args.primerScheme)
    # number the amplicons in the scheme and link them to primer start site
    ampliconCounter = 1
    # store the amplicon number and starts by read group dict
    rgAmplicons = {}
    rgStarts = {}
    # process the primers by readgroup
    for primer in primerScheme:
        poolName = primer['PoolName']
        if poolName not in rgAmplicons:
            rgAmplicons[poolName] = []
            rgStarts[poolName] = []
        if primer['direction'] == '+':
            rgAmplicons[poolName].append(ampliconCounter)
            rgStarts[poolName].append(primer['start'])
            ampliconCounter += 1
    # for pandas cut func to create bins, we need to add an extra value to the starts (just use inf)
    for startList in rgStarts.values():
        startList.append(np.inf)
    # process the depth files
    dfs = {}
    for depthFile in args.depthFiles:
        # read in the depth file
        df = pd.read_csv(depthFile, sep='\t', header=None,
                         names=['refName', 'readGroup',
                                'position', 'depth'],
                         dtype={'refName': str, 'readGroup': str,
                                'position': int, 'depth': int},
                         usecols=(0, 1, 2, 3),)
        # assert len(df.index) < 30000, "error: too many data points to plot"
        # check all ref positions have a depth value
        startPos = df["position"][0]
        endPos = df["position"][df.index[-1]]
        assert len(df.index) == ((endPos - startPos) +
                                 1), "error: depth needs to be reported at all positions"
        # check the primer scheme contains the readgroup
        rgList = df.readGroup.unique()
        assert len(rgList) == 1, "error: depth file has %d readgroups, need 1 (%s)" % (
            len(rgList), depthFile)
        rg = rgList[0]
        assert rg in rgAmplicons, "error: readgroup not found in provided primer scheme (%s)" % (
            rg)
        # get the amplicon starts for this readgroup
        amplicons = sorted(rgAmplicons[rg])
        starts = sorted(rgStarts[rg])
        # bin read depths by amplicon for this readgroup
        df['amplicon'] = pd.cut(
            x=df['position'], bins=starts, labels=amplicons)
        # store the mean of each bin
        bins = (df.groupby(['amplicon'])[
            'depth'].mean()).rename(depthFile.name)
        # add to the pile
        assert rg not in dfs, "error: readgroup present in multiple files (%s)" % (
            rg)
        dfs[rg] = bins
    # combine the series data from each input file
    newDF = pd.concat(dfs, axis=1)
    newDF.sort_index(axis=0, inplace=True)
    newDF.reset_index(inplace=True)
    # melt the DF for seaborn
    newDF = newDF.melt("amplicon", var_name="read group",
                       value_name="mean amplicon read depth")
    newDF = newDF.dropna()
    # plot the bar
    g = sns.catplot(data=newDF,
                    x="amplicon",
                    y="mean amplicon read depth",
                    hue="read group",
                    height=4,
                    aspect=3,
                    kind="bar",
                    dodge=False,
                    legend=False)
    g.set(yscale="log")
    g.fig.suptitle(args.sampleID)
    plt.legend(loc='upper right')
    plt.xticks(rotation=45, size=6)
    plt.savefig(args.outFilePrefix + "-barplot.png")
    plt.close()
    # plot the box
    g = sns.catplot(data=newDF,
                    x="read group",
                    y="mean amplicon read depth",
                    kind="box")
    g.fig.suptitle(args.sampleID)
    plt.savefig(args.outFilePrefix + "-boxplot.png")
    plt.close()
def main():
    """Parse the command line and run the amplicon depth plotting."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--primerScheme', required=True, help='the ARTIC primer scheme')
    parser.add_argument('--sampleID', required=True, help='the sample ID for the provided depth files')
    parser.add_argument('--outFilePrefix', default="./amplicon-depth", help='the prefix to give the output plot file')
    parser.add_argument("depthFiles", type=argparse.FileType('r'), nargs='+',
                        help='the depth files produced by make_depth_mask.py')
    go(parser.parse_args())
if __name__ == "__main__":
    main()
| true | true |
f736303816e77fc8b6b0357f24619105dad9aa31 | 1,043 | py | Python | 2_Scraper-Scripts/RunThis.py | kshahnazari1998/SociaGrow-Public | 54042634ad8d16ea044bfd1fe265d68e1d74102b | [
"MIT"
] | null | null | null | 2_Scraper-Scripts/RunThis.py | kshahnazari1998/SociaGrow-Public | 54042634ad8d16ea044bfd1fe265d68e1d74102b | [
"MIT"
] | null | null | null | 2_Scraper-Scripts/RunThis.py | kshahnazari1998/SociaGrow-Public | 54042634ad8d16ea044bfd1fe265d68e1d74102b | [
"MIT"
] | null | null | null | import subprocess
import time
from VpnConnect import VpnConnects
# NOTE(review): presumably constructing VpnConnects establishes the VPN
# connection as a side effect — confirm in the VpnConnect module.
vpc = VpnConnects()
# Endless supervisor loop: run the status checker, then the scraper managers,
# then kill everything (including stray Chrome processes) and start over.
while True:
    time.sleep(10)
    # run the target-account status checker for one hour, then stop it
    processf2 = subprocess.Popen(["python", "Targetaccountstatus.py"])
    time.sleep(3600)
    processf2.kill()
    # Scrape User
    processmain = subprocess.Popen(["python", "ManagerAdmin.py"])
    time.sleep(45)
    print("Opened The SubProcesses")
    # launch the five scraper workers in parallel
    process1 = subprocess.Popen(["python", "Manager1.py"])
    process2 = subprocess.Popen(["python", "Manager2.py"])
    process3 = subprocess.Popen(["python", "Manager3.py"])
    process4 = subprocess.Popen(["python", "Manager4.py"])
    process5 = subprocess.Popen(["python", "Manager5.py"])
    # let the workers run for 100 minutes, then stop all of them
    time.sleep(6000)
    processmain.kill()
    process1.kill()
    process2.kill()
    process3.kill()
    process4.kill()
    process5.kill()
    print("Killed the Managers")
    time.sleep(10)
    # force-close any Chrome/chromedriver instances left behind (Windows-only commands)
    subprocess.call("TASKKILL /f /IM CHROME.EXE")
    subprocess.call("TASKKILL /f /IM CHROMEDRIVER.EXE")
    time.sleep(45)
| 24.255814 | 71 | 0.634708 | import subprocess
import time
from VpnConnect import VpnConnects
vpc = VpnConnects()
while True:
time.sleep(10)
processf2 = subprocess.Popen(["python", "Targetaccountstatus.py"])
time.sleep(3600)
processf2.kill()
processmain = subprocess.Popen(["python", "ManagerAdmin.py"])
time.sleep(45)
print("Opened The SubProcesses")
process1 = subprocess.Popen(["python", "Manager1.py"])
process2 = subprocess.Popen(["python", "Manager2.py"])
process3 = subprocess.Popen(["python", "Manager3.py"])
process4 = subprocess.Popen(["python", "Manager4.py"])
process5 = subprocess.Popen(["python", "Manager5.py"])
time.sleep(6000)
processmain.kill()
process1.kill()
process2.kill()
process3.kill()
process4.kill()
process5.kill()
print("Killed the Managers")
time.sleep(10)
subprocess.call("TASKKILL /f /IM CHROME.EXE")
subprocess.call("TASKKILL /f /IM CHROMEDRIVER.EXE")
time.sleep(45)
| true | true |
f73632ee2a6251e65ea96df144d8af26abd3cbc3 | 1,818 | py | Python | python/heliosUpdateTargetSecretKey/heliosUpdateTargetSecretKey.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | python/heliosUpdateTargetSecretKey/heliosUpdateTargetSecretKey.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | python/heliosUpdateTargetSecretKey/heliosUpdateTargetSecretKey.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Helios Update Secret Key for External Targets"""
from pyhesity import *
from datetime import datetime
import getpass
### command line arguments
import argparse
# Parse command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-pwd', '--password', type=str)
parser.add_argument('-a', '--accesskey', type=str, required=True)
parser.add_argument('-s', '--secretkey', type=str, default='')
args = parser.parse_args()
username = args.username
password = args.password
accesskey = args.accesskey
secretkey = args.secretkey
# Prompt interactively until a secret key of at least 2 characters is given.
while secretkey is None or len(secretkey) < 2:
    secretkey = getpass.getpass("Please enter the secretkey: ")
### authenticate
apiauth(vip='helios.cohesity.com', username=username, domain='local', password=password)
# Write a dated report of every vault that gets updated.
now = datetime.now()
dateString = now.strftime("%Y-%m-%d")
f = open('vaults-updated-%s.txt' % dateString, 'w')
# Walk every cluster registered in Helios and update matching targets.
for hcluster in heliosClusters():
    heliosCluster(hcluster['name'])
    cluster = api('get', 'cluster')
    if cluster:
        print('%s' % hcluster['name'])
        f.write('%s\n' % hcluster['name'])
        vaults = api('get', 'vaults')
        if len(vaults) > 0:
            # Keep only Amazon external targets that use the given access key.
            vaults = [v for v in vaults if 'amazon' in v['config'] and v['config']['amazon']['accessKeyId'] == accesskey]
            for vault in vaults:
                print('    updating key for target: %s...' % vault['name'])
                f.write('    updating key for target: %s...\n' % vault['name'])
                # Set the new secret key and push the vault config back.
                vault['config']['amazon']['secretAccessKey'] = secretkey
                result = api('put', 'vaults/%s' % vault['id'], vault)
    else:
        # The cluster did not answer; record the failure and move on.
        print('%s (trouble accessing cluster)' % hcluster['name'])
        f.write('%s (trouble accessing cluster)\n' % hcluster['name'])
f.close()
| 35.647059 | 121 | 0.636964 |
from pyhesity import *
from datetime import datetime
import getpass
()
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-pwd', '--password', type=str)
parser.add_argument('-a', '--accesskey', type=str, required=True)
parser.add_argument('-s', '--secretkey', type=str, default='')
args = parser.parse_args()
username = args.username
password = args.password
accesskey = args.accesskey
secretkey = args.secretkey
while secretkey is None or len(secretkey) < 2:
secretkey = getpass.getpass("Please enter the secretkey: ")
.com', username=username, domain='local', password=password)
now = datetime.now()
dateString = now.strftime("%Y-%m-%d")
f = open('vaults-updated-%s.txt' % dateString, 'w')
for hcluster in heliosClusters():
heliosCluster(hcluster['name'])
cluster = api('get', 'cluster')
if cluster:
print('%s' % hcluster['name'])
f.write('%s\n' % hcluster['name'])
vaults = api('get', 'vaults')
if len(vaults) > 0:
vaults = [v for v in vaults if 'amazon' in v['config'] and v['config']['amazon']['accessKeyId'] == accesskey]
for vault in vaults:
print(' updating key for target: %s...' % vault['name'])
f.write(' updating key for target: %s...\n' % vault['name'])
vault['config']['amazon']['secretAccessKey'] = secretkey
result = api('put', 'vaults/%s' % vault['id'], vault)
else:
print('%s (trouble accessing cluster)' % hcluster['name'])
f.write('%s (trouble accessing cluster)\n' % hcluster['name'])
f.close()
| true | true |
f73632faf3e8e654e58ba098197bb5b21ddef5a6 | 6,133 | py | Python | managesf/controllers/api/v2/base.py | softwarefactory-project/managesf | 7018d041291f50b90e782ca31d0cfc67abd10170 | [
"Apache-2.0"
] | 1 | 2018-08-02T23:30:03.000Z | 2018-08-02T23:30:03.000Z | managesf/controllers/api/v2/base.py | softwarefactory-project/managesf | 7018d041291f50b90e782ca31d0cfc67abd10170 | [
"Apache-2.0"
] | 1 | 2021-12-13T18:24:10.000Z | 2021-12-13T20:10:39.000Z | managesf/controllers/api/v2/base.py | softwarefactory-project/managesf | 7018d041291f50b90e782ca31d0cfc67abd10170 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2017 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os.path
import re
from pecan import conf
from pecan import request, response, abort, expose
from pecan.rest import RestController
from managesf.model.yamlbkd.engine import SFResourceBackendEngine
from managesf import policy
# TODO do it with v2
from managesf.model import SFUserCRUD
from git.exc import GitCommandError
logger = logging.getLogger(__name__)
# TODO move to managesf.api once users API is started
def get_user_groups(username):
    """Return the names of resource groups whose members include *username*."""
    email = SFUserCRUD().get(username=username).get('email')
    logger.info('Found email %s for username %s' % (email, username))
    engine = SFResourceBackendEngine(
        os.path.join(conf.resources['workdir'], 'read'),
        conf.resources['subdir'])
    try:
        resources = engine.get(conf.resources['master_repo'], 'master')
    except GitCommandError:
        logger.info("Unable to read groups from the resources engine.")
        logger.info("It is probably because we are boostrapping SF.")
        return []
    groups = resources['resources'].get('groups', {})
    member_of = []
    for group_name in groups:
        if email in groups[group_name]['members']:
            member_of.append(group_name)
    return member_of
def authorize(rule_name, target):
    """Evaluate policy *rule_name* against *target* for the current request."""
    # Fall back to the X-Remote-User header when pecan did not set the user.
    if not request.remote_user:
        request.remote_user = request.headers.get('X-Remote-User')
    user = request.remote_user
    groups = get_user_groups(user) if user else []
    return policy.authorize(rule_name, target,
                            {'username': user, 'groups': groups})
class APIv2RestController(RestController):
    """Base class for v2 REST controllers.

    Only adds a per-subclass logger named after the concrete controller.
    """
    def __init__(self, *args, **kwargs):
        super(APIv2RestController, self).__init__(*args, **kwargs)
        # Logger name ends with the subclass name, e.g.
        # "managesf.v2.controllers.MyController".
        self._logger = logging.getLogger(
            'managesf.v2.controllers.%s' % self.__class__.__name__)
class APIv2RestProxyController(APIv2RestController):
    """Generic controller that authorizes and proxies REST calls to a backend.

    Subclasses set ``manager`` (the backend client exposing get/post/put/
    delete) and ``policies_map`` (regex over "<verb> <path>" -> policy rule
    name), and may override ``_policy_target`` to build the policy target.
    """
    # Backend service client; when left as None every call answers 404.
    manager = None
    # Maps a regex over "<verb> <path>" to a policy rule; named groups in
    # the regex become policy target elements.
    policies_map = {'get .+/path/to/(?P<x>.+)/command': 'managesf.policy.name'}
    def _find_policy(self, lookup):
        """Find policy according to REST path.

        Returns {'policy': ..., 'target_elements': ...} for the first
        matching pattern, or an empty dict when nothing matches.
        """
        for expr in self.policies_map:
            regex = re.compile(expr)
            # NOTE(review): the pattern is searched twice here; the first
            # match object could be stored and reused.
            if regex.search(lookup):
                target_elements = regex.search(lookup).groupdict()
                return {'policy': self.policies_map[expr],
                        'target_elements': target_elements}
        return {}
    def _policy_target(self, verb, target_elements, *args, **kwargs):
        """Build the policy target dict; subclasses override as needed."""
        # override me
        target = {}
        return target
    # This makes the assumption that backend services always return JSON.
    # This is true for all of them except gerrit, which will not be covered
    # this way.
    def _do(self, verb):
        """Return a closure that authorizes and proxies *verb* requests."""
        def action(*args, **kwargs):
            if not self.manager:
                return abort(404,
                             detail='This service is not configured.')
            path = request.path
            lookup = ("%s %s" % (verb, path))
            pol_scan = self._find_policy(lookup)
            pol, target_elements = None, {}
            if pol_scan:
                pol = pol_scan['policy']
                target_elements = pol_scan['target_elements']
            # Pull the payload either from the JSON body or from the
            # query/form parameters.
            if not kwargs and request.content_length:
                if 'json' in request.content_type:
                    kwargs = request.json
                else:
                    kwargs = request.params
            target = self._policy_target(verb, target_elements,
                                         *args, **kwargs)
            if not pol:
                # Unknown endpoint, default behavior is to forbid access
                pol = 'rule:none'
            if not authorize(pol, target=target):
                return abort(401,
                             detail='Failure to comply with policy %s' % pol)
            # HACK The RestController's routing method seems to discard
            # extensions on the last remainder. This is a dirty fix
            # NOTE(review): when *args* is empty, args[-1] raises IndexError;
            # confirm routing always supplies at least one remainder here.
            full_path = request.path_url
            last_arg = full_path.split('/')[-1]
            if last_arg and args[-1] != last_arg:
                args = args[:-1] + (last_arg, )
            if request.content_length and 'json' in request.content_type:
                proxied_response = getattr(self.manager, verb)(
                    *args, json=kwargs)
                self._logger.debug(
                    "calling passthrough manager with "
                    "args: '%s', json: '%s'" % (args, kwargs))
            elif kwargs:
                proxied_response = getattr(self.manager, verb)(
                    *args, params=kwargs)
                self._logger.debug(
                    "calling passthrough manager with "
                    "args: '%s', params: '%s'" % (args, kwargs))
            else:
                proxied_response = getattr(self.manager, verb)(*args)
            # Mirror the backend status code; on error propagate it upstream.
            response.status = proxied_response.status_code
            if int(proxied_response.status_code) > 399:
                response.text = proxied_response.text
                return abort(proxied_response.status_code)
            else:
                return proxied_response.json()
        return action
    @expose('json')
    def get(self, *args, **kwargs):
        """Proxy a GET request to the backend service."""
        return self._do('get')(*args, **kwargs)
    @expose('json')
    def post(self, *args, **kwargs):
        """Proxy a POST request to the backend service."""
        return self._do('post')(*args, **kwargs)
    @expose('json')
    def put(self, *args, **kwargs):
        """Proxy a PUT request to the backend service."""
        return self._do('put')(*args, **kwargs)
    @expose('json')
    def delete(self, *args, **kwargs):
        """Proxy a DELETE request to the backend service."""
        return self._do('delete')(*args, **kwargs)
| 38.093168 | 79 | 0.608022 |
import logging
import os.path
import re
from pecan import conf
from pecan import request, response, abort, expose
from pecan.rest import RestController
from managesf.model.yamlbkd.engine import SFResourceBackendEngine
from managesf import policy
from managesf.model import SFUserCRUD
from git.exc import GitCommandError
logger = logging.getLogger(__name__)
def get_user_groups(username):
user_email = SFUserCRUD().get(username=username).get('email')
logger.info('Found email %s for username %s' % (user_email, username))
resources_engine = SFResourceBackendEngine(
os.path.join(conf.resources['workdir'], 'read'),
conf.resources['subdir'])
try:
resources = resources_engine.get(
conf.resources['master_repo'], 'master')
except GitCommandError:
logger.info("Unable to read groups from the resources engine.")
logger.info("It is probably because we are boostrapping SF.")
return []
groups = resources['resources'].get('groups', {})
return [g for g in groups if user_email in groups[g]['members']]
def authorize(rule_name, target):
if not request.remote_user:
request.remote_user = request.headers.get('X-Remote-User')
credentials = {'username': request.remote_user, 'groups': []}
if request.remote_user:
credentials['groups'] = get_user_groups(request.remote_user)
return policy.authorize(rule_name, target, credentials)
class APIv2RestController(RestController):
def __init__(self, *args, **kwargs):
super(APIv2RestController, self).__init__(*args, **kwargs)
self._logger = logging.getLogger(
'managesf.v2.controllers.%s' % self.__class__.__name__)
class APIv2RestProxyController(APIv2RestController):
manager = None
policies_map = {'get .+/path/to/(?P<x>.+)/command': 'managesf.policy.name'}
def _find_policy(self, lookup):
for expr in self.policies_map:
regex = re.compile(expr)
if regex.search(lookup):
target_elements = regex.search(lookup).groupdict()
return {'policy': self.policies_map[expr],
'target_elements': target_elements}
return {}
def _policy_target(self, verb, target_elements, *args, **kwargs):
target = {}
return target
def _do(self, verb):
def action(*args, **kwargs):
if not self.manager:
return abort(404,
detail='This service is not configured.')
path = request.path
lookup = ("%s %s" % (verb, path))
pol_scan = self._find_policy(lookup)
pol, target_elements = None, {}
if pol_scan:
pol = pol_scan['policy']
target_elements = pol_scan['target_elements']
if not kwargs and request.content_length:
if 'json' in request.content_type:
kwargs = request.json
else:
kwargs = request.params
target = self._policy_target(verb, target_elements,
*args, **kwargs)
if not pol:
pol = 'rule:none'
if not authorize(pol, target=target):
return abort(401,
detail='Failure to comply with policy %s' % pol)
# extensions on the last remainder. This is a dirty fix
full_path = request.path_url
last_arg = full_path.split('/')[-1]
if last_arg and args[-1] != last_arg:
args = args[:-1] + (last_arg, )
if request.content_length and 'json' in request.content_type:
proxied_response = getattr(self.manager, verb)(
*args, json=kwargs)
self._logger.debug(
"calling passthrough manager with "
"args: '%s', json: '%s'" % (args, kwargs))
elif kwargs:
proxied_response = getattr(self.manager, verb)(
*args, params=kwargs)
self._logger.debug(
"calling passthrough manager with "
"args: '%s', params: '%s'" % (args, kwargs))
else:
proxied_response = getattr(self.manager, verb)(*args)
response.status = proxied_response.status_code
if int(proxied_response.status_code) > 399:
response.text = proxied_response.text
return abort(proxied_response.status_code)
else:
return proxied_response.json()
return action
@expose('json')
def get(self, *args, **kwargs):
return self._do('get')(*args, **kwargs)
@expose('json')
def post(self, *args, **kwargs):
return self._do('post')(*args, **kwargs)
@expose('json')
def put(self, *args, **kwargs):
return self._do('put')(*args, **kwargs)
@expose('json')
def delete(self, *args, **kwargs):
return self._do('delete')(*args, **kwargs)
| true | true |
f73633465de3445622fe64746dfb0a3abd0dc8d9 | 4,477 | py | Python | sponge-kb/sponge-kb-mpd-mpc/src/main/resources/sponge/mpd-mpc/mpd_mpc_player.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 9 | 2017-12-16T21:48:57.000Z | 2022-01-06T12:22:24.000Z | sponge-kb/sponge-kb-mpd-mpc/src/main/resources/sponge/mpd-mpc/mpd_mpc_player.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 3 | 2020-12-18T11:56:46.000Z | 2022-03-31T18:37:10.000Z | sponge-kb/sponge-kb-mpd-mpc/src/main/resources/sponge/mpd-mpc/mpd_mpc_player.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 2 | 2019-12-29T16:08:32.000Z | 2020-06-15T14:05:34.000Z | """
Sponge Knowledge Base
MPD player.
"""
class MpdPlayer(Action):
    """Sponge UI action for the MPD player.

    The action is non-callable: it only declares provided/submittable
    arguments that the client renders as a player screen. Values are
    refreshed on the events listed in the "refreshEvents" feature.
    """
    def onConfigure(self):
        """Declare the label, the argument metadata and the UI features."""
        self.withLabel("Player").withDescription("The MPD player.")
        self.withArgs([
            # The song info arguments (read-only, provided by the server).
            StringType("song").withLabel("Song").withNullable().withReadOnly()
            .withProvided(ProvidedMeta().withValue()),
            StringType("album").withLabel("Album").withNullable().withReadOnly()
            .withProvided(ProvidedMeta().withValue()),
            StringType("date").withLabel("Date").withNullable().withReadOnly()
            .withProvided(ProvidedMeta().withValue()),
            # The position arguments (slider plus elapsed-time display).
            IntegerType("position").withLabel("Position").withNullable().withAnnotated()
            .withMinValue(0).withMaxValue(100)
            .withFeatures({"widget":"slider", "group":"position"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable()),
            StringType("time").withLabel("Time").withNullable().withReadOnly()
            .withFeatures({"group":"position"}).withProvided(ProvidedMeta().withValue()),
            # The navigation arguments (prev / play-pause / next buttons).
            VoidType("prev").withLabel("Previous").withAnnotated()
            .withFeatures({"icon":IconInfo().withName("skip-previous").withSize(30),
                           "group":"navigation", "align":"center"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable()),
            BooleanType("play").withLabel("Play").withAnnotated().withFeatures({"group":"navigation"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
            VoidType("next").withLabel("Next").withAnnotated()
            .withFeatures({"icon":IconInfo().withName("skip-next").withSize(30), "group":"navigation"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable()),
            # The volume argument.
            IntegerType("volume").withLabel("Volume").withAnnotated().withMinValue(0).withMaxValue(100)
            .withFeatures({"widget":"slider"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
            # The mode arguments (toggle buttons mirroring MPD playback modes).
            BooleanType("repeat").withLabel("Repeat").withAnnotated()
            .withFeatures({"group":"mode", "widget":"toggleButton", "icon":"repeat", "align":"right"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
            BooleanType("single").withLabel("Single").withAnnotated()
            .withFeatures({"group":"mode", "widget":"toggleButton", "icon":"numeric-1"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
            BooleanType("random").withLabel("Random").withAnnotated()
            .withFeatures({"group":"mode", "widget":"toggleButton", "icon":"shuffle"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
            BooleanType("consume").withLabel("Consume").withAnnotated()
            .withFeatures({"group":"mode", "widget":"toggleButton", "icon":"pac-man"})
            .withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate())
        ]).withNonCallable().withActivatable()
        self.withFeatures({"refreshEvents":["statusPolling", "mpdNotification_.*"], "icon":"music", "contextActions":[
            SubAction("MpdPlaylist"),
            SubAction("MpdFindAndAddToPlaylist"),
            SubAction("ViewSongInfo"),
            SubAction("ViewSongLyrics"),
            SubAction("MpdLibrary"),
            SubAction("ViewMpdStatus"),
        ]})
    def onIsActive(self, context):
        """The action is active only while the MPC connection is up."""
        return sponge.getVariable("mpc").isConnected()
    def onProvideArgs(self, context):
        """This callback method:
        a) Modifies the MPD state by using the argument values submitted by the user. The names
        are specified in the context.submit set, the values are stored in the context.current map.
        b) Sets the values of arguments that are to be provided to the client. The names are
        specified in the context.provide set, the values are to be stored in the context.provided map.
        """
        MpdPlayerProvideArgsRuntime(context).run()
| 58.142857 | 118 | 0.624972 |
class MpdPlayer(Action):
def onConfigure(self):
self.withLabel("Player").withDescription("The MPD player.")
self.withArgs([
StringType("song").withLabel("Song").withNullable().withReadOnly()
.withProvided(ProvidedMeta().withValue()),
StringType("album").withLabel("Album").withNullable().withReadOnly()
.withProvided(ProvidedMeta().withValue()),
StringType("date").withLabel("Date").withNullable().withReadOnly()
.withProvided(ProvidedMeta().withValue()),
IntegerType("position").withLabel("Position").withNullable().withAnnotated()
.withMinValue(0).withMaxValue(100)
.withFeatures({"widget":"slider", "group":"position"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable()),
StringType("time").withLabel("Time").withNullable().withReadOnly()
.withFeatures({"group":"position"}).withProvided(ProvidedMeta().withValue()),
VoidType("prev").withLabel("Previous").withAnnotated()
.withFeatures({"icon":IconInfo().withName("skip-previous").withSize(30),
"group":"navigation", "align":"center"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable()),
BooleanType("play").withLabel("Play").withAnnotated().withFeatures({"group":"navigation"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
VoidType("next").withLabel("Next").withAnnotated()
.withFeatures({"icon":IconInfo().withName("skip-next").withSize(30), "group":"navigation"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable()),
IntegerType("volume").withLabel("Volume").withAnnotated().withMinValue(0).withMaxValue(100)
.withFeatures({"widget":"slider"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
BooleanType("repeat").withLabel("Repeat").withAnnotated()
.withFeatures({"group":"mode", "widget":"toggleButton", "icon":"repeat", "align":"right"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
BooleanType("single").withLabel("Single").withAnnotated()
.withFeatures({"group":"mode", "widget":"toggleButton", "icon":"numeric-1"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
BooleanType("random").withLabel("Random").withAnnotated()
.withFeatures({"group":"mode", "widget":"toggleButton", "icon":"shuffle"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate()),
BooleanType("consume").withLabel("Consume").withAnnotated()
.withFeatures({"group":"mode", "widget":"toggleButton", "icon":"pac-man"})
.withProvided(ProvidedMeta().withValue().withOverwrite().withSubmittable().withLazyUpdate())
]).withNonCallable().withActivatable()
self.withFeatures({"refreshEvents":["statusPolling", "mpdNotification_.*"], "icon":"music", "contextActions":[
SubAction("MpdPlaylist"),
SubAction("MpdFindAndAddToPlaylist"),
SubAction("ViewSongInfo"),
SubAction("ViewSongLyrics"),
SubAction("MpdLibrary"),
SubAction("ViewMpdStatus"),
]})
def onIsActive(self, context):
return sponge.getVariable("mpc").isConnected()
def onProvideArgs(self, context):
MpdPlayerProvideArgsRuntime(context).run()
| true | true |
f7363416c8ea9009adc45b2b90d259e589d247e0 | 14,015 | py | Python | gsom.py | cperales/pygsom | ac4d4818f441d862cb5183e1d2ea814e3f805759 | [
"MIT"
] | null | null | null | gsom.py | cperales/pygsom | ac4d4818f441d862cb5183e1d2ea814e3f805759 | [
"MIT"
] | null | null | null | gsom.py | cperales/pygsom | ac4d4818f441d862cb5183e1d2ea814e3f805759 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015 Philipp Ludwig <git@philippludwig.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""@package GSOM
This is an implementation of the growing self-organizing map.
Different possible approaches for the GSOM have been presented in the past
by various researchers. To make things clear, this implementation is based
on the one described in the work of:
Alahakoon, Damminda, S. Halgamuge, and Bala Srinivasan:
"Dynamic self-organizing maps with controlled growth for knowledge discovery."
Neural Networks, IEEE Transactions on 11.3 (2000): 601-614.
Sadly, this article is not as comprehensive as desirable. Therefore this
implementation should not be taken as a reference, but as a best-effort
version. Some details of the algorithm have been assembled based on the
work of Mengxue Cao et. al, who described their approach within their work:
"Growing Self-Organizing Map Approach for Semantic Acquisition Modeling
way within their work"
Refer to both papers for further details.
Additionally, this algorithm picks up some of the aspects proposed in the
work of:
Andreas Nürnberger and Marcin Detyniecki:
"Externally growing self-organizing maps and its application to e-mail
database visualization and exploration"
"""
import itertools
import math
import random
from math import log, exp

import numpy
import scipy
class GSOMNode:
    """ Represents one node in a growing SOM.

    A node carries a weight vector, bookkeeping about when it was inserted
    and when it last won, and links to its four topological neighbours.
    """
    # Shared RNG used to initialize every node's weight vector.
    R = random.Random()

    def __init__(self, dim, x, y, data):
        """ Initialize this node.

        dim  -- dimension of the weight vector.
        x, y -- grid coordinates of the node.
        data -- best-matching data element (re-assigned during training).
        """
        # Initialize the weight vector with random values between 0 and 1.
        # numpy is used directly here: the scipy.array alias was a deprecated
        # NumPy re-export and has been removed from modern SciPy releases.
        self.weights = numpy.array([self.R.random() for _ in range(dim)])
        # Quantization error accumulated at this particular node.
        self.error = 0.0
        # Number of the iteration during which the node has been inserted.
        self.it = 0
        # Number of the last iteration where the node has won.
        self.last_it = 0
        # Holds the best-matching data.
        self.data = data
        self.last_changed = 0
        # This node has no neighbours yet.
        self.right = None
        self.left = None
        self.up = None
        self.down = None
        # Copy the given coordinates.
        self.x, self.y = x, y

    def adjust_weights(self, target, learn_rate):
        """ Move the weight vector towards *target* by *learn_rate*. """
        # Vectorized form of: w[i] += lr * (target[i] - w[i]) for all i.
        self.weights += learn_rate * (numpy.asarray(target, dtype=float)
                                      - self.weights)

    def is_boundary(self):
        """ Check if this node is at the boundary of the map. """
        if not self.right: return True
        if not self.left: return True
        if not self.up: return True
        if not self.down: return True
        return False
class GSOM:
""" Represents a growing self-organizing map. """
@staticmethod
def _distance(v1, v2):
""" Calculate the euclidean distance between two scipy arrays."""
dist = 0.0
for v, w in zip(v1, v2):
dist += pow(v - w, 2)
return dist
def _find_bmu(self, vec):
""" Find the best matching unit within the map for the given input_
vector. """
dist=float("inf")
winner = False
for node in self.nodes:
d = self._distance(vec, node.weights)
if d < dist:
dist = d
winner = node
return winner
def _find_similar_boundary(self, node):
""" Find the most similar boundary node to the given node. """
dist = float("inf")
winner = False
for boundary in self.nodes:
if not boundary.is_boundary(): continue
if boundary == node: continue
d = self._distance(node.weights, boundary.weights)
if d < dist:
dist = d
winner = node
return winner
def __init__(self, X, y, spread_factor=0.5):
""" Initializes this GSOM using the given data. """
# Assign the data
self.data = []
for fn, t in zip(X, y):
arr = scipy.array([t])
self.data.append([fn, arr])
# Determine the dimension of the data.
self.dim = len(self.data[0][0])
# Calculate the growing threshold:
self._GT = -self.dim * math.log(spread_factor, 2)
# Create the 4 starting Nodes.
self.nodes = []
n00 = GSOMNode(dim=self.dim, x=0, y=0, data=self.data)
n01 = GSOMNode(self.dim, 0, 1, self.data)
n10 = GSOMNode(self.dim, 1, 0, self.data)
n11 = GSOMNode(self.dim, 1, 1, self.data)
self.nodes.extend([n00, n01, n10, n11])
# Create starting topology
n00.right = n10
n00.up = n01
n01.right = n11
n01.down = n00
n10.up = n11
n10.left = n00
n11.left = n01
n11.down = n10
# Set properties
self.it = 0 # Current iteration
self.max_it = len(self.data)
self.num_it = 1000 # Total iterations
self.init_lr = 0.1 # Initial value of the learning rate
self.alpha = 0.1
self.output = open("gsom.csv", "w")
def train(self):
# Select the next input_.
input_ = random.choice(self.data)[1]
input_ = random.choice(self.data)[0]
# Calculate the learn rate.
# Note that the learning rate, according to the original paper,
# is reseated for every new input_.
learn_rate = self.init_lr * self.alpha * (1 - 1.5/len(self.nodes))
# We now present the input_ several times to the network.
# It is unclear what's a good number here, since no publication
# took the effort to name a value. However, the implementation
# provided by Arkadi Kagan presents the input_ 20 times, so we
# will copy that here.
recalc_nodes = []
for _ in range(20):
# Find the best matching unit
BMU = self._find_bmu(input_)
BMU.last_it = self.it
# Adapt the weights of the direct topological neighbours
neighbours = []
neighbours.append(BMU)
if BMU.left: neighbours.append(BMU.left)
if BMU.right: neighbours.append(BMU.right)
if BMU.up: neighbours.append(BMU.up)
if BMU.down: neighbours.append(BMU.down)
if BMU not in recalc_nodes: recalc_nodes.append(BMU)
for node in neighbours:
node.adjust_weights(input_, learn_rate)
if node not in recalc_nodes: recalc_nodes.append(node)
# Calculate the error.
err = self._distance(BMU.weights, input_)
# Add the error to the node.
growing, nodes = self._node_add_error(BMU, err)
if growing: recalc_nodes.extend(nodes)
# Count the iteration
self.it += 1
# Re-Calc representative data elements for changed nodes.
used_data = []
for node in self.nodes:
used_data.append(node.data)
for node in recalc_nodes:
dist = float("inf")
winner = False
winner_fn = False
for fn, point in self.data:
# if fn in used_data: continue
d = self._distance(point, node.weights)
if(d < dist):
dist = d
winner = point
winner_fn = fn
if node.data != winner_fn:
node.data = winner_fn
node.last_changed = self.it
self.output.write(str(node.x) + "," + str(node.y)\
+ ",change\n")
used_data.append(winner_fn)
# Remove unused nodes.
self._remove_unused_nodes()
def _node_add_error(self, node, error):
""" Add the given error to the error value of the given node.
This will also take care of growing the map (if necessary) and
distributing the error along the neighbours (if necessary) """
node.error += error
# Consider growing
if node.error > self._GT:
if not node.is_boundary():
# Find the boundary node which is most similar to this node.
node = self._find_similar_boundary(node)
if not node:
print("GSOM: Error: No free boundary node found!")
""" Old method:
# Distribute the error along the neighbours.
# Since this is not a boundary node, this node must have
# 4 neighbours.
node.error = 0.5 * self._GT
node.left.error += 0.25 * node.left.error
node.right.error += 0.25 * node.right.error
node.up.error += 0.25 * node.up.error
node.down.error += 0.25 * node.down.error
"""
nodes = self._grow(node)
return True, nodes
return False, 0
def _grow(self, node):
""" Grow this GSOM. """
# We grow this GSOM at every possible direction.
nodes = []
if node.left is None:
nn = self._insert(node.x - 1, node.y, node)
nodes.append(nn)
print("Growing left at: (" + str(node.x) + "," + str(node.y)\
+ ") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
if node.right is None:
nn = self._insert(node.x + 1, node.y, node)
nodes.append(nn)
print("Growing right at: (" + str(node.x) + "," + str(node.y)\
+ ") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
if node.up is None:
nn = self._insert(node.x, node.y + 1, node)
nodes.append(nn)
print("Growing up at: (" + str(node.x) + "," + str(node.y) +\
") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
if node.down is None:
nn = self._insert(node.x, node.y - 1, node)
nodes.append(nn)
print("Growing down at: (" + str(node.x) + "," + str(node.y) +\
") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
return nodes
def _insert(self, x, y, init_node):
# Create new node
new_node = GSOMNode(self.dim, x, y, self.data)
self.nodes.append(new_node)
# Save the number of the current iteration. We need this to prune
# this node later (if neccessary).
new_node.it = new_node.last_it = self.it
# Create the connections to possible neighbouring nodes.
for node in self.nodes:
# Left, Right, Up, Down
if node.x == x - 1 and node.y == y:
new_node.left = node
node.right = new_node
if node.x == x + 1 and node.y == y:
new_node.right = node
node.left = new_node
if node.x == x and node.y == y + 1:
new_node.up = node
node.down = new_node
if node.x == x and node.y == y - 1:
new_node.down = node
node.up = new_node
# Calculate new weights, look for a neighbour.
neigh = new_node.left
if neigh is None: neigh = new_node.right
if neigh is None: neigh = new_node.up
if neigh is None: neigh = new_node.down
if neigh is None: print("_insert: No neighbour found!")
for i in range(0, len(new_node.weights)):
new_node.weights[i] = 2 * init_node.weights[i] - neigh.weights[i]
return new_node
    def _remove_unused_nodes(self):
        """ Remove all nodes from the GSOM that have not been used. """
        to_remove = []
        # Iterate over all nodes.
        for node in self.nodes:
            # Different rules for nodes that have been used or not.
            iterations_not_won = self.it - node.last_it
            # If we have 50 nodes, every node is allowed not to win 50 times
            # in a row. This means every node must be picked at least once.
            # The allowance grows with training progress (self.it/len(data)).
            if iterations_not_won < len(self.nodes) * 4.0 * (1 + self.it/len(self.data)) : continue
            # First, remove the connections to the neighbouring nodes.
            if node.left: node.left.right = None
            if node.up: node.up.down = None
            if node.down: node.down.up = None
            if node.right: node.right.left = None
            # Save this node for removing.
            to_remove.append(node)
        # Now remove all marked nodes.
        for node in to_remove:
            print("Removing node @ " + str(node.x) + ", " + str(node.y) + \
                  " - Current it: " + str(self.it) + " - Last time won: " +\
                  str(node.last_it))
            # Log the removal so the CSV trace mirrors the map's evolution.
            if node.data:
                self.output.write(node.data + "," + str(node.x)+","+str(node.y)\
                                  + ",remove\n")
            self.nodes.remove(node)
| 36.028278 | 99 | 0.577595 |
import itertools
import math
import random
from math import log, exp

import numpy
import scipy
class GSOMNode:
    """A single node (neuron) of a growing self-organising map."""

    # Shared RNG used to initialise weight vectors.
    R = random.Random()

    def __init__(self, dim, x, y, data):
        """Create a node at grid position (x, y) with *dim* random weights."""
        # Fix: ``scipy.array`` was an alias of ``numpy.array`` that has been
        # removed from SciPy (deprecated in 1.3, removed in 1.9); call numpy
        # directly.
        self.weights = numpy.array([self.R.random() for _ in range(dim)])
        self.error = 0.0        # accumulated quantisation error
        self.it = 0             # iteration at which the node was created
        self.last_it = 0        # iteration at which the node last won
        self.data = data        # representative data element (label)
        self.last_changed = 0   # iteration at which the label last changed
        # Topological neighbours on the grid; None while the slot is free.
        self.right = None
        self.left = None
        self.up = None
        self.down = None
        self.x, self.y = x, y

    def adjust_weights(self, target, learn_rate):
        """Move the weight vector towards *target* by factor *learn_rate*."""
        for w in range(0, len(target)):
            self.weights[w] += learn_rate * (target[w] - self.weights[w])

    def is_boundary(self):
        """Return True when at least one neighbouring grid slot is free."""
        if not self.right: return True
        if not self.left: return True
        if not self.up: return True
        if not self.down: return True
        return False
class GSOM:
    """Growing Self-Organising Map over a rectangular grid of GSOMNodes.

    Nodes are grown next to high-error boundary nodes and pruned again when
    they stop winning inputs.  Map changes are logged to ``gsom.csv``.
    """

    @staticmethod
    def _distance(v1, v2):
        """Return the squared Euclidean distance between *v1* and *v2*."""
        dist = 0.0
        for v, w in zip(v1, v2):
            dist += pow(v - w, 2)
        return dist

    def _find_bmu(self, vec):
        """Return the best matching unit for *vec*: the node whose weight
        vector is closest (False when the map has no nodes)."""
        dist = float("inf")
        winner = False
        for node in self.nodes:
            d = self._distance(vec, node.weights)
            if d < dist:
                dist = d
                winner = node
        return winner

    def _find_similar_boundary(self, node):
        """Return the boundary node most similar to *node*.

        The input node itself is never a candidate.  Returns False when no
        other boundary node exists.
        """
        dist = float("inf")
        winner = False
        for boundary in self.nodes:
            if not boundary.is_boundary(): continue
            if boundary == node: continue
            d = self._distance(node.weights, boundary.weights)
            if d < dist:
                dist = d
                # Bug fix: this used to assign ``node``, so the search always
                # returned its own argument instead of the nearest boundary.
                winner = boundary
        return winner

    def __init__(self, X, y, spread_factor=0.5):
        """Build the initial 2x2 map for samples *X* with targets *y*.

        *spread_factor* in (0, 1) steers the growth threshold; smaller
        values make the map grow more eagerly.
        """
        # Pair every sample with its target (target wrapped as an array).
        self.data = []
        for fn, t in zip(X, y):
            # Fix: scipy.array has been removed from SciPy; use numpy.
            arr = numpy.array([t])
            self.data.append([fn, arr])
        self.dim = len(self.data[0][0])
        # Growth threshold from the GSOM paper: GT = -D * log2(SF).
        self._GT = -self.dim * math.log(spread_factor, 2)
        # Start with a fully connected 2x2 grid.
        self.nodes = []
        n00 = GSOMNode(dim=self.dim, x=0, y=0, data=self.data)
        n01 = GSOMNode(self.dim, 0, 1, self.data)
        n10 = GSOMNode(self.dim, 1, 0, self.data)
        n11 = GSOMNode(self.dim, 1, 1, self.data)
        self.nodes.extend([n00, n01, n10, n11])
        n00.right = n10
        n00.up = n01
        n01.right = n11
        n01.down = n00
        n10.up = n11
        n10.left = n00
        n11.left = n01
        n11.down = n10
        self.it = 0                     # global iteration counter
        self.max_it = len(self.data)
        self.num_it = 1000
        self.init_lr = 0.1              # initial learning rate
        self.alpha = 0.1                # learning-rate decay factor
        self.output = open("gsom.csv", "w")

    def train(self):
        """Run one training step.

        Picks one random input, presents it 20 times (adapting BMU and
        neighbour weights, growing where needed), then refreshes node
        labels and prunes unused nodes.
        """
        # Fix: a duplicate, immediately overwritten ``random.choice`` dead
        # store was removed here.
        input_ = random.choice(self.data)[0]
        learn_rate = self.init_lr * self.alpha * (1 - 1.5/len(self.nodes))
        # The reference implementation by Arkadi Kagan presents each chosen
        # input 20 times, so we do the same here.
        recalc_nodes = []
        for _ in range(20):
            # Find the best matching unit.
            BMU = self._find_bmu(input_)
            BMU.last_it = self.it
            # Adapt the weights of the BMU and its direct neighbours.
            neighbours = [BMU]
            if BMU.left: neighbours.append(BMU.left)
            if BMU.right: neighbours.append(BMU.right)
            if BMU.up: neighbours.append(BMU.up)
            if BMU.down: neighbours.append(BMU.down)
            if BMU not in recalc_nodes: recalc_nodes.append(BMU)
            for node in neighbours:
                node.adjust_weights(input_, learn_rate)
                if node not in recalc_nodes: recalc_nodes.append(node)
            # Accumulate the quantisation error on the winner; this may
            # trigger growth of new nodes.
            err = self._distance(BMU.weights, input_)
            growing, nodes = self._node_add_error(BMU, err)
            if growing: recalc_nodes.extend(nodes)
        # Count the iteration.
        self.it += 1
        # Re-calculate the representative data element of changed nodes.
        # NOTE: used_data is collected but the uniqueness check on it is
        # currently disabled.
        used_data = [node.data for node in self.nodes]
        for node in recalc_nodes:
            dist = float("inf")
            winner = False
            winner_fn = False
            for fn, point in self.data:
                d = self._distance(point, node.weights)
                if d < dist:
                    dist = d
                    winner = point
                    winner_fn = fn
            if node.data != winner_fn:
                node.data = winner_fn
                node.last_changed = self.it
                self.output.write(str(node.x) + "," + str(node.y) + ",change\n")
            used_data.append(winner_fn)
        # Remove nodes that have not won for too long.
        self._remove_unused_nodes()

    def _node_add_error(self, node, error):
        """Add *error* to *node* and grow the map when the growth threshold
        is exceeded.  Returns (grown, new_nodes) where new_nodes is 0 when
        nothing was grown.
        """
        node.error += error
        # Consider growing.
        if node.error > self._GT:
            # Only boundary nodes can grow; otherwise redirect the growth to
            # the most similar boundary node.
            if not node.is_boundary():
                node = self._find_similar_boundary(node)
                if not node:
                    # Bug fix: previously execution fell through and called
                    # self._grow(False), which raised AttributeError.
                    print("GSOM: Error: No free boundary node found!")
                    return False, 0
            nodes = self._grow(node)
            return True, nodes
        return False, 0

    def _grow(self, node):
        """Insert a new node in every free direction around *node* and
        return the list of new nodes."""
        nodes = []
        if node.left is None:
            nn = self._insert(node.x - 1, node.y, node)
            nodes.append(nn)
            print("Growing left at: (" + str(node.x) + "," + str(node.y)
                  + ") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
        if node.right is None:
            nn = self._insert(node.x + 1, node.y, node)
            nodes.append(nn)
            print("Growing right at: (" + str(node.x) + "," + str(node.y)
                  + ") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
        if node.up is None:
            nn = self._insert(node.x, node.y + 1, node)
            nodes.append(nn)
            print("Growing up at: (" + str(node.x) + "," + str(node.y)
                  + ") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
        if node.down is None:
            nn = self._insert(node.x, node.y - 1, node)
            nodes.append(nn)
            print("Growing down at: (" + str(node.x) + "," + str(node.y)
                  + ") -> (" + str(nn.x) + ", " + str(nn.y) + ")")
        return nodes

    def _insert(self, x, y, init_node):
        """Create a node at (x, y), link it to existing neighbours and set
        its weights by extrapolating through *init_node*."""
        new_node = GSOMNode(self.dim, x, y, self.data)
        self.nodes.append(new_node)
        # Remember the creation iteration so the node can be pruned later.
        new_node.it = new_node.last_it = self.it
        # Wire up any neighbouring nodes (left, right, up, down).
        for node in self.nodes:
            if node.x == x - 1 and node.y == y:
                new_node.left = node
                node.right = new_node
            if node.x == x + 1 and node.y == y:
                new_node.right = node
                node.left = new_node
            if node.x == x and node.y == y + 1:
                new_node.up = node
                node.down = new_node
            if node.x == x and node.y == y - 1:
                new_node.down = node
                node.up = new_node
        # Pick any connected neighbour for the weight extrapolation.
        neigh = new_node.left
        if neigh is None: neigh = new_node.right
        if neigh is None: neigh = new_node.up
        if neigh is None: neigh = new_node.down
        if neigh is None: print("_insert: No neighbour found!")
        # w_new = 2 * w_init - w_neigh mirrors the neighbour through the
        # initiating node.
        for i in range(0, len(new_node.weights)):
            new_node.weights[i] = 2 * init_node.weights[i] - neigh.weights[i]
        return new_node

    def _remove_unused_nodes(self):
        """Remove all nodes that have not won an input for too long."""
        to_remove = []
        for node in self.nodes:
            iterations_not_won = self.it - node.last_it
            # Every node gets an allowance proportional to the map size and
            # training progress before it is considered unused.
            if iterations_not_won < len(self.nodes) * 4.0 * (1 + self.it/len(self.data)) : continue
            # Unlink the node from its neighbours first.
            if node.left: node.left.right = None
            if node.up: node.up.down = None
            if node.down: node.down.up = None
            if node.right: node.right.left = None
            to_remove.append(node)
        # Now remove all marked nodes and log the removals.
        for node in to_remove:
            print("Removing node @ " + str(node.x) + ", " + str(node.y)
                  + " - Current it: " + str(self.it)
                  + " - Last time won: " + str(node.last_it))
            if node.data:
                self.output.write(node.data + "," + str(node.x) + ","
                                  + str(node.y) + ",remove\n")
            self.nodes.remove(node)
| true | true |
f736343fd5da597b2fb04dc9aa414c2b0598f993 | 4,724 | py | Python | elmerbot/commands/search.py | scott-hand/elmerbot | cb356da60dcf7d6b6d3bef22341765c043e48f87 | [
"MIT"
] | null | null | null | elmerbot/commands/search.py | scott-hand/elmerbot | cb356da60dcf7d6b6d3bef22341765c043e48f87 | [
"MIT"
] | null | null | null | elmerbot/commands/search.py | scott-hand/elmerbot | cb356da60dcf7d6b6d3bef22341765c043e48f87 | [
"MIT"
] | null | null | null | import discord
import string
from elmerbot.commands import ElmerCommand
__all__ = ["SearchCommand", "InfoCommand"]
class SearchCommand(ElmerCommand):
    """Implements ``!search``: fuzzy-match whisky names in the review data."""
    command = "search"
    description = (
        "Search for a whisky by name. Optionally put a number of results to limit it to in front of "
        "your query.\n"
        "Examples: `!search stagg 2014` or `!search 10 stagg`"
    )

    async def handle(self, client, message, args):
        """Look up *args* in the review data and post the matches."""
        self._logger.info("Got search command")
        if client.data.stale:
            await message.channel.send("One moment, reloading review data...")
        await message.channel.trigger_typing()
        # An optional leading integer caps the number of results.
        head, _, tail = args.partition(" ")
        if head.isnumeric():
            limit, query = int(head), tail
        else:
            limit, query = 5, args
        # Keep only harmless characters so user input cannot inject Markdown.
        allowed = string.ascii_letters + string.digits + "'()-., "
        query = "".join(c for c in query if c in allowed)
        matches = client.data.search(query, limit)
        # Nothing found: report and bail out.
        if not matches:
            embed = discord.Embed(title='No results found for "{}".'.format(query), colour=0xDD0000)
            await message.channel.send(embed=embed)
            return
        # Collect hits, cutting off once the confidence drops sharply.
        lines = []
        previous_conf = matches[0][2]
        for token, whisky_id, conf in matches:
            if previous_conf - conf > 3:
                break
            previous_conf = conf
            lines.append("**{}** [#{}]".format(token, whisky_id))
        hits = len(lines)
        lines.append("")
        lines.append("Use **!info <id>** to get review information. The <id> is the number in brackets \
            from search results.")
        embed = discord.Embed(
            title='{} Results for "{}":'.format(hits, query), description="\n".join(lines), colour=0x00DD00
        )
        await message.channel.send(embed=embed)
class InfoCommand(ElmerCommand):
    """Implements ``!info``: show review statistics for one whisky."""
    command = "info"
    # Bug fix: the description was copy-pasted from SearchCommand and
    # described the !search command instead of !info.
    description = (
        "Show review information for a whisky. Pass either the whisky id from search "
        "results or a name to look up.\n"
        "Examples: `!info 1234` or `!info stagg`"
    )

    async def handle(self, client, message, args):
        """Resolve *args* to a whisky id and post its review statistics."""
        self._logger.info("Got info command")
        pending_msg = None
        if client.data.stale:
            pending_msg = await message.channel.send("One moment, reloading review data...")
        await message.channel.trigger_typing()
        if args.isnumeric():
            whisky_id = int(args)
        else:
            # Whitelist characters to eliminate Markdown injection
            whitelist = string.ascii_letters + string.digits + "'()-., "
            pattern = "".join([c for c in args if c in whitelist])
            result = client.data.search(pattern, 1)
            if not result:
                em = discord.Embed(
                    title='Could not find "{}"'.format(pattern),
                    description="Try using **!search** first.",
                    colour=0xDD0000,
                )
                await message.channel.send(embed=em)
                return
            whisky_id = result[0][1]
        data = client.data.find(whisky_id)
        if pending_msg:
            await pending_msg.delete()
        output = []
        # Generate stats over reviews that actually carry a score.
        ratings = [row["rating"] for row in data if row["rating"]]
        if ratings:
            avg_rating = round(sum(ratings) / float(len(ratings)), 2)
            delta = round(avg_rating - client.data.avg, 2)
            delta = "+" + str(delta) if delta >= 0.0 else str(delta)
            output.append("**Average rating:** {} based on {} reviews with scores.".format(avg_rating, len(ratings)))
            # Bug fix: the original literal used a backslash line continuation
            # that embedded a long run of indentation spaces in the message.
            output.append(
                "It is {} from the global average of {} "
                "with standard deviation {}".format(
                    delta, client.data.avg, client.data.stddev
                )
            )
            output.append("**Most recent reviews:**")
            for idx, review in enumerate(client.data.most_recent(whisky_id=whisky_id)):
                output.append(
                    "{}. {} gave it {}. [Link]({})".format(
                        idx + 1, review["username"], review["rating"] or "no rating", review["link"]
                    )
                )
        else:
            output.append("**Average rating:** No reviews with scores.")
        em = discord.Embed(title="{}".format(data[0]["name"]), description="\n".join(output), colour=0x00DD00)
        await message.channel.send(embed=em)
| 40.033898 | 117 | 0.556308 | import discord
import string
from elmerbot.commands import ElmerCommand
__all__ = ["SearchCommand", "InfoCommand"]
class SearchCommand(ElmerCommand):
    """``!search``: fuzzy-match whisky names against the review data."""
    command = "search"
    description = (
        "Search for a whisky by name. Optionally put a number of results to limit it to in front of "
        "your query.\n"
        "Examples: `!search stagg 2014` or `!search 10 stagg`"
    )
    async def handle(self, client, message, args):
        """Look up *args* in the review data and post matches as an embed."""
        self._logger.info("Got search command")
        if client.data.stale:
            await message.channel.send("One moment, reloading review data...")
        await message.channel.trigger_typing()
        # An optional leading integer limits the number of results.
        first, _, rest = args.partition(" ")
        pattern = args
        choices = 5
        if first.isnumeric():
            choices = int(first)
            pattern = rest
        # Whitelist characters to eliminate Markdown injection.
        whitelist = string.ascii_letters + string.digits + "'()-., "
        pattern = "".join([c for c in pattern if c in whitelist])
        results = client.data.search(pattern, choices)
        # Stop now if there's nothing to show
        if not results:
            em = discord.Embed(title='No results found for "{}".'.format(pattern), colour=0xDD0000)
            await message.channel.send(embed=em)
            return
        # Compile results, stopping when the confidence drops sharply.
        output = []
        last_conf = results[0][2]
        for token, whisky_id, conf in results:
            if last_conf - conf > 3:
                break
            last_conf = conf
            output.append("**{}** [#{}]".format(token, whisky_id))
        hits = len(output)
        output += [
            "",
            "Use **!info <id>** to get review information. The <id> is the number in brackets \
            from search results.",
        ]
        em = discord.Embed(
            title='{} Results for "{}":'.format(hits, pattern), description="\n".join(output), colour=0x00DD00
        )
        await message.channel.send(embed=em)
class InfoCommand(ElmerCommand):
    """``!info``: show review statistics for one whisky (by id or name)."""
    command = "info"
    # NOTE(review): this description appears copy-pasted from SearchCommand
    # and describes !search, not !info — confirm and fix upstream.
    description = (
        "Search for a whisky by name. Optionally put a number of results to limit it to in front of your "
        "query.\n"
        "Examples: `!search stagg 2014` or `!search 10 stagg`"
    )
    async def handle(self, client, message, args):
        """Resolve *args* to a whisky id and post its review statistics."""
        self._logger.info("Got info command")
        pending_msg = None
        if client.data.stale:
            pending_msg = await message.channel.send("One moment, reloading review data...")
        await message.channel.trigger_typing()
        # Numeric input is treated as a whisky id; otherwise search by name.
        if args.isnumeric():
            whisky_id = int(args)
        else:
            # Whitelist characters to eliminate Markdown injection
            whitelist = string.ascii_letters + string.digits + "'()-., "
            pattern = "".join([c for c in args if c in whitelist])
            result = client.data.search(pattern, 1)
            if not result:
                em = discord.Embed(
                    title='Could not find "{}"'.format(pattern),
                    description="Try using **!search** first.",
                    colour=0xDD0000,
                )
                await message.channel.send(embed=em)
                return
            whisky_id = result[0][1]
        data = client.data.find(whisky_id)
        if pending_msg:
            await pending_msg.delete()
        output = []
        # Stats are computed over reviews that carry a score.
        ratings = [row["rating"] for row in data if row["rating"]]
        if ratings:
            avg_rating = round(sum(ratings) / float(len(ratings)), 2)
            delta = round(avg_rating - client.data.avg, 2)
            delta = "+" + str(delta) if delta >= 0.0 else str(delta)
            output.append("**Average rating:** {} based on {} reviews with scores.".format(avg_rating, len(ratings)))
            output.append(
                "It is {} from the global average \
                of {} with standard deviation {}".format(
                    delta, client.data.avg, client.data.stddev
                )
            )
            output.append("**Most recent reviews:**")
            for idx, review in enumerate(client.data.most_recent(whisky_id=whisky_id)):
                output.append(
                    "{}. {} gave it {}. [Link]({})".format(
                        idx + 1, review["username"], review["rating"] or "no rating", review["link"]
                    )
                )
        else:
            output.append("**Average rating:** No reviews with scores.")
        em = discord.Embed(title="{}".format(data[0]["name"]), description="\n".join(output), colour=0x00DD00)
        await message.channel.send(embed=em)
| true | true |
f736354e5a0f20eea35eceff7914e22c35491058 | 7,259 | py | Python | tests/test_grid.py | eEcoLiDAR/lcMacroPipeline | 91709f93ef53a3e453f0ce967e1094688688f684 | [
"Apache-2.0"
] | 2 | 2021-02-17T14:41:50.000Z | 2021-04-07T12:06:21.000Z | tests/test_grid.py | eEcoLiDAR/lcMacroPipeline | 91709f93ef53a3e453f0ce967e1094688688f684 | [
"Apache-2.0"
] | 16 | 2020-03-11T08:39:46.000Z | 2020-05-20T10:42:29.000Z | tests/test_grid.py | eEcoLiDAR/Laserfarm | 91709f93ef53a3e453f0ce967e1094688688f684 | [
"Apache-2.0"
] | 2 | 2020-07-06T08:18:56.000Z | 2020-08-19T12:00:26.000Z | from pathlib import Path
import unittest
import numpy as np
import pylas
from laserfarm.grid import Grid
try:
import matplotlib
matplotlib_available = True
except ModuleNotFoundError:
matplotlib_available = False
if matplotlib_available:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class TestValidGridSetup(unittest.TestCase):
    """A correctly configured 20x20 grid split into 5x5 tiles."""

    def setUp(self):
        self.grid = Grid()
        self.grid.setup(min_x=0., min_y=0., max_x=20., max_y=20.,
                        n_tiles_side=5)

    def test_gridMins(self):
        np.testing.assert_allclose(self.grid.grid_mins, [0., 0.])

    def test_gridMaxs(self):
        np.testing.assert_allclose(self.grid.grid_maxs, [20., 20.])

    def test_gridWidth(self):
        np.testing.assert_allclose(self.grid.grid_width, 20.)

    def test_tileWidth(self):
        # 20 units over 5 tiles -> each tile is 4 units wide.
        np.testing.assert_allclose(self.grid.tile_width, 4.)

    def test_tileIndexForPoint(self):
        index = self.grid.get_tile_index(0.1, 0.2)
        np.testing.assert_array_equal(index, (0, 0))

    def test_tileIndexForArray(self):
        indices = self.grid.get_tile_index((0.1, 19.9), (0.2, 19.8))
        np.testing.assert_array_equal(indices, ((0, 0), (4, 4)))

    def test_tileBoundsForPoint(self):
        bounds = self.grid.get_tile_bounds(0, 0)
        np.testing.assert_array_equal(bounds, ((0., 0.), (4., 4.)))

    def test_tileBoundsForArray(self):
        bounds = self.grid.get_tile_bounds((0, 0), (0, 1))
        np.testing.assert_array_equal(bounds, (((0., 0.), (0., 4.)),
                                               ((4., 4.), (4., 8.))))
class TestInvalidGridSetup(unittest.TestCase):
    """Grid.setup must reject degenerate or non-square configurations."""

    def _assert_setup_fails(self, *setup_args):
        # Helper: a fresh Grid must raise ValueError for these arguments.
        grid = Grid()
        with self.assertRaises(ValueError):
            grid.setup(*setup_args)

    def test_fractionalNumberOfTilesGrid(self):
        self._assert_setup_fails(0., 0., 20., 20., 0.1)

    def test_zeroNumberOfTilesGrid(self):
        self._assert_setup_fails(0., 0., 20., 20., 0)

    def test_zeroWidthGrid(self):
        self._assert_setup_fails(0., 0., 0., 20., 5)

    def test_rectangularGrid(self):
        self._assert_setup_fails(0., 0., 10., 20., 5)
class TestRealGridValid(unittest.TestCase):
    """All points of a correctly retiled LAZ file must fall inside the
    expected tile of the full-extent grid."""
    _test_dir = 'test_tmp_dir'
    _test_data_dir = 'testdata'
    # Tile index the test file is expected to occupy.
    _test_tile_idx = [101, 101]
    _test_file_name = 'C_43FN1_1_1.LAZ'
    # Grid extent covering the full point-cloud domain.
    _min_x = -113107.8100
    _min_y = 214783.8700
    _max_x = 398892.1900
    _max_y = 726783.87
    _n_tiles_sides = 256
    # Set to True to save a diagnostic plot of out-of-tile points.
    plot = False
    def setUp(self):
        self.grid = Grid()
        self.grid.setup(min_x=self._min_x,
                        min_y=self._min_y,
                        max_x=self._max_x,
                        max_y=self._max_y,
                        n_tiles_side=self._n_tiles_sides)
        self._test_data_path = Path(self._test_data_dir).joinpath(self._test_file_name)
        self.points = _read_points_from_file(str(self._test_data_path))
    def test_isPointInTile(self):
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx)
        self.assertTrue(np.all(mask_valid_points))
class TestRealGridLowPrecision(TestRealGridValid):
    """
    The following tile has been obtained by using large scale parameters (0.1)
    in the PDAL LAS writer. Some points thus fall outside the tile boundary
    when read from the file.
    """
    _test_file_name = 'C_43FN1_1.LAZ'
    def test_isPointInTile(self):
        # Without a precision tolerance, some points must fall outside.
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx)
        if self.plot and matplotlib_available:
            _plot_points_and_tile(self.grid,
                                  self.points[~mask_valid_points],
                                  self._test_tile_idx,
                                  self._test_data_path.with_suffix('.png').name)
        self.assertFalse(np.all(mask_valid_points))
    def test_isPointInTileWithPrecision(self):
        x_pts, y_pts = self.points.T
        # Tolerance derived from the fractional part of the grid boundary.
        precision = np.abs(np.rint(self._max_y) - self._max_y)
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx,
                                                       precision=precision)
        self.assertTrue(np.all(mask_valid_points))
class TestRealGridLowPrecisionRoundedOrigin(TestRealGridValid):
    """
    The following tile has been obtained by rounding off the coordinates
    of the origin and by using the default scale parameters (0.01) in the PDAL
    LAS writer.
    """
    _test_file_name = 'C_43FN1_1.LAZ'
    _test_tile_idx = [101, 101]
    # Grid extent with origin rounded to whole metres.
    _min_x = -113108.00
    _min_y = 214784.00
    _max_x = 398892.00
    _max_y = 726784.00
    def test_isPointInTile(self):
        # Without a precision tolerance, some points must fall outside.
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx)
        if self.plot and matplotlib_available:
            _plot_points_and_tile(self.grid,
                                  self.points[~mask_valid_points],
                                  self._test_tile_idx,
                                  self._test_data_path.with_suffix('.png').name)
        self.assertFalse(np.all(mask_valid_points))
    def test_isPointInTileWithPrecision(self):
        # With the LAS writer's 0.01 scale as tolerance, all points fit.
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx,
                                                       precision=0.01)
        self.assertTrue(np.all(mask_valid_points))
def _read_points_from_file(filename):
    """Return the X, Y coordinates of all points in *filename* as an (N, 2) array."""
    las = pylas.read(filename)
    return np.stack((las.x, las.y), axis=-1)
def _plot_points_and_tile(grid, points, tile_indices, filename=None):
    """
    Plot points together with the outline of one grid tile.
    :param grid: grid object
    :param points: (Nx2) array containing X,Y coordinates of the points
    :param tile_indices: [N_x, N_y], where N_i is the integer tile index along
    dimension i
    :param filename: optional, path where to save plot
    """
    # Points as a red scatter plot.
    plt.scatter(points[:, 0], points[:, 1], color='r')
    # Tile outline: visit the four corners and close the loop.
    mins, maxs = grid.get_tile_bounds(*tile_indices)
    corners = np.array((mins,
                        [mins[0], maxs[1]],
                        maxs,
                        [maxs[0], mins[1]],
                        mins))
    plt.plot(corners[:, 0], corners[:, 1], color='k')
    # Tile label centred inside the tile.
    centre = (mins + maxs) / 2.
    plt.text(centre[0], centre[1], '({}, {})'.format(*tile_indices),
             horizontalalignment='center',
             verticalalignment='center')
    if filename is not None:
        plt.savefig(filename, dpi=300)
    else:
        plt.show()
    plt.close(plt.figure())
| 35.067633 | 87 | 0.573908 | from pathlib import Path
import unittest
import numpy as np
import pylas
from laserfarm.grid import Grid
try:
import matplotlib
matplotlib_available = True
except ModuleNotFoundError:
matplotlib_available = False
if matplotlib_available:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class TestValidGridSetup(unittest.TestCase):
    """A correctly configured 20x20 grid split into 5x5 tiles (width 4)."""
    def setUp(self):
        self.grid = Grid()
        self.grid.setup(0., 0., 20., 20., 5)
    def test_gridMins(self):
        np.testing.assert_allclose(self.grid.grid_mins, [0., 0.])
    def test_gridMaxs(self):
        np.testing.assert_allclose(self.grid.grid_maxs, [20., 20.])
    def test_gridWidth(self):
        np.testing.assert_allclose(self.grid.grid_width, 20.)
    def test_tileWidth(self):
        np.testing.assert_allclose(self.grid.tile_width, 4.)
    def test_tileIndexForPoint(self):
        np.testing.assert_array_equal(self.grid.get_tile_index(0.1, 0.2),
                                      (0, 0))
    def test_tileIndexForArray(self):
        # Index lookup broadcasts over coordinate arrays.
        np.testing.assert_array_equal(self.grid.get_tile_index((0.1, 19.9),
                                                               (0.2, 19.8)),
                                      ((0, 0), (4, 4)))
    def test_tileBoundsForPoint(self):
        np.testing.assert_array_equal(self.grid.get_tile_bounds(0, 0),
                                      ((0., 0.), (4., 4.)))
    def test_tileBoundsForArray(self):
        # Bounds lookup broadcasts over index arrays.
        np.testing.assert_array_equal(self.grid.get_tile_bounds((0, 0),
                                                                (0, 1)),
                                      (((0., 0.), (0., 4.)),
                                       ((4., 4.), (4., 8.))))
class TestInvalidGridSetup(unittest.TestCase):
    """Grid.setup must reject degenerate or non-square configurations."""
    def test_fractionalNumberOfTilesGrid(self):
        with self.assertRaises(ValueError):
            grid = Grid()
            grid.setup(0., 0., 20., 20., 0.1)
    def test_zeroNumberOfTilesGrid(self):
        with self.assertRaises(ValueError):
            grid = Grid()
            grid.setup(0., 0., 20., 20., 0)
    def test_zeroWidthGrid(self):
        with self.assertRaises(ValueError):
            grid = Grid()
            grid.setup(0., 0., 0., 20., 5)
    def test_rectangularGrid(self):
        # Only square grids are supported.
        with self.assertRaises(ValueError):
            grid = Grid()
            grid.setup(0., 0., 10., 20., 5)
class TestRealGridValid(unittest.TestCase):
    """All points of a correctly retiled LAZ file must fall inside the
    expected tile of the full-extent grid."""
    _test_dir = 'test_tmp_dir'
    _test_data_dir = 'testdata'
    # Tile index the test file is expected to occupy.
    _test_tile_idx = [101, 101]
    _test_file_name = 'C_43FN1_1_1.LAZ'
    # Grid extent covering the full point-cloud domain.
    _min_x = -113107.8100
    _min_y = 214783.8700
    _max_x = 398892.1900
    _max_y = 726783.87
    _n_tiles_sides = 256
    # Set to True to save a diagnostic plot of out-of-tile points.
    plot = False
    def setUp(self):
        self.grid = Grid()
        self.grid.setup(min_x=self._min_x,
                        min_y=self._min_y,
                        max_x=self._max_x,
                        max_y=self._max_y,
                        n_tiles_side=self._n_tiles_sides)
        self._test_data_path = Path(self._test_data_dir).joinpath(self._test_file_name)
        self.points = _read_points_from_file(str(self._test_data_path))
    def test_isPointInTile(self):
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx)
        self.assertTrue(np.all(mask_valid_points))
class TestRealGridLowPrecision(TestRealGridValid):
    """Tile written with coarse scale parameters (0.1) in the PDAL LAS
    writer: some points fall outside the tile unless a tolerance is used."""
    _test_file_name = 'C_43FN1_1.LAZ'
    def test_isPointInTile(self):
        # Without a precision tolerance, some points must fall outside.
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx)
        if self.plot and matplotlib_available:
            _plot_points_and_tile(self.grid,
                                  self.points[~mask_valid_points],
                                  self._test_tile_idx,
                                  self._test_data_path.with_suffix('.png').name)
        self.assertFalse(np.all(mask_valid_points))
    def test_isPointInTileWithPrecision(self):
        x_pts, y_pts = self.points.T
        # Tolerance derived from the fractional part of the grid boundary.
        precision = np.abs(np.rint(self._max_y) - self._max_y)
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx,
                                                       precision=precision)
        self.assertTrue(np.all(mask_valid_points))
class TestRealGridLowPrecisionRoundedOrigin(TestRealGridValid):
    """Tile written with a rounded-off origin and the default scale
    parameters (0.01) in the PDAL LAS writer."""
    _test_file_name = 'C_43FN1_1.LAZ'
    _test_tile_idx = [101, 101]
    # Grid extent with origin rounded to whole metres.
    _min_x = -113108.00
    _min_y = 214784.00
    _max_x = 398892.00
    _max_y = 726784.00
    def test_isPointInTile(self):
        # Without a precision tolerance, some points must fall outside.
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx)
        if self.plot and matplotlib_available:
            _plot_points_and_tile(self.grid,
                                  self.points[~mask_valid_points],
                                  self._test_tile_idx,
                                  self._test_data_path.with_suffix('.png').name)
        self.assertFalse(np.all(mask_valid_points))
    def test_isPointInTileWithPrecision(self):
        # With the LAS writer's 0.01 scale as tolerance, all points fit.
        x_pts, y_pts = self.points.T
        mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,
                                                       *self._test_tile_idx,
                                                       precision=0.01)
        self.assertTrue(np.all(mask_valid_points))
def _read_points_from_file(filename):
    """Return the X, Y coordinates of all points in *filename* as an (N, 2) array."""
    file = pylas.read(filename)
    return np.column_stack((file.x, file.y))
def _plot_points_and_tile(grid, points, tile_indices, filename=None):
    """Plot *points* together with the outline of one grid tile.

    :param grid: grid object
    :param points: (Nx2) array containing X,Y coordinates of the points
    :param tile_indices: [N_x, N_y] integer tile index along each dimension
    :param filename: optional path; when given the figure is saved there
        instead of being shown interactively
    """
    # Points as a red scatter plot.
    x_pts, y_pts = points.T
    plt.scatter(x_pts, y_pts, color='r')
    # Tile outline: visit the four corners and close the loop.
    tile_mins, tile_maxs = grid.get_tile_bounds(*tile_indices)
    line = np.array((tile_mins,
                     [tile_mins[0], tile_maxs[1]],
                     tile_maxs,
                     [tile_maxs[0], tile_mins[1]],
                     tile_mins))
    x, y = line.T
    plt.plot(x, y, color='k')
    # Tile label centred inside the tile.
    x_cntr, y_cntr = (tile_mins + tile_maxs) / 2.
    plt.text(x_cntr, y_cntr, '({}, {})'.format(*tile_indices),
             horizontalalignment='center',
             verticalalignment='center')
    if filename is not None:
        plt.savefig(filename, dpi=300)
    else:
        plt.show()
    plt.close(plt.figure())
| true | true |
f73635595a57c8cdfe5c9fda02943fccf1922333 | 2,431 | py | Python | examples/crud_rest_api/config.py | NeolithEra/Flask-AppBuilder | d9561bf89d925a2f5d74c226318884733b52718f | [
"BSD-3-Clause"
] | 3,862 | 2015-01-01T11:59:24.000Z | 2022-03-31T19:23:55.000Z | examples/crud_rest_api/config.py | NeolithEra/Flask-AppBuilder | d9561bf89d925a2f5d74c226318884733b52718f | [
"BSD-3-Clause"
] | 1,566 | 2015-01-01T23:26:11.000Z | 2022-03-31T16:23:08.000Z | examples/crud_rest_api/config.py | NeolithEra/Flask-AppBuilder | d9561bf89d925a2f5d74c226318884733b52718f | [
"BSD-3-Clause"
] | 1,352 | 2015-01-02T10:45:59.000Z | 2022-03-26T20:56:48.000Z | import os
# Flask-AppBuilder example configuration.
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
# Bug fix: the original literal "\2\1...\e\y\y\h" relied on invalid escape
# sequences (\e, \y, \h), which raise SyntaxWarning on Python 3.12+ and will
# become a SyntaxError.  The explicit escapes below produce the exact same
# string value.  Override this key in any real deployment.
SECRET_KEY = "\x02\x01thisismyscretkey\x01\x02\\e\\y\\y\\h"
OPENID_PROVIDERS = [
    {"name": "Google", "url": "https://www.google.com/accounts/o8/id"},
    {"name": "Yahoo", "url": "https://me.yahoo.com"},
    {"name": "AOL", "url": "http://openid.aol.com/<username>"},
    {"name": "Flickr", "url": "http://www.flickr.com/<username>"},
    {"name": "MyOpenID", "url": "https://www.myopenid.com"},
]
# Default to a local SQLite database; alternatives kept for reference.
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db")
# SQLALCHEMY_DATABASE_URI = 'mysql://username:password@mysqlserver.local/quickhowto'
# SQLALCHEMY_DATABASE_URI = 'postgresql://scott:tiger@localhost:5432/myapp'
# SQLALCHEMY_ECHO = True
SQLALCHEMY_POOL_RECYCLE = 3
# Babel i18n settings and the languages offered in the UI.
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_FOLDER = "translations"
LANGUAGES = {
    "en": {"flag": "gb", "name": "English"},
    "pt": {"flag": "pt", "name": "Portuguese"},
    "pt_BR": {"flag": "br", "name": "Pt Brazil"},
    "es": {"flag": "es", "name": "Spanish"},
    "fr": {"flag": "fr", "name": "French"},
    "de": {"flag": "de", "name": "German"},
    "zh": {"flag": "cn", "name": "Chinese"},
    "ru": {"flag": "ru", "name": "Russian"},
    "pl": {"flag": "pl", "name": "Polish"},
    "el": {"flag": "gr", "name": "Greek"},
    "ja_JP": {"flag": "jp", "name": "Japanese"},
}
# ------------------------------
# GLOBALS FOR GENERAL APP's
# ------------------------------
FAB_API_SWAGGER_UI = True
UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_URL = "/static/uploads/"
# Authentication: 1 = database auth (see Flask-AppBuilder security docs).
AUTH_TYPE = 1
# AUTH_LDAP_SERVER = "ldap://dc.domain.net"
AUTH_ROLE_ADMIN = "Admin"
AUTH_ROLE_PUBLIC = "Public"
APP_NAME = "F.A.B. Example"
APP_THEME = ""  # default
# APP_THEME = "cerulean.css"      # COOL
# APP_THEME = "amelia.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css"       # COOL
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "readable.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css"          # COOL
# APP_THEME = "spacelab.css"      # NICE
# APP_THEME = "united.css"
# APP_THEME = "darkly.css"
# APP_THEME = "lumen.css"
# APP_THEME = "paper.css"
# APP_THEME = "sandstone.css"
# APP_THEME = "solar.css"
# APP_THEME = "superhero.css"
# Example of a custom read-only role definition:
#FAB_ROLES = {
#    "ReadOnly": [
#        [".*", "can_list"],
#        [".*", "can_show"],
#        [".*", "menu_access"]
#    ]
#}
| 30.772152 | 84 | 0.588235 | import os
# Flask-AppBuilder example configuration.
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
# NOTE(review): this literal contains invalid escape sequences (\e, \y, \h)
# that raise SyntaxWarning on Python 3.12+; the key should also be
# overridden in any real deployment.
SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h"
OPENID_PROVIDERS = [
    {"name": "Google", "url": "https://www.google.com/accounts/o8/id"},
    {"name": "Yahoo", "url": "https://me.yahoo.com"},
    {"name": "AOL", "url": "http://openid.aol.com/<username>"},
    {"name": "Flickr", "url": "http://www.flickr.com/<username>"},
    {"name": "MyOpenID", "url": "https://www.myopenid.com"},
]
# Local SQLite database next to this file.
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db")
SQLALCHEMY_POOL_RECYCLE = 3
# Babel i18n settings and the languages offered in the UI.
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_FOLDER = "translations"
LANGUAGES = {
    "en": {"flag": "gb", "name": "English"},
    "pt": {"flag": "pt", "name": "Portuguese"},
    "pt_BR": {"flag": "br", "name": "Pt Brazil"},
    "es": {"flag": "es", "name": "Spanish"},
    "fr": {"flag": "fr", "name": "French"},
    "de": {"flag": "de", "name": "German"},
    "zh": {"flag": "cn", "name": "Chinese"},
    "ru": {"flag": "ru", "name": "Russian"},
    "pl": {"flag": "pl", "name": "Polish"},
    "el": {"flag": "gr", "name": "Greek"},
    "ja_JP": {"flag": "jp", "name": "Japanese"},
}
# ------------------------------
# General application settings.
FAB_API_SWAGGER_UI = True
UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_URL = "/static/uploads/"
# Authentication: 1 = database auth.
AUTH_TYPE = 1
# AUTH_LDAP_SERVER = "ldap://dc.domain.net"
AUTH_ROLE_ADMIN = "Admin"
AUTH_ROLE_PUBLIC = "Public"
APP_NAME = "F.A.B. Example"
APP_THEME = ""  # default
# APP_THEME = "cerulean.css"      # COOL
# APP_THEME = "amelia.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css"       # COOL
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "readable.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css"          # COOL
# APP_THEME = "spacelab.css"      # NICE
# APP_THEME = "united.css"
# APP_THEME = "darkly.css"
# APP_THEME = "lumen.css"
# APP_THEME = "paper.css"
# APP_THEME = "sandstone.css"
# APP_THEME = "solar.css"
# APP_THEME = "superhero.css"
# Example of a custom read-only role definition:
#FAB_ROLES = {
#    "ReadOnly": [
#        [".*", "can_list"],
#        [".*", "can_show"],
#        [".*", "menu_access"]
#    ]
#}
| true | true |
f73635be840dcaa8541d5a931f7dd2184a0e9aca | 2,888 | py | Python | test/test_custom.py | lyon916/pyecharts | 40efd119ec7bccb4bc3d5dab985ec7aca2936911 | [
"MIT"
] | null | null | null | test/test_custom.py | lyon916/pyecharts | 40efd119ec7bccb4bc3d5dab985ec7aca2936911 | [
"MIT"
] | null | null | null | test/test_custom.py | lyon916/pyecharts | 40efd119ec7bccb4bc3d5dab985ec7aca2936911 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
from pyecharts import Bar, Line, Scatter, EffectScatter, Kline
def test_custom():
    """Exercise ``custom()`` by overlaying extra series onto a base chart.

    Three scenarios are built; each calls ``show_config()`` and
    ``render()``, so running this writes the default output file:

    * custom_0 -- a Line series overlaid on a Bar chart
    * custom_1 -- a Scatter and a second EffectScatter overlaid on an
      EffectScatter chart
    * custom_2 -- two random Line series overlaid on a candlestick
      (Kline) chart
    """
    # custom_0
    attr = ['A', 'B', 'C', 'D', 'E', 'F']
    v1 = [10, 20, 30, 40, 50, 60]
    v2 = [15, 25, 35, 45, 55, 65]
    v3 = [38, 28, 58, 48, 78, 68]
    bar = Bar("Line - Bar 示例")
    bar.add("bar", attr, v1)
    line = Line()
    line.add("line", v2, v3)
    # Merge the line's series into the bar chart.
    bar.custom(line.get_series())
    bar.show_config()
    bar.render()
    # custom_1
    v1 = [10, 20, 30, 40, 50, 60]
    v2 = [30, 30, 30, 30, 30, 30]
    v3 = [50, 50, 50, 50, 50, 50]
    v4 = [10, 10, 10, 10, 10, 10]
    es = EffectScatter("Scatter - EffectScatter 示例")
    es.add("es", v1, v2)
    scatter = Scatter()
    scatter.add("scatter", v1, v3)
    es.custom(scatter.get_series())
    es_1 = EffectScatter()
    es_1.add("es_1", v1, v4, symbol='pin', effect_scale=5)
    es.custom(es_1.get_series())
    es.show_config()
    es.render()
    # custom_2
    import random
    # 31 days of candlestick data: [open, close, lowest, highest].
    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    attr = ["2017/7/{}".format(i + 1) for i in range(31)]
    kline = Kline("Kline - Line 示例")
    kline.add("日K", attr, v1)
    line_1 = Line()
    line_1.add("line-1", attr, [random.randint(2400, 2500) for _ in range(31)])
    line_2 = Line()
    line_2.add("line-2", attr, [random.randint(2400, 2500) for _ in range(31)])
    kline.custom(line_1.get_series())
    kline.custom(line_2.get_series())
    kline.show_config()
    kline.render()
| 35.654321 | 79 | 0.532895 |
from pyecharts import Bar, Line, Scatter, EffectScatter, Kline
def test_custom():
    """Exercise ``custom()`` by overlaying extra series onto a base chart.

    Three scenarios are built (Line on Bar, Scatter/EffectScatter on
    EffectScatter, two Lines on a Kline); each calls ``show_config()``
    and ``render()``, so running this writes the default output file.
    """
    # Scenario 1: Line overlaid on a Bar chart.
    attr = ['A', 'B', 'C', 'D', 'E', 'F']
    v1 = [10, 20, 30, 40, 50, 60]
    v2 = [15, 25, 35, 45, 55, 65]
    v3 = [38, 28, 58, 48, 78, 68]
    bar = Bar("Line - Bar 示例")
    bar.add("bar", attr, v1)
    line = Line()
    line.add("line", v2, v3)
    bar.custom(line.get_series())
    bar.show_config()
    bar.render()
    # Scenario 2: Scatter and a second EffectScatter on an EffectScatter.
    v1 = [10, 20, 30, 40, 50, 60]
    v2 = [30, 30, 30, 30, 30, 30]
    v3 = [50, 50, 50, 50, 50, 50]
    v4 = [10, 10, 10, 10, 10, 10]
    es = EffectScatter("Scatter - EffectScatter 示例")
    es.add("es", v1, v2)
    scatter = Scatter()
    scatter.add("scatter", v1, v3)
    es.custom(scatter.get_series())
    es_1 = EffectScatter()
    es_1.add("es_1", v1, v4, symbol='pin', effect_scale=5)
    es.custom(es_1.get_series())
    es.show_config()
    es.render()
    # Scenario 3: two random Line series on a candlestick (Kline) chart.
    import random
    # 31 days of candlestick data: [open, close, lowest, highest].
    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    attr = ["2017/7/{}".format(i + 1) for i in range(31)]
    kline = Kline("Kline - Line 示例")
    kline.add("日K", attr, v1)
    line_1 = Line()
    line_1.add("line-1", attr, [random.randint(2400, 2500) for _ in range(31)])
    line_2 = Line()
    line_2.add("line-2", attr, [random.randint(2400, 2500) for _ in range(31)])
    kline.custom(line_1.get_series())
    kline.custom(line_2.get_series())
    kline.show_config()
    kline.render()
| true | true |
f73636427e005194ae2caceac942ddd1063e52d3 | 1,078 | py | Python | code/src/plan2scene/texture_gen/custom_ops/noise.py | madhawav/plan2scene | cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5 | [
"MIT"
] | 305 | 2021-06-09T23:30:34.000Z | 2022-03-30T02:49:45.000Z | code/src/plan2scene/texture_gen/custom_ops/noise.py | madhawav/plan2scene | cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5 | [
"MIT"
] | 8 | 2021-06-11T01:59:26.000Z | 2022-03-24T21:32:21.000Z | code/src/plan2scene/texture_gen/custom_ops/noise.py | madhawav/plan2scene | cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5 | [
"MIT"
] | 32 | 2021-06-09T23:19:23.000Z | 2022-03-05T14:03:15.000Z | # Code adapted from https://github.com/henzler/neuraltexture/blob/master/code/custom_ops/noise/noise.py
from torch import nn
from torch.autograd import Function
import plan2scene.texture_gen.utils.neural_texture_helper as utils_nt
import noise_cuda
import torch
import numpy as np
from torch.autograd import gradcheck
class NoiseFunction(Function):
    """Autograd bridge to the compiled ``noise_cuda`` extension.

    ``forward`` evaluates the noise field at the given positions;
    ``backward`` propagates the incoming gradient through the bilinear
    component of the positions. The seed receives no gradient.
    """
    @staticmethod
    def forward(ctx, position, seed):
        """Run the CUDA noise kernel, stashing inputs for backward."""
        ctx.save_for_backward(position, seed)
        return noise_cuda.forward(position, seed)
    @staticmethod
    def backward(ctx, grad_noise):
        """Chain ``grad_noise`` with the kernel's position gradient."""
        position, seed = ctx.saved_tensors
        grad_bilinear = noise_cuda.backward(position, seed)
        # Stack a zero gradient for the first position slot with the
        # bilinear gradient for the second, matching the input layout.
        grad_position = torch.stack(
            [torch.zeros_like(grad_bilinear), grad_bilinear], dim=0)
        return grad_position * grad_noise.unsqueeze(2), None
class Noise(nn.Module):
    """Thin ``nn.Module`` wrapper around :class:`NoiseFunction`."""
    def __init__(self):
        super(Noise, self).__init__()
    def forward(self, position, seed):
        """Sample noise at ``position`` with ``seed``; both tensors are
        made contiguous before being handed to the CUDA kernel."""
        contiguous_position = position.contiguous()
        contiguous_seed = seed.contiguous()
        return NoiseFunction.apply(contiguous_position, contiguous_seed)
| 29.944444 | 103 | 0.730056 |
from torch import nn
from torch.autograd import Function
import plan2scene.texture_gen.utils.neural_texture_helper as utils_nt
import noise_cuda
import torch
import numpy as np
from torch.autograd import gradcheck
class NoiseFunction(Function):
    """Autograd bridge to the compiled ``noise_cuda`` extension."""
    @staticmethod
    def forward(ctx, position, seed):
        """Evaluate the CUDA noise kernel; inputs are saved for backward."""
        ctx.save_for_backward(position, seed)
        noise = noise_cuda.forward(position, seed)
        return noise
    @staticmethod
    def backward(ctx, grad_noise):
        """Propagate gradients to ``position``; the seed gets none."""
        position, seed = ctx.saved_tensors
        d_position_bilinear = noise_cuda.backward(position, seed)
        # Zero gradient for the first position slot, bilinear gradient
        # for the second, stacked to match the input layout.
        d_position = torch.stack([torch.zeros_like(d_position_bilinear), d_position_bilinear], dim=0)
        return grad_noise.unsqueeze(2) * d_position, None
class Noise(nn.Module):
    """Thin ``nn.Module`` wrapper around :class:`NoiseFunction`."""
    def __init__(self):
        super(Noise, self).__init__()
    def forward(self, position, seed):
        """Sample noise at ``position``; inputs are made contiguous first."""
        noise = NoiseFunction.apply(position.contiguous(), seed.contiguous())
        return noise
| true | true |
f736365887c9571c3e54f907948e54ccd5725e4e | 6,414 | py | Python | src/rdb/models/environment.py | juliangruendner/ketos_brain_api | 6ec7e01a0996abb03dba090d832a5e1020df4180 | [
"MIT"
] | null | null | null | src/rdb/models/environment.py | juliangruendner/ketos_brain_api | 6ec7e01a0996abb03dba090d832a5e1020df4180 | [
"MIT"
] | null | null | null | src/rdb/models/environment.py | juliangruendner/ketos_brain_api | 6ec7e01a0996abb03dba090d832a5e1020df4180 | [
"MIT"
] | null | null | null | from rdb.rdb import db, LowerCaseText
from enum import Enum
import datetime
from resources.adminAccess import is_admin_user
from flask import g
import rdb.models.image as Image
from dockerUtil.dockerClient import dockerClient, wait_for_it
import config
import requests
import uuid
from flask_restful import abort
import rdb.models.user as User
class Environment(db.Model):
    """SQLAlchemy model for a user's ML environment.

    An environment wraps a Docker container that hosts a Jupyter
    notebook server; the row stores the container handle together with
    the Jupyter connection details.
    """
    __tablename__ = "environment"
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    # Docker container id and the generated (uuid4 hex) container name.
    container_id = db.Column(db.Text, nullable=False)
    container_name = db.Column(db.Text, nullable=False)
    # One of the Environment.Status values ('running' / 'stopped').
    status = db.Column(LowerCaseText, nullable=False)
    # Host port mapped to the container's Jupyter server, plus its token.
    jupyter_port = db.Column(db.Text, nullable=False)
    jupyter_token = db.Column(db.Text, nullable=False)
    # Transient (not persisted) full URL, filled by set_jupyter_url().
    jupyter_url = None
    description = db.Column(db.Text)
    creator_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    # authorized_users = db.relationship('User', lazy='subquery', secondary='user_environment_access')
    image_id = db.Column(db.Integer, db.ForeignKey('image.id'), nullable=False)
    ml_models = db.relationship('MLModel', lazy='select', cascade='delete, delete-orphan', backref='environment')
    created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
    updated_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now(), onupdate=datetime.datetime.now)
    def __init__(self):
        super(Environment, self).__init__()
    def __repr__(self):
        """Display when printing an environment object."""
        return "<ID: {}, Name: {}, description: {}>".format(self.id, self.name, self.description)
    def as_dict(self):
        """Convert the mapped columns of this object to a dictionary."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
    def handle_jupyter_data(self):
        """Expose Jupyter connection data only to admins and the creator."""
        if is_admin_user() or g.user.id == self.creator_id:
            self.set_jupyter_url()
        else:
            self.hide_jupyter_data()
    def start_jupyter(self):
        """Start the notebook server inside the container; records the token."""
        # wait for container api to be up and running
        wait_for_it(self.container_name, 5000)
        # start jupyter notebook and get jupyter token
        resp = requests.post('http://' + self.container_name + ':5000/jupyter').json()
        self.jupyter_token = str(resp['jupyter_token'])
        self.status = Environment.Status.running.value
    def set_jupyter_url(self):
        """Compose the externally reachable Jupyter URL."""
        # TODO: read host address from os - for now use config
        host = config.KETOS_HOST
        self.jupyter_url = host + ':' + self.jupyter_port + '/?token=' + self.jupyter_token
    def hide_jupyter_data(self):
        """Blank out connection details before serving to non-owners."""
        self.jupyter_port = None
        self.jupyter_token = None
        self.jupyter_url = None
    def get_data_directory(self):
        """Host directory that is mounted into the container for model data."""
        return config.KETOS_DATA_FOLDER + '/environments_data/' + self.container_name
    class Status(Enum):
        # Allowed lifecycle states of an environment.
        running = 'running'
        stopped = 'stopped'
def get_open_port():
    """Ask the OS for a currently free TCP port and return its number.

    The probe socket is closed again before returning, so another
    process could grab the port in the meantime (inherent race of this
    technique).
    """
    import socket
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(("", 0))
    probe.listen(1)
    free_port = probe.getsockname()[1]
    probe.close()
    return free_port
def create(name, desc, image_id, raise_abort=True):
    """Create a new environment backed by a freshly started Docker container.

    :param name: display name of the environment.
    :param desc: free-text description.
    :param image_id: id of the Image record the container is run from.
    :param raise_abort: forwarded to Image.get(); aborts when the image
        does not exist.
    :return: the persisted Environment with its Jupyter URL populated.
    """
    e = Environment()
    e.name = name
    e.description = desc
    i = Image.get(image_id, raise_abort=raise_abort)
    e.image_id = i.id
    e.creator_id = g.user.id
    image_name = config.DOCKER_REGISTRY_DOMAIN + "/" + i.name
    # Reserve a free host port for the container's Jupyter server and
    # give the container a unique name.
    e.jupyter_port = get_open_port()
    e.container_name = str(uuid.uuid4().hex)
    container = dockerClient.containers.run(image_name,
                                            name=e.container_name,
                                            detach=True,
                                            network=config.PROJECT_NAME+"_environment",
                                            ports={"8000/tcp": e.jupyter_port},
                                            volumes={e.get_data_directory(): {'bind': '/mlenvironment/models', 'mode': 'rw'},
                                                     config.KETOS_DATA_FOLDER+'/auth': {'bind': '/root/src/auth', 'mode': 'ro'}}
                                            )
    e.container_id = container.id
    e.start_jupyter()
    db.session.add(e)
    db.session.commit()
    e.set_jupyter_url()
    return e
def abort_if_environment_doesnt_exist(env_id):
    """Abort the current request with 404 for an unknown environment id."""
    not_found_message = "environment {} doesn't exist".format(env_id)
    abort(404, message=not_found_message)
def get(env_id, raise_abort=True):
    """Fetch an Environment by primary key.

    :param env_id: id of the environment.
    :param raise_abort: when True, abort the request with 404 if the
        environment does not exist; when False, return None instead.
    :return: the Environment (Jupyter fields prepared for the requesting
        user) or None.
    """
    e = Environment.query.get(env_id)
    if not e:
        if raise_abort:
            abort_if_environment_doesnt_exist(env_id)
        # Bug fix: previously execution fell through to
        # e.handle_jupyter_data() and crashed with AttributeError on None
        # when raise_abort was False.
        return None
    e.handle_jupyter_data()
    return e
def get_all():
    """Return every environment, Jupyter fields filtered per requester."""
    environments = Environment.query.all()
    for environment in environments:
        environment.handle_jupyter_data()
    return environments
def get_all_for_user(user_id):
    """Return all environments created by the given user."""
    environments = Environment.query.filter_by(creator_id=user_id).all()
    for environment in environments:
        environment.handle_jupyter_data()
    return environments
def get_by_image_id(image_id):
    """Return all environments that run the given image."""
    environments = Environment.query.filter_by(image_id=image_id).all()
    for environment in environments:
        environment.handle_jupyter_data()
    return environments
def update(env_id, status=None, name=None, desc=None, raise_abort=True):
    """Update status, name and/or description of an environment.

    A status change also starts or stops the backing Docker container.
    An unknown status aborts with 400 (or returns None when raise_abort
    is False).
    """
    e = get(env_id, raise_abort=raise_abort)
    # Authorization check against the environment's creator.
    User.check_request_for_logged_in_user(e.creator_id)
    if status and not e.status == status:
        if status == Environment.Status.running.value:
            dockerClient.containers.get(e.container_id).start()
            e.start_jupyter()
        elif status == Environment.Status.stopped.value:
            dockerClient.containers.get(e.container_id).stop()
        else:
            if raise_abort:
                abort(400, message="status {} is not allowed".format(status))
            else:
                return None
        e.status = status
    if name:
        e.name = name
    if desc:
        e.description = desc
    db.session.commit()
    return e
def delete(env_id, raise_abort=True):
    """Remove a stopped environment and its container; returns its id.

    Aborts with 405 (or returns None when raise_abort is False) if the
    environment is not stopped.
    """
    environment = get(env_id, raise_abort=raise_abort)
    # Authorization check against the environment's creator.
    User.check_request_for_logged_in_user(environment.creator_id)
    if environment.status != 'stopped':
        if raise_abort:
            abort(405, message="environment must be stopped before it can be deleted")
        return None
    dockerClient.containers.get(environment.container_id).remove(force=True)
    db.session.delete(environment)
    db.session.commit()
    return env_id
| 30.112676 | 128 | 0.63985 | from rdb.rdb import db, LowerCaseText
from enum import Enum
import datetime
from resources.adminAccess import is_admin_user
from flask import g
import rdb.models.image as Image
from dockerUtil.dockerClient import dockerClient, wait_for_it
import config
import requests
import uuid
from flask_restful import abort
import rdb.models.user as User
class Environment(db.Model):
    """SQLAlchemy model for a user's ML environment.

    Wraps a Docker container hosting a Jupyter notebook server; stores
    the container handle together with the Jupyter connection details.
    """
    __tablename__ = "environment"
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    # Docker container id and the generated (uuid4 hex) container name.
    container_id = db.Column(db.Text, nullable=False)
    container_name = db.Column(db.Text, nullable=False)
    # One of the Environment.Status values ('running' / 'stopped').
    status = db.Column(LowerCaseText, nullable=False)
    # Host port mapped to the container's Jupyter server, plus its token.
    jupyter_port = db.Column(db.Text, nullable=False)
    jupyter_token = db.Column(db.Text, nullable=False)
    # Transient (not persisted) full URL, filled by set_jupyter_url().
    jupyter_url = None
    description = db.Column(db.Text)
    creator_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    image_id = db.Column(db.Integer, db.ForeignKey('image.id'), nullable=False)
    ml_models = db.relationship('MLModel', lazy='select', cascade='delete, delete-orphan', backref='environment')
    created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
    updated_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now(), onupdate=datetime.datetime.now)
    def __init__(self):
        super(Environment, self).__init__()
    def __repr__(self):
        """Display when printing an environment object."""
        return "<ID: {}, Name: {}, description: {}>".format(self.id, self.name, self.description)
    def as_dict(self):
        """Convert the mapped columns of this object to a dictionary."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
    def handle_jupyter_data(self):
        """Expose Jupyter connection data only to admins and the creator."""
        if is_admin_user() or g.user.id == self.creator_id:
            self.set_jupyter_url()
        else:
            self.hide_jupyter_data()
    def start_jupyter(self):
        """Start the notebook server inside the container; records the token."""
        # Wait for the container's control API to be up, then ask it to
        # launch Jupyter and hand back the access token.
        wait_for_it(self.container_name, 5000)
        resp = requests.post('http://' + self.container_name + ':5000/jupyter').json()
        self.jupyter_token = str(resp['jupyter_token'])
        self.status = Environment.Status.running.value
    def set_jupyter_url(self):
        """Compose the externally reachable Jupyter URL."""
        host = config.KETOS_HOST
        self.jupyter_url = host + ':' + self.jupyter_port + '/?token=' + self.jupyter_token
    def hide_jupyter_data(self):
        """Blank out connection details before serving to non-owners."""
        self.jupyter_port = None
        self.jupyter_token = None
        self.jupyter_url = None
    def get_data_directory(self):
        """Host directory that is mounted into the container for model data."""
        return config.KETOS_DATA_FOLDER + '/environments_data/' + self.container_name
    class Status(Enum):
        # Allowed lifecycle states of an environment.
        running = 'running'
        stopped = 'stopped'
def get_open_port():
    """Ask the OS for a currently free TCP port and return its number.

    The probe socket is closed before returning, so the port could be
    taken by another process in the meantime (inherent race).
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    s.listen(1)
    port = s.getsockname()[1]
    s.close()
    return port
def create(name, desc, image_id, raise_abort=True):
    """Create a new environment backed by a freshly started Docker container.

    :param name: display name of the environment.
    :param desc: free-text description.
    :param image_id: id of the Image record the container is run from.
    :param raise_abort: forwarded to Image.get(); aborts when the image
        does not exist.
    :return: the persisted Environment with its Jupyter URL populated.
    """
    e = Environment()
    e.name = name
    e.description = desc
    i = Image.get(image_id, raise_abort=raise_abort)
    e.image_id = i.id
    e.creator_id = g.user.id
    image_name = config.DOCKER_REGISTRY_DOMAIN + "/" + i.name
    # Reserve a free host port for Jupyter and a unique container name.
    e.jupyter_port = get_open_port()
    e.container_name = str(uuid.uuid4().hex)
    container = dockerClient.containers.run(image_name,
                                            name=e.container_name,
                                            detach=True,
                                            network=config.PROJECT_NAME+"_environment",
                                            ports={"8000/tcp": e.jupyter_port},
                                            volumes={e.get_data_directory(): {'bind': '/mlenvironment/models', 'mode': 'rw'},
                                                     config.KETOS_DATA_FOLDER+'/auth': {'bind': '/root/src/auth', 'mode': 'ro'}}
                                            )
    e.container_id = container.id
    e.start_jupyter()
    db.session.add(e)
    db.session.commit()
    e.set_jupyter_url()
    return e
def abort_if_environment_doesnt_exist(env_id):
    """Abort the current request with 404 for an unknown environment id."""
    abort(404, message="environment {} doesn't exist".format(env_id))
def get(env_id, raise_abort=True):
    """Fetch an Environment by primary key.

    :param env_id: id of the environment.
    :param raise_abort: when True, abort the request with 404 if the
        environment does not exist; when False, return None instead.
    :return: the Environment (Jupyter fields prepared for the requesting
        user) or None.
    """
    e = Environment.query.get(env_id)
    if not e:
        if raise_abort:
            abort_if_environment_doesnt_exist(env_id)
        # Bug fix: previously execution fell through to
        # e.handle_jupyter_data() and crashed with AttributeError on None
        # when raise_abort was False.
        return None
    e.handle_jupyter_data()
    return e
def get_all():
    """Return every environment, Jupyter fields filtered per requester."""
    envs = Environment.query.all()
    for e in envs:
        e.handle_jupyter_data()
    return envs
def get_all_for_user(user_id):
    """Return all environments created by the given user."""
    envs = Environment.query.filter_by(creator_id=user_id).all()
    for e in envs:
        e.handle_jupyter_data()
    return envs
def get_by_image_id(image_id):
    """Return all environments that run the given image."""
    envs = Environment.query.filter_by(image_id=image_id).all()
    for e in envs:
        e.handle_jupyter_data()
    return envs
def update(env_id, status=None, name=None, desc=None, raise_abort=True):
    """Update status, name and/or description of an environment.

    A status change also starts or stops the backing Docker container.
    An unknown status aborts with 400 (or returns None when raise_abort
    is False).
    """
    e = get(env_id, raise_abort=raise_abort)
    # Authorization check against the environment's creator.
    User.check_request_for_logged_in_user(e.creator_id)
    if status and not e.status == status:
        if status == Environment.Status.running.value:
            dockerClient.containers.get(e.container_id).start()
            e.start_jupyter()
        elif status == Environment.Status.stopped.value:
            dockerClient.containers.get(e.container_id).stop()
        else:
            if raise_abort:
                abort(400, message="status {} is not allowed".format(status))
            else:
                return None
        e.status = status
    if name:
        e.name = name
    if desc:
        e.description = desc
    db.session.commit()
    return e
def delete(env_id, raise_abort=True):
    """Remove a stopped environment and its container; returns its id.

    Aborts with 405 (or returns None when raise_abort is False) if the
    environment is not stopped.
    """
    e = get(env_id, raise_abort=raise_abort)
    # Authorization check against the environment's creator.
    User.check_request_for_logged_in_user(e.creator_id)
    if not e.status == 'stopped':
        if raise_abort:
            abort(405, message="environment must be stopped before it can be deleted")
        else:
            return None
    container = dockerClient.containers.get(e.container_id)
    container.remove(force=True)
    db.session.delete(e)
    db.session.commit()
    return env_id
| true | true |
f736369cbe9d074b967f6f6e71550893a185fd79 | 4,636 | py | Python | src/ball_window.py | gltchitm/desktop-pong | aff92e8fc683d72e6139b33b56736dc2fe9c3ca0 | [
"MIT"
] | null | null | null | src/ball_window.py | gltchitm/desktop-pong | aff92e8fc683d72e6139b33b56736dc2fe9c3ca0 | [
"MIT"
] | null | null | null | src/ball_window.py | gltchitm/desktop-pong | aff92e8fc683d72e6139b33b56736dc2fe9c3ca0 | [
"MIT"
] | null | null | null | from gi.repository import Gtk, Gdk
import cairo
from random import getrandbits
from ball_drawing_area import BallDrawingArea
from store import store
from near import near
import config
def rand_velocity():
    """Return the configured ball speed with a randomly chosen sign."""
    direction = 1 if getrandbits(1) else -1
    return direction * config.BALL_VELOCITY
class BallWindow(Gtk.Window):
    """Borderless, always-on-top window that renders and animates the ball.

    Physics run in ``tick`` (a GTK frame tick callback); paddle and ball
    positions are shared with the rest of the game via the global
    ``store``.
    """
    def __init__(self, window_size, x=None, y=None, x_velocity=None, y_velocity=None):
        """``window_size`` is the playfield (width, height); position and
        velocities are carried over on rebuilds and default to the
        window-manager placement and a random direction otherwise."""
        Gtk.Window.__init__(self)
        self.window_size = window_size
        self.x = x
        self.y = y
        self.x_velocity = x_velocity if x_velocity != None else rand_velocity()
        self.y_velocity = y_velocity if y_velocity != None else rand_velocity()
        # Guards against re-entrant rebuild() calls while destroying.
        self.rebuilding = False
        # Undecorated, transparent-capable, always-on-top overlay window
        # that stays out of the taskbar/pager.
        self.set_app_paintable(True)
        self.set_decorated(False)
        self.set_accept_focus(False)
        self.set_keep_above(True)
        self.set_skip_taskbar_hint(True)
        self.set_skip_pager_hint(True)
        self.set_deletable(False)
        self.set_size_request(config.BALL_DIAMETER, config.BALL_DIAMETER)
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_visual(self.get_screen().get_rgba_visual())
        self.add_tick_callback(self.tick)
        if self.x and self.y:
            self.move(self.x, self.y)
        self.connect('realize', self.realize)
        self.connect('draw', self.draw)
        self.connect('window-state-event', self.check_minimized)
    def rebuild(self, use_current_position):
        """Replace this window with a fresh BallWindow, optionally keeping
        the current position/velocity (used after tampering, wall hits and
        minimize events)."""
        if not self.rebuilding:
            if config.USE_ANTI_TAMPER or not use_current_position:
                self.rebuilding = True
                self.destroy()
            if use_current_position:
                self = BallWindow(self.window_size, self.x, self.y, self.x_velocity, self.y_velocity)
            else:
                self = BallWindow(self.window_size)
            self.show_all()
    def tick(self, _widget, _frame_clock):
        """Advance the ball one frame; returning True keeps the callback."""
        if config.USE_ANTI_TAMPER:
            self.set_keep_above(True)
            self.get_window().move_to_desktop(0)
        current_x, current_y = self.get_position()
        ai_x, ai_y = store['ai_paddle']
        player_x, player_y = store['player_paddle']
        width, height = self.window_size
        # Paddles not placed yet -- skip physics this frame.
        if ai_x == None or player_x == None:
            return True
        # Left/right wall: undo the move and respawn a fresh ball.
        if (
            current_x < config.SCREEN_PADDING or
            current_x + config.BALL_DIAMETER > width - config.SCREEN_PADDING
        ):
            self.x -= self.x_velocity
            self.rebuild(False)
        # Top/bottom wall: bounce vertically.
        if (
            current_y < config.SCREEN_PADDING or
            current_y + config.BALL_DIAMETER > height - config.SCREEN_PADDING
        ):
            self.y -= self.y_velocity
            self.y_velocity *= -1
        # AI paddle collision: bounce horizontally.
        if (
            self.x < ai_x + (config.BALL_DIAMETER / 2 + config.BALL_PADDING) and
            self.y + (config.BALL_DIAMETER + config.BALL_PADDING) > ai_y and
            self.y < ai_y + config.PADDLE_SIZE[1] + config.BALL_PADDING
        ):
            self.x_velocity *= -1
            if config.USE_BALL_STUCK_IN_PADDLE_FIX:
                self.x = ai_x + (config.BALL_DIAMETER / 2 + config.BALL_PADDING)
        # Player paddle collision: bounce horizontally.
        if (
            self.x > player_x - (config.BALL_DIAMETER + config.BALL_PADDING) and
            self.y + (config.BALL_DIAMETER + config.BALL_PADDING) > player_y and
            self.y < player_y + config.PADDLE_SIZE[1] + config.BALL_PADDING
        ):
            self.x_velocity *= -1
            if config.USE_BALL_STUCK_IN_PADDLE_FIX:
                self.x = player_x - (config.BALL_DIAMETER + config.BALL_PADDING)
        # The real window drifted from the simulated position (e.g. the
        # user dragged it) -- rebuild at the simulated spot.
        if not near(self.x, current_x, config.BALL_LEEWAY) or not near(self.y, current_y, config.BALL_LEEWAY):
            self.rebuild(True)
        self.x += self.x_velocity
        self.y += self.y_velocity
        self.move(self.x, self.y)
        store['ball_position'] = (self.x, self.y, self.x_velocity, self.y_velocity)
        return True
    def check_minimized(self, _widget, event):
        """Respawn the window if the user minimizes it."""
        if event.new_window_state & Gdk.WindowState.ICONIFIED:
            self.rebuild(True)
    def realize(self, _widget):
        """Adopt the actual placement, add the drawing area, set cursor."""
        current_x, current_y = self.get_position()
        self.x = current_x
        self.y = current_y
        ball = BallDrawingArea(self.get_window())
        self.add(ball)
        ball.show_all()
        cursor = Gdk.Cursor.new_from_name(Gdk.Display.get_default(), 'not-allowed')
        self.get_window().set_cursor(cursor)
    def draw(self, _widget, cr):
        """Paint a fully transparent background behind the ball."""
        cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
        return False
| 34.597015 | 110 | 0.622951 | from gi.repository import Gtk, Gdk
import cairo
from random import getrandbits
from ball_drawing_area import BallDrawingArea
from store import store
from near import near
import config
def rand_velocity():
    """Return the configured ball speed with a randomly chosen sign."""
    return config.BALL_VELOCITY if getrandbits(1) else -config.BALL_VELOCITY
class BallWindow(Gtk.Window):
    """Borderless, always-on-top window that renders and animates the ball.

    Physics run in ``tick`` (a GTK frame tick callback); paddle and ball
    positions are shared with the rest of the game via the global
    ``store``.
    """
    def __init__(self, window_size, x=None, y=None, x_velocity=None, y_velocity=None):
        """``window_size`` is the playfield (width, height); position and
        velocities are carried over on rebuilds and default to the
        window-manager placement and a random direction otherwise."""
        Gtk.Window.__init__(self)
        self.window_size = window_size
        self.x = x
        self.y = y
        self.x_velocity = x_velocity if x_velocity != None else rand_velocity()
        self.y_velocity = y_velocity if y_velocity != None else rand_velocity()
        # Guards against re-entrant rebuild() calls while destroying.
        self.rebuilding = False
        # Undecorated, transparent-capable, always-on-top overlay window
        # that stays out of the taskbar/pager.
        self.set_app_paintable(True)
        self.set_decorated(False)
        self.set_accept_focus(False)
        self.set_keep_above(True)
        self.set_skip_taskbar_hint(True)
        self.set_skip_pager_hint(True)
        self.set_deletable(False)
        self.set_size_request(config.BALL_DIAMETER, config.BALL_DIAMETER)
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_visual(self.get_screen().get_rgba_visual())
        self.add_tick_callback(self.tick)
        if self.x and self.y:
            self.move(self.x, self.y)
        self.connect('realize', self.realize)
        self.connect('draw', self.draw)
        self.connect('window-state-event', self.check_minimized)
    def rebuild(self, use_current_position):
        """Replace this window with a fresh BallWindow, optionally keeping
        the current position/velocity (used after tampering, wall hits and
        minimize events)."""
        if not self.rebuilding:
            if config.USE_ANTI_TAMPER or not use_current_position:
                self.rebuilding = True
                self.destroy()
            if use_current_position:
                self = BallWindow(self.window_size, self.x, self.y, self.x_velocity, self.y_velocity)
            else:
                self = BallWindow(self.window_size)
            self.show_all()
    def tick(self, _widget, _frame_clock):
        """Advance the ball one frame; returning True keeps the callback."""
        if config.USE_ANTI_TAMPER:
            self.set_keep_above(True)
            self.get_window().move_to_desktop(0)
        current_x, current_y = self.get_position()
        ai_x, ai_y = store['ai_paddle']
        player_x, player_y = store['player_paddle']
        width, height = self.window_size
        # Paddles not placed yet -- skip physics this frame.
        if ai_x == None or player_x == None:
            return True
        # Left/right wall: undo the move and respawn a fresh ball.
        if (
            current_x < config.SCREEN_PADDING or
            current_x + config.BALL_DIAMETER > width - config.SCREEN_PADDING
        ):
            self.x -= self.x_velocity
            self.rebuild(False)
        # Top/bottom wall: bounce vertically.
        if (
            current_y < config.SCREEN_PADDING or
            current_y + config.BALL_DIAMETER > height - config.SCREEN_PADDING
        ):
            self.y -= self.y_velocity
            self.y_velocity *= -1
        # AI paddle collision: bounce horizontally.
        if (
            self.x < ai_x + (config.BALL_DIAMETER / 2 + config.BALL_PADDING) and
            self.y + (config.BALL_DIAMETER + config.BALL_PADDING) > ai_y and
            self.y < ai_y + config.PADDLE_SIZE[1] + config.BALL_PADDING
        ):
            self.x_velocity *= -1
            if config.USE_BALL_STUCK_IN_PADDLE_FIX:
                self.x = ai_x + (config.BALL_DIAMETER / 2 + config.BALL_PADDING)
        # Player paddle collision: bounce horizontally.
        if (
            self.x > player_x - (config.BALL_DIAMETER + config.BALL_PADDING) and
            self.y + (config.BALL_DIAMETER + config.BALL_PADDING) > player_y and
            self.y < player_y + config.PADDLE_SIZE[1] + config.BALL_PADDING
        ):
            self.x_velocity *= -1
            if config.USE_BALL_STUCK_IN_PADDLE_FIX:
                self.x = player_x - (config.BALL_DIAMETER + config.BALL_PADDING)
        # The real window drifted from the simulated position (e.g. the
        # user dragged it) -- rebuild at the simulated spot.
        if not near(self.x, current_x, config.BALL_LEEWAY) or not near(self.y, current_y, config.BALL_LEEWAY):
            self.rebuild(True)
        self.x += self.x_velocity
        self.y += self.y_velocity
        self.move(self.x, self.y)
        store['ball_position'] = (self.x, self.y, self.x_velocity, self.y_velocity)
        return True
    def check_minimized(self, _widget, event):
        """Respawn the window if the user minimizes it."""
        if event.new_window_state & Gdk.WindowState.ICONIFIED:
            self.rebuild(True)
    def realize(self, _widget):
        """Adopt the actual placement, add the drawing area, set cursor."""
        current_x, current_y = self.get_position()
        self.x = current_x
        self.y = current_y
        ball = BallDrawingArea(self.get_window())
        self.add(ball)
        ball.show_all()
        cursor = Gdk.Cursor.new_from_name(Gdk.Display.get_default(), 'not-allowed')
        self.get_window().set_cursor(cursor)
    def draw(self, _widget, cr):
        """Paint a fully transparent background behind the ball."""
        cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
        return False
| true | true |
f73637169849c878d792f9992621b4bc541554d0 | 1,918 | py | Python | task/collect_thirdparty_licenses.py | yshrk1597/nimclosedenv | 2fc92e4e3b03e9f9aed92642af742ac4d4ffb1e6 | [
"MIT"
] | null | null | null | task/collect_thirdparty_licenses.py | yshrk1597/nimclosedenv | 2fc92e4e3b03e9f9aed92642af742ac4d4ffb1e6 | [
"MIT"
] | null | null | null | task/collect_thirdparty_licenses.py | yshrk1597/nimclosedenv | 2fc92e4e3b03e9f9aed92642af742ac4d4ffb1e6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os
import pathlib
import json
import urllib.parse
import requests
'''
json format
[
{
"name": "",
"url": "",
"category":"",
"filename-format":""
},
]
'''
def download(session, url, filename):
    """Fetch *url* with *session* and write the response body to *filename*.

    Exits the process with status 1 when the server does not answer OK.
    """
    print(f"get {url}")
    r = session.get(url)
    if r.status_code == requests.codes.ok:
        # Bug fix: the f-string had no placeholder ("save (unknown)");
        # report the actual destination file instead.
        print(f"save {filename}")
        data = r.text
        with open(filename, "w", encoding='utf-8') as f:
            f.write(data)
    else:
        print(f"failed {url}")
        print(f"status_code = {r.status_code}")
        # Bug fix: sys.quit() does not exist (AttributeError at runtime);
        # sys.exit() is the correct call.
        sys.exit(1)
if __name__ == '__main__':
    if len(sys.argv) >= 2:
        # First CLI argument: JSON file listing the licenses to fetch
        # (see the module docstring for the expected format).
        settingfilename = sys.argv[1]
        setting = None
        with open(settingfilename, encoding='utf-8') as f:
            setting = json.load(f)
        if setting is not None:
            with requests.Session() as session:
                session.headers.update({'Accept': '*/*'})
                for p in setting:
                    name = p["name"]
                    url = p["url"]
                    category = p["category"]
                    # Last path segment of the URL, used in the default
                    # output file name.
                    urlfilename = urllib.parse.urlsplit(url).path.split('/')[-1]
                    if "filename-format" in p and len(p["filename-format"]) > 0:
                        f = p["filename-format"]
                    else:
                        if len(category) > 0:
                            f = "{category}/{name}-{urlfilename}"
                        else:
                            f = "{name}-{urlfilename}"
                    filename = f.format(name=name, category=category, urlfilename=urlfilename)
                    # Create the target directory when the file name
                    # contains a sub-path.
                    parent_dir = pathlib.Path(filename).parent
                    if str(parent_dir) != ".":
                        parent_dir.mkdir(parents=True, exist_ok=True)
                    download(session, url, filename)
    else:
        print("must arg \"jsonfile\"")
        # Bug fix: sys.quit() does not exist; exit with status 1.
        sys.exit(1)
import sys
import os
import pathlib
import json
import urllib.parse
import requests
def download(session, url, filename):
    """Fetch *url* with *session* and write the response body to *filename*.

    Exits the process with status 1 when the server does not answer OK.
    """
    print(f"get {url}")
    r = session.get(url)
    if r.status_code == requests.codes.ok:
        # Bug fix: the f-string had no placeholder ("save (unknown)");
        # report the actual destination file instead.
        print(f"save {filename}")
        data = r.text
        with open(filename, "w", encoding='utf-8') as f:
            f.write(data)
    else:
        print(f"failed {url}")
        print(f"status_code = {r.status_code}")
        # Bug fix: sys.quit() does not exist (AttributeError at runtime);
        # sys.exit() is the correct call.
        sys.exit(1)
if __name__ == '__main__':
    if len(sys.argv) >= 2:
        # First CLI argument: JSON file listing the licenses to fetch.
        settingfilename = sys.argv[1]
        setting = None
        with open(settingfilename, encoding='utf-8') as f:
            setting = json.load(f)
        if setting is not None:
            with requests.Session() as session:
                session.headers.update({'Accept': '*/*'})
                for p in setting:
                    name = p["name"]
                    url = p["url"]
                    category = p["category"]
                    # Last path segment of the URL, used in the default
                    # output file name.
                    urlfilename = urllib.parse.urlsplit(url).path.split('/')[-1]
                    if "filename-format" in p and len(p["filename-format"]) > 0:
                        f = p["filename-format"]
                    else:
                        if len(category) > 0:
                            f = "{category}/{name}-{urlfilename}"
                        else:
                            f = "{name}-{urlfilename}"
                    filename = f.format(name=name, category=category, urlfilename=urlfilename)
                    # Create the target directory when the file name
                    # contains a sub-path.
                    parent_dir = pathlib.Path(filename).parent
                    if str(parent_dir) != ".":
                        parent_dir.mkdir(parents=True, exist_ok=True)
                    download(session, url, filename)
    else:
        print("must arg \"jsonfile\"")
        # Bug fix: sys.quit() does not exist; exit with status 1.
        sys.exit(1)
f7363798e2b7842fd9105ce3f50bac3499622d10 | 4,632 | py | Python | fortnox/services/cost_center_services.py | xalien10/fortnox-python | 7c5fe29a8adaa5a21288df4495996e20515ba8a7 | [
"MIT"
] | 21 | 2020-03-21T14:49:33.000Z | 2022-02-02T12:46:08.000Z | fortnox/services/cost_center_services.py | xalien10/fortnox-python | 7c5fe29a8adaa5a21288df4495996e20515ba8a7 | [
"MIT"
] | 5 | 2020-07-03T18:55:48.000Z | 2021-11-02T10:25:32.000Z | fortnox/services/cost_center_services.py | xalien10/fortnox-python | 7c5fe29a8adaa5a21288df4495996e20515ba8a7 | [
"MIT"
] | 3 | 2020-06-08T06:23:50.000Z | 2021-06-10T18:28:32.000Z | class CostCenterService(object):
"""
:class:`fortnox.CostCenterService` is used by :class:`fortnox.Client` to make
actions related to CostCenter resource.
Normally you won't instantiate this class directly.
"""
"""
Allowed attributes for CostCenter to send to Fortnox backend servers.
"""
OPTS_KEYS_TO_PERSIST = ['Code', 'Description']
SERVICE = "CostCenter"
def __init__(self, http_client):
"""
:param :class:`fortnox.HttpClient` http_client: Pre configured high-level http client.
"""
self.__http_client = http_client
@property
def http_client(self):
return self.__http_client
def list(self, **params):
"""
Retrieve all CostCenter
Returns all CostCenter available to the Company, according to the parameters provided
:calls: ``get /costcenters``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attriubte-style access, which represent collection of CostCenter.
:rtype: list
"""
_, _, cost_centers = self.http_client.get("/costcenters", params=params)
return cost_centers
def retrieve(self, code):
"""
Retrieve a single CostCenter
Returns a single CostCenter according to the unique CostCenter ID provided
If the specified CostCenter does not exist, this query returns an error
:calls: ``get /costcenters/{code}``
:param int id: Unique identifier of a CostCenter.
:return: Dictionary that support attriubte-style access and represent CostCenter resource.
:rtype: dict
"""
_, _, cost_center = self.http_client.get("/costcenters/{code}".format(code=code))
return cost_center
def create(self, *args, **kwargs):
"""
Create a CostCenter
Creates a new CostCenter
**Notice** the CostCenter's name **must** be unique within the scope of the resource_type
:calls: ``post /costcenters``
:param tuple *args: (optional) Single object representing CostCenter resource.
:param dict **kwargs: (optional) cost_center attributes.
:return: Dictionary that support attriubte-style access and represents newely created CostCenter resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for CostCenter are missing')
initial_attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in initial_attributes.items())
attributes.update({'service': self.SERVICE})
_, _, cost_center = self.http_client.post("/costcenters", body=attributes)
return cost_center
def update(self, code, *args, **kwargs):
    """
    Update a CostCenter.

    Updates a CostCenter's information. If the specified CostCenter does
    not exist, this query will return an error. **Notice** if you want to
    update a CostCenter, you **must** make sure the CostCenter's name is
    unique within the scope of the specified resource.

    :calls: ``put /costcenters/{code}``
    :param code: Unique identifier of a CostCenter.
    :param tuple args: (optional) Single mapping of CostCenter attributes
        to update.
    :param dict kwargs: (optional) CostCenter attributes to update.
    :return: Dictionary that supports attribute-style access, representing
        the updated CostCenter resource.
    :rtype: dict
    :raises Exception: If no attributes were supplied at all.
    """
    if not args and not kwargs:
        raise Exception('attributes for CostCenter are missing')
    initial_attributes = args[0] if args else kwargs
    # Shallow-copy so the caller's mapping is not mutated by the update
    # below; dict() replaces the hand-rolled generator expression.
    attributes = dict(initial_attributes)
    attributes.update({'service': self.SERVICE})
    _, _, cost_center = self.http_client.put(
        "/costcenters/{code}".format(code=code), body=attributes)
    return cost_center
def destroy(self, code):
    """
    Delete a CostCenter.

    Deletes an existing CostCenter; if the CostCenter is assigned to any
    resource, it is removed from all such resources. If the specified
    CostCenter does not exist, this query will return an error. This
    operation cannot be undone.

    :calls: ``delete /costcenters/{code}``
    :param code: Unique identifier of a CostCenter.
    :return: True if the operation succeeded.
    :rtype: bool
    """
    response = self.http_client.delete(f"/costcenters/{code}")
    # A 204 No Content status signals successful deletion.
    return response[0] == 204
| 38.280992 | 152 | 0.656952 | class CostCenterService(object):
OPTS_KEYS_TO_PERSIST = ['Code', 'Description']
SERVICE = "CostCenter"
def __init__(self, http_client):
self.__http_client = http_client
@property
def http_client(self):
return self.__http_client
def list(self, **params):
_, _, cost_centers = self.http_client.get("/costcenters", params=params)
return cost_centers
def retrieve(self, code):
_, _, cost_center = self.http_client.get("/costcenters/{code}".format(code=code))
return cost_center
def create(self, *args, **kwargs):
if not args and not kwargs:
raise Exception('attributes for CostCenter are missing')
initial_attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in initial_attributes.items())
attributes.update({'service': self.SERVICE})
_, _, cost_center = self.http_client.post("/costcenters", body=attributes)
return cost_center
def update(self, code, *args, **kwargs):
if not args and not kwargs:
raise Exception('attributes for CostCenter are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.items())
attributes.update({'service': self.SERVICE})
_, _, cost_center = self.http_client.put("/costcenters/{code}".format(code=code), body=attributes)
return cost_center
def destroy(self, code):
status_code, _, _ = self.http_client.delete("/costcenters/{code}".format(code=code))
return status_code == 204
| true | true |
f736383903fae4d8d7d3cbcadabd5161e1f75a1b | 5,125 | py | Python | src/foolysh/fsm.py | tcdude/foolysh | dedd6eb655b9eab1d1ac90c6624b0ca92f54486f | [
"MIT"
] | 2 | 2019-05-04T01:22:55.000Z | 2019-10-04T10:48:05.000Z | src/foolysh/fsm.py | tcdude/foolysh | dedd6eb655b9eab1d1ac90c6624b0ca92f54486f | [
"MIT"
] | 5 | 2020-03-09T16:15:13.000Z | 2022-01-13T02:02:27.000Z | src/foolysh/fsm.py | tcdude/foolysh | dedd6eb655b9eab1d1ac90c6624b0ca92f54486f | [
"MIT"
] | null | null | null | """
Provides the FSM class, a rudimentary implementation of a Finite State Machine.
"""
from typing import Any, Callable, Dict, List, Optional, Tuple
import sdl2
from .tools.common import to_snake_case
__author__ = 'Tiziano Bettio'
__license__ = 'MIT'
__version__ = '0.1'
__copyright__ = """Copyright (c) 2020 Tiziano Bettio
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
class FSM:
    """
    Rudimentary Finite State Machine to organize state changes. If both an
    `enter_[my_class_name]` and `exit_[my_class_name]` method are provided
    in a subclass of FSM, it becomes a state that can be activated through
    the :meth:`FSM.request` method. `[my_class_name]` is a snake_case
    representation of the name of the subclass, assuming PascalCase class
    names (i.e. `MyClass` -> `my_class`). Use
    :func:`~foolysh.tools.common.to_snake_case` with your class name as
    parameter to determine the proper state name.

    When :meth:`FSM.request` is called the following actions are performed:

    1. The `exit_` method of the current state gets called.
    2. The `enter_` method of the requested state gets called.

    .. note::
        Only provide `enter_` / `exit_` methods for subclasses that should
        be callable states.
    """
    # Mapping of state name -> (enter_method, exit_method).
    __states: Dict[str, Tuple[Callable, Callable]] = None
    # Name of the currently active state, or None before the first request.
    __active_state: Optional[str] = None
    # Stack of previously active states, consumed by :meth:`fsm_back`.
    __history: List[str] = None
    # Per-state data dicts plus the shared '-global-' dict.
    __fsm_data: Dict[str, Any] = None

    def __setup_fsm(self):
        """Scan the MRO and register every subclass exposing enter_/exit_."""
        import warnings
        mro = [i.__name__ for i in self.__class__.__mro__]
        # Only classes between the concrete type and FSM itself qualify.
        mro = mro[:mro.index('FSM')]
        self.__states = {}
        self.__history = []
        self.__fsm_data = {}
        self.__fsm_data['-global-'] = {}
        for i in mro:
            name = to_snake_case(i)
            enterm = getattr(self, f'enter_{name}', False)
            exitm = getattr(self, f'exit_{name}', False)
            if enterm and exitm:
                self.__states[name] = enterm, exitm
                self.__fsm_data[name] = None
            else:
                # Bug fix: the original instantiated a Warning object and
                # discarded it without emitting; actually issue the warning.
                warnings.warn(f'Class "{i}" does not expose enter and exit '
                              f'methods. State not registered!')

    def request(self, state_name: str, back: bool = False) -> None:
        """
        Performs the transition to a registered state.

        Args:
            state_name: Name of the state to activate.
            back: Internal flag used by :meth:`fsm_back` so that going back
                does not push the current state onto the history again.

        Raises:
            ValueError: If the provided `state_name` is not registered.
        """
        if not self.__states:
            self.__setup_fsm()
        if state_name not in self.__states:
            raise ValueError(f'Unknown state "{state_name}".')
        if self.__active_state == state_name:
            return
        if self.__active_state is not None:
            # Leave the current state before entering the new one.
            self.__states[self.__active_state][1]()
            if not back:
                self.__history.append(self.__active_state)
        self.__active_state = state_name
        self.__states[state_name][0]()
        sdl2.SDL_StopTextInput()  # Ensure on-screen kbd gets hidden

    def fsm_back(self) -> None:
        """
        Performs the transition to the last known state in the history.
        Does nothing if the history is empty.
        """
        if not self.__history:
            return
        self.request(self.__history.pop(), True)

    @property
    def active_state(self) -> str:
        """The currently active state."""
        return self.__active_state

    @property
    def previous_state(self) -> str:
        """The previous state before the last transition, or ``None``."""
        if self.__history:
            return self.__history[-1]
        return None

    @property
    def fsm_data(self) -> Any:
        """The FSM data dict stored for the active state."""
        if self.__active_state is None:
            # Fixed: was a needless f-string without placeholders.
            raise ValueError('No state set yet.')
        if self.__fsm_data[self.__active_state] is None:
            # Lazily create the per-state dict on first access.
            self.__fsm_data[self.__active_state] = {}
        return self.__fsm_data[self.__active_state]

    @property
    def fsm_global_data(self) -> Dict[str, Any]:
        """
        A data dict accessible from any state, potentially useful for
        passing information between states.
        """
        return self.__fsm_data['-global-']
| 38.246269 | 79 | 0.658537 |
from typing import Any, Callable, Dict, List, Optional, Tuple
import sdl2
from .tools.common import to_snake_case
__author__ = 'Tiziano Bettio'
__license__ = 'MIT'
__version__ = '0.1'
__copyright__ = """Copyright (c) 2020 Tiziano Bettio
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
class FSM:
__states: Dict[str, Tuple[Callable, Callable]] = None
__active_state: Optional[str] = None
__history: List[str] = None
__fsm_data: Dict[str, Any] = None
def __setup_fsm(self):
mro = [i.__name__ for i in self.__class__.__mro__]
mro = mro[:mro.index('FSM')]
self.__states = {}
self.__history = []
self.__fsm_data = {}
self.__fsm_data['-global-'] = {}
for i in mro:
name = to_snake_case(i)
enterm = getattr(self, f'enter_{name}', False)
exitm = getattr(self, f'exit_{name}', False)
if enterm and exitm:
self.__states[name] = enterm, exitm
self.__fsm_data[name] = None
else:
Warning(f'Class "{i}" does not expose enter and exit methods. '
f'State not registered!')
def request(self, state_name: str, back: bool = False) -> None:
if not self.__states:
self.__setup_fsm()
if state_name not in self.__states:
raise ValueError(f'Unknown state "{state_name}".')
if self.__active_state == state_name:
return
if self.__active_state is not None:
self.__states[self.__active_state][1]()
if not back:
self.__history.append(self.__active_state)
self.__active_state = state_name
self.__states[state_name][0]()
sdl2.SDL_StopTextInput()
def fsm_back(self) -> None:
if not self.__history:
return
self.request(self.__history.pop(), True)
@property
def active_state(self) -> str:
return self.__active_state
@property
def previous_state(self) -> str:
if self.__history:
return self.__history[-1]
return None
@property
def fsm_data(self) -> Any:
if self.__active_state is None:
raise ValueError(f'No state set yet.')
if self.__fsm_data[self.__active_state] is None:
self.__fsm_data[self.__active_state] = {}
return self.__fsm_data[self.__active_state]
@property
def fsm_global_data(self) -> Dict[str, Any]:
return self.__fsm_data['-global-']
| true | true |
f736392d8c764845b6ae8e78f7c882654f9d241a | 10,071 | py | Python | impactutils/transfer/ftpsender.py | cbworden/earthquake-impact-utils | fff41e41668896110bebbc7054a36dd60779655b | [
"CC0-1.0"
] | 3 | 2017-04-11T21:30:20.000Z | 2018-02-28T23:17:30.000Z | impactutils/transfer/ftpsender.py | cbworden/earthquake-impact-utils | fff41e41668896110bebbc7054a36dd60779655b | [
"CC0-1.0"
] | 103 | 2016-08-26T15:05:35.000Z | 2021-02-10T16:24:55.000Z | impactutils/transfer/ftpsender.py | cbworden/earthquake-impact-utils | fff41e41668896110bebbc7054a36dd60779655b | [
"CC0-1.0"
] | 9 | 2016-08-24T01:07:40.000Z | 2022-03-01T10:03:27.000Z | #!/usr/bin/env python
# stdlib imports
from ftplib import FTP, error_perm
import os.path
import shutil
import tempfile
# local
from .sender import Sender
class FTPSender(Sender):
    '''Class for sending and deleting files and directories via FTP.

    Required properties:
      - remote_host       Name of FTP server.
      - remote_directory  String path on remote_host where local files
                          should be copied to.

    Optional properties:
      - user      String user name, for FTP servers where anonymous login
                  is not allowed.
      - password  String password, for FTP servers where anonymous login
                  is not allowed.

    Usage:
      sender = FTPSender(properties={'remote_host': 'ftp.gov',
                                     'remote_directory': '/pub/incoming/event1'},
                         local_directory='/home/user/event1')
      sender.send()  # => creates ftp://ftp.gov/pub/incoming/event1 with the
                     #    contents of /home/user/event1 in it.

      sender = FTPSender(properties={'remote_host': 'ftp.gov',
                                     'remote_directory': '/pub/incoming/event1'},
                         local_files=['/home/user/event1/version1/file1.txt'])
      sender.send()  # => creates ftp://ftp.gov/pub/incoming/event1/file1.txt
    '''
    _required_properties = ['remote_directory', 'remote_host']
    _optional_properties = ['user', 'password']

    def send(self):
        '''
        Send any files or folders that have been passed to constructor.

        Returns:
            Tuple of number of files sent to the remote FTP server and a
            message describing success.

        Raises:
            Exception when files cannot be sent to the remote FTP server
            for any reason.
        '''
        remote_host = self._properties['remote_host']
        remote_folder = self._properties['remote_directory']
        try:
            # this should put us at the top level folder
            ftp = self._setup()
            # send any individual files we were given
            nfiles = 0
            for f in self._local_files:
                self.__sendfile(f, ftp)
                nfiles += 1
            # send everything in the directory we specified
            if self._local_directory is not None:
                local_directory = self._local_directory
                allfiles = self.getAllLocalFiles()
                for filename in allfiles:
                    try:
                        self._copy_file_with_path(
                            ftp, filename, remote_folder,
                            local_folder=local_directory)
                        nfiles += 1
                    except Exception:
                        # Best effort: skip files that fail to copy. The
                        # original bare "except" also swallowed SystemExit
                        # and KeyboardInterrupt; this does not.
                        continue
            ftp.quit()
            return (nfiles, f'{int(nfiles):d} files were sent successfully to {remote_host} {remote_folder}')
        except Exception as obj:
            # Bug fix: the original referenced an undefined name "host"
            # here, raising NameError and masking the real failure.
            raise Exception(
                f'Could not send to {remote_host}. Error "{str(obj)}"')

    def cancel(self):
        """
        Create a cancel file (named as indicated in constructor
        "cancelfile" parameter) in remote_directory on remote_host.

        Returns:
            A string message describing what has occurred.

        Raises:
            Exception when the cancel file cannot be placed on the server.
        """
        remote_host = self._properties['remote_host']
        remote_folder = self._properties['remote_directory']
        ftp = self._setup()
        # Create a local cancel file, then copy it to the ftp server.
        tempdir = tempfile.mkdtemp()
        try:
            tfile = os.path.join(tempdir, self._cancelfile)  # local file
            f = open(tfile, 'wt')
            f.close()
            ftp.cwd(remote_folder)
            self.__sendfile(tfile, ftp)
        except Exception:
            raise Exception(
                f'Could not create .cancel file on {remote_host}/{remote_folder}')
        finally:
            # Always clean up the local scratch directory.
            shutil.rmtree(tempdir)
        return (f'{self._cancelfile} file succesfully placed on {remote_host} {remote_folder}')

    def _setup(self):
        """Initiate an FTP connection with properties passed to constructor.

        Navigates to/creates (as necessary) the directory specified by the
        remote_directory property.

        Returns:
            Instance of the ftplib.FTP class.

        Raises:
            Exception when login fails or the remote directory cannot be
            reached/created.
        """
        host = self._properties['remote_host']
        remote_folder = self._properties['remote_directory']
        # attempt to login to remote host
        try:
            ftp = FTP(host)
            user = self._properties.get('user', '')
            password = self._properties.get('password', '')
            if user == '':
                ftp.login()  # anonymous login
            else:
                ftp.login(user, password)
        except error_perm:
            raise Exception(f'Could not login to remote host {host}')
        # attempt to cd to (creating as needed) the remote directory
        try:
            self._create_remote_directory(ftp, remote_folder)
        except Exception:
            ftp.quit()
            raise Exception(
                f'Could not navigate to directory "{remote_folder}" on remote host {host}')
        return ftp

    def _create_remote_directory(self, ftp, remote_directory):
        """Create directory (recursively) on remote host.

        Args:
            ftp: ftplib.FTP instance.
            remote_directory: String path of directory on remote system
                which needs to be created.

        Raises:
            Exception when unable to create remote_directory.
        """
        # attempt to cd straight to the remote directory first
        ftp.cwd('/')
        try:
            ftp.cwd(remote_directory)
        except error_perm:
            # walk each path component, creating it where missing
            dirparts = self._split(remote_directory)
            for directory in dirparts:
                try:
                    ftp.cwd(directory)
                except error_perm:
                    try:
                        ftp.mkd(directory)
                        ftp.cwd(directory)
                    except error_perm:
                        raise Exception(
                            f'Unable to create subdirectory {directory}.')

    def _copy_file_with_path(self, ftp, local_file, remote_folder,
                             local_folder=None):
        """
        Copy local_file to remote_folder, preserving relative path and
        creating required sub-directories.

        Usage:
          local_file: /home/user/data/events/us2016abcd/data_files/datafile.txt
          remote_folder: /data/archive/events
          local_folder: /home/user/data/events/us2016abcd
          creates: /data/archive/events/us2016abcd/data_files/datafile.txt

        Args:
            ftp: ftplib.FTP instance.
            local_file: Local file to copy.
            remote_folder: Remote folder to copy local files to.
            local_folder: Top of local directory where file copying
                started. If None, local_file is copied directly into
                remote_folder without preserving any path.
        """
        if local_folder is None:
            ftp.cwd(remote_folder)
            # Bug fix: the original referenced an undefined name
            # "filename" here instead of local_file (NameError).
            self.__sendfile(local_file, ftp)
        else:
            # Rebuild the file's relative path underneath remote_folder.
            local_parts = local_file.replace(local_folder, '').strip(
                os.path.sep).split(os.path.sep)
            remote_parts = self._split(remote_folder)
            all_parts = remote_parts + local_parts
            remote_file = '/' + '/'.join(all_parts)
            remfolder, remfile = self._path_split(remote_file)
            try:
                ftp.cwd(remfolder)
            except error_perm:
                self._create_remote_directory(ftp, remfolder)
            self.__sendfile(local_file, ftp)
            ftp.cwd(remote_folder)

    def __sendfile(self, filename, ftp):
        '''Internal function used to send a file using an FTP object.

        Args:
            filename: Local filename.
            ftp: Instance of FTP object.
        '''
        # In case somebody is polling for this file, upload under a
        # temporary name first, then rename, so the poller doesn't grab it
        # before the transfer is finished.
        fbase, fpath = os.path.split(filename)  # this is a local file
        tmpfile = fpath + '.tmp'
        cmd = "STOR " + tmpfile
        # we don't tell the ftp server about the local path to the file
        ftp.storbinary(cmd, open(filename, "rb"), 1024)
        # rename it to the desired destination
        ftp.rename(tmpfile, fpath)

    def _join(self, *path_parts):
        """Join path components into an absolute, forward-slash path."""
        return '/' + '/'.join(path_parts)

    def _split(self, path):
        """Split a forward-slash path into its non-empty components."""
        return path.strip('/').split('/')

    def _path_split(self, path):
        """Split a remote path into (parent_folder, file_name)."""
        parts = path.strip('/').split('/')
        fname = parts[-1]
        fpath = '/' + '/'.join(parts[0:-1])
        return (fpath, fname)
| 36.357401 | 125 | 0.577599 |
from ftplib import FTP, error_perm
import os.path
import shutil
import tempfile
from .sender import Sender
class FTPSender(Sender):
_required_properties = ['remote_directory', 'remote_host']
_optional_properties = ['user', 'password']
def send(self):
remote_host = self._properties['remote_host']
remote_folder = self._properties['remote_directory']
try:
ftp = self._setup()
nfiles = 0
for f in self._local_files:
self.__sendfile(f, ftp)
nfiles += 1
if self._local_directory is not None:
local_directory = self._local_directory
allfiles = self.getAllLocalFiles()
for filename in allfiles:
try:
self._copy_file_with_path(
ftp, filename, remote_folder,
local_folder=local_directory)
nfiles += 1
except:
x = 1
ftp.quit()
return (nfiles, f'{int(nfiles):d} files were sent successfully to {remote_host} {remote_folder}')
except Exception as obj:
raise Exception(
f'Could not send to {host}. Error "{str(obj)}"')
def cancel(self):
remote_host = self._properties['remote_host']
remote_folder = self._properties['remote_directory']
ftp = self._setup()
tempdir = tempfile.mkdtemp()
try:
tfile = os.path.join(tempdir, self._cancelfile)
f = open(tfile, 'wt')
f.close()
ftp.cwd(remote_folder)
self.__sendfile(tfile, ftp)
except Exception as e:
raise Exception(
f'Could not create .cancel file on {remote_host}/{remote_folder}')
finally:
shutil.rmtree(tempdir)
return (f'{self._cancelfile} file succesfully placed on {remote_host} {remote_folder}')
def _setup(self):
host = self._properties['remote_host']
remote_folder = self._properties['remote_directory']
try:
dirparts = self._split(remote_folder)
ftp = FTP(host)
if 'user' in self._properties:
user = self._properties['user']
else:
user = ''
if 'password' in self._properties:
password = self._properties['password']
else:
password = ''
if user == '':
ftp.login()
else:
ftp.login(user, password)
except error_perm as msg:
raise Exception(f'Could not login to remote host {host}')
try:
self._create_remote_directory(ftp, remote_folder)
except Exception as e:
ftp.quit()
raise Exception(
f'Could not navigate to directory "{remote_folder}" on remote host {host}')
return ftp
def _create_remote_directory(self, ftp, remote_directory):
ftp.cwd('/')
try:
ftp.cwd(remote_directory)
except error_perm as msg:
dirparts = self._split(remote_directory)
for directory in dirparts:
try:
ftp.cwd(directory)
except error_perm as msg:
try:
ftp.mkd(directory)
ftp.cwd(directory)
except error_perm as msg:
raise Exception(
f'Unable to create subdirectory {directory}.')
def _copy_file_with_path(self, ftp, local_file, remote_folder,
local_folder=None):
if local_folder is None:
ftp.cwd(remote_folder)
self.__sendfile(filename, ftp)
else:
local_parts = local_file.replace(local_folder, '').strip(
os.path.sep).split(os.path.sep)
remote_parts = self._split(remote_folder)
all_parts = remote_parts + local_parts
remote_file = '/' + '/'.join(all_parts)
print(remote_file)
remfolder, remfile = self._path_split(remote_file)
try:
ftp.cwd(remfolder)
except error_perm as ep:
self._create_remote_directory(ftp, remfolder)
self.__sendfile(local_file, ftp)
ftp.cwd(remote_folder)
def __sendfile(self, filename, ftp):
fbase, fpath = os.path.split(filename) # this is a local file
tmpfile = fpath + '.tmp'
cmd = "STOR " + tmpfile
# we don't tell the ftp server about the local path to the file
ftp.storbinary(cmd, open(filename, "rb"), 1024)
ftp.rename(tmpfile, fpath)
def _join(self, *path_parts):
return '/' + '/'.join(path_parts)
def _split(self, path):
return path.strip('/').split('/')
def _path_split(self, path):
parts = path.strip('/').split('/')
fname = parts[-1]
fpath = '/' + '/'.join(parts[0:-1])
return (fpath, fname)
| true | true |
f7363ac8e3c301222fb9085a20025b96b226a9eb | 4,958 | py | Python | Chapter9/Figure9-1.py | liloganle/Reinforcement-Learning | 29ffb74a1c8e506c544245c9aff37e958e503f26 | [
"MIT"
] | 1 | 2018-08-27T10:09:06.000Z | 2018-08-27T10:09:06.000Z | Chapter9/Figure9-1.py | liloganle/Reinforcement-Learning | 29ffb74a1c8e506c544245c9aff37e958e503f26 | [
"MIT"
] | null | null | null | Chapter9/Figure9-1.py | liloganle/Reinforcement-Learning | 29ffb74a1c8e506c544245c9aff37e958e503f26 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class RandomWalk(object):
    """The 1000-state random walk (Sutton & Barto, Figure 9.1) with state
    aggregation as the value-function approximator."""

    def __init__(self, num_states=1000, groups=10, alpha=2e-5):
        self.num_states = num_states               # number of non-terminal states
        self.groups = groups                       # number of aggregation groups
        self.alpha = alpha                         # gradient step size
        self.group_value = np.zeros(groups)        # value estimate per group
        self.group_size = int(num_states / groups)  # states per group
        self.states = np.arange(1, num_states + 1)  # all non-terminal states
        self.start_state = int(num_states / 2)     # episodes start in the middle
        self.end_state = [0, num_states + 1]       # left/right terminal states
        self.action = [-1, 1]                      # left: -1, right: 1
        self.neighbors = 100                       # max jump distance per move

    def select_action(self):
        """Randomly pick right (+1) or left (-1) with equal probability."""
        return self.action[1] if np.random.binomial(1, 0.5) else self.action[0]

    def find_next_state(self, state, action):
        """Sample the successor state and reward for taking `action`."""
        jump = np.random.randint(1, self.neighbors + 1) * action
        successor = max(min(state + jump, self.end_state[1]), 0)
        if successor == self.end_state[0]:   # terminated on the left
            return successor, -1
        if successor == self.end_state[1]:   # terminated on the right
            return successor, 1
        return successor, 0

    def get_state_value(self, state):
        """Approximate value of a non-terminal state: its group's value."""
        return self.group_value[(state - 1) // self.group_size]

    def update_group_value(self, state, delta):
        """Add `delta` to the value of the group containing `state`."""
        self.group_value[(state - 1) // self.group_size] += delta

    def gradient_monte_carlo(self, state_distribution):
        """Run one episode of gradient-descent Monte Carlo prediction,
        updating group values and the visit-count array in place."""
        current = self.start_state
        visited = [current]
        while current not in self.end_state:
            current, reward = self.find_next_state(current,
                                                   self.select_action())
            visited.append(current)
        # Undiscounted returns with a single terminal reward: the return
        # from every visited state equals the final reward.
        for state in visited[:-1]:
            self.update_group_value(
                state, self.alpha * (reward - self.get_state_value(state)))
            state_distribution[state] += 1
state_distribution[stat] += 1
def dp_compute_value(test_class):
    """Compute the true state values of the random walk by iterative policy
    evaluation (dynamic programming).

    :param test_class: RandomWalk-like object exposing ``states``,
        ``end_state``, ``action`` and ``neighbors``.
    :return: numpy array of values for every state, terminals included.
    """
    right_terminal = test_class.end_state[1]
    # Initial guess: values linear in the state index, spanning [-1, 1].
    value = np.arange(-right_terminal, right_terminal + 1, 2) / right_terminal
    print("Starting computing......")
    converged = False
    while not converged:
        previous = value.copy()
        # In-place (Gauss-Seidel) sweep over all non-terminal states.
        for state in test_class.states:
            total = 0.0
            for direction in test_class.action:
                for distance in range(1, test_class.neighbors + 1):
                    landing = state + direction * distance
                    landing = max(min(landing, right_terminal), 0)
                    # Each of the 2*neighbors moves is equally likely.
                    total += 1 / (2 * test_class.neighbors) * value[landing]
            value[state] = total
        converged = np.linalg.norm(value - previous) < 0.001
    print("Completed!!!")
    return value
if __name__ == "__main__":
episodes = 100000
test_exam = RandomWalk()
true_value = dp_compute_value(test_class=test_exam)
distribution = np.zeros(test_exam.num_states + len(test_exam.end_state))
for itr in tqdm(range(episodes)):
test_exam.gradient_monte_carlo(distribution)
distribution /= np.sum(distribution)
state_value = [test_exam.get_state_value(stat) for stat in test_exam.states]
plt.figure(1)
plt.plot(test_exam.states, true_value[1:-1], label="True value")
plt.plot(test_exam.states, state_value, label="Approximate MC value")
plt.xlabel("State")
plt.ylabel("Value")
plt.legend()
plt.savefig("./images/Figure9-1-1.png")
plt.show()
plt.figure(2)
plt.plot(test_exam.states, distribution[1:-1], label="State Distribution")
plt.xlabel("State")
plt.ylabel("Distribution")
plt.legend()
plt.savefig("./images/Figure9-1-2.png")
plt.show()
plt.close()
print("Completed!!!You can check it in 'images' directory")
| 39.349206 | 107 | 0.594796 |
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class RandomWalk(object):
def __init__(self, num_states=1000, groups=10, alpha=2e-5):
self.num_states = num_states
self.groups = groups
self.alpha = alpha
self.group_value = np.zeros(groups)
self.group_size = int(num_states / groups)
self.states = np.arange(1, num_states+1)
self.start_state = int(num_states / 2)
self.end_state = [0, num_states + 1]
self.action = [-1, 1]
self.neighbors = 100
def select_action(self):
if np.random.binomial(1, 0.5):
return self.action[1]
else:
return self.action[0]
def find_next_state(self, state, action):
move_step = np.random.randint(1, self.neighbors+1)
move_step *= action
next_state = state + move_step
next_state = max(min(next_state, self.end_state[1]), 0)
if next_state == self.end_state[0]:
reward = -1
elif next_state == self.end_state[1]:
reward = 1
else:
reward = 0
return next_state, reward
def get_state_value(self, state):
group_idx = (state - 1) // self.group_size
return self.group_value[group_idx]
def update_group_value(self, state, delta):
group_idx = (state - 1) // self.group_size
self.group_value[group_idx] += delta
def gradient_monte_carlo(self, state_distribution):
state = self.start_state
trajectory = [state]
while state not in self.end_state:
action = self.select_action()
next_state, reward = self.find_next_state(state, action)
trajectory.append(next_state)
state = next_state
for stat in trajectory[:-1]:
delta = self.alpha * (reward - self.get_state_value(stat))
self.update_group_value(stat, delta)
state_distribution[stat] += 1
def dp_compute_value(test_class):
value = np.arange(-test_class.end_state[1], test_class.end_state[1] + 1, 2) / test_class.end_state[1]
print("Starting computing......")
while True:
value_temp = value.copy()
for state in test_class.states:
value[state] = 0
for act in test_class.action:
for step in range(1, test_class.neighbors + 1):
step *= act
next_state = state + step
next_state = max(min(next_state, test_class.end_state[1]), 0)
value[state] += 1/(2*test_class.neighbors)*value[next_state]
if np.linalg.norm(value - value_temp) < 0.001:
break
print("Completed!!!")
return value
if __name__ == "__main__":
episodes = 100000
test_exam = RandomWalk()
true_value = dp_compute_value(test_class=test_exam)
distribution = np.zeros(test_exam.num_states + len(test_exam.end_state))
for itr in tqdm(range(episodes)):
test_exam.gradient_monte_carlo(distribution)
distribution /= np.sum(distribution)
state_value = [test_exam.get_state_value(stat) for stat in test_exam.states]
plt.figure(1)
plt.plot(test_exam.states, true_value[1:-1], label="True value")
plt.plot(test_exam.states, state_value, label="Approximate MC value")
plt.xlabel("State")
plt.ylabel("Value")
plt.legend()
plt.savefig("./images/Figure9-1-1.png")
plt.show()
plt.figure(2)
plt.plot(test_exam.states, distribution[1:-1], label="State Distribution")
plt.xlabel("State")
plt.ylabel("Distribution")
plt.legend()
plt.savefig("./images/Figure9-1-2.png")
plt.show()
plt.close()
print("Completed!!!You can check it in 'images' directory")
| true | true |
f7363b31d00a4b9a3ce297b2c0f2291a97db0556 | 3,344 | py | Python | var/spack/repos/builtin/packages/spdlog/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/spdlog/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/spdlog/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2022-01-18T23:39:24.000Z | 2022-01-18T23:39:24.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Spdlog(CMakePackage):
    """Very fast, header only, C++ logging library"""

    homepage = "https://github.com/gabime/spdlog"
    url = "https://github.com/gabime/spdlog/archive/v0.9.0.tar.gz"

    version('1.8.1', sha256='5197b3147cfcfaa67dd564db7b878e4a4b3d9f3443801722b3915cdeced656cb')
    version('1.8.0', sha256='1e68e9b40cf63bb022a4b18cdc1c9d88eb5d97e4fd64fa981950a9cacf57a4bf')
    version('1.7.0', sha256='f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62')
    version('1.6.1', sha256='378a040d91f787aec96d269b0c39189f58a6b852e4cbf9150ccfacbe85ebbbfc')
    version('1.6.0', sha256='0421667c9f2fc78e6548d44f7bc5921be0f03e612df384294c16cedb93d967f8')
    version('1.5.0', sha256='b38e0bbef7faac2b82fed550a0c19b0d4e7f6737d5321d4fd8f216b80f8aee8a')
    version('1.4.2', sha256='821c85b120ad15d87ca2bc44185fa9091409777c756029125a02f81354072157')
    version('1.4.1', sha256='3291958eb54ed942d1bd3aef1b4f8ccf70566cbc04d34296ec61eb96ceb73cff')
    version('1.2.1', sha256='867a4b7cedf9805e6f76d3ca41889679054f7e5a3b67722fe6d0eae41852a767')
    version('1.2.0', sha256='0ba31b9e7f8e43a7be328ab0236d57810e5d4fc8a1a7842df665ae22d5cbd128')
    version('1.1.0', sha256='3dbcbfd8c07e25f5e0d662b194d3a7772ef214358c49ada23c044c4747ce8b19')
    version('1.0.0', sha256='90d5365121bcd2c41ce94dfe6a460e89507a2dfef6133fe5fad5bb35ac4ef0a1')
    version('0.17.0', sha256='94f74fd1b3344733d1db3de2ec22e6cbeb769f93a8baa0d4a22b1f62dc7369f8')
    version('0.16.3', sha256='b88d7be261d9089c817fc8cee6c000d69f349b357828e4c7f66985bc5d5360b8')
    version('0.16.2', sha256='2081e5df5e87402398847431e16b87c71dd5c4d632314bb976ace8161f4d32de')
    version('0.16.1', sha256='733260e1fbdcf1b3dc307fc585e4476240026de8be28eb905731d2ab0942deae')
    version('0.16.0', sha256='9e64e3b10c2a3c54dfff63aa056057cf1db8a5fd506b3d9cf77207511820baac')
    version('0.14.0', sha256='eb5beb4e53f4bfff5b32eb4db8588484bdc15a17b90eeefef3a9fc74fec1d83d')
    version('0.13.0', sha256='d798a6ca19165f0a18a43938859359269f5a07fd8e0eb83ab8674739c9e8f361')
    version('0.12.0', sha256='5cfd6a0b3182a88e1eb35bcb65a7ef9035140d7c73b16ba6095939dbf07325b9')
    version('0.11.0', sha256='8c0f1810fb6b7d23fef70c2ea8b6fa6768ac8d18d6e0de39be1f48865e22916e')
    version('0.10.0', sha256='fbbc53c1cc09b93b4c3d76b683bbe9315e2efe3727701227374dce6aa4264075')
    version('0.9.0', sha256='bbbe5a855c8b309621352921d650449eb2f741d35d55ec50fb4d8122ddfb8f01')

    variant('shared', default=True,
            description='Build shared libraries (v1.4.0+)')

    depends_on('cmake@3.2:', type='build')

    def cmake_args(self):
        """Return the CMake options derived from this package's variants."""
        spec = self.spec
        args = []
        # The SPDLOG_BUILD_* switches only exist from v1.4.0 onwards.
        # Consistency fix: use the already-bound local `spec` instead of
        # re-reading self.spec.
        if spec.version >= Version('1.4.0'):
            args.extend([
                '-DSPDLOG_BUILD_SHARED:BOOL={0}'.format(
                    'ON' if '+shared' in spec else 'OFF'),
                # tests and examples
                '-DSPDLOG_BUILD_TESTS:BOOL={0}'.format(
                    'ON' if self.run_tests else 'OFF'),
                '-DSPDLOG_BUILD_EXAMPLE:BOOL={0}'.format(
                    'ON' if self.run_tests else 'OFF')
            ])
        return args
| 54.819672 | 96 | 0.747309 |
from spack import *
class Spdlog(CMakePackage):
    """Very fast, header only, C++ logging library."""

    homepage = "https://github.com/gabime/spdlog"
    url = "https://github.com/gabime/spdlog/archive/v0.9.0.tar.gz"
    version('1.8.1', sha256='5197b3147cfcfaa67dd564db7b878e4a4b3d9f3443801722b3915cdeced656cb')
    version('1.8.0', sha256='1e68e9b40cf63bb022a4b18cdc1c9d88eb5d97e4fd64fa981950a9cacf57a4bf')
    version('1.7.0', sha256='f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62')
    version('1.6.1', sha256='378a040d91f787aec96d269b0c39189f58a6b852e4cbf9150ccfacbe85ebbbfc')
    version('1.6.0', sha256='0421667c9f2fc78e6548d44f7bc5921be0f03e612df384294c16cedb93d967f8')
    version('1.5.0', sha256='b38e0bbef7faac2b82fed550a0c19b0d4e7f6737d5321d4fd8f216b80f8aee8a')
    version('1.4.2', sha256='821c85b120ad15d87ca2bc44185fa9091409777c756029125a02f81354072157')
    version('1.4.1', sha256='3291958eb54ed942d1bd3aef1b4f8ccf70566cbc04d34296ec61eb96ceb73cff')
    version('1.2.1', sha256='867a4b7cedf9805e6f76d3ca41889679054f7e5a3b67722fe6d0eae41852a767')
    version('1.2.0', sha256='0ba31b9e7f8e43a7be328ab0236d57810e5d4fc8a1a7842df665ae22d5cbd128')
    version('1.1.0', sha256='3dbcbfd8c07e25f5e0d662b194d3a7772ef214358c49ada23c044c4747ce8b19')
    version('1.0.0', sha256='90d5365121bcd2c41ce94dfe6a460e89507a2dfef6133fe5fad5bb35ac4ef0a1')
    version('0.17.0', sha256='94f74fd1b3344733d1db3de2ec22e6cbeb769f93a8baa0d4a22b1f62dc7369f8')
    version('0.16.3', sha256='b88d7be261d9089c817fc8cee6c000d69f349b357828e4c7f66985bc5d5360b8')
    version('0.16.2', sha256='2081e5df5e87402398847431e16b87c71dd5c4d632314bb976ace8161f4d32de')
    version('0.16.1', sha256='733260e1fbdcf1b3dc307fc585e4476240026de8be28eb905731d2ab0942deae')
    version('0.16.0', sha256='9e64e3b10c2a3c54dfff63aa056057cf1db8a5fd506b3d9cf77207511820baac')
    version('0.14.0', sha256='eb5beb4e53f4bfff5b32eb4db8588484bdc15a17b90eeefef3a9fc74fec1d83d')
    version('0.13.0', sha256='d798a6ca19165f0a18a43938859359269f5a07fd8e0eb83ab8674739c9e8f361')
    version('0.12.0', sha256='5cfd6a0b3182a88e1eb35bcb65a7ef9035140d7c73b16ba6095939dbf07325b9')
    version('0.11.0', sha256='8c0f1810fb6b7d23fef70c2ea8b6fa6768ac8d18d6e0de39be1f48865e22916e')
    version('0.10.0', sha256='fbbc53c1cc09b93b4c3d76b683bbe9315e2efe3727701227374dce6aa4264075')
    version('0.9.0', sha256='bbbe5a855c8b309621352921d650449eb2f741d35d55ec50fb4d8122ddfb8f01')
    variant('shared', default=True,
            description='Build shared libraries (v1.4.0+)')
    depends_on('cmake@3.2:', type='build')

    def cmake_args(self):
        """Assemble the CMake command-line options for this build."""
        spec = self.spec
        # Releases older than 1.4.0 predate the SPDLOG_BUILD_* switches.
        if self.spec.version < Version('1.4.0'):
            return []
        shared_flag = 'ON' if '+shared' in spec else 'OFF'
        tests_flag = 'ON' if self.run_tests else 'OFF'
        return [
            '-DSPDLOG_BUILD_SHARED:BOOL={0}'.format(shared_flag),
            '-DSPDLOG_BUILD_TESTS:BOOL={0}'.format(tests_flag),
            '-DSPDLOG_BUILD_EXAMPLE:BOOL={0}'.format(tests_flag),
        ]
| true | true |
f7363c67ecf0557ee81b88769cad34b8eafd3728 | 682 | py | Python | Python Advanced/Advanced/Tuples and Sets/Exercise/Task06.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python Advanced/Advanced/Tuples and Sets/Exercise/Task06.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python Advanced/Advanced/Tuples and Sets/Exercise/Task06.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | n = int(input())
# Scores `n` names (n is read by the int(input()) on the preceding, fused
# line of this dump): the i-th name (1-based) is scored as the truncated
# average of its character codes, then bucketed by parity.
even_set = set()
odd_set = set()
for index in range(1, n + 1):
    name = input()
    # Sum the Unicode code points of every character in the name.
    ascii_value = 0
    for char in name:
        ascii_value += ord(char)
    # Average by the 1-based position; int() truncates the float quotient.
    ascii_value /= index
    ascii_value = int(ascii_value)
    if ascii_value % 2 == 0:
        even_set.add(ascii_value)
    else:
        odd_set.add(ascii_value)
# Compare the two score sets and print one combination of them.
if sum(even_set) == sum(odd_set):
    union = even_set.union(odd_set)
    print(", ".join([str(x) for x in union]))
elif sum(odd_set) > sum(even_set):
    print(", ".join([str(x) for x in odd_set]))
elif sum(odd_set) < sum(even_set):
    # NOTE: the print for this branch sits on the next file line, which is
    # fused with dataset metadata in this dump.
    sym_dif = even_set.symmetric_difference(odd_set)
print(", ".join([str(x) for x in sym_dif])) | 22.733333 | 52 | 0.61437 | n = int(input())
# Duplicate (comment-stripped) copy of the name-scoring exercise: `n` is
# read by the int(input()) at the end of the preceding, fused line.
even_set = set()
odd_set = set()
for index in range(1, n + 1):
    name = input()
    # Sum the Unicode code points of every character in the name.
    ascii_value = 0
    for char in name:
        ascii_value += ord(char)
    # Average by the 1-based position; int() truncates the float quotient.
    ascii_value /= index
    ascii_value = int(ascii_value)
    if ascii_value % 2 == 0:
        even_set.add(ascii_value)
    else:
        odd_set.add(ascii_value)
# Compare the two score sets and print one combination of them.
if sum(even_set) == sum(odd_set):
    union = even_set.union(odd_set)
    print(", ".join([str(x) for x in union]))
elif sum(odd_set) > sum(even_set):
    print(", ".join([str(x) for x in odd_set]))
elif sum(odd_set) < sum(even_set):
    # NOTE: the matching print sits on the next file line, fused with
    # dataset metadata in this dump.
    sym_dif = even_set.symmetric_difference(odd_set)
print(", ".join([str(x) for x in sym_dif])) | true | true |
f7363e2b72f9b4c0f235a44590c692b9349869a4 | 1,532 | py | Python | server/canonicalization/utils/log.py | hotpxl/canonicalization-server | ac3c0e93adf35015d7f6cfc8c6cf2e6ec45cdeae | [
"MIT"
] | 3 | 2015-10-29T21:43:27.000Z | 2020-05-26T09:17:41.000Z | server/canonicalization/utils/log.py | hotpxl/canonicalization-server | ac3c0e93adf35015d7f6cfc8c6cf2e6ec45cdeae | [
"MIT"
] | null | null | null | server/canonicalization/utils/log.py | hotpxl/canonicalization-server | ac3c0e93adf35015d7f6cfc8c6cf2e6ec45cdeae | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import logging
class _Formatter(logging.Formatter):
    """Coloured, glog-style log formatter.

    Each record renders as
    ``<color><level-letter>MMDD HH:MM:SS pid file:line:func name]<reset> msg``.
    """

    # One-letter severity labels keyed by the standard logging levels.
    _LABELS = {
        logging.CRITICAL: 'C',
        logging.ERROR: 'E',
        logging.WARNING: 'W',
        logging.INFO: 'I',
        logging.DEBUG: 'D',
    }

    def __init__(self):
        datefmt = '%m%d %H:%M:%S'
        super(_Formatter, self).__init__(datefmt=datefmt)

    def get_color(self, level):
        """ANSI colour for *level*: red >= WARNING, green >= INFO, else blue."""
        if logging.WARNING <= level:
            return '\x1b[31m'
        elif logging.INFO <= level:
            return '\x1b[32m'
        else:
            return '\x1b[34m'

    def get_label(self, level):
        """One-letter label for *level*; 'U' (unknown) for custom levels."""
        return self._LABELS.get(level, 'U')

    def format(self, record):
        """Build a per-record format string, then delegate to the base class."""
        fmt = self.get_color(record.levelno)
        fmt += self.get_label(record.levelno)
        fmt += '%(asctime)s %(process)d %(filename)s:%(lineno)d:%(funcName)s' \
               ' %(name)s]\x1b[0m'
        fmt += ' %(message)s'
        self._fmt = fmt
        # Bug fix: Python 3's logging.Formatter renders the message through
        # self._style (a PercentStyle holding its own _fmt), so the
        # per-record format must be installed there too -- assigning
        # self._fmt alone is only honoured by the Python 2 implementation
        # and the coloured prefix would otherwise be dropped.
        if hasattr(self, '_style'):
            self._style._fmt = fmt
        return super(_Formatter, self).format(record)
# Single module-level handler shared by every logger returned from
# get_logger(); it uses the coloured _Formatter defined above.
_handler = logging.StreamHandler()
_handler.setFormatter(_Formatter())
def get_logger(name=None, level=logging.DEBUG):
    """Return the logger *name*, configuring it exactly once.

    The first call for a given logger attaches the shared coloured handler
    and applies *level*; every later call returns the same logger untouched.
    """
    logger = logging.getLogger(name)
    # Already configured by a previous call: hand it back as-is.
    if getattr(logger, '_init_done', None):
        return logger
    logger._init_done = True
    logger.addHandler(_handler)
    logger.setLevel(level)
    return logger
| 27.357143 | 79 | 0.580287 | from __future__ import absolute_import
from __future__ import print_function
import logging
class _Formatter(logging.Formatter):
    """Coloured, glog-style log formatter.

    Each record renders as
    ``<color><level-letter>MMDD HH:MM:SS pid file:line:func name]<reset> msg``.
    """

    # One-letter severity labels keyed by the standard logging levels.
    _LABELS = {
        logging.CRITICAL: 'C',
        logging.ERROR: 'E',
        logging.WARNING: 'W',
        logging.INFO: 'I',
        logging.DEBUG: 'D',
    }

    def __init__(self):
        datefmt = '%m%d %H:%M:%S'
        super(_Formatter, self).__init__(datefmt=datefmt)

    def get_color(self, level):
        """ANSI colour for *level*: red >= WARNING, green >= INFO, else blue."""
        if logging.WARNING <= level:
            return '\x1b[31m'
        elif logging.INFO <= level:
            return '\x1b[32m'
        else:
            return '\x1b[34m'

    def get_label(self, level):
        """One-letter label for *level*; 'U' (unknown) for custom levels."""
        return self._LABELS.get(level, 'U')

    def format(self, record):
        """Build a per-record format string, then delegate to the base class."""
        fmt = self.get_color(record.levelno)
        fmt += self.get_label(record.levelno)
        fmt += '%(asctime)s %(process)d %(filename)s:%(lineno)d:%(funcName)s' \
               ' %(name)s]\x1b[0m'
        fmt += ' %(message)s'
        self._fmt = fmt
        # Bug fix: Python 3's logging.Formatter renders the message through
        # self._style (a PercentStyle holding its own _fmt), so the
        # per-record format must be installed there too -- assigning
        # self._fmt alone is only honoured by the Python 2 implementation
        # and the coloured prefix would otherwise be dropped.
        if hasattr(self, '_style'):
            self._style._fmt = fmt
        return super(_Formatter, self).format(record)
# Single module-level handler shared by every logger returned from
# get_logger(); it uses the coloured _Formatter defined above.
_handler = logging.StreamHandler()
_handler.setFormatter(_Formatter())
def get_logger(name=None, level=logging.DEBUG):
    """Return the logger *name*, configuring it exactly once.

    The first call for a given logger attaches the shared coloured handler
    and applies *level*; every later call returns the same logger untouched.
    """
    logger = logging.getLogger(name)
    # Already configured by a previous call: hand it back as-is.
    if getattr(logger, '_init_done', None):
        return logger
    logger._init_done = True
    logger.addHandler(_handler)
    logger.setLevel(level)
    return logger
| true | true |
f7363ff34bb5175c2483c71a8f1bc69582de84b2 | 3,302 | py | Python | techreview/settings.py | lcared/TechReview2021 | 96b5f9af3c35f2f1dc79e6c53b48623d84e21da8 | [
"MIT"
] | 1 | 2021-06-14T17:39:37.000Z | 2021-06-14T17:39:37.000Z | techreview/settings.py | mjelde/TechReview2021 | 96b5f9af3c35f2f1dc79e6c53b48623d84e21da8 | [
"MIT"
] | null | null | null | techreview/settings.py | mjelde/TechReview2021 | 96b5f9af3c35f2f1dc79e6c53b48623d84e21da8 | [
"MIT"
] | null | null | null | """
Django settings for techreview project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment / .env file via python-decouple, so the key is
# never committed to the repository.
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must be populated with the served hostnames before deploying.
ALLOWED_HOSTS = []
# Application definition
# Django contrib defaults plus this project's own 'tech' app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'tech',
]
# NOTE: middleware order is significant to Django; do not reorder casually.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'techreview.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'techreview.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# PostgreSQL backend; every connection parameter comes from the environment
# through python-decouple.
DATABASES = {
    'default': {
        'ENGINE' : 'django.db.backends.postgresql',
        'NAME' : config('DB_NAME'),
        'USER' : config('DB_USER'),
        'PASSWORD' : config('DB_PASSWORD'),
        'HOST' : config('DB_HOST'),
        'PORT' : config('DB_PORT'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Post-login redirect target -- presumably a named URL pattern; confirm
# against techreview/urls.py.
LOGIN_REDIRECT_URL='loginmessage'
LOGOUT_REDIRECT_URL='logoutmessage' | 25.796875 | 91 | 0.692611 |
from pathlib import Path
from decouple import config
# Project root: two levels above this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Secret key is read from the environment / .env via python-decouple.
SECRET_KEY = config('SECRET_KEY')
# SECURITY: debug must be disabled in production.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'tech',
]
# NOTE: middleware order is significant to Django; do not reorder casually.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'techreview.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'techreview.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# PostgreSQL backend; connection parameters come from the environment.
DATABASES = {
    'default': {
        'ENGINE' : 'django.db.backends.postgresql',
        'NAME' : config('DB_NAME'),
        'USER' : config('DB_USER'),
        'PASSWORD' : config('DB_PASSWORD'),
        'HOST' : config('DB_HOST'),
        'PORT' : config('DB_PORT'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Post-login redirect target -- presumably a named URL pattern; confirm
# against techreview/urls.py.
LOGIN_REDIRECT_URL='loginmessage'
LOGOUT_REDIRECT_URL='logoutmessage' | true | true |
f73640ecbf844dab79db03ff22b848317b94f45e | 3,023 | py | Python | sphinx/source/tutorial/solutions/les_mis.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 2 | 2021-09-01T12:36:06.000Z | 2021-11-17T10:48:36.000Z | sphinx/source/tutorial/solutions/les_mis.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | null | null | null | sphinx/source/tutorial/solutions/les_mis.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | 1 | 2016-03-18T03:01:59.000Z | 2016-03-18T03:01:59.000Z | import numpy as np
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.sampledata.les_mis import data
# EXERCISE: try out different sort orders for the names
nodes = data['nodes']
names = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]
# store the links information in numpy
N = len(nodes)
# Bug fix: use np.zeros, not np.empty. Only linked (i, j) pairs are
# assigned below, while the alpha computation later reads EVERY entry, so
# np.empty would leak uninitialised memory into the plot.
counts = np.zeros((N, N))
for link in data['links']:
    counts[link['source'], link['target']] = link['value']
    counts[link['target'], link['source']] = link['value']
# We will use these colors to color each group by a different color
colormap = [
    "#444444", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99",
    "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"
]
# set up some data to plot! We will need to have values for every pair of names. The
# co-occurrence count for a given pair of names is in `count[i,j]`. The strategy is
# to color each rect by the group, and set its alpha based on the count.
xname = []
yname = []
color = []
alpha = []
for i, n1 in enumerate(nodes):
    for j, n2 in enumerate(nodes):
        xname.append(n1['name'])
        yname.append(n2['name'])
        # Alpha grows with the co-occurrence count, clamped to [0.1, 1.0].
        a = min(counts[i,j]/4.0, 0.9) + 0.1
        alpha.append(a)
        if n1['group'] == n2['group']:
            color.append(colormap[n1['group']])
        else:
            color.append('lightgrey')
# EXERCISE: output static HTML file
output_file("les_mis.html")
# EXERCISE: create a ColumnDataSource to hold the xnames, ynames, colors, alphas,
# and counts. NOTE: the counts array is 2D and will need to be flattened
source = ColumnDataSource(
    data=dict(
        xname=xname,
        yname=yname,
        colors=color,
        alphas=alpha,
        count=counts.flatten(),
    )
)
# create a new figure
p = figure(title="Les Mis Occurrences (one at a time)",
           x_axis_location="above", tools="resize,hover",
           x_range=list(reversed(names)), y_range=names,
           plot_width=800, plot_height=800)
# EXERCISE: use the `p.rect` renderer to render a categorical heatmap of all the
# data. Experiment with the widths and heights (use categorical percentage
# unite) as well as colors and alphas.
p.rect('xname', 'yname', 0.9, 0.9, source=source,
       color='colors', alpha='alphas', line_color=None)
# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
# EXERCISE: configure the hover tool to display both names as well as
# the count value as tooltips
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
    ('names', '@yname, @xname'),
    ('count', '@count'),
]
# EXERCISE: show the plot
show(p)
| 32.505376 | 84 | 0.670526 | import numpy as np
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.sampledata.les_mis import data
# Les Mis co-occurrence heatmap (bokeh). Nodes sorted by group for the axes.
nodes = data['nodes']
names = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]
N = len(nodes)
# Bug fix: use np.zeros, not np.empty. Only linked (i, j) pairs are
# assigned below, while the alpha computation later reads EVERY entry, so
# np.empty would leak uninitialised memory into the plot.
counts = np.zeros((N, N))
for link in data['links']:
    counts[link['source'], link['target']] = link['value']
    counts[link['target'], link['source']] = link['value']
# One colour per character group.
colormap = [
    "#444444", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99",
    "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"
]
xname = []
yname = []
color = []
alpha = []
for i, n1 in enumerate(nodes):
    for j, n2 in enumerate(nodes):
        xname.append(n1['name'])
        yname.append(n2['name'])
        # Alpha grows with the co-occurrence count, clamped to [0.1, 1.0].
        a = min(counts[i,j]/4.0, 0.9) + 0.1
        alpha.append(a)
        if n1['group'] == n2['group']:
            color.append(colormap[n1['group']])
        else:
            color.append('lightgrey')
output_file("les_mis.html")
source = ColumnDataSource(
    data=dict(
        xname=xname,
        yname=yname,
        colors=color,
        alphas=alpha,
        count=counts.flatten(),
    )
)
p = figure(title="Les Mis Occurrences (one at a time)",
           x_axis_location="above", tools="resize,hover",
           x_range=list(reversed(names)), y_range=names,
           plot_width=800, plot_height=800)
p.rect('xname', 'yname', 0.9, 0.9, source=source,
       color='colors', alpha='alphas', line_color=None)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
    ('names', '@yname, @xname'),
    ('count', '@count'),
]
show(p)
| true | true |
f73641597666934bb64b5e3940ed3742742977cd | 3,013 | py | Python | Scribe/other_estimators.py | aristoteleo/scribe-py | ea28d2b588f8648b9ce1679fe18c3142aee2aa58 | [
"BSD-3-Clause"
] | 32 | 2019-08-15T20:58:30.000Z | 2022-03-17T14:16:31.000Z | Scribe/other_estimators.py | aristoteleo/scribe-py | ea28d2b588f8648b9ce1679fe18c3142aee2aa58 | [
"BSD-3-Clause"
] | 6 | 2020-03-06T06:02:08.000Z | 2022-03-30T22:37:59.000Z | Scribe/other_estimators.py | aristoteleo/scribe-py | ea28d2b588f8648b9ce1679fe18c3142aee2aa58 | [
"BSD-3-Clause"
] | 10 | 2020-03-05T09:56:04.000Z | 2021-03-14T12:16:54.000Z | import pandas
import numpy as np
from multiprocessing import Pool
def __individual_corr(id1, id2=None, x=None, y=None):
    """Pearson correlation between two expression profiles.

    Accepts either the four arguments directly or -- as both the serial
    path and ``Pool.map`` in :func:`corr` invoke it -- one packed tuple
    ``(id1, id2, x, y)``.

    Bug fix: the original 4-positional signature never matched the packed
    tuple passed by its call sites, and its body called the module-level
    ``corr`` (the DataFrame routine below) instead of a correlation of two
    vectors.

    Returns the triple ``(id1, id2, r)``.
    """
    if id2 is None:
        # Called with a single packed tuple (the form used by corr()).
        id1, id2, x, y = id1
    # Pearson r via the normalised covariance matrix; equivalent to
    # scipy.stats.pearsonr(x, y)[0] without the p-value.
    r = np.corrcoef(np.asarray(x, dtype=float), np.asarray(y, dtype=float))[0, 1]
    return (id1, id2, r)
def __individual_mi(id1, id2, x, y):
    # NOTE(review): `mi` here resolves to the module-level mi() defined
    # below (which expects a model object and a process count), not to a
    # mutual-information estimator of two samples, so this call will fail
    # at runtime. Presumably an external MI estimator import is missing --
    # confirm against the project's information-measure module.
    # Also note the call sites in corr()/mi() pass a single packed tuple,
    # which does not match this 4-argument signature -- verify.
    return (id1, id2, mi(x, y))
def corr(self, number_of_processes=1):
    """Compute the pairwise correlation network over all nodes.

    Parameters
    ----------
    self: causal_model object
        An instance of a causal_model class object (for example converted
        from an AnnData object through the load_anndata function).
    number_of_processes: int (default: 1)
        Number of worker processes to use.

    Returns
    -------
    corr_results: pandas.DataFrame
        The inferred correlation network, rows/columns indexed by node id.
    """
    # Square result frame initialised to NaN; the diagonal is never filled.
    self.corr_results = pandas.DataFrame(
        {node_id: [np.nan for _ in self.node_ids] for node_id in self.node_ids},
        index=self.node_ids)
    if number_of_processes > 1:
        pending = []
    for source_id in self.node_ids:
        for target_id in self.node_ids:
            if source_id == target_id:
                continue
            job = (source_id, target_id,
                   self.expression_concatenated.loc[source_id],
                   self.expression_concatenated.loc[target_id])
            if number_of_processes == 1:
                self.corr_results.loc[source_id, target_id] = __individual_corr(job)[2]
            else:
                pending.append(job)
    if number_of_processes > 1:
        # Fan the pending pairs out to a worker pool, then merge back.
        for src, dst, value in Pool(number_of_processes).map(__individual_corr, pending):
            self.corr_results.loc[src, dst] = value
    return self.corr_results
def mi(self, number_of_processes=1):
    """Compute the pairwise mutual-information network over all nodes.

    Parameters
    ----------
    self: causal_model object
        An instance of a causal_model class object (for example converted
        from an AnnData object through the load_anndata function).
    number_of_processes: int (default: 1)
        Number of worker processes to use.

    Returns
    -------
    mi_results: pandas.DataFrame
        The inferred mutual information network, indexed by node id.
    """
    # Square result frame initialised to NaN; the diagonal is never filled.
    self.mi_results = pandas.DataFrame(
        {node_id: [np.nan for _ in self.node_ids] for node_id in self.node_ids},
        index=self.node_ids)
    if number_of_processes > 1:
        pending = []
    for source_id in self.node_ids:
        for target_id in self.node_ids:
            if source_id == target_id:
                continue
            # The estimator expects each observation wrapped in its own list.
            job = (source_id, target_id,
                   [[value] for value in self.expression_concatenated.loc[source_id]],
                   [[value] for value in self.expression_concatenated.loc[target_id]])
            if number_of_processes == 1:
                self.mi_results.loc[source_id, target_id] = __individual_mi(job)[2]
            else:
                pending.append(job)
    if number_of_processes > 1:
        # Fan the pending pairs out to a worker pool, then merge back.
        for src, dst, value in Pool(number_of_processes).map(__individual_mi, pending):
            self.mi_results.loc[src, dst] = value
    return self.mi_results
| 35.869048 | 185 | 0.654165 | import pandas
import numpy as np
from multiprocessing import Pool
def __individual_corr(id1, id2=None, x=None, y=None):
    """Pearson correlation between two expression profiles.

    Accepts either the four arguments directly or -- as both the serial
    path and ``Pool.map`` in :func:`corr` invoke it -- one packed tuple
    ``(id1, id2, x, y)``.

    Bug fix: the original 4-positional signature never matched the packed
    tuple passed by its call sites, and its body called the module-level
    ``corr`` (the DataFrame routine below) instead of a correlation of two
    vectors.

    Returns the triple ``(id1, id2, r)``.
    """
    if id2 is None:
        # Called with a single packed tuple (the form used by corr()).
        id1, id2, x, y = id1
    # Pearson r via the normalised covariance matrix; equivalent to
    # scipy.stats.pearsonr(x, y)[0] without the p-value.
    r = np.corrcoef(np.asarray(x, dtype=float), np.asarray(y, dtype=float))[0, 1]
    return (id1, id2, r)
def __individual_mi(id1, id2, x, y):
    # NOTE(review): `mi` here resolves to the module-level mi() defined
    # below (which expects a model object and a process count), not to a
    # mutual-information estimator of two samples, so this call will fail
    # at runtime. Presumably an external MI estimator import is missing --
    # confirm against the project's information-measure module.
    # Also note the call sites in corr()/mi() pass a single packed tuple,
    # which does not match this 4-argument signature -- verify.
    return (id1, id2, mi(x, y))
def corr(self, number_of_processes=1):
    """Compute the pairwise correlation network over all nodes.

    Returns the inferred correlation network as a pandas.DataFrame whose
    rows and columns are indexed by node id; also stored on
    ``self.corr_results``.
    """
    # Square result frame initialised to NaN; the diagonal is never filled.
    self.corr_results = pandas.DataFrame(
        {node_id: [np.nan for _ in self.node_ids] for node_id in self.node_ids},
        index=self.node_ids)
    if number_of_processes > 1:
        pending = []
    for source_id in self.node_ids:
        for target_id in self.node_ids:
            if source_id == target_id:
                continue
            job = (source_id, target_id,
                   self.expression_concatenated.loc[source_id],
                   self.expression_concatenated.loc[target_id])
            if number_of_processes == 1:
                self.corr_results.loc[source_id, target_id] = __individual_corr(job)[2]
            else:
                pending.append(job)
    if number_of_processes > 1:
        # Fan the pending pairs out to a worker pool, then merge back.
        for src, dst, value in Pool(number_of_processes).map(__individual_corr, pending):
            self.corr_results.loc[src, dst] = value
    return self.corr_results
def mi(self, number_of_processes=1):
    """Compute the pairwise mutual-information network over all nodes.

    Returns the inferred mutual information network as a pandas.DataFrame
    indexed by node id; also stored on ``self.mi_results``.
    """
    # Square result frame initialised to NaN; the diagonal is never filled.
    self.mi_results = pandas.DataFrame(
        {node_id: [np.nan for _ in self.node_ids] for node_id in self.node_ids},
        index=self.node_ids)
    if number_of_processes > 1:
        pending = []
    for source_id in self.node_ids:
        for target_id in self.node_ids:
            if source_id == target_id:
                continue
            # The estimator expects each observation wrapped in its own list.
            job = (source_id, target_id,
                   [[value] for value in self.expression_concatenated.loc[source_id]],
                   [[value] for value in self.expression_concatenated.loc[target_id]])
            if number_of_processes == 1:
                self.mi_results.loc[source_id, target_id] = __individual_mi(job)[2]
            else:
                pending.append(job)
    if number_of_processes > 1:
        # Fan the pending pairs out to a worker pool, then merge back.
        for src, dst, value in Pool(number_of_processes).map(__individual_mi, pending):
            self.mi_results.loc[src, dst] = value
    return self.mi_results
| true | true |
f73642dae27409c6118a87490a8475aae4fb34f2 | 5,536 | py | Python | liteflow/input.py | frankilepro/LiTeFlow | d07105ea00ad29b701e1b100d9cda2297eef19de | [
"Apache-2.0"
] | null | null | null | liteflow/input.py | frankilepro/LiTeFlow | d07105ea00ad29b701e1b100d9cda2297eef19de | [
"Apache-2.0"
] | null | null | null | liteflow/input.py | frankilepro/LiTeFlow | d07105ea00ad29b701e1b100d9cda2297eef19de | [
"Apache-2.0"
] | null | null | null | """Utilities for input pipelines."""
import tensorflow as tf
def shuffle(tensors,
            capacity=32,
            min_after_dequeue=16,
            num_threads=1,
            dtypes=None,
            shapes=None,
            seed=None,
            shared_name=None,
            name='shuffle'):
    """Dequeue elements of `tensors` in a random order.

    Builds a `tf.RandomShuffleQueue`, feeds it with `tensors` through a
    `tf.train.QueueRunner` registered on the default collection, and
    returns the queue's dequeue op.

    Arguments:
      tensors: an iterable of tensors.
      capacity: capacity of the queue (default 32).
      min_after_dequeue: minimum number of elements kept in the queue after
        a dequeue, to guarantee a good level of mixing (default 16).
      num_threads: number of enqueuing threads for the queue runner
        (default 1).
      dtypes: optional list of `DType` objects, one per tensor; inferred
        from `tensors` when not provided.
      shapes: optional list of shapes, one per tensor.
      seed: optional seed for the random shuffling.
      shared_name: if non-empty, the queue is shared under this name across
        multiple sessions.
      name: name scope for the created ops.

    Returns:
      The tuple of tensors that was randomly dequeued from `tensors`.
    """
    tensor_list = list(tensors)
    with tf.name_scope(name, values=tensor_list):
        # Infer element dtypes from the inputs unless explicitly given.
        elem_dtypes = dtypes if dtypes else [t.dtype for t in tensor_list]
        queue = tf.RandomShuffleQueue(
            seed=seed,
            shared_name=shared_name,
            name='random_shuffle_queue',
            dtypes=elem_dtypes,
            shapes=shapes,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue)
        enqueue_op = queue.enqueue(tensor_list)
        # One enqueue op per thread; registering the runner lets
        # tf.train.start_queue_runners() drive it later.
        tf.train.add_queue_runner(
            tf.train.QueueRunner(queue, [enqueue_op] * num_threads))
        return queue.dequeue()
def shuffle_batch(tensors,
                  batch_size,
                  capacity=32,
                  num_threads=1,
                  min_after_dequeue=16,
                  dtypes=None,
                  shapes=None,
                  seed=None,
                  enqueue_many=False,
                  dynamic_pad=True,
                  allow_smaller_final_batch=False,
                  shared_name=None,
                  name='shuffle_batch'):
    """Create shuffled and padded batches of the tensors in `tensors`.

    First routes `tensors` through :func:`shuffle` (a
    `tf.RandomShuffleQueue`), restores the static shapes the queue loses,
    and finally hands the shuffled elements to `tf.train.batch` for
    batching and dynamic padding.

    Arguments:
      tensors: an iterable of tensors.
      batch_size: the batch size (int).
      capacity: capacity of the queues (default 32).
      num_threads: number of threads for the queue runners (default 1).
      min_after_dequeue: minimum number of elements kept in the shuffling
        queue after a dequeue, to guarantee a good level of mixing
        (default 16).
      dtypes: optional list of `DType` objects, one per tensor; inferred
        from `tensors` when not provided.
      shapes: optional list of shapes, one per tensor; inferred when not
        provided.
      seed: optional seed for the random shuffling.
      enqueue_many: whether each tensor in `tensors` is a single example.
      dynamic_pad: allow variable dimensions in input shapes; variable
        dimensions are padded on dequeue so all tensors in a batch match.
      allow_smaller_final_batch: if True, allow a smaller final batch when
        the queue runs out of items.
      shared_name: if set, the queues are shared under this name across
        sessions.
      name: name scope for the created ops.

    Returns:
      A batch of tensors from `tensors`, shuffled and padded.
    """
    tensor_list = list(tensors)
    with tf.name_scope(name, values=tensor_list):
        elem_dtypes = dtypes if dtypes else [t.dtype for t in tensor_list]
        elem_shapes = shapes if shapes else [t.get_shape() for t in tensor_list]
        dequeued = shuffle(tensor_list,
                           seed=seed,
                           dtypes=elem_dtypes,
                           capacity=capacity,
                           num_threads=num_threads,
                           min_after_dequeue=min_after_dequeue,
                           shared_name=shared_name,
                           name='shuffle')
        # The random shuffle queue drops static shape information; restore
        # it so tf.train.batch can validate and pad.
        for tensor, shape in zip(dequeued, elem_shapes):
            tensor.set_shape(shape)
        return tf.train.batch(
            tensors=dequeued,
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=capacity,
            dynamic_pad=dynamic_pad,
            allow_smaller_final_batch=allow_smaller_final_batch,
            shared_name=shared_name,
            enqueue_many=enqueue_many,
            name='batch')
| 40.705882 | 83 | 0.627168 |
import tensorflow as tf
def shuffle(tensors,
            capacity=32,
            min_after_dequeue=16,
            num_threads=1,
            dtypes=None,
            shapes=None,
            seed=None,
            shared_name=None,
            name='shuffle'):
    """Dequeue elements from `tensors` in a random order.

    Wraps a `tf.RandomShuffleQueue`: the tensors are enqueued by a
    `tf.train.QueueRunner` registered on the default collection, and the
    returned value is the queue's dequeue op.

    Arguments:
      tensors: an iterable of tensors.
      capacity: capacity of the queue (default 32).
      min_after_dequeue: minimum number of elements left in the queue after
        a dequeue, to ensure a good mix of elements (default 16).
      num_threads: number of threads used by the queue runner (default 1).
      dtypes: optional list of `DType` objects, one per tensor; inferred
        from `tensors` when not provided.
      shapes: optional list of shapes, one per tensor.
      seed: optional seed for the random shuffling.
      shared_name: if non-empty, the queue is shared under this name across
        multiple sessions.
      name: name scope for the created ops.

    Returns:
      The tuple of tensors that was randomly dequeued from `tensors`.
    """
    tensors = list(tensors)
    with tf.name_scope(name, values=tensors):
        # Infer element dtypes from the inputs unless explicitly provided.
        dtypes = dtypes or list([t.dtype for t in tensors])
        queue = tf.RandomShuffleQueue(
            seed=seed,
            shared_name=shared_name,
            name='random_shuffle_queue',
            dtypes=dtypes,
            shapes=shapes,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue)
        enqueue = queue.enqueue(tensors)
        # One enqueue op per thread; registering the runner lets
        # tf.train.start_queue_runners() drive it later.
        runner = tf.train.QueueRunner(queue, [enqueue] * num_threads)
        tf.train.add_queue_runner(runner)
        dequeue = queue.dequeue()
        return dequeue
def shuffle_batch(tensors,
                  batch_size,
                  capacity=32,
                  num_threads=1,
                  min_after_dequeue=16,
                  dtypes=None,
                  shapes=None,
                  seed=None,
                  enqueue_many=False,
                  dynamic_pad=True,
                  allow_smaller_final_batch=False,
                  shared_name=None,
                  name='shuffle_batch'):
    """Return a shuffled (and optionally padded) batch built from `tensors`.

    The inputs are first routed through `shuffle`, their static shapes are
    restored (dequeuing loses shape information), and the result is batched
    with `tf.train.batch`.
    """
    tensor_list = list(tensors)
    with tf.name_scope(name, values=tensor_list):
        element_dtypes = dtypes or [t.dtype for t in tensor_list]
        element_shapes = shapes or [t.get_shape() for t in tensor_list]
        shuffled = shuffle(tensor_list,
                           seed=seed,
                           dtypes=element_dtypes,
                           capacity=capacity,
                           num_threads=num_threads,
                           min_after_dequeue=min_after_dequeue,
                           shared_name=shared_name,
                           name='shuffle')
        # Re-attach the static shapes lost by the shuffle queue's dequeue.
        for tensor, shape in zip(shuffled, element_shapes):
            tensor.set_shape(shape)
        return tf.train.batch(
            tensors=shuffled,
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=capacity,
            dynamic_pad=dynamic_pad,
            allow_smaller_final_batch=allow_smaller_final_batch,
            shared_name=shared_name,
            enqueue_many=enqueue_many,
            name='batch')
| true | true |
f73642fd4c1a40e9dd300d78c5863ed4603901d6 | 12,577 | py | Python | tsfresh/feature_selection/relevance.py | Nickwangpeng/tsfresh | 48118627d9d4644906613e25b077ce2ec82ca2f9 | [
"MIT"
] | 1 | 2020-03-25T22:08:04.000Z | 2020-03-25T22:08:04.000Z | tsfresh/feature_selection/relevance.py | Chao-Jiang/tsfresh | 48118627d9d4644906613e25b077ce2ec82ca2f9 | [
"MIT"
] | null | null | null | tsfresh/feature_selection/relevance.py | Chao-Jiang/tsfresh | 48118627d9d4644906613e25b077ce2ec82ca2f9 | [
"MIT"
] | 1 | 2019-12-30T14:07:12.000Z | 2019-12-30T14:07:12.000Z | # -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
"""
Contains a feature selection method that evaluates the importance of the different extracted features. To do so,
for every feature the influence on the target is evaluated by an univariate tests and the p-Value is calculated.
The methods that calculate the p-values are called feature selectors.
Afterwards the Benjamini Hochberg procedure which is a multiple testing procedure decides which features to keep and
which to cut off (solely based on the p-values).
"""
from multiprocessing import Pool
import warnings
import numpy as np
import pandas as pd
from functools import partial, reduce
from tsfresh import defaults
from tsfresh.feature_selection.benjamini_hochberg_test import benjamini_hochberg_test
from tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \
target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test
from tsfresh.utilities.distribution import initialize_warnings_in_workers
def calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES,
                              show_warnings=defaults.SHOW_WARNINGS, chunksize=defaults.CHUNKSIZE,
                              test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
                              test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
                              test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
                              test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
                              fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT):
    """
    Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`.
    The relevance table is calculated for the intended machine learning task `ml_task`.

    For each feature an univariate significance test is conducted; the resulting p-values are then
    evaluated by the Benjamini Hochberg procedure to decide which features to keep (relevant) and
    which to discard. We test :math:`H_0` = "the feature is not relevant / independent of the target"
    against :math:`H_1` = "the feature is relevant / associated with the target".

    :param X: Feature matrix to be reduced to only the relevant features. May contain both binary
        and real-valued features at the same time.
    :type X: pandas.DataFrame
    :param y: Target vector used to test which features are relevant. Can be binary or real-valued.
    :type y: pandas.Series or numpy.ndarray
    :param ml_task: The intended machine learning task: `'classification'`, `'regression'` or
        `'auto'` (default). With `'auto'` the task is inferred from `y`: boolean, integer or object
        dtype means classification, everything else regression.
    :type ml_task: str
    :param test_for_binary_target_binary_feature: Test for binary target, binary feature (currently unused)
    :type test_for_binary_target_binary_feature: str
    :param test_for_binary_target_real_feature: Test for binary target, real feature
    :type test_for_binary_target_real_feature: str
    :param test_for_real_target_binary_feature: Test for real target, binary feature (currently unused)
    :type test_for_real_target_binary_feature: str
    :param test_for_real_target_real_feature: Test for real target, real feature (currently unused)
    :type test_for_real_target_real_feature: str
    :param fdr_level: Theoretical expected percentage of irrelevant features among all kept features.
    :type fdr_level: float
    :param hypotheses_independent: Can the significance of the features be assumed to be independent?
        Normally this should be False as features are never independent (e.g. mean and median).
    :type hypotheses_independent: bool
    :param n_jobs: Number of processes to use during the p-value calculation (0 = no multiprocessing).
    :type n_jobs: int
    :param show_warnings: Show warnings during the p-value calculation (useful for debugging).
    :type show_warnings: bool
    :param chunksize: Size of one chunk submitted to a worker process, or None for a heuristic choice.
    :type chunksize: None or int
    :return: A DataFrame indexed by the columns of `X` with columns "feature",
        "type" (binary, real or constant), "p_value" (lower means more significant) and
        "relevant" (True if Benjamini Hochberg rejected the null hypothesis for this feature).
    :rtype: pandas.DataFrame
    """
    if ml_task not in ['auto', 'classification', 'regression']:
        raise ValueError('ml_task must be one of: \'auto\', \'classification\', \'regression\'')
    elif ml_task == 'auto':
        ml_task = infer_ml_task(y)

    with warnings.catch_warnings():
        if not show_warnings:
            warnings.simplefilter("ignore")
        else:
            warnings.simplefilter("default")

        if n_jobs == 0:
            map_function = map
        else:
            pool = Pool(processes=n_jobs, initializer=initialize_warnings_in_workers, initargs=(show_warnings,))
            map_function = partial(pool.map, chunksize=chunksize)

        relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature'))
        relevance_table['feature'] = relevance_table.index
        relevance_table['type'] = pd.Series(
            map_function(get_feature_type, [X[feature] for feature in relevance_table.index]),
            index=relevance_table.index
        )
        table_real = relevance_table[relevance_table.type == 'real'].copy()
        table_binary = relevance_table[relevance_table.type == 'binary'].copy()

        # Constant features cannot be tested; they are never relevant.
        table_const = relevance_table[relevance_table.type == 'constant'].copy()
        # FIX: np.NaN (removed in NumPy 2.0) replaced by the canonical np.nan.
        table_const['p_value'] = np.nan
        table_const['relevant'] = False

        if not table_const.empty:
            warnings.warn("[test_feature_significance] Constant features: {}"
                          .format(", ".join(table_const.feature)), RuntimeWarning)

        if len(table_const) == len(relevance_table):
            # Nothing left to test; shut the pool down before the early return.
            if n_jobs != 0:
                pool.close()
                pool.terminate()
                pool.join()
            return table_const

        if ml_task == 'classification':
            # One-vs-rest: run the relevance computation once per class label,
            # then merge the per-label tables.
            tables = []
            for label in y.unique():
                _test_real_feature = partial(target_binary_feature_real_test, y=(y == label),
                                             test=test_for_binary_target_real_feature)
                _test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label))
                tmp = _calculate_relevance_table_for_implicit_target(
                    table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,
                    fdr_level, map_function
                )
                tables.append(tmp)
            relevance_table = combine_relevance_tables(tables)
        elif ml_task == 'regression':
            _test_real_feature = partial(target_real_feature_real_test, y=y)
            _test_binary_feature = partial(target_real_feature_binary_test, y=y)
            relevance_table = _calculate_relevance_table_for_implicit_target(
                table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,
                fdr_level, map_function
            )

        if n_jobs != 0:
            pool.close()
            pool.terminate()
            pool.join()

        # Re-attach the untestable constant features to the result.
        relevance_table = pd.concat([relevance_table, table_const], axis=0)

        if sum(relevance_table['relevant']) == 0:
            warnings.warn(
                "No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal percentage "
                "of irrelevant features, consider using an higher fdr level or add other features."
                .format(ml_task, fdr_level), RuntimeWarning)

    return relevance_table
def _calculate_relevance_table_for_implicit_target(table_real, table_binary, X, test_real_feature, test_binary_feature,
                                                   hypotheses_independent, fdr_level, map_function):
    """Fill in p-values for the real and binary feature tables and run the
    Benjamini-Hochberg procedure on the concatenated result."""
    for table, significance_test in ((table_real, test_real_feature),
                                     (table_binary, test_binary_feature)):
        table['p_value'] = pd.Series(
            map_function(significance_test, [X[feature] for feature in table.index]),
            index=table.index,
        )
    combined = pd.concat([table_real, table_binary])
    return benjamini_hochberg_test(combined, hypotheses_independent, fdr_level)
def infer_ml_task(y):
    """
    Infer the machine learning task to select for.
    The result will be either `'regression'` or `'classification'`.
    If the target vector only consists of integer typed values or objects, we assume the task is `'classification'`.
    Else `'regression'`.

    :param y: The target vector y.
    :type y: pandas.Series
    :return: 'classification' or 'regression'
    :rtype: str
    """
    # FIX: the deprecated alias np.object was removed in NumPy 1.24; the
    # builtin `object` compares equal to pandas' 'O' dtype in the same way.
    if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == object:
        ml_task = 'classification'
    else:
        ml_task = 'regression'
    return ml_task
def combine_relevance_tables(relevance_tables):
    """Merge several relevance tables into one.

    A feature is relevant in the result if it is relevant in any input table,
    and its p-value is the element-wise minimum over all tables (missing
    entries count as 1). Note: the first table in the list is modified in
    place and returned.

    :param relevance_tables: A list of relevance tables
    :type relevance_tables: List[pd.DataFrame]
    :return: The combined relevance table
    :rtype: pandas.DataFrame
    """
    def merge(accumulated, other):
        accumulated.relevant |= other.relevant
        accumulated.p_value = accumulated.p_value.combine(other.p_value, min, 1)
        return accumulated

    return reduce(merge, relevance_tables)
def get_feature_type(feature_column):
    """Classify a feature column as 'constant', 'binary' or 'real'.

    Here binary means that exactly two distinct values occur in the column.

    :param feature_column: The feature column
    :type feature_column: pandas.Series
    :return: 'constant', 'binary' or 'real'
    """
    distinct_values = set(feature_column.values)
    if len(distinct_values) == 1:
        return 'constant'
    if len(distinct_values) == 2:
        return 'binary'
    return 'real'
| 45.078853 | 120 | 0.678699 |
from multiprocessing import Pool
import warnings
import numpy as np
import pandas as pd
from functools import partial, reduce
from tsfresh import defaults
from tsfresh.feature_selection.benjamini_hochberg_test import benjamini_hochberg_test
from tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \
target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test
from tsfresh.utilities.distribution import initialize_warnings_in_workers
def calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES,
                              show_warnings=defaults.SHOW_WARNINGS, chunksize=defaults.CHUNKSIZE,
                              test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
                              test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
                              test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
                              test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
                              fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT):
    """Compute the feature relevance table for `X` against target `y`.

    Runs an univariate significance test per feature (possibly in a worker
    pool), then applies the Benjamini-Hochberg procedure at `fdr_level` to
    decide relevance. Returns a DataFrame indexed by feature with columns
    "feature", "type" ('binary'/'real'/'constant'), "p_value" and "relevant".
    """
    if ml_task not in ['auto', 'classification', 'regression']:
        raise ValueError('ml_task must be one of: \'auto\', \'classification\', \'regression\'')
    elif ml_task == 'auto':
        ml_task = infer_ml_task(y)

    with warnings.catch_warnings():
        if not show_warnings:
            warnings.simplefilter("ignore")
        else:
            warnings.simplefilter("default")

        if n_jobs == 0:
            map_function = map
        else:
            pool = Pool(processes=n_jobs, initializer=initialize_warnings_in_workers, initargs=(show_warnings,))
            map_function = partial(pool.map, chunksize=chunksize)

        relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature'))
        relevance_table['feature'] = relevance_table.index
        relevance_table['type'] = pd.Series(
            map_function(get_feature_type, [X[feature] for feature in relevance_table.index]),
            index=relevance_table.index
        )
        table_real = relevance_table[relevance_table.type == 'real'].copy()
        table_binary = relevance_table[relevance_table.type == 'binary'].copy()

        # Constant features cannot be tested and are never relevant.
        table_const = relevance_table[relevance_table.type == 'constant'].copy()
        # FIX: np.NaN (removed in NumPy 2.0) replaced by the canonical np.nan.
        table_const['p_value'] = np.nan
        table_const['relevant'] = False

        if not table_const.empty:
            warnings.warn("[test_feature_significance] Constant features: {}"
                          .format(", ".join(table_const.feature)), RuntimeWarning)

        if len(table_const) == len(relevance_table):
            if n_jobs != 0:
                pool.close()
                pool.terminate()
                pool.join()
            return table_const

        if ml_task == 'classification':
            # One-vs-rest: one relevance table per class label, merged below.
            tables = []
            for label in y.unique():
                _test_real_feature = partial(target_binary_feature_real_test, y=(y == label),
                                             test=test_for_binary_target_real_feature)
                _test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label))
                tmp = _calculate_relevance_table_for_implicit_target(
                    table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,
                    fdr_level, map_function
                )
                tables.append(tmp)
            relevance_table = combine_relevance_tables(tables)
        elif ml_task == 'regression':
            _test_real_feature = partial(target_real_feature_real_test, y=y)
            _test_binary_feature = partial(target_real_feature_binary_test, y=y)
            relevance_table = _calculate_relevance_table_for_implicit_target(
                table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,
                fdr_level, map_function
            )

        if n_jobs != 0:
            pool.close()
            pool.terminate()
            pool.join()

        relevance_table = pd.concat([relevance_table, table_const], axis=0)

        if sum(relevance_table['relevant']) == 0:
            warnings.warn(
                "No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal percentage "
                "of irrelevant features, consider using an higher fdr level or add other features."
                .format(ml_task, fdr_level), RuntimeWarning)

    return relevance_table
def _calculate_relevance_table_for_implicit_target(table_real, table_binary, X, test_real_feature, test_binary_feature,
                                                   hypotheses_independent, fdr_level, map_function):
    """Attach per-feature p-values to both tables, then apply Benjamini-Hochberg."""
    real_pvalues = map_function(test_real_feature, [X[col] for col in table_real.index])
    table_real['p_value'] = pd.Series(real_pvalues, index=table_real.index)

    binary_pvalues = map_function(test_binary_feature, [X[col] for col in table_binary.index])
    table_binary['p_value'] = pd.Series(binary_pvalues, index=table_binary.index)

    merged = pd.concat([table_real, table_binary])
    return benjamini_hochberg_test(merged, hypotheses_independent, fdr_level)
def infer_ml_task(y):
    """Infer 'classification' (integer or object dtype target) or 'regression'."""
    # FIX: the deprecated alias np.object was removed in NumPy 1.24; the
    # builtin `object` compares equal to pandas' 'O' dtype in the same way.
    if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == object:
        ml_task = 'classification'
    else:
        ml_task = 'regression'
    return ml_task
def combine_relevance_tables(relevance_tables):
    """Fold several relevance tables into one (the first table is mutated in place).

    Relevance is OR-ed across tables; p-values take the element-wise minimum
    (missing entries count as 1).
    """
    def _fold(base, other):
        base.relevant |= other.relevant
        base.p_value = base.p_value.combine(other.p_value, min, 1)
        return base

    return reduce(_fold, relevance_tables)
def get_feature_type(feature_column):
    """Return 'constant', 'binary' or 'real' based on the number of distinct values."""
    n_distinct = len(set(feature_column.values))
    # 1 distinct value -> constant, 2 -> binary, anything else -> real.
    return {1: 'constant', 2: 'binary'}.get(n_distinct, 'real')
| true | true |
f7364342837e8e9664cc1bea548231dfc7f87e39 | 5,173 | py | Python | welib/FEM/reduction.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | 24 | 2019-07-24T23:37:10.000Z | 2022-03-30T20:40:40.000Z | welib/FEM/reduction.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | null | null | null | welib/FEM/reduction.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | 11 | 2019-03-14T13:47:04.000Z | 2022-03-31T15:47:27.000Z | import numpy as np
from welib.system.eva import eig
def CraigBampton(MM, KK, Ileader, nModesCB=None, Ifollow=None, F=None, DD=None, fullModesOut=False):
    """
    Performs the Craig-Bampton (CB) reduction of a system given leader DOF indices
    and a number of modes. Reduced matrices, and Guyan and Craig-Bampton modes are returned.

    INPUTS
      Ileader : indices of leader DOFs
      MM, KK  : mass and stiffness matrices

    INPUTS (Optional)
      nModesCB: number of CB modes to keep. Default: all
      Ifollow : indices of follower DOFs. Default: complementary set to Ileader
      F, DD   : force vector and damping matrix (not implemented yet)
      fullModesOut: if True, the Guyan and CB modes are expanded to the full DOF set

    OUTPUTS
      Mr, Kr        : reduced mass and stiffness matrices (leader DOFs first, then CB modes)
      Phi_G, Phi_CB : Guyan (static) and Craig-Bampton (constrained elastic) modes
      f_G, f_CB     : Guyan and Craig-Bampton frequencies [Hz]

    AUTHOR: E. Branlard
    """
    # --- Input cleanup
    Ileader = np.asarray(Ileader).ravel()
    # --- Optional arguments
    if Ifollow is None:
        # Complementary set to Ileader
        Iall = np.arange(len(MM))
        Ifollow = [i for i in Iall if i not in Ileader]
    else:
        Ifollow = np.asarray(Ifollow).ravel()
    if nModesCB is None:
        nModesCB = len(Ifollow)

    # Partitioning - NOTE: leaders will be first in reduced matrices Mr and Kr
    Mll = MM[np.ix_(Ileader, Ileader)]
    Kll = KK[np.ix_(Ileader, Ileader)]
    Mff = MM[np.ix_(Ifollow, Ifollow)]
    Kff = KK[np.ix_(Ifollow, Ifollow)]
    Mlf = MM[np.ix_(Ileader, Ifollow)]
    Klf = KK[np.ix_(Ileader, Ifollow)]

    # --- Solve for Guyan modes (static condensation).
    # FIX: the previous implementation computed np.linalg.solve(Kff, Klf.T) and
    # immediately overwrote the result with the lstsq call; the dead solve is
    # removed. lstsq also tolerates an ill-conditioned Kff.
    Kff1Kfl = np.linalg.lstsq(Kff, Klf.T, rcond=None)[0]
    Phi_G = -Kff1Kfl

    # --- Solve EVP for constrained system (followers with leaders fixed)
    Phi_CB, Lambda_CB = eig(Kff, Mff)
    Omega2 = np.diag(Lambda_CB).copy()
    Omega2[Omega2 < 0] = 0.0  # clip spurious negative eigenvalues before sqrt
    f_CB = np.sqrt(Omega2) / (2 * np.pi)
    # --- Keep only the first nModesCB modes
    Phi_CB = Phi_CB[:, :nModesCB]
    Lambda_CB = Lambda_CB[:, :nModesCB]
    f_CB = f_CB[:nModesCB]

    # --- Building reduced matrices (same as Mr = T' M T with
    # T = [I 0; -Kff1Kfl Phi_CB], written out explicitly)
    Mr11 = Mll - (np.transpose(Kff1Kfl)).dot(np.transpose(Mlf)) - Mlf.dot(Kff1Kfl) + (np.transpose(Kff1Kfl)).dot(Mff).dot(Kff1Kfl)
    Kr11 = Kll - Klf.dot(Kff1Kfl)
    Mr12 = (Mlf - (np.transpose(Kff1Kfl)).dot(Mff)).dot(Phi_CB)
    ZZ = np.zeros((len(Ileader), nModesCB))

    # --- Guyan frequencies (eigenvectors of the reduced system are not needed)
    _, Lambda_G = eig(Kr11, Mr11)
    Omega2 = np.diag(Lambda_G).copy()
    Omega2[Omega2 < 0] = 0.0
    f_G = np.sqrt(Omega2) / (2 * np.pi)

    # Building reduced matrices
    Mr = np.block([[Mr11, Mr12], [Mr12.T, np.eye(nModesCB)]])
    Kr = np.block([[Kr11, ZZ], [ZZ.T, Lambda_CB[:nModesCB, :]]])

    # --- Augmenting modes so that they have the same dimension as MM
    # ("1" on leader DOFs for Guyan modes, "0" for CB modes)
    if fullModesOut:
        Phi_G, Phi_CB = augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=Ifollow)

    if DD is not None:
        raise NotImplementedError('Not done')
    if F is not None:
        raise NotImplementedError('Not done')

    return Mr, Kr, Phi_G, Phi_CB, f_G, f_CB
def augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=None):
    """Expand Guyan and Craig-Bampton mode shapes back to full-DOF vectors.

    Leader rows of the Guyan modes form an identity block; leader rows of the
    CB modes are zero. Follower rows are filled from the input mode shapes.
    """
    n_lead = len(Ileader)
    n_total = n_lead + Phi_G.shape[0]
    if Ifollow is None:
        Ifollow = list(np.setdiff1d(np.arange(n_total), Ileader))

    # Guyan modes: identity on leader DOFs, static modes on followers.
    full_G = np.zeros((n_total, n_lead))
    full_G[Ileader, :] = np.eye(n_lead)
    full_G[Ifollow, :] = Phi_G

    # CB modes: leader rows stay zero, elastic modes on followers.
    full_CB = np.zeros((n_total, Phi_CB.shape[1]))
    full_CB[Ifollow, :] = Phi_CB

    return full_G, full_CB
if __name__=='__main__':
    # Demo: Craig-Bampton reduction of a single 2-node Euler-Bernoulli beam element.
    np.set_printoptions(linewidth=500)
    L = 100
    EI = 1868211939147.334
    Maff = L * 8828.201296825122
    # Standard 4x4 beam-element stiffness and consistent mass matrices
    # (DOFs: [u1, theta1, u2, theta2]).
    KK = EI / (L ** 3) * np.array([[12,6 * L,- 12,6 * L],[6 * L,4 * L ** 2,- 6 * L,2 * L ** 2],[- 12,- 6 * L,12,- 6 * L],[6 * L,2 * L ** 2,- 6 * L,4 * L ** 2]])
    MM = Maff / 420 * np.array([[156,22 * L,54,- 13 * L],[22 * L,4 * L ** 2,13 * L,- 3 * L ** 2],[54,13 * L,156,- 22 * L],[- 13 * L,- 3 * L ** 2,- 22 * L,4 * L ** 2]])
    print(MM)
    # NOTE(review): CraigBampton returns (Mr, Kr, Phi_G, Phi_CB, f_G, f_CB);
    # the names f_CB and f_G appear swapped in this unpacking -- confirm intent.
    Mr,Kr,Phi_G,Phi_CB,f_CB,f_G = CraigBampton(MM,KK,[2], nModesCB=2)
    print(Mr)
    print(Kr)
    print(Phi_G)
    print(Phi_CB)
    print(f_CB)
    ## --- Solve EVA
    __,Lambda = eig(Kr,Mr)
    f= np.sqrt(np.sort(np.diag(Lambda)))/(2*np.pi)
    print(f)
    # f = np.sqrt(Omega2) / (2 * pi)
    # for i in np.arange(1,np.amin(8,Mr.shape[1-1])+1).reshape(-1):
    # print('f%d=%8.3f Rayleigh Ratio=%.5f\n' % (i,f(i),(f(i) / fc) ** 2))
| 34.486667 | 167 | 0.594626 | import numpy as np
from welib.system.eva import eig
def CraigBampton(MM, KK, Ileader, nModesCB=None, Ifollow=None, F=None, DD=None, fullModesOut=False):
    """Craig-Bampton reduction of (MM, KK) given leader DOF indices.

    Returns (Mr, Kr, Phi_G, Phi_CB, f_G, f_CB): reduced mass/stiffness matrices
    (leader DOFs first, then CB modes), Guyan and Craig-Bampton modes, and the
    corresponding frequencies in Hz.
    """
    # FIX: this block contained a stripped-comment remnant
    # ("ing reduced matrices") that made the function a syntax error, and a
    # dead np.linalg.solve whose result was immediately overwritten by lstsq.
    Ileader = np.asarray(Ileader).ravel()
    if Ifollow is None:
        # Complementary set to Ileader
        Iall = np.arange(len(MM))
        Ifollow = [i for i in Iall if i not in Ileader]
    else:
        Ifollow = np.asarray(Ifollow).ravel()
    if nModesCB is None:
        nModesCB = len(Ifollow)

    # Partitioning - leaders come first in the reduced matrices.
    Mll = MM[np.ix_(Ileader, Ileader)]
    Kll = KK[np.ix_(Ileader, Ileader)]
    Mff = MM[np.ix_(Ifollow, Ifollow)]
    Kff = KK[np.ix_(Ifollow, Ifollow)]
    Mlf = MM[np.ix_(Ileader, Ifollow)]
    Klf = KK[np.ix_(Ileader, Ifollow)]

    # Guyan (static condensation) modes; lstsq tolerates ill-conditioned Kff.
    Kff1Kfl = np.linalg.lstsq(Kff, Klf.T, rcond=None)[0]
    Phi_G = -Kff1Kfl

    # Eigenvalue problem of the constrained (followers-fixed) system.
    Phi_CB, Lambda_CB = eig(Kff, Mff)
    Omega2 = np.diag(Lambda_CB).copy()
    Omega2[Omega2 < 0] = 0.0  # clip spurious negative eigenvalues before sqrt
    f_CB = np.sqrt(Omega2) / (2 * np.pi)
    # Keep only the first nModesCB modes.
    Phi_CB = Phi_CB[:, :nModesCB]
    Lambda_CB = Lambda_CB[:, :nModesCB]
    f_CB = f_CB[:nModesCB]

    # Building reduced matrices.
    Mr11 = Mll - (np.transpose(Kff1Kfl)).dot(np.transpose(Mlf)) - Mlf.dot(Kff1Kfl) + (np.transpose(Kff1Kfl)).dot(Mff).dot(Kff1Kfl)
    Kr11 = Kll - Klf.dot(Kff1Kfl)
    Mr12 = (Mlf - (np.transpose(Kff1Kfl)).dot(Mff)).dot(Phi_CB)
    ZZ = np.zeros((len(Ileader), nModesCB))

    # Guyan frequencies (reduced-system eigenvectors are not needed).
    _, Lambda_G = eig(Kr11, Mr11)
    Omega2 = np.diag(Lambda_G).copy()
    Omega2[Omega2 < 0] = 0.0
    f_G = np.sqrt(Omega2) / (2 * np.pi)

    Mr = np.block([[Mr11, Mr12], [Mr12.T, np.eye(nModesCB)]])
    Kr = np.block([[Kr11, ZZ], [ZZ.T, Lambda_CB[:nModesCB, :]]])

    # Optionally expand the modes back to the full DOF set.
    if fullModesOut:
        Phi_G, Phi_CB = augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=Ifollow)

    if DD is not None:
        raise NotImplementedError('Not done')
    if F is not None:
        raise NotImplementedError('Not done')

    return Mr, Kr, Phi_G, Phi_CB, f_G, f_CB
def augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=None):
    """Pad Guyan and CB mode shapes back to the full DOF dimension.

    Guyan modes receive an identity block on the leader rows; CB modes are
    zero on the leader rows. Follower rows are copied from the inputs.
    """
    num_leaders = len(Ileader)
    num_dofs = num_leaders + Phi_G.shape[0]
    if Ifollow is None:
        Ifollow = list(np.setdiff1d(np.arange(num_dofs), Ileader))

    guyan_full = np.zeros((num_dofs, num_leaders))
    guyan_full[Ileader, :] = np.eye(num_leaders)
    guyan_full[Ifollow, :] = Phi_G

    cb_full = np.zeros((num_dofs, Phi_CB.shape[1]))
    cb_full[Ifollow, :] = Phi_CB

    return guyan_full, cb_full
if __name__=='__main__':
    # Demo: Craig-Bampton reduction of a single 2-node Euler-Bernoulli beam element.
    np.set_printoptions(linewidth=500)
    L = 100
    EI = 1868211939147.334
    Maff = L * 8828.201296825122
    # Standard 4x4 beam-element stiffness and consistent mass matrices
    # (DOFs: [u1, theta1, u2, theta2]).
    KK = EI / (L ** 3) * np.array([[12,6 * L,- 12,6 * L],[6 * L,4 * L ** 2,- 6 * L,2 * L ** 2],[- 12,- 6 * L,12,- 6 * L],[6 * L,2 * L ** 2,- 6 * L,4 * L ** 2]])
    MM = Maff / 420 * np.array([[156,22 * L,54,- 13 * L],[22 * L,4 * L ** 2,13 * L,- 3 * L ** 2],[54,13 * L,156,- 22 * L],[- 13 * L,- 3 * L ** 2,- 22 * L,4 * L ** 2]])
    print(MM)
    # NOTE(review): CraigBampton returns (Mr, Kr, Phi_G, Phi_CB, f_G, f_CB);
    # the names f_CB and f_G appear swapped in this unpacking -- confirm intent.
    Mr,Kr,Phi_G,Phi_CB,f_CB,f_G = CraigBampton(MM,KK,[2], nModesCB=2)
    print(Mr)
    print(Kr)
    print(Phi_G)
    print(Phi_CB)
    print(f_CB)
    ## --- Solve EVA
    __,Lambda = eig(Kr,Mr)
    f= np.sqrt(np.sort(np.diag(Lambda)))/(2*np.pi)
    print(f)
    # f = np.sqrt(Omega2) / (2 * pi)
    # for i in np.arange(1,np.amin(8,Mr.shape[1-1])+1).reshape(-1):
    # print('f%d=%8.3f Rayleigh Ratio=%.5f\n' % (i,f(i),(f(i) / fc) ** 2))
| true | true |
f7364375968b371ac7c5110a2aae3ab9342ca3bb | 2,884 | py | Python | .venv/lib/python3.8/site-packages/pycrop/tools.py | amon-wanyonyi/publication | caae5e04f9191493408145ac80dc943dba7e93ca | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/pycrop/tools.py | amon-wanyonyi/publication | caae5e04f9191493408145ac80dc943dba7e93ca | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/pycrop/tools.py | amon-wanyonyi/publication | caae5e04f9191493408145ac80dc943dba7e93ca | [
"MIT"
] | null | null | null | import os
import re
from math import floor
from PIL import Image
def default_save_path(path, size):
    """Derive an output path from *path* by inserting '__w{W}h{H}' before the extension."""
    stem, ext = os.path.splitext(path)
    return '%s__w%dh%d%s' % (stem, *size, ext)
def assure_path_exists(path):
    """Create directory *path* (including parents) if it does not exist yet."""
    if not os.path.exists(path):
        # FIX: exist_ok guards against the TOCTOU race where another process
        # creates the directory between the exists() check and makedirs().
        os.makedirs(path, exist_ok=True)
def normilize_size(size, img_size):
    """Fill in a missing (None) width or height using *img_size*'s aspect ratio.

    If both dimensions are given, *size* is returned unchanged.
    NOTE(review): the name keeps the historical misspelling of "normalize"
    for API compatibility.
    """
    w, h = size
    if w is not None and h is not None:
        return size
    full_w, full_h = img_size
    if w is None:
        return h * full_w / full_h, h
    return w, w * full_h / full_w
def get_cover_size(from_size, to_size):
    """Smallest aspect-preserving scale of *from_size* that fully covers *to_size*."""
    # Use the larger per-axis ratio so both target axes end up covered.
    scale = max(to_size[0] / from_size[0], to_size[1] / from_size[1])
    return floor(scale * from_size[0]), floor(scale * from_size[1])
def get_contain_size(from_size, to_size):
    """Largest aspect-preserving scale of *from_size* that fits inside *to_size*."""
    # Use the smaller per-axis ratio so both axes stay within the target.
    scale = min(to_size[0] / from_size[0], to_size[1] / from_size[1])
    return floor(scale * from_size[0]), floor(scale * from_size[1])
def get_coords_from_center(from_size, to_size):
    """Return the (left, upper, right, lower) box of *to_size* centered in *from_size*."""
    fw, fh = from_size
    tw, th = to_size
    left = floor((fw - tw) / 2)
    upper = floor((fh - th) / 2)
    right = floor((fw + tw) / 2)
    lower = floor((fh + th) / 2)
    return left, upper, right, lower
def adjust_coords(coords, size, point):
    """Shift the crop box *coords* toward the focal *point*, clamped inside *size*.

    *point* is given as (x%, y%) percentages; (50, 50) keeps the box centered.
    """
    dx = size[0] * (point[0] - 50) / 100
    dy = size[1] * (point[1] - 50) / 100
    # Clamp against the low edges first, then the high edges
    # (a later clamp overrides an earlier one, matching the original order).
    if coords[0] + dx < 0:
        dx = -coords[0]
    if coords[1] + dy < 0:
        dy = -coords[1]
    if coords[3] + dy > size[1]:
        dy = size[1] - coords[3]
    if coords[2] + dx > size[0]:
        dx = size[0] - coords[2]
    offsets = (dx, dy, dx, dy)
    return tuple(floor(c + d) for c, d in zip(coords, offsets))
def cover(path, size, point, savepath=None, quality=90):
    """Crop-and-scale the image at *path* to exactly *size*, biased toward *point*.

    The image is scaled so it fully covers *size* (aspect preserved), then a
    *size*-sized window shifted toward the focal *point* (x%, y%) is cut out
    and saved. Returns (True, savepath) on success.
    """
    with Image.open(path) as img:
        # A None width/height is filled in from the source aspect ratio.
        size = normilize_size(size, img.size)
        if savepath is None:
            savepath = default_save_path(path, size)
        assure_path_exists(os.path.dirname(savepath))
        cover_size = get_cover_size(img.size, size)
        coords = get_coords_from_center(cover_size, size)
        coords = adjust_coords(coords, cover_size, point)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
        # requires Image.LANCZOS -- confirm the pinned Pillow version.
        img = img.resize(cover_size, Image.ANTIALIAS)
        img = img.crop(coords)
        img.save(savepath, subsampling=0,
                 quality=quality, optimize=True)
        return (True, savepath)
    # Unreachable in practice: Image.open raises on failure rather than
    # falling through; kept as a defensive failure result.
    return (False, '')
def contain(path, size, savepath=None, quality=90):
    """Scale the image at *path* so it fits entirely inside *size* (no cropping).

    Aspect ratio is preserved; the result may be smaller than *size* on one
    axis. Returns (True, savepath) on success.
    """
    with Image.open(path) as img:
        # A None width/height is filled in from the source aspect ratio.
        size = normilize_size(size, img.size)
        if savepath is None:
            savepath = default_save_path(path, size)
        assure_path_exists(os.path.dirname(savepath))
        contain_size = get_contain_size(img.size, size)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
        # requires Image.LANCZOS -- confirm the pinned Pillow version.
        img = img.resize(contain_size, Image.ANTIALIAS)
        img.save(savepath, subsampling=0,
                 quality=quality, optimize=True)
        return (True, savepath)
    # Unreachable in practice: Image.open raises on failure rather than
    # falling through; kept as a defensive failure result.
    return (False, '')
if __name__ == "__main__":
    # Demo: produce a 500px-wide cover crop of img.jpg, centered on the middle.
    img_path = os.path.abspath('img.jpg')
    cover(img_path, (500, None), (50, 50))
| 25.298246 | 69 | 0.601942 | import os
import re
from math import floor
from PIL import Image
def default_save_path(path, size):
    """Derive an output path from *path* by inserting '__w{W}h{H}' before the extension."""
    savepath, ext = os.path.splitext(path)
    savepath = '%s__w%dh%d%s' % (savepath, *size, ext)
    return savepath
def assure_path_exists(path):
    """Create directory *path* (including parents) if it does not exist yet."""
    if not os.path.exists(path):
        # FIX: exist_ok guards against the TOCTOU race where another process
        # creates the directory between the exists() check and makedirs().
        os.makedirs(path, exist_ok=True)
def normilize_size(size, img_size):
    """Fill in a missing (None) width or height in *size* using *img_size*'s aspect ratio.

    NOTE(review): the name keeps the historical misspelling of "normalize"
    for API compatibility.
    """
    if None not in size:
        return size
    W, H = img_size
    w, h = size
    if w is None:
        # Derive the width from the requested height.
        return h * W / H, h
    # Derive the height from the requested width.
    return w, w * H / W
def get_cover_size(from_size, to_size):
    """Smallest aspect-preserving scale of *from_size* that fully covers *to_size*."""
    # The larger per-axis ratio guarantees both target axes are covered.
    p = max([ts / fs for ts, fs in zip(to_size, from_size)])
    return tuple(floor(p * fs) for fs in from_size)
def get_contain_size(from_size, to_size):
    """Largest aspect-preserving scale of *from_size* that fits inside *to_size*."""
    # The smaller per-axis ratio keeps both axes within the target.
    p = min([ts / fs for ts, fs in zip(to_size, from_size)])
    return tuple(floor(p * fs) for fs in from_size)
def get_coords_from_center(from_size, to_size):
    """Return the (left, upper, right, lower) box of *to_size* centered in *from_size*."""
    return (
        floor((from_size[0] - to_size[0]) / 2),
        floor((from_size[1] - to_size[1]) / 2),
        floor((from_size[0] + to_size[0]) / 2),
        floor((from_size[1] + to_size[1]) / 2)
    )
def adjust_coords(coords, size, point):
    """Shift the crop box *coords* toward the focal *point* (x%, y%), clamped inside *size*."""
    # Offset as a fraction of the full image; (50, 50) means "keep centered".
    vec = [
        size[0] * (point[0] - 50) / 100,
        size[1] * (point[1] - 50) / 100
    ]
    # Clamp low edges first, then high edges (a later clamp overrides an earlier one).
    if coords[0] + vec[0] < 0:
        vec[0] = - coords[0]
    if coords[1] + vec[1] < 0:
        vec[1] = - coords[1]
    if coords[3] + vec[1] > size[1]:
        vec[1] = size[1] - coords[3]
    if coords[2] + vec[0] > size[0]:
        vec[0] = size[0] - coords[2]
    # 2 * vec repeats the list -> (dx, dy, dx, dy), pairing each corner with its offset.
    return tuple(floor(sum(coord)) for coord in zip(coords, 2 * vec))
def cover(path, size, point, savepath=None, quality=90):
    """Crop-and-scale the image at *path* to exactly *size*, biased toward *point*.

    The image is scaled so it fully covers *size* (aspect preserved), then a
    *size*-sized window shifted toward the focal *point* (x%, y%) is cut out
    and saved. Returns (True, savepath) on success.
    """
    with Image.open(path) as img:
        # A None width/height is filled in from the source aspect ratio.
        size = normilize_size(size, img.size)
        if savepath is None:
            savepath = default_save_path(path, size)
        assure_path_exists(os.path.dirname(savepath))
        cover_size = get_cover_size(img.size, size)
        coords = get_coords_from_center(cover_size, size)
        coords = adjust_coords(coords, cover_size, point)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
        # requires Image.LANCZOS -- confirm the pinned Pillow version.
        img = img.resize(cover_size, Image.ANTIALIAS)
        img = img.crop(coords)
        img.save(savepath, subsampling=0,
                 quality=quality, optimize=True)
        return (True, savepath)
    # Unreachable in practice: Image.open raises on failure rather than
    # falling through; kept as a defensive failure result.
    return (False, '')
def contain(path, size, savepath=None, quality=90):
    """Scale the image at *path* so it fits entirely inside *size* (no cropping).

    Aspect ratio is preserved; the result may be smaller than *size* on one
    axis. Returns (True, savepath) on success.
    """
    with Image.open(path) as img:
        # A None width/height is filled in from the source aspect ratio.
        size = normilize_size(size, img.size)
        if savepath is None:
            savepath = default_save_path(path, size)
        assure_path_exists(os.path.dirname(savepath))
        contain_size = get_contain_size(img.size, size)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
        # requires Image.LANCZOS -- confirm the pinned Pillow version.
        img = img.resize(contain_size, Image.ANTIALIAS)
        img.save(savepath, subsampling=0,
                 quality=quality, optimize=True)
        return (True, savepath)
    # Unreachable in practice: Image.open raises on failure rather than
    # falling through; kept as a defensive failure result.
    return (False, '')
if __name__ == "__main__":
    # Demo: produce a 500px-wide cover crop of img.jpg, centered on the middle.
    img_path = os.path.abspath('img.jpg')
    cover(img_path, (500, None), (50, 50))
| true | true |
f73644650dd1855dd44249090e286bf11f7c6b16 | 11,199 | py | Python | mlbench_core/optim/pytorch/fp_optimizers.py | c4dt/mlbench-core | 8a5cf6e00ff4535b2aea23b213241858a5ee5f00 | [
"Apache-2.0"
] | null | null | null | mlbench_core/optim/pytorch/fp_optimizers.py | c4dt/mlbench-core | 8a5cf6e00ff4535b2aea23b213241858a5ee5f00 | [
"Apache-2.0"
] | null | null | null | mlbench_core/optim/pytorch/fp_optimizers.py | c4dt/mlbench-core | 8a5cf6e00ff4535b2aea23b213241858a5ee5f00 | [
"Apache-2.0"
] | null | null | null | # import ctypes
import logging
import math
import torch
import torch.distributed as dist
from torch.nn.utils import clip_grad_norm_
from mlbench_core.utils.pytorch.distributed import (
AllReduceAggregation,
AllReduceAggregationHVD,
)
try:
from apex.optimizers import FusedAdam
from apex import amp
except ImportError as e:
pass
logger = logging.getLogger("mlbench")
class FP16Optimizer:
    """
    Mixed precision optimizer with dynamic loss scaling and backoff.
    https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor

    Keeps the model weights in fp16 and a single flat fp32 "master" copy
    that the wrapped optimizer actually updates; after every successful
    step the updated master weights are copied back into the fp16 model.

    Args:
        fp16_model (`obj`:torch.nn.Module): model (previously casted to half)
        world_size (int): Distributed world size
        use_cuda (bool): Use cuda tensors for aggregation
        use_horovod (bool): Use Horovod for aggregation
        by_layer (bool): Aggregate by layer
        grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients
        loss_scale (int): initial loss scale
        dls_downscale (int): loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients
        dls_upscale (int): loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully
        dls_upscale_interval (int): interval for loss scale upscaling
        average_models (bool): Average the models
    """

    def __init__(
        self,
        fp16_model,
        world_size,
        use_cuda=False,
        use_horovod=False,
        by_layer=False,
        grad_clip=float("inf"),
        loss_scale=1024,
        dls_downscale=2,
        dls_upscale=2,
        dls_upscale_interval=128,
        average_models=True,
    ):
        self.use_cuda = use_cuda
        self.fp16_model = fp16_model
        # Flat fp16 copy and flat fp32 master copy of all parameters.
        self.fp16_params, self.fp32_params = self.initialize_flat_fp32_weight()
        # Consecutive batches with finite gradients since the last overflow;
        # once it reaches dls_upscale_interval the loss scale is increased.
        self.since_last_invalid = 0
        self.loss_scale = loss_scale
        self.dls_downscale = dls_downscale
        self.dls_upscale = dls_upscale
        self.dls_upscale_interval = dls_upscale_interval
        self.grad_clip = grad_clip
        # NOTE(review): read from torch.distributed, while the world_size
        # argument is only forwarded to the aggregator -- confirm intended.
        self.world_size = dist.get_world_size()
        self.optimizer = None

        if use_horovod:
            self.agg = AllReduceAggregationHVD(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)
        else:
            self.agg = AllReduceAggregation(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)

        if average_models:
            self.agg_mode = "avg"
        else:
            raise NotImplementedError("Only average model is supported right now.")

    def set_optimizer(self, optimizer):
        """Attaches the wrapped optimizer that updates the fp32 master copy."""
        self.optimizer = optimizer

    # Flattening master weight
    def initialize_flat_fp32_weight(self):
        """ Initializes flat fp16 and fp32 copies of the model's parameters

        Returns:
            (torch.Tensor, torch.Tensor): The parameters in fp16 and fp32,
            each a single flat ``torch.nn.Parameter`` with an allocated
            (uninitialized) ``.grad`` buffer.
        """
        # Set all gradients to None
        for p in self.fp16_model.parameters():
            p.grad = None

        # Count total number of elements across all parameters.
        nelem = 0
        for p in self.fp16_model.parameters():
            nelem += p.numel()
        fp32_params = torch.empty(
            nelem,
            dtype=torch.float32,
            device=torch.device("cuda" if self.use_cuda else "cpu"),
        )
        fp16_params = torch.empty(
            nelem,
            dtype=torch.float16,
            device=torch.device("cuda" if self.use_cuda else "cpu"),
        )

        # Copy every parameter into the flat buffers, back to back.
        pointer = 0
        for p in self.fp16_model.parameters():
            nelem = p.numel()
            fp32_params[pointer : pointer + nelem].copy_(p.data.view(-1))
            fp16_params[pointer : pointer + nelem].copy_(p.data.view(-1))
            pointer += nelem

        fp32_params = torch.nn.Parameter(fp32_params, requires_grad=True)
        fp32_params.grad = torch.autograd.Variable(
            fp32_params.data.new(*fp32_params.size())
        )
        fp16_params = torch.nn.Parameter(fp16_params, requires_grad=True)
        fp16_params.grad = torch.autograd.Variable(
            fp16_params.data.new(*fp16_params.size())
        )

        return fp16_params, fp32_params

    @staticmethod
    def fp16_to_fp32_flat_grad(fp32_params, fp16_model):
        """ Copies the model's fp16 gradients into the flat fp32 grad buffer

        Args:
            fp32_params (torch.Tensor): flat fp32 master parameter
            fp16_model (torch.nn.Module): Model in fp16
        """
        pointer = 0
        for p in fp16_model.parameters():
            nelem = p.numel()
            fp32_params.grad.data[pointer : pointer + nelem].copy_(p.grad.data.view(-1))
            pointer += nelem

    @staticmethod
    def fp32_to_fp16_grads(fp16_model, fp32_params):
        """ Copies the fp32 master weights back into the fp16 model in-place

        Note: despite the name, this copies parameter *values* (``p.data``),
        not gradients.

        Args:
            fp16_model (torch.nn.Module): Model in fp16
            fp32_params (torch.Tensor): flat fp32 master parameter
        """
        pointer = 0
        for p in fp16_model.parameters():
            nelem = p.numel()
            p.data.view(-1).copy_(fp32_params.data[pointer : pointer + nelem])
            pointer += nelem

    def backward_loss(self, loss):
        """ Scales ``loss`` in-place by the current loss scale and backprops

        Args:
            loss (torch.Tensor): The loss
        """
        loss *= self.loss_scale
        loss.backward()

    def step(self, closure=None):
        """
        Performs one step of the optimizer.
        Applies loss scaling, computes gradients in fp16, converts gradients to
        fp32, inverts scaling and applies optional gradient norm clipping.
        If gradients are finite, it applies update to fp32 master weights and
        copies updated parameters to fp16 model for the next iteration. If
        gradients are not finite, it skips the batch and adjusts scaling factor
        for the next iteration.

        Args:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.

        Returns:
            bool: True if the weights were updated, False if the batch was
            skipped because of non-finite gradients.
        """
        scaling_factor = self.loss_scale

        # Aggregate gradients across workers (still in fp16).
        self.agg(self.fp16_model, self.agg_mode)
        # Cast fp16 grads into the flat fp32 buffer for the optimizer.
        self.fp16_to_fp32_flat_grad(self.fp32_params, self.fp16_model)
        # Undo the loss scaling applied in backward_loss.
        if scaling_factor != 1.0:
            self.fp32_params.grad.data /= scaling_factor
        norm = clip_grad_norm_([self.fp32_params], self.grad_clip)

        updated = False
        if math.isfinite(norm):
            self.optimizer.step(closure=closure)
            # Copy the updated master weights back into the fp16 model.
            self.fp32_to_fp16_grads(self.fp16_model, self.fp32_params)
            self.since_last_invalid += 1
            updated = True
        else:
            # Overflow: skip this batch and back off the loss scale.
            self.loss_scale /= self.dls_downscale
            self.since_last_invalid = 0
            logger.info(f"Skipped batch, new scale: {self.loss_scale}")

        if self.since_last_invalid >= self.dls_upscale_interval:
            # Long run of finite gradients: try a larger scale (capped at 8192).
            self.loss_scale *= self.dls_upscale
            self.loss_scale = min(self.loss_scale, 8192.0)
            self.since_last_invalid = 0

        # Free fp16 gradients for the next iteration.
        for p in self.fp16_model.parameters():
            p.grad = None

        return updated

    def zero_grad(self):
        """Clears the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
class FP32Optimizer:
    """
    Standard single-precision optimizer: computes backward and applies the
    weight update after all-reducing gradients across workers.

    Args:
        model (`obj`:torch.nn.Module): model
        world_size (int): Distributed world size
        use_cuda (bool): Use cuda tensors for aggregation
        by_layer (bool): Aggregate by layer
        grad_clip (float, optional): max L2 norm of the gradients;
            ``None`` or ``float("inf")`` disables clipping
        average_models (bool): Average the models
    """

    def __init__(
        self,
        model,
        world_size,
        use_cuda=False,
        by_layer=False,
        grad_clip=None,
        average_models=True,
    ):
        self.model = model
        self.grad_clip = grad_clip
        self.optimizer = None
        # All-reduce gradient aggregator reused on every step.
        self.agg = AllReduceAggregation(
            world_size=world_size, use_cuda=use_cuda
        ).agg_grad(by_layer=by_layer)
        if average_models:
            self.agg_mode = "avg"
        else:
            raise NotImplementedError("Only average model is supported right now.")

    def set_optimizer(self, optimizer):
        """Attaches the wrapped torch optimizer that performs the updates."""
        self.optimizer = optimizer

    def step(self, closure=None):
        """
        Performs one step of the optimizer: optional gradient clipping,
        cross-worker aggregation, then the wrapped optimizer's update.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            bool: always True (fp32 updates are never skipped).
        """
        # Bug fix: grad_clip defaults to None, and clip_grad_norm_ cannot
        # take a None max-norm -- treat None like "no clipping", same as inf.
        if self.grad_clip is not None and self.grad_clip != float("inf"):
            clip_grad_norm_(self.model.parameters(), self.grad_clip)
        self.agg(self.model, self.agg_mode)
        self.optimizer.step(closure=closure)
        return True

    def backward_loss(self, loss):
        """Runs the backward pass on ``loss`` (no scaling needed in fp32)."""
        loss.backward()

    def zero_grad(self):
        """Clears the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
class AMPOptimizer:
    """
    Optimizer compatible with NVIDIA apex AMP.

    Uses AMP to apply loss scaling, computes backward and applies weight
    update.

    Args:
        model (`obj`:torch.nn.Module): model
        grad_clip (float, optional): max L2 norm of the gradients;
            ``None`` or ``float("inf")`` disables clipping
        loss_scale (int): initial loss scale
        dls_upscale_interval (int): interval for loss scale upscaling
        average_models (bool): Average the models
        world_size (int): Distributed world size
        use_cuda (bool): Use cuda tensors for aggregation
        by_layer (bool): Aggregate by layer
        use_horovod (bool): Use Horovod for aggregation
    """

    def __init__(
        self,
        model,
        grad_clip=None,
        loss_scale=8192,
        dls_upscale_interval=128,
        average_models=True,
        world_size=1,
        use_cuda=False,
        by_layer=False,
        use_horovod=False,
    ):
        self.model = model
        self.grad_clip = grad_clip
        self.optimizer = None
        # Configure apex's (first) dynamic loss scaler directly.
        loss_scaler = amp._amp_state.loss_scalers[0]
        loss_scaler._loss_scale = loss_scale
        loss_scaler._scale_seq_len = dls_upscale_interval

        if average_models:
            self.agg_mode = "avg"
        else:
            raise NotImplementedError("Only average model is supported right now.")

        if use_horovod:
            self.agg = AllReduceAggregationHVD(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)
        else:
            self.agg = AllReduceAggregation(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)

    def set_optimizer(self, optimizer):
        """Attaches the wrapped (amp-initialized) torch optimizer."""
        self.optimizer = optimizer

    def backward_loss(self, loss):
        """Backward pass with AMP loss scaling."""
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()

    def step(self, closure=None):
        """
        Performs one step of the optimizer: optional clipping of the AMP
        master gradients, cross-worker aggregation, then the update.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            bool: always True.
        """
        # Bug fix: grad_clip defaults to None, and clip_grad_norm_ cannot
        # take a None max-norm -- treat None like "no clipping", same as inf.
        if self.grad_clip is not None and self.grad_clip != float("inf"):
            clip_grad_norm_(amp.master_params(self.optimizer), self.grad_clip)
        self.agg(self.model, self.agg_mode)
        self.optimizer.step(closure=closure)
        return True

    def zero_grad(self):
        """Clears the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
| 32.273775 | 150 | 0.628003 |
import logging
import math
import torch
import torch.distributed as dist
from torch.nn.utils import clip_grad_norm_
from mlbench_core.utils.pytorch.distributed import (
AllReduceAggregation,
AllReduceAggregationHVD,
)
try:
from apex.optimizers import FusedAdam
from apex import amp
except ImportError as e:
pass
logger = logging.getLogger("mlbench")
class FP16Optimizer:
    """
    Mixed precision optimizer with dynamic loss scaling and backoff.
    https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor

    Keeps the model weights in fp16 and a single flat fp32 "master" copy
    that the wrapped optimizer actually updates; after every successful
    step the updated master weights are copied back into the fp16 model.

    Args:
        fp16_model (`obj`:torch.nn.Module): model (previously casted to half)
        world_size (int): Distributed world size
        use_cuda (bool): Use cuda tensors for aggregation
        use_horovod (bool): Use Horovod for aggregation
        by_layer (bool): Aggregate by layer
        grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients
        loss_scale (int): initial loss scale
        dls_downscale (int): loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients
        dls_upscale (int): loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully
        dls_upscale_interval (int): interval for loss scale upscaling
        average_models (bool): Average the models
    """

    def __init__(
        self,
        fp16_model,
        world_size,
        use_cuda=False,
        use_horovod=False,
        by_layer=False,
        grad_clip=float("inf"),
        loss_scale=1024,
        dls_downscale=2,
        dls_upscale=2,
        dls_upscale_interval=128,
        average_models=True,
    ):
        self.use_cuda = use_cuda
        self.fp16_model = fp16_model
        # Flat fp16 copy and flat fp32 master copy of all parameters.
        self.fp16_params, self.fp32_params = self.initialize_flat_fp32_weight()
        # Consecutive batches with finite gradients since the last overflow;
        # once it reaches dls_upscale_interval the loss scale is increased.
        self.since_last_invalid = 0
        self.loss_scale = loss_scale
        self.dls_downscale = dls_downscale
        self.dls_upscale = dls_upscale
        self.dls_upscale_interval = dls_upscale_interval
        self.grad_clip = grad_clip
        # NOTE(review): read from torch.distributed, while the world_size
        # argument is only forwarded to the aggregator -- confirm intended.
        self.world_size = dist.get_world_size()
        self.optimizer = None

        if use_horovod:
            self.agg = AllReduceAggregationHVD(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)
        else:
            self.agg = AllReduceAggregation(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)

        if average_models:
            self.agg_mode = "avg"
        else:
            raise NotImplementedError("Only average model is supported right now.")

    def set_optimizer(self, optimizer):
        """Attaches the wrapped optimizer that updates the fp32 master copy."""
        self.optimizer = optimizer

    def initialize_flat_fp32_weight(self):
        """Initializes flat fp16 and fp32 copies of the model's parameters.

        Returns:
            (torch.Tensor, torch.Tensor): the parameters in fp16 and fp32,
            each a single flat ``torch.nn.Parameter`` with an allocated
            (uninitialized) ``.grad`` buffer.
        """
        # Detach all existing gradients.
        for p in self.fp16_model.parameters():
            p.grad = None
        # Count total number of elements across all parameters.
        nelem = 0
        for p in self.fp16_model.parameters():
            nelem += p.numel()
        fp32_params = torch.empty(
            nelem,
            dtype=torch.float32,
            device=torch.device("cuda" if self.use_cuda else "cpu"),
        )
        fp16_params = torch.empty(
            nelem,
            dtype=torch.float16,
            device=torch.device("cuda" if self.use_cuda else "cpu"),
        )
        # Copy every parameter into the flat buffers, back to back.
        pointer = 0
        for p in self.fp16_model.parameters():
            nelem = p.numel()
            fp32_params[pointer : pointer + nelem].copy_(p.data.view(-1))
            fp16_params[pointer : pointer + nelem].copy_(p.data.view(-1))
            pointer += nelem
        fp32_params = torch.nn.Parameter(fp32_params, requires_grad=True)
        fp32_params.grad = torch.autograd.Variable(
            fp32_params.data.new(*fp32_params.size())
        )
        fp16_params = torch.nn.Parameter(fp16_params, requires_grad=True)
        fp16_params.grad = torch.autograd.Variable(
            fp16_params.data.new(*fp16_params.size())
        )
        return fp16_params, fp32_params

    @staticmethod
    def fp16_to_fp32_flat_grad(fp32_params, fp16_model):
        """Copies the model's fp16 gradients into the flat fp32 grad buffer.

        Args:
            fp32_params (torch.Tensor): flat fp32 master parameter
            fp16_model (torch.nn.Module): model in fp16
        """
        pointer = 0
        for p in fp16_model.parameters():
            nelem = p.numel()
            fp32_params.grad.data[pointer : pointer + nelem].copy_(p.grad.data.view(-1))
            pointer += nelem

    @staticmethod
    def fp32_to_fp16_grads(fp16_model, fp32_params):
        """Copies the fp32 master weights back into the fp16 model in-place.

        Note: despite the name, this copies parameter *values* (``p.data``),
        not gradients.

        Args:
            fp16_model (torch.nn.Module): model in fp16
            fp32_params (torch.Tensor): flat fp32 master parameter
        """
        pointer = 0
        for p in fp16_model.parameters():
            nelem = p.numel()
            p.data.view(-1).copy_(fp32_params.data[pointer : pointer + nelem])
            pointer += nelem

    def backward_loss(self, loss):
        """Scales ``loss`` in-place by the current loss scale and backprops."""
        loss *= self.loss_scale
        loss.backward()

    def step(self, closure=None):
        """Performs one step of the optimizer.

        Aggregates fp16 gradients, converts them to fp32, inverts the loss
        scaling and clips. If the gradient norm is finite, the fp32 master
        weights are updated and copied back into the fp16 model; otherwise
        the batch is skipped and the loss scale is reduced.

        Args:
            closure (callable, optional): closure reevaluating the model and
                returning the loss.

        Returns:
            bool: True if the weights were updated, False if skipped.
        """
        scaling_factor = self.loss_scale
        # Aggregate gradients across workers (still in fp16).
        self.agg(self.fp16_model, self.agg_mode)
        # Cast fp16 grads into the flat fp32 buffer for the optimizer.
        self.fp16_to_fp32_flat_grad(self.fp32_params, self.fp16_model)
        # Undo the loss scaling applied in backward_loss.
        if scaling_factor != 1.0:
            self.fp32_params.grad.data /= scaling_factor
        norm = clip_grad_norm_([self.fp32_params], self.grad_clip)
        updated = False
        if math.isfinite(norm):
            self.optimizer.step(closure=closure)
            # Copy the updated master weights back into the fp16 model.
            self.fp32_to_fp16_grads(self.fp16_model, self.fp32_params)
            self.since_last_invalid += 1
            updated = True
        else:
            # Overflow: skip this batch and back off the loss scale.
            self.loss_scale /= self.dls_downscale
            self.since_last_invalid = 0
            logger.info(f"Skipped batch, new scale: {self.loss_scale}")
        if self.since_last_invalid >= self.dls_upscale_interval:
            # Long run of finite gradients: try a larger scale (capped at 8192).
            self.loss_scale *= self.dls_upscale
            self.loss_scale = min(self.loss_scale, 8192.0)
            self.since_last_invalid = 0
        # Free fp16 gradients for the next iteration.
        for p in self.fp16_model.parameters():
            p.grad = None
        return updated

    def zero_grad(self):
        """Clears the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
class FP32Optimizer:
    """
    Standard single-precision optimizer: computes backward and applies the
    weight update after all-reducing gradients across workers.

    Args:
        model (`obj`:torch.nn.Module): model
        world_size (int): Distributed world size
        use_cuda (bool): Use cuda tensors for aggregation
        by_layer (bool): Aggregate by layer
        grad_clip (float, optional): max L2 norm of the gradients;
            ``None`` or ``float("inf")`` disables clipping
        average_models (bool): Average the models
    """

    def __init__(
        self,
        model,
        world_size,
        use_cuda=False,
        by_layer=False,
        grad_clip=None,
        average_models=True,
    ):
        self.model = model
        self.grad_clip = grad_clip
        self.optimizer = None
        # All-reduce gradient aggregator reused on every step.
        self.agg = AllReduceAggregation(
            world_size=world_size, use_cuda=use_cuda
        ).agg_grad(by_layer=by_layer)
        if average_models:
            self.agg_mode = "avg"
        else:
            raise NotImplementedError("Only average model is supported right now.")

    def set_optimizer(self, optimizer):
        """Attaches the wrapped torch optimizer that performs the updates."""
        self.optimizer = optimizer

    def step(self, closure=None):
        """
        Performs one step of the optimizer: optional gradient clipping,
        cross-worker aggregation, then the wrapped optimizer's update.

        Returns:
            bool: always True (fp32 updates are never skipped).
        """
        # Bug fix: grad_clip defaults to None, and clip_grad_norm_ cannot
        # take a None max-norm -- treat None like "no clipping", same as inf.
        if self.grad_clip is not None and self.grad_clip != float("inf"):
            clip_grad_norm_(self.model.parameters(), self.grad_clip)
        self.agg(self.model, self.agg_mode)
        self.optimizer.step(closure=closure)
        return True

    def backward_loss(self, loss):
        """Runs the backward pass on ``loss`` (no scaling needed in fp32)."""
        loss.backward()

    def zero_grad(self):
        """Clears the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
class AMPOptimizer:
    """
    Optimizer compatible with NVIDIA apex AMP.

    Uses AMP to apply loss scaling, computes backward and applies weight
    update.

    Args:
        model (`obj`:torch.nn.Module): model
        grad_clip (float, optional): max L2 norm of the gradients;
            ``None`` or ``float("inf")`` disables clipping
        loss_scale (int): initial loss scale
        dls_upscale_interval (int): interval for loss scale upscaling
        average_models (bool): Average the models
        world_size (int): Distributed world size
        use_cuda (bool): Use cuda tensors for aggregation
        by_layer (bool): Aggregate by layer
        use_horovod (bool): Use Horovod for aggregation
    """

    def __init__(
        self,
        model,
        grad_clip=None,
        loss_scale=8192,
        dls_upscale_interval=128,
        average_models=True,
        world_size=1,
        use_cuda=False,
        by_layer=False,
        use_horovod=False,
    ):
        self.model = model
        self.grad_clip = grad_clip
        self.optimizer = None
        # Configure apex's (first) dynamic loss scaler directly.
        loss_scaler = amp._amp_state.loss_scalers[0]
        loss_scaler._loss_scale = loss_scale
        loss_scaler._scale_seq_len = dls_upscale_interval
        if average_models:
            self.agg_mode = "avg"
        else:
            raise NotImplementedError("Only average model is supported right now.")
        if use_horovod:
            self.agg = AllReduceAggregationHVD(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)
        else:
            self.agg = AllReduceAggregation(
                world_size=world_size, use_cuda=use_cuda
            ).agg_grad(by_layer=by_layer)

    def set_optimizer(self, optimizer):
        """Attaches the wrapped (amp-initialized) torch optimizer."""
        self.optimizer = optimizer

    def backward_loss(self, loss):
        """Backward pass with AMP loss scaling."""
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()

    def step(self, closure=None):
        """
        Performs one step of the optimizer: optional clipping of the AMP
        master gradients, cross-worker aggregation, then the update.

        Returns:
            bool: always True.
        """
        # Bug fix: grad_clip defaults to None, and clip_grad_norm_ cannot
        # take a None max-norm -- treat None like "no clipping", same as inf.
        if self.grad_clip is not None and self.grad_clip != float("inf"):
            clip_grad_norm_(amp.master_params(self.optimizer), self.grad_clip)
        self.agg(self.model, self.agg_mode)
        self.optimizer.step(closure=closure)
        return True

    def zero_grad(self):
        """Clears the gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()
| true | true |
f736477bb6aa66636e35c7f32ac0a2e1313242fa | 8,375 | py | Python | secmmf/mmf_data_loader/form_parsers.py | yj1990/sec_mmf | 72a8c0d5a6aadb4362c07a5606c70e51b08a53cd | [
"MIT"
] | 1 | 2019-12-20T17:52:14.000Z | 2019-12-20T17:52:14.000Z | secmmf/mmf_data_loader/form_parsers.py | yj1990/sec_mmf | 72a8c0d5a6aadb4362c07a5606c70e51b08a53cd | [
"MIT"
] | null | null | null | secmmf/mmf_data_loader/form_parsers.py | yj1990/sec_mmf | 72a8c0d5a6aadb4362c07a5606c70e51b08a53cd | [
"MIT"
] | null | null | null | import pandas as pd
import bs4 as bs
import untangle as ut
import requests
import urllib.request as rq
from collections import OrderedDict
from secmmf.mmf_data_loader.utils import get_edgar_url
class N_MFP2:
    """Parser for SEC form N-MFP2 (monthly money-market fund) XML filings.

    Downloads a filing's ``primary_doc.xml``, extracts the general-, series-
    and class-level information and reshapes it into a long-format
    :class:`pandas.DataFrame` with one row per (share class, week).
    """

    def __init__(self):
        self.select_cols()

    def born(self, tag):
        """Return the real child nodes of *tag*.

        Filters out bare newline strings and XML comments; a tag wrapping a
        single navigable string yields a one-element list, while composite
        tags need further recursion.
        """
        childs = []
        for x in tag:
            if (x != '\n') & (type(x) != bs.element.Comment):
                childs.append(x)
        return childs

    def dive(self, root, surname=''):
        """Recursively flatten *root* into ``'path_to_tag: text'`` strings."""
        name = surname + root.name
        sons = []
        for son in self.born(root):
            if type(son) == bs.element.NavigableString:
                text = ': '.join([name, son])
                sons.append(text)
            elif type(son) == bs.element.Tag:
                sons.extend(self.dive(son, surname=name + '_'))
        return sons

    def teach(self, root):
        """Flatten *root* up to two levels deep into (name, text) tuples.

        Grandchildren are prefixed with their parent tag name, joined by an
        underscore (e.g. ``adviser_adviserName``).
        """
        sons = []
        for son in self.born(root):
            if len(self.born(son)) == 1:
                sons.append((son.name, son.get_text().replace('\n', '')))
            elif len(self.born(son)) > 1:
                for grandson in self.born(son):
                    sons.append((son.name + '_' + grandson.name,
                                 grandson.get_text().replace('\n', '')))
        return sons

    def teach_rec(self, root):
        """Recursive variant of :meth:`teach`: nested tags become sublists."""
        sons = []
        for son in self.born(root):
            if len(self.born(son)) == 1:
                sons.append((son.name, son.get_text().replace('\n', '')))
            elif len(self.born(son)) > 1:
                # Bug fix: was a bare ``teach_rec(son)``, which raised
                # NameError on any nested tag; must be called through self.
                sons.append(self.teach_rec(son))
        return sons

    def parse(self, url='https://www.sec.gov/Archives/edgar/data/759667/000070217219000020/primary_doc.xml'):
        """Download and parse one N-MFP2 filing into a long DataFrame.

        Args:
            url (str): location of the filing's ``primary_doc.xml``.

        Returns:
            pandas.DataFrame: one row per (share class, week), with the
            general/series-level columns repeated on every row.
        """
        source = rq.urlopen(url).read()
        soup = bs.BeautifulSoup(source, 'xml')

        # Parse XML info into [(section name, [(tag, text), ...]), ...].
        mmf = []
        for tag in self.born(soup.formData):
            if tag.name in ['classLevelInfo', 'generalInfo', 'seriesLevelInfo']:
                mmf.append((tag.name, self.teach(tag)))

        general_series_class = []
        general_series = mmf[0][1] + mmf[1][1]
        # Rename the series-level share count so it does not collide with
        # the class-level field of the same name.
        for i, x in enumerate(general_series):
            if x[0] == 'numberOfSharesOutstanding':
                y = list(x)
                y[0] = 'series_numberOfSharesOutstanding'
                general_series[i] = tuple(y)
        # Prepend the general+series info to every class-level record.
        for x in mmf[2:]:
            general_series_class.append(OrderedDict(general_series + x[1]))

        df = pd.DataFrame(general_series_class)
        if 'nameOfPersonDescExpensePay' in df.columns:
            df.drop(columns='nameOfPersonDescExpensePay', inplace=True)

        # Rename the columns with reversed patterns so every weekly column
        # ends in ``_fridayWeekN`` and can be reshaped with wide_to_long.
        namemap = []
        for x in ['weeklyGrossRedemptions', 'weeklyGrossSubscriptions']:
            namemap.append(dict([('fridayWeek' + str(i + 1) + '_' + x,
                                  x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))
        for x in ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets']:
            namemap.append(dict([(x + '_' + 'fridayDay' + str(i + 1),
                                  x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))
        for i in range(4):
            df = df.rename(columns=namemap[i])

        # Reshape wide to long on the weekly holding statistics.
        df = pd.wide_to_long(df, stubnames=self.stubs,
                             i='classesId', j='week', sep='_', suffix=r'\w+')
        df.reset_index(inplace=True)
        df['week'] = df['week'].apply(
            lambda x: int(x.replace('fridayWeek', '')))
        return df

    def parse_csv(self, url):
        """Fetch a filing through the EDGAR helper and flatten it via dive."""
        source = get_edgar_url(url).content
        soup = bs.BeautifulSoup(source, 'xml')
        return self.dive(soup.formData)

    def select_cols(self):
        """Defines the column groups used by :meth:`parse`.

        ``stubs`` are the weekly wide-to-long stub names; ``_tonum`` lists
        numeric columns; the two ``*_level_names`` lists document the
        series- and class-level fields.
        """
        self.stubs = ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets',
                      'totalValueWeeklyLiquidAssets', 'percentageWeeklyLiquidAssets',
                      'netAssetValue', 'netAssetPerShare',
                      'weeklyGrossRedemptions', 'weeklyGrossSubscriptions']
        self._tonum = ['totalShareClassesInSeries',
                       'averagePortfolioMaturity',
                       'averageLifeMaturity',
                       'cash',
                       'totalValuePortfolioSecurities',
                       'amortizedCostPortfolioSecurities',
                       'totalValueOtherAssets',
                       'totalValueLiabilities',
                       'netAssetOfSeries',
                       'numberOfSharesOutstanding',
                       'stablePricePerShare',
                       'sevenDayGrossYield',
                       'minInitialInvestment',
                       'netAssetsOfClass',
                       'totalForTheMonthReported_weeklyGrossSubscriptions',
                       'totalForTheMonthReported_weeklyGrossRedemptions',
                       'sevenDayNetYield'] + self.stubs
        self.series_level_names = ['reportDate',
                                   'cik',
                                   'seriesId',
                                   'totalShareClassesInSeries',
                                   'finalFilingFlag',
                                   'fundAcqrdOrMrgdWthAnthrFlag',
                                   'securitiesActFileNumber',
                                   'adviser_adviserName',
                                   'adviser_adviserFileNumber',
                                   'indpPubAccountant_name',
                                   'indpPubAccountant_city',
                                   'indpPubAccountant_stateCountry',
                                   'administrator',
                                   'transferAgent_name',
                                   'transferAgent_cik',
                                   'transferAgent_fileNumber',
                                   'feederFundFlag',
                                   'masterFundFlag',
                                   'seriesFundInsuCmpnySepAccntFlag',
                                   'moneyMarketFundCategory',
                                   'fundExemptRetailFlag',
                                   'averagePortfolioMaturity',
                                   'averageLifeMaturity',
                                   'totalValueDailyLiquidAssets',
                                   'totalValueWeeklyLiquidAssets',
                                   'percentageDailyLiquidAssets',
                                   'percentageWeeklyLiquidAssets',
                                   'cash',
                                   'totalValuePortfolioSecurities',
                                   'amortizedCostPortfolioSecurities',
                                   'totalValueOtherAssets',
                                   'totalValueLiabilities',
                                   'netAssetOfSeries',
                                   'series_numberOfSharesOutstanding',
                                   'stablePricePerShare',
                                   'sevenDayGrossYield',
                                   'netAssetValue']
        self.class_level_names = ['classesId',
                                  'minInitialInvestment',
                                  'netAssetsOfClass',
                                  'numberOfSharesOutstanding',
                                  'netAssetPerShare',
                                  'weeklyGrossSubscriptions',
                                  'weeklyGrossRedemptions',
                                  'totalForTheMonthReported_weeklyGrossSubscriptions',
                                  'totalForTheMonthReported_weeklyGrossRedemptions',
                                  'sevenDayNetYield',
                                  'personPayForFundFlag']
| 43.848168 | 109 | 0.488597 | import pandas as pd
import bs4 as bs
import untangle as ut
import requests
import urllib.request as rq
from collections import OrderedDict
from secmmf.mmf_data_loader.utils import get_edgar_url
class N_MFP2:
    """Parser for SEC form N-MFP2 (monthly money-market fund) XML filings.

    Downloads a filing's ``primary_doc.xml``, extracts the general-, series-
    and class-level information and reshapes it into a long-format
    :class:`pandas.DataFrame` with one row per (share class, week).
    """

    def __init__(self):
        self.select_cols()

    def born(self, tag):
        """Return the real child nodes of *tag* (no newlines/comments)."""
        childs = []
        for x in tag:
            if (x != '\n') & (type(x) != bs.element.Comment):
                childs.append(x)
        return childs

    def dive(self, root, surname=''):
        """Recursively flatten *root* into ``'path_to_tag: text'`` strings."""
        name = surname + root.name
        sons = []
        for son in self.born(root):
            if type(son) == bs.element.NavigableString:
                text = ': '.join([name, son])
                sons.append(text)
            elif type(son) == bs.element.Tag:
                sons.extend(self.dive(son, surname=name + '_'))
        return sons

    def teach(self, root):
        """Flatten *root* up to two levels deep into (name, text) tuples."""
        sons = []
        for son in self.born(root):
            if len(self.born(son)) == 1:
                sons.append((son.name, son.get_text().replace('\n', '')))
            elif len(self.born(son)) > 1:
                for grandson in self.born(son):
                    sons.append((son.name + '_' + grandson.name,
                                 grandson.get_text().replace('\n', '')))
        return sons

    def teach_rec(self, root):
        """Recursive variant of :meth:`teach`: nested tags become sublists."""
        sons = []
        for son in self.born(root):
            if len(self.born(son)) == 1:
                sons.append((son.name, son.get_text().replace('\n', '')))
            elif len(self.born(son)) > 1:
                # Bug fix: was a bare ``teach_rec(son)``, which raised
                # NameError on any nested tag; must be called through self.
                sons.append(self.teach_rec(son))
        return sons

    def parse(self, url='https://www.sec.gov/Archives/edgar/data/759667/000070217219000020/primary_doc.xml'):
        """Download and parse one N-MFP2 filing into a long DataFrame.

        Args:
            url (str): location of the filing's ``primary_doc.xml``.

        Returns:
            pandas.DataFrame: one row per (share class, week), with the
            general/series-level columns repeated on every row.
        """
        source = rq.urlopen(url).read()
        soup = bs.BeautifulSoup(source, 'xml')

        # Parse XML info into [(section name, [(tag, text), ...]), ...].
        mmf = []
        for tag in self.born(soup.formData):
            if tag.name in ['classLevelInfo', 'generalInfo', 'seriesLevelInfo']:
                mmf.append((tag.name, self.teach(tag)))

        general_series_class = []
        general_series = mmf[0][1] + mmf[1][1]
        # Rename the series-level share count so it does not collide with
        # the class-level field of the same name.
        for i, x in enumerate(general_series):
            if x[0] == 'numberOfSharesOutstanding':
                y = list(x)
                y[0] = 'series_numberOfSharesOutstanding'
                general_series[i] = tuple(y)
        # Prepend the general+series info to every class-level record.
        for x in mmf[2:]:
            general_series_class.append(OrderedDict(general_series + x[1]))

        df = pd.DataFrame(general_series_class)
        if 'nameOfPersonDescExpensePay' in df.columns:
            df.drop(columns='nameOfPersonDescExpensePay', inplace=True)

        # Rename the columns with reversed patterns so every weekly column
        # ends in ``_fridayWeekN`` and can be reshaped with wide_to_long.
        namemap = []
        for x in ['weeklyGrossRedemptions', 'weeklyGrossSubscriptions']:
            namemap.append(dict([('fridayWeek' + str(i + 1) + '_' + x,
                                  x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))
        for x in ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets']:
            namemap.append(dict([(x + '_' + 'fridayDay' + str(i + 1),
                                  x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))
        for i in range(4):
            df = df.rename(columns=namemap[i])

        # Reshape wide to long on the weekly holding statistics.
        df = pd.wide_to_long(df, stubnames=self.stubs,
                             i='classesId', j='week', sep='_', suffix=r'\w+')
        df.reset_index(inplace=True)
        df['week'] = df['week'].apply(
            lambda x: int(x.replace('fridayWeek', '')))
        return df

    def parse_csv(self, url):
        """Fetch a filing through the EDGAR helper and flatten it via dive."""
        source = get_edgar_url(url).content
        soup = bs.BeautifulSoup(source, 'xml')
        return self.dive(soup.formData)

    def select_cols(self):
        """Defines the column groups used by :meth:`parse`."""
        self.stubs = ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets',
                      'totalValueWeeklyLiquidAssets', 'percentageWeeklyLiquidAssets',
                      'netAssetValue', 'netAssetPerShare',
                      'weeklyGrossRedemptions', 'weeklyGrossSubscriptions']
        self._tonum = ['totalShareClassesInSeries',
                       'averagePortfolioMaturity',
                       'averageLifeMaturity',
                       'cash',
                       'totalValuePortfolioSecurities',
                       'amortizedCostPortfolioSecurities',
                       'totalValueOtherAssets',
                       'totalValueLiabilities',
                       'netAssetOfSeries',
                       'numberOfSharesOutstanding',
                       'stablePricePerShare',
                       'sevenDayGrossYield',
                       'minInitialInvestment',
                       'netAssetsOfClass',
                       'totalForTheMonthReported_weeklyGrossSubscriptions',
                       'totalForTheMonthReported_weeklyGrossRedemptions',
                       'sevenDayNetYield'] + self.stubs
        self.series_level_names = ['reportDate',
                                   'cik',
                                   'seriesId',
                                   'totalShareClassesInSeries',
                                   'finalFilingFlag',
                                   'fundAcqrdOrMrgdWthAnthrFlag',
                                   'securitiesActFileNumber',
                                   'adviser_adviserName',
                                   'adviser_adviserFileNumber',
                                   'indpPubAccountant_name',
                                   'indpPubAccountant_city',
                                   'indpPubAccountant_stateCountry',
                                   'administrator',
                                   'transferAgent_name',
                                   'transferAgent_cik',
                                   'transferAgent_fileNumber',
                                   'feederFundFlag',
                                   'masterFundFlag',
                                   'seriesFundInsuCmpnySepAccntFlag',
                                   'moneyMarketFundCategory',
                                   'fundExemptRetailFlag',
                                   'averagePortfolioMaturity',
                                   'averageLifeMaturity',
                                   'totalValueDailyLiquidAssets',
                                   'totalValueWeeklyLiquidAssets',
                                   'percentageDailyLiquidAssets',
                                   'percentageWeeklyLiquidAssets',
                                   'cash',
                                   'totalValuePortfolioSecurities',
                                   'amortizedCostPortfolioSecurities',
                                   'totalValueOtherAssets',
                                   'totalValueLiabilities',
                                   'netAssetOfSeries',
                                   'series_numberOfSharesOutstanding',
                                   'stablePricePerShare',
                                   'sevenDayGrossYield',
                                   'netAssetValue']
        self.class_level_names = ['classesId',
                                  'minInitialInvestment',
                                  'netAssetsOfClass',
                                  'numberOfSharesOutstanding',
                                  'netAssetPerShare',
                                  'weeklyGrossSubscriptions',
                                  'weeklyGrossRedemptions',
                                  'totalForTheMonthReported_weeklyGrossSubscriptions',
                                  'totalForTheMonthReported_weeklyGrossRedemptions',
                                  'sevenDayNetYield',
                                  'personPayForFundFlag']
| true | true |
f73647b2a6012065708253b687595d2d32a619e0 | 6,002 | bzl | Python | go/private/actions/archive.bzl | tomlu/rules_go | 28dd92b09c978ad09c74c18161a1da844235adfb | [
"Apache-2.0"
] | 1 | 2020-12-02T20:04:26.000Z | 2020-12-02T20:04:26.000Z | go/private/actions/archive.bzl | tomlu/rules_go | 28dd92b09c978ad09c74c18161a1da844235adfb | [
"Apache-2.0"
] | null | null | null | go/private/actions/archive.bzl | tomlu/rules_go | 28dd92b09c978ad09c74c18161a1da844235adfb | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"@io_bazel_rules_go//go/private:common.bzl",
"as_tuple",
"split_srcs",
)
load(
"@io_bazel_rules_go//go/private:mode.bzl",
"LINKMODE_C_ARCHIVE",
"LINKMODE_C_SHARED",
"mode_string",
)
load(
"@io_bazel_rules_go//go/private:providers.bzl",
"GoArchive",
"GoArchiveData",
"effective_importpath_pkgpath",
"get_archive",
)
load(
"@io_bazel_rules_go//go/private:rules/cgo.bzl",
"cgo_configure",
)
load(
"@io_bazel_rules_go//go/private:actions/compilepkg.bzl",
"emit_compilepkg",
)
def emit_archive(go, source = None):
    """Compiles a GoSource into an archive and returns its GoArchive provider.

    See go/toolchains.rst#archive for full documentation.
    """
    if source == None:
        fail("source is a required parameter")

    split = split_srcs(source.srcs)
    testfilter = getattr(source.library, "testfilter", None)

    # Pick an output-name suffix so internal/external test archives and
    # c-archive outputs do not collide.
    pre_ext = ""
    if go.mode.link == LINKMODE_C_ARCHIVE:
        pre_ext = "_"  # avoid collision with go_binary output file with .a extension
    elif testfilter == "exclude":
        pre_ext = ".internal"
    elif testfilter == "only":
        pre_ext = ".external"
    out_lib = go.declare_file(go, ext = pre_ext + ".a")
    if go.nogo:
        # TODO(#1847): write nogo data into a new section in the .a file instead
        # of writing a separate file.
        out_export = go.declare_file(go, ext = pre_ext + ".x")
    else:
        out_export = None
    out_cgo_export_h = None  # set if cgo used in c-shared or c-archive mode

    # Collect direct dependency archives and merge runfiles; every dep must
    # have been built in the same mode as this archive.
    direct = [get_archive(dep) for dep in source.deps]
    runfiles = source.runfiles
    data_files = runfiles.files
    for a in direct:
        runfiles = runfiles.merge(a.runfiles)
        if a.source.mode != go.mode:
            fail("Archive mode does not match {} is {} expected {}".format(a.data.label, mode_string(a.source.mode), mode_string(go.mode)))

    importmap = "main" if source.library.is_main else source.library.importmap
    importpath, _ = effective_importpath_pkgpath(source.library)

    if source.cgo and not go.mode.pure:
        # cgo build: split the C/C++/linker option strings on spaces.
        # TODO(jayconrod): do we need to do full Bourne tokenization here?
        cppopts = [f for fs in source.cppopts for f in fs.split(" ")]
        copts = [f for fs in source.copts for f in fs.split(" ")]
        cxxopts = [f for fs in source.cxxopts for f in fs.split(" ")]
        clinkopts = [f for fs in source.clinkopts for f in fs.split(" ")]
        cgo = cgo_configure(
            go,
            srcs = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
            cdeps = source.cdeps,
            cppopts = cppopts,
            copts = copts,
            cxxopts = cxxopts,
            clinkopts = clinkopts,
        )
        if go.mode.link in (LINKMODE_C_SHARED, LINKMODE_C_ARCHIVE):
            out_cgo_export_h = go.declare_file(go, path = "_cgo_install.h")
        cgo_deps = cgo.deps
        runfiles = runfiles.merge(cgo.runfiles)
        emit_compilepkg(
            go,
            sources = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
            cover = source.cover,
            importpath = importpath,
            importmap = importmap,
            archives = direct,
            out_lib = out_lib,
            out_export = out_export,
            out_cgo_export_h = out_cgo_export_h,
            gc_goopts = source.gc_goopts,
            cgo = True,
            cgo_inputs = cgo.inputs,
            cppopts = cgo.cppopts,
            copts = cgo.copts,
            cxxopts = cgo.cxxopts,
            objcopts = cgo.objcopts,
            objcxxopts = cgo.objcxxopts,
            clinkopts = cgo.clinkopts,
            testfilter = testfilter,
        )
    else:
        # Pure Go build: no cgo inputs or C toolchain options.
        cgo_deps = depset()
        emit_compilepkg(
            go,
            sources = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
            cover = source.cover,
            importpath = importpath,
            importmap = importmap,
            archives = direct,
            out_lib = out_lib,
            out_export = out_export,
            gc_goopts = source.gc_goopts,
            cgo = False,
            testfilter = testfilter,
        )

    # Immutable description of this archive, consumed by dependents.
    data = GoArchiveData(
        name = source.library.name,
        label = source.library.label,
        importpath = source.library.importpath,
        importmap = source.library.importmap,
        importpath_aliases = source.library.importpath_aliases,
        pathtype = source.library.pathtype,
        file = out_lib,
        export_file = out_export,
        srcs = as_tuple(source.srcs),
        orig_srcs = as_tuple(source.orig_srcs),
        data_files = as_tuple(data_files),
    )

    # Merge stamping variables (x_defs) from all direct deps.
    x_defs = dict(source.x_defs)
    for a in direct:
        x_defs.update(a.x_defs)

    # cgo-exported headers from this archive plus all direct deps.
    cgo_exports_direct = list(source.cgo_exports)
    if out_cgo_export_h:
        cgo_exports_direct.append(out_cgo_export_h)
    cgo_exports = depset(direct = cgo_exports_direct, transitive = [a.cgo_exports for a in direct])
    return GoArchive(
        source = source,
        data = data,
        direct = direct,
        libs = depset(direct = [out_lib], transitive = [a.libs for a in direct]),
        transitive = depset([data], transitive = [a.transitive for a in direct]),
        x_defs = x_defs,
        cgo_deps = depset(transitive = [cgo_deps] + [a.cgo_deps for a in direct]),
        cgo_exports = cgo_exports,
        runfiles = runfiles,
        mode = go.mode,
    )
| 36.375758 | 139 | 0.619294 |
load(
"@io_bazel_rules_go//go/private:common.bzl",
"as_tuple",
"split_srcs",
)
load(
"@io_bazel_rules_go//go/private:mode.bzl",
"LINKMODE_C_ARCHIVE",
"LINKMODE_C_SHARED",
"mode_string",
)
load(
"@io_bazel_rules_go//go/private:providers.bzl",
"GoArchive",
"GoArchiveData",
"effective_importpath_pkgpath",
"get_archive",
)
load(
"@io_bazel_rules_go//go/private:rules/cgo.bzl",
"cgo_configure",
)
load(
"@io_bazel_rules_go//go/private:actions/compilepkg.bzl",
"emit_compilepkg",
)
def emit_archive(go, source = None):
    """Compiles a single Go package into an archive and wraps it in providers.

    Args:
        go: the Go context object (supplies mode, declare_file, nogo, ...).
        source: a GoSource describing srcs, deps, cgo settings and coverage.

    Returns:
        A GoArchive provider holding the compiled library, its transitive
        closure, cgo artifacts, merged x_defs, and runfiles.
    """
    if source == None:
        fail("source is a required parameter")
    split = split_srcs(source.srcs)
    testfilter = getattr(source.library, "testfilter", None)

    # Decorate the output name by link mode / internal-external test filtering
    # so the different flavors of the same package do not collide.
    pre_ext = ""
    if go.mode.link == LINKMODE_C_ARCHIVE:
        pre_ext = "_"
    elif testfilter == "exclude":
        pre_ext = ".internal"
    elif testfilter == "only":
        pre_ext = ".external"
    out_lib = go.declare_file(go, ext = pre_ext + ".a")
    # With nogo enabled the compile action also emits an export-data file.
    # NOTE: this line was truncated in the extracted copy; restored from the
    # upstream rules_go archive implementation.
    if go.nogo:
        out_export = go.declare_file(go, ext = pre_ext + ".x")
    else:
        out_export = None
    out_cgo_export_h = None  # set only for c-shared / c-archive cgo builds

    direct = [get_archive(dep) for dep in source.deps]
    runfiles = source.runfiles
    data_files = runfiles.files
    for a in direct:
        runfiles = runfiles.merge(a.runfiles)
        if a.source.mode != go.mode:
            fail("Archive mode does not match {} is {} expected {}".format(a.data.label, mode_string(a.source.mode), mode_string(go.mode)))
    importmap = "main" if source.library.is_main else source.library.importmap
    importpath, _ = effective_importpath_pkgpath(source.library)
    if source.cgo and not go.mode.pure:
        # cgo build: option strings may hold several space-separated flags.
        cppopts = [f for fs in source.cppopts for f in fs.split(" ")]
        copts = [f for fs in source.copts for f in fs.split(" ")]
        cxxopts = [f for fs in source.cxxopts for f in fs.split(" ")]
        clinkopts = [f for fs in source.clinkopts for f in fs.split(" ")]
        cgo = cgo_configure(
            go,
            srcs = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
            cdeps = source.cdeps,
            cppopts = cppopts,
            copts = copts,
            cxxopts = cxxopts,
            clinkopts = clinkopts,
        )
        if go.mode.link in (LINKMODE_C_SHARED, LINKMODE_C_ARCHIVE):
            out_cgo_export_h = go.declare_file(go, path = "_cgo_install.h")
        cgo_deps = cgo.deps
        runfiles = runfiles.merge(cgo.runfiles)
        emit_compilepkg(
            go,
            sources = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
            cover = source.cover,
            importpath = importpath,
            importmap = importmap,
            archives = direct,
            out_lib = out_lib,
            out_export = out_export,
            out_cgo_export_h = out_cgo_export_h,
            gc_goopts = source.gc_goopts,
            cgo = True,
            cgo_inputs = cgo.inputs,
            cppopts = cgo.cppopts,
            copts = cgo.copts,
            cxxopts = cgo.cxxopts,
            objcopts = cgo.objcopts,
            objcxxopts = cgo.objcxxopts,
            clinkopts = cgo.clinkopts,
            testfilter = testfilter,
        )
    else:
        # Pure Go build: no cgo configuration and no cgo dependencies.
        cgo_deps = depset()
        emit_compilepkg(
            go,
            sources = split.go + split.c + split.asm + split.cxx + split.objc + split.headers,
            cover = source.cover,
            importpath = importpath,
            importmap = importmap,
            archives = direct,
            out_lib = out_lib,
            out_export = out_export,
            gc_goopts = source.gc_goopts,
            cgo = False,
            testfilter = testfilter,
        )
    data = GoArchiveData(
        name = source.library.name,
        label = source.library.label,
        importpath = source.library.importpath,
        importmap = source.library.importmap,
        importpath_aliases = source.library.importpath_aliases,
        pathtype = source.library.pathtype,
        file = out_lib,
        export_file = out_export,
        srcs = as_tuple(source.srcs),
        orig_srcs = as_tuple(source.orig_srcs),
        data_files = as_tuple(data_files),
    )
    # Merge x_defs and cgo exports from this package and all direct deps.
    x_defs = dict(source.x_defs)
    for a in direct:
        x_defs.update(a.x_defs)
    cgo_exports_direct = list(source.cgo_exports)
    if out_cgo_export_h:
        cgo_exports_direct.append(out_cgo_export_h)
    cgo_exports = depset(direct = cgo_exports_direct, transitive = [a.cgo_exports for a in direct])
    return GoArchive(
        source = source,
        data = data,
        direct = direct,
        libs = depset(direct = [out_lib], transitive = [a.libs for a in direct]),
        transitive = depset([data], transitive = [a.transitive for a in direct]),
        x_defs = x_defs,
        cgo_deps = depset(transitive = [cgo_deps] + [a.cgo_deps for a in direct]),
        cgo_exports = cgo_exports,
        runfiles = runfiles,
        mode = go.mode,
    )
| true | true |
f73647b504c490e3c41803e6e2a4d597213a1d43 | 1,900 | py | Python | gsadjust/data/__init__.py | jkennedy-usgs/sgp-gsadjust | 929d650674b942d33168bcdaae7da07db175a1c4 | [
"CC0-1.0"
] | 5 | 2019-01-08T13:51:23.000Z | 2020-04-22T19:04:20.000Z | gsadjust/data/__init__.py | jkennedy-usgs/sgp-gsadjust | 929d650674b942d33168bcdaae7da07db175a1c4 | [
"CC0-1.0"
] | 113 | 2018-06-14T21:39:59.000Z | 2022-01-21T19:34:12.000Z | gsadjust/data/__init__.py | jkennedy-usgs/sgp-gsadjust | 929d650674b942d33168bcdaae7da07db175a1c4 | [
"CC0-1.0"
] | 3 | 2021-01-26T07:10:42.000Z | 2022-03-15T12:39:35.000Z | """
data
====
GSadjust objects for non-Survey | Loop | Station objects (those are
represented as PyQt objects).
--------------------------------------------------------------------------------
The main data objects are:
ChannelList: Holds lists of observed data values (g, tilt, temp, etc.). The
lists are copied to ObsTreeStation objects.
Adjustment | AdjustmentOptions } Adjustment Results: The first contains
instances of the latter two. Holds the input data, options, and results of the
network adjustment.
Datum: Absolute-gravity observation or other reference for the relative-gravity
network. At least one datum is required for network adjustment.
Delta: Relative-gravity difference calculated from two station occupations.
May or may not include drift correction.
Tare: Represents an offset applied to the data
TimeSeries: Used only for tide correction.
This software is preliminary, provisional, and is subject to revision. It is
being provided to meet the need for timely best science. The software has not
received final approval by the U.S. Geological Survey (USGS). No warranty,
expressed or implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the fact of release
constitute any such warranty. The software is provided on the condition that
neither the USGS nor the U.S. Government shall be held liable for any damages
resulting from the authorized or unauthorized use of the software.
"""
from . import adjustment, analysis, channel, correction, datum, delta, tare
from .adjustment import (
AdjustedStation,
Adjustment,
AdjustmentOptions,
AdjustmentResults,
)
from .channel import ChannelList
from .datum import Datum
from .delta import (
Delta3Point,
DeltaList,
DeltaNormal,
create_delta_by_type,
)
from .tare import Tare
| 35.849057 | 81 | 0.728421 | from . import adjustment, analysis, channel, correction, datum, delta, tare
from .adjustment import (
AdjustedStation,
Adjustment,
AdjustmentOptions,
AdjustmentResults,
)
from .channel import ChannelList
from .datum import Datum
from .delta import (
Delta3Point,
DeltaList,
DeltaNormal,
create_delta_by_type,
)
from .tare import Tare
| true | true |
f73647cb9744604fcafbadc9ef4a6ae28b197403 | 937 | py | Python | named_entity_recognition1.py | DreamFireworks/nlp | e96e6f505964f1886e3de5347f2c1179cf2578e5 | [
"MIT"
] | null | null | null | named_entity_recognition1.py | DreamFireworks/nlp | e96e6f505964f1886e3de5347f2c1179cf2578e5 | [
"MIT"
] | null | null | null | named_entity_recognition1.py | DreamFireworks/nlp | e96e6f505964f1886e3de5347f2c1179cf2578e5 | [
"MIT"
] | null | null | null | import nltk
text="""A biography of Yue Fei, the Eguo Jintuo Zubian (鄂國金佗稡编), was written 60 years after his death by his grandson, the poet and historian Yue Ke (岳柯) (1183–post 1240).[3][4][5] In 1346 it was incorporated into the History of Song, a 496-chapter record of historical events and biographies of noted Song dynasty individuals, compiled by Yuan dynasty prime minister Toqto'a and others.[6] Yue Fei's biography is found in the 365th chapter of the book and is numbered biography 124.[7] Some later historians including Deng Guangming (1907–1998) now doubt the veracity of many of Yue Ke's claims about his grandfather.[8]
According to the History of Song, Yue Fei was named "Fei", meaning to fly, because at the time he was born, "a large bird like a swan landed on the roof of his house"."""
tokenized = nltk.word_tokenize(text)
tagged = nltk.pos_tag(tokenized)
named_entity = nltk.ne_chunk(tagged)
named_entity.draw()
| 72.076923 | 621 | 0.770544 | import nltk
text="""A biography of Yue Fei, the Eguo Jintuo Zubian (鄂國金佗稡编), was written 60 years after his death by his grandson, the poet and historian Yue Ke (岳柯) (1183–post 1240).[3][4][5] In 1346 it was incorporated into the History of Song, a 496-chapter record of historical events and biographies of noted Song dynasty individuals, compiled by Yuan dynasty prime minister Toqto'a and others.[6] Yue Fei's biography is found in the 365th chapter of the book and is numbered biography 124.[7] Some later historians including Deng Guangming (1907–1998) now doubt the veracity of many of Yue Ke's claims about his grandfather.[8]
According to the History of Song, Yue Fei was named "Fei", meaning to fly, because at the time he was born, "a large bird like a swan landed on the roof of his house"."""
tokenized = nltk.word_tokenize(text)
tagged = nltk.pos_tag(tokenized)
named_entity = nltk.ne_chunk(tagged)
named_entity.draw()
| true | true |
f736498c71b92611343f1c80a567282dd6f753c1 | 413 | py | Python | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_XRayPatient.py | kyvipro113/Graduation_Thesis | 71e9e3aa2adf64cff7319d056592f8b6ef1fd5ec | [
"MIT"
] | null | null | null | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_XRayPatient.py | kyvipro113/Graduation_Thesis | 71e9e3aa2adf64cff7319d056592f8b6ef1fd5ec | [
"MIT"
] | null | null | null | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_XRayPatient.py | kyvipro113/Graduation_Thesis | 71e9e3aa2adf64cff7319d056592f8b6ef1fd5ec | [
"MIT"
] | null | null | null | from DAL.DAL_XRayPatient import DAL_XRayPatient
class BUS_XRayPatient():
def __init__(self):
self.dalXRayPatient = DAL_XRayPatient()
def firstLoadLinkXRay(self, IDPatient):
return self.dalXRayPatient.selectLinkXRayViaIDPatient(IDPatient=IDPatient)
def loadLinkXRay(self, IDPatient, XRayType):
return self.dalXRayPatient.selectLinkXRay(IDPatient=IDPatient, XRayType=XRayType) | 37.545455 | 89 | 0.774818 | from DAL.DAL_XRayPatient import DAL_XRayPatient
class BUS_XRayPatient():
def __init__(self):
self.dalXRayPatient = DAL_XRayPatient()
def firstLoadLinkXRay(self, IDPatient):
return self.dalXRayPatient.selectLinkXRayViaIDPatient(IDPatient=IDPatient)
def loadLinkXRay(self, IDPatient, XRayType):
return self.dalXRayPatient.selectLinkXRay(IDPatient=IDPatient, XRayType=XRayType) | true | true |
f7364b16fc4142c59fd6ce8321417e4f024bf8de | 1,543 | py | Python | internal/notes/builtin-SAVE/packages/r-rodbc/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/r-rodbc/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/r-rodbc/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRodbc(RPackage):
    """An ODBC database interface."""
    # CRAN landing page and source tarball consumed by Spack's fetcher.
    homepage = "https://cran.rstudio.com/web/packages/RODBC/"
    url      = "https://cran.rstudio.com/src/contrib/RODBC_1.3-13.tar.gz"
    # Known version with its md5 checksum, and the system ODBC dependency.
    version('1.3-13', 'c52ef9139c2ed85adc53ad6effa7d68e')
    depends_on('unixodbc')
| 41.702703 | 78 | 0.672715 | true | true | |
f7364bb8d7801da82a198367f1f1d7ca19fe8f6b | 373 | py | Python | wordcloud/__init__.py | jcfr/word_cloud | d01847f973b45af9198c36b922a908b4374afe4d | [
"MIT"
] | null | null | null | wordcloud/__init__.py | jcfr/word_cloud | d01847f973b45af9198c36b922a908b4374afe4d | [
"MIT"
] | null | null | null | wordcloud/__init__.py | jcfr/word_cloud | d01847f973b45af9198c36b922a908b4374afe4d | [
"MIT"
] | null | null | null | from .wordcloud import (WordCloud, STOPWORDS, random_color_func,
get_single_color_func)
from .color_from_image import ImageColorGenerator
__all__ = ['WordCloud', 'STOPWORDS', 'random_color_func',
'get_single_color_func', 'ImageColorGenerator']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 33.909091 | 64 | 0.742627 | from .wordcloud import (WordCloud, STOPWORDS, random_color_func,
get_single_color_func)
from .color_from_image import ImageColorGenerator
__all__ = ['WordCloud', 'STOPWORDS', 'random_color_func',
'get_single_color_func', 'ImageColorGenerator']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| true | true |
f7364c56f83403121dd05f261447211892d91a9e | 542 | py | Python | I_love_Easin.py | mdhasan8/Problem_Solving | ac18f30ecc7d1baa4cea382c53aec16a544530be | [
"MIT"
] | null | null | null | I_love_Easin.py | mdhasan8/Problem_Solving | ac18f30ecc7d1baa4cea382c53aec16a544530be | [
"MIT"
] | null | null | null | I_love_Easin.py | mdhasan8/Problem_Solving | ac18f30ecc7d1baa4cea382c53aec16a544530be | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 28 12:20:40 2021
@author: Easin
"""
in1 = input()
in1 = int(in1)
in2 = input().split()
list1 = []
for elem in range(len(in2)):
list1.append(int(in2[elem]))
#print(list1)
amazing = 0
min1 = list1[0]
max1 = list1[0]
for val in range(len(list1)-1):
if list1[val +1] > min1:
amazing +=1
min1 = list1[val+1]
elif list1[val +1] < max1:
amazing += 1
max1 = list1[val+1]
print(amazing)
| 16.424242 | 36 | 0.498155 |
in1 = input()
in1 = int(in1)
in2 = input().split()
list1 = []
for elem in range(len(in2)):
list1.append(int(in2[elem]))
amazing = 0
min1 = list1[0]
max1 = list1[0]
for val in range(len(list1)-1):
if list1[val +1] > min1:
amazing +=1
min1 = list1[val+1]
elif list1[val +1] < max1:
amazing += 1
max1 = list1[val+1]
print(amazing)
| true | true |
f7364d5dfda7356574ef3f753491f24da0998c62 | 854 | py | Python | triple_agent/reports/generation/generate_external_reports.py | andrewzwicky/TripleAgent | 8d056df5c53a3d264dc778bad6771a0a2f62e7e7 | [
"MIT"
] | 3 | 2020-04-25T11:42:03.000Z | 2020-07-08T16:38:26.000Z | triple_agent/reports/generation/generate_external_reports.py | andrewzwicky/TripleAgent | 8d056df5c53a3d264dc778bad6771a0a2f62e7e7 | [
"MIT"
] | 17 | 2019-08-11T19:09:55.000Z | 2021-03-30T17:12:28.000Z | triple_agent/reports/generation/generate_external_reports.py | andrewzwicky/TripleAgent | 8d056df5c53a3d264dc778bad6771a0a2f62e7e7 | [
"MIT"
] | null | null | null | import json
from enum import Enum
from triple_agent.reports.generation.generic_query import populate_data_properties
def generate_external_reports(
    games, data_query_properties, json_file_path, html_file_path
):
    """Run the query over *games* and export the resulting table.

    Writes the frame as an indented JSON dict to *json_file_path* and the
    transposed frame as HTML to *html_file_path*.  Enum-valued column/index
    labels are first mapped to their ``.name`` strings, because pandas cannot
    serialize Enum labels to JSON (pandas issue #15273).
    """
    _, data_props = populate_data_properties(games, data_query_properties)
    frame = data_props.frame

    # Normalize Enum labels on either axis to plain strings.
    if isinstance(frame.columns[0], Enum):
        frame.columns = frame.columns.map(lambda label: label.name)
    if isinstance(frame.index[0], Enum):
        frame.index = frame.index.map(lambda label: label.name)

    with open(json_file_path, "w") as json_out:
        json.dump(frame.to_dict(), json_out, indent=4)
    frame.T.to_html(html_file_path)
| 35.583333 | 82 | 0.75644 | import json
from enum import Enum
from triple_agent.reports.generation.generic_query import populate_data_properties
def generate_external_reports(
games, data_query_properties, json_file_path, html_file_path
):
_, data_props = populate_data_properties(games, data_query_properties)
if isinstance(data_props.frame.columns[0], Enum):
data_props.frame.columns = data_props.frame.columns.map(lambda x: x.name)
if isinstance(data_props.frame.index[0], Enum):
data_props.frame.index = data_props.frame.index.map(lambda x: x.name)
with open(json_file_path, "w") as at_json_out:
json.dump(data_props.frame.to_dict(), at_json_out, indent=4)
data_props.frame.T.to_html(html_file_path)
| true | true |
f7364d9908dc590a28f74d0749e5123f5854f229 | 160,627 | py | Python | IsoFit.py | wjwainwright/Capstone | a2ea661079ece6ff5008f4399b3f0f6d32c598d3 | [
"MIT"
] | null | null | null | IsoFit.py | wjwainwright/Capstone | a2ea661079ece6ff5008f4399b3f0f6d32c598d3 | [
"MIT"
] | null | null | null | IsoFit.py | wjwainwright/Capstone | a2ea661079ece6ff5008f4399b3f0f6d32c598d3 | [
"MIT"
] | null | null | null | try:
runCount += 1
except:
isoIn = False
clIn = False
cataIn = False
closePlots = False
resultsIn = False
clusterList = []
clusters=[]
isochrones = []
isoList = []
catalogue = []
runCount = 1
class resultClusterObj:
    """Lightweight snapshot of a cluster: copies every public scalar
    (int/float/np.float64) and string attribute of the given cluster,
    skipping the large data arrays and methods."""

    def __init__(self, cl):
        """Populate this result object from cluster *cl*.

        Numeric attributes are stored as plain floats; strings are copied
        as-is; everything else (lists, arrays, callables) is ignored.
        """
        import numpy as np
        # Preserve the module-level `properties` side effect of the original.
        global properties
        # Public attribute names of cl (drops __dunder__/_private entries).
        properties = [a for a in dir(cl) if not a.startswith('_')]
        for prop in properties:
            value = getattr(cl, prop)
            # getattr/setattr replace the original eval/exec string plumbing:
            # same effect, no re-parsing of attribute values.  The exact
            # type() checks are kept (bool is deliberately excluded).
            if type(value) == float or type(value) == np.float64 or type(value) == int:
                setattr(self, prop, float(value))
            elif type(value) == str:
                setattr(self, prop, value)
        # Manually defined properties (always present on clusters).
        self.name = cl.name
        self.clType = cl.clType
class clusterObj:
    """Container for one star cluster: raw and filtered star lists, fitted
    quantities, and catalogued membership cuts, plus its on-disk layout
    (<basedir>/<name>/data and <basedir>/<name>/plots)."""

    def __init__(self, name='genericCluster', basedir='clusters/', brightThreshold=15):
        """Create a cluster named *name* under *basedir*.

        Parameters
        ----------
        name : str
            Cluster identifier; also the per-cluster directory name.
        basedir : str
            Root directory holding the per-cluster folders.
        brightThreshold : float
            G-magnitude cutoff separating the "bright" star subset.
        """
        # On-disk paths
        self.basedir = basedir
        self.dataPath = self.basedir + f"{name}/data/"
        self.imgPath = self.basedir + f"{name}/plots/"
        # Star lists populated by readClusters() and the filtering steps
        self.unfilteredWide = []
        self.unfilteredNarrow = []
        self.filtered = []
        self.mag = []
        self.iso = []
        self.condensed = []
        self.condensed0 = []
        self.condensedInit=[]
        self.unfilteredBright = []
        self.filteredBright = []
        self.brightmag = []
        self.distFiltered = []
        self.binaries = []
        self.stars = []
        self.brightThreshold = brightThreshold
        # Summary statistics (zero until computed elsewhere)
        self.mean_par = 0
        self.stdev_par = 0
        self.mean_ra = 0
        self.mean_dec = 0
        self.stdev_ra = 0
        self.stdev_dec = 0
        self.mean_pmra = 0
        self.stdev_pmra = 0
        self.mean_pmdec = 0
        self.stdev_pmdec = 0
        self.mean_a_g = 0
        self.stdev_a_g = 0
        self.mean_e_bp_rp = 0
        self.stdev_e_bp_rp = 0
        self.mean_par_over_ra = 0
        self.stdev_par_over_ra = 0
        self.dist_mod = 0
        self.turnPoint = 0
        self.reddening = 0
        self.radDist = 0
        self.massLoaded = False
        # Catalogued properties (-99 / "None" mean "not yet catalogued")
        self.name = name
        self.clType = "None"
        self.pmra_min = -99
        self.pmra_max = -99
        self.pmdec_min = -99
        self.pmdec_max = -99
        self.par_min = -99
        self.par_max = -99
        self.cltpx = -99
        self.cltpy = -99
        self.noise_cutoff = -99
        # Ensure the on-disk directories exist.  makedirs(exist_ok=True) also
        # creates missing parents — the original os.mkdir calls raised
        # FileNotFoundError whenever <basedir>/<name> did not exist yet.
        import os
        os.makedirs(self.dataPath, exist_ok=True)
        os.makedirs(f"{self.imgPath}/png", exist_ok=True)
#Gaia DR2 Implementation
# class starObj:
# def __init__(self,name,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err,ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr,astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_match_obs,astro_sigma5d,match_obs,g_mag,b_mag,r_mag,b_r,b_g,g_r,radvel,radvel_err,variable,teff,a_g,e_bp_rp,lum):
# #Declare instance variables
# self.name = name
# self.ra = float(ra)
# self.ra_err = float(ra_err)
# self.dec = float(dec)
# self.dec_err = float(dec_err)
# self.par = float(par)
# self.par_err = float(par_err)
# self.par_over_err = float(par_over_err)
# self.pmra = float(pmra)
# self.pmra_err = float(pmra_err)
# self.pmdec = float(pmdec)
# self.pmdec_err = float(pmdec_err)
# self.ra_dec_corr = float(ra_dec_corr)
# self.ra_par_corr = float(ra_par_corr)
# self.ra_pmra_corr = float(ra_pmra_corr)
# self.ra_pmdec_corr = float(ra_pmdec_corr)
# self.dec_par_corr = float(dec_par_corr)
# self.dec_pmra_corr = float(dec_pmra_corr)
# self.dec_pmdec_corr = float(dec_pmdec_corr)
# self.par_pmra_corr = float(par_pmra_corr)
# self.par_pmdec_corr = float(par_pmdec_corr)
# self.pmra_pmdec_corr = float(pmra_pmdec_corr)
# self.astro_n_obs = float(astro_n_obs)
# self.astro_n_good_obs = float(astro_n_good_obs)
# self.astro_n_bad_obs = float(astro_n_bad_obs)
# self.astro_gof = float(astro_gof)
# self.astro_chi2 = float(astro_chi2)
# self.astro_noise = float(astro_noise)
# self.astro_noise_sig = float(astro_noise_sig)
# self.astro_match_obs = float(astro_match_obs)
# self.astro_sigma5d = float(astro_sigma5d)
# self.match_obs = float(match_obs)
# self.g_mag = float(g_mag)
# self.b_mag = float(b_mag)
# self.r_mag = float(r_mag)
# self.b_r = float(b_r)
# self.b_g = float(b_g)
# self.g_r = float(g_r)
# self.radvel = float(radvel)
# self.radvel_err = float(radvel_err)
# self.variable = variable
# self.teff = float(teff)
# self.a_g = float(a_g)
# self.e_bp_rp = float(e_bp_rp)
# self.lum = float(lum)
# self.member = 0
# self.binary = 0
# self.radDist = 0
# self.par_over_ra = float(par)/float(ra)
# self.par_over_dec = float(par)/float(dec)
# self.par_over_pmra = float(par)/float(pmra)
# self.par_over_pmdec = float(par)/float(pmdec)
# self.vosaPoints = []
# self.excess = 0
#Gaia DR3 implementation
class starObj:
    """One Gaia (E)DR3 source: astrometry, photometry, radial velocity and
    the bookkeeping fields used by the membership / binary analysis.

    All constructor arguments arrive as strings (the CSVs are read with
    dtype=str) and are coerced here; derived quantities (parallax ratios,
    cos(dec)-normalized RA, ...) are computed once at construction.
    NOTE(review): the pseudocolor arguments are accepted for column
    compatibility but are not stored as attributes (unchanged behavior).
    """

    def __init__(self,name,source_id,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err, #Basic astrometrics
                 ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr, #Correlations
                 astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_nu_eff, #Assorted astrometric properties
                 pseudocolor,pseudocolor_err,ra_pseudocolor_corr,dec_pseudocolor_corr,par_pseudocolor_corr,pmra_pseudoclor_corr,pmdec_pseudocolor_corr, #Pseudocolor
                 astro_sigma5d,duplicated_source, #More assorted properties
                 g_flux,g_flux_err,g_mag, #Gaia_G
                 b_flux,b_flux_err,b_mag, #Gaia_BP
                 r_flux,r_flux_err,r_mag, #Gaia_RP
                 b_over_r_excess,b_r,b_g,g_r, #Color indices and excess
                 radvel,radvel_err,radvel_num_transits,radvel_teff,radvel_feh, #Template Teff and Fe/H used to calculate the radvel
                 l,b,long,lat): #Galactic l and b, ecliptic long and lat
        import numpy as np
        # Identification
        self.name = name
        self.source_id = source_id
        # Basic astrometrics
        self.ra = float(ra)
        self.ra_err = float(ra_err)
        self.dec = float(dec)
        self.dec_err = float(dec_err)
        self.par = float(par)
        self.par_err = float(par_err)
        self.par_over_err = float(par_over_err)
        self.pmra = float(pmra)
        self.pmra_err = float(pmra_err)
        self.pmdec = float(pmdec)
        self.pmdec_err = float(pmdec_err)
        # Astrometric correlation coefficients
        self.ra_dec_corr = float(ra_dec_corr)
        self.ra_par_corr = float(ra_par_corr)
        self.ra_pmra_corr = float(ra_pmra_corr)
        self.ra_pmdec_corr = float(ra_pmdec_corr)
        self.dec_par_corr = float(dec_par_corr)
        self.dec_pmra_corr = float(dec_pmra_corr)
        self.dec_pmdec_corr = float(dec_pmdec_corr)
        self.par_pmra_corr = float(par_pmra_corr)
        self.par_pmdec_corr = float(par_pmdec_corr)
        self.pmra_pmdec_corr = float(pmra_pmdec_corr)
        # Astrometric solution quality
        self.astro_n_obs = float(astro_n_obs)
        self.astro_n_good_obs = float(astro_n_good_obs)
        self.astro_n_bad_obs = float(astro_n_bad_obs)
        self.astro_gof = float(astro_gof)
        self.astro_chi2 = float(astro_chi2)
        self.astro_noise = float(astro_noise)
        self.astro_noise_sig = float(astro_noise_sig)
        self.astro_nu_eff = float(astro_nu_eff)
        self.astro_sigma5d = float(astro_sigma5d)
        # Bug fix: the original bool(duplicated_source) was True for the
        # string "False" (every CSV field is a non-empty string); parse the
        # text explicitly instead.  Real booleans still round-trip correctly.
        self.duplicated_source = str(duplicated_source).strip().lower() in ("true", "1", "t")
        # Photometry: fluxes, flux errors and magnitudes per Gaia band
        self.g_flux = float(g_flux)
        self.g_flux_err = float(g_flux_err)
        self.g_mag = float(g_mag)
        self.b_flux = float(b_flux)
        self.b_flux_err = float(b_flux_err)
        self.b_mag = float(b_mag)
        self.r_flux = float(r_flux)
        self.r_flux_err = float(r_flux_err)
        self.r_mag = float(r_mag)
        # Color indices and BP/RP excess
        self.b_over_r_excess = float(b_over_r_excess)
        self.b_r = float(b_r)
        self.b_g = float(b_g)
        self.g_r = float(g_r)
        # Radial velocity and the template parameters used to derive it
        self.radvel = float(radvel)
        self.radvel_err = float(radvel_err)
        self.radvel_num_transits=float(radvel_num_transits)
        self.radvel_teff = float(radvel_teff)
        self.radvel_feh = float(radvel_feh)
        # Galactic (l, b) and ecliptic (long, lat) coordinates
        self.l = float(l)
        self.b = float(b)
        self.long = float(long)
        self.lat = float(lat)
        # Analysis bookkeeping, filled in by later pipeline stages
        self.member = 0
        self.binary = 0
        self.radDist = 0
        # Derived ratios and cos(dec)-normalized RA
        self.par_over_ra = float(par)/float(ra)
        self.par_over_dec = float(par)/float(dec)
        self.par_over_pmra = float(par)/float(pmra)
        self.par_over_pmdec = float(par)/float(pmdec)
        self.normRA = self.ra*np.cos(self.dec*np.pi/180)
        self.vosaPoints = []
        self.excess = 0
class isochroneObj:
    """A single theoretical isochrone (fixed age, [Fe/H], [a/Fe], Y) plus the
    photometry lists derived from its model stars."""

    def __init__(self, age=404, feh=404, afe=404, y=404,
                 basedir='isochrones/', subdir='processed', isodir=''):
        """Initialize an empty isochrone; the 404 defaults mark unset values."""
        # Physical parameters
        self.age = age
        self.feh = feh
        self.afe = afe
        self.y = y
        # Unique label built from the parameters
        self.name = f"feh_{feh}_afe_{afe}_age_{age}_y_{y}"
        # File-system locations
        self.basedir = basedir
        self.subdir = subdir
        self.isodir = isodir
        # Model stars and derived photometry (filled elsewhere)
        self.starList = []
        self.g = []
        self.br = []
        # Fit results
        self.distance = 0
        self.coeff = []
class fakeStarObj:
    """Synthetic photometry point: Gaia G/BP/RP magnitudes plus the derived
    color indices and a fitting score."""

    def __init__(self, g_mag, b_mag, r_mag):
        self.g_mag = g_mag
        self.b_mag = b_mag
        self.r_mag = r_mag
        # Color indices derived from the three magnitudes
        self.b_r = b_mag - r_mag
        self.b_g = b_mag - g_mag
        self.g_r = g_mag - r_mag
        # Goodness-of-fit score, assigned elsewhere
        self.score = 0
class mistStar:
    """One row of a MIST isochrone table: attributes are created dynamically
    from (column_name, value) pairs."""

    def __init__(self, properties):
        """Set one attribute per (name, value) pair in *properties*.

        Infinite values are clamped to the sentinel 50, as before.
        """
        for prop, val in properties:
            if "inf" in str(val):
                val = 50
            # setattr replaces the original exec(f"self.{prop} = {val}"):
            # identical result for numeric values, without re-parsing the
            # value's repr through the interpreter.
            setattr(self, prop, val)
class condensedPoint:
    """Weighted (color, magnitude) node of a condensed CMD sequence."""

    def __init__(self, b_r, g_mag, weight):
        self.b_r = b_r        # BP-RP color
        self.g_mag = g_mag    # G magnitude
        self.weight = weight  # relative weight of this node
class vosaPoint:
    """Single VOSA SED measurement: a filter, its wavelength, the observed
    and model fluxes with their errors, and the excess flag."""

    def __init__(self, filterID, wavelength, obs_flux, obs_error,
                 flux, flux_error, excess):
        self.filterID = filterID
        self.wavelength = wavelength
        # Observed photometry
        self.obs_flux = obs_flux
        self.obs_error = obs_error
        # Model photometry
        self.flux = flux
        self.flux_error = flux_error
        self.excess = excess
class cataloguedCluster():
    """Catalogue row for a cluster: proper-motion and parallax membership
    windows, the CMD turn-point location, and the noise cutoff, coerced to
    their working types."""

    def __init__(self, name, clType, pmra_min, pmra_max, pmdec_min, pmdec_max,
                 par_min, par_max, cltpx, cltpy, noise_cutoff):
        # Identification
        self.name = str(name)
        self.clType = str(clType)
        # Proper-motion membership window
        self.pmra_min = float(pmra_min)
        self.pmra_max = float(pmra_max)
        self.pmdec_min = float(pmdec_min)
        self.pmdec_max = float(pmdec_max)
        # Parallax window
        self.par_min = float(par_min)
        self.par_max = float(par_max)
        # CMD turn-point coordinates
        self.cltpx = float(cltpx)
        self.cltpy = float(cltpy)
        # Astrometric noise threshold
        self.noise_cutoff = float(noise_cutoff)
class Datum:
    """A selectable 2-D point for the lasso tool: colored red when included
    in the current selection, blue otherwise."""
    # Class-level import keeps matplotlib off the module import path.
    from matplotlib import colors as mcolors
    colorin = mcolors.to_rgba("red")    # RGBA used for selected points
    colorout = mcolors.to_rgba("blue")  # RGBA used for unselected points
    def __init__(self, x, y, include=False):
        # x/y are data coordinates; in LassoManager.callback x is matched
        # against star b_r values, so presumably (color, magnitude) pairs.
        self.x = x
        self.y = y
        if include:
            self.color = self.colorin
        else:
            self.color = self.colorout
class LassoManager:
    """Interactive lasso selector over a scatter of Datum points.

    Renders the points as a RegularPolyCollection on *ax*; a mouse press
    starts a matplotlib Lasso, and the lassoed points are recolored and
    recorded as the cluster's binary candidates.
    """
    def __init__(self, ax, data, cluster):
        # ax: matplotlib axes; data: list of Datum; cluster: a clusterObj
        # whose name is used to look up the live cluster in `clusters`.
        from matplotlib.collections import RegularPolyCollection
        self.axes = ax
        self.canvas = ax.figure.canvas
        self.data = data
        self.cluster = cluster
        self.Nxy = len(data)
        facecolors = [d.color for d in data]
        self.xys = [(d.x, d.y) for d in data]
        # Hexagon markers, positioned in data coordinates.
        self.collection = RegularPolyCollection(
            6, sizes=(5,),
            facecolors=facecolors,
            offsets=self.xys,
            transOffset=ax.transData)
        ax.add_collection(self.collection)
        self.cid = self.canvas.mpl_connect('button_press_event', self.on_press)
    def callback(self, verts):
        """Lasso-finished callback: recolor enclosed points and store the
        corresponding stars in cluster.binaries."""
        from matplotlib import path
        global coords  # NOTE(review): `coords` is declared but never used here
        global clusters
        cluster = clusters[self.cluster.name]
        facecolors = self.collection.get_facecolors()
        p = path.Path(verts)
        ind = p.contains_points(self.xys)
        cluster.binaries = []
        for i in range(len(self.xys)):
            if ind[i]:
                facecolors[i] = Datum.colorin
                # NOTE(review): matches a star by its b_r value via list.index;
                # the first star with an equal b_r wins — duplicates collide.
                star = cluster.filtered[[a.b_r for a in cluster.filtered].index(self.xys[i][0])]
                cluster.binaries.append(star)
            else:
                facecolors[i] = Datum.colorout
        self.canvas.draw_idle()
        self.canvas.widgetlock.release(self.lasso)
        del self.lasso
    def on_press(self, event):
        """Begin a lasso on mouse press, unless another widget holds the lock
        or the press was outside the axes."""
        from matplotlib.widgets import Lasso
        if self.canvas.widgetlock.locked():
            return
        if event.inaxes is None:
            return
        self.lasso = Lasso(event.inaxes,
                           (event.xdata, event.ydata),
                           self.callback)
        # acquire a lock on the widget drawing
        self.canvas.widgetlock(self.lasso)
def clusterCatalogue(types='all'):
    """Load catalogue.csv into the module-global `catalogue` as
    cataloguedCluster objects and set the `cataIn` flag.

    types : str
        Only 'all' is honored; any other value parses the file but discards
        the result.  NOTE(review): looks like an unfinished filter hook —
        the `return` sits inside the `if`, so other values fall through.
    """
    import numpy as np
    import pandas as pd
    global data
    global catalogue
    global cataIn
    # Every column is read as a string; coercion happens in
    # cataloguedCluster.__init__.
    data = pd.read_csv("catalogue.csv",sep=',',dtype=str)
    data = data.to_numpy(dtype=str)
    cata = []
    for row in data:
        cata.append(cataloguedCluster(*row))
    if types == 'all':
        catalogue = cata
        cataIn = True
        return
def readClusters(cList=["M67"],basedir="clusters/",smRad=0.35):
    """Load the named clusters from disk into module-global state.

    For each name in *cList*: build a clusterObj, copy its catalogued
    membership cuts from the catalogue, read <data>/wide.csv (all columns as
    strings), pre-filter the rows, wrap each row in a starObj, and compute
    summary statistics.  Results are appended to the global `clusterList`
    and exposed via toDict().

    NOTE(review): the mutable default cList=["M67"] is shared across calls;
    harmless while the function only iterates it, but worth confirming.
    """
    #Imports
    import numpy as np
    import pandas as pd
    global clusterList
    global clusters
    global stars
    global clIn
    global catalogue
    # Unload any already-loaded cluster being re-read; on the first run the
    # globals may not exist yet, hence the bare except resetting the list.
    try:
        if clIn and len(clusterList) > 0:
            for clname in cList:
                if clname in clusters:
                    unloadClusters([clname])
    except:
        clusterList=[]
    #Check the cluster catalogue to load the catalogued properties
    if not cataIn:
        clusterCatalogue()
    #Loop through clusters
    for clname in cList:
        #Create cluster objects
        cluster = clusterObj(name=clname,basedir=basedir)
        reference = None
        for cl in catalogue:
            if str(cl.name) == str(clname):
                reference = cl
                print(f"Catalogue match for {clname} found")
                break
        if reference == None:
            print(f"Catalogue match for {clname} was not found, please create one")
            continue
        #Filter all of the methods out of the properties list
        properties = [a for a in dir(reference) if not a.startswith('_')]
        print(properties)
        #exec(f"print(reference.{properties[1]})")
        #print(properties)
        # Copy every catalogued attribute onto the cluster, warning about
        # -99 placeholders (values never catalogued).
        for p in properties:
            prop = getattr(reference,p)
            #print(prop)
            exec(f"cluster.{p} = prop")
            try:
                if prop <= -98:
                    print(f"{clname} does not have a specified catalogue value for {p}")
            except:
                continue
        # if cluster.name == 'NGC752' or cluster.name == 'NGC188':
        #     cluster.brightThreshold=18
        # if "M67" in clname:
        #     cluster.type = "open"
        # if "M35" in clname:
        #     cluster.type = "open"
        # if "NGC188" in clname:
        #     cluster.type = "open"
        # if "NGC752" in clname:
        #     cluster.type = "open"
        # if "IC4651" in clname:
        #     cluster.type = "open"
        # if "NGC2451" in clname:
        #     cluster.type = "open"
        # if "AlphaPer" in clname:
        #     cluster.type = "open"
        # if "M12" in clname:
        #     cluster.type = "globular"
        # if "M3" in clname:
        #     cluster.type = "globular"
        # if "M5" in clname:
        #     cluster.type = "globular"
        # if "M15" in clname:
        #     cluster.type = "globular"
        # if "M53" in clname:
        #     cluster.type = "globular"
        # if "NGC6426" in clname:
        #     cluster.type = "globular"
        # if "NGC6934" in clname:
        #     cluster.type = "globular"
        """
        #Generate wide-field star list
        starlist = np.genfromtxt(cluster.dataPath+"narrow.csv", delimiter=",", skip_header=1, usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17))
        starlist = preFilter(starlist)
        for s in starlist:
            star = starObj(s[0],s[1],s[2],s[3],s[4],s[5],s[6],s[7],s[8],s[9],s[10],s[11],s[12],s[13],s[14],s[15],s[16],s[17])
            cluster.unfilteredNarrow.append(star)
        """
        # Read wide.csv twice: once into the working array, once kept as a
        # DataFrame in the global `stars`.  All columns stay strings;
        # starObj does the numeric coercion.
        #Generate narrow-field star list
        starlist = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
        stars = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
        starlist = starlist.to_numpy(dtype=str)
        #starlist = np.genfromtxt(cluster.dataPath+"wide.csv", delimiter=",", skip_header=1)
        print(f"{clname} initial length: {len(starlist)}")
        starlist = preFilter(starlist)
        print(f"{clname} post-prefiltered length: {len(starlist)}")
        ramean = np.mean([float(x) for x in starlist[:,1]])
        decmean = np.mean([float(x) for x in starlist[:,3]])
        for s in starlist:
            star = starObj(*s)
            cluster.unfilteredWide.append(star)
            if np.less_equal(star.g_mag,cluster.brightThreshold):
                cluster.unfilteredBright.append(star)
            # if np.less_equal(np.sqrt(((star.ra-ramean)*np.cos(np.pi/180*star.dec))**2+(star.dec-decmean)**2),smRad):
            #     cluster.unfilteredNarrow.append(star)
        clusterList.append(cluster)
        calcStats(cluster,mode='narrow')
    # NOTE(review): this block sits OUTSIDE the loop, so it only checks the
    # last clname processed — confirm whether it should be inside the loop.
    if not 'YSO' in clname:
        rmOutliers()
    clIn = True
    toDict()
def pad(string, pads):
    """Re-wrap a comma-separated string so that each output line holds `pads` values.

    Args:
        string: flat comma-separated values.
        pads: number of values per output row.

    Returns:
        The values re-joined with commas within rows and newlines between rows.
    """
    values = string.split(',')
    rows = []
    for start in range(0, len(values), pads):
        rows.append(','.join(values[start:start + pads]))
    return '\n'.join(rows)
def readIso(basedir='isochrones/',subdir='MIST_raw/'):
    """Parse raw MIST isochrone files into isochroneObj instances.

    Reads every file in basedir+subdir, extracts the composition from the
    fixed-position header lines, and builds one isochroneObj per age block.
    Populates the module-level isoList (and the isochrones dict via toDict())
    and records the shared column labels in the global isochrone_headers.

    Args:
        basedir: root directory of isochrone data.
        subdir: folder of raw MIST files, one file per composition.
    """
    #Important note: The ages are rounded to a few decimal places in the Gyr range
    #This has the effect of making it such that a few dozen isochrones in the kyr range
    #are overwritten because they all round to the same value. I found this to be an issue
    #worth overlooking given that a cluster of that age hasn't been identified yet
    #Imports
    import os
    import re
    global isochrone_headers
    global isoList
    global isoIn
    path = basedir + subdir
    isoList = []
    for fn in os.listdir(path):
        #Read in file
        main = open(path+fn).read()
        main = main.split("\n")
        #Relevant variables from the fixed-position header lines
        N_iso = int(main[7].split("=")[1])  #number of isochrone blocks in this file
        index = 13  #line number of the first data row of the first isochrone
        varList = re.sub("\s+", ",", main[5].strip()).split(",")
        afe = varList[4]
        feh = varList[3]
        y = varList[1]
        z = varList[2]
        v_vcrit = varList[5]
        #Column labels
        #Replace any number of spaces with a single comma, then replace a few problematic phrases and split the list by commas
        isochrone_headers = re.sub("\s+", ",", main[12].replace("2MASS","TwoMASS").replace("[Fe/H]","feh").strip()).split(",")[1:]
        for idx in range(0,N_iso):
            #Star count for this block, taken from the header 3 lines above the data
            N_stars = int(re.sub("\s+", "," , main[index-3].split("=")[1]).split(",")[1])
            #Populate a single isochrone
            stars = []
            for i in range(index,index+N_stars):
                #Send the header and values to the mistStar object
                values = [float(a) for a in re.sub("\s+", "," , main[i].strip()).split(",")]
                properties = zip(isochrone_headers,values)
                stars.append(mistStar(properties))
            #Create the isochrone from the list of stars
            #values[1] is log10(age/yr); convert to Gyr and round (see note at top)
            age = round(10**values[1]/1e9,3)
            iso = isochroneObj(age,feh,afe,y)
            iso.starList = stars
            iso.br = [star.Gaia_BP_EDR3-star.Gaia_RP_EDR3 for star in stars]
            iso.g = [star.Gaia_G_EDR3 for star in stars]
            isoList.append(iso)
            #Advance past this block's rows plus the 5 header/blank lines between blocks
            index += N_stars + 5
    isoIn = True
    toDict()
def checkIsoDupes():
    """Report isochrone names that appear more than once in the global isoList.

    Prints each duplicate name (preserving the original behavior) and also
    returns them, so callers can act on the result programmatically.

    Returns:
        list: duplicate isochrone names, empty when all names are unique.
    """
    global isoList
    #Set membership is O(1) per lookup; the original list scan was O(n) each time
    seen = set()
    dupes = []
    for iso in isoList:
        if iso.name in seen:
            print(iso.name)
            dupes.append(iso.name)
        else:
            seen.add(iso.name)
    return dupes
def processIso(basedir='isochrones/',subdir='raw/'):
    """Convert raw isochrone text files into per-age CSV files.

    Each input file is split into age blocks (separated by double blank lines);
    every block is rewritten as an 8-column CSV named <age>.csv inside
    basedir/processed/<source-file-stem>/.

    Args:
        basedir: root directory of isochrone data.
        subdir: folder of raw input files under basedir.
    """
    #Imports
    import os
    import re
    path = basedir + subdir
    for fn in os.listdir(path):
        main = open(path+fn).read()
        #Age blocks are separated by two consecutive blank lines
        part = main.split('\n\n\n')
        #Strip the decorative file header: keep only text after the 3rd separator line
        part[0] = part[0].split('#----------------------------------------------------')[3].split('\n',1)[1]
        for a in range(len(part)):
            #The age is embedded in the '#AGE=... EEPS=...' header of each block
            temp = part[a].split('#AGE=')[1].split(' EEPS=')[0]
            age = temp.strip()
            #Drop the two header lines, collapse whitespace to commas, re-wrap 8 per row
            out = part[a].split('\n',2)[2]
            out = re.sub("\s+", ",", out.strip())
            out = pad(out,8)
            filename = f"{basedir}processed/"+fn.split('.')[0]+'/'+age+".csv"
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename,"w") as f:
                f.write(out)
def readIsochrones(basedir='isochrones/',subdir='processed/'):
    """Load processed per-age isochrone CSVs into isochroneObj instances.

    Folder names encode the composition (e.g. 'fehm05afep0'); file names encode
    the age. Populates the global isoList and the isochrones dict via toDict().

    Args:
        basedir: root directory of isochrone data.
        subdir: folder of processed CSVs under basedir.
    """
    #Imports
    import os
    import numpy as np
    global isoList
    global isoIn
    isoList=[]
    for folder in os.listdir(basedir+subdir):
        for fn in os.listdir(basedir+subdir+folder):
            #Get the age and metallicities of the isochrones from the path names
            ageStr = fn.split('.csv')[0]
            fehStr = folder.split('feh')[1].split('afe')[0]
            afeStr = folder.split('afe')[1].split('y')[0]
            if 'y' in folder:
                yStr = folder.split('y')[1]
            else:
                yStr = '0'
            #First character of fehStr/afeStr is the sign flag ('m' = minus, see below)
            feh = float(fehStr[1]+fehStr[2])/10
            afe = float(afeStr[1])/10
            age = float(ageStr)
            #NOTE(review): y is parsed as a raw int, unlike feh/afe which are scaled
            #by /10 — confirm the folder naming convention for the helium fraction
            y = int(yStr)
            if fehStr[0] == 'm':
                feh = feh*-1
            if afeStr[0] == 'm':
                afe = afe*-1
            #Create isochone object
            iso = isochroneObj(age=age,feh=feh,afe=afe,y=y,basedir=basedir,subdir=subdir,isodir=folder+'/')
            isoArr = np.genfromtxt(basedir+subdir+folder+"/"+fn, delimiter=",")
            for s in isoArr:
                #Columns 5,6,7 of the processed CSV are the G, BP, RP magnitudes
                star = fakeStarObj(s[5],s[6],s[7])
                iso.starList.append(star)
                iso.br.append(s[6]-s[7])
                iso.g.append(s[5])
            isoList.append(iso)
    isoIn = True
    toDict()
def preFilter(starList):
    """Drop stars whose required columns contain NaN values.

    The required columns are: 2-12 (astrometry), 42/45/48 (G, BP, RP
    magnitudes), and 50-52 (color indices). A star with NaN in any of these is
    discarded; NaNs elsewhere are tolerated.

    Args:
        starList: iterable of per-star rows (string-convertible values).

    Returns:
        numpy.ndarray of the surviving rows.
    """
    #Imports
    import numpy as np
    #Columns that must be finite for a star to survive
    required = list(range(2, 13)) + [42, 45, 48] + list(range(50, 53))
    kept = [row for row in starList
            if not any(np.isnan(float(row[c])) for c in required)]
    return np.array(kept)
def rmOutliers():
    """Strip astrometric outliers from every loaded cluster's unfiltered lists.

    A star is removed when it lies more than 5 units from the cluster mean in
    proper-motion space or in position, or when |parallax| exceeds 5 mas.
    NGC188 additionally gets a hard RA > 100 deg cut. Removed stars are also
    purged from unfilteredNarrow when present.
    """
    #Imports
    global clusterList
    import numpy as np
    for cluster in clusterList:
        #NOTE(review): 'scale' is computed but never used below — confirm whether
        #the thresholds were meant to be scaled by cluster type
        if cluster.clType.lower() == "globular":
            scale = 4
        else:
            scale = 1.5
        #Thresholds: pm in mas/yr, position in deg, parallax in mas
        pmthreshold = 5
        #NOTE(review): pmpthreshold is never used in this function
        pmpthreshold = 50
        parthreshold = 5
        posthreshold = 5
        toRemove=[]
        #Classifies outliers
        for star in cluster.unfilteredWide:
            if cluster.name == "NGC188":
                if star.ra > 100:
                    toRemove.append(star)
            #NOTE(review): the cos() factor applies pmdec as if it were a declination;
            #presumably copied from the positional formula below — verify the intent
            if np.greater(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),pmthreshold) or np.greater(np.sqrt(((star.ra-cluster.mean_ra)*np.cos(np.pi/180*star.dec))**2+(star.dec-cluster.mean_dec)**2),posthreshold) or np.greater(abs(star.par),parthreshold):
                toRemove.append(star)
        #Removes the outliers from the array
        for rm in toRemove:
            cluster.unfilteredWide.remove(rm)
            try:
                #Narrow list may not contain this star; ignore if absent
                cluster.unfilteredNarrow.remove(rm)
            except ValueError:
                pass
def calcStats(cluster,mode='filtered'):
    """Compute and attach summary astrometric statistics to a cluster.

    Means and standard deviations of parallax, position and proper motion are
    taken over one of the cluster's star lists (selected by `mode`), the
    distance modulus is derived from the mean parallax, and every star in the
    selected list gets its (raw and cos(dec)-corrected) radial distance from
    the cluster center.

    Args:
        cluster: clusterObj instance or cluster name (resolved via clusters).
        mode: which star list to use — 'bright', 'narrow', or 'filtered'.
    """
    #Imports
    import numpy as np
    #Reads in all the values for a cluster
    par=[]
    par_err=[]
    ra=[]
    dec=[]
    pmra=[]
    pmdec=[]
    gmag = []
    br = []
    loopList=[]
    checkLoaded([cluster])
    if type(cluster) == str:
        cluster = clusters[cluster]
    if mode == 'bright':
        loopList = cluster.filteredBright
    elif mode == 'narrow':
        loopList = cluster.unfilteredNarrow
    elif mode == 'filtered':
        loopList = cluster.filtered
    for star in loopList:
        par.append(star.par)
        par_err.append(star.par_err)
        pmra.append(star.pmra)
        pmdec.append(star.pmdec)
        ra.append(star.ra)
        dec.append(star.dec)
        gmag.append(star.g_mag)
        br.append(star.b_r)
    #Calculate the statistics
    cluster.mean_par = np.mean(par[:])
    cluster.mean_ra = np.mean(ra[:])
    cluster.mean_dec = np.mean(dec[:])
    cluster.stdev_ra = np.std(ra[:])
    cluster.stdev_dec = np.std(dec[:])
    cluster.stdev_par = np.std(par[:])
    cluster.mean_pmra = np.mean(pmra[:])
    cluster.stdev_pmra = np.std(pmra[:])
    cluster.mean_pmdec = np.mean(pmdec[:])
    cluster.stdev_pmdec = np.std(pmdec[:])
    #NOTE(review): parallax/RA ratio is an unusual statistic — confirm it is still used
    cluster.mean_par_over_ra = np.mean([x/y for x,y in zip(par,ra)])
    cluster.stdev_par_over_ra = np.std([x/y for x,y in zip(par,ra)])
    cluster.mean_par_err = np.mean(par_err[:])
    #Distance modulus from the mean parallax (mas -> pc)
    cluster.dist_mod = 5*np.log10(1000/cluster.mean_par)-5
    for star in loopList:
        star.radDist = np.sqrt((star.ra-cluster.mean_ra)**2+(star.dec-cluster.mean_dec)**2)
        #cos(dec)-corrected angular distance from the cluster center
        star.normRadDist = np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-cluster.mean_ra*np.cos(cluster.mean_dec*np.pi/180))**2+(star.dec-cluster.mean_dec)**2)
def saveClusters(cList):
    """Serialize each named cluster to its data directory with dill.

    Also persists the associated results via saveResults() first. Each cluster
    is written to <dataPath>/filtered.pk1.

    Args:
        cList: list of cluster names present in the clusters dict.
    """
    #Imports
    import dill
    saveResults(cList)
    #Creates a pickle file with all of the saved instances
    for name in cList:
        target = clusters[name]
        with open(f"{target.dataPath}filtered.pk1", 'wb') as sink:
            dill.dump(target, sink)
def saveIsochrones():
    """Serialize every loaded isochrone with dill.

    Each isochrone is written to <basedir>/pickled/<name>.pk1 so that
    loadIsochrones() can restore it without re-parsing the raw files.
    """
    #Imports
    import dill
    #Fixed: the original declared 'global clusterList' (copy-paste slip) even
    #though this function only reads the isochrone list
    global isoList
    #Creates a pickle file with all of the saved instances
    for iso in isoList:
        with open(f"{iso.basedir}pickled/{iso.name}.pk1", 'wb') as output:
            dill.dump(iso, output)
def loadClusters(clusterNames=["M67"],basedir='clusters/'):
    """Restore pickled clusters into the in-memory registry.

    Any cluster already loaded under the same name is unloaded first, then the
    dill pickle at <basedir>/<name>/data/filtered.pk1 is read back in. Updates
    the global clusterList and rebuilds the lookup dicts via toDict().

    Args:
        clusterNames: names of the clusters to load.
        basedir: root directory holding one folder per cluster.
    """
    #Imports
    import dill
    global clusterList
    global clusters
    global clIn
    for name in clusterNames:
        #Replace any already-loaded copy of this cluster
        if name in clusters:
            unloadClusters([name])
        with open(f"{basedir}{name}/data/filtered.pk1",'rb') as source:
            restored = dill.load(source)
            clusterList.append(restored)
    clIn = True
    toDict()
def loadIsochrones(basedir='isochrones/'):
    """Restore every pickled isochrone from <basedir>/pickled/.

    Resets and repopulates the global isoList, then rebuilds the name lookup
    dict via toDict().

    Args:
        basedir: root directory of isochrone data.
    """
    #Imports
    import dill
    import os
    global isoList
    global isoIn
    isoList=[]
    pickleDir = f"{basedir}pickled/"
    for fn in os.listdir(pickleDir):
        #Reads in instances from the saved pickle file
        with open(pickleDir + fn,'rb') as source:
            isoList.append(dill.load(source))
    isoIn = True
    toDict()
def unloadClusters(cList=['all']):
    """Remove clusters from the in-memory registries.

    Args:
        cList: cluster names to drop; the sentinel 'all' unloads everything.
    """
    #Imports
    global clusterList
    global clusters
    names = [c.name for c in clusterList] if 'all' in cList else cList
    for name in names:
        #Drop from both the dict and the backing list, then release the object
        gone = clusters.pop(name)
        clusterList.remove(gone)
        del gone
def dataProcess(cList,load=False,fit=True,unload=True,plotting=True,member=True,save=True,close=True):
    """Run the full pipeline (load/read -> filter -> fit -> plot -> membership -> save) over a batch of clusters.

    Args:
        cList: cluster names to process.
        load: restore from pickles instead of re-reading raw CSVs.
        fit: run the isochrone fit (turboFit).
        unload: drop each cluster from memory when finished.
        plotting: generate the standard plot set.
        member: run membership/bounded statistics.
        save: pickle clusters and results afterwards.
        close: close matplotlib figures between stages to limit memory use.
    """
    #This method is largely intended for re-processing a bulk batch of clusters that have already been processed before,
    #meaning they already have condensed point lists or you are already aware of their fitting quality
    #Imports
    import matplotlib.pyplot as plt
    global clusterList
    global clusters
    global closePlots
    if not isoIn:
        loadIsochrones()
    #Clusters with a saved condensed.csv are reloaded; all others get auto-condensing
    loadList = ["M15","M12","M39","M46","M67","NGC188","NGC2355","NGC2158","IC4651","NGC6791","NGC2360","NGC2204"]
    for cl in cList:
        if cl in loadList:
            condensing = "load"
        else:
            condensing = "auto"
        if load:
            loadClusters([cl])
        else:
            readClusters([cl])
        turboFilter([cl])
        if close:
            plt.close('all')
        if fit:
            turboFit([cl],condensing=condensing)
        if plotting:
            plot([cl],['pos','pm','cmd','quiver','iso'])
            if close:
                plt.close('all')
        if member:
            proxyMatch([cl])
            boundedStats([cl],saveCl=False,unloadCl=False)
            membership(cl,mode='filtered')
            membership(cl,mode='bounded',N=75)
            plt.close('all')
        if save:
            saveClusters([cl])
            saveResults([cl])
        if unload:
            unloadClusters([cl])
def turboFilter(cl=["all"]):
    """Apply the full filtering cascade to each loaded cluster.

    Pipeline per cluster: proper-motion filter on the bright subset ->
    bright-star statistics -> parallax/distance filter -> proper-motion filter
    on the distance-filtered set -> manual per-cluster cuts -> noise cutoff by
    astrometric quality -> final statistics. Prints counts at each stage and
    emits diagnostic CMD plots.

    Args:
        cl: cluster names, or the 'all' sentinel handled by checkLoaded().
    """
    #Imports
    global clusterList
    cList = checkLoaded(cl)
    for clus in cList:
        cluster = clusters[clus]
        cluster.filteredBright,cluster.brightmag = pmFilter(cluster.unfilteredBright,cluster.name)
        print(f"==========================={cluster.name}===========================")
        print(f"bright unf/pm fil: {len(cluster.unfilteredBright)} / {len(cluster.filteredBright)}")
        #Bright-star stats feed the distance filter below
        calcStats(cluster,mode='bright')
        distFilter(cluster)
        print(f"dist(all): {len(cluster.distFiltered)}")
        cluster.filtered,cluster.mag = pmFilter(cluster.distFiltered,cluster.name)
        #Manual filtering of extraneous points
        cluster.filtered,cluster.mag = manualFilter(cluster)
        print(f"pm(all): {len(cluster.filtered)}")
        customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')
        #Drop the faint end once astrometric noise dominates
        magnitude = cutNoise(cluster)
        print(f"noise cutoff: mag {magnitude} length {len(cluster.filtered)}")
        customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')
        """
        for i in range(10):
            print(f"{cluster.filtered[i].b_r} {cluster.mag[i,0]}")
        """
        calcStats(cluster,mode='filtered')
    setFlag()
def manualFilter(cluster):
    """Apply hand-tuned per-cluster cuts that unblock the isochrone fit.

    Currently only M35 has a manual cut (dropping bright red contaminants);
    every other cluster passes through unchanged.

    Args:
        cluster: clusterObj with `filtered` and `mag` populated.

    Returns:
        tuple: (filtered star list, matching (B-R, G) magnitude array).
    """
    #This exists to remove any points that may or may not be relevant to the cluster but are prohibiting the fit from happening
    if "M35" not in cluster.name:
        return cluster.filtered,cluster.mag
    kept = [s for s in cluster.filtered if s.g_mag > 9 or s.b_r < 1]
    return kept,magList(kept)
def magList(filtered):
    """Build an (N,2) array of (BP-RP color, G magnitude) pairs from a star list.

    Bug fix: the original built the array but never returned it, so callers
    (e.g. manualFilter) received None instead of the magnitude array.

    Args:
        filtered: iterable of star objects exposing `b_r` and `g_mag`.

    Returns:
        numpy.ndarray of shape (N, 2) with one [b_r, g_mag] row per star.
    """
    import numpy as np
    mag = np.empty((0,2))
    for star in filtered:
        mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    return mag
def pmFilter(starList,name):
    """Keep stars inside the cluster's elliptical proper-motion window.

    The ellipse is centered on the catalogued pmra/pmdec window with semi-axes
    of half the window widths. Requires more than one survivor.

    Args:
        starList: candidate star objects.
        name: cluster name used to look up the catalogued pm window.

    Returns:
        tuple: (surviving star list, (N,2) array of their [b_r, g_mag]).
    """
    #Imports
    import numpy as np
    cluster = clusters[name]
    assert cluster.name == name
    #Apply an elliptical filter to the proper motion space
    semi_a = (cluster.pmra_max-cluster.pmra_min)/2
    semi_b = (cluster.pmdec_max-cluster.pmdec_min)/2
    center_ra = cluster.pmra_min + semi_a
    center_dec = cluster.pmdec_min + semi_b
    print(center_ra,center_dec)
    filtered = []
    mag = np.empty((0,2))
    for star in starList:
        inside = ((star.pmra-center_ra)**2/semi_a**2
                  + (star.pmdec-center_dec)**2/semi_b**2) <= 1
        if inside:
            filtered.append(star)
            mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    assert len(filtered) > 1
    print(len(filtered))
    return filtered,mag
def distFilter(cluster):
    """Append parallax-selected stars to cluster.distFiltered.

    When the catalogue provides a parallax window (par_min/par_max both
    nonzero) stars strictly inside it are kept; otherwise stars within
    1.5 * mean_par standard deviations of the mean parallax are kept.

    Args:
        cluster: clusterObj with unfilteredWide populated and distFiltered a list.
    """
    #Imports
    import numpy as np
    if cluster.par_min == 0 or cluster.par_max == 0:
        print(f"{cluster.name} filtered using mean parallax")
        #Window scales with both the mean parallax and its scatter
        window = 1.5*cluster.mean_par*cluster.stdev_par
        for star in cluster.unfilteredWide:
            if np.abs(star.par - cluster.mean_par) <= window:
                cluster.distFiltered.append(star)
    else:
        print(f"{cluster.name} filtered using min & max parallax values")
        for star in cluster.unfilteredWide:
            if cluster.par_min < star.par < cluster.par_max:
                cluster.distFiltered.append(star)
def cutNoise(cluster):
    """Trim the faint end of the filtered list once astrometric noise dominates.

    Walks the stars in order of increasing G magnitude, dropping individual
    stars whose astro_sigma5d exceeds the cluster's noise cutoff and stopping
    entirely after the 5th such star. cluster.filtered and cluster.mag are
    replaced with the surviving stars.

    Args:
        cluster: clusterObj with `filtered` populated and noise_cutoff set
            (values <= -98 mean "undefined" and fall back to 1).

    Returns:
        The G magnitude of the last star examined (the effective cutoff).
    """
    #Imports
    import numpy as np
    byMag = sorted(cluster.filtered, key=lambda st: st.g_mag)
    threshold = cluster.noise_cutoff
    if threshold <= -98:
        threshold = 1
        print(f"{cluster.name} noise cutoff undefined, using default")
    keep = []
    keepMag = np.empty((0,2))
    noisy = 0
    noisyLimit = 5
    for star in byMag:
        if star.astro_sigma5d > threshold:
            noisy += 1
            if noisy >= noisyLimit:
                break
        else:
            keep.append(star)
            keepMag = np.r_[keepMag,[[star.b_r,star.g_mag]]]
    cluster.filtered = keep
    cluster.mag = keepMag
    #Loop variable survives the loop: magnitude of the last star examined
    return star.g_mag
def turboFit(cl=["all"],condensing='auto',weighting='pos',tp="catalogue",minScore=0.001):
    """Condense each cluster's CMD and fit isochrones over a reddening grid.

    Args:
        cl: cluster names (or 'all' sentinel handled by checkLoaded).
        condensing: 'auto', 'manual' (interactive picking), or 'load' (from CSV).
        weighting: condensed-point weighting scheme ('pos' or 'equal').
        tp: turning-point source, 'catalogue' or 'auto'.
        minScore: per-point fit-score floor passed down to shapeFit.

    Note: when condensing is 'manual', condense() suspends the pipeline and the
    matplotlib click handler resumes fitting later, so this returns early.
    """
    #Typical use cases are auto, pos, catalogue --OR-- manual, equal, catalogue
    #Imports
    import time
    global clusterList
    cList = checkLoaded(cl)
    print("=========================Fitting=========================")
    t0 = time.time()
    status = condense(cList,condensing,weighting,tp,minScore)
    if status == "Suspended":
        return
    for cluster in cList:
        redFitting(cluster,minScore,weighting)
        t1 = time.time()
        print(f"Total {cluster.name} fit runtime: {t1-t0} seconds")
def redFitting(cluster,minScore,weighting):
    """Fit isochrones to a cluster over a coarse-then-fine reddening grid.

    Runs shapeFit() for reddening 0 to 0.7 in 0.05 steps, then refines around
    the coarse best in 0.01 steps. cluster.iso ends up sorted by fit score
    ([iso name, score, reddening] rows) and the best-fit reddening, age, [Fe/H],
    [a/Fe] and Y are recorded on the cluster, along with its mean and
    galactocentric distances.

    Args:
        cluster: clusterObj instance or name (looked up in clusters).
        minScore: per-point score floor forwarded to shapeFit().
        weighting: weighting scheme forwarded to shapeFit().
    """
    #Imports
    import numpy as np
    from sys import stdout
    from time import sleep
    global clusterList
    if type(cluster) == str:
        cluster = clusters[cluster]
    cluster.iso = []
    #Coarse sweep
    redMin = 0
    redMax = 0.7
    step = 0.05
    redList = [round(x,2) for x in np.arange(redMin,redMax+step,step)]
    for reddening in redList:
        stdout.write(f"\rCurrent reddening value for {cluster.name}: {reddening:.2f} / ({redList[0]:.2f}->{redList[-1]:.2f})")
        shapeFit(cluster,reddening,minScore,weighting)
        stdout.flush()
        sleep(0.1)
    #Lower score is better; entry [2] is the reddening that produced it
    cluster.iso = sorted(cluster.iso,key=lambda x: x[1])
    best = float(cluster.iso[0][2])
    print(f"\nCoarse-step reddening for {cluster.name}: {best}")
    #Fine sweep around the coarse best, skipping values already evaluated
    subMin = best - 0.05
    subMax = best + 0.05
    substep = 0.01
    if subMin < 0:
        subMin = 0
    subList = [round(x,2) for x in np.arange(subMin,subMax+substep,substep) if not round(x,2) in redList and round(x,2) > subMin and round(x,2) < subMax]
    for reddening in subList:
        stdout.write(f"\rCurrent fine-step reddening value for {cluster.name}: {reddening:.2f} / ({subList[0]:.2f}->{subList[-1]:.2f})")
        shapeFit(cluster,reddening,minScore,weighting)
        stdout.flush()
        sleep(0.1)
    cluster.iso = sorted(cluster.iso,key=lambda x: x[1])
    cluster.reddening = float(cluster.iso[0][2])
    cluster.fit_age = float(isochrones[cluster.iso[0][0]].age)
    cluster.fit_feh = float(isochrones[cluster.iso[0][0]].feh)
    cluster.fit_afe = float(isochrones[cluster.iso[0][0]].afe)
    cluster.fit_y = float(isochrones[cluster.iso[0][0]].y)
    #Unrelated properties but I needed somewhere to assign them
    setattr(cluster,'meanDist',1000/cluster.mean_par)
    #Law-of-cosines distance from the galactic center (8 kpc assumed)
    meanL = np.mean([a.l*np.pi/180 for a in cluster.filtered])
    galDist = 8000 #pc
    gd = cluster.meanDist**2 + galDist**2 - 2*cluster.meanDist*galDist*np.cos(meanL)
    setattr(cluster,'meanGalacticDist',gd**0.5)
    #Bug fix: report the final fine-step reddening (previously printed the
    #coarse-step value); also dropped an unused 'import math'
    print(f"\nReddening for {cluster.name}: {cluster.reddening}")
def shapeFit(cluster,reddening,minScore,weighting):
    """Score every loaded isochrone against the cluster's condensed CMD points.

    Each isochrone is shifted by the candidate reddening (color axis) and by
    dist_mod + conversion*reddening (magnitude axis), then turned into a
    shapely LineString; the score is the sum of squared, weight-scaled
    point-to-line distances, floored at minScore per point. Rows of
    [iso name, score, reddening] are appended to cluster.iso.

    Args:
        cluster: clusterObj with condensed points and dist_mod set.
        reddening: candidate E(BP-RP) value.
        minScore: per-point distance floor (keeps single points from vanishing).
        weighting: NOTE(review) accepted but never read here — the weights come
            from star.weight on the condensed points; confirm this parameter
            is vestigial.
    """
    #Imports
    import numpy as np
    import shapely.geometry as geom
    global isoList
    #Ratio applied to convert BP-RP reddening into G-band dimming;
    #presumably A_G/E(BP-RP) — TODO confirm the adopted extinction law
    conversion = 2.1
    isoFitList = np.empty((0,3))
    for iso in isoList:
        isoLine = geom.LineString(tuple(zip([x+reddening for x in iso.br],[x+cluster.dist_mod+conversion*reddening for x in iso.g])))
        dist = []
        for star in cluster.condensed:
            starPt = geom.Point(star.b_r,star.g_mag)
            pointDist = np.abs(starPt.distance(isoLine))*star.weight
            #Floor each contribution so near-perfect points still register
            if pointDist < minScore*star.weight:
                pointDist = minScore*star.weight
            dist.append(pointDist**2)
        isoScore = np.sum(dist[:])
        isoFitList = np.r_[isoFitList,[[iso.name,float(isoScore),float(reddening)]]]
    cluster.iso.extend(isoFitList)
def onclick(x,y,fig,ax,cluster,minScore,weighting,newList):
    """Build the matplotlib mouse handler used for manual condensed-point picking.

    Left click adds a point to the global coords list; right click removes the
    first stored point within the click tolerance and redraws; middle click
    (or reaching 100 points) finishes: the handler disconnects itself, closes
    the figure, and hands the picked points to updateCondensed().

    Args:
        x, y: background scatter data (color, magnitude) for redraws.
        fig, ax: the interactive figure and axes.
        cluster, minScore, weighting, newList: forwarded to updateCondensed().

    Returns:
        The event-callback closure to pass to fig.canvas.mpl_connect.
    """
    def func(event):
        import matplotlib.pyplot as plt
        global coords
        ix, iy = event.xdata, event.ydata
        if str(event.button) == "MouseButton.RIGHT":
            #Delete the first stored point near the click, then redraw everything
            for i,(cx,cy) in enumerate(coords):
                if abs(ix-cx) <= 0.075 and abs(iy-cy) <= 0.25:
                    coords.pop(i)
                    ax.clear()
                    ax.scatter(x,y,s=0.5,color='dimgray')
                    ax.invert_yaxis()
                    ax.scatter([a[0] for a in coords],[a[1] for a in coords],c='red',s=10)
                    plt.gcf().canvas.draw_idle()
        if str(event.button) == "MouseButton.LEFT":
            coords.append((ix, iy))
            ax.scatter(ix,iy,c='red',s=10)
            plt.gcf().canvas.draw_idle()
        if str(event.button) == "MouseButton.MIDDLE":
            #Finish: disconnect, close, and resume the fitting pipeline
            fig.canvas.mpl_disconnect(cid)
            plt.close(fig)
            updateCondensed(cluster,minScore,weighting,newList)
        #Safety valve: auto-finish once 100 points have been placed
        if len(coords) >= 100:
            fig.canvas.mpl_disconnect(cid)
            plt.close(fig)
            updateCondensed(cluster,minScore,weighting,newList)
            return
    return func
def updateCondensed(cluster,minScore,weighting,newList):
    """Finalize manually-picked condensed points and resume the fitting pipeline.

    Converts the global coords list into condensedPoint objects (unit weight),
    stores them on the cluster, saves them to condensed.csv, reruns the
    reddening fit, and restarts turboFit for any clusters queued behind this
    manual picking session.

    Args:
        cluster: the cluster whose points were just picked.
        minScore, weighting: fitting parameters passed through to redFitting.
        newList: remaining clusters to process after this one.
    """
    #Imports
    import numpy as np
    global coords
    condensed = []
    for point in coords:
        if cluster.clType.lower() == "globular" or weighting.lower() == "equal":
            weight = 1
        else:
            #Automatic weighting scheme currently unsupported for manual condensed point definition,
            #but the framework is here to be able to insert it without having to worry about it being
            #passed around from function to function
            weight = 1
        condensed.append(condensedPoint(point[0],point[1],weight))
    #condensed0 snapshots the zero-reddening point set
    if cluster.reddening == 0:
        cluster.condensed0 = condensed
    cluster.condensed = condensed
    np.savetxt(f"{cluster.dataPath}condensed.csv",coords,delimiter=',')
    redFitting(cluster,minScore,weighting)
    if len(newList) > 0:
        turboFit(newList,'manual',weighting,'catalogue',minScore)
    return
def find_nearest(array, value):
    """Return the element of `array` whose value is closest to `value`.

    Args:
        array: array-like of numbers.
        value: target number.

    Returns:
        The nearest element (first one on ties, per argmin).
    """
    #Imports
    import numpy as np
    arr = np.asarray(array)
    return arr[np.abs(arr - value).argmin()]
def testCluster(name='feh_0.00_afe_0.00_age_0.141_y_0.2703'):
    """Build a synthetic cluster named 'test' from one isochrone's stars.

    Wraps each isochrone star in a starObj so the fitting pipeline can be
    exercised against a perfectly clean CMD. Only the G magnitude and BP-RP
    color slots carry real data; every other positional field is zero-filled.
    Replaces any previously created 'test' cluster and refreshes the lookup
    dicts via toDict().

    Args:
        name: key into the isochrones dict to draw the synthetic stars from.
    """
    #Imports
    import numpy as np
    global clusterList
    global clIn
    iso = isochrones[name]
    test = clusterObj('test')
    #Positional slots for g_mag and b_r carry real values; the rest are dummies
    filtered = [starObj('fake',0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,a.Gaia_G_EDR3,0,0,0,0,0,0,0,a.Gaia_BP_EDR3-a.Gaia_RP_EDR3,0,0,0,0,0,0,0,0,0,0,0) for a in iso.starList]
    test.filtered = filtered
    mag = np.empty((0,2))
    for star in test.filtered:
        mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    test.mag = mag
    #Replace any previously-created test cluster
    if not 'test' in clusters:
        clusterList.append(test)
    else:
        idx = clusterList.index(clusters['test'])
        clusterList.pop(idx)
        clusterList.append(test)
    clIn = True
    toDict()
def condense(cList,condensing,weighting,tp,minScore=0.001):
    """Reduce each cluster's filtered CMD to a short list of condensed points.

    Modes (via `condensing`): 'load' reads points from the cluster's saved
    condensed.csv; 'manual' opens an interactive picker and suspends the
    pipeline (returns "Suspended"); otherwise points are medians of 50
    brightness slices. The turning point comes from the catalogue (`tp` ==
    'catalogue') or from a gradient-change search ('auto'). Open clusters get a
    second pass that excludes blue stragglers, tags binaries, and assigns
    Gaussian positional weights.

    Bug fixes vs. the original:
    - the G-magnitude sort of `mag` was computed but discarded, so gmin/gmax
      came from an unsorted array;
    - `tp == "auto"` was a no-op comparison where `tp = "auto"` was intended,
      so clusters without a catalogued turning point never fell back to auto
      detection;
    - the "no turning point" message lacked its f-prefix;
    - the later `tp.lower()` check is now guarded with isinstance, because the
      (now reachable) auto branch rebinds `tp` to a numpy point.

    Args:
        cList: cluster objects or names (names are resolved in-place).
        condensing: 'load', 'manual', or anything else for automatic slicing.
        weighting: 'pos' applies Gaussian positional weights (open clusters).
        tp: turning-point source, 'catalogue' or 'auto'.
        minScore: forwarded to the fitting chain for manual mode.

    Returns:
        "Suspended" when manual picking hands control to the click handler,
        otherwise None.
    """
    #Imports
    import numpy as np
    global isoList
    global mag
    for cluster in cList:
        if type(cluster) == str:
            cluster = clusters[cluster]
            cList[cList.index(cluster.name)] = cluster
        #Creates mag arrays to be used in place of the filtered star objects
        mag = cluster.mag[:,:]
        #Bug fix: keep the sorted array (the bare expression was discarded before)
        mag = mag[mag[:,1].argsort()]
        gmag = list(mag[:,1])
        gmin = mag[0,1]
        gmax = mag[-1,1]
        div = 50
        seg = (gmax-gmin)/div
        minpoints = 1
        #The array that will become the condensed points list
        condensed = np.empty((0,3))
        turnPoints = []
        if condensing.lower() == "load":
            global pts
            pts = np.genfromtxt(f"{cluster.dataPath}condensed.csv",delimiter=',')
            condensed = []
            for point in pts:
                #Missing alternate weighting schemes, but can be implemented *here*
                condensed.append(condensedPoint(point[0],point[1],1))
            cluster.condensed = condensed
            cluster.condensed0 = condensed
            continue
        #Manual point definition
        if condensing.lower() == "manual":
            import matplotlib.pyplot as plt
            global cid
            global coords
            coords = []
            #Clusters queued after this one resume once picking finishes
            if len(cList) == 1:
                newList = []
            else:
                newList = cList[cList.index(cluster)+1:]
            x,y = mag[:,0],mag[:,1]
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.scatter(x,y,s=0.25,color='dimgray')
            ax.invert_yaxis()
            hook = onclick(x,y,fig,ax,cluster,minScore,weighting,newList)
            cid = fig.canvas.mpl_connect('button_press_event', hook)
            return "Suspended"
        #Vertically stacked slices in brightness; each contributes its median point
        for i in range(div):
            sliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
            #Skip foreseen problems with empty arrays
            if len(sliced) < minpoints:
                continue
            condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]
        condensed = condensed[::-1]
        #Uses defined turning points in the cluster catalogue
        if tp.lower() == "catalogue":
            if cluster.cltpx <= -98 and cluster.cltpy <= -98:
                #Bug fix: was 'tp == "auto"', a no-op comparison; clusters without
                #a catalogued turning point now actually fall through to auto
                tp = "auto"
        #If no turning point is found, or auto is specified, then this section of code
        #attempts to find the turning point through steep gradient changes in the main sequence
        if tp.lower() == "auto":
            #Criteria for the line that forms the basis of the gradient change method
            start = 4
            end = 11
            theta_crit = 5
            #Creates a slope-intercept fit for the lower main sequence
            basex = [a[0] for a in condensed[start:end]]
            basey = [a[1] for a in condensed[start:end]]
            base = np.polyfit(basex,basey,1)
            #Travels up the main sequence
            for i,point in enumerate(condensed):
                if i == start:
                    continue
                #Creates a fit line between the start point and the current point
                x = [point[0],condensed[start,0]]
                y = [point[1],condensed[start,1]]
                lin = np.polyfit(x,y,1)
                #Calculates an angle between the new line and the lower main sequence
                point[2] = 180/np.pi*np.arctan(abs( (base[0]-lin[0])/(1+base[0]*lin[0]) ))
                #If the angle between the two lines is large enough, the point is considered
                #to be a candidate turning point, and is appended to the list of candidates
                if point[2] > theta_crit and i > end:
                    turnPoints.append(point)
            #Analysis plot showing the theta value for each condensed point
            import matplotlib.pyplot as plt
            plt.figure()
            plt.scatter(condensed[:,0],condensed[:,1],c=condensed[:,2])
            plt.set_cmap('brg')
            plt.gca().invert_yaxis()
            clb = plt.colorbar()
            clb.ax.set_title("Theta")
            plt.savefig(f'condensed_{cluster.name}')
            #If no automatic turning point is found, ends the method here
            if len(turnPoints) == 0:
                #Bug fix: added the missing f-prefix so the name is interpolated
                print(f"No turning point identified for {cluster.name}")
                return
            else:
                #Identifies the proper turning point as a 5% color offset of the dimmest turning point candidate
                turnPoints = sorted(turnPoints,key=lambda x: x[1])
                tp = turnPoints[-1]
                tp[0] = tp[0] - 0.05*np.abs(tp[0])
                cluster.turnPoint = tp
                #Stores the condensed point list
                cl = []
                for point in condensed:
                    cl.append(condensedPoint(point[0],point[1],point[2]))
                cluster.condensedInit = cl
                # [ B-R , G , Theta ]
                print(f"{cluster.name} Turning Point: {cluster.turnPoint}")
        #Assuming the undefined catch for manual would be caught the first time around
        #Bug fix: guard with isinstance — after the auto branch, tp can be a numpy
        #point, which has no .lower()
        if isinstance(tp, str) and tp.lower() == "catalogue":
            cluster.turnPoint = [cluster.cltpx,cluster.cltpy]
        if cluster.clType.lower() == "open":
            #Recalc with the turnPoint limit enforced - Ignore blue stragglers
            condensed = np.empty((0,3))
            condensed_giant = np.empty((0,3))
            yList = []
            #Vertically stacked slices in brightness
            for i in range(div):
                rawSliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
                sliced = np.empty((0,2))
                sliced_giant = np.empty((0,2))
                #Split each slice at the turning-point color: red of it is main
                #sequence, blue of it is giant/straggler territory
                for point in rawSliced:
                    if point[0] >= cluster.turnPoint[0]:
                        sliced = np.r_[sliced,[[point[0],point[1]]]]
                    else:
                        sliced_giant = np.r_[sliced_giant,[[point[0],point[1]]]]
                #Skip foreseen problems with empty arrays
                if len(sliced) > 0:
                    x = np.median(sliced[:,0])
                    y = np.median(sliced[:,1])
                    yList.append(y)
                    condensed = np.r_[condensed,[[x,y,1]]]
                if len(sliced_giant) > 3:
                    xg = np.median(sliced_giant[:,0])
                    yg = np.median(sliced_giant[:,1])
                    condensed_giant = np.r_[condensed_giant,[[xg,yg,1]]]
            #New turning point found from the reduced data set
            newTP = find_nearest(yList,cluster.turnPoint[1])
            index = 0
            for i,point in enumerate(condensed):
                if newTP == point[1]:
                    index = i
                    break
            assert not index == 0
            #Binary star list: fit the main sequence below the turning point
            tpcut = index + 3
            xset = condensed[tpcut:-1,0]
            yset = condensed[tpcut:-1,1]
            fit = np.polyfit(xset,yset,1)
            #Distance from the main sequence linear fit
            for star in cluster.filtered:
                x0 = star.b_r
                y0 = star.g_mag
                dist = abs( y0 - fit[0]*x0 - fit[1] ) / np.sqrt(fit[0]**2 + 1)
                star.distance_MS = dist
                #Brighter than the MS fit and red of the turning point -> binary candidate
                if dist > 0.05 and y0 < fit[0]*x0+fit[1] and x0 > xset[0] and y0 > condensed[index,1]:
                    cluster.binaries.append(star)
                    star.binary = 1
                else:
                    star.binary = 0
            #Fit weight parameters: Gaussian in slice position, centered near the TP
            N = len(condensed)
            beta = -2
            index = index - 7
            for i,point in enumerate(condensed):
                if weighting.lower() == 'pos':
                    point[2] = np.exp(beta*((i-index)/N)**2)
        condensed = condensed[::-1]
        cl = []
        coords = []
        for point in condensed:
            cl.append(condensedPoint(point[0],point[1],point[2]))
            coords.append((point[0],point[1]))
        np.savetxt(f"{cluster.dataPath}condensed.csv",coords,delimiter=',')
        #condensed0 snapshots the zero-reddening point set
        if cluster.reddening == 0:
            cluster.condensed0 = cl
        cluster.condensed = cl
# def checkLoaded(cList):
# needsLoading = []
# loaded = []
# for cl in cList:
# if not cl in clusters:
# needsLoading.append(cl)
# else:
# loaded.append(cl)
# return loaded,needsLoading()
def toDict():
    """Rebuild the name -> object lookup dicts from the backing lists.

    Refreshes `clusters`, `isochrones`, and `results` from `clusterList`,
    `isoList`, and `resultList` respectively, but only for the collections
    whose load flags (clIn/isoIn/resultsIn) are set.
    """
    #Imports
    global clusterList
    global clusters
    global isoList
    global isochrones
    global resultList
    global results
    global clIn
    global isoIn
    global resultsIn
    if clIn:
        clusters = {cluster.name: cluster for cluster in clusterList}
    if isoIn:
        isochrones = {iso.name: iso for iso in isoList}
    if resultsIn:
        results = {res.name: res for res in resultList}
def plot(cList=['all'],modes=['pos','pm','cmd','quiver','iso'],closePlots=False):
#Imports
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
import os
global clusterList
cList = checkLoaded(cList)
for cl in cList:
cluster = clusters[cl]
if not os.path.isdir(f"{cluster.imgPath}/png"):
os.mkdir(f"{cluster.imgPath}/png")
#Position plots
if 'pos' in modes:
unfra=[star.ra for star in cluster.unfilteredWide]
unfdec=[star.dec for star in cluster.unfilteredWide]
ra=[star.ra for star in cluster.filtered]
dec=[star.dec for star in cluster.filtered]
unfnormra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide]
normra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered]
#Unfiltered position plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unfra[:],unfdec[:],s=0.5,c='dimgray')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered.png",dpi=500)
#Filtered position plot
plt.figure(f"{cluster.name}_ra_dec_filtered")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Filtered")
plt.scatter(ra[:],dec[:],s=0.5,c='midnightblue')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered.png",dpi=500)
#Position overlay
plt.figure(f"{cluster.name}_ra_dec_overlay")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unfra[:],unfdec[:],s=0.5,c='lightgray')
plt.scatter(ra[:],dec[:],s=1,c='midnightblue')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay.png",dpi=500)
#Normalized
#NormRA = RA*cos(DEC)
#Unfiltered normalized position plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered Normalized")
plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='dimgray')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_normalized.png",dpi=500)
#Filtered normalized position plot
plt.figure(f"{cluster.name}_ra_dec_filtered_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Filtered Normalized")
plt.scatter(normra[:],dec[:],s=0.5,c='midnightblue')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_filtered_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered_normalized.png",dpi=500)
#Position overlay normalized
plt.figure(f"{cluster.name}_ra_dec_overlay_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Overlay Normalized")
plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='lightgray')
plt.scatter(normra[:],dec[:],s=1,c='midnightblue')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_overlay_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay_normalized.png",dpi=500)
#Proper motion plots
if 'pm' in modes:
unfpmra=[star.pmra for star in cluster.unfilteredWide]
unfpmdec=[star.pmdec for star in cluster.unfilteredWide]
pmra=[star.pmra for star in cluster.filtered]
pmdec=[star.pmdec for star in cluster.filtered]
unfpara=[star.par for star in cluster.unfilteredWide]
para=[star.par for star in cluster.filtered]
x0 = cluster.pmra_min
x1 = cluster.pmra_max
y0 = cluster.pmdec_min
y1 = cluster.pmdec_max
width = x1-x0
scale = 5
subscale = 2
xmin = x0-scale*width
xmax = x1+scale*width
ymin = y0-scale*width
ymax = y1+scale*width
sxmin = x0-subscale*width
sxmax = x1+subscale*width
symin = y0-subscale*width
symax = y1+subscale*width
#Unfiltered proper motion plot
plt.figure(f"{cluster.name}_pm_unfiltered")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_unfiltered.png",dpi=500)
plt.xlim([sxmin,sxmax])
plt.ylim([symin,symax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_unfiltered_closeup.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_unfiltered_closeup.png",dpi=500)
#Filtered proper motion plot
plt.figure(f"{cluster.name}_pm_filtered")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Filtered")
plt.scatter(pmra[:],pmdec[:],s=0.5,c='midnightblue')
# plt.xlim([xmin,xmax])
# plt.ylim([ymin,ymax])
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_filtered.png",dpi=500)
#Proper motion overlay
plt.figure(f"{cluster.name}_pm_overlay")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='lightgray')
plt.scatter(pmra[:],pmdec[:],s=1,c='midnightblue')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_overlay.png",dpi=500)
plt.xlim([sxmin,sxmax])
plt.ylim([symin,symax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_overlay_closeup.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_overlay_closeup.png",dpi=500)
#Unfiltered PM/Parallax
plt.figure(f"{cluster.name}_pm_over_parallax_unfiltered")
plt.xlabel('PMRA / Parallax')
plt.ylabel('PMDEC / Parallax')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter([a/b for a,b in zip(unfpmra,unfpara)],[a/b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_over_parallax_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_over_parallax_unfiltered.png",dpi=500)
#Unfiltered PM*Parallax
plt.figure(f"{cluster.name}_pm_times_parallax_unfiltered")
plt.xlabel('PMRA * Parallax')
plt.ylabel('PMDEC * Parallax')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter([a*b for a,b in zip(unfpmra,unfpara)],[a*b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_times_parallax_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_times_parallax_unfiltered.png",dpi=500)
#CMD plots
if 'cmd' in modes:
unfgmag=[star.g_mag for star in cluster.unfilteredWide]
unf_b_r=[star.b_r for star in cluster.unfilteredWide]
gmag=[star.g_mag for star in cluster.filtered]
b_r=[star.b_r for star in cluster.filtered]
bright_b_r = [x.b_r for x in cluster.filteredBright]
bright_gmag = [x.g_mag for x in cluster.filteredBright]
par_b_r = [x.b_r for x in cluster.distFiltered]
par_gmag = [x.g_mag for x in cluster.distFiltered]
#Reddening Correction
plt.figure(f"{cluster.name}_reddening_CMD")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('G Mag')
plt.title(f"{cluster.name} Reddening = {cluster.reddening:.2f}")
plt.scatter(b_r[:],gmag[:],s=0.5,c='dimgray',label='Observed')
plt.arrow(b_r[int(len(b_r)/2)]-cluster.reddening,gmag[int(len(gmag)/2)]-2.1*cluster.reddening,cluster.reddening,2.1*cluster.reddening,color='red')
plt.scatter([s-cluster.reddening for s in b_r[:]],[s-2.1*cluster.reddening for s in gmag[:]],s=1,c='midnightblue',label='Corrected')
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_reddening_CMD.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_reddening_CMD.png",dpi=500)
#Unfiltered CMD plot
plt.figure(f"{cluster.name}_CMD_unfiltered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_unfiltered.png",dpi=500)
#Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Parallax & Proper Motion Filtered")
plt.scatter(b_r[:],gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_filtered.png",dpi=500)
#CMD overlay
plt.figure(f"{cluster.name}_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')
plt.scatter(b_r[:],gmag[:],s=1,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_overlay.png",dpi=500)
#Condensed CMD overlay
plt.figure(f"{cluster.name}_condensed_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Condensed Overlay")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c='red',label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_condensed_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_condensed_CMD_overlay.png",dpi=500)
#Weighted CMD overlay
plt.figure(f"{cluster.name}_weighted_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Weighted Overlay")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.set_cmap('brg')
clb = plt.colorbar()
clb.ax.set_title("Weight")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_weighted_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_weighted_CMD_overlay.png",dpi=500)
#Initial Condensed CMD overlay
plt.figure(f"{cluster.name}_initial_condensed_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Initial Condensed Overlay")
plt.scatter(b_r,gmag,s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r for s in cluster.condensedInit],[s.g_mag for s in cluster.condensedInit],s=5,c='red',label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_initial_condensed_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_initial_condensed_CMD_overlay.png",dpi=500)
#Brightness-PM Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_bright_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Bright-Only Proper Motion Filtered")
plt.scatter(bright_b_r[:],bright_gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_bright_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_bright_filtered.png",dpi=500)
#Parallax Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_parallax_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Parallax Filtered")
plt.scatter(par_b_r[:],par_gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_parallax_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_parallax_filtered.png",dpi=500)
if 'quiver' in modes:
unfra=[star.ra for star in cluster.unfilteredWide]
unfdec=[star.dec for star in cluster.unfilteredWide]
unfpmra=[star.pmra for star in cluster.unfilteredWide]
unfpmdec=[star.pmdec for star in cluster.unfilteredWide]
x0 = min([s.ra for s in cluster.filtered])
x1 = max([s.ra for s in cluster.filtered])
y0 = min([s.dec for s in cluster.filtered])
y1 = max([s.dec for s in cluster.filtered])
width = x1-x0
scale = 0.25
xmin = x0+scale*width
xmax = x1-scale*width
ymin = y0+scale*width
ymax = y1-scale*width
#Unfiltered position quiver plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered_quiver")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered")
ax = plt.gca()
ax.quiver(unfra[:],unfdec[:],unfpmra[:],unfpmdec[:],color='midnightblue',width=0.003,scale=400,scale_units='width')
plt.axis("square")
plt.gcf().set_size_inches(10,10)
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver.png",dpi=500)
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.png",dpi=500)
#Isochrone plots
if 'iso' in modes:
gmag=[star.g_mag for star in cluster.filtered]
b_r=[star.b_r for star in cluster.filtered]
isochrone = isochrones[cluster.iso[0][0]]
#Isochrone best fit
plt.figure(f"{cluster.name}_Iso_best")
plt.gca().invert_yaxis()
plt.xlabel('Dereddened BP-RP')
plt.ylabel('Corrected Absolute G Mag')
plt.title(f"{cluster.name} Isochrone Best Fit")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening-cluster.dist_mod for s in gmag],s=0.5,c='dimgray',label='Cluster')
isoLabels = isochrone.name.split('_')
isoLabel = r"$[\frac{Fe}{H}]$" + "=" + isoLabels[1] + "\n" \
+ r"$[\frac{\alpha}{Fe}]$" + "=" + isoLabels[3] + "\n" \
+ r"$[Y]$" + "=" + isoLabels[7] + "\n" \
+ "Age" + "=" + isoLabels[5] + " Gyr"
plt.plot(isochrone.br,isochrone.g,c='midnightblue',label=isoLabel)
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening-cluster.dist_mod for s in cluster.condensed],s=5,c='red',label='Cluster Proxy')
extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
h,l = plt.gca().get_legend_handles_labels()
h.insert(0,extra)
l.insert(0,f"Reddening: {cluster.reddening}")
plt.legend(h,l)
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_Iso_BestFit.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_Iso_BestFit.png",dpi=500)
#Membership plots
if 'membership' in modes:
proxyMatch([cl])
boundedStats([cl],saveCl=False,unloadCl=False)
membership(cl,mode='filtered')
membership(cl,mode='bounded',N=50)
#3D Position plots
if '3D' in modes:
A = [a.ra * np.pi/180 for a in cluster.filtered]
B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
C = [1/(1000*c.par) for c in cluster.filtered]
x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
z = [c*np.sin(b) for b,c in zip(B,C)]
r = [np.sqrt(a**2+b**2) for a,b in zip(x,y)]
theta = [np.arctan(b/a) for a,b in zip(x,y)]
plt.figure(f"{cluster.name}_3D_Position")
ax = plt.axes(projection='3d')
ax.scatter3D(x,y,z)
ax.scatter(0,0,0,color='red')
scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
if closePlots:
plt.close('all')
# def Plot3D(cList):
# #Imports
# import matplotlib.pyplot as plt
# import numpy as np
# global clusterList
# needsLoading=[]
# plt.figure(f"3D_Position_Ensemble")
# ax = plt.axes(projection='3d')
# for cl in cList:
# if not cl in clusters:
# needsLoading.append(cl)
# if not len(needsLoading) == 0:
# loadClusters(needsLoading)
# for cl in cList:
# cluster = clusters[cl]
# A = [a.ra * np.pi/180 for a in cluster.filtered]
# B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
# C = [1/(0.001*c.par) for c in cluster.filtered]
# #Flatten radially
# C = [np.mean(C)]*len(C)
# x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
# y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
# z = [c*np.sin(b) for b,c in zip(B,C)]
# #Force Cluster to origin
# # x = [a-np.mean(x) for a in x]
# # y = [a-np.mean(y) for a in y]
# # z = [a-np.mean(z) for a in z]
# ax.scatter3D(x,y,z,label=cluster.name)
# scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
# ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
# #ax.scatter(0,0,0,color='black')
# plt.legend()
def yso_lookup():
    """Cross-match a YSO object list against SIMBAD and export coordinate files.

    Reads "Excess Examples/YSO_object_list.dat", reconstructs each object's
    name from its whitespace-separated columns, resolves it through a SIMBAD
    query, converts the returned sexagesimal RA/DEC into decimal degrees, and
    writes two text files: one in the VOSA upload format and one containing
    bare RA/DEC pairs for a Gaia query.
    """
    #Imports
    from astroquery.simbad import Simbad
    import numpy as np
    import os
    import re
    # Globals are exposed for interactive inspection after the run
    global names
    global sect
    global results
    global ra
    global dec
    main = open("Excess Examples/YSO_object_list.dat").read()
    # Final element after split is an empty string from the trailing newline
    main = main.split("\n")[:-1]
    #Get the names of all of the objects identified
    names = []
    ra = []
    dec = []
    validNames = []
    for row in main:
        sect = re.split('\s+',row)
        # Leading whitespace yields an empty first token; drop it
        if sect[0] == '':
            sect = sect[1:]
        # Third column holds the object name; 'none' means no identifier given
        if sect[2] == 'none':
            continue
        name = sect[2]
        # Component suffixes (binary/multiple designations) end the name
        blacklist = ['A','Ab','AB','ABC','B','AaB']
        for entry in sect[3:]:
            if '.' in entry or entry in blacklist:
                break
            name = name + " " + entry
        names.append(name)
    #Perform a SIMBAD query for the identified objects
    results = []
    for name in names:
        result = Simbad.query_object(name)
        # SIMBAD returns None for unresolved names; keep only real matches
        if not type(result) == type(None):
            results.append(result)
            validNames.append(name.replace(' ',''))
            # RA column arrives as sexagesimal "HH MM SS.S"; convert to degrees
            ra1 = str(result.columns['RA']).split('\n')[-1]
            ra1 = re.split('\s+',ra1)
            if '' in ra1:
                ra.append('---')
            else:
                ra.append(str(round(float(ra1[0])*15+float(ra1[1])/4+float(ra1[2])/240,5)))
            # DEC column arrives as "DD MM SS.S"; convert to degrees
            # NOTE(review): for negative declinations the MM/SS terms should be
            # subtracted, not added — verify against SIMBAD output.
            dec1 = str(result.columns['DEC']).split('\n')[-1]
            dec1 = re.split('\s+',dec1)
            if '' in dec1:
                dec.append('---')
            else:
                dec.append(str(round(float(dec1[0])+float(dec1[1])/60+float(dec1[2])/3600,5)))
    #Create a text file in the VOSA readable format
    VOSAdata = []
    gaiadata = []
    for i in range(len(validNames)):
        line1 = f"{validNames[i]} {ra[i]} {dec[i]} --- --- --- --- --- --- ---"
        line2 = f"{ra[i]} {dec[i]}"
        VOSAdata.append(line1)
        # Objects with an unresolved ('---') coordinate are excluded from the Gaia list
        if '-' in line2:
            continue
        gaiadata.append(line2)
    np.savetxt("Excess Examples/yso_vosa_output.txt",VOSAdata,fmt="%s")
    np.savetxt("Excess Examples/yso_gaia_output.txt",gaiadata,fmt="%s")
def exportVOSA(cl):
    """Write the filtered members of a cluster to a VOSA-format upload file."""
    #Imports
    import numpy as np
    if not cl in clusters:
        loadClusters([cl])
    cluster = clusters[cl]
    # Column layout expected by VOSA:
    #objname RA DEC DIS Av Filter Flux Error PntOpts ObjOpts
    rows = [
        f"{star.name.replace(' ','')} {star.ra} {star.dec} {1000/star.par} --- --- --- --- --- ---"
        for star in cluster.filtered
    ]
    np.savetxt(f"{cluster.dataPath}{cluster.name}_VOSA.txt",rows,fmt="%s")
def readSED(cList=['all'],printMissing=False):
    """Attach VOSA SED measurements to each cluster's filtered stars.

    For every cluster in cList, walks the VOSA results directory
    (<dataPath>/vosa_results/objects/), matches each object folder to a
    filtered member by a normalized name (whitespace and DR2/EDR3/DR3
    suffixes removed), parses the folder's .sed.dat table into vosaPoint
    objects, and stores them on the star. Stars without a VOSA folder are
    dropped from cluster.stars at the end.

    Parameters:
        cList: list of cluster names, or ['all'] for every available cluster.
        printMissing: if True, report VOSA folders with no matching member.
    """
    #imports
    import numpy as np
    import re
    import os
    cList = checkLoaded(cList)
    for cl in cList:
        cluster = clusters[cl]
        objPath = cluster.dataPath + "vosa_results/objects/"
        names = []
        # Build a lookup of filtered stars keyed by their normalized ("flat") name
        for star in cluster.filtered:
            flat = star.name.replace(" ","").replace("DR2","").replace("EDR3","").replace("DR3","")
            names.append(flat)
            star.flatName = flat
        cluster.stars = dict(zip(names,cluster.filtered))
        idx = 0
        newStars = dict()
        #Each star in a cluster has its own folder, and each folder contains several data sets
        for folder in os.listdir(objPath):
            fileName = folder.replace("DR2","").replace("EDR3","").replace("DR3","")
            #Weed out VOSA stars not in current filtered members list
            if not fileName in cluster.stars:
                if printMissing:
                    print(f"{fileName} is missing from filtered list, skipping it...")
                continue
            main = open(objPath+folder+"/sed/"+folder+".sed.dat").read()
            main = main.split("\n")
            # First 10 lines are header text; the final element is the empty
            # string after the trailing newline
            data = main[10:-1]
            #Create a list of measurement object pointers to attach to the stars later
            measurements = []
            #Convert every line of the data set into a vosaPoint object
            for row in data:
                # Leading/trailing whitespace produces empty edge tokens; strip them
                sect = re.split('\s+',row)[1:-1]
                measurements.append(vosaPoint(str(sect[0]),float(sect[1]),float(sect[2]),float(sect[3]),float(sect[4]),float(sect[5]),float(sect[6])))
            cluster.stars[fileName].vosaPoints = measurements
            #Weed out cluster.stars members who do not have a vosa table
            newStars[fileName] = cluster.stars[fileName]
            idx += 1
        cluster.stars = newStars
def checkBinary(cl):
    """Open an interactive CMD on which binaries can be lasso-selected.

    Wraps the cluster's filtered stars in Datum objects and hands them to a
    LassoManager bound to a fixed-extent color-magnitude axes; selection is
    done with the left mouse button in the spawned window.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([cl])
    cluster = clusters[cl]
    # Kept at module scope so the manager (and its event callbacks) outlives
    # this function call while the window is open
    global lman
    data = [Datum(star.b_r,star.g_mag) for star in cluster.filtered]
    # ax = plt.axes(xlim=(cluster.min_b_r-0.25,cluster.max_b_r+0.25), ylim=(cluster.min_g_mag-1,cluster.max_g_mag+1),autoscale_on=False)
    ax = plt.axes(xlim=(0, 2.5), ylim=(8, 20), autoscale_on=False)
    ax.invert_yaxis()
    ax.set_title('Lasso points using left mouse button')
    lman = LassoManager(ax, data,cluster)
    plt.show()
def vosaBinaries(cl):
    """Prepare per-star output for flagged binaries (currently a stub).

    Creates the vosaBinaries image directory and walks the cluster's VOSA-
    matched stars. Fix: the original used `return` on the first non-binary
    star, which aborted the scan of all remaining stars; `continue` skips
    only that star.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    checkLoaded([cl])
    cluster = clusters[cl]
    if not os.path.isdir(f"{cluster.imgPath}vosaBinaries/"):
        os.mkdir(f"{cluster.imgPath}vosaBinaries/")
    for star in cluster.stars.values():
        # Skip non-binaries; plotting for binaries is not yet implemented here
        if not star.binary == 1:
            continue
def excessIR(cl,plot=True):
    """Flag stars showing infrared excess and optionally plot their SEDs.

    A star is flagged (hasExcess = 1) when any of its VOSA points reports a
    positive excess value; flagged stars can also have their flux-vs-
    wavelength SED saved to the cluster's excessIR image directory.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    checkLoaded([cl])
    cluster = clusters[cl]
    if not os.path.isdir(f"{cluster.imgPath}excessIR/"):
        os.mkdir(f"{cluster.imgPath}excessIR/")
    for star in cluster.stars.values():
        # Any single VOSA point with positive excess marks the star
        if not any(vp.excess > 0 for vp in star.vosaPoints):
            continue
        star.hasExcess = 1
        if not plot:
            continue
        # Log-log SED for the flagged star
        plt.figure(f'{cluster.name} - {star.name}')
        plt.title(f'{cluster.name} : {star.name}')
        axes = plt.gca()
        axes.set_yscale('log')
        axes.set_xscale('log')
        plt.ylabel(r'Flux ($ergs^{-1}cm^{-2}\AA^{-1}$)')
        plt.xlabel(r'Wavelength ($\AA$)')
        wavelengths = [pt.wavelength for pt in star.vosaPoints]
        fluxes = [pt.flux for pt in star.vosaPoints]
        plt.scatter(wavelengths,fluxes)
        plt.savefig(f"{cluster.imgPath}excessIR/{star.name}.pdf")
        plt.savefig(f"{cluster.imgPath}excessIR/{star.name}.png",dpi=500)
def proxyMatch(cList,plot=False):
    """Assign each filtered star a proxy point on its cluster's best-fit isochrone.

    For every star, finds the isochrone point closest in reddening-corrected
    BP-RP color, subject to a 1-magnitude vertical (G) window; the matched
    point's mass, effective temperature, metallicity, and age are copied onto
    the star. Stars with no isochrone point within 0.2 mag in color get zeroed
    proxy fields. Also records cluster-level proxy-mass statistics and CMD
    extrema.

    Parameters:
        cList: list of cluster names to process.
        plot: accepted for interface compatibility; plotting is not implemented.
    """
    #Imports
    import matplotlib.pyplot as plt
    import numpy as np
    checkLoaded(cList)
    for cl in cList:
        cluster = clusters[cl]
        # Best-ranked isochrone fit for this cluster
        iso = isochrones[cluster.iso[0][0]]
        isoPoints = list(iso.starList)
        for star in cluster.filtered:
            minDist = 0.2          # maximum acceptable color separation (mag)
            smallestDist = 10      # closest rejected distance, for diagnostics
            vertCutoff = 1         # maximum G-magnitude separation (mag)
            minPoint = None
            for point in isoPoints:
                dist = abs(point.Gaia_BP_EDR3-point.Gaia_RP_EDR3-star.b_r+cluster.reddening)
                if dist < minDist:
                    if abs(point.Gaia_G_EDR3+cluster.dist_mod - star.g_mag + 2.1*cluster.reddening) < vertCutoff:
                        minDist = dist
                        minPoint = point
                elif dist < smallestDist:
                    smallestDist = dist
            # Explicit check replaces the original try/assert control flow
            # (assert statements are stripped under `python -O`)
            if minPoint is None:
                print(f"[{cluster.name}] Star too distant from isochrone to make a good proxy: BP-RP: {star.b_r} | G: {star.g_mag} | Dist: {smallestDist}")
                star.proxyMass = 0
                star.proxyLogTemp = 0
                star.proxyFeH = 0
                star.proxyLogAge = 0
                star.proxy = None
                continue
            star.proxyMass = minPoint.star_mass
            star.proxyLogTemp = minPoint.log_Teff
            star.proxyFeH = minPoint.feh
            star.proxyLogAge = minPoint.log10_isochrone_age_yr
            star.proxy = minPoint
        cluster.massLoaded = True
        cluster.meanProxyMass = np.mean([a.proxyMass for a in cluster.filtered])
        cluster.totalProxyMass = np.sum([a.proxyMass for a in cluster.filtered])
        cluster.min_g_mag = min([a.g_mag for a in cluster.filtered])
        cluster.max_g_mag = max([a.g_mag for a in cluster.filtered])
        cluster.min_b_r = min([a.b_r for a in cluster.filtered])
        cluster.max_b_r = max([a.b_r for a in cluster.filtered])
def variableHistogram(cl,var):
    """Plot a histogram of attribute `var` over a cluster's filtered stars.

    Parameters:
        cl: cluster name.
        var: name of the star attribute to histogram (e.g. 'g_mag').
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([cl])
    cluster = clusters[cl]
    plt.figure()
    plt.title(f"{cluster.name} Histogram of {var}")
    plt.xlabel(f"{var}")
    plt.ylabel("Count")
    # getattr replaces eval on a formatted string: same result, no code execution
    plt.hist([getattr(a, var) for a in cluster.filtered],bins='auto')
def varHist2D(cl,var1,var2,color='default',listType='filtered'):
    """Scatter var1 vs var2 for a cluster's star list, with marginal histograms.

    Parameters:
        cl: cluster name.
        var1, var2: star attribute names for the x and y axes.
        color: optional star attribute name used to color the scatter points.
        listType: which star list to draw from; one of allowedTypes below.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([cl])
    #Check allowed entries
    # Fix: the last three names were fused into one string
    # ('unfilteredBright,filteredBright,binaries'), so those list types always
    # fell back to 'filtered'
    allowedTypes = ['filtered','unfilteredWide','unfilteredBright','filteredBright','binaries']
    if not listType in allowedTypes:
        print(f"{listType} is not a valid list type, defaulting to filtered")
        listType = "filtered"
    cluster = clusters[cl]
    # getattr replaces eval on formatted attribute strings
    starlist = getattr(cluster, listType)
    plt.figure(figsize=(8,8))
    #Axis size and spacing
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    spacing = 0.005
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom + height + spacing, width, 0.2]
    rect_histy = [left + width + spacing, bottom, 0.2, height]
    ax_scatter = plt.axes(rect_scatter)
    ax_scatter.tick_params(direction='in', top=True, right=True)
    ax_histx = plt.axes(rect_histx)
    ax_histx.tick_params(direction='in', labelbottom=False)
    ax_histy = plt.axes(rect_histy)
    ax_histy.tick_params(direction='in', labelleft=False)
    x = [getattr(a, var1) for a in starlist]
    y = [getattr(a, var2) for a in starlist]
    if color == 'default':
        ax_scatter.scatter(x, y, s=5)
    else:
        colorMap = plt.get_cmap('coolwarm')#.reversed()
        ax_scatter.scatter(x, y, s=5, c=[getattr(a, color) for a in starlist], cmap = colorMap)
    ax_histx.hist(x,bins='auto')
    ax_histy.hist(y,bins='auto',orientation='horizontal')
    ax_histx.set_title(f"Histogram of {listType} {cluster.name} in {var1} and {var2}")
    ax_scatter.set_xlabel(f"{var1}")
    ax_scatter.set_ylabel(f"{var2}")
def Plot3D(cList=['all'],showEarth=True,flatten=True):
    """Interactive (plotly) 3D positions of cluster members.

    Converts each member's RA/DEC/parallax into Cartesian kpc coordinates and
    renders one trace per cluster in the browser. With flatten=True the radial
    spread is collapsed onto the cluster's mean distance.
    """
    #Imports
    import plotly.express as px
    import plotly.io as pio
    import numpy as np
    global clusterList
    pio.renderers.default='browser'
    fig = px.scatter_3d()
    if showEarth:
        fig.add_scatter3d(x=[0],y=[0],z=[0],marker=dict(color='lightblue'),name="Earth")
    cList = checkLoaded(cList)
    big = []
    for cl in cList:
        cluster = clusters[cl]
        # Spherical coordinates: RA/DEC in radians, distance in kpc from parallax
        ras = [star.ra * np.pi/180 for star in cluster.filtered]
        decs = [abs(star.dec) * np.pi/180 for star in cluster.filtered]
        dists = [1/(0.001*star.par) for star in cluster.filtered]
        if flatten:
            # Collapse radial spread onto the mean distance
            dists = [np.mean(dists)]*len(dists)
        xs = [d*np.cos(dc)*np.cos(r) for r,dc,d in zip(ras,decs,dists)]
        ys = [d*np.cos(dc)*np.sin(r) for r,dc,d in zip(ras,decs,dists)]
        zs = [d*np.sin(dc) for dc,d in zip(decs,dists)]
        fig.add_scatter3d(x=xs,y=ys,z=zs,name=cl,mode="markers",marker=dict(size=2))
        # Track extents (retained for parity with earlier layout experiments)
        big.extend([np.amax(xs),np.amax(ys),np.amax(zs)])
    fig.update_layout(scene=dict(aspectmode="cube",xaxis=dict(showbackground=False),yaxis=dict(showbackground=False),zaxis=dict(showbackground=False,visible=False)))
    fig.show()
def specificPlot(cl,iso,reddening,score):
    """Plot one cluster's CMD with a chosen isochrone overlaid.

    The isochrone is shifted by the given reddening (color) and by the
    cluster's distance modulus plus extinction (2.1 * reddening, magnitude);
    condensed proxy points are colored by their fitting weight. Output goes to
    SpecificPlots/pdf and SpecificPlots/png.
    """
    #Imports
    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle
    import os
    checkLoaded([cl])
    cluster = clusters[str(cl)]
    isochrone = isochrones[str(iso)]
    reddening = float(reddening)
    #Ensure both output directories exist
    for outDir in ("SpecificPlots/pdf/", "SpecificPlots/png/"):
        if not os.path.isdir(outDir):
            os.makedirs(outDir)
    #Cluster CMD, shifted isochrone, and weighted proxy points
    plt.figure()
    plt.gca().invert_yaxis()
    plt.xlabel('B-R')
    plt.ylabel('G Mag')
    plt.title(f"{cl} {iso}")
    member_br = [s.b_r for s in cluster.filtered]
    member_g = [s.g_mag for s in cluster.filtered]
    plt.scatter(member_br,member_g,s=0.05,c='dimgray',label='Cluster')
    iso_br = [x + reddening for x in isochrone.br]
    iso_g = [x+cluster.dist_mod+2.1*reddening for x in isochrone.g]
    plt.plot(iso_br,iso_g,c='midnightblue',label=f"Score: {float(score):.7f}")
    proxy_br = [s.b_r for s in cluster.condensed]
    proxy_g = [s.g_mag for s in cluster.condensed]
    proxy_w = [s.weight for s in cluster.condensed]
    plt.scatter(proxy_br,proxy_g,s=5,c=proxy_w,label='Cluster Proxy')
    #Proxy points are colored by their fitting weight
    plt.set_cmap('brg')
    cbar = plt.colorbar()
    cbar.ax.set_title("Weight")
    #Invisible legend entry used to display the reddening value
    spacer = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
    handles,labels = plt.gca().get_legend_handles_labels()
    handles.insert(0,spacer)
    labels.insert(0,f"Reddening: {reddening}")
    plt.legend(handles,labels)
    #Save figure output to disk
    plt.savefig(f"SpecificPlots/pdf/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.pdf")
    plt.savefig(f"SpecificPlots/png/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.png",dpi=500)
def plotRange(cl,a,b):
    """Render specificPlot for each of the cluster's ranked isochrone fits a..b."""
    global clusters
    checkLoaded([cl])
    #Each stored fit is a tuple of (isochrone name, score, reddening)
    for fit in clusters[str(cl)].iso[a:b]:
        fitName, fitScore, fitRed = fit[0], fit[1], fit[2]
        specificPlot(cl,isochrones[fitName].name,fitRed,fitScore)
def getIsoScore(cl,iso,red,output=True):
    """Look up the fit score recorded for (isochrone, reddening) on a cluster.

    Each entry of cl.iso is (isochrone name, score, reddening). Returns the
    stored score, or 0 (optionally printing a notice) when the combination
    was never scored for this cluster.
    """
    match = next(
        (entry for entry in cl.iso if entry[0] == iso.name and float(entry[2]) == red),
        None,
    )
    if match is not None:
        return match[1]
    if output:
        print(f"No score found for {cl.name} | {iso.name} | {red}")
    return 0
def onkey(x,y,cx,cy,fig,ax,cluster,iso,reddening):
    """Build the key-press handler used by interactivePlot.

    Returns a closure that steps through the loaded isochrone grid and
    redraws the CMD (cluster scatter x/y, condensed proxy points cx/cy, and
    the current isochrone) with two progress-bar columns showing the position
    in age and metallicity. Key bindings handled by the closure:
      w/s : metallicity up/down (wraps at list ends)
      a/d : age down/up (wraps at list ends)
      q/e : reddening -/+ 0.01
      r   : reset to the originally requested isochrone
      space : save the current frame and print the fit to the console
    """
    # Module-level state shared with successive invocations of the handler
    global curIso
    global curReddening
    curIso = iso
    curReddening = reddening
    def func(event):
        import matplotlib.patches as patches
        global curIso
        global curReddening
        global isochrones
        key = str(event.key)
        #print(key)
        # Isochrones sharing the current metallicity (ordered by age) and
        # sharing the current age (ordered by metallicity)
        ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]
        fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]
        age_index = ageSorted.index(curIso)
        feh_index = fehSorted.index(curIso)
        #Move up or down in the desired variable space, with wrap-around at the ends of the lists
        if key == "w":
            #Increase metallicity (IndexError past the end wraps to the start)
            try:
                curIso = fehSorted[feh_index+1]
                feh_index = feh_index+1
            except:
                curIso = fehSorted[0]
                feh_index = 0
        if key == "s":
            #Decrease metallicity (negative indexing wraps to the end)
            curIso = fehSorted[feh_index-1]
            feh_index = feh_index-1
            if feh_index < 0:
                feh_index = len(fehSorted)+feh_index
        if key == "a":
            #Step to the previous (younger) age; negative indexing wraps to the end
            curIso = ageSorted[age_index-1]
            age_index = age_index-1
            if age_index < 0:
                age_index = len(ageSorted)+age_index
        if key == "d":
            #Step to the next (older) age; IndexError past the end wraps to the start
            try:
                curIso = ageSorted[age_index+1]
                age_index = age_index+1
            except:
                curIso = ageSorted[0]
                age_index = 0
        if key == "q":
            #Decrease reddening by 0.01
            curReddening = round(curReddening-0.01,2)
        if key == "e":
            #Increase reddening by 0.01
            curReddening = round(curReddening+0.01,2)
        if key == "r":
            #Reset to originally requested isochrone
            curIso = iso
            ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]
            fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]
            age_index = ageSorted.index(curIso)
            feh_index = fehSorted.index(curIso)
        if key == " ":
            #Save the current frame and print the highlighted isochrone to console
            score = getIsoScore(cluster,curIso,curReddening)
            fig.savefig(f"Jamboree Images/frames/{curIso.name}.png",dpi=500)
            print(f"{curIso.name} | {curReddening} | {score}")
        score = getIsoScore(cluster,curIso,curReddening,output=False)
        #Replots everything with the updated isochrone
        ax.clear()
        ax.scatter(x,y,s=0.25,color='dimgray')
        ax.scatter(cx,cy,s=4,color='red')
        # Isochrone shifted by reddening (color) and distance modulus plus
        # 2.1 * reddening extinction (magnitude)
        ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+curReddening for a in curIso.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*curReddening for a in curIso.starList],color='darkblue')
        ax.set_title(f"{curIso.name}\n {curReddening}\n {score}")
        ax.set_xlabel("Apparent BP-RP")
        ax.set_ylabel("Apparent G Mag")
        ax.invert_yaxis()
        #Progress bar indicators for the interactive plot
        #Sets the dimensons of the boxes
        x0,x1 = ax.get_xlim()
        y0,y1 = ax.get_ylim()
        margin = 0.01
        width = 0.05 * (x1-x0)
        height = 0.6 * (y1-y0)
        xmargin = margin * (x1-x0)
        ymargin = margin * (y1-y0)
        #The two main progress bars
        rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
        rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
        #rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
        ax.add_patch(rect1)
        ax.add_patch(rect2)
        #ax.add_patch(rect3)
        #The segments filling up the progress bars
        n = len(ageSorted)
        #Adds cells bottom to top; the current position is highlighted in red
        for i in range(n):
            offset = i*height/n
            alpha = 0.25
            if i == age_index:
                color = 'red'
            else:
                color = 'black'
            #Age progress bar
            ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
        n = len(fehSorted)
        for i in range(n):
            offset = i*height/n
            alpha = 0.25
            if i == feh_index:
                color = 'red'
            else:
                color = 'black'
            #Metallicity progress bar
            ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
        fig.canvas.draw_idle()
    return func
def interactivePlot(cl,iso=0,reddening="auto"):
    """Launch the keyboard-driven isochrone explorer for a cluster's CMD.

    Draws the cluster's magnitudes, condensed proxy points, and a starting
    isochrone, then connects the onkey() handler so the user can step through
    the isochrone grid (see onkey for the key bindings).

    Parameters:
        cl: cluster name.
        iso: starting isochrone — either an isochrone name (str) or an index
            (int) into the cluster's ranked fits.
        reddening: a numeric reddening, or "auto" to use the cluster's value.
    """
    #Imports
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    global clusters
    global isochrones
    global kid
    checkLoaded([cl])
    cluster = clusters[f"{cl}"]
    #Select the starting isochrone based on user input
    if type(iso) == str:
        isochrone = isochrones[f"{iso}"]
    elif type(iso) == int:
        assert iso >= 0
        isochrone = isochrones[cluster.iso[iso][0]]
    else:
        print("Invalid declaration of 'iso'")
        return
    name = isochrone.name
    #Get the reddening if not manually defined
    if reddening == "auto":
        reddening = cluster.reddening
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be safer for this input check
    assert type(reddening) == float or type(reddening) == int
    score = getIsoScore(cluster,isochrone,reddening)
    # #Sorted and secondary-sorted isochrone lists
    # ageSorted = sorted(isoList,key=lambda x: (x.age,x.feh))
    # fehSorted = sorted(isoList,key=lambda x: (x.feh,x.age))
    # Same-metallicity isochrones ordered by age, and same-age ordered by
    # metallicity — mirrors the lists rebuilt inside onkey's handler
    ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == isochrone.feh]
    fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == isochrone.age]
    age_index = ageSorted.index(isochrone)
    feh_index = fehSorted.index(isochrone)
    #Coordinate lists to plot in addition to the isochrones
    x,y = cluster.mag[:,0],cluster.mag[:,1]
    cx,cy = [s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed]
    #Systematically remove some of the conflicting default keymaps in Pyplot
    letters = ['w','s','a','d','q','e','r']
    for letter in letters:
        #Finds all keymap references in the rcParams
        for param in [key for key in plt.rcParams if key.startswith("keymap") ]:
            try:
                plt.rcParams[param].remove(letter)
            except:
                # Letter not bound in this keymap; nothing to remove
                continue
    #Initialize the plot that will be updated every time
    fig = plt.figure(f"Interactive plot of {cl}")
    ax = fig.add_subplot(111)
    ax.scatter(x,y,s=0.25,color='dimgray')
    ax.scatter(cx,cy,s=4,color='red')
    # Isochrone shifted by reddening (color) and distance modulus plus
    # 2.1 * reddening extinction (magnitude)
    ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+reddening for a in isochrone.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*reddening for a in isochrone.starList],color='darkblue')
    ax.set_title(f"{name}\n {reddening}\n {score}")
    ax.set_xlabel("Apparent BP-RP")
    ax.set_ylabel("Apparent G Mag")
    ax.invert_yaxis()
    # Progress-bar geometry; identical to what onkey's handler redraws
    x0,x1 = ax.get_xlim()
    y0,y1 = ax.get_ylim()
    margin = 0.01
    width = 0.05 * (x1-x0)
    height = 0.6 * (y1-y0)
    xmargin = margin * (x1-x0)
    ymargin = margin * (y1-y0)
    rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
    rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
    #rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
    ax.add_patch(rect1)
    ax.add_patch(rect2)
    #ax.add_patch(rect3)
    n = len(ageSorted)
    #Adds cells bottom to top; the current position is highlighted in red
    for i in range(n):
        offset = i*height/n
        alpha = 0.25
        if i == age_index:
            color = 'red'
        else:
            color = 'black'
        # Age progress bar (inner column)
        ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
    n = len(fehSorted)
    for i in range(n):
        offset = i*height/n
        alpha = 0.25
        if i == feh_index:
            color = 'red'
        else:
            color = 'black'
        # Metallicity progress bar (outer column)
        ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
    #Launch the key_press listener
    hook = onkey(x,y,cx,cy,fig,ax,cluster,isochrone,reddening)
    kid = fig.canvas.mpl_connect('key_press_event',hook)
def printList(cList,varList):
    """Print the named attributes of each cluster in cList.

    Parameters:
        cList: list of cluster names (resolved/loaded via checkLoaded).
        varList: attribute names to print for each cluster.
    """
    cList = checkLoaded(cList)
    for cl in cList:
        cluster = clusters[cl]
        for a in varList:
            # getattr replaces exec of a dynamically built print statement:
            # same output, no code execution from a formatted string
            print(f"[{cl}] {a} =", getattr(cluster, a))
def statRange(cl,a,b):
    """Print mean age, metallicity, helium, and reddening over ranked fits a..b.

    For example, a=0, b=10 averages the parameters of the top 10 isochrone
    fits recorded for the cluster; each contributing fit is also printed.
    """
    import numpy as np
    global clusters
    checkLoaded([cl])
    if not isoIn:
        loadIsochrones()
    ages, fehs, ys, reds = [], [], [], []
    #Each stored fit entry is (isochrone name, score, reddening)
    for entry in clusters[cl].iso[a:b]:
        iso = isochrones[entry[0]]
        print(f"{iso.name} Reddening:{entry[2]}")
        ages.append(float(iso.age))
        fehs.append(float(iso.feh))
        ys.append(float(iso.y))
        reds.append(float(entry[2]))
    print(f"[{cl}] Mean age= {np.mean(ages)} Mean feh= {np.mean(fehs)} Mean y= {np.mean(ys)} Mean Reddening= {np.mean(reds)}")
def setFlag():
    """Set the .member flag on every unfiltered star that survived filtering.

    Runs automatically after turboFilter(); the flag is later consumed by
    plotting helpers such as customPlot().
    """
    #Fix: the original declared `global clusterlist` (lower-case l), which never
    #matched the `clusterList` actually read below
    global clusterList
    #Goes back and sets membership flags for all of the clusters loaded in memory
    #so that this tag can be used later
    for cluster in clusterList:
        for star in cluster.filtered:
            for unfStar in cluster.unfilteredWide:
                if star == unfStar:
                    unfStar.member = 1
def customPlot(var1,var2,clname,mode='filtered',iso=False,square=True,color='default',title='default',close=False,save=True):
    #Flexible two-variable scatter plot for a cluster. `mode` picks which star
    #list(s) to draw, `color` optionally maps a third attribute to the colormap,
    #and `iso=True` flips the y-axis for CMD-style plots.
    import matplotlib.pyplot as plt
    global closePlots
    #Load the cluster if it isn't yet
    checkLoaded([clname])
    cluster = clusters[f"{clname}"]
    #Dispatch table: mode -> (background list attr, foreground list attr or None)
    #"duo" modes overlay a highlighted list on top of a gray background list
    presets = {
        'filtered': ('filtered', None),
        'unfiltered': ('unfilteredWide', None),
        'bright_filtered': ('filteredBright', None),
        'dist_filtered': ('distFiltered', None),
        'bright_unfiltered': ('unfilteredBright', None),
        'duo': ('unfilteredWide', 'filtered'),
        'binary': ('binaries', None),
        'duoBinary': ('filtered', 'binaries'),
        'duoBright': ('unfilteredBright', 'filteredBright'),
        'duoDist': ('distFiltered', 'filtered'),
        'condensed': ('condensed', None),
        'duoCondensed': ('filtered', 'condensed'),
        'bounded': ('bounded', None),
        'duoBounded': ('filtered', 'bounded'),
    }
    if mode not in presets:
        print("No preset star list configuration found with that alias")
        return
    primaryAttr, overlayAttr = presets[mode]
    starlist = getattr(cluster, primaryAttr)
    starlistF = getattr(cluster, overlayAttr) if overlayAttr is not None else None
    #Pull an attribute off every star in a list; eval lets var1/var2/color be
    #arbitrary attribute expressions
    def _vals(attr, stars):
        return [eval(f"s.{attr}") for s in stars]
    #Basic plot features with axis labels and a title
    plt.figure()
    if title == 'default':
        plt.title(f"{clname} {mode} | {var1} vs {var2} | {color} color")
    else:
        plt.title(f"{title}")
    plt.xlabel(f"{var1}".upper())
    plt.ylabel(f"{var2}".upper())
    #Inverted y-axis for magnitude-style plots
    if iso:
        plt.gca().invert_yaxis()
    if 'duo' in mode:
        plt.scatter(_vals(var1, starlist), _vals(var2, starlist), s=2, c='gray')
        if color == 'default':
            plt.scatter(_vals(var1, starlistF), _vals(var2, starlistF), s=2.5, c='red')
        else:
            plt.scatter(_vals(var1, starlistF), _vals(var2, starlistF), s=2.5, c=_vals(color, starlistF))
            plt.set_cmap('brg')
            clb = plt.colorbar()
            clb.ax.set_title(f"{color}")
    else:
        if color == 'default':
            plt.scatter(_vals(var1, starlist), _vals(var2, starlist), s=1, c='midnightblue')
        else:
            plt.scatter(_vals(var1, starlist), _vals(var2, starlist), s=2, c=_vals(color, starlist))
            plt.set_cmap('cool')
            clb = plt.colorbar()
            clb.ax.set_title(f"{color}")
    #By default, square the axes to avoid misinformation from stretched axes;
    #turn this off and iso on for a color magnitude diagram
    if square:
        plt.axis("square")
    if save:
        plt.savefig(f"SpecificPlots/pdf/{clname}_{mode}_{var1}_{var2}.pdf")
        plt.savefig(f"SpecificPlots/png/{clname}_{mode}_{var1}_{var2}.png",dpi=500)
    if close or closePlots:
        plt.close()
        if save:
            print(f"Custom Plot {clname}_{mode}_{var1}_{var2} saved and closed")
        else:
            print(f"Custom Plot {clname}_{mode}_{var1}_{var2} closed")
def splitMS(clname='M67',slope=3,offset=12.2):
    #Diagram of the lower (single-star) main sequence and the same line shifted
    #0.75 mag brighter, where equal-mass binaries are expected to sit
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([clname])
    cluster = clusters[clname]
    colors = [s.b_r for s in cluster.filtered]
    mags = [s.g_mag for s in cluster.filtered]
    grid = np.linspace(1,2,100)
    mainSeq = [slope*a + offset for a in grid]
    binarySeq = [slope*a + offset - 0.75 for a in grid]
    plt.figure()
    plt.title('Main and Binary Sequences')
    plt.xlabel('B-R')
    plt.ylabel('Apparent G Mag')
    plt.scatter(colors,mags,s=0.5,label='Filtered Star Data')
    plt.plot(grid,mainSeq,color='r',label='Main Sequence')
    plt.plot(grid,binarySeq,'--',color='r',label='MS shifted 0.75 mag')
    plt.xlim(0.6,2.2)
    plt.ylim(13,19)
    plt.legend()
    plt.gca().invert_yaxis()
    plt.savefig(f"SpecificPlots/png/{clname}_MS_Spread.png",dpi=500)
    plt.savefig(f"SpecificPlots/pdf/{clname}_MS_Spread.pdf")
def kingProfile(r,K,R):
    #King (1962)-style surface density profile: central density K, core radius R
    ratio = r**2 / R**2
    return K / (1 + ratio)
def kingError(r,K,R,dK,dR):
    #1-sigma uncertainty of kingProfile via standard error propagation
    #over the fitted parameters K and R
    import numpy as np
    termK = (1+r**2/R**2)**(-1) * dK
    termR = 2*K*r**2*R*(r**2+R**2)**(-2) * dR
    return np.sqrt(termK**2 + termR**2)
def densityProfile(r,K,R):
    #Exponential profile fit for the membership density:
    #K is a scalar amplitude, R a characteristic radius (typically fit negative;
    #callers compare its absolute value)
    import numpy as np
    return K * np.exp(-r/R)
def densityError(r,K,R,dK,dR):
    #1-sigma uncertainty of densityProfile via error propagation over K and R
    import numpy as np
    expo = np.exp(-1*r/R)
    partial_K = abs(expo)
    partial_R = abs(K*r/(R**2)*expo)
    return np.sqrt((partial_K*dK)**2 + (partial_R*dR)**2)
def toIntensity(mag):
    #Convert an apparent magnitude to an intensity, scaled from the Sun
    sun_mag = -26.74       #apparent magnitude of the Sun
    sun_intensity = 1360   #solar constant, W/m^2
    return sun_intensity * 10**( 0.4*(sun_mag-mag) )
def membership(clname='M67',N=100,mode='filtered',numPercentileBins=5,percentile=0.2,delta=5,normalize=True):
    """Radial membership analysis for one cluster.

    Bins the selected star list into N concentric rings about the mean
    position, fits King profiles to the surface number and mass densities,
    derives characteristic / median / half-mass / half-light radii, and saves
    a series of diagnostic plots. Fitted quantities are stored back onto the
    cluster object as mode-suffixed attributes (e.g. scaleRad_filtered).

    Parameters:
        clname (str): key into the global `clusters` dict.
        N (int): number of radial rings; half are packed into the inner 25% of
            the field radius for better central resolution.
        mode (str): 'filtered', 'bounded', or anything else for unfilteredWide.
        numPercentileBins (int): bins used for the average-mass trend.
        percentile (float): quantile cut for the interquantile mean
            (0.2 -> mean of the 20th-80th percentile values).
        delta (float): maximum allowed bin-to-bin density jump before a bin is
            discarded as an outlier.
        normalize (bool): if True use RA*cos(Dec) so angular separations are
            not stretched near the celestial pole.

    Bug fix: the mass-density fit uncertainties were previously read from the
    number-density covariance matrix (`var`) instead of `var_mass`.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle
    import scipy.optimize as so
    import scipy.stats as st
    import math
    global volume
    checkLoaded([clname])
    cluster = clusters[clname]
    mode = mode.lower()
    #Default mode is filtered, but unfiltered data can be processed
    if "filtered" in mode:
        starList = cluster.filtered
    elif "bounded" in mode:
        starList = cluster.bounded
    else:
        starList = cluster.unfilteredWide
    #Load mass estimates from isochrone fitting
    if not cluster.massLoaded:
        proxyMatch([cluster.name])
    assert cluster.massLoaded
    assert len(starList) > 0
    #Assign x and y lists based on normalization or not
    if normalize:
        starX = [a.ra*np.cos(a.dec*np.pi/180) for a in starList]
        starY = [a.dec for a in starList]
        mode = mode + "_normalized"
    else:
        starX = [a.ra for a in starList]
        starY = [a.dec for a in starList]
    #Determine bounds of the field of view (post-filtering)
    xmax = max(starX)
    ymax = max(starY)
    x0 = np.mean(starX)
    y0 = np.mean(starY)
    newN = N
    #Determine radius of the field of view
    rx = xmax-x0
    ry = ymax-y0
    #Using the mean of the RA and Dec radii caused problems with clusters like
    #NGC188, which are close to the celestial pole and have a very stretched
    #mapping to RA/Dec space, so only the Dec extent is used
    radiusFOV = ry
    ringBins = list(np.linspace(0,radiusFOV,N))
    #The bins are divided up such that 50% of the bins are located in the inner 25% of the cluster radius
    #The remaining 50% of the bins are divided from 25% to 100% of the radius
    rings = list(np.linspace(0,radiusFOV/4,math.ceil(N/2)))
    ring2 = list(np.linspace(radiusFOV/4,radiusFOV,math.floor(N/2)+1))
    ring2 = ring2[1:-1]
    rings.extend(ring2)
    x=rings[:-1]
    counts = list(np.zeros(N-1,dtype=int))
    masses = list(np.zeros(N-1,dtype=int))
    rads=[]
    for star in starList:
        #Radial distance from the mean RA and Dec of the cluster
        if normalize:
            rads.append(np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2))
        else:
            rads.append(np.sqrt((star.ra-x0)**2+(star.dec-y0)**2))
        #Find the nearest ring to the star
        r = find_nearest(rings, rads[-1])
        i = rings.index(r)
        #Check bounds
        if i < len(counts):
            #If outside the last ring, add to that ring's count
            if r > rads[-1]:
                counts[i-1] += 1
                masses[i-1] += star.proxyMass
            else:
                counts[i] += 1
                masses[i] += star.proxyMass
    #Worth noting here that the way that this is set up, the rings don't actually mark the bounds of the bins but rather the midpoints.
    #There is no check to see if you are exterior or interior to the nearest ring, but rather what ring you are nearest to,
    #so the rings mark the midpoints of their bins not the boundaries
    #Histogram of the counts in each radial bin
    plt.figure(f"{clname}_membership_{mode}")
    plt.hist(rads,bins=ringBins)
    plt.xlabel("Radius (deg)")
    plt.ylabel("Number of Stars")
    plt.title(f"{clname} Membership")
    plt.savefig(f"{cluster.imgPath}{clname}_membership_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_membership_{mode}.png",dpi=500)
    #Calculates the area of each annulus bounded by two concentric rings and the
    #surface densities of the stars counted in those regions
    volume = []
    for i in range(0,len(rings[:-1])):
        volume.append(np.pi*(rings[i+1]**2-rings[i]**2))
    numDensity = [a/b for a,b in zip(counts,volume)]
    massDensity = [a/b for a,b in zip(masses,volume)]
    #Poisson errors; floored at 0.1 below so curve_fit never gets a zero weight
    error_num = [np.sqrt(a)/b for a,b in zip(counts,volume)]
    error_mass = [np.sqrt(a)/b for a,b in zip(masses,volume)]
    for i in range(0,len(error_num)):
        if error_num[i] < 0.1:
            error_num[i] = 0.1
    #Cut out the inner 5% because overbinning in the center of a circle doesn't help
    x = x[math.ceil(N/20):-1]
    counts = counts[math.ceil(N/20):-1]
    numDensity = numDensity[math.ceil(N/20):-1]
    massDensity = massDensity[math.ceil(N/20):-1]
    error_num = error_num[math.ceil(N/20):-1]
    error_mass = error_mass[math.ceil(N/20):-1]
    #Further filter the data based on outliers, either extremely low density or extremely big jumps in density from bin to bin
    i = 0
    numSmall = 0
    numGrad = 0
    while i < len(x)-1:
        if numDensity[i] < 0.5 or numDensity[i] < numDensity[i+1]/delta or massDensity[i] < 0.1:
            x.pop(i)
            counts.pop(i)
            numDensity.pop(i)
            massDensity.pop(i)
            error_num.pop(i)
            error_mass.pop(i)
            numSmall += 1
            newN -= 1
        elif abs(numDensity[i]) > abs(numDensity[i+1])*delta:
            x.pop(i)
            counts.pop(i)
            numDensity.pop(i)
            massDensity.pop(i)
            error_num.pop(i)
            error_mass.pop(i)
            numGrad += 1
            newN -= 1
        else:
            i += 1
    if numDensity[-1] < 0.01 or massDensity[-1] < 0.01:
        x.pop(-1)
        counts.pop(-1)
        numDensity.pop(-1)
        massDensity.pop(-1)
        error_num.pop(-1)
        error_mass.pop(-1)
        numSmall += 1
        newN -= 1
    print(f"[{cluster.name}] Removed {numSmall} points with too small of a density and {numGrad} points with too extreme of a delta")
    #========= Number Density =========
    #Number density vs radial bin plot
    plt.figure(f"{clname}_density_{mode}")
    plt.errorbar(x,numDensity,yerr=error_num,ls='None')
    plt.scatter(x,numDensity)
    plt.xlabel("Radius (deg)")
    plt.ylabel(r"Surface Number Density ($deg^{-2}$)")
    plt.title(f"{clname} {mode.capitalize()} Number Density".replace("_normalized",' Normalized'))
    #Fit a King profile to the density plot
    #(both branches of a historical NGC2355 special case used the same guess)
    p0=[5000,0.1]
    fit,var = so.curve_fit(kingProfile,x,numDensity,p0,maxfev=1000)
    #Std. Dev. from variance
    err = np.sqrt(var[1][1])
    err_coeff = np.sqrt(var[0][0])
    #Convert the angular core radius to parsecs via the mean parallax (mas -> pc)
    scale = np.abs(fit[1]*3600/206265)/(cluster.mean_par/1000)
    scaleVar = np.abs(scale*np.sqrt((var[1][1]/fit[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))
    #Scale radius from count in parsecs
    setattr(cluster,f"scaleRad_{mode}",scale)
    setattr(cluster,f"scaleRad_err_{mode}",scaleVar)
    #Scale radius from count in degrees
    setattr(cluster,f"scaleAngle_{mode}",abs(fit[1]))
    setattr(cluster,f"scaleAngle_err_{mode}",err)
    setattr(cluster,f"numDensity_coeff_{mode}",fit[0])
    setattr(cluster,f"numDensity_coeff_err_{mode}",err_coeff)
    #Plot the curve fit
    numLabel = ( f"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"
               + fr"K={fit[0]:.3f} $\pm$ {err_coeff:.3f}" + "\n"
               + fr"$\rho$={np.abs(fit[1]):.3f}$\degree$ $\pm$ {err:.3f}$\degree$"+ "\n"
               + fr"R={scale:.3f}pc $\pm$ {scaleVar:.3f}pc" )
    plt.plot(x,[kingProfile(a,*fit) for a in x],color='red',label=numLabel)
    plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],label=r'$1\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')
    plt.legend(fontsize=8,loc='upper right')
    plt.savefig(f"{cluster.imgPath}{clname}_numDensity_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_{mode}.png",dpi=500)
    plt.yscale('log')
    plt.savefig(f"{cluster.imgPath}{clname}_numDensity_log_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_log_{mode}.png",dpi=500)
    #Double plot for bounded regions: overlay on the filtered figure
    if "bounded" in mode:
        plt.figure(f"{clname}_density_filtered")
        plt.title(f"{clname} Overlaid Number Density")
        plt.errorbar(x,numDensity,yerr=error_num,ls='None',color='midnightblue')
        plt.scatter(x,numDensity,color='midnightblue')
        plt.plot(x,[kingProfile(a,*fit) for a in x],color='darkred',label=numLabel)
        plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')
        plt.legend(fontsize=8,loc='upper right')
        plt.yscale('linear')
        plt.savefig(f"{cluster.imgPath}{clname}_numDensity_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_overlay.png",dpi=500)
        plt.yscale('log')
        plt.savefig(f"{cluster.imgPath}{clname}_numDensity_log_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_log_overlay.png",dpi=500)
    #========= Mass Density =========
    #Mass density vs radial bin plot
    plt.figure(f"{clname}_mass_density_{mode}")
    plt.errorbar(x,massDensity,yerr=error_mass,ls='None')
    plt.scatter(x,massDensity)
    plt.xlabel("Radius (deg)")
    plt.ylabel(r"Surface Mass Density ($M_{\odot}*deg^{-2}$)")
    plt.title(f"{clname} {mode.capitalize()} Mass Density".replace("_normalized",' Normalized'))
    #Fit a King profile to the mass density
    fit_mass,var_mass = so.curve_fit(kingProfile,x,massDensity,p0,maxfev=1000)
    #Std. Dev. from variance
    #Bug fix: these previously read var[...] (the number-density covariance)
    err_mass = np.sqrt(var_mass[1][1])
    err_mass_coeff = np.sqrt(var_mass[0][0])
    scale_mass = np.abs(fit_mass[1]*3600/206265)/(cluster.mean_par/1000)
    scaleVar_mass = np.abs(scale_mass*np.sqrt((var_mass[1][1]/fit_mass[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))
    #Scale radius from mass in parsecs
    setattr(cluster,f"scaleRad_mass_{mode}",scale_mass)
    setattr(cluster,f"scaleRad_mass_err_{mode}",scaleVar_mass)
    #Scale radius from mass in degrees
    setattr(cluster,f"scaleAngle_mass_{mode}",abs(fit_mass[1]))
    setattr(cluster,f"scaleAngle_mass_err_{mode}",err_mass)
    setattr(cluster,f"massDensity_coeff_{mode}",fit_mass[0])
    setattr(cluster,f"massDensity_coeff_err_{mode}",err_mass_coeff)
    #Plot the curve fit
    massLabel = ( f"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"
                + fr"K={fit_mass[0]:.3f} $\pm$ {err_mass_coeff:.3f}" + "\n"
                + fr"$\rho$={np.abs(fit_mass[1]):.3f}$\degree$ $\pm$ {err_mass:.3f}$\degree$"+ "\n"
                + fr"R={scale_mass:.3f}pc $\pm$ {scaleVar_mass:.3f}pc" )
    plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='red',label=massLabel)
    plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],label=r'$1\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')
    plt.legend(fontsize=8,loc='upper right')
    plt.savefig(f"{cluster.imgPath}{clname}_massDensity_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_{mode}.png",dpi=500)
    plt.yscale('log')
    plt.savefig(f"{cluster.imgPath}{clname}_massDensity_log_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_log_{mode}.png",dpi=500)
    #Double plot for bounded regions
    if "bounded" in mode:
        plt.figure(f"{clname}_mass_density_filtered")
        plt.title(f"{clname} Overlaid Mass Density")
        plt.errorbar(x,massDensity,yerr=error_mass,ls='None',color='midnightblue')
        plt.scatter(x,massDensity,color='midnightblue')
        plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='darkred',label=massLabel)
        plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')
        plt.legend(fontsize=8,loc='upper right')
        plt.yscale('linear')
        plt.savefig(f"{cluster.imgPath}{clname}_massDensity_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_overlay.png",dpi=500)
        plt.yscale('log')
        plt.savefig(f"{cluster.imgPath}{clname}_massDensity_log_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_log_overlay.png",dpi=500)
    #========= Average Mass =========
    averageMass = [a/b for a,b in zip(massDensity,numDensity)]
    xDist = [np.abs(a*3600/206265)/(cluster.mean_par/1000) for a in x]
    #Average Mass plot
    plt.figure(f"{clname}_average_mass_{mode}")
    plt.scatter(xDist,averageMass,label=fr"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"+f"{numPercentileBins} Percentile Bins")
    plt.xlabel("Distance from Center (pc)")
    plt.ylabel(r"Average Stellar Mass ($M_{\odot}$)")
    plt.title(f"{clname} {mode.capitalize()} Average Mass".replace("_normalized",' Normalized'))
    #Split average mass data into numPercentileBins number of bins
    #NOTE(review): bounded mode reuses pMin/pMax from a previous filtered run --
    #confirm membership(..., mode='filtered') always runs first
    if "filtered" in mode:
        cluster.pMin = xDist[0]
        cluster.pMax = xDist[-1]
    pBins = np.linspace(cluster.pMin,cluster.pMax,numPercentileBins+1)
    xBins = []
    for i in range(len(pBins)-1):
        xBins.append((pBins[i]+pBins[i+1])/2)
    pBins = np.delete(pBins,0)
    pBins = np.delete(pBins,-1)
    for b in pBins:
        plt.axvline(x=b,color='black',linestyle='--')
    binned = []
    for n in range(numPercentileBins):
        binned.append([])
    #Assign the average mass data points to the bins
    for i in range(len(xDist)):
        #Finds the nearest xBin to each x value and sorts the corresponding averageMass into that bin
        val = find_nearest(xBins,xDist[i])
        idx = xBins.index(val)
        binned[idx].append(averageMass[i])
    #Creates an array numPercentileBins long storing the interquantile means of the points in those bins
    quantileMean = []
    meanBins = []
    for b in binned:
        if len(b) == 0:
            continue
        binSorted = sorted(b)
        #Finds the index of the lower percentile marker (ex. 20%)
        lower = binSorted.index(find_nearest(binSorted, np.quantile(b,percentile)))
        #Finds the index of the upper percentile marker (ex. 80%)
        upper = binSorted.index(find_nearest(binSorted, np.quantile(b,1-percentile)))
        #Means between lower and upper percentile markers
        quantileMean.append(np.mean(binSorted[lower:upper+1]))
        #Bin centers
        #NOTE(review): binned.index(b) returns the first bin with equal contents;
        #two bins holding identical values would map to the wrong radius
        meanBins.append(xBins[binned.index(b)])
    #Fit the mass/number density ratio with a King profile (residual trend)
    try:
        resFit, resVar = so.curve_fit(kingProfile,xDist,[kingProfile(a,*fit_mass)/kingProfile(a,*fit) for a in x])
        residual_coeff, residual_scaleAngle = resFit[0],resFit[1]
    except Exception:
        print(f"Unable to fit the residuals for {cluster.name}")
        residual_coeff, residual_scaleAngle = -99, -99
    massFit = st.linregress(meanBins,quantileMean)
    fitslope, intercept, rval, pval, fitslope_err, intercept_err = massFit.slope, massFit.intercept, massFit.rvalue, massFit.pvalue, massFit.stderr, massFit.intercept_stderr
    residual_scaleRad = np.abs(residual_scaleAngle*3600/206265)/(cluster.mean_par/1000)
    setattr(cluster,f"residual_coeff_{mode}",residual_coeff)
    setattr(cluster,f"residual_scaleAngle_{mode}",residual_scaleAngle)
    setattr(cluster,f"residual_scaleRad_{mode}",residual_scaleRad)
    setattr(cluster,f"mass_slope_{mode}",fitslope)
    setattr(cluster,f"mass_slope_err_{mode}",fitslope_err)
    setattr(cluster,f"mass_intercept_{mode}",intercept)
    setattr(cluster,f"mass_intercept_err_{mode}",intercept_err)
    setattr(cluster,f"mass_fit_r2_{mode}",rval**2)
    setattr(cluster,f"mass_fit_p_{mode}",pval)
    fitLabel = ( fr"Slope = {fitslope:.3f} $\pm$ {fitslope_err:.3f}" + "\n"
               + fr"Intercept = {intercept:.3f} $\pm$ {intercept_err:.3f}" + "\n"
               + fr"$r^2$ = {rval**2:.3f} ({mode.capitalize()})".replace("_normalized",' Normalized'))
    #Plot the quantile means and the linear fit on the existing average mass plot
    plt.scatter(meanBins,quantileMean,color='red',label=f'Interquartile Mean ({mode.capitalize()})'.replace("_normalized",' Normalized'))
    plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='red',label=fitLabel)
    plt.legend(fontsize=8,loc='upper right')
    plt.savefig(f"{cluster.imgPath}{clname}_averageMass_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_averageMass_{mode}.png",dpi=500)
    #Double plot for bounded regions
    if "bounded" in mode:
        plt.figure(f"{clname}_average_mass_filtered")
        plt.title(f"{clname} Overlaid Average Mass")
        plt.scatter(xDist,averageMass,color='midnightblue',label=fr"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"+f"{numPercentileBins} Percentile Bins")
        plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='darkred',label=fitLabel)
        plt.scatter(meanBins,quantileMean,color='darkred',label=f'Interquartile Mean ({mode.capitalize()})'.replace("_normalized",' Normalized'))
        plt.legend(fontsize=8,loc='upper right')
        plt.savefig(f"{cluster.imgPath}{clname}_averageMass_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_averageMass_overlay.png",dpi=500)
    #========= Radius Plot =========
    plt.figure(f"{clname}_characteristic_radius_{mode}")
    if normalize:
        plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
        plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
        plt.xlabel("RA*cos(Dec) (Deg)")
    else:
        plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
        plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
        plt.xlabel("RA (Deg)")
    pltRad = abs(getattr(cluster,f"scaleAngle_{mode}"))
    outline1 = Circle([x0,y0],1*pltRad,color='red',fill=False,ls='--',label=fr"$\rho$={1*pltRad:0.3f}$\degree$",alpha=0.7)
    outline2 = Circle([x0,y0],5*pltRad,color='red',fill=False,ls='--',label=fr"5$\rho$={5*pltRad:0.3f}$\degree$",alpha=0.7)
    plt.gca().add_patch(outline1)
    plt.gca().add_patch(outline2)
    plt.legend(fontsize=10,loc='upper right')
    plt.axis('square')
    plt.ylabel("DEC (Deg)")
    plt.title(f"{clname} {mode.capitalize()} Characteristic Radius".replace("_normalized",' Normalized'))
    plt.gcf().set_size_inches(8,8)
    plt.savefig(f"{cluster.imgPath}{clname}_radialMembership_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_radialMembership_{mode}.png",dpi=500)
    #Extra ring-layout diagnostic, only produced for the M67 filtered run
    if "M67" in clname and "filtered" in mode:
        plt.figure(f"{clname}_rings_{mode}")
        if normalize:
            plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
            plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
            plt.xlabel("RA*cos(Dec) (Deg)")
        else:
            plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
            plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
            plt.xlabel("RA (Deg)")
        for i in range(0,len(rings)):
            outline = Circle([x0,y0],rings[i],color='red',fill=False)
            plt.gca().add_patch(outline)
        plt.legend(fontsize=10,loc='upper right')
        plt.axis('square')
        plt.ylabel("DEC (Deg)")
        plt.title(f"{clname} Radial Bins")
        plt.gcf().set_size_inches(8,8)
        plt.savefig(f"SpecificPlots/pdf/{clname}_radialBins_{mode}.pdf".replace("_filtered",''))
        plt.savefig(f"SpecificPlots/png/{clname}_radialBins_{mode}.png".replace("_filtered",''),dpi=500)
        plt.xlim(x0-0.15,x0+0.15)
        plt.ylim(y0-0.15,y0+0.15)
        plt.savefig(f"SpecificPlots/pdf/{clname}_radialBins_center_{mode}.pdf".replace("_filtered",''))
        plt.savefig(f"SpecificPlots/png/{clname}_radialBins_center_{mode}.png".replace("_filtered",''),dpi=500)
    #========= Stars by Mass =========
    massList = []
    innerMassList = []
    for star in starList:
        massList.append(star.proxyMass)
        if normalize:
            if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
                innerMassList.append(star.proxyMass)
        else:
            if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
                innerMassList.append(star.proxyMass)
    mBins = np.arange(min(massList),max(massList)+0.1,0.1)
    inBins = np.arange(min(innerMassList),max(innerMassList)+0.1,0.1)
    plt.figure(f"{clname}_mass_frequency_{mode}")
    plt.xlabel(r"Stellar Mass ($M_{\odot}$)")
    plt.ylabel("Number of Stars")
    plt.title(f"{clname} {mode.capitalize()} Mass Frequency".replace("_normalized",' Normalized'))
    plt.hist(massList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
    plt.hist(innerMassList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
    plt.legend(fontsize=10,loc='upper right')
    plt.savefig(f"{cluster.imgPath}{clname}_massFrequency_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_massFrequency_{mode}.png",dpi=500)
    #Double plot for bounded regions
    if "bounded" in mode:
        plt.figure(f"{clname}_mass_frequency_filtered")
        plt.title(f"{clname} Overlaid Mass Frequency")
        plt.hist(massList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
        plt.hist(innerMassList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
        plt.legend(fontsize=10,loc='upper right')
        plt.savefig(f"{cluster.imgPath}{clname}_massFrequency_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_massFrequency_overlay.png",dpi=500)
    #========= Stars by Magnitude =========
    magList = []
    innerMagList = []
    for star in starList:
        #Deredden and shift to absolute magnitude (2.1 is the adopted G-band extinction coefficient)
        magList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
        if normalize:
            if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
                innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
        else:
            if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
                innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
    mBins = np.arange(min(magList),max(magList)+0.1,0.1)
    inBins = np.arange(min(innerMagList),max(innerMagList)+0.1,0.1)
    plt.figure(f"{clname}_mag_frequency_{mode}")
    plt.xlabel(r"Absolute G Mag")
    plt.ylabel("Number of Stars")
    plt.title(f"{clname} {mode.capitalize()} Absolute Magnitude Frequency".replace("_normalized",' Normalized'))
    plt.hist(magList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
    plt.hist(innerMagList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
    plt.legend(fontsize=10,loc='upper right')
    plt.savefig(f"{cluster.imgPath}{clname}_magFrequency_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_magFrequency_{mode}.png",dpi=500)
    #Double plot for bounded regions
    if "bounded" in mode:
        plt.figure(f"{clname}_mag_frequency_filtered")
        plt.title(f"{clname} Overlaid Absolute Magnitude Frequency")
        plt.hist(magList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
        plt.hist(innerMagList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
        plt.legend(fontsize=10,loc='upper right')
        plt.savefig(f"{cluster.imgPath}{clname}_magFrequency_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_magFrequency_overlay.png",dpi=500)
    #========= Stars by Color =========
    colorList = []
    innerColorList = []
    for star in starList:
        colorList.append(star.b_r-cluster.reddening)
        if normalize:
            if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
                innerColorList.append(star.b_r-cluster.reddening)
        else:
            if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
                innerColorList.append(star.b_r-cluster.reddening)
    mBins = np.arange(min(colorList),max(colorList)+0.1,0.1)
    inBins = np.arange(min(innerColorList),max(innerColorList)+0.1,0.1)
    plt.figure(f"{clname}_color_frequency_{mode}")
    plt.xlabel(r"Dereddened BP-RP")
    plt.ylabel("Number of Stars")
    plt.title(f"{clname} {mode.capitalize()} Dereddened Color Index Frequency".replace("_normalized",' Normalized'))
    plt.hist(colorList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
    plt.hist(innerColorList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
    plt.legend(fontsize=10,loc='upper right')
    plt.savefig(f"{cluster.imgPath}{clname}_colorFrequency_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_colorFrequency_{mode}.png",dpi=500)
    #Double plot for bounded regions
    if "bounded" in mode:
        plt.figure(f"{clname}_color_frequency_filtered")
        plt.title(f"{clname} Overlaid Dereddened Color Index Frequency")
        plt.hist(colorList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
        plt.hist(innerColorList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
        plt.legend(fontsize=10,loc='upper right')
        plt.savefig(f"{cluster.imgPath}{clname}_colorFrequency_overlay.pdf")
        plt.savefig(f"{cluster.imgPath}png/{clname}_colorFrequency_overlay.png",dpi=500)
    #========= Other Radii =========
    #Median star distance, half-mass radius, and half-light radius, computed by
    #walking the stars outward until half the total mass / intensity is enclosed
    massSum = np.sum([star.proxyMass for star in starList])
    intensitySum = np.sum([toIntensity(star.g_mag) for star in starList])
    curMassSum = 0
    curIntSum = 0
    massFound = False
    intFound = False
    if normalize:
        setattr(cluster,f"medianRad_{mode}",np.median([np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))
        setattr(cluster,f"medianAngle_{mode}",np.median([star.normRadDist for star in starList]))
        radialStarList = sorted(starList,key=lambda x: x.normRadDist)
        for star in radialStarList:
            curMassSum += star.proxyMass
            curIntSum += toIntensity(star.g_mag)
            if curMassSum > massSum/2 and not massFound:
                setattr(cluster,f"halfMassRad_{mode}",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))
                setattr(cluster,f"halfMassAngle_{mode}",star.normRadDist)
                massFound = True
            if curIntSum > intensitySum/2 and not intFound:
                setattr(cluster,f"halfLightRad_{mode}",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))
                setattr(cluster,f"halfLightAngle_{mode}",star.normRadDist)
                intFound = True
            if massFound and intFound:
                break
        plt.figure(f"{clname}_other_radii_{mode}")
        plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
        plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
        plt.xlabel("RA*cos(Dec) (deg)")
    else:
        setattr(cluster,f"medianRad_{mode}",np.median([np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))
        setattr(cluster,f"medianAngle_{mode}",np.median([star.radDist for star in starList]))
        radialStarList = sorted(starList,key=lambda x: x.radDist)
        for star in radialStarList:
            curMassSum += star.proxyMass
            curIntSum += toIntensity(star.g_mag)
            if curMassSum > massSum/2 and not massFound:
                setattr(cluster,f"halfMassRad_{mode}",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))
                setattr(cluster,f"halfMassAngle_{mode}",star.radDist)
                massFound = True
            if curIntSum > intensitySum/2 and not intFound:
                setattr(cluster,f"halfLightRad_{mode}",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))
                setattr(cluster,f"halfLightAngle_{mode}",star.radDist)
                intFound = True
            if massFound and intFound:
                break
        plt.figure(f"{clname}_other_radii_{mode}")
        plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
        plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
        plt.xlabel("RA (deg)")
    medRad = getattr(cluster,f"medianRad_{mode}")
    medAngle = getattr(cluster,f"medianAngle_{mode}")
    mRad = getattr(cluster,f"halfMassRad_{mode}")
    mAngle = getattr(cluster,f"halfMassAngle_{mode}")
    lRad = getattr(cluster,f"halfLightRad_{mode}")
    lAngle = getattr(cluster,f"halfLightAngle_{mode}")
    print(medAngle)
    outline1 = Circle([x0,y0],medAngle,color='red',fill=False,ls='--',label=fr"Median Star Distance = {medAngle:.3f}$\degree$, {medRad:.3f}pc",alpha=1)
    outline2 = Circle([x0,y0],mAngle,color='darkgreen',fill=False,ls='--',label=fr"Half Mass Radius = {mAngle:.3f}$\degree$, {mRad:.3f}pc",alpha=1)
    outline3 = Circle([x0,y0],lAngle,color='purple',fill=False,ls='--',label=fr"Half Light Radius = {lAngle:.3f}$\degree$, {lRad:.3f}pc",alpha=1)
    plt.gca().add_patch(outline1)
    plt.gca().add_patch(outline2)
    plt.gca().add_patch(outline3)
    plt.legend(fontsize=10,loc='upper right')
    plt.axis('square')
    plt.ylabel("DEC (Deg)")
    plt.title(f"{clname} {mode.capitalize()} Various Radii".replace("_normalized",' Normalized'))
    plt.gcf().set_size_inches(8,8)
    plt.savefig(f"{cluster.imgPath}{clname}_otherRadii_{mode}.pdf")
    plt.savefig(f"{cluster.imgPath}png/{clname}_otherRadii_{mode}.png",dpi=500)
def checkLoaded(cList):
    """Ensure every named cluster is in memory; expand the 'all' wildcard.

    Returns the (possibly expanded) list of cluster names.
    """
    # 'all' substitutes the names of every cluster currently loaded.
    if 'all' in cList:
        return [c.name for c in clusterList]
    # Otherwise lazily load any requested cluster not yet in the registry.
    for name in cList:
        if name not in clusters:
            loadClusters([name])
    return cList
def saveResults(cList,outdir="results"):
    """Persist per-cluster result summaries.

    For each named cluster, writes a pickled resultClusterObj to
    <outdir>/pickled/<name>.pk1 and a two-row CSV (property names over
    values) to <outdir>/<name>.csv.
    """
    #Imports
    import numpy as np
    import dill
    import os
    global clusters
    global clusterList
    # checkLoaded() may expand the 'all' wildcard -- keep its return value
    # (previously it was discarded, so 'all' fell through to clusters['all']).
    cList = checkLoaded(cList)
    #Check and create the relevant directory paths to save/load the results.
    # os.makedirs is idempotent; the old mkdir/else structure skipped the
    # entire save loop the first time pickled/ had to be created.
    os.makedirs(f"{outdir}/pickled/", exist_ok=True)
    for cl in cList:
        cluster = clusters[cl]
        #Creates a "result cluster" object from the cluster, effectively just stripping away lists
        rCl = resultClusterObj(cluster)
        #Pickle the result cluster object
        with open(f"{outdir}/pickled/{cluster.name}.pk1", 'wb') as output:
            dill.dump(rCl, output)
        #Store variables into an array to be printed as csv
        properties = [a for a in dir(rCl) if not a.startswith('_')]
        res = [getattr(rCl,p) for p in properties]
        #Stack into an array of 2 rows with variable names and values
        fin = np.vstack((properties,res))
        np.savetxt(f"{outdir}/{cluster.name}.csv",fin,delimiter=',',fmt='%s')
def loadResults(filter="None",indir="results"):
    """Load every pickled resultClusterObj from <indir>/pickled/ into resultList.

    `filter` is accepted for backward compatibility but is currently unused.
    """
    #Imports
    import numpy as np
    import dill
    import os
    global resultList
    global resultsIn
    # Validate the *requested* directory; previously this was hard-coded to
    # "results/", which broke loading from a non-default indir.
    assert os.path.isdir(f"{indir}/")
    resultList = []
    for fn in os.listdir(indir+"/pickled/"):
        #Reads in instances from the saved pickle file
        with open(f"{indir}/pickled/{fn}",'rb') as infile:
            res = dill.load(infile)
        resultList.append(res)
    resultsIn = True
    toDict()
def refreshProperties(cList=['all']):
    """Re-copy catalogue values onto the named (loaded) clusters.

    Reloads the catalogue, copies every public catalogue attribute onto each
    cluster object, warns about unset (-99 sentinel) values, and recomputes
    each filtered star's normRA.
    """
    import numpy as np
    global catalogue
    global clusterList
    global clusters
    clusterCatalogue()
    # checkLoaded() expands the 'all' wildcard and loads anything missing;
    # previously its return value was discarded and the *names* themselves
    # were iterated as if they were cluster objects, crashing below.
    cList = checkLoaded(cList)
    #Loop through clusters
    for cl in cList:
        cluster = clusters[cl]
        # Locate this cluster's catalogue entry by name.
        reference = None
        for entry in catalogue:
            if str(entry.name) == str(cluster.name):
                reference = entry
                print(f"Catalogue match for {cluster.name} found")
                break
        if reference is None:
            print(f"Catalogue match for {cluster.name} was not found, please create one")
            continue
        #Filter all of the methods out of the properties list
        properties = [a for a in dir(reference) if not a.startswith('_')]
        #Now we have a list of all the attributes assigned to the catalogue (the self.variables)
        for p in properties:
            prop = getattr(reference,p)
            setattr(cluster, p, prop)
            try:
                # Values <= -98 are the catalogue's "unset" sentinel (-99).
                if prop <= -98:
                    print(f"{cluster.name} does not have a specified catalogue value for {p}")
            except TypeError:
                # Non-numeric attributes (names, types) cannot be range-checked.
                continue
        #Additional properties that may be useful
        # NOTE(review): this uses star.pmra, while starObj.__init__ computes
        # normRA from star.ra -- confirm which is intended.
        for star in cluster.filtered:
            star.normRA = star.pmra*np.cos(star.dec*np.pi/180)
        print(f"{cluster.name} properties refreshed from catalogue")
def statPlot(statX,statY,population="open",color="default",square=True,invertY=False,logX=False,logY=False,pointLabels=True,linFit=False,directory='default'):
    """Plot result statistic statX against statY across a population of clusters.

    `population` is either "open"/"globular" or an explicit list of cluster
    names.  The special pair (b_r, g_mag) produces stacked CMD overlays;
    anything else produces a generic scatter with optional linear fit.
    """
    #Create plots of stat X vs stat Y across a population of clusters, similar to customPlot()
    #Can be set to use a custom list of clusters, or all clusters of a given type
    #
    # NOTE(review): color, square, invertY, logX, logY and directory are
    # accepted but never read in this body -- presumably reserved; confirm.
    import matplotlib
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.stats import linregress
    global clusters
    global clusterList
    global catalogue
    global resultsIn
    global resultList
    # Results are loaded lazily on first use.
    if not resultsIn:
        loadResults()
    #Filter out incorrect inputs
    if type(population) == str:
        population = population.lower()
        try:
            assert population == "open" or population == "globular"
        except:
            # NOTE(review): execution continues with the unrecognized string,
            # which simply yields an empty cluster list below.
            print("Specified population type not recognized")
    else:
        try:
            assert type(population) == list
            assert type(population[0]) == str
        except:
            print("Population type given is not valid, must be either a list of cluster name strings or a single string \'open\' or \'closed\'")
            return
        try:
            assert len(population) > 1
        except:
            print("Population statistic plots cannot be made with fewer than 2 clusters given")
            return
    #Load cluster information from cList
    #This is going to involve using the resultCluster object to read data from each cluster folder in the cList
    cList = []
    banList = ['NGC2204']
    if type(population) == str:
        # Population given by type: every non-banned result of that type.
        for res in resultList:
            if res.clType.lower() == population and not res.name in banList:
                cList.append(res)
    else:
        # Population given explicitly by name.
        for res in resultList:
            if res.name in population:
                cList.append(res)
    if statX.lower() == "b_r" and statY.lower() == "g_mag":
        #Corrected CMD overlay
        # Three stacked figures: apparent, dereddened/absolute, and
        # absolute with a fixed vertical offset per cluster.
        NUM_COLORS = len(cList)
        cm = plt.get_cmap('nipy_spectral')
        plt.figure("uncorrected")
        plt.title("Cluster Overlay")
        plt.xlabel("Observed B-R")
        plt.ylabel("Apparent G Mag")
        plt.gca().invert_yaxis()
        plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
        plt.figure("unshifted")
        plt.title("Corrected Cluster Overlay")
        plt.xlabel("Dereddened B-R")
        plt.ylabel("Absolute G Mag")
        plt.gca().invert_yaxis()
        plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
        plt.figure("shifted")
        plt.title("Corrected Cluster Overlay - Offset")
        plt.xlabel("Dereddened B-R")
        plt.ylabel("Absolute G Mag")
        plt.gca().invert_yaxis()
        plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
        index = 0
        offset = 2.5
        for cluster in cList:
            # Older result pickles may lack dataPath; fall back to the layout default.
            try:
                path = cluster.dataPath
            except:
                path = f"clusters/{cluster.name}/data/"
            condensed = np.genfromtxt(f"{path}condensed.csv",delimiter=",")
            cluster.condensed = condensed
            #Adjust by cluster.reddening and cluster.dist_mod
            # 2.1 is the adopted A_G/E(B-R) extinction coefficient for G.
            x1 = [a[0] for a in condensed]
            y1 = [a[1] for a in condensed]
            x2 = [a[0]-cluster.reddening for a in condensed]
            y2 = [a[1]-2.1*cluster.reddening-cluster.dist_mod for a in condensed]
            x3 = [a[0]-cluster.reddening for a in condensed]
            y3 = [a[1]-2.1*cluster.reddening-cluster.dist_mod+index*offset for a in condensed]
            index += 1
            plt.figure("uncorrected")
            plt.scatter(x1,y1,label=f"{cluster.name}")
            plt.figure("unshifted")
            # NOTE(review): these guide lines are redrawn every iteration.
            plt.axvline(x=1.6,ymax=0.5,color='black',linestyle='--')
            plt.axhline(y=4,xmin=0.59,color='black',linestyle='--')
            plt.scatter(x2,y2,label=f"{cluster.name}")
            plt.figure("shifted")
            plt.scatter(x3,y3,label=f"{cluster.name}")
            plt.axvline(x=1.6,color='black',linestyle='--')
            # if 'NGC2301' in cluster.name:
            #     for a,b in zip(x2,y2):
            #         print(f"{a},{b}")
        plt.figure("uncorrected")
        plt.legend(fontsize=10,loc='upper right')
        plt.gcf().set_size_inches(8,6)
        plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_apparent.pdf")
        plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_apparent.png",dpi=500)
        plt.figure("unshifted")
        plt.legend(fontsize=10,loc='upper right')
        plt.gcf().set_size_inches(8,6)
        plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_absolute.pdf")
        plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_absolute.png",dpi=500)
        plt.figure("shifted")
        plt.legend(fontsize=10,loc='upper right')
        plt.gcf().set_size_inches(8,6)
        plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_shifted.pdf")
        plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_shifted.png",dpi=500)
    else:
        # Generic scatter of one result attribute against another.
        x = [getattr(a, statX) for a in cList]
        y = [getattr(a, statY) for a in cList]
        plt.figure()
        plt.xlabel(f"{statX}")
        plt.ylabel(f"{statY}")
        if pointLabels:
            for cluster in cList:
                plt.scatter(getattr(cluster, statX),getattr(cluster, statY),label=cluster.name)
            plt.legend(fontsize="small")
        else:
            plt.scatter(x,y)
        if linFit:
            reg = linregress(x,y)
            plt.plot(x,[reg[0]*a+reg[1] for a in x])
        plt.savefig(f"SpecificPlots/pdf/{population}_{statX}_{statY}.pdf")
        plt.savefig(f"SpecificPlots/png/{population}_{statX}_{statY}.png",dpi=500)
    return
def ageMassFit(t,m0,k):
    """Exponential-decay model for mean member mass versus cluster age:
    m(t) = 1 + m0 * exp(-k * t).
    """
    import numpy as np
    decay = np.exp(-1*k*t)
    return 1 + m0*decay
def extinctionLaw(d,M0):
    """Inverse-square-law apparent magnitude at distance d for intrinsic
    normalization M0 (no dust term): M0 - 2.5*log10(1 / (4*pi*d**2)).
    """
    import numpy as np
    flux_factor = 1/(4*np.pi*d**2)
    return M0 - 2.5*np.log10(flux_factor)
def resultPlots():
    """Generate the standard population-level summary plots for all open
    clusters in resultList (mass vs age, extinction law, bounded fraction,
    radii), saving PDF and PNG copies under results/plots/.
    """
    #Imports
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.stats import linregress
    from scipy.optimize import curve_fit
    global clusters
    global clusterList
    global catalogue
    global resultsIn
    global resultList
    if not resultsIn:
        loadResults()
    #Select open clusters from resultList
    banList = ['NGC2204']
    cList = []
    for res in resultList:
        if res.clType.lower() == "open" and not res.name in banList:
            cList.append(res)
    #Filtered mass versus age
    fname = "mass_vs_age_filtered"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"Mean Cluster Member Mass ($M_{\odot}$)")
    plt.scatter([c.fit_age for c in cList],[c.meanProxyMass for c in cList])
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Bounded mass versus age
    fname = "mass_vs_age_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"Mean Cluster Member Mass ($M_{\odot}$)")
    x,y = [c.fit_age for c in cList],[c.meanBoundedProxyMass for c in cList]
    plt.scatter(x,y)
    # Fit the two-parameter exponential ageMassFit (m0, k).
    # NOTE(review): the "Uncertainties" label prints var[i][i], i.e. the
    # covariance diagonal; 1-sigma errors would be sqrt of those -- confirm.
    fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)
    xr = list(np.linspace(min(x),max(x),101))
    fitLabel = fr"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$" + "\n" + fr"Uncertainties = $\pm{var[0][0]:.3f}, \pm{var[1][1]:.3f}$"
    plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)
    plt.legend()
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Mass intercept versus age
    fname = "mass_intercept_vs_age_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"Mean Stellar Mass in Core ($M_{\odot}$)")
    x,y = [c.fit_age for c in cList],[c.mass_intercept_bounded for c in cList]
    plt.scatter(x,y)
    fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)
    xr = list(np.linspace(min(x),max(x),101))
    fitLabel = fr"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$" + "\n" + fr"Uncertainties = $\pm{var[0][0]:.3f}, \pm{var[1][1]:.3f}$"
    plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)
    plt.legend()
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Mass slope versus age
    # NOTE(review): "mass_slop" in the filename looks like a typo for
    # "mass_slope"; kept because downstream tooling may expect it.
    fname = "mass_slop_vs_age_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"IQM Stellar Mass Dropoff ($\frac{M_{\odot}}{pc}$)")
    x,y = [c.fit_age for c in cList],[c.mass_slope_bounded for c in cList]
    plt.scatter(x,y)
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Magnitude versus distance (Extinction law)
    fname = "mag_vs_dist_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Cluster Distance from Earth (pc)")
    plt.ylabel(r"Mean Apparent G Magnitude")
    x,y = [c.meanDist for c in cList],[c.mean_bounded_g_mag for c in cList]
    plt.scatter(x,y)
    # One-parameter fit (normalization only) of the inverse-square law.
    fit,var = curve_fit(extinctionLaw,x,y,maxfev=1000)
    xr = list(np.linspace(min(x),max(x),101))
    plt.plot(xr,[extinctionLaw(a,fit[0]) for a in xr],label="Inverse Square Law \n" + fr" $M_0 = {fit[0]:.3f} \pm {var[0][0]:.3f}$")
    plt.gca().invert_yaxis()
    plt.legend()
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Bounded fraction versus distance
    fname = "bounded_fraction_vs_dist"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Cluster Distance from Earth (pc)")
    plt.ylabel("Fraction Unaffected by BP-RP Limit")
    x,y = [c.meanDist for c in cList],[c.fractionBounded for c in cList]
    plt.scatter(x,y)
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Radii
    # NOTE(review): this last figure has no labels and is never saved --
    # it appears to be unfinished scratch work.
    plt.figure()
    plt.scatter([c.meanGalacticDist for c in cList],[c.halfLightRad_bounded/c.medianRad_bounded for c in cList])
def boundedStats(cList,xmax=1.6,saveCl=True,unloadCl=True):
    """Compute statistics over the subset of filtered stars unaffected by the
    BP-RP color limit, storing the results on each cluster.

    A star is kept unless it is red-ward of `xmax` (after dereddening) AND
    fainter than cluster.cltpy (the catalogue CMD turn point y -- TODO
    confirm that interpretation).
    """
    import numpy as np
    global clusters
    global subList
    for cl in cList:
        checkLoaded([cl])
        cluster = clusters[cl]
        # Drop the region where the BP-RP limit truncates the sequence.
        subList = [star for star in cluster.filtered if not (star.b_r-cluster.reddening > xmax and star.g_mag > cluster.cltpy)]
        cluster.bounded = subList
        #Windowed properties (over the xmin to xmax range)
        cluster.meanBoundedProxyMass = np.mean([a.proxyMass for a in subList])
        cluster.totalBoundedProxyMass = np.sum([a.proxyMass for a in subList])
        cluster.numBounded = len(subList)
        cluster.fractionBounded = len(subList)/len(cluster.filtered)
        cluster.mean_bounded_b_r = np.mean([a.b_r for a in subList])
        cluster.mean_bounded_g_mag = np.mean([a.g_mag for a in subList])
        # Optionally persist and evict to keep memory use bounded over
        # long batch runs.
        if saveCl:
            saveClusters([cl])
            saveResults([cl])
        if unloadCl:
            unloadClusters([cl])
def tryFits(fitVar='fit_age'):
    """Rank linear correlations between result attributes of open clusters.

    With fitVar='all', every float attribute is regressed against every other
    and the strongest r^2 pairs are printed; otherwise only `fitVar` against
    each other float attribute.
    """
    from scipy.stats import linregress
    global resultsIn
    global resultList
    global props
    global r2
    if not resultsIn:
        loadResults()
    cList = []
    for res in resultList:
        if res.clType.lower() == "open":
            cList.append(res)
    if 'all' in fitVar:
        #List of plottable variables
        props = dir(cList[0])
        props = [a for a in props if not '__' in a]
        propList = [a for a in props if type(getattr(cList[0],a)) == float]
        propList.remove('turnPoint')
        r2 = []
        # NOTE(review): each unordered pair is scanned twice (a,b) and (b,a);
        # r^2 is symmetric so the printout contains duplicates.
        for pr in propList:
            #List of plottable variables
            props = dir(cList[0])
            props = [a for a in props if not '__' in a]
            props = [a for a in props if type(getattr(cList[0],a)) == float]
            props.remove('turnPoint')
            props.remove(pr)
            for prop in props:
                x = [getattr(a, pr) for a in cList]
                y = [getattr(a, prop) for a in cList]
                reg = linregress(x,y)
                r2.append((pr,prop,reg[2]**2))
        r2 = sorted(r2,key = lambda x: x[2],reverse=True)
        # NOTE(review): header says "Top 100" but 200 entries are printed.
        print("Top 100 r^2 values:")
        for r in r2[:200]:
            print(f"{r[0]} | {r[1]} | {r[2]}")
    else:
        #List of plottable variables
        props = dir(cList[0])
        props = [a for a in props if not '__' in a]
        props = [a for a in props if type(getattr(cList[0],a)) == float]
        props.remove('turnPoint')
        props.remove(fitVar)
        r2 = []
        for prop in props:
            x = [getattr(a, fitVar) for a in cList]
            y = [getattr(a, prop) for a in cList]
            reg = linregress(x,y)
            r2.append((prop,reg[2]**2))
        r2 = sorted(r2,key = lambda x: x[1],reverse=True)
        print("Top 20 r^2 values:")
        for r in r2[:20]:
            print(f"{r[0]} | {r[1]}")
def prelimPlot(cl):
    """Quick-look diagnostics for an already-loaded cluster name `cl`:
    sky positions, proper-motion space, and the raw CMD.
    """
    import matplotlib.pyplot as plt
    cluster = clusters[cl]
    # Sky positions
    plt.scatter([a.ra for a in cluster.unfilteredWide],[a.dec for a in cluster.unfilteredWide],s=0.1)
    plt.figure()
    # Proper-motion space
    plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1)
    # plt.figure()
    # plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1,c=[a.par for a in cluster.unfilteredWide])
    # plt.set_cmap('cool')
    # clb = plt.colorbar()
    plt.figure()
    # Raw color-magnitude diagram (magnitude axis inverted)
    plt.scatter([a.b_r for a in cluster.unfilteredWide],[a.g_mag for a in cluster.unfilteredWide],s=0.1)
    plt.gca().invert_yaxis()
    # plt.figure()
    # plt.scatter([a.par for a in cluster.unfilteredWide],[a.par for a in cluster.unfilteredWide],s=0.1,c=[(a.pmra**2 + a.pmdec**2)**0.5 for a in cluster.unfilteredWide])
    # plt.set_cmap('cool')
| 37.599953 | 457 | 0.588674 | try:
runCount += 1
except:
isoIn = False
clIn = False
cataIn = False
closePlots = False
resultsIn = False
clusterList = []
clusters=[]
isochrones = []
isoList = []
catalogue = []
runCount = 1
class resultClusterObj:
    """Flat, list-free snapshot of a clusterObj.

    Copies only scalar (float/np.float64/int, coerced to float) and string
    attributes; list/array attributes are intentionally dropped so the object
    pickles small and exports cleanly to CSV.
    """
    def __init__(self,cl):
        import numpy as np
        # NOTE(review): `properties` is declared global and thus leaks to
        # module scope -- confirm that is intended.
        global properties
        properties = [a for a in dir(cl) if not a.startswith('_')]
        for prop in properties:
            # eval/exec on attribute names is bounded by dir(cl) here.
            if eval(f"type(cl.{prop})") == float or eval(f"type(cl.{prop})") == np.float64 or eval(f"type(cl.{prop})") == int:
                exec(f"self.{prop} = float(cl.{prop})")
            elif eval(f"type(cl.{prop})") == str:
                exec(f"self.{prop} = cl.{prop}")
        self.name = cl.name
        self.clType = cl.clType
class clusterObj:
    """Container for one cluster's Gaia data, derived statistics and paths.

    Star lists are filled by readClusters()/filtering; the mean/stdev fields
    by calcStats(); the *_min/*_max bounds come from the catalogue (-99 means
    "not set").
    """
    def __init__(self,name='genericCluster',basedir='clusters/',brightThreshold=15):
        # Filesystem layout for this cluster's data and plot output
        self.basedir = basedir
        self.dataPath = self.basedir + f"{name}/data/"
        self.imgPath = self.basedir + f"{name}/plots/"
        # Star lists populated during reading/filtering
        self.unfilteredWide = []
        self.unfilteredNarrow = []
        self.filtered = []
        self.mag = []
        self.iso = []
        self.condensed = []
        self.condensed0 = []
        self.condensedInit=[]
        self.unfilteredBright = []
        self.filteredBright = []
        self.brightmag = []
        self.distFiltered = []
        self.binaries = []
        self.stars = []
        self.brightThreshold = brightThreshold
        # Summary statistics, filled by calcStats()
        self.mean_par = 0
        self.stdev_par = 0
        self.mean_ra = 0
        self.mean_dec = 0
        self.stdev_ra = 0
        self.stdev_dec = 0
        self.mean_pmra = 0
        self.stdev_pmra = 0
        self.mean_pmdec = 0
        self.stdev_pmdec = 0
        self.mean_a_g = 0
        self.stdev_a_g = 0
        self.mean_e_bp_rp = 0
        self.stdev_e_bp_rp = 0
        self.mean_par_over_ra = 0
        self.stdev_par_over_ra = 0
        self.dist_mod = 0
        self.turnPoint = 0
        self.reddening = 0
        self.radDist = 0
        self.massLoaded = False
        self.name = name
        self.clType = "None"
        # Catalogue-supplied filter bounds; -99 is the "unset" sentinel
        self.pmra_min = -99
        self.pmra_max = -99
        self.pmdec_min = -99
        self.pmdec_max = -99
        self.par_min = -99
        self.par_max = -99
        self.cltpx = -99
        self.cltpy = -99
        self.noise_cutoff = -99
        # Create the data/plot directories on first use.
        # NOTE(review): os.mkdir assumes basedir/<name>/ already exists;
        # it raises FileNotFoundError otherwise -- confirm intended.
        import os
        if not os.path.isdir(self.dataPath):
            os.mkdir(self.dataPath)
        if not os.path.isdir(self.imgPath):
            os.mkdir(self.imgPath)
        if not os.path.isdir(f"{self.imgPath}/png"):
            os.mkdir(f"{self.imgPath}/png")
class starObj:
    """One Gaia source row, with every column coerced to float plus a few
    derived quantities (membership flags, radial distance, ratios).

    Arguments arrive as strings straight from the CSV reader.
    NOTE(review): the pseudocolor* parameters are accepted but never stored.
    """
    def __init__(self,name,source_id,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err,
                 ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr,
                 astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_nu_eff,
                 pseudocolor,pseudocolor_err,ra_pseudocolor_corr,dec_pseudocolor_corr,par_pseudocolor_corr,pmra_pseudoclor_corr,pmdec_pseudocolor_corr,
                 astro_sigma5d,duplicated_source,
                 g_flux,g_flux_err,g_mag,
                 b_flux,b_flux_err,b_mag,
                 r_flux,r_flux_err,r_mag,
                 b_over_r_excess,b_r,b_g,g_r,
                 radvel,radvel_err,radvel_num_transits,radvel_teff,radvel_feh,
                 l,b,long,lat):
        import numpy as np
        # Identification and astrometry
        self.name = name
        self.source_id = source_id
        self.ra = float(ra)
        self.ra_err = float(ra_err)
        self.dec = float(dec)
        self.dec_err = float(dec_err)
        self.par = float(par)
        self.par_err = float(par_err)
        self.par_over_err = float(par_over_err)
        self.pmra = float(pmra)
        self.pmra_err = float(pmra_err)
        self.pmdec = float(pmdec)
        self.pmdec_err = float(pmdec_err)
        # Astrometric correlation coefficients
        self.ra_dec_corr = float(ra_dec_corr)
        self.ra_par_corr = float(ra_par_corr)
        self.ra_pmra_corr = float(ra_pmra_corr)
        self.ra_pmdec_corr = float(ra_pmdec_corr)
        self.dec_par_corr = float(dec_par_corr)
        self.dec_pmra_corr = float(dec_pmra_corr)
        self.dec_pmdec_corr = float(dec_pmdec_corr)
        self.par_pmra_corr = float(par_pmra_corr)
        self.par_pmdec_corr = float(par_pmdec_corr)
        self.pmra_pmdec_corr = float(pmra_pmdec_corr)
        # Astrometric solution quality
        self.astro_n_obs = float(astro_n_obs)
        self.astro_n_good_obs = float(astro_n_good_obs)
        self.astro_n_bad_obs = float(astro_n_bad_obs)
        self.astro_gof = float(astro_gof)
        self.astro_chi2 = float(astro_chi2)
        self.astro_noise = float(astro_noise)
        self.astro_noise_sig = float(astro_noise_sig)
        self.astro_nu_eff = float(astro_nu_eff)
        self.astro_sigma5d = float(astro_sigma5d)
        self.duplicated_source = bool(duplicated_source)
        # Photometry (G, BP, RP) and colors
        self.g_flux = float(g_flux)
        self.g_flux_err = float(g_flux_err)
        self.g_mag = float(g_mag)
        self.b_flux = float(b_flux)
        self.b_flux_err = float(b_flux_err)
        self.b_mag = float(b_mag)
        self.r_flux = float(r_flux)
        self.r_flux_err = float(r_flux_err)
        self.r_mag = float(r_mag)
        self.b_over_r_excess = float(b_over_r_excess)
        self.b_r = float(b_r)
        self.b_g = float(b_g)
        self.g_r = float(g_r)
        # Radial velocity block
        self.radvel = float(radvel)
        self.radvel_err = float(radvel_err)
        self.radvel_num_transits=float(radvel_num_transits)
        self.radvel_teff = float(radvel_teff)
        self.radvel_feh = float(radvel_feh)
        # Galactic and ecliptic coordinates
        self.l = float(l)
        self.b = float(b)
        self.long = float(long)
        self.lat = float(lat)
        # Derived/bookkeeping fields filled in later
        self.member = 0
        self.binary = 0
        self.radDist = 0
        # NOTE(review): these ratios raise ZeroDivisionError when the
        # denominator column is exactly 0 (e.g. pmra/pmdec) -- confirm the
        # upstream preFilter guarantees non-zero values.
        self.par_over_ra = float(par)/float(ra)
        self.par_over_dec = float(par)/float(dec)
        self.par_over_pmra = float(par)/float(pmra)
        self.par_over_pmdec = float(par)/float(pmdec)
        self.normRA = self.ra*np.cos(self.dec*np.pi/180)
        self.vosaPoints = []
        self.excess = 0
class isochroneObj:
    """A single theoretical isochrone plus its photometry arrays."""

    def __init__(self,age=404,feh=404,afe=404,y=404,basedir='isochrones/',subdir='processed',isodir=''):
        # Physical parameters (404 is the project's "unset" sentinel)
        self.age = age
        self.feh = feh
        self.afe = afe
        self.y = y
        # On-disk location of this isochrone
        self.basedir = basedir
        self.subdir = subdir
        self.isodir = isodir
        # Unique label used for pickling and lookups
        self.name = f"feh_{feh}_afe_{afe}_age_{age}_y_{y}"
        # Per-star data and fit products, filled in later
        self.starList = []
        self.coeff = []
        self.g = []
        self.br = []
        self.distance = 0
class fakeStarObj:
    """Synthetic star holding Gaia-band magnitudes and derived color indices."""

    def __init__(self,g_mag,b_mag,r_mag):
        # Raw magnitudes
        self.g_mag = g_mag
        self.b_mag = b_mag
        self.r_mag = r_mag
        # Color indices derived from the magnitudes
        self.b_r = self.b_mag - self.r_mag
        self.b_g = self.b_mag - self.g_mag
        self.g_r = self.g_mag - self.r_mag
        # Fitting score, assigned later during isochrone matching
        self.score = 0
class mistStar:
    """One row of a MIST isochrone table exposed as attributes.

    `properties` is an iterable of (column_name, numeric_value) pairs.
    """

    def __init__(self,properties):
        for column, value in properties:
            # The tables mark undefined bands with +/-inf; clamp to 50.
            if "inf" in str(value):
                value = 50
            setattr(self, column, value)
class condensedPoint:
    """A weighted (color, magnitude) node of a condensed CMD ridge line."""

    def __init__(self,b_r,g_mag,weight):
        self.b_r, self.g_mag, self.weight = b_r, g_mag, weight
class vosaPoint:
    """One photometric point from a VOSA SED: observed vs model flux plus
    the measured excess for this filter."""

    def __init__(self,filterID,wavelength,obs_flux,obs_error,flux,flux_error,excess):
        self.filterID = filterID
        self.wavelength = wavelength
        # Observed flux and its uncertainty
        self.obs_flux, self.obs_error = obs_flux, obs_error
        # Model flux and its uncertainty
        self.flux, self.flux_error = flux, flux_error
        self.excess = excess
class cataloguedCluster():
    """Catalogue entry holding the hand-tuned filter bounds for one cluster.

    All values arrive as strings from catalogue.csv; numeric fields are
    coerced to float once here.
    """

    def __init__(self,name,clType,pmra_min,pmra_max,pmdec_min,pmdec_max,par_min,par_max,cltpx,cltpy,noise_cutoff):
        # Identification
        self.name = str(name)
        self.clType = str(clType)
        # Numeric bounds, coerced in declaration order
        numeric = (("pmra_min", pmra_min), ("pmra_max", pmra_max),
                   ("pmdec_min", pmdec_min), ("pmdec_max", pmdec_max),
                   ("par_min", par_min), ("par_max", par_max),
                   ("cltpx", cltpx), ("cltpy", cltpy),
                   ("noise_cutoff", noise_cutoff))
        for attr, raw in numeric:
            setattr(self, attr, float(raw))
class Datum:
    """Selectable point for the lasso tool: colorin (red) when selected,
    colorout (blue) otherwise."""
    from matplotlib import colors as mcolors
    # Shared RGBA constants for the two selection states
    colorin = mcolors.to_rgba("red")
    colorout = mcolors.to_rgba("blue")
    def __init__(self, x, y, include=False):
        self.x = x
        self.y = y
        # `include` marks the point as initially selected
        if include:
            self.color = self.colorin
        else:
            self.color = self.colorout
class LassoManager:
    """Interactive matplotlib lasso selector for hand-picking stars on a CMD.

    Points inside the lassoed path are recolored and the corresponding star
    objects are copied into cluster.binaries.
    """
    def __init__(self, ax, data, cluster):
        from matplotlib.collections import RegularPolyCollection
        self.axes = ax
        self.canvas = ax.figure.canvas
        self.data = data
        self.cluster = cluster
        self.Nxy = len(data)
        facecolors = [d.color for d in data]
        self.xys = [(d.x, d.y) for d in data]
        # One hexagon marker per datum, colored by selection state
        self.collection = RegularPolyCollection(
            6, sizes=(5,),
            facecolors=facecolors,
            offsets=self.xys,
            transOffset=ax.transData)
        ax.add_collection(self.collection)
        self.cid = self.canvas.mpl_connect('button_press_event', self.on_press)
    def callback(self, verts):
        # Called by the Lasso widget with the polygon vertices when released.
        from matplotlib import path
        global coords
        global clusters
        cluster = clusters[self.cluster.name]
        facecolors = self.collection.get_facecolors()
        p = path.Path(verts)
        ind = p.contains_points(self.xys)
        cluster.binaries = []
        for i in range(len(self.xys)):
            if ind[i]:
                facecolors[i] = Datum.colorin
                # Map the selected point back to its star by matching b_r.
                # NOTE(review): matches on b_r only -- stars sharing the same
                # color resolve to the first match.
                star = cluster.filtered[[a.b_r for a in cluster.filtered].index(self.xys[i][0])]
                cluster.binaries.append(star)
            else:
                facecolors[i] = Datum.colorout
        self.canvas.draw_idle()
        # Release the canvas lock taken in on_press and drop the widget.
        self.canvas.widgetlock.release(self.lasso)
        del self.lasso
    def on_press(self, event):
        from matplotlib.widgets import Lasso
        # Ignore clicks while another widget owns the canvas or outside axes.
        if self.canvas.widgetlock.locked():
            return
        if event.inaxes is None:
            return
        self.lasso = Lasso(event.inaxes,
                           (event.xdata, event.ydata),
                           self.callback)
        self.canvas.widgetlock(self.lasso)
def clusterCatalogue(types='all'):
    """Load catalogue.csv into cataloguedCluster objects (global `catalogue`)."""
    import numpy as np
    import pandas as pd
    global data
    global catalogue
    global cataIn
    # Read every row as strings; cataloguedCluster does the numeric coercion.
    data = pd.read_csv("catalogue.csv",sep=',',dtype=str)
    data = data.to_numpy(dtype=str)
    cata = []
    for row in data:
        cata.append(cataloguedCluster(*row))
    if types == 'all':
        catalogue = cata
        cataIn = True
        return
    # NOTE(review): for any types != 'all' the parsed rows are discarded and
    # the global catalogue is left untouched -- confirm this is intended.
def readClusters(cList=["M67"],basedir="clusters/",smRad=0.35):
    """Read wide-field Gaia CSVs for the named clusters into clusterObj
    instances, applying catalogue properties, NaN pre-filtering and basic
    outlier removal.

    NOTE(review): `smRad` is accepted but unused in this body.
    """
    import numpy as np
    import pandas as pd
    global clusterList
    global clusters
    global stars
    global clIn
    global catalogue
    # Drop any already-loaded copy of the requested clusters.
    try:
        if clIn and len(clusterList) > 0:
            for clname in cList:
                if clname in clusters:
                    unloadClusters([clname])
    except:
        clusterList=[]
    if not cataIn:
        clusterCatalogue()
    for clname in cList:
        cluster = clusterObj(name=clname,basedir=basedir)
        # Copy the catalogue's hand-tuned properties onto the new cluster.
        reference = None
        for cl in catalogue:
            if str(cl.name) == str(clname):
                reference = cl
                print(f"Catalogue match for {clname} found")
                break
        if reference == None:
            print(f"Catalogue match for {clname} was not found, please create one")
            continue
        properties = [a for a in dir(reference) if not a.startswith('_')]
        print(properties)
        for p in properties:
            prop = getattr(reference,p)
            exec(f"cluster.{p} = prop")
            try:
                # Values <= -98 are the catalogue's "unset" sentinel.
                if prop <= -98:
                    print(f"{clname} does not have a specified catalogue value for {p}")
            except:
                continue
        # Load the raw Gaia export as strings; starObj coerces per-column.
        starlist = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
        stars = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
        starlist = starlist.to_numpy(dtype=str)
        print(f"{clname} initial length: {len(starlist)}")
        starlist = preFilter(starlist)
        print(f"{clname} post-prefiltered length: {len(starlist)}")
        # NOTE(review): ramean/decmean are computed but never used below.
        ramean = np.mean([float(x) for x in starlist[:,1]])
        decmean = np.mean([float(x) for x in starlist[:,3]])
        for s in starlist:
            star = starObj(*s)
            cluster.unfilteredWide.append(star)
            if np.less_equal(star.g_mag,cluster.brightThreshold):
                cluster.unfilteredBright.append(star)
        clusterList.append(cluster)
        calcStats(cluster,mode='narrow')
        # YSO fields are too sparse/irregular for the generic outlier cut.
        if not 'YSO' in clname:
            rmOutliers()
    clIn = True
    toDict()
def pad(string, pads):
    """Re-wrap a comma-separated string so each output line holds `pads` fields."""
    fields = string.split(',')
    rows = []
    for start in range(0, len(fields), pads):
        rows.append(','.join(fields[start:start + pads]))
    return '\n'.join(rows)
def readIso(basedir='isochrones/',subdir='MIST_raw/'):
    """Parse raw MIST isochrone table files into isochroneObj instances.

    Relies on the MIST fixed layout: global header on lines 6/8 (0-indexed
    5/7), column labels on line 13 (index 12), first data row at index 13,
    and a 5-line separator between consecutive isochrones -- TODO confirm
    against the exact MIST version in use.
    """
    #Imports
    import os
    import re
    global isochrone_headers
    global isoList
    global isoIn
    path = basedir + subdir
    isoList = []
    for fn in os.listdir(path):
        #Read in file
        main = open(path+fn).read()
        main = main.split("\n")
        #Relevant variables from headers
        N_iso = int(main[7].split("=")[1])
        index = 13
        varList = re.sub("\s+", ",", main[5].strip()).split(",")
        afe = varList[4]
        feh = varList[3]
        y = varList[1]
        z = varList[2]
        v_vcrit = varList[5]
        #Column labels
        #Replace any number of spaces with a single comma, then replace a few problematic phrases and split the list by commas
        isochrone_headers = re.sub("\s+", ",", main[12].replace("2MASS","TwoMASS").replace("[Fe/H]","feh").strip()).split(",")[1:]
        for idx in range(0,N_iso):
            # Star count for this isochrone lives 3 lines above its data.
            N_stars = int(re.sub("\s+", "," , main[index-3].split("=")[1]).split(",")[1])
            #print(f"Iso = {idx} N_stars = {N_stars}")
            #Populate a single isochrone
            stars = []
            for i in range(index,index+N_stars):
                #Send the header and values to the mistStar object
                #print(f"i = {i}")
                values = [float(a) for a in re.sub("\s+", "," , main[i].strip()).split(",")]
                properties = zip(isochrone_headers,values)
                stars.append(mistStar(properties))
            #Create the isochrone from the list of stars
            # values[1] is log10(age/yr); convert to Gyr rounded to 3 places.
            age = round(10**values[1]/1e9,3)
            iso = isochroneObj(age,feh,afe,y)
            iso.starList = stars
            iso.br = [star.Gaia_BP_EDR3-star.Gaia_RP_EDR3 for star in stars]
            iso.g = [star.Gaia_G_EDR3 for star in stars]
            isoList.append(iso)
            index += N_stars + 5
    isoIn = True
    toDict()
def checkIsoDupes():
    """Print the name of every isochrone that appears more than once in isoList."""
    global isochrones
    global isoList
    seen = set()
    for iso in isoList:
        if iso.name in seen:
            print(iso.name)
        else:
            seen.add(iso.name)
def processIso(basedir='isochrones/',subdir='raw/'):
    """Convert raw isochrone text files into per-age CSVs under processed/.

    NOTE(review): this copy of the function is corrupted -- the two
    `.split('` lines below are truncated mid-string-literal (the original
    split markers evidently contained a '#' and were destroyed by an
    automated comment-stripping pass), and `temp.strip()` is called on what
    .split() would return (a list). Restore this function from version
    control before use; the code below is preserved as found.
    """
    #Imports
    import os
    import re
    path = basedir + subdir
    for fn in os.listdir(path):
        main = open(path+fn).read()
        part = main.split('\n\n\n')
        part[0] = part[0].split('
        for a in range(len(part)):
            temp = part[a].split('
            age = temp.strip()
            out = part[a].split('\n',2)[2]
            out = re.sub("\s+", ",", out.strip())
            out = pad(out,8)
            filename = f"{basedir}processed/"+fn.split('.')[0]+'/'+age+".csv"
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename,"w") as f:
                f.write(out)
def readIsochrones(basedir='isochrones/',subdir='processed/'):
    """Load pre-processed per-age isochrone CSVs into isochroneObj instances.

    Folder names encode metallicity like "fehm05afep0[y...]": a leading 'm'
    or 'p' gives the sign and the next two digits the value/10 -- TODO
    confirm this naming convention holds for every folder.
    """
    #Imports
    import os
    import numpy as np
    global isoList
    global isoIn
    isoList=[]
    for folder in os.listdir(basedir+subdir):
        for fn in os.listdir(basedir+subdir+folder):
            #Get the age and metallicities of the isochrones
            ageStr = fn.split('.csv')[0]
            fehStr = folder.split('feh')[1].split('afe')[0]
            afeStr = folder.split('afe')[1].split('y')[0]
            if 'y' in folder:
                yStr = folder.split('y')[1]
            else:
                yStr = '0'
            # Two digits after the sign char, scaled by 10 (e.g. "m05" -> 0.5)
            feh = float(fehStr[1]+fehStr[2])/10
            afe = float(afeStr[1])/10
            age = float(ageStr)
            y = int(yStr)
            if fehStr[0] == 'm':
                feh = feh*-1
            if afeStr[0] == 'm':
                afe = afe*-1
            #Debug
            #print(f"folder:{folder} fn:{fn} fehStr:{fehStr} feh:{feh} afeStr:{afeStr} afe:{afe} ageStr:{ageStr} age:{age}")
            #Create isochone object
            iso = isochroneObj(age=age,feh=feh,afe=afe,y=y,basedir=basedir,subdir=subdir,isodir=folder+'/')
            isoArr = np.genfromtxt(basedir+subdir+folder+"/"+fn, delimiter=",")
            # Columns 5/6/7 are the G, BP, RP magnitudes in the processed CSVs.
            for s in isoArr:
                star = fakeStarObj(s[5],s[6],s[7])
                iso.starList.append(star)
                iso.br.append(s[6]-s[7])
                iso.g.append(s[5])
            isoList.append(iso)
    isoIn = True
    toDict()
def preFilter(starList):
    """Drop any row with NaN in a required column and return the survivors
    as a numpy array.

    Required columns: 2-12 (astrometry), 42/45/48 (G, BP, RP magnitudes)
    and 50-52 (color indices). Values are string-typed and parsed per-cell.
    """
    import numpy as np
    required = list(range(2, 13)) + [42, 45, 48] + list(range(50, 53))
    kept = [row for row in starList
            if not any(np.isnan(float(row[c])) for c in required)]
    return np.array(kept)
def rmOutliers():
    """Remove gross astrometric outliers from every loaded cluster's
    unfilteredWide (and, where present, unfilteredNarrow) list.

    NOTE(review): `scale` and `pmpthreshold` are assigned but never used;
    the commented-out variants suggest the thresholds were once scaled by
    cluster type -- confirm before relying on the globular/open distinction.
    """
    #Imports
    global clusterList
    import numpy as np
    for cluster in clusterList:
        if cluster.clType.lower() == "globular":
            scale = 4
        else:
            scale = 1.5
        #Variables
        pmthreshold = 5
        pmpthreshold = 50
        parthreshold = 5
        posthreshold = 5
        toRemove=[]
        #print(cluster.mean_pmra,cluster.mean_pmdec,cluster.stdev_pmra,cluster.stdev_pmdec)
        #print(len(cluster.unfilteredWide))
        #Classifies outliers
        for star in cluster.unfilteredWide:
            # Hard-coded special case: NGC188's field wraps in RA.
            if cluster.name == "NGC188":
                if star.ra > 100:
                    toRemove.append(star)
            #print(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),star.pmra,star.pmdec)
            # Reject on proper-motion distance, sky distance, or parallax.
            if np.greater(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),pmthreshold) or np.greater(np.sqrt(((star.ra-cluster.mean_ra)*np.cos(np.pi/180*star.dec))**2+(star.dec-cluster.mean_dec)**2),posthreshold) or np.greater(abs(star.par),parthreshold):
            #if np.greater(np.sqrt((star.pmra-cluster.mean_pmra)**2+(star.pmdec-cluster.mean_pmdec)**2),threshold):
                toRemove.append(star)
        #Removes the outliers from the array
        for rm in toRemove:
            cluster.unfilteredWide.remove(rm)
            try:
                cluster.unfilteredNarrow.remove(rm)
            except ValueError:
                pass
        #print(len(cluster.unfilteredWide))
def calcStats(cluster,mode='filtered'):
    """Compute per-cluster mean/stdev astrometric statistics and the distance
    modulus, then stamp each star with its radial distance from the mean
    center.

    `cluster` may be a name or a clusterObj; `mode` selects which star list
    to aggregate ('bright', 'narrow' or 'filtered').
    NOTE(review): checkLoaded() expects a name -- passing an object here
    (as readClusters does) relies on it already being present; confirm.
    """
    #Imports
    import numpy as np
    #Reads in all the values for a cluster
    par=[]
    par_err=[]
    ra=[]
    dec=[]
    pmra=[]
    pmdec=[]
    gmag = []
    br = []
    # a_g=[]
    # e_bp_rp=[]
    loopList=[]
    checkLoaded([cluster])
    if type(cluster) == str:
        cluster = clusters[cluster]
    if mode == 'bright':
        loopList = cluster.filteredBright
    elif mode == 'narrow':
        loopList = cluster.unfilteredNarrow
    elif mode == 'filtered':
        loopList = cluster.filtered
    for star in loopList:
        par.append(star.par)
        par_err.append(star.par_err)
        pmra.append(star.pmra)
        pmdec.append(star.pmdec)
        ra.append(star.ra)
        dec.append(star.dec)
        gmag.append(star.g_mag)
        br.append(star.b_r)
        # if not np.isnan(star.a_g) and not star.a_g == 0:
        #     a_g.append(star.a_g)
        # if not np.isnan(star.e_bp_rp) and not star.e_bp_rp == 0:
        #     e_bp_rp.append(star.e_bp_rp)
    #Calculate the statistics
    # NOTE(review): gmag/br are collected but not aggregated below.
    cluster.mean_par = np.mean(par[:])
    cluster.mean_ra = np.mean(ra[:])
    cluster.mean_dec = np.mean(dec[:])
    cluster.stdev_ra = np.std(ra[:])
    cluster.stdev_dec = np.std(dec[:])
    cluster.stdev_par = np.std(par[:])
    cluster.mean_pmra = np.mean(pmra[:])
    cluster.stdev_pmra = np.std(pmra[:])
    cluster.mean_pmdec = np.mean(pmdec[:])
    cluster.stdev_pmdec = np.std(pmdec[:])
    # cluster.mean_a_g = np.mean(a_g[:])
    # cluster.stdev_a_g = np.std(a_g[:])
    # cluster.mean_e_bp_rp = np.mean(e_bp_rp[:])
    # cluster.stdev_e_bp_rp = np.std(e_bp_rp[:])
    cluster.mean_par_over_ra = np.mean([x/y for x,y in zip(par,ra)])
    cluster.stdev_par_over_ra = np.std([x/y for x,y in zip(par,ra)])
    cluster.mean_par_err = np.mean(par_err[:])
    # Distance modulus from the mean parallax (mas -> pc via 1000/par).
    cluster.dist_mod = 5*np.log10(1000/cluster.mean_par)-5
    for star in loopList:
        star.radDist = np.sqrt((star.ra-cluster.mean_ra)**2+(star.dec-cluster.mean_dec)**2)
        star.normRadDist = np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-cluster.mean_ra*np.cos(cluster.mean_dec*np.pi/180))**2+(star.dec-cluster.mean_dec)**2)
def saveClusters(cList):
    """Pickle each named cluster to its data directory (filtered.pk1),
    after refreshing the saved result summaries."""
    #Imports
    import dill
    saveResults(cList)
    #Creates a pickle file with all of the saved instances
    for cl in cList:
        cluster = clusters[cl]
        #print(cluster.name,id(cluster))
        with open(f"{cluster.dataPath}filtered.pk1", 'wb') as output:
            dill.dump(cluster, output)
def saveIsochrones():
    """Pickle every loaded isochrone to <basedir>pickled/<name>.pk1.

    NOTE(review): declares `global clusterList` but iterates `isoList` --
    the global statement looks like a copy-paste leftover.
    """
    #Imports
    import dill
    global clusterList
    #Creates a pickle file with all of the saved instances
    for iso in isoList:
        with open(f"{iso.basedir}pickled/{iso.name}.pk1", 'wb') as output:
            dill.dump(iso, output)
def loadClusters(clusterNames=["M67"],basedir='clusters/'):
    """Restore pickled cluster instances from disk into the global clusterList.

    Any cluster already loaded under the same name is unloaded first so the
    on-disk copy wins.
    """
    import dill
    global clusterList
    global clusters
    global clIn
    for name in clusterNames:
        # Drop any stale in-memory copy before reloading from disk
        if name in clusters:
            unloadClusters([name])
        with open(f"{basedir}{name}/data/filtered.pk1",'rb') as src:
            clusterList.append(dill.load(src))
    clIn = True
    toDict()
def loadIsochrones(basedir='isochrones/'):
    """Load every pickled isochrone found under basedir/pickled/ into isoList."""
    import dill
    import os
    global isoList
    global isoIn
    isoList = []
    for fname in os.listdir(basedir + "pickled/"):
        # Each file is one dill-pickled isochrone instance
        with open(f"{basedir}pickled/{fname}",'rb') as src:
            isoList.append(dill.load(src))
    isoIn = True
    toDict()
def unloadClusters(cList=['all']):
    """Remove the named clusters (or every loaded one, if 'all') from memory."""
    global clusterList
    global clusters
    names = [c.name for c in clusterList] if 'all' in cList else cList
    for name in names:
        target = clusters[name]
        clusterList.remove(target)
        clusters.pop(name)
        del target
def dataProcess(cList,load=False,fit=True,unload=True,plotting=True,member=True,save=True,close=True):
    """Batch (re)process a list of clusters end-to-end.

    This method is largely intended for re-processing a bulk batch of clusters
    that have already been processed before, meaning they already have
    condensed point lists or you are already aware of their fitting quality.

    Parameters:
        cList: iterable of cluster names to process.
        load: if True, unpickle saved clusters instead of re-reading and
            re-filtering the raw data.
        fit: run the reddening/isochrone fit (turboFit) per cluster.
        unload: drop each cluster from memory when finished with it.
        plotting: generate the standard plot set per cluster.
        member: run membership/statistics post-processing.
        save: pickle clusters and results back to disk.
        close: close matplotlib figures between stages to cap open figures.
    """
    #Imports
    import matplotlib.pyplot as plt
    global clusterList
    global clusters
    global closePlots
    # Isochrones are required by the fitting stage; lazy-load them once
    if not isoIn:
        loadIsochrones()
    # Clusters with a pre-saved condensed.csv use 'load' condensing; others 'auto'
    loadList = ["M15","M12","M39","M46","M67","NGC188","NGC2355","NGC2158","IC4651","NGC6791","NGC2360","NGC2204"]
    for cl in cList:
        if cl in loadList:
            condensing = "load"
        else:
            condensing = "auto"
        if load:
            loadClusters([cl])
        else:
            # Fresh path: read raw data and run the full filter pipeline
            readClusters([cl])
            turboFilter([cl])
            if close:
                plt.close('all')
        if fit:
            turboFit([cl],condensing=condensing)
        if plotting:
            plot([cl],['pos','pm','cmd','quiver','iso'])
            if close:
                plt.close('all')
        if member:
            # Membership statistics; figures always closed after this stage
            proxyMatch([cl])
            boundedStats([cl],saveCl=False,unloadCl=False)
            membership(cl,mode='filtered')
            membership(cl,mode='bounded',N=75)
            plt.close('all')
        if save:
            saveClusters([cl])
            saveResults([cl])
        if unload:
            unloadClusters([cl])
def turboFilter(cl=["all"]):
    """Run the full filtering pipeline on each named cluster.

    Stages, in order: proper-motion filter on the bright subset (for
    statistics), parallax/distance filter on the wide field, proper-motion
    filter on the survivors, per-cluster manual exclusions, then a faint-end
    noise cutoff. Survivor counts are printed after each stage.
    """
    #Imports
    global clusterList
    cList = checkLoaded(cl)
    for clus in cList:
        cluster = clusters[clus]
        # Proper-motion cut on the bright subset; feeds the 'bright' statistics
        cluster.filteredBright,cluster.brightmag = pmFilter(cluster.unfilteredBright,cluster.name)
        print(f"==========================={cluster.name}===========================")
        print(f"bright unf/pm fil: {len(cluster.unfilteredBright)} / {len(cluster.filteredBright)}")
        calcStats(cluster,mode='bright')
        # Parallax cut over the wide field, then proper-motion cut on survivors
        distFilter(cluster)
        print(f"dist(all): {len(cluster.distFiltered)}")
        cluster.filtered,cluster.mag = pmFilter(cluster.distFiltered,cluster.name)
        #Manual filtering of extraneous points
        cluster.filtered,cluster.mag = manualFilter(cluster)
        print(f"pm(all): {len(cluster.filtered)}")
        customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')
        # Truncate the faint end once astrometric noise accumulates
        magnitude = cutNoise(cluster)
        print(f"noise cutoff: mag {magnitude} length {len(cluster.filtered)}")
        customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')
        calcStats(cluster,mode='filtered')
    setFlag()
def manualFilter(cluster):
    """Hand-tuned outlier removal for clusters whose fits need special-casing.

    Currently only M35 gets special treatment; every other cluster passes
    through unchanged.
    """
    # M35: drop bright, red extraneous points that prohibit the fit
    if "M35" not in cluster.name:
        return cluster.filtered, cluster.mag
    kept = [s for s in cluster.filtered if s.g_mag > 9 or s.b_r < 1]
    return kept, magList(kept)
def magList(filtered):
    """Build an (N, 2) array of [B-R colour, G magnitude] rows for the stars.

    Bug fix: the original built the array but fell off the end of the
    function without returning it, so every caller received None.
    """
    import numpy as np
    mag = np.empty((0,2))
    for star in filtered:
        mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    return mag
def pmFilter(starList,name):
    """Keep only stars inside the cluster's elliptical proper-motion window.

    Returns (filtered star list, (N,2) array of [b_r, g_mag]); asserts that
    more than one star survives.
    """
    import numpy as np
    cluster = clusters[name]
    assert cluster.name == name
    # Semi-axes and center of the ellipse in (pmra, pmdec) space
    ra_half = (cluster.pmra_max - cluster.pmra_min) / 2
    dec_half = (cluster.pmdec_max - cluster.pmdec_min) / 2
    ra_mid = cluster.pmra_min + ra_half
    dec_mid = cluster.pmdec_min + dec_half
    print(ra_mid,dec_mid)
    filtered = []
    mag = np.empty((0,2))
    for star in starList:
        inside = ((star.pmra - ra_mid)**2 / ra_half**2
                  + (star.pmdec - dec_mid)**2 / dec_half**2) <= 1
        if inside:
            filtered.append(star)
            mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    assert len(filtered) > 1
    print(len(filtered))
    return filtered,mag
def distFilter(cluster):
    """Append stars passing a parallax cut to cluster.distFiltered.

    With catalogue bounds (par_min/par_max both nonzero) the cut is a simple
    open interval; otherwise fall back to a spread-scaled window around the
    mean parallax.
    """
    import numpy as np
    have_bounds = cluster.par_min != 0 and cluster.par_max != 0
    if have_bounds:
        print(f"{cluster.name} filtered using min & max parallax values")
        for star in cluster.unfilteredWide:
            if cluster.par_min < star.par < cluster.par_max:
                cluster.distFiltered.append(star)
    else:
        # Keep stars within threshold*stdev of the mean parallax
        threshold = 1.5*cluster.mean_par
        print(f"{cluster.name} filtered using mean parallax")
        for star in cluster.unfilteredWide:
            if np.abs(star.par - cluster.mean_par) <= threshold*cluster.stdev_par:
                cluster.distFiltered.append(star)
def cutNoise(cluster):
    """Truncate cluster.filtered at the brightness where astrometric noise piles up.

    Walks the stars from brightest to dimmest and stops once five stars with
    astro_sigma5d above the cutoff have been seen; everything dimmer is
    dropped. Returns the G magnitude of the last star examined.
    """
    import numpy as np
    ordered = sorted(cluster.filtered, key=lambda st: st.g_mag)
    if cluster.noise_cutoff <= -98:
        # Sentinel value: no cutoff configured for this cluster
        threshold = 1
        print(f"{cluster.name} noise cutoff undefined, using default")
    else:
        threshold = cluster.noise_cutoff
    kept = []
    keptMag = np.empty((0,2))
    noisy = 0
    limit = 5
    for s in ordered:
        if s.astro_sigma5d > threshold:
            noisy += 1
        if noisy >= limit:
            # Fifth noisy star reached: stop here, excluding this star
            break
        kept.append(s)
        keptMag = np.r_[keptMag,[[s.b_r,s.g_mag]]]
    cluster.filtered = kept
    cluster.mag = keptMag
    # Magnitude of the last star examined marks the cutoff depth
    return s.g_mag
def turboFit(cl=["all"],condensing='auto',weighting='pos',tp="catalogue",minScore=0.001):
    """Condense each cluster's CMD and run the reddening/isochrone fit.

    Typical use cases are (auto, pos, catalogue) or (manual, equal, catalogue).
    Manual condensing suspends here and resumes via the GUI callback.
    """
    import time
    global clusterList
    cList = checkLoaded(cl)
    print("=========================Fitting=========================")
    started = time.time()
    if condense(cList,condensing,weighting,tp,minScore) == "Suspended":
        return
    for cluster in cList:
        redFitting(cluster,minScore,weighting)
        # Cumulative time since fitting began, reported per cluster
        print(f"Total {cluster.name} fit runtime: {time.time()-started} seconds")
def redFitting(cluster,minScore,weighting):
    """Scan reddening values and score every isochrone at each one via shapeFit.

    Runs a coarse grid (0.00-0.70 in 0.05 steps), then a fine grid (0.01
    steps within +/-0.05 of the coarse best), and stores the best-fit
    reddening, age, [Fe/H], [alpha/Fe] and Y on the cluster.

    Bug fix: cluster.iso rows come back from shapeFit as numpy *string*
    arrays, so sorting on x[1] compared scores lexicographically
    ("10.0" < "9.0"); both sorts now rank on float(x[1]).
    """
    #Imports
    import numpy as np
    from sys import stdout
    from time import sleep
    global clusterList
    if type(cluster) == str:
        cluster = clusters[cluster]
    cluster.iso = []
    #Coarse grid: 0.00 -> 0.70 in steps of 0.05
    redMin = 0
    redMax = 0.7
    step = 0.05
    redList = [round(x,2) for x in np.arange(redMin,redMax+step,step)]
    for reddening in redList:
        stdout.write(f"\rCurrent reddening value for {cluster.name}: {reddening:.2f} / ({redList[0]:.2f}->{redList[-1]:.2f})")
        shapeFit(cluster,reddening,minScore,weighting)
        stdout.flush()
        sleep(0.1)
    #Rank numerically (scores are stored as strings inside numpy rows)
    cluster.iso = sorted(cluster.iso,key=lambda x: float(x[1]))
    best = float(cluster.iso[0][2])
    print(f"\nCoarse-step reddening for {cluster.name}: {best}")
    #Fine grid: +/-0.05 around the coarse best, skipping values already scored
    subMin = best - 0.05
    subMax = best + 0.05
    substep = 0.01
    if subMin < 0:
        subMin = 0
    subList = [round(x,2) for x in np.arange(subMin,subMax+substep,substep) if not round(x,2) in redList and round(x,2) > subMin and round(x,2) < subMax]
    for reddening in subList:
        stdout.write(f"\rCurrent fine-step reddening value for {cluster.name}: {reddening:.2f} / ({subList[0]:.2f}->{subList[-1]:.2f})")
        shapeFit(cluster,reddening,minScore,weighting)
        stdout.flush()
        sleep(0.1)
    cluster.iso = sorted(cluster.iso,key=lambda x: float(x[1]))
    cluster.reddening = float(cluster.iso[0][2])
    cluster.fit_age = float(isochrones[cluster.iso[0][0]].age)
    cluster.fit_feh = float(isochrones[cluster.iso[0][0]].feh)
    cluster.fit_afe = float(isochrones[cluster.iso[0][0]].afe)
    cluster.fit_y = float(isochrones[cluster.iso[0][0]].y)
    #Unrelated properties but I needed somewhere to assign them
    setattr(cluster,'meanDist',1000/cluster.mean_par)
    #Galactocentric distance via the law of cosines in the galactic plane
    meanL = np.mean([a.l*np.pi/180 for a in cluster.filtered])
    galDist = 8000 #pc, assumed Sun-to-galactic-center distance
    gd = cluster.meanDist**2 + galDist**2 - 2*cluster.meanDist*galDist*np.cos(meanL)
    setattr(cluster,'meanGalacticDist',gd**0.5)
    #NOTE(review): this reports the coarse-step `best`, not the final
    #cluster.reddening chosen after the fine scan -- confirm intent
    print(f"\nReddening for {cluster.name}: {best}")
def shapeFit(cluster,reddening,minScore,weighting):
    """Score every loaded isochrone against the cluster's condensed CMD points
    at the given reddening, appending [iso name, score, reddening] rows to
    cluster.iso. Lower score means better fit.
    """
    #Imports
    import numpy as np
    import shapely.geometry as geom
    global isoList
    # Extinction-to-reddening conversion factor applied to G magnitudes
    conversion = 2.1
    isoFitList = np.empty((0,3))
    for iso in isoList:
        # Isochrone shifted into apparent space: redden the colour and apply
        # distance modulus + extinction to the magnitude
        isoLine = geom.LineString(tuple(zip([x+reddening for x in iso.br],[x+cluster.dist_mod+conversion*reddening for x in iso.g])))
        dist = []
        for star in cluster.condensed:
            starPt = geom.Point(star.b_r,star.g_mag)
            #print(starPt.distance(isoLine))
            # Weighted perpendicular distance from the proxy point to the
            # isochrone curve, floored at minScore*weight so near-perfect
            # points cannot dominate the score
            pointDist = np.abs(starPt.distance(isoLine))*star.weight
            if pointDist < minScore*star.weight:
                pointDist = minScore*star.weight
            dist.append(pointDist**2)
        # Sum of squared weighted distances = fit score for this isochrone
        isoScore = np.sum(dist[:])
        #print(isoScore,dist)
        #print(list(geom.shape(isoLine).coords))
        # NOTE(review): np.r_ with the name string coerces score/reddening to
        # strings here; downstream consumers must cast back with float()
        isoFitList = np.r_[isoFitList,[[iso.name,float(isoScore),float(reddening)]]]
        #compareInstances(iso,cluster.iso[-1][0])
        #print(isoScore)
    cluster.iso.extend(isoFitList)
    #best = cluster.iso[1][0]
    #specificPlot(cluster.name,best.name,reddening)
    #print(f"\nFirst point of best fit: {best.br[0]+reddening},{best.g[0]+conversion*reddening+cluster.dist_mod}")
def onclick(x,y,fig,ax,cluster,minScore,weighting,newList):
    """Build the matplotlib mouse handler for manual condensed-point picking.

    Left click adds a point, right click removes the nearest point and
    redraws, middle click (or reaching 100 points) finishes: the figure is
    closed and updateCondensed resumes the fitting pipeline. Picked points
    accumulate in the global `coords`.
    """
    def func(event):
        import matplotlib.pyplot as plt
        global coords
        ix, iy = event.xdata, event.ydata
        if str(event.button) == "MouseButton.RIGHT":
            # Remove any stored point close to the click, then redraw
            # NOTE(review): pops from coords while iterating it -- works for a
            # single nearby match but skips elements after a removal; confirm
            for i,(cx,cy) in enumerate(coords):
                if abs(ix-cx) <= 0.075 and abs(iy-cy) <= 0.25:
                    coords.pop(i)
            ax.clear()
            ax.scatter(x,y,s=0.5,color='dimgray')
            ax.invert_yaxis()
            ax.scatter([a[0] for a in coords],[a[1] for a in coords],c='red',s=10)
            plt.gcf().canvas.draw_idle()
        if str(event.button) == "MouseButton.LEFT":
            # Record and display a new proxy point
            coords.append((ix, iy))
            ax.scatter(ix,iy,c='red',s=10)
            plt.gcf().canvas.draw_idle()
        if str(event.button) == "MouseButton.MIDDLE":
            # Done: detach this handler, close the figure, resume the pipeline
            fig.canvas.mpl_disconnect(cid)
            plt.close(fig)
            updateCondensed(cluster,minScore,weighting,newList)
        if len(coords) >= 100:
            # Safety cap: auto-finish after 100 points
            fig.canvas.mpl_disconnect(cid)
            plt.close(fig)
            updateCondensed(cluster,minScore,weighting,newList)
        return
    return func
def updateCondensed(cluster,minScore,weighting,newList):
    """Convert the manually clicked `coords` into condensed points, save them,
    and resume fitting for this cluster (and then any remaining clusters).
    """
    #Imports
    import numpy as np
    global coords
    condensed = []
    for point in coords:
        if cluster.clType.lower() == "globular" or weighting.lower() == "equal":
            weight = 1
        else:
            #Automatic weighting scheme currently unsupported for manual condensed point definition,
            #but the framework is here to be able to insert it without having to worry about it being
            #passed around from function to function
            weight = 1
        condensed.append(condensedPoint(point[0],point[1],weight))
    # condensed0 mirrors the zero-reddening snapshot used elsewhere
    if cluster.reddening == 0:
        cluster.condensed0 = condensed
    cluster.condensed = condensed
    # Persist the picked points so 'load' condensing can reuse them later
    np.savetxt(f"{cluster.dataPath}condensed.csv",coords,delimiter=',')
    redFitting(cluster,minScore,weighting)
    # Continue the suspended batch with the clusters that were queued after
    # this one when condense() handed control to the GUI
    if len(newList) > 0:
        turboFit(newList,'manual',weighting,'catalogue',minScore)
    return
def find_nearest(array, value):
    """Return the element of *array* closest to *value*."""
    import numpy as np
    arr = np.asarray(array)
    return arr[np.abs(arr - value).argmin()]
def testCluster(name='feh_0.00_afe_0.00_age_0.141_y_0.2703'):
    """Create a synthetic 'test' cluster whose stars lie exactly on the named
    isochrone, for validating the fitting pipeline end-to-end.
    """
    #Imports
    import numpy as np
    global clusterList
    global clIn
    iso = isochrones[name]
    test = clusterObj('test')
    # Fabricate starObj instances: only the G magnitude and BP-RP colour slots
    # are populated from the isochrone; all other positional fields are dummies
    filtered = [starObj('fake',0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,a.Gaia_G_EDR3,0,0,0,0,0,0,0,a.Gaia_BP_EDR3-a.Gaia_RP_EDR3,0,0,0,0,0,0,0,0,0,0,0) for a in iso.starList]
    test.filtered = filtered
    mag = np.empty((0,2))
    for star in test.filtered:
        mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    test.mag = mag
    # Replace any existing 'test' cluster in the registry, then re-index
    if not 'test' in clusters:
        clusterList.append(test)
    else:
        idx = clusterList.index(clusters['test'])
        clusterList.pop(idx)
        clusterList.append(test)
    clIn = True
    toDict()
def condense(cList,condensing,weighting,tp,minScore=0.001):
    """Build condensed (proxy) CMD points for each cluster in cList.

    condensing: 'load' reads a saved condensed.csv, 'manual' opens an
    interactive figure (returns "Suspended"; processing resumes from the GUI
    callback), anything else slices the CMD into brightness bins and takes
    per-bin medians.
    weighting: 'pos' applies a Gaussian positional weight around the turning
    point for open clusters; otherwise weights stay at 1.
    tp: 'catalogue' takes the turning point from the cluster catalogue,
    falling back to 'auto' gradient-based detection when undefined (<= -98).

    Fixes relative to the original implementation:
      * the brightness sort of `mag` was computed but discarded
      * the catalogue->auto fallback used `tp == "auto"` (a no-op comparison)
      * the "no turning point" message was missing its f-string prefix
      * after auto detection `tp` is re-bound to an array row, so the final
        catalogue check now guards with isinstance before calling .lower()
        (and tp is reset per cluster so one cluster cannot poison the next)
    """
    #Imports
    import numpy as np
    global isoList
    global mag
    #Remember the caller's turning-point mode; auto detection re-binds tp below
    tpMode = tp
    for cluster in cList:
        tp = tpMode
        if type(cluster) == str:
            cluster = clusters[cluster]
            cList[cList.index(cluster.name)] = cluster
        #Creates mag arrays to be used in place of the filtered star objects
        mag = cluster.mag[:,:]
        #Bug fix: keep the sort result (it used to be discarded) so the
        #brightness slicing below sees magnitudes in ascending order
        mag = mag[mag[:,1].argsort()]
        gmag = list(mag[:,1])
        gmin = mag[0,1]
        gmax = mag[-1,1]
        div = 50
        seg = (gmax-gmin)/div
        minpoints = 1
        #The array that will become the condensed points list
        condensed = np.empty((0,3))
        turnPoints = []
        if condensing.lower() == "load":
            global pts
            pts = np.genfromtxt(f"{cluster.dataPath}condensed.csv",delimiter=',')
            condensed = []
            for point in pts:
                #Missing alternate weighting schemes, but can be implemented *here*
                condensed.append(condensedPoint(point[0],point[1],1))
            cluster.condensed = condensed
            cluster.condensed0 = condensed
            continue
        #Manual point definition
        if condensing.lower() == "manual":
            import matplotlib.pyplot as plt
            global cid
            global coords
            coords = []
            #Clusters still waiting to be processed once the GUI finishes
            if len(cList) == 1:
                newList = []
            else:
                newList = cList[cList.index(cluster)+1:]
            x,y = mag[:,0],mag[:,1]
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.scatter(x,y,s=0.25,color='dimgray')
            ax.invert_yaxis()
            hook = onclick(x,y,fig,ax,cluster,minScore,weighting,newList)
            cid = fig.canvas.mpl_connect('button_press_event', hook)
            return "Suspended"
        #Vertically stacked slices in brightness: one median point per slice
        for i in range(div):
            sliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
            #Skip forseen problems with empty arrays
            if len(sliced) < minpoints:
                continue
            condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]
        condensed = condensed[::-1]
        #Uses defined turning points in the cluster catalogue
        if tp.lower() == "catalogue":
            if cluster.cltpx <= -98 and cluster.cltpy <= -98:
                #Bug fix: was `tp == "auto"`, a comparison that silently
                #discarded the intended fallback assignment
                tp = "auto"
        #If no turning point is found, or auto is specified, then this section of code
        #attempts to find the turning point through steep gradient changes in the main sequence
        if tp.lower() == "auto":
            #Criteria for the line that forms the basis of the gradient change method
            start = 4
            end = 11
            theta_crit = 5
            #Creates a slope-intercept fit for the lower main sequence
            basex = [a[0] for a in condensed[start:end]]
            basey = [a[1] for a in condensed[start:end]]
            base = np.polyfit(basex,basey,1)
            #Travels up the main sequence
            for i,point in enumerate(condensed):
                if i == start:
                    continue
                #Creates a fit line between the start point and the current point
                x = [point[0],condensed[start,0]]
                y = [point[1],condensed[start,1]]
                lin = np.polyfit(x,y,1)
                #Calculates an angle between the new line and the lower main sequence
                point[2] = 180/np.pi*np.arctan(abs( (base[0]-lin[0])/(1+base[0]*lin[0]) ))
                #If the angle between the two lines is large enough, the point is considered
                #to be a candidate turning point, and is appended to the list of candidates
                if point[2] > theta_crit and i > end:
                    turnPoints.append(point)
            #Analysis plot showing the theta value for each condensed point
            import matplotlib.pyplot as plt
            plt.figure()
            plt.scatter(condensed[:,0],condensed[:,1],c=condensed[:,2])
            plt.set_cmap('brg')
            plt.gca().invert_yaxis()
            clb = plt.colorbar()
            clb.ax.set_title("Theta")
            plt.savefig(f'condensed_{cluster.name}')
            #If no automatic turning point is found, ends the method here
            if len(turnPoints) == 0:
                #Bug fix: message was missing its f-string prefix
                #NOTE(review): this return aborts the remaining clusters too
                print(f"No turning point identified for {cluster.name}")
                return
            else:
                #Identifies the proper turning point as a 5% color offset of the dimmest turning point candidate
                turnPoints = sorted(turnPoints,key=lambda x: x[1])
                tp = turnPoints[-1]
                tp[0] = tp[0] - 0.05*np.abs(tp[0])
                cluster.turnPoint = tp
                #Stores the condensed point list
                cl = []
                for point in condensed:
                    cl.append(condensedPoint(point[0],point[1],point[2]))
                cluster.condensedInit = cl
                # [ B-R , G , Theta ]
                print(f"{cluster.name} Turning Point: {cluster.turnPoint}")
        #Bug fix: after the auto path tp is a numpy row, so only treat it as a
        #mode string when it still is one (previously crashed on .lower())
        if isinstance(tp, str) and tp.lower() == "catalogue":
            cluster.turnPoint = [cluster.cltpx,cluster.cltpy]
        if cluster.clType.lower() == "open":
            #Recalc with the turnPoint limit enforced - Ignore blue stragglers
            condensed = np.empty((0,3))
            condensed_giant = np.empty((0,3))
            yList = []
            #Vertically stacked slices in brightness
            for i in range(div):
                rawSliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
                sliced = np.empty((0,2))
                sliced_giant = np.empty((0,2))
                for point in rawSliced:
                    #Split main sequence (redder than turning point) from giants
                    if point[0] >= cluster.turnPoint[0]:
                        sliced = np.r_[sliced,[[point[0],point[1]]]]
                    else:
                        sliced_giant = np.r_[sliced_giant,[[point[0],point[1]]]]
                #Skip forseen problems with empty arrays
                if len(sliced) > 0:
                    x = np.median(sliced[:,0])
                    y = np.median(sliced[:,1])
                    yList.append(y)
                    condensed = np.r_[condensed,[[x,y,1]]]
                if len(sliced_giant) > 3:
                    xg = np.median(sliced_giant[:,0])
                    yg = np.median(sliced_giant[:,1])
                    condensed_giant = np.r_[condensed_giant,[[xg,yg,1]]]
            #New turning point found from the reduced data set
            newTP = find_nearest(yList,cluster.turnPoint[1])
            index = 0
            for i,point in enumerate(condensed):
                if newTP == point[1]:
                    index = i
                    break
            assert not index == 0
            #Binary star list: fit the main sequence above the turning point
            tpcut = index + 3
            xset = condensed[tpcut:-1,0]
            yset = condensed[tpcut:-1,1]
            fit = np.polyfit(xset,yset,1)
            #Distance from the main sequence linear fit
            for star in cluster.filtered:
                x0 = star.b_r
                y0 = star.g_mag
                dist = abs( y0 - fit[0]*x0 - fit[1] ) / np.sqrt(fit[0]**2 + 1)
                star.distance_MS = dist
                #Stars sitting above the MS fit line are flagged as binaries
                if dist > 0.05 and y0 < fit[0]*x0+fit[1] and x0 > xset[0] and y0 > condensed[index,1]:
                    cluster.binaries.append(star)
                    star.binary = 1
                else:
                    star.binary = 0
            #Fit weight parameters: Gaussian falloff centered near the turning point
            N = len(condensed)
            beta = -2
            index = index - 7
            for i,point in enumerate(condensed):
                if weighting.lower() == 'pos':
                    point[2] = np.exp(beta*((i-index)/N)**2)
        condensed = condensed[::-1]
        cl = []
        coords = []
        for point in condensed:
            cl.append(condensedPoint(point[0],point[1],point[2]))
            coords.append((point[0],point[1]))
        #Persist the proxy points so 'load' condensing can reuse them later
        np.savetxt(f"{cluster.dataPath}condensed.csv",coords,delimiter=',')
        if cluster.reddening == 0:
            cluster.condensed0 = cl
        cluster.condensed = cl
# def checkLoaded(cList):
# needsLoading = []
# loaded = []
# for cl in cList:
# if not cl in clusters:
# needsLoading.append(cl)
# else:
# loaded.append(cl)
# return loaded,needsLoading()
def toDict():
    """Rebuild the name->object lookup dicts from whichever lists are loaded.

    Only registries whose corresponding flag (clIn / isoIn / resultsIn) is
    set are rebuilt.
    """
    global clusterList
    global clusters
    global isoList
    global isochrones
    global resultList
    global results
    global clIn
    global isoIn
    global resultsIn
    if clIn:
        clusters = {c.name: c for c in clusterList}
    if isoIn:
        isochrones = {i.name: i for i in isoList}
    if resultsIn:
        results = {r.name: r for r in resultList}
def plot(cList=['all'],modes=['pos','pm','cmd','quiver','iso'],closePlots=False):
#Imports
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
import os
global clusterList
cList = checkLoaded(cList)
for cl in cList:
cluster = clusters[cl]
if not os.path.isdir(f"{cluster.imgPath}/png"):
os.mkdir(f"{cluster.imgPath}/png")
#Position plots
if 'pos' in modes:
unfra=[star.ra for star in cluster.unfilteredWide]
unfdec=[star.dec for star in cluster.unfilteredWide]
ra=[star.ra for star in cluster.filtered]
dec=[star.dec for star in cluster.filtered]
unfnormra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide]
normra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered]
#Unfiltered position plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unfra[:],unfdec[:],s=0.5,c='dimgray')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered.png",dpi=500)
#Filtered position plot
plt.figure(f"{cluster.name}_ra_dec_filtered")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Filtered")
plt.scatter(ra[:],dec[:],s=0.5,c='midnightblue')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered.png",dpi=500)
#Position overlay
plt.figure(f"{cluster.name}_ra_dec_overlay")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unfra[:],unfdec[:],s=0.5,c='lightgray')
plt.scatter(ra[:],dec[:],s=1,c='midnightblue')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay.png",dpi=500)
#Normalized
#NormRA = RA*cos(DEC)
#Unfiltered normalized position plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered Normalized")
plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='dimgray')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_normalized.png",dpi=500)
#Filtered normalized position plot
plt.figure(f"{cluster.name}_ra_dec_filtered_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Filtered Normalized")
plt.scatter(normra[:],dec[:],s=0.5,c='midnightblue')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_filtered_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered_normalized.png",dpi=500)
#Position overlay normalized
plt.figure(f"{cluster.name}_ra_dec_overlay_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Overlay Normalized")
plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='lightgray')
plt.scatter(normra[:],dec[:],s=1,c='midnightblue')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_overlay_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay_normalized.png",dpi=500)
#Proper motion plots
if 'pm' in modes:
unfpmra=[star.pmra for star in cluster.unfilteredWide]
unfpmdec=[star.pmdec for star in cluster.unfilteredWide]
pmra=[star.pmra for star in cluster.filtered]
pmdec=[star.pmdec for star in cluster.filtered]
unfpara=[star.par for star in cluster.unfilteredWide]
para=[star.par for star in cluster.filtered]
x0 = cluster.pmra_min
x1 = cluster.pmra_max
y0 = cluster.pmdec_min
y1 = cluster.pmdec_max
width = x1-x0
scale = 5
subscale = 2
xmin = x0-scale*width
xmax = x1+scale*width
ymin = y0-scale*width
ymax = y1+scale*width
sxmin = x0-subscale*width
sxmax = x1+subscale*width
symin = y0-subscale*width
symax = y1+subscale*width
#Unfiltered proper motion plot
plt.figure(f"{cluster.name}_pm_unfiltered")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_unfiltered.png",dpi=500)
plt.xlim([sxmin,sxmax])
plt.ylim([symin,symax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_unfiltered_closeup.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_unfiltered_closeup.png",dpi=500)
#Filtered proper motion plot
plt.figure(f"{cluster.name}_pm_filtered")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Filtered")
plt.scatter(pmra[:],pmdec[:],s=0.5,c='midnightblue')
# plt.xlim([xmin,xmax])
# plt.ylim([ymin,ymax])
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_filtered.png",dpi=500)
#Proper motion overlay
plt.figure(f"{cluster.name}_pm_overlay")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='lightgray')
plt.scatter(pmra[:],pmdec[:],s=1,c='midnightblue')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_overlay.png",dpi=500)
plt.xlim([sxmin,sxmax])
plt.ylim([symin,symax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_overlay_closeup.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_overlay_closeup.png",dpi=500)
#Unfiltered PM/Parallax
plt.figure(f"{cluster.name}_pm_over_parallax_unfiltered")
plt.xlabel('PMRA / Parallax')
plt.ylabel('PMDEC / Parallax')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter([a/b for a,b in zip(unfpmra,unfpara)],[a/b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_over_parallax_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_over_parallax_unfiltered.png",dpi=500)
#Unfiltered PM*Parallax
plt.figure(f"{cluster.name}_pm_times_parallax_unfiltered")
plt.xlabel('PMRA * Parallax')
plt.ylabel('PMDEC * Parallax')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter([a*b for a,b in zip(unfpmra,unfpara)],[a*b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_times_parallax_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_times_parallax_unfiltered.png",dpi=500)
#CMD plots
if 'cmd' in modes:
unfgmag=[star.g_mag for star in cluster.unfilteredWide]
unf_b_r=[star.b_r for star in cluster.unfilteredWide]
gmag=[star.g_mag for star in cluster.filtered]
b_r=[star.b_r for star in cluster.filtered]
bright_b_r = [x.b_r for x in cluster.filteredBright]
bright_gmag = [x.g_mag for x in cluster.filteredBright]
par_b_r = [x.b_r for x in cluster.distFiltered]
par_gmag = [x.g_mag for x in cluster.distFiltered]
#Reddening Correction
plt.figure(f"{cluster.name}_reddening_CMD")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('G Mag')
plt.title(f"{cluster.name} Reddening = {cluster.reddening:.2f}")
plt.scatter(b_r[:],gmag[:],s=0.5,c='dimgray',label='Observed')
plt.arrow(b_r[int(len(b_r)/2)]-cluster.reddening,gmag[int(len(gmag)/2)]-2.1*cluster.reddening,cluster.reddening,2.1*cluster.reddening,color='red')
plt.scatter([s-cluster.reddening for s in b_r[:]],[s-2.1*cluster.reddening for s in gmag[:]],s=1,c='midnightblue',label='Corrected')
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_reddening_CMD.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_reddening_CMD.png",dpi=500)
#Unfiltered CMD plot
plt.figure(f"{cluster.name}_CMD_unfiltered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_unfiltered.png",dpi=500)
#Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Parallax & Proper Motion Filtered")
plt.scatter(b_r[:],gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_filtered.png",dpi=500)
#CMD overlay
plt.figure(f"{cluster.name}_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')
plt.scatter(b_r[:],gmag[:],s=1,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_overlay.png",dpi=500)
#Condensed CMD overlay
plt.figure(f"{cluster.name}_condensed_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Condensed Overlay")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c='red',label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_condensed_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_condensed_CMD_overlay.png",dpi=500)
#Weighted CMD overlay
plt.figure(f"{cluster.name}_weighted_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Weighted Overlay")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.set_cmap('brg')
clb = plt.colorbar()
clb.ax.set_title("Weight")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_weighted_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_weighted_CMD_overlay.png",dpi=500)
#Initial Condensed CMD overlay
plt.figure(f"{cluster.name}_initial_condensed_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Initial Condensed Overlay")
plt.scatter(b_r,gmag,s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r for s in cluster.condensedInit],[s.g_mag for s in cluster.condensedInit],s=5,c='red',label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_initial_condensed_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_initial_condensed_CMD_overlay.png",dpi=500)
#Brightness-PM Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_bright_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Bright-Only Proper Motion Filtered")
plt.scatter(bright_b_r[:],bright_gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_bright_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_bright_filtered.png",dpi=500)
#Parallax Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_parallax_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Parallax Filtered")
plt.scatter(par_b_r[:],par_gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_parallax_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_parallax_filtered.png",dpi=500)
if 'quiver' in modes:
unfra=[star.ra for star in cluster.unfilteredWide]
unfdec=[star.dec for star in cluster.unfilteredWide]
unfpmra=[star.pmra for star in cluster.unfilteredWide]
unfpmdec=[star.pmdec for star in cluster.unfilteredWide]
x0 = min([s.ra for s in cluster.filtered])
x1 = max([s.ra for s in cluster.filtered])
y0 = min([s.dec for s in cluster.filtered])
y1 = max([s.dec for s in cluster.filtered])
width = x1-x0
scale = 0.25
xmin = x0+scale*width
xmax = x1-scale*width
ymin = y0+scale*width
ymax = y1-scale*width
#Unfiltered position quiver plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered_quiver")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered")
ax = plt.gca()
ax.quiver(unfra[:],unfdec[:],unfpmra[:],unfpmdec[:],color='midnightblue',width=0.003,scale=400,scale_units='width')
plt.axis("square")
plt.gcf().set_size_inches(10,10)
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver.png",dpi=500)
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.png",dpi=500)
#Isochrone plots
if 'iso' in modes:
gmag=[star.g_mag for star in cluster.filtered]
b_r=[star.b_r for star in cluster.filtered]
isochrone = isochrones[cluster.iso[0][0]]
#Isochrone best fit
plt.figure(f"{cluster.name}_Iso_best")
plt.gca().invert_yaxis()
plt.xlabel('Dereddened BP-RP')
plt.ylabel('Corrected Absolute G Mag')
plt.title(f"{cluster.name} Isochrone Best Fit")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening-cluster.dist_mod for s in gmag],s=0.5,c='dimgray',label='Cluster')
isoLabels = isochrone.name.split('_')
isoLabel = r"$[\frac{Fe}{H}]$" + "=" + isoLabels[1] + "\n" \
+ r"$[\frac{\alpha}{Fe}]$" + "=" + isoLabels[3] + "\n" \
+ r"$[Y]$" + "=" + isoLabels[7] + "\n" \
+ "Age" + "=" + isoLabels[5] + " Gyr"
plt.plot(isochrone.br,isochrone.g,c='midnightblue',label=isoLabel)
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening-cluster.dist_mod for s in cluster.condensed],s=5,c='red',label='Cluster Proxy')
extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
h,l = plt.gca().get_legend_handles_labels()
h.insert(0,extra)
l.insert(0,f"Reddening: {cluster.reddening}")
plt.legend(h,l)
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_Iso_BestFit.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_Iso_BestFit.png",dpi=500)
#Membership plots
if 'membership' in modes:
proxyMatch([cl])
boundedStats([cl],saveCl=False,unloadCl=False)
membership(cl,mode='filtered')
membership(cl,mode='bounded',N=50)
#3D Position plots
if '3D' in modes:
A = [a.ra * np.pi/180 for a in cluster.filtered]
B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
C = [1/(1000*c.par) for c in cluster.filtered]
x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
z = [c*np.sin(b) for b,c in zip(B,C)]
r = [np.sqrt(a**2+b**2) for a,b in zip(x,y)]
theta = [np.arctan(b/a) for a,b in zip(x,y)]
plt.figure(f"{cluster.name}_3D_Position")
ax = plt.axes(projection='3d')
ax.scatter3D(x,y,z)
ax.scatter(0,0,0,color='red')
scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
if closePlots:
plt.close('all')
# def Plot3D(cList):
# #Imports
# import matplotlib.pyplot as plt
# import numpy as np
# global clusterList
# needsLoading=[]
# plt.figure(f"3D_Position_Ensemble")
# ax = plt.axes(projection='3d')
# for cl in cList:
# if not cl in clusters:
# needsLoading.append(cl)
# if not len(needsLoading) == 0:
# loadClusters(needsLoading)
# for cl in cList:
# cluster = clusters[cl]
# A = [a.ra * np.pi/180 for a in cluster.filtered]
# B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
# C = [1/(0.001*c.par) for c in cluster.filtered]
# #Flatten radially
# C = [np.mean(C)]*len(C)
# x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
# y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
# z = [c*np.sin(b) for b,c in zip(B,C)]
# #Force Cluster to origin
# # x = [a-np.mean(x) for a in x]
# # y = [a-np.mean(y) for a in y]
# # z = [a-np.mean(z) for a in z]
# ax.scatter3D(x,y,z,label=cluster.name)
# scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
# ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
# #ax.scatter(0,0,0,color='black')
# plt.legend()
def yso_lookup():
    """Resolve YSO (young stellar object) names through SIMBAD and export coordinates.

    Reads "Excess Examples/YSO_object_list.dat", builds an object name from each
    row, queries SIMBAD for it, converts the returned sexagesimal RA/DEC into
    decimal degrees, and writes two text files:
      - Excess Examples/yso_vosa_output.txt (VOSA upload format)
      - Excess Examples/yso_gaia_output.txt (plain "ra dec" rows, resolved only)
    Intermediate results are exposed via module globals names/sect/results/ra/dec.
    """
    #Imports
    from astroquery.simbad import Simbad
    import numpy as np
    import os
    import re
    global names
    global sect
    global results
    global ra
    global dec
    main = open("Excess Examples/YSO_object_list.dat").read()
    main = main.split("\n")[:-1]
    #Get the names of all of the objects identified
    names = []
    ra = []
    dec = []
    validNames = []
    for row in main:
        sect = re.split('\s+',row)
        #Leading whitespace yields an empty first field; drop it
        if sect[0] == '':
            sect = sect[1:]
        #Rows with no identifier are skipped
        if sect[2] == 'none':
            continue
        name = sect[2]
        #Component designations that terminate a multi-token identifier
        blacklist = ['A','Ab','AB','ABC','B','AaB']
        for entry in sect[3:]:
            if '.' in entry or entry in blacklist:
                break
            name = name + " " + entry
        names.append(name)
    #Perform a SIMBAD query for the identified objects
    results = []
    for name in names:
        result = Simbad.query_object(name)
        if not type(result) == type(None):
            results.append(result)
            validNames.append(name.replace(' ',''))
            #Convert RA from "h m s" to decimal degrees (15 deg/hour, 1/4 deg/min, 1/240 deg/sec)
            ra1 = str(result.columns['RA']).split('\n')[-1]
            ra1 = re.split('\s+',ra1)
            if '' in ra1:
                ra.append('---')
            else:
                ra.append(str(round(float(ra1[0])*15+float(ra1[1])/4+float(ra1[2])/240,5)))
            #Convert DEC from "d m s" to decimal degrees
            #NOTE(review): the arcmin/arcsec terms are added unsigned, so negative
            #declinations would come out wrong — confirm the input objects are northern
            dec1 = str(result.columns['DEC']).split('\n')[-1]
            dec1 = re.split('\s+',dec1)
            if '' in dec1:
                dec.append('---')
            else:
                dec.append(str(round(float(dec1[0])+float(dec1[1])/60+float(dec1[2])/3600,5)))
    #Create a text file in the VOSA readable format
    VOSAdata = []
    gaiadata = []
    for i in range(len(validNames)):
        line1 = f"{validNames[i]} {ra[i]} {dec[i]} --- --- --- --- --- --- ---"
        line2 = f"{ra[i]} {dec[i]}"
        VOSAdata.append(line1)
        #Unresolved ('---') coordinates are excluded from the Gaia list
        if '-' in line2:
            continue
        gaiadata.append(line2)
    np.savetxt("Excess Examples/yso_vosa_output.txt",VOSAdata,fmt="%s")
    np.savetxt("Excess Examples/yso_gaia_output.txt",gaiadata,fmt="%s")
def exportVOSA(cl):
    """Write the filtered members of cluster *cl* to a VOSA upload file.

    Columns: objname RA DEC DIS Av Filter Flux Error PntOpts ObjOpts.
    Distance is derived from parallax as 1000/par; unknown fields are '---'.
    The cluster is loaded on demand and output goes to <dataPath><name>_VOSA.txt.
    """
    import numpy as np
    if not cl in clusters:
        loadClusters([cl])
    cluster = clusters[cl]
    #objname RA DEC DIS Av Filter Flux Error PntOpts ObjOpts
    rows = []
    for member in cluster.filtered:
        flat_name = member.name.replace(" ","")
        rows.append(f"{flat_name} {member.ra} {member.dec} {1000/member.par} --- --- --- --- --- ---")
    np.savetxt(f"{cluster.dataPath}{cluster.name}_VOSA.txt",rows,fmt="%s")
def readSED(cList=['all'],printMissing=False):
    """Attach VOSA SED measurements to each cluster's filtered stars.

    For every cluster in cList, reads the per-star SED tables under
    <dataPath>/vosa_results/objects/<star>/sed/<star>.sed.dat, converts each
    data row into a vosaPoint, and stores them on the star as .vosaPoints.
    Rebuilds cluster.stars as a dict of only those stars that both survive
    filtering and have a VOSA table. Set printMissing=True to report VOSA
    folders with no matching filtered star.
    """
    #imports
    import numpy as np
    import re
    import os
    cList = checkLoaded(cList)
    for cl in cList:
        cluster = clusters[cl]
        objPath = cluster.dataPath + "vosa_results/objects/"
        names = []
        #Flatten star names (strip spaces and Gaia release tags) to match VOSA folder names
        for star in cluster.filtered:
            flat = star.name.replace(" ","").replace("DR2","").replace("EDR3","").replace("DR3","")
            names.append(flat)
            star.flatName = flat
        cluster.stars = dict(zip(names,cluster.filtered))
        idx = 0
        newStars = dict()
        #Each star in a cluster has its own folder, and each folder contains several data sets
        for folder in os.listdir(objPath):
            fileName = folder.replace("DR2","").replace("EDR3","").replace("DR3","")
            #Weed out VOSA stars not in current filtered members list
            if not fileName in cluster.stars:
                if printMissing:
                    print(f"{fileName} is missing from filtered list, skipping it...")
                continue
            #The first 10 lines of a .sed.dat file are header; the last line is empty
            main = open(objPath+folder+"/sed/"+folder+".sed.dat").read()
            main = main.split("\n")
            data = main[10:-1]
            #Create a list of measurement object pointers to attach to the stars later
            measurements = []
            #Convert every line of the data set into a vosaPoint object
            for row in data:
                sect = re.split('\s+',row)[1:-1]
                measurements.append(vosaPoint(str(sect[0]),float(sect[1]),float(sect[2]),float(sect[3]),float(sect[4]),float(sect[5]),float(sect[6])))
            cluster.stars[fileName].vosaPoints = measurements
            #Weed out cluster.stars members who do not have a vosa table
            newStars[fileName] = cluster.stars[fileName]
            idx += 1
        cluster.stars = newStars
def checkBinary(cl):
import numpy as np
import matplotlib.pyplot as plt
checkLoaded([cl])
cluster = clusters[cl]
global lman
data = [Datum(star.b_r,star.g_mag) for star in cluster.filtered]
# ax = plt.axes(xlim=(cluster.min_b_r-0.25,cluster.max_b_r+0.25), ylim=(cluster.min_g_mag-1,cluster.max_g_mag+1),autoscale_on=False)
ax = plt.axes(xlim=(0, 2.5), ylim=(8, 20), autoscale_on=False)
ax.invert_yaxis()
ax.set_title('Lasso points using left mouse button')
lman = LassoManager(ax, data,cluster)
plt.show()
def vosaBinaries(cl):
    """Prepare per-star VOSA output for flagged binaries in cluster *cl*.

    Currently a stub: it ensures the output directory exists and iterates the
    binary-flagged stars, but does not yet produce any plots.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    checkLoaded([cl])
    cluster = clusters[cl]
    if not os.path.isdir(f"{cluster.imgPath}vosaBinaries/"):
        os.mkdir(f"{cluster.imgPath}vosaBinaries/")
    for star in cluster.stars.values():
        #BUG FIX: the original used `return` here, which aborted the whole
        #function at the first non-binary star; `continue` skips it instead.
        if not star.binary == 1:
            continue
def excessIR(cl,plot=True):
    """Flag stars in cluster *cl* whose VOSA SED reports an infrared excess.

    A star is flagged (star.hasExcess = 1) if any of its vosaPoints has
    excess > 0. When plot=True, each flagged star's SED is drawn on log-log
    axes and saved under <imgPath>/excessIR/ as both PDF and PNG.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    checkLoaded([cl])
    cluster = clusters[cl]
    #Output directory for the per-star SED plots
    if not os.path.isdir(f"{cluster.imgPath}excessIR/"):
        os.mkdir(f"{cluster.imgPath}excessIR/")
    for star in cluster.stars.values():
        #Any single VOSA point with a positive excess flag marks the star
        excess = False
        for vp in star.vosaPoints:
            if vp.excess > 0:
                excess = True
        if excess:
            #print(f"{star.name} has {len(star.vosaPoints)} VOSA points")
            star.hasExcess = 1
            if plot:
                plt.figure(f'{cluster.name} - {star.name}')
                plt.title(f'{cluster.name} : {star.name}')
                ax = plt.gca()
                ax.set_yscale('log')
                ax.set_xscale('log')
                plt.ylabel(r'Flux ($ergs^{-1}cm^{-2}\AA^{-1}$)')
                plt.xlabel(r'Wavelength ($\AA$)')
                plt.scatter([a.wavelength for a in star.vosaPoints],[a.flux for a in star.vosaPoints])
                plt.savefig(f"{cluster.imgPath}excessIR/{star.name}.pdf")
                plt.savefig(f"{cluster.imgPath}excessIR/{star.name}.png",dpi=500)
def proxyMatch(cList,plot=False):
    """Match each filtered star to its nearest point on the best-fit isochrone.

    For every cluster in cList, finds for each filtered star the isochrone
    point closest in dereddened BP-RP color (within 0.2 mag, and within 1 mag
    in extinction-corrected G) and copies that point's model quantities onto
    the star: proxyMass, proxyLogTemp, proxyFeH, proxyLogAge, proxy.
    Unmatched stars get zeroed proxies and proxy=None. Also sets the
    cluster-level aggregates massLoaded, meanProxyMass, totalProxyMass and
    the min/max of g_mag and b_r. The `plot` parameter is currently unused.
    """
    #Imports
    import matplotlib.pyplot as plt
    import numpy as np
    checkLoaded(cList)
    for cl in cList:
        cluster = clusters[cl]
        #Best-fit isochrone is the first entry of the ranked fit list
        iso = isochrones[cluster.iso[0][0]]
        isoPoints = []
        for pt in iso.starList:
            isoPoints.append(pt)
        for star in cluster.filtered:
            minDist = 0.2       #maximum acceptable BP-RP separation for a match
            smallestDist = 10   #closest rejected separation, kept for diagnostics
            vertCutoff = 1      #maximum G-mag separation for a match
            minPoint = None
            for point in isoPoints:
                dist = abs(point.Gaia_BP_EDR3-point.Gaia_RP_EDR3-star.b_r+cluster.reddening)
                if dist < minDist:
                    if abs(point.Gaia_G_EDR3+cluster.dist_mod - star.g_mag + 2.1*cluster.reddening) < vertCutoff:
                        minDist = dist
                        minPoint = point
                elif dist < smallestDist:
                    smallestDist = dist
            #BUG FIX: the original signalled "no match" via `assert minDist < 0.2`
            #inside a bare try/except. Asserts are stripped under `python -O`,
            #which would silently disable the check; testing whether a point was
            #actually found is equivalent (minDist only drops below 0.2 when
            #minPoint is set) and robust.
            if minPoint is None:
                print(f"[{cluster.name}] Star too distant from isochrone to make a good proxy: BP-RP: {star.b_r} | G: {star.g_mag} | Dist: {smallestDist}")
                star.proxyMass = 0
                star.proxyLogTemp = 0
                star.proxyFeH = 0
                star.proxyLogAge = 0
                star.proxy = None
                continue
            star.proxyMass = minPoint.star_mass
            star.proxyLogTemp = minPoint.log_Teff
            star.proxyFeH = minPoint.feh
            star.proxyLogAge = minPoint.log10_isochrone_age_yr
            star.proxy = minPoint
        cluster.massLoaded = True
        cluster.meanProxyMass = np.mean([a.proxyMass for a in cluster.filtered])
        cluster.totalProxyMass = np.sum([a.proxyMass for a in cluster.filtered])
        cluster.min_g_mag = min([a.g_mag for a in cluster.filtered])
        cluster.max_g_mag = max([a.g_mag for a in cluster.filtered])
        cluster.min_b_r = min([a.b_r for a in cluster.filtered])
        cluster.max_b_r = max([a.b_r for a in cluster.filtered])
def variableHistogram(cl,var):
    """Plot a histogram of star attribute *var* over the filtered members of *cl*.

    The attribute is evaluated as `star.<var>`, so dotted expressions work.
    Bin edges are chosen automatically by matplotlib.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([cl])
    cluster = clusters[cl]
    #Collect the attribute values before plotting
    values = [eval(f"a.{var}") for a in cluster.filtered]
    plt.figure()
    plt.title(f"{cluster.name} Histogram of {var}")
    plt.xlabel(f"{var}")
    plt.ylabel("Count")
    plt.hist(values,bins='auto')
def varHist2D(cl,var1,var2,color='default',listType='filtered'):
    """Scatter var1 vs var2 for a cluster star list, with marginal histograms.

    Parameters
    ----------
    cl : str
        Cluster name (loaded on demand).
    var1, var2 : str
        Star attribute names, evaluated as `star.<var>`.
    color : str
        Optional third attribute used to color the scatter points
        ('default' = uncolored).
    listType : str
        Which cluster star list to draw from; must be one of allowedTypes.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([cl])
    #Check allowed entries
    #BUG FIX: the original fused the last three entries into a single string
    #('unfilteredBright,filteredBright,binaries'), so those list types were
    #always rejected and silently replaced with 'filtered'.
    allowedTypes = ['filtered','unfilteredWide','unfilteredBright','filteredBright','binaries']
    if not listType in allowedTypes:
        print(f"{listType} is not a valid list type, defaulting to filtered")
        listType = "filtered"
    cluster = clusters[cl]
    plt.figure(figsize=(8,8))
    #Axis size and spacing
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    spacing = 0.005
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom + height + spacing, width, 0.2]
    rect_histy = [left + width + spacing, bottom, 0.2, height]
    ax_scatter = plt.axes(rect_scatter)
    ax_scatter.tick_params(direction='in', top=True, right=True)
    ax_histx = plt.axes(rect_histx)
    ax_histx.tick_params(direction='in', labelbottom=False)
    ax_histy = plt.axes(rect_histy)
    ax_histy.tick_params(direction='in', labelleft=False)
    x = [eval(f"a.{var1}") for a in eval(f"cluster.{listType}")]
    y = [eval(f"a.{var2}") for a in eval(f"cluster.{listType}")]
    if color == 'default':
        ax_scatter.scatter(x, y, s=5)
    else:
        colorMap = plt.get_cmap('coolwarm')#.reversed()
        ax_scatter.scatter(x, y, s=5, c=[eval(f"a.{color}") for a in eval(f"cluster.{listType}")], cmap = colorMap)
        # clb = plt.colorbar(ax_scatter)
        # clb.ax.set_title(f"{color}")
    ax_histx.hist(x,bins='auto')
    ax_histy.hist(y,bins='auto',orientation='horizontal')
    ax_histx.set_title(f"Histogram of {listType} {cluster.name} in {var1} and {var2}")
    ax_scatter.set_xlabel(f"{var1}")
    ax_scatter.set_ylabel(f"{var2}")
def Plot3D(cList=['all'],showEarth=True,flatten=True):
    """Render an interactive plotly 3D view of cluster member positions.

    Converts each filtered star's (RA, |DEC|, parallax) into Cartesian
    coordinates with distance = 1/(0.001*par) — parsecs if par is in mas
    (TODO confirm units). With flatten=True every star in a cluster is
    placed at the cluster's mean distance, collapsing radial spread.
    Opens the figure in the default browser renderer.
    """
    #Imports
    import plotly.express as px
    import plotly.io as pio
    import numpy as np
    global clusterList
    pio.renderers.default='browser'
    fig = px.scatter_3d()
    if showEarth:
        fig.add_scatter3d(x=[0],y=[0],z=[0],marker=dict(color='lightblue'),name="Earth")
    cList = checkLoaded(cList)
    big = []
    for cl in cList:
        cluster = clusters[cl]
        #Spherical coordinates: RA and |DEC| in radians, distance from parallax
        A = [a.ra * np.pi/180 for a in cluster.filtered]
        B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
        C = [1/(0.001*c.par) for c in cluster.filtered]
        #Flatten radially
        if flatten:
            C = [np.mean(C)]*len(C)
        #Spherical -> Cartesian conversion
        x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
        y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
        z = [c*np.sin(b) for b,c in zip(B,C)]
        #Force Cluster to origin
        # x = [a-np.mean(x) for a in x]
        # y = [a-np.mean(y) for a in y]
        # z = [a-np.mean(z) for a in z]
        fig.add_scatter3d(x=x,y=y,z=z,name=cl,mode="markers",marker=dict(size=2))
        #Track the largest coordinate of each cluster (used by the commented-out range lock below)
        big.append(np.amax(x))
        big.append(np.amax(y))
        big.append(np.amax(z))
    #fig.layout.scene = dict(aspectmode="manual",aspectratio=dict(x=1,y=1,z=1))
    #fig.update_layout(scene=dict(aspectmode="cube",xaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),yaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),zaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)])))
    fig.update_layout(scene=dict(aspectmode="cube",xaxis=dict(showbackground=False),yaxis=dict(showbackground=False),zaxis=dict(showbackground=False,visible=False)))
    fig.show()
def specificPlot(cl,iso,reddening,score):
    """Plot cluster *cl*'s CMD with isochrone *iso* overlaid at a given reddening.

    The isochrone is shifted by the reddening in color and by
    dist_mod + 2.1*reddening in magnitude (2.1 appears to be the adopted
    extinction coefficient — TODO confirm). The cluster's condensed proxy
    points are colored by their fitting weight, and `score` is shown in the
    legend. Output is saved to SpecificPlots/pdf/ and SpecificPlots/png/.
    """
    #Imports
    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle
    import os
    checkLoaded([cl])
    cluster = clusters[f"{cl}"]
    isochrone = isochrones[f"{iso}"]
    #These are displayed on the plot
    # score = 0
    reddening = float(reddening)
    #Directory for saving plot outputs
    if not os.path.isdir("SpecificPlots/pdf/"):
        os.makedirs("SpecificPlots/pdf/")
    if not os.path.isdir("SpecificPlots/png/"):
        os.makedirs("SpecificPlots/png/")
    # #Find the score of the associated isochrone
    # for chrone in cluster.iso:
    #     if chrone[0] == iso and chrone[2] == reddening:
    #         score = chrone[1]
    #         break
    #Plots the CMD and the isochrone, with all of the points adjusted to reddening, extinction, and distance modulus
    plt.figure()
    plt.gca().invert_yaxis()
    plt.xlabel('B-R')
    plt.ylabel('G Mag')
    plt.title(f"{cl} {iso}")
    plt.scatter([s.b_r for s in cluster.filtered],[s.g_mag for s in cluster.filtered],s=0.05,c='dimgray',label='Cluster')
    plt.plot([x + reddening for x in isochrone.br],[x+cluster.dist_mod+2.1*reddening for x in isochrone.g],c='midnightblue',label=f"Score: {float(score):.7f}")
    plt.scatter([s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Cluster Proxy')
    #Colors the points by their fitting weight
    plt.set_cmap('brg')
    clb = plt.colorbar()
    clb.ax.set_title("Weight")
    #Label for the reddening
    extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
    h,l = plt.gca().get_legend_handles_labels()
    h.insert(0,extra)
    l.insert(0,f"Reddening: {reddening}")
    plt.legend(h,l)
    #Save figure output to disk
    plt.savefig(f"SpecificPlots/pdf/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.pdf")
    plt.savefig(f"SpecificPlots/png/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.png",dpi=500)
def plotRange(cl,a,b):
    """Render CMD/isochrone plots for ranked fits a..b of cluster *cl*.

    Each entry of cluster.iso is (isochrone key, score, reddening); one
    specificPlot() call is issued per entry in the slice.
    """
    global clusters
    checkLoaded([cl])
    fits = clusters[f"{cl}"].iso[a:b]
    for entry in fits:
        iso_name = isochrones[entry[0]].name
        specificPlot(cl,iso_name,entry[2],entry[1])
def getIsoScore(cl,iso,red,output=True):
    """Return the stored fit score for a (cluster, isochrone, reddening) triple.

    Entries of cl.iso are (isochrone name, score, reddening). Returns 0 when
    no matching entry exists, printing a notice unless output=False.
    """
    _missing = object()
    target = iso.name
    found = next((entry[1] for entry in cl.iso if entry[0] == target and float(entry[2]) == red), _missing)
    if found is not _missing:
        return found
    if output:
        print(f"No score found for {cl.name} | {iso.name} | {red}")
    return 0
def onkey(x,y,cx,cy,fig,ax,cluster,iso,reddening):
    """Build the key-press handler used by interactivePlot().

    Key bindings in the returned handler:
      w/s : step the isochrone up/down the metallicity grid (fixed age)
      a/d : step down/up the age grid (fixed metallicity)
      q/e : decrease/increase the reddening by 0.01
      r   : reset to the initial isochrone
      space : save the current frame and print its identification

    The current isochrone and reddening are kept in the module globals
    curIso/curReddening; after every key press the axes are redrawn with
    sidebar rectangles showing the position within the age and metallicity
    grids (inner column = age, outer column = [Fe/H]).
    """
    global curIso
    global curReddening
    curIso = iso
    curReddening = reddening
    def func(event):
        import matplotlib.patches as patches
        global curIso
        global curReddening
        global isochrones
        key = str(event.key)
        #Grids of isochrones sharing the current metallicity (age-sorted)
        #and the current age (metallicity-sorted)
        ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]
        fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]
        age_index = ageSorted.index(curIso)
        feh_index = fehSorted.index(curIso)
        #'w': next metallicity, wrapping to the start of the grid
        if key == "w":
            try:
                curIso = fehSorted[feh_index+1]
                feh_index = feh_index+1
            except:
                curIso = fehSorted[0]
                feh_index = 0
        #'s': previous metallicity (negative indices wrap automatically)
        if key == "s":
            curIso = fehSorted[feh_index-1]
            feh_index = feh_index-1
            if feh_index < 0:
                feh_index = len(fehSorted)+feh_index
        #'a': previous age
        if key == "a":
            curIso = ageSorted[age_index-1]
            age_index = age_index-1
            if age_index < 0:
                age_index = len(ageSorted)+age_index
        #'d': next age, wrapping to the start of the grid
        if key == "d":
            try:
                curIso = ageSorted[age_index+1]
                age_index = age_index+1
            except:
                curIso = ageSorted[0]
                age_index = 0
        #'q'/'e': adjust reddening in 0.01 steps
        if key == "q":
            curReddening = round(curReddening-0.01,2)
        if key == "e":
            curReddening = round(curReddening+0.01,2)
        #'r': reset to the isochrone the plot was opened with
        if key == "r":
            curIso = iso
            ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]
            fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]
            age_index = ageSorted.index(curIso)
            feh_index = fehSorted.index(curIso)
        #space: save the current frame to disk and log it
        if key == " ":
            score = getIsoScore(cluster,curIso,curReddening)
            fig.savefig(f"Jamboree Images/frames/{curIso.name}.png",dpi=500)
            print(f"{curIso.name} | {curReddening} | {score}")
        #Redraw the CMD with the (possibly updated) isochrone and reddening
        score = getIsoScore(cluster,curIso,curReddening,output=False)
        ax.clear()
        ax.scatter(x,y,s=0.25,color='dimgray')
        ax.scatter(cx,cy,s=4,color='red')
        ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+curReddening for a in curIso.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*curReddening for a in curIso.starList],color='darkblue')
        ax.set_title(f"{curIso.name}\n {curReddening}\n {score}")
        ax.set_xlabel("Apparent BP-RP")
        ax.set_ylabel("Apparent G Mag")
        ax.invert_yaxis()
        #Sidebar: two vertical columns of cells along the right edge of the axes
        x0,x1 = ax.get_xlim()
        y0,y1 = ax.get_ylim()
        margin = 0.01
        width = 0.05 * (x1-x0)
        height = 0.6 * (y1-y0)
        xmargin = margin * (x1-x0)
        ymargin = margin * (y1-y0)
        rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
        rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
        ax.add_patch(rect1)
        ax.add_patch(rect2)
        #Inner column: one cell per age step, current age highlighted in red
        n = len(ageSorted)
        for i in range(n):
            offset = i*height/n
            alpha = 0.25
            if i == age_index:
                color = 'red'
            else:
                color = 'black'
            ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
        #Outer column: one cell per metallicity step, current [Fe/H] highlighted
        n = len(fehSorted)
        for i in range(n):
            offset = i*height/n
            alpha = 0.25
            if i == feh_index:
                color = 'red'
            else:
                color = 'black'
            ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
        fig.canvas.draw_idle()
    return func
def interactivePlot(cl,iso=0,reddening="auto"):
    """Open an interactive CMD for cluster *cl* with a steerable isochrone overlay.

    Parameters
    ----------
    cl : str
        Cluster name (loaded on demand).
    iso : str or int
        Either an isochrone name, or a non-negative index into the cluster's
        ranked fit list (0 = best fit).
    reddening : float, int or "auto"
        Reddening to apply; "auto" uses the cluster's fitted value.

    Key bindings (installed via onkey): w/s step metallicity, a/d step age,
    q/e step reddening by 0.01, r resets, space saves a frame. The event
    connection id is stored in the module global `kid`.
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    global clusters
    global isochrones
    global kid
    checkLoaded([cl])
    cluster = clusters[f"{cl}"]
    #Resolve the isochrone argument: either a name or a rank index
    if type(iso) == str:
        isochrone = isochrones[f"{iso}"]
    elif type(iso) == int:
        assert iso >= 0
        isochrone = isochrones[cluster.iso[iso][0]]
    else:
        print("Invalid declaration of 'iso'")
        return
    name = isochrone.name
    if reddening == "auto":
        reddening = cluster.reddening
    assert type(reddening) == float or type(reddening) == int
    score = getIsoScore(cluster,isochrone,reddening)
    #BUG FIX: this assignment was truncated in the source ("d(isoList,...");
    #restored to mirror the identical age-sorted grid built in onkey().
    ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == isochrone.feh]
    fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == isochrone.age]
    age_index = ageSorted.index(isochrone)
    feh_index = fehSorted.index(isochrone)
    x,y = cluster.mag[:,0],cluster.mag[:,1]
    cx,cy = [s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed]
    #Unbind w/s/a/d/q/e/r from matplotlib's default keymaps so they reach our handler
    letters = ['w','s','a','d','q','e','r']
    for letter in letters:
        for param in [key for key in plt.rcParams if key.startswith("keymap") ]:
            try:
                plt.rcParams[param].remove(letter)
            except:
                continue
    fig = plt.figure(f"Interactive plot of {cl}")
    ax = fig.add_subplot(111)
    ax.scatter(x,y,s=0.25,color='dimgray')
    ax.scatter(cx,cy,s=4,color='red')
    ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+reddening for a in isochrone.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*reddening for a in isochrone.starList],color='darkblue')
    ax.set_title(f"{name}\n {reddening}\n {score}")
    ax.set_xlabel("Apparent BP-RP")
    ax.set_ylabel("Apparent G Mag")
    ax.invert_yaxis()
    #Sidebar columns showing position within the age and metallicity grids
    x0,x1 = ax.get_xlim()
    y0,y1 = ax.get_ylim()
    margin = 0.01
    width = 0.05 * (x1-x0)
    height = 0.6 * (y1-y0)
    xmargin = margin * (x1-x0)
    ymargin = margin * (y1-y0)
    rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
    rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
    ax.add_patch(rect1)
    ax.add_patch(rect2)
    #Inner column: one cell per age step, current age highlighted in red
    n = len(ageSorted)
    for i in range(n):
        offset = i*height/n
        alpha = 0.25
        if i == age_index:
            color = 'red'
        else:
            color = 'black'
        ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
    #Outer column: one cell per metallicity step, current [Fe/H] highlighted
    n = len(fehSorted)
    for i in range(n):
        offset = i*height/n
        alpha = 0.25
        if i == feh_index:
            color = 'red'
        else:
            color = 'black'
        ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
    hook = onkey(x,y,cx,cy,fig,ax,cluster,isochrone,reddening)
    kid = fig.canvas.mpl_connect('key_press_event',hook)
def printList(cList,varList):
    """Print the value of each attribute expression in *varList* for each cluster.

    Entries of varList are evaluated as `cluster.<expr>`, so dotted or indexed
    expressions are allowed. Clusters are loaded on demand via checkLoaded.
    """
    cList = checkLoaded(cList)
    for cl in cList:
        cluster = clusters[cl]
        for a in varList:
            clStr = f"[{cl}] {a} ="
            #exec allows arbitrary attribute expressions; varList must be trusted input
            exec(f"print(clStr,cluster.{a})")
def statRange(cl,a,b):
    """Print the ranked isochrone fits a..b for cluster *cl* and their mean parameters.

    Each cluster.iso entry is (isochrone key, score, reddening). Prints one
    line per fit in the slice, then the mean age, [Fe/H], Y, and reddening.
    Loads the isochrone grid on demand.
    """
    import numpy as np
    global clusters
    checkLoaded([cl])
    if not isoIn:
        loadIsochrones()
    ages = []
    fehs = []
    ys = []
    reds = []
    for isochrone in clusters[cl].iso[a:b]:
        iso = isochrones[isochrone[0]]
        print(f"{iso.name} Reddening:{isochrone[2]}")
        ages.append(float(iso.age))
        fehs.append(float(iso.feh))
        ys.append(float(iso.y))
        reds.append(float(isochrone[2]))
    print(f"[{cl}] Mean age= {np.mean(ages)} Mean feh= {np.mean(fehs)} Mean y= {np.mean(ys)} Mean Reddening= {np.mean(reds)}")
def setFlag():
    """Set .member = 1 on every unfilteredWide star that also appears in filtered.

    Iterates all loaded clusters; comparison uses the stars' equality semantics.
    """
    #BUG FIX: the original declared `global clusterlist` (lowercase 'l'), which
    #does not match the module-level `clusterList` actually iterated below.
    global clusterList
    for cluster in clusterList:
        for star in cluster.filtered:
            for unfStar in cluster.unfilteredWide:
                if star == unfStar:
                    unfStar.member = 1
def customPlot(var1,var2,clname,mode='filtered',iso=False,square=True,color='default',title='default',close=False,save=True):
    """General-purpose 2D scatter plot of two star attributes for a cluster.

    Parameters
    ----------
    var1, var2 : str
        Star attribute names (evaluated as `star.<var>`) for the x and y axes.
    clname : str
        Cluster name (loaded on demand).
    mode : str
        Which star list(s) to plot; 'duo*' modes overlay a highlighted subset
        (starlistF) on a gray background list.
    iso : bool
        Invert the y-axis (CMD convention).
    square : bool
        Force equal axis scaling; set False together with iso=True for a CMD.
    color : str
        Optional third attribute used to color the (foreground) points.
    title : str
        Custom title, or 'default' for an auto-generated one.
    close, save : bool
        Close the figure after plotting / save PDF+PNG to SpecificPlots/.
    """
    import matplotlib.pyplot as plt
    global closePlots
    checkLoaded([clname])
    cluster = clusters[f"{clname}"]
    #Set the list of stars to be used for the given cluster
    #Using a mode not specified will return a referenced before assignment error
    if mode == 'filtered':
        starlist = cluster.filtered
    elif mode == 'unfiltered':
        starlist = cluster.unfilteredWide
    elif mode == 'bright_filtered':
        starlist = cluster.filteredBright
    elif mode == 'dist_filtered':
        starlist = cluster.distFiltered
    elif mode == 'bright_unfiltered':
        starlist = cluster.unfilteredBright
    elif mode == 'duo':
        starlist = cluster.unfilteredWide
        starlistF = cluster.filtered
    elif mode == 'binary':
        starlist = cluster.binaries
    elif mode == 'duoBinary':
        starlist = cluster.filtered
        starlistF = cluster.binaries
    elif mode == 'duoBright':
        starlist = cluster.unfilteredBright
        starlistF = cluster.filteredBright
    elif mode == 'duoDist':
        starlist = cluster.distFiltered
        starlistF = cluster.filtered
    elif mode == 'condensed':
        starlist = cluster.condensed
    elif mode == 'duoCondensed':
        starlist = cluster.filtered
        starlistF = cluster.condensed
    elif mode == 'bounded':
        starlist = cluster.bounded
    elif mode == 'duoBounded':
        starlist = cluster.filtered
        starlistF = cluster.bounded
    else:
        print("No preset star list configuration found with that alias")
        return
    #Basic plot features with axis labels and a title
    plt.figure()
    if title == 'default':
        plt.title(f"{clname} {mode} | {var1} vs {var2} | {color} color")
    else:
        plt.title(f"{title}")
    plt.xlabel(f"{var1}".upper())
    plt.ylabel(f"{var2}".upper())
    #Plots differently depending on the mode
    #The color tag can be used to add distinction of a third variable while limited to two axes
    #If unspecified, filtered starlist with midnight blue coloring will be the result
    if iso:
        plt.gca().invert_yaxis()
    if 'duo' in mode:
        #plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=[0.1+a.member*1.4 for a in starlist],c=[list(('lightgray',eval('z.par')))[z.member] for z in starlist])
        plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=2,c='gray')
        if color == 'default':
            plt.scatter([eval(f"x.{var1}") for x in starlistF],[eval(f"y.{var2}") for y in starlistF],s=2.5,c='red')
        else:
            plt.scatter([eval(f"x.{var1}") for x in starlistF],[eval(f"y.{var2}") for y in starlistF],s=2.5,c=[eval(f"z.{color}") for z in starlistF])
            plt.set_cmap('brg')
            clb = plt.colorbar()
            clb.ax.set_title(f"{color}")
    else:
        if color == 'default':
            plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=1,c='midnightblue')
        else:
            plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=2,c=[eval(f"z.{color}") for z in starlist])
            plt.set_cmap('cool')
            clb = plt.colorbar()
            clb.ax.set_title(f"{color}")
    #By default, squares the axes to avoid misinformation from stretched axes
    #Turn this off and iso to true for a color magnitude diagram
    if square:
        plt.axis("square")
    if save:
        plt.savefig(f"SpecificPlots/pdf/{clname}_{mode}_{var1}_{var2}.pdf")
        plt.savefig(f"SpecificPlots/png/{clname}_{mode}_{var1}_{var2}.png",dpi=500)
    if close or closePlots:
        plt.close()
        if save:
            print(f"Custom Plot {clname}_{mode}_{var1}_{var2} saved and closed")
        else:
            print(f"Custom Plot {clname}_{mode}_{var1}_{var2} closed")
def splitMS(clname='M67',slope=3,offset=12.2):
    """Visualize the main sequence and its binary sequence for cluster *clname*.

    Overlays a linear main-sequence proxy (g = slope*b_r + offset) and the
    same line shifted 0.75 mag brighter — the expected offset of the
    equal-mass binary sequence — on the cluster's filtered CMD. The figure
    is saved under SpecificPlots/ as PNG and PDF.
    """
    #Imports
    import numpy as np
    import matplotlib.pyplot as plt
    checkLoaded([clname])
    cluster = clusters[clname]
    xlist = [s.b_r for s in cluster.filtered]
    ylist = [s.g_mag for s in cluster.filtered]
    #Color range over which the linear MS proxy is drawn
    x = np.linspace(1,2,100)
    #Create a diagram showing the lower edge and upper edge of the main sequence, which in theory are separated by 0.75mag
    plt.figure()
    plt.title('Main and Binary Sequences')
    plt.xlabel('B-R')
    plt.ylabel('Apparent G Mag')
    plt.scatter(xlist,ylist,s=0.5,label='Filtered Star Data')
    plt.plot(x,[slope*a + offset for a in x],color='r',label='Main Sequence')
    plt.plot(x,[slope*a + offset - 0.75 for a in x],'--',color='r',label='MS shifted 0.75 mag')
    plt.xlim(0.6,2.2)
    plt.ylim(13,19)
    plt.legend()
    plt.gca().invert_yaxis()
    plt.savefig(f"SpecificPlots/png/{clname}_MS_Spread.png",dpi=500)
    plt.savefig(f"SpecificPlots/pdf/{clname}_MS_Spread.pdf")
def kingProfile(r,K,R):
    """Simplified King surface-density profile: K / (1 + r^2/R^2).

    r is the radius, K the central density scale, and R the core radius.
    """
    return K*(1+r**2/R**2)**(-1)
def kingError(r,K,R,dK,dR):
    """Propagated 1-sigma uncertainty of kingProfile(r, K, R).

    Standard quadrature propagation with partial derivatives:
    df/dK = (1 + r^2/R^2)^-1 and df/dR = 2*K*r^2*R / (r^2 + R^2)^2,
    given parameter uncertainties dK and dR (assumed uncorrelated).
    """
    import numpy as np
    dfdK = (1+r**2/R**2)**(-1)
    dfdR = 2*K*r**2*R*(r**2+R**2)**(-2)
    return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)
def densityProfile(r,K,R):
    """Exponential radial density profile: K * exp(-r/R)."""
    import numpy as np
    #The exponential that is fit for the membership profile
    #R is a characteristic radius, typically negative but the absolute value is used for comparison
    #K is a scalar constant
    return K*np.exp(-1*r/R)
def densityError(r,K,R,dK,dR):
    """Propagated 1-sigma uncertainty of densityProfile(r, K, R).

    Quadrature propagation with |df/dK| = exp(-r/R) and
    |df/dR| = K*r/R^2 * exp(-r/R), given uncorrelated uncertainties dK, dR.
    """
    import numpy as np
    dfdK = abs(np.exp(-1*r/R))
    dfdR = abs(K*r/(R**2)*np.exp(-1*r/R))
    return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)
def toIntensity(mag):
    """Convert an apparent magnitude to an intensity in W/m^2.

    Uses the standard magnitude-flux relation anchored to the Sun
    (apparent magnitude -26.74, irradiance 1360 W/m^2):
    I = I_sun * 10**(0.4 * (m_sun - mag)).
    """
    sun_mag = -26.74 #apparent magnitude of the Sun
    sun_irradiance = 1360 #W/m^2
    exponent = 0.4*(sun_mag-mag)
    return sun_irradiance*10**exponent
def membership(clname='M67',N=100,mode='filtered',numPercentileBins=5,percentile=0.2,delta=5,normalize=True):
#Imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import scipy.optimize as so
import scipy.stats as st
import math
global volume
checkLoaded([clname])
cluster = clusters[clname]
mode = mode.lower()
#Default mode is filtered, but unfiltered data can be processed
if "filtered" in mode:
starList = cluster.filtered
elif "bounded" in mode:
starList = cluster.bounded
else:
starList = cluster.unfilteredWide
#Load mass estimates from isochrone fitting
if not cluster.massLoaded:
proxyMatch([cluster.name])
assert cluster.massLoaded
assert len(starList) > 0
#Assign x and y lists based on normalization or not
if normalize:
starX = [a.ra*np.cos(a.dec*np.pi/180) for a in starList]
starY = [a.dec for a in starList]
mode = mode + "_normalized"
else:
starX = [a.ra for a in starList]
starY = [a.dec for a in starList]
#Determine bounds of the field of view (post-filtering)
xmax = max(starX)
ymax = max(starY)
x0 = np.mean(starX)
y0 = np.mean(starY)
newN = N
#Determine radius of the field of view
rx = xmax-x0
ry = ymax-y0
#r = np.mean([rx,ry])
radiusFOV = ry
#Using the mean ra and dec radius caused problems with clusters
#like NGC188, which are close to the celestial pole and have
#a very stretched mapping to the RA DEC space
ringBins = list(np.linspace(0,radiusFOV,N))
#The bins are divided up such that 50% of the bins are located in the inner 25% of the cluster radius
#The remaining 50% of the bins are divided from 25% to 100% of the radius
rings = list(np.linspace(0,radiusFOV/4,math.ceil(N/2)))
ring2 = list(np.linspace(radiusFOV/4,radiusFOV,math.floor(N/2)+1))
ring2 = ring2[1:-1]
rings.extend(ring2)
x=rings[:-1]
# for i in range(0,len(rings[:-1])):
# x.append((rings[i+1]+rings[i])/2)
counts = list(np.zeros(N-1,dtype=int))
masses = list(np.zeros(N-1,dtype=int))
rads=[]
for star in starList:
#Radial distance from the mean RA and Dec of the cluster
if normalize:
rads.append(np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2))
else:
rads.append(np.sqrt((star.ra-x0)**2+(star.dec-y0)**2))
#Find the nearest ring to the star
r = find_nearest(rings, rads[-1])
i = rings.index(r)
#Check bounds
if i < len(counts):
#If outside last ring, add to that count
if r > rads[-1]:
counts[i-1] += 1
masses [i-1] += star.proxyMass
else:
counts[i] += 1
masses [i] += star.proxyMass
#Worth noting here that the way that this is set up, the rings don't actually mark the bounds of the bins but rather the midpoints.
plt.figure(f"{clname}_membership_{mode}")
plt.hist(rads,bins=ringBins)
plt.xlabel("Radius (deg)")
plt.ylabel("Number of Stars")
plt.title(f"{clname} Membership")
plt.savefig(f"{cluster.imgPath}{clname}_membership_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_membership_{mode}.png",dpi=500)
volume = []
for i in range(0,len(rings[:-1])):
volume.append(np.pi*(rings[i+1]**2-rings[i]**2))
numDensity = [a/b for a,b in zip(counts,volume)]
massDensity = [a/b for a,b in zip(masses,volume)]
error_num = [np.sqrt(a)/b for a,b in zip(counts,volume)]
error_mass = [np.sqrt(a)/b for a,b in zip(masses,volume)]
for i in range(0,len(error_num)):
if error_num[i] < 0.1:
error_num[i] = 0.1
x = x[math.ceil(N/20):-1]
counts = counts[math.ceil(N/20):-1]
numDensity = numDensity[math.ceil(N/20):-1]
massDensity = massDensity[math.ceil(N/20):-1]
error_num = error_num[math.ceil(N/20):-1]
error_mass = error_mass[math.ceil(N/20):-1]
#Further filter the data based on outliers, either extremely low density or extremely big jumps in density from bin to bin
i = 0
numSmall = 0
numGrad = 0
while i < len(x)-1:
if numDensity[i] < 0.5 or numDensity[i] < numDensity[i+1]/delta or massDensity[i] < 0.1:
x.pop(i)
counts.pop(i)
numDensity.pop(i)
massDensity.pop(i)
error_num.pop(i)
error_mass.pop(i)
numSmall += 1
newN -= 1
elif abs(numDensity[i]) > abs(numDensity[i+1])*delta:# or abs(numDensity[i]) < abs(numDensity[i-1])/3:
x.pop(i)
counts.pop(i)
numDensity.pop(i)
massDensity.pop(i)
error_num.pop(i)
error_mass.pop(i)
numGrad += 1
newN -= 1
else:
i += 1
if numDensity[-1] < 0.01 or massDensity[-1] < 0.01:
x.pop(-1)
counts.pop(-1)
numDensity.pop(-1)
massDensity.pop(-1)
error_num.pop(-1)
error_mass.pop(-1)
numSmall += 1
newN -= 1
print(f"[{cluster.name}] Removed {numSmall} points with too small of a density and {numGrad} points with too extreme of a delta")
#========= Number Density =========
#Number density vs radial bin plot
plt.figure(f"{clname}_density_{mode}")
plt.errorbar(x,numDensity,yerr=error_num,ls='None')
plt.scatter(x,numDensity)
plt.xlabel("Radius (deg)")
plt.ylabel(r"Surface Number Density ($deg^{-2}$)")
plt.title(f"{clname} {mode.capitalize()} Number Density".replace("_normalized",' Normalized'))
#Fit an exponential curve to the density plot based on the densityProfile function defined above
if "NGC2355" in cluster.name:
p0=[5000,0.1]
else:
p0=[5000,0.1]
#print([b/a for a,b in zip(numDensity,error_num)])
fit,var = so.curve_fit(kingProfile,x,numDensity,p0,maxfev=1000)
#Std. Dev. from variance
err = np.sqrt(var[1][1])
err_coeff = np.sqrt(var[0][0])
scale = np.abs(fit[1]*3600/206265)/(cluster.mean_par/1000)
#scaleVar = (3600/206265)*(err/(cluster.mean_par/1000) ) + 2*fit[1]/(cluster.mean_par_err/1000)
scaleVar = np.abs(scale*np.sqrt((var[1][1]/fit[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))
#Scale radius from count in parsecs
setattr(cluster,f"scaleRad_{mode}",scale)
setattr(cluster,f"scaleRad_err_{mode}",scaleVar)
#Scale radius from count in degrees
setattr(cluster,f"scaleAngle_{mode}",abs(fit[1]))
setattr(cluster,f"scaleAngle_err_{mode}",err)
setattr(cluster,f"numDensity_coeff_{mode}",fit[0])
setattr(cluster,f"numDensity_coeff_err_{mode}",err_coeff)
#Plot the curve fit
numLabel = ( f"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"
+ fr"K={fit[0]:.3f} $\pm$ {err_coeff:.3f}" + "\n"
+ fr"$\rho$={np.abs(fit[1]):.3f}$\degree$ $\pm$ {err:.3f}$\degree$"+ "\n"
+ fr"R={scale:.3f}pc $\pm$ {scaleVar:.3f}pc" )
plt.plot(x,[kingProfile(a,*fit) for a in x],color='red',label=numLabel)
plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],label=r'$1\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_{mode}.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_log_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_log_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_density_filtered")
plt.title(f"{clname} Overlaid Number Density")
plt.errorbar(x,numDensity,yerr=error_num,ls='None',color='midnightblue')
plt.scatter(x,numDensity,color='midnightblue')
plt.plot(x,[kingProfile(a,*fit) for a in x],color='darkred',label=numLabel)
plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.yscale('linear')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_overlay.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_log_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_log_overlay.png",dpi=500)
#========= Mass Density =========
#Mass density vs radial bin plot
plt.figure(f"{clname}_mass_density_{mode}")
plt.errorbar(x,massDensity,yerr=error_mass,ls='None')
plt.scatter(x,massDensity)
plt.xlabel("Radius (deg)")
plt.ylabel(r"Surface Mass Density ($M_{\odot}*deg^{-2}$)")
plt.title(f"{clname} {mode.capitalize()} Mass Density".replace("_normalized",' Normalized'))
#Fit an exponential curve to the density plot based on the densityProfile function defined above
fit_mass,var_mass = so.curve_fit(kingProfile,x,massDensity,p0,maxfev=1000)
#Std. Dev. from variance
err_mass = np.sqrt(var[1][1])
err_mass_coeff = np.sqrt(var[0][0])
scale_mass = np.abs(fit_mass[1]*3600/206265)/(cluster.mean_par/1000)
#scaleVar_mass = (3600/206265)*(err_mass/(cluster.mean_par/1000) ) + 2*fit_mass[1]/(cluster.mean_par_err/1000)
scaleVar_mass = np.abs(scale_mass*np.sqrt((var_mass[1][1]/fit_mass[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))
#Scale radius from mass in parsecs
setattr(cluster,f"scaleRad_mass_{mode}",scale_mass)
setattr(cluster,f"scaleRad_mass_err_{mode}",scaleVar_mass)
#Scale radius from mass in degrees
setattr(cluster,f"scaleAngle_mass_{mode}",abs(fit_mass[1]))
setattr(cluster,f"scaleAngle_mass_err_{mode}",err_mass)
setattr(cluster,f"massDensity_coeff_{mode}",fit_mass[0])
setattr(cluster,f"massDensity_coeff_err_{mode}",err_mass_coeff)
#Plot the curve fit
massLabel = ( f"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"
+ fr"K={fit_mass[0]:.3f} $\pm$ {err_mass_coeff:.3f}" + "\n"
+ fr"$\rho$={np.abs(fit_mass[1]):.3f}$\degree$ $\pm$ {err_mass:.3f}$\degree$"+ "\n"
+ fr"R={scale_mass:.3f}pc $\pm$ {scaleVar_mass:.3f}pc" )
plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='red',label=massLabel)
plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],label=r'$1\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_{mode}.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_log_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_log_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_mass_density_filtered")
plt.title(f"{clname} Overlaid Mass Density")
plt.errorbar(x,massDensity,yerr=error_mass,ls='None',color='midnightblue')
plt.scatter(x,massDensity,color='midnightblue')
plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='darkred',label=massLabel)
plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.yscale('linear')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_overlay.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_log_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_log_overlay.png",dpi=500)
#========= Average Mass =========
averageMass = [a/b for a,b in zip(massDensity,numDensity)]
xDist = [np.abs(a*3600/206265)/(cluster.mean_par/1000) for a in x]
#Average Mass plot
plt.figure(f"{clname}_average_mass_{mode}")
plt.scatter(xDist,averageMass,label=fr"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"+f"{numPercentileBins} Percentile Bins")
plt.xlabel("Distance from Center (pc)")
plt.ylabel(r"Average Stellar Mass ($M_{\odot}$)")
plt.title(f"{clname} {mode.capitalize()} Average Mass".replace("_normalized",' Normalized'))
#Split average mass data into numPercentileBins number of bins
if "filtered" in mode:
cluster.pMin = xDist[0]
cluster.pMax = xDist[-1]
pBins = np.linspace(cluster.pMin,cluster.pMax,numPercentileBins+1)
xBins = []
for i in range(len(pBins)-1):
xBins.append((pBins[i]+pBins[i+1])/2)
pBins = np.delete(pBins,0)
pBins = np.delete(pBins,-1)
for b in pBins:
plt.axvline(x=b,color='black',linestyle='--')
binned = []
for n in range(numPercentileBins):
binned.append([])
#Assign the average mass data points to the bins
for i in range(len(xDist)):
#Finds the nearest xBin to each x value and sorts the corresponding averageMass into that bin
val = find_nearest(xBins,xDist[i])
idx = xBins.index(val)
binned[idx].append(averageMass[i])
#Creates arrays that are numPercentileBins long that store the standard and quantile means of the points in those bins
quantileMean = []
binMean = []
meanBins = []
for b in binned:
if len(b) == 0:
continue
binSorted = sorted(b)
#Finds the index of the lower percentile marker (ex. 20%)
lower = binSorted.index(find_nearest(binSorted, np.quantile(b,percentile)))
#Finds the index of the upper percentile marker (ex. 80%)
upper = binSorted.index(find_nearest(binSorted, np.quantile(b,1-percentile)))
#Means between lower and upper percentile markers
quantileMean.append(np.mean(binSorted[lower:upper+1]))
#Standard Mean
binMean.append(np.mean(b))
#Bins
meanBins.append(xBins[binned.index(b)])
try:
fit, var = so.curve_fit(kingProfile,xDist,[kingProfile(a,*fit_mass)/kingProfile(a,*fit) for a in x])
residual_coeff, residual_scaleAngle = fit[0],fit[1]
except:
print(f"Unable to fit the residuals for {cluster.name}")
residual_coeff, residual_scaleAngle = -99, -99
massFit = st.linregress(meanBins,quantileMean)
fitslope, intercept, rval, pval, fitslope_err, intercept_err = massFit.slope, massFit.intercept, massFit.rvalue, massFit.pvalue, massFit.stderr, massFit.intercept_stderr
residual_scaleRad = np.abs(residual_scaleAngle*3600/206265)/(cluster.mean_par/1000)
setattr(cluster,f"residual_coeff_{mode}",residual_coeff)
setattr(cluster,f"residual_scaleAngle_{mode}",residual_scaleAngle)
setattr(cluster,f"residual_scaleRad_{mode}",residual_scaleRad)
setattr(cluster,f"mass_slope_{mode}",fitslope)
setattr(cluster,f"mass_slope_err_{mode}",fitslope_err)
setattr(cluster,f"mass_intercept_{mode}",intercept)
setattr(cluster,f"mass_intercept_err_{mode}",intercept_err)
setattr(cluster,f"mass_fit_r2_{mode}",rval**2)
setattr(cluster,f"mass_fit_p_{mode}",pval)
fitLabel = ( fr"Slope = {fitslope:.3f} $\pm$ {fitslope_err:.3f}" + "\n"
+ fr"Intercept = {intercept:.3f} $\pm$ {intercept_err:.3f}" + "\n"
+ fr"$r^2$ = {rval**2:.3f} ({mode.capitalize()})".replace("_normalized",' Normalized'))
#Plot the quantile and standard means on the existing average mass plot
plt.scatter(meanBins,quantileMean,color='red',label=f'Interquartile Mean ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='red',label=fitLabel)
#plt.scatter(meanBins,binMean,color='dimgray',label=f'{mode.capitalize()} Standard Mean')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_averageMass_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_averageMass_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_average_mass_filtered")
plt.title(f"{clname} Overlaid Average Mass")
plt.scatter(xDist,averageMass,color='midnightblue',label=fr"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"+f"{numPercentileBins} Percentile Bins")
plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='darkred',label=fitLabel)
plt.scatter(meanBins,quantileMean,color='darkred',label=f'Interquartile Mean ({mode.capitalize()})'.replace("_normalized",' Normalized'))
#plt.scatter(meanBins,binMean,color='black',label=f'{mode.capitalize()} Standard Mean')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_averageMass_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_averageMass_overlay.png",dpi=500)
#========= Radius Plot =========
plt.figure(f"{clname}_characteristic_radius_{mode}")
if normalize:
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA*cos(Dec) (Deg)")
else:
plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA (Deg)")
pltRad = abs(getattr(cluster,f"scaleAngle_{mode}"))
outline1 = Circle([x0,y0],1*pltRad,color='red',fill=False,ls='--',label=fr"$\rho$={1*pltRad:0.3f}$\degree$",alpha=0.7)
outline2 = Circle([x0,y0],5*pltRad,color='red',fill=False,ls='--',label=fr"5$\rho$={5*pltRad:0.3f}$\degree$",alpha=0.7)
#outline3 = Circle([x0,y0],10*abs(getattr(cluster,f"scaleAngle_{mode}")),color='red',fill=False,ls='--',label=fr"10$\rho$={3*abs(fit[1]):0.3f}$\degree$",alpha=0.7)
plt.gca().add_patch(outline1)
plt.gca().add_patch(outline2)
#plt.gca().add_patch(outline3)
plt.legend(fontsize=10,loc='upper right')
plt.axis('square')
plt.ylabel("DEC (Deg)")
plt.title(f"{clname} {mode.capitalize()} Characteristic Radius".replace("_normalized",' Normalized'))
plt.gcf().set_size_inches(8,8)
plt.savefig(f"{cluster.imgPath}{clname}_radialMembership_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_radialMembership_{mode}.png",dpi=500)
if "M67" in clname and "filtered" in mode:
plt.figure(f"{clname}_rings_{mode}")
if normalize:
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA*cos(Dec) (Deg)")
else:
plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA (Deg)")
for i in range(0,len(rings)):
outline = Circle([x0,y0],rings[i],color='red',fill=False)
plt.gca().add_patch(outline)
plt.legend(fontsize=10,loc='upper right')
plt.axis('square')
plt.ylabel("DEC (Deg)")
plt.title(f"{clname} Radial Bins")
plt.gcf().set_size_inches(8,8)
plt.savefig(f"SpecificPlots/pdf/{clname}_radialBins_{mode}.pdf".replace("_filtered",''))
plt.savefig(f"SpecificPlots/png/{clname}_radialBins_{mode}.png".replace("_filtered",''),dpi=500)
plt.xlim(x0-0.15,x0+0.15)
plt.ylim(y0-0.15,y0+0.15)
plt.savefig(f"SpecificPlots/pdf/{clname}_radialBins_center_{mode}.pdf".replace("_filtered",''))
plt.savefig(f"SpecificPlots/png/{clname}_radialBins_center_{mode}.png".replace("_filtered",''),dpi=500)
#========= Stars by Mass =========
massList = []
innerMassList = []
for star in starList:
massList.append(star.proxyMass)
if normalize:
if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMassList.append(star.proxyMass)
else:
if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMassList.append(star.proxyMass)
mBins = np.arange(min(massList),max(massList)+0.1,0.1)
inBins = np.arange(min(innerMassList),max(innerMassList)+0.1,0.1)
plt.figure(f"{clname}_mass_frequency_{mode}")
plt.xlabel(r"Stellar Mass ($M_{\odot}$)")
plt.ylabel("Number of Stars")
plt.title(f"{clname} {mode.capitalize()} Mass Frequency".replace("_normalized",' Normalized'))
plt.hist(massList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
plt.hist(innerMassList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_massFrequency_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massFrequency_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_mass_frequency_filtered")
plt.title(f"{clname} Overlaid Mass Frequency")
plt.hist(massList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
plt.hist(innerMassList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_massFrequency_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massFrequency_overlay.png",dpi=500)
#========= Stars by Magnitude =========
magList = []
innerMagList = []
for star in starList:
magList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
if normalize:
if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
else:
if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
mBins = np.arange(min(magList),max(magList)+0.1,0.1)
inBins = np.arange(min(innerMagList),max(innerMagList)+0.1,0.1)
plt.figure(f"{clname}_mag_frequency_{mode}")
plt.xlabel(r"Absolute G Mag")
plt.ylabel("Number of Stars")
plt.title(f"{clname} {mode.capitalize()} Absolute Magnitude Frequency".replace("_normalized",' Normalized'))
plt.hist(magList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
plt.hist(innerMagList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_magFrequency_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_magFrequency_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_mag_frequency_filtered")
plt.title(f"{clname} Overlaid Absolute Magnitude Frequency")
plt.hist(magList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
plt.hist(innerMagList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_magFrequency_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_magFrequency_overlay.png",dpi=500)
#========= Stars by Color =========
colorList = []
innerColorList = []
for star in starList:
colorList.append(star.b_r-cluster.reddening)
if normalize:
if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerColorList.append(star.b_r-cluster.reddening)
else:
if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerColorList.append(star.b_r-cluster.reddening)
mBins = np.arange(min(colorList),max(colorList)+0.1,0.1)
inBins = np.arange(min(innerColorList),max(innerColorList)+0.1,0.1)
plt.figure(f"{clname}_color_frequency_{mode}")
plt.xlabel(r"Dereddened BP-RP")
plt.ylabel("Number of Stars")
plt.title(f"{clname} {mode.capitalize()} Dereddened Color Index Frequency".replace("_normalized",' Normalized'))
plt.hist(colorList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
plt.hist(innerColorList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_colorFrequency_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_colorFrequency_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_color_frequency_filtered")
plt.title(f"{clname} Overlaid Dereddened Color Index Frequency")
plt.hist(colorList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
plt.hist(innerColorList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_colorFrequency_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_colorFrequency_overlay.png",dpi=500)
#========= Other Radii =========
massSum = np.sum([star.proxyMass for star in starList])
intensitySum = np.sum([toIntensity(star.g_mag) for star in starList])
curMassSum = 0
curIntSum = 0
massFound = False
intFound = False
if normalize:
setattr(cluster,f"medianRad_{mode}",np.median([np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))
setattr(cluster,f"medianAngle_{mode}",np.median([star.normRadDist for star in starList]))
radialStarList = sorted(starList,key=lambda x: x.normRadDist)
for star in radialStarList:
curMassSum += star.proxyMass
curIntSum += toIntensity(star.g_mag)
if curMassSum > massSum/2 and not massFound:
setattr(cluster,f"halfMassRad_{mode}",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfMassAngle_{mode}",star.normRadDist)
massFound = True
if curIntSum > intensitySum/2 and not intFound:
setattr(cluster,f"halfLightRad_{mode}",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfLightAngle_{mode}",star.normRadDist)
intFound = True
if massFound and intFound:
break
plt.figure(f"{clname}_other_radii_{mode}")
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA*cos(Dec) (deg)")
else:
setattr(cluster,f"medianRad_{mode}",np.median([np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))
setattr(cluster,f"medianAngle_{mode}",np.median([star.radDist for star in starList]))
radialStarList = sorted(starList,key=lambda x: x.radDist)
for star in radialStarList:
curMassSum += star.proxyMass
curIntSum += toIntensity(star.g_mag)
if curMassSum > massSum/2 and not massFound:
setattr(cluster,f"halfMassRad_{mode}",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfMassAngle_{mode}",star.radDist)
massFound = True
if curIntSum > intensitySum/2 and not intFound:
setattr(cluster,f"halfLightRad_{mode}",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfLightAngle_{mode}",star.radDist)
intFound = True
if massFound and intFound:
break
plt.figure(f"{clname}_other_radii_{mode}")
plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA (deg)")
medRad = getattr(cluster,f"medianRad_{mode}")
medAngle = getattr(cluster,f"medianAngle_{mode}")
mRad = getattr(cluster,f"halfMassRad_{mode}")
mAngle = getattr(cluster,f"halfMassAngle_{mode}")
lRad = getattr(cluster,f"halfLightRad_{mode}")
lAngle = getattr(cluster,f"halfLightAngle_{mode}")
print(medAngle)
outline1 = Circle([x0,y0],medAngle,color='red',fill=False,ls='--',label=fr"Median Star Distance = {medAngle:.3f}$\degree$, {medRad:.3f}pc",alpha=1)
outline2 = Circle([x0,y0],mAngle,color='darkgreen',fill=False,ls='--',label=fr"Half Mass Radius = {mAngle:.3f}$\degree$, {mRad:.3f}pc",alpha=1)
outline3 = Circle([x0,y0],lAngle,color='purple',fill=False,ls='--',label=fr"Half Light Radius = {lAngle:.3f}$\degree$, {lRad:.3f}pc",alpha=1)
plt.gca().add_patch(outline1)
plt.gca().add_patch(outline2)
plt.gca().add_patch(outline3)
plt.legend(fontsize=10,loc='upper right')
plt.axis('square')
plt.ylabel("DEC (Deg)")
plt.title(f"{clname} {mode.capitalize()} Various Radii".replace("_normalized",' Normalized'))
plt.gcf().set_size_inches(8,8)
plt.savefig(f"{cluster.imgPath}{clname}_otherRadii_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_otherRadii_{mode}.png",dpi=500)
def checkLoaded(cList):
    """Ensure every requested cluster is loaded into memory.

    The special entry 'all' expands to the names of every cluster already in
    `clusterList`; otherwise any name missing from the `clusters` dict is
    loaded on demand via loadClusters().

    Returns the (possibly expanded) list of cluster name strings.
    """
    if 'all' in cList:
        # Expand the sentinel to every known cluster name.
        return [c.name for c in clusterList]
    for name in cList:
        if name not in clusters:
            loadClusters([name])
    return cList
def saveResults(cList, outdir="results"):
    """Export a summary of each loaded cluster as a dill pickle and a CSV.

    Parameters
    ----------
    cList : list[str]
        Cluster names to export, or ['all'] for every loaded cluster.
    outdir : str
        Output directory; pickles go under `<outdir>/pickled/`.
    """
    #Imports
    import numpy as np
    import dill
    import os
    global clusters
    global clusterList
    # Bug fix: checkLoaded returns the expanded name list (e.g. when 'all'
    # is passed); the original discarded the return value, leaving the
    # literal string 'all' in cList and breaking the clusters[] lookup below.
    cList = checkLoaded(cList)
    #Check and create the relevant directory paths to save/load the results
    # (makedirs with exist_ok covers both outdir and outdir/pickled in one call)
    os.makedirs(f"{outdir}/pickled/", exist_ok=True)
    # Bug fix: the export loop previously lived in an `else:` branch of the
    # directory-existence check, so on a fresh run (directories just created)
    # nothing was actually saved.
    for cl in cList:
        cluster = clusters[cl]
        #Creates a "result cluster" object from the cluster, effectively just stripping away lists
        rCl = resultClusterObj(cluster)
        #Pickle the result cluster object
        with open(f"{outdir}/pickled/{cluster.name}.pk1", 'wb') as output:
            dill.dump(rCl, output)
        #Store variables into an array to be printed as csv
        properties = [a for a in dir(rCl) if not a.startswith('_')]
        res = [getattr(rCl, p) for p in properties]
        #Stack into an array of 2 rows with variable names and values
        fin = np.vstack((properties, res))
        np.savetxt(f"{outdir}/{cluster.name}.csv", fin, delimiter=',', fmt='%s')
def loadResults(filter="None", indir="results"):
    """Load every pickled result-cluster object from `<indir>/pickled/`.

    Populates the global `resultList`, sets the `resultsIn` flag, and
    rebuilds the lookup dict via toDict().  The `filter` parameter is
    currently unused; it is kept for interface compatibility.
    """
    #Imports
    import numpy as np
    import dill
    import os
    global resultList
    global resultsIn
    # Bug fix: the original asserted on the hard-coded "results/" path,
    # ignoring the `indir` argument entirely.
    assert os.path.isdir(f"{indir}/pickled/")
    resultList = []
    for fn in os.listdir(f"{indir}/pickled/"):
        #Reads in instances from the saved pickle file
        # (renamed the file handle so it no longer shadows the builtin `input`)
        with open(f"{indir}/pickled/{fn}", 'rb') as infile:
            res = dill.load(infile)
        resultList.append(res)
    resultsIn = True
    toDict()
def refreshProperties(cList=['all']):
    """Re-copy catalogue attributes onto each loaded cluster object.

    Looks up each cluster by name in the global `catalogue` and copies every
    public attribute of the matching catalogue entry onto the cluster.
    Warns about sentinel values (<= -98, i.e. "unset" in the catalogue).

    Parameters
    ----------
    cList : list[str]
        Cluster names to refresh, or ['all'] for every loaded cluster.
    """
    import numpy as np
    global catalogue
    global clusterList
    global clusters
    clusterCatalogue()
    # Bug fix: checkLoaded returns a list of cluster *name strings*; the
    # original discarded that return value and then iterated the raw names,
    # accessing .name / .filtered on str objects (an AttributeError).
    #Loop through clusters
    for clname in checkLoaded(cList):
        cluster = clusters[clname]
        reference = None
        for cl in catalogue:
            if str(cl.name) == str(cluster.name):
                reference = cl
                print(f"Catalogue match for {cluster.name} found")
                break
        if reference is None:
            print(f"Catalogue match for {cluster.name} was not found, please create one")
            continue
        #Filter all of the methods out of the properties list
        properties = [a for a in dir(reference) if not a.startswith('_')]
        #Now we have a list of all the attributes assigned to the catalogue (the self.variables)
        for p in properties:
            prop = getattr(reference, p)
            # Bug fix: setattr replaces the fragile exec(f"cluster.{p} = prop")
            setattr(cluster, p, prop)
            try:
                # Catalogue placeholder values are <= -98; flag them for the user.
                if prop <= -98:
                    print(f"{cluster.name} does not have a specified catalogue value for {p}")
            except TypeError:
                # Non-numeric attribute: the sentinel comparison does not apply.
                continue
        #Additional properties that may be useful
        for star in cluster.filtered:
            # Proper motion in RA corrected for the cos(dec) projection factor.
            star.normRA = star.pmra*np.cos(star.dec*np.pi/180)
        print(f"{cluster.name} properties refreshed from catalogue")
def statPlot(statX,statY,population="open",color="default",square=True,invertY=False,logX=False,logY=False,pointLabels=True,linFit=False,directory='default'):
    """Plot statistic statX against statY across a population of result clusters.

    Special case: statX="b_r", statY="g_mag" produces three stacked CMD
    overlay figures (apparent, absolute, and offset-shifted) instead of a
    scatter plot of per-cluster statistics.

    Parameters
    ----------
    statX, statY : str
        Attribute names on the result-cluster objects (see resultClusterObj).
    population : str or list[str]
        "open"/"globular" to select clusters by type, or an explicit list of
        cluster names (at least 2).
    pointLabels : bool
        Label each scatter point with its cluster name.
    linFit : bool
        Overlay a scipy linregress fit on the scatter plot.

    NOTE(review): color, square, invertY, logX, logY and directory are
    currently unused by this implementation.
    """
    #Create plots of stat X vs stat Y across a population of clusters, similar to customPlot()
    #Can be set to use a custom list of clusters, or all clusters of a given type
    #
    import matplotlib
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.stats import linregress
    global clusters
    global clusterList
    global catalogue
    global resultsIn
    global resultList
    # Results must be loaded from disk before population statistics exist.
    if not resultsIn:
        loadResults()
    #Filter out incorrect inputs
    if type(population) == str:
        population = population.lower()
        try:
            assert population == "open" or population == "globular"
        except:
            # NOTE(review): execution continues after this warning; an
            # unrecognized string yields an empty cluster selection below.
            print("Specified population type not recognized")
    else:
        try:
            assert type(population) == list
            assert type(population[0]) == str
        except:
            print("Population type given is not valid, must be either a list of cluster name strings or a single string \'open\' or \'closed\'")
            return
        try:
            assert len(population) > 1
        except:
            print("Population statistic plots cannot be made with fewer than 2 clusters given")
            return
    #Load cluster information from cList
    #This is going to involve using the resultCluster object to read data from each cluster folder in the cList
    cList = []
    banList = ['NGC2204']   # clusters excluded from type-based selections
    if type(population) == str:
        # Select all results of the requested cluster type, minus the ban list.
        for res in resultList:
            if res.clType.lower() == population and not res.name in banList:
                cList.append(res)
    else:
        # Explicit name list: take exactly the named clusters.
        for res in resultList:
            if res.name in population:
                cList.append(res)
    if statX.lower() == "b_r" and statY.lower() == "g_mag":
        #Corrected CMD overlay
        # One spectral color per cluster so overlaid CMDs stay distinguishable.
        NUM_COLORS = len(cList)
        cm = plt.get_cmap('nipy_spectral')
        plt.figure("uncorrected")
        plt.title("Cluster Overlay")
        plt.xlabel("Observed B-R")
        plt.ylabel("Apparent G Mag")
        plt.gca().invert_yaxis()
        plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
        plt.figure("unshifted")
        plt.title("Corrected Cluster Overlay")
        plt.xlabel("Dereddened B-R")
        plt.ylabel("Absolute G Mag")
        plt.gca().invert_yaxis()
        plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
        plt.figure("shifted")
        plt.title("Corrected Cluster Overlay - Offset")
        plt.xlabel("Dereddened B-R")
        plt.ylabel("Absolute G Mag")
        plt.gca().invert_yaxis()
        plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
        index = 0
        offset = 2.5    # vertical magnitude offset between clusters in the "shifted" figure
        for cluster in cList:
            try:
                path = cluster.dataPath
            except:
                # Older result objects lack dataPath; fall back to the layout convention.
                path = f"clusters/{cluster.name}/data/"
            condensed = np.genfromtxt(f"{path}condensed.csv",delimiter=",")
            cluster.condensed = condensed
            #Adjust by cluster.reddening and cluster.dist_mod
            # x1/y1: raw observed CMD; x2/y2: dereddened + distance-corrected;
            # x3/y3: same as x2/y2 but offset vertically per cluster.
            x1 = [a[0] for a in condensed]
            y1 = [a[1] for a in condensed]
            x2 = [a[0]-cluster.reddening for a in condensed]
            y2 = [a[1]-2.1*cluster.reddening-cluster.dist_mod for a in condensed]
            x3 = [a[0]-cluster.reddening for a in condensed]
            y3 = [a[1]-2.1*cluster.reddening-cluster.dist_mod+index*offset for a in condensed]
            index += 1
            plt.figure("uncorrected")
            plt.scatter(x1,y1,label=f"{cluster.name}")
            plt.figure("unshifted")
            # Guide lines marking the B-R = 1.6 / G = 4 reference region.
            plt.axvline(x=1.6,ymax=0.5,color='black',linestyle='--')
            plt.axhline(y=4,xmin=0.59,color='black',linestyle='--')
            plt.scatter(x2,y2,label=f"{cluster.name}")
            plt.figure("shifted")
            plt.scatter(x3,y3,label=f"{cluster.name}")
            plt.axvline(x=1.6,color='black',linestyle='--')
            # if 'NGC2301' in cluster.name:
            #     for a,b in zip(x2,y2):
            #         print(f"{a},{b}")
        plt.figure("uncorrected")
        plt.legend(fontsize=10,loc='upper right')
        plt.gcf().set_size_inches(8,6)
        plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_apparent.pdf")
        plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_apparent.png",dpi=500)
        plt.figure("unshifted")
        plt.legend(fontsize=10,loc='upper right')
        plt.gcf().set_size_inches(8,6)
        plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_absolute.pdf")
        plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_absolute.png",dpi=500)
        plt.figure("shifted")
        plt.legend(fontsize=10,loc='upper right')
        plt.gcf().set_size_inches(8,6)
        plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_shifted.pdf")
        plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_shifted.png",dpi=500)
    else:
        # Generic per-cluster statistic scatter plot.
        x = [getattr(a, statX) for a in cList]
        y = [getattr(a, statY) for a in cList]
        plt.figure()
        plt.xlabel(f"{statX}")
        plt.ylabel(f"{statY}")
        if pointLabels:
            for cluster in cList:
                plt.scatter(getattr(cluster, statX),getattr(cluster, statY),label=cluster.name)
            plt.legend(fontsize="small")
        else:
            plt.scatter(x,y)
        if linFit:
            # Simple least-squares trend line over the population.
            reg = linregress(x,y)
            plt.plot(x,[reg[0]*a+reg[1] for a in x])
        plt.savefig(f"SpecificPlots/pdf/{population}_{statX}_{statY}.pdf")
        plt.savefig(f"SpecificPlots/png/{population}_{statX}_{statY}.png",dpi=500)
    return
def ageMassFit(t, m0, k):
    """Exponential decay toward 1: model curve for mean member mass vs. age.

    Evaluates 1 + m0 * exp(-k * t); used as a scipy curve_fit model.
    """
    import numpy as np
    decay = np.exp(-k * t)
    return 1 + m0 * decay
def extinctionLaw(d, M0):
    """Inverse-square dimming: apparent magnitude at distance d for magnitude M0.

    Evaluates M0 - 2.5 * log10(1 / (4 * pi * d**2)); used as a curve_fit model.
    """
    import numpy as np
    flux_factor = 1 / (4 * np.pi * d ** 2)
    return M0 - 2.5 * np.log10(flux_factor)
def resultPlots():
    """Generate summary plots for the open clusters in the module-level
    resultList and save them under results/plots/{pdf,png}/.

    Plots produced: mean member mass vs age (filtered and BP-RP-bounded,
    the latter with an exponential fit), core mass intercept vs age, mass
    dropoff slope vs age, mean apparent magnitude vs distance (inverse
    square law fit), bounded fraction vs distance, and a radii scatter.

    Relies on module globals (resultsIn, resultList); loads results from
    disk when they are not in memory yet. Returns nothing; output is files.
    """
    #Imports
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.stats import linregress
    from scipy.optimize import curve_fit
    global clusters
    global clusterList
    global catalogue
    global resultsIn
    global resultList
    if not resultsIn:
        loadResults()
    #Select open clusters from resultList, skipping known-bad entries
    banList = ['NGC2204']
    cList = [res for res in resultList
             if res.clType.lower() == "open" and res.name not in banList]
    #Filtered mass versus age
    fname = "mass_vs_age_filtered"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"Mean Cluster Member Mass ($M_{\odot}$)")
    plt.scatter([c.fit_age for c in cList],[c.meanProxyMass for c in cList])
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Bounded mass versus age, with exponential decay fit
    fname = "mass_vs_age_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"Mean Cluster Member Mass ($M_{\odot}$)")
    x,y = [c.fit_age for c in cList],[c.meanBoundedProxyMass for c in cList]
    plt.scatter(x,y)
    fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)
    xr = list(np.linspace(min(x),max(x),101))
    # Parameter uncertainties are the square roots of the covariance
    # diagonal (curve_fit returns variances, not standard deviations).
    perr = np.sqrt(np.diag(var))
    fitLabel = fr"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$" + "\n" + fr"Uncertainties = $\pm{perr[0]:.3f}, \pm{perr[1]:.3f}$"
    plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)
    plt.legend()
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Mass intercept versus age, with exponential decay fit
    fname = "mass_intercept_vs_age_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"Mean Stellar Mass in Core ($M_{\odot}$)")
    x,y = [c.fit_age for c in cList],[c.mass_intercept_bounded for c in cList]
    plt.scatter(x,y)
    fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)
    xr = list(np.linspace(min(x),max(x),101))
    perr = np.sqrt(np.diag(var))
    fitLabel = fr"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$" + "\n" + fr"Uncertainties = $\pm{perr[0]:.3f}, \pm{perr[1]:.3f}$"
    plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)
    plt.legend()
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Mass slope versus age (filename typo "mass_slop" fixed)
    fname = "mass_slope_vs_age_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Fit Age (Gyr)")
    plt.ylabel(r"IQM Stellar Mass Dropoff ($\frac{M_{\odot}}{pc}$)")
    x,y = [c.fit_age for c in cList],[c.mass_slope_bounded for c in cList]
    plt.scatter(x,y)
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Magnitude versus distance (extinction / inverse square law)
    fname = "mag_vs_dist_bounded"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Cluster Distance from Earth (pc)")
    plt.ylabel(r"Mean Apparent G Magnitude")
    x,y = [c.meanDist for c in cList],[c.mean_bounded_g_mag for c in cList]
    plt.scatter(x,y)
    fit,var = curve_fit(extinctionLaw,x,y,maxfev=1000)
    xr = list(np.linspace(min(x),max(x),101))
    perr = np.sqrt(np.diag(var))
    plt.plot(xr,[extinctionLaw(a,fit[0]) for a in xr],label="Inverse Square Law \n" + fr" $M_0 = {fit[0]:.3f} \pm {perr[0]:.3f}$")
    plt.gca().invert_yaxis()  # magnitudes: brighter is numerically smaller
    plt.legend()
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Bounded fraction versus distance
    fname = "bounded_fraction_vs_dist"
    plt.figure(fname)
    plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
    plt.xlabel("Cluster Distance from Earth (pc)")
    plt.ylabel("Fraction Unaffected by BP-RP Limit")
    x,y = [c.meanDist for c in cList],[c.fractionBounded for c in cList]
    plt.scatter(x,y)
    plt.savefig(f"results/plots/pdf/{fname}.pdf")
    plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
    #Radii (interactive scatter only; intentionally not saved)
    plt.figure()
    plt.scatter([c.meanGalacticDist for c in cList],[c.halfLightRad_bounded/c.medianRad_bounded for c in cList])
def boundedStats(cList,xmax=1.6,saveCl=True,unloadCl=True):
    """Attach BP-RP-bounded statistics to each named cluster.

    For every cluster in cList, select the filtered stars that are not both
    redder than xmax (after dereddening) and fainter than the cluster's
    cltpy magnitude cut, then store mean/total proxy mass, counts, and mean
    colour/magnitude of that subset on the cluster object.

    :param cList: iterable of cluster names (keys into the clusters global)
    :param xmax: BP-RP colour limit applied after subtracting reddening
    :param saveCl: persist clusters and results to disk after updating
    :param unloadCl: drop each cluster from memory once processed
    """
    import numpy as np
    global clusters
    global subList
    for name in cList:
        checkLoaded([name])
        cluster = clusters[name]
        # Keep stars unless they fall past BOTH the colour and magnitude cuts.
        subList = [star for star in cluster.filtered
                   if not (star.b_r - cluster.reddening > xmax
                           and star.g_mag > cluster.cltpy)]
        cluster.bounded = subList
        #Windowed properties (over the xmin to xmax range)
        masses = [star.proxyMass for star in subList]
        cluster.meanBoundedProxyMass = np.mean(masses)
        cluster.totalBoundedProxyMass = np.sum(masses)
        cluster.numBounded = len(subList)
        cluster.fractionBounded = len(subList) / len(cluster.filtered)
        cluster.mean_bounded_b_r = np.mean([star.b_r for star in subList])
        cluster.mean_bounded_g_mag = np.mean([star.g_mag for star in subList])
        if saveCl:
            saveClusters([name])
            saveResults([name])
        if unloadCl:
            unloadClusters([name])
def tryFits(fitVar='fit_age'):
    """Rank linear-regression r^2 values between cluster attributes.

    With the default, regress every float-valued attribute of the open
    clusters against fitVar and print the 20 best r^2 values. When fitVar
    contains 'all', regress every ordered pair of float attributes and
    print the 200 best. 'turnPoint' is always excluded.

    Side effects: populates the module globals props and r2, and loads
    results from disk if not already in memory.
    """
    from scipy.stats import linregress
    global resultsIn
    global resultList
    global props
    global r2
    if not resultsIn:
        loadResults()
    cList = [res for res in resultList if res.clType.lower() == "open"]

    def _float_props(exclude=()):
        # Float-valued, non-dunder attributes of a result object; these are
        # the candidates for regression. 'turnPoint' is never included.
        names = [a for a in dir(cList[0]) if '__' not in a]
        names = [a for a in names if type(getattr(cList[0], a)) == float]
        return [a for a in names if a != 'turnPoint' and a not in exclude]

    def _r_squared(xAttr, yAttr):
        # r^2 of a simple linear regression of yAttr on xAttr across clusters.
        x = [getattr(a, xAttr) for a in cList]
        y = [getattr(a, yAttr) for a in cList]
        return linregress(x, y)[2] ** 2

    if 'all' in fitVar:
        propList = _float_props()
        r2 = []
        for pr in propList:
            props = _float_props(exclude=(pr,))
            for prop in props:
                r2.append((pr, prop, _r_squared(pr, prop)))
        r2 = sorted(r2, key=lambda entry: entry[2], reverse=True)
        # Header now matches the number of rows actually printed (was "Top 100").
        print("Top 200 r^2 values:")
        for r in r2[:200]:
            print(f"{r[0]} | {r[1]} | {r[2]}")
    else:
        props = _float_props(exclude=(fitVar,))
        r2 = [(prop, _r_squared(fitVar, prop)) for prop in props]
        r2 = sorted(r2, key=lambda entry: entry[1], reverse=True)
        print("Top 20 r^2 values:")
        for r in r2[:20]:
            print(f"{r[0]} | {r[1]}")
def prelimPlot(cl):
    """Quick-look diagnostic plots for one cluster: sky positions (ra/dec),
    proper motions (pmra/pmdec), and a colour-magnitude diagram of the
    unfiltered wide-field sample. Figures are shown, not saved.

    :param cl: cluster name, a key into the module-level clusters dict
    """
    import matplotlib.pyplot as plt
    cluster = clusters[cl]
    # Sky positions
    plt.scatter([a.ra for a in cluster.unfilteredWide],[a.dec for a in cluster.unfilteredWide],s=0.1)
    # Proper-motion space
    plt.figure()
    plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1)
    # plt.figure()
    # plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1,c=[a.par for a in cluster.unfilteredWide])
    # plt.set_cmap('cool')
    # clb = plt.colorbar()
    # Colour-magnitude diagram; y inverted so brighter stars plot higher
    plt.figure()
    plt.scatter([a.b_r for a in cluster.unfilteredWide],[a.g_mag for a in cluster.unfilteredWide],s=0.1)
    plt.gca().invert_yaxis()
    # plt.figure()
    # plt.scatter([a.par for a in cluster.unfilteredWide],[a.par for a in cluster.unfilteredWide],s=0.1,c=[(a.pmra**2 + a.pmdec**2)**0.5 for a in cluster.unfilteredWide])
    # plt.set_cmap('cool')
| true | true |
f7364db0a9b0f7a13ada988a208e0f0859847424 | 2,476 | py | Python | neurolib/utils/saver.py | Ronjaa95/neurolib | 6d066a6428718ec85c9b1b87707ea73bc179ebf9 | [
"MIT"
] | 258 | 2020-01-26T14:38:09.000Z | 2022-03-31T14:54:04.000Z | neurolib/utils/saver.py | Ronjaa95/neurolib | 6d066a6428718ec85c9b1b87707ea73bc179ebf9 | [
"MIT"
] | 172 | 2020-01-27T11:02:28.000Z | 2022-03-22T22:25:38.000Z | neurolib/utils/saver.py | Ronjaa95/neurolib | 6d066a6428718ec85c9b1b87707ea73bc179ebf9 | [
"MIT"
] | 49 | 2020-02-04T08:34:44.000Z | 2022-03-28T09:29:12.000Z | """
Saving model output.
"""
import json
import pickle
from copy import deepcopy
import os
import numpy as np
import xarray as xr
def save_to_pickle(datafield, filename):
    """
    Serialise an xarray Dataset or DataArray to a pickle file.

    Keep in mind that restoring a pickle requires the internal structure of
    the pickled types to remain unchanged, i.e. this is not recommended for
    long-term storage.

    :param datafield: datafield or dataarray to save
    :type datafield: xr.Dataset|xr.DataArray
    :param filename: filename; ".pkl" is appended when missing
    :type filename: str
    """
    assert isinstance(datafield, (xr.DataArray, xr.Dataset))
    target = filename if filename.endswith(".pkl") else filename + ".pkl"
    with open(target, "wb") as handle:
        pickle.dump(datafield, handle, protocol=pickle.HIGHEST_PROTOCOL)
def save_to_netcdf(datafield, filename):
    """
    Save a Dataset or DataArray to NetCDF. NetCDF cannot store structured
    attributes, so any attrs are stripped from a deep copy and, if present,
    written to a json file with the same basename.

    :param datafield: datafield or dataarray to save
    :type datafield: xr.Dataset|xr.DataArray
    :param filename: filename; ".nc" is appended when missing
    :type filename: str
    """
    assert isinstance(datafield, (xr.DataArray, xr.Dataset))
    datafield = deepcopy(datafield)
    if not filename.endswith(".nc"):
        filename = filename + ".nc"
    if datafield.attrs:
        # Persist a copy of the attrs as json, then clear them so to_netcdf
        # does not choke on structured values.
        _save_attrs_json(deepcopy(datafield.attrs), filename)
        datafield.attrs = {}
    datafield.to_netcdf(filename)
def _save_attrs_json(attrs, filename):
"""
Save attributes to json.
:param attrs: attributes to save
:type attrs: dict
:param filename: filename for the json file
:type filename: str
"""
def sanitise_attrs(attrs):
sanitised = {}
for k, v in attrs.items():
if isinstance(v, list):
sanitised[k] = [
sanitise_attrs(vv) if isinstance(vv, dict) else vv.tolist() if isinstance(vv, np.ndarray) else vv
for vv in v
]
elif isinstance(v, dict):
sanitised[k] = sanitise_attrs(v)
elif isinstance(v, np.ndarray):
sanitised[k] = v.tolist()
else:
sanitised[k] = v
return sanitised
filename = os.path.splitext(filename)[0] + ".json"
with open(filename, "w") as handle:
json.dump(sanitise_attrs(attrs), handle)
| 29.47619 | 117 | 0.640549 |
import json
import pickle
from copy import deepcopy
import os
import numpy as np
import xarray as xr
def save_to_pickle(datafield, filename):
assert isinstance(datafield, (xr.DataArray, xr.Dataset))
if not filename.endswith(".pkl"):
filename += ".pkl"
with open(filename, "wb") as handle:
pickle.dump(datafield, handle, protocol=pickle.HIGHEST_PROTOCOL)
def save_to_netcdf(datafield, filename):
assert isinstance(datafield, (xr.DataArray, xr.Dataset))
datafield = deepcopy(datafield)
if not filename.endswith(".nc"):
filename += ".nc"
if datafield.attrs:
attributes_copy = deepcopy(datafield.attrs)
_save_attrs_json(attributes_copy, filename)
datafield.attrs = {}
datafield.to_netcdf(filename)
def _save_attrs_json(attrs, filename):
def sanitise_attrs(attrs):
sanitised = {}
for k, v in attrs.items():
if isinstance(v, list):
sanitised[k] = [
sanitise_attrs(vv) if isinstance(vv, dict) else vv.tolist() if isinstance(vv, np.ndarray) else vv
for vv in v
]
elif isinstance(v, dict):
sanitised[k] = sanitise_attrs(v)
elif isinstance(v, np.ndarray):
sanitised[k] = v.tolist()
else:
sanitised[k] = v
return sanitised
filename = os.path.splitext(filename)[0] + ".json"
with open(filename, "w") as handle:
json.dump(sanitise_attrs(attrs), handle)
| true | true |
f7364ddf2333b397212e2ec8913c3f5019780df6 | 163 | py | Python | vizdoomgym/envs/vizdoomtakecover.py | AlexGonRo/vizdoomgym | 231b5f07d878f4e8cab49e09db5cbc38c2d3abfb | [
"MIT"
] | 1 | 2021-04-13T19:05:27.000Z | 2021-04-13T19:05:27.000Z | vizdoomgym/envs/vizdoomtakecover.py | safijari/vizdoomgym | 4ba08296d528a40ce865f30f7abfef93417614f4 | [
"MIT"
] | 12 | 2019-10-21T04:53:12.000Z | 2022-02-10T00:28:37.000Z | vizdoomgym/envs/vizdoomtakecover.py | safijari/vizdoomgym | 4ba08296d528a40ce865f30f7abfef93417614f4 | [
"MIT"
] | 1 | 2021-12-14T12:20:39.000Z | 2021-12-14T12:20:39.000Z | from vizdoomgym.envs.vizdoomenv import VizdoomEnv
class VizdoomTakeCover(VizdoomEnv):
    """Gym environment for the ViZDoom "Take Cover" scenario."""
    def __init__(self):
        # Level index 7 — presumably selects the take-cover scenario in the
        # shared VizdoomEnv base; confirm against VizdoomEnv.__init__.
        super(VizdoomTakeCover, self).__init__(7)
| 20.375 | 49 | 0.760736 | from vizdoomgym.envs.vizdoomenv import VizdoomEnv
class VizdoomTakeCover(VizdoomEnv):
def __init__(self):
super(VizdoomTakeCover, self).__init__(7)
| true | true |
f7364f6d6829dd6dc289c75df401165fac819425 | 2,399 | py | Python | tests/test_tensor_transform.py | aripekka/tbcalc | a0337db245f5391bfa9a42123994832c299b1fbe | [
"MIT"
] | 1 | 2020-05-03T00:10:39.000Z | 2020-05-03T00:10:39.000Z | tests/test_tensor_transform.py | aripekka/tbcalc | a0337db245f5391bfa9a42123994832c299b1fbe | [
"MIT"
] | null | null | null | tests/test_tensor_transform.py | aripekka/tbcalc | a0337db245f5391bfa9a42123994832c299b1fbe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tests for the tensor transform functions. Run with pytest.
Created on Sat May 9 00:09:00 2020
@author: aripekka
"""
import sys
import os.path
import numpy as np
sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))
from tbcalc.transverse_deformation import *
from tbcalc import cartesian_tensors_to_cylindrical
from pyTTE import TTcrystal, Quantity
def test_isotropic_circular():
    """Check that tbcalc's isotropic circular-crystal stress/strain fields,
    converted to cylindrical coordinates, match closed-form reference
    expressions (as implemented in the deprecated sbcalc package).
    NaN positions must coincide between result and reference.
    """
    #Calculate the reference stresses and strains as implemented in the
    #deprecated sbcalc package
    E = 165
    nu = 0.22
    thickness = 0.1

    Rx = 1000.0
    Ry = 500.0
    R = np.sqrt(Rx*Ry)

    L = 100.0

    x=np.linspace(-L/2,L/2,150)

    X,Y=np.meshgrid(x,x)
    RR = np.sqrt(X**2 + Y**2)
    PHI = np.arctan2(Y,X)

    stress, strain, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)

    stress_cyl = cartesian_tensors_to_cylindrical(stress)
    strain_cyl = cartesian_tensors_to_cylindrical(strain)

    # The "+stress['xx'](X,Y)*0" terms broadcast the analytic expressions to
    # the grid shape and propagate NaNs outside the crystal.
    stress_cyl_ref = {}
    stress_cyl_ref['rr'] = E/(16*R**2)*(L**2/4-RR**2)+stress['xx'](X,Y)*0
    stress_cyl_ref['phiphi'] = E/(16*R**2)*(L**2/4-3*RR**2)+stress['xx'](X,Y)*0
    stress_cyl_ref['rphi'] = stress['xx'](X,Y)*0
    stress_cyl_ref['phir'] = stress['xx'](X,Y)*0

    strain_cyl_ref = {}
    strain_cyl_ref['rr'] = 1/(16*R**2)*((1-nu)*L**2/4-(1-3*nu)*RR**2)+stress['xx'](X,Y)*0
    strain_cyl_ref['phiphi'] = 1/(16*R**2)*((1-nu)*L**2/4-(3-nu)*RR**2)+stress['xx'](X,Y)*0
    strain_cyl_ref['rphi'] = stress['xx'](X,Y)*0
    strain_cyl_ref['phir'] = stress['xx'](X,Y)*0
    strain_cyl_ref['zphi'] = stress['xx'](X,Y)*0
    strain_cyl_ref['phiz'] = stress['xx'](X,Y)*0
    strain_cyl_ref['rz'] = stress['xx'](X,Y)*0
    strain_cyl_ref['zr'] = stress['xx'](X,Y)*0
    strain_cyl_ref['zz'] = nu/(4*R**2)*(RR**2-L**2/8)+stress['xx'](X,Y)*0

    # np.float was removed in NumPy 1.24; use the builtin float (float64).
    meps = np.finfo(float).eps

    for i in ['r','phi']:
        for j in ['r','phi']:
            assert np.all(np.logical_or(np.abs(stress_cyl_ref[i+j] - stress_cyl[i+j](RR,PHI)) < meps,
                          np.logical_and(np.isnan(stress_cyl_ref[i+j]), np.isnan(stress_cyl[i+j](RR,PHI)))))

    for i in ['r','phi','z']:
        for j in ['r','phi','z']:
            assert np.all(np.logical_or(np.abs(strain_cyl_ref[i+j] - strain_cyl[i+j](RR,PHI)) < meps,
                          np.logical_and(np.isnan(strain_cyl_ref[i+j]), np.isnan(strain_cyl[i+j](RR,PHI)))))
import sys
import os.path
import numpy as np
sys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))
from tbcalc.transverse_deformation import *
from tbcalc import cartesian_tensors_to_cylindrical
from pyTTE import TTcrystal, Quantity
def test_isotropic_circular():
E = 165
nu = 0.22
thickness = 0.1
Rx = 1000.0
Ry = 500.0
R = np.sqrt(Rx*Ry)
L = 100.0
x=np.linspace(-L/2,L/2,150)
X,Y=np.meshgrid(x,x)
RR = np.sqrt(X**2 + Y**2)
PHI = np.arctan2(Y,X)
stress, strain, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)
stress_cyl = cartesian_tensors_to_cylindrical(stress)
strain_cyl = cartesian_tensors_to_cylindrical(strain)
stress_cyl_ref = {}
stress_cyl_ref['rr'] = E/(16*R**2)*(L**2/4-RR**2)+stress['xx'](X,Y)*0
stress_cyl_ref['phiphi'] = E/(16*R**2)*(L**2/4-3*RR**2)+stress['xx'](X,Y)*0
stress_cyl_ref['rphi'] = stress['xx'](X,Y)*0
stress_cyl_ref['phir'] = stress['xx'](X,Y)*0
strain_cyl_ref = {}
strain_cyl_ref['rr'] = 1/(16*R**2)*((1-nu)*L**2/4-(1-3*nu)*RR**2)+stress['xx'](X,Y)*0
strain_cyl_ref['phiphi'] = 1/(16*R**2)*((1-nu)*L**2/4-(3-nu)*RR**2)+stress['xx'](X,Y)*0
strain_cyl_ref['rphi'] = stress['xx'](X,Y)*0
strain_cyl_ref['phir'] = stress['xx'](X,Y)*0
strain_cyl_ref['zphi'] = stress['xx'](X,Y)*0
strain_cyl_ref['phiz'] = stress['xx'](X,Y)*0
strain_cyl_ref['rz'] = stress['xx'](X,Y)*0
strain_cyl_ref['zr'] = stress['xx'](X,Y)*0
strain_cyl_ref['zz'] = nu/(4*R**2)*(RR**2-L**2/8)+stress['xx'](X,Y)*0
meps = np.finfo(np.float).eps
for i in ['r','phi']:
for j in ['r','phi']:
assert np.all(np.logical_or(np.abs(stress_cyl_ref[i+j] - stress_cyl[i+j](RR,PHI)) < meps,
np.logical_and(np.isnan(stress_cyl_ref[i+j]), np.isnan(stress_cyl[i+j](RR,PHI)))))
for i in ['r','phi','z']:
for j in ['r','phi','z']:
assert np.all(np.logical_or(np.abs(strain_cyl_ref[i+j] - strain_cyl[i+j](RR,PHI)) < meps,
np.logical_and(np.isnan(strain_cyl_ref[i+j]), np.isnan(strain_cyl[i+j](RR,PHI))))) | true | true |
f736512234f6783eb63953d115f2c91aeee1b5bf | 23,644 | py | Python | src/trapi/trapi.py | guilhermehott/trapi | 7f44df1872b2e253bc423fa92a52efdd6d4fdbf1 | [
"MIT"
] | 1 | 2022-03-23T23:16:04.000Z | 2022-03-23T23:16:04.000Z | src/trapi/trapi.py | guilhermehott/trapi | 7f44df1872b2e253bc423fa92a52efdd6d4fdbf1 | [
"MIT"
] | null | null | null | src/trapi/trapi.py | guilhermehott/trapi | 7f44df1872b2e253bc423fa92a52efdd6d4fdbf1 | [
"MIT"
] | null | null | null | import asyncio
import base64
import hashlib
import json
import logging
import time
import urllib.parse
import uuid
import requests
import websockets
from ecdsa import NIST256p, SigningKey
from ecdsa.util import sigencode_der
# Module-wide logging: timestamped INFO-level messages to the default
# stderr stream handler. (File logging kept as a commented alternative.)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    # handlers=[
    #     logging.FileHandler("src/tmp/debug.log"),
    #     logging.StreamHandler()
    # ]
)
logger = logging.getLogger(__name__)
# home = pathlib.Path.home()
# Necessary when this lib is called from a flask endpoint because it's not on the main thread.
def get_or_create_eventloop():
    """Return the current thread's asyncio event loop, creating and
    installing a new one when the thread has none (e.g. when this library
    is called from a Flask worker thread rather than the main thread).
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError as ex:
        # Only the "no loop on this thread" failure is recoverable.
        if "There is no current event loop in thread" in str(ex):
            asyncio.set_event_loop(asyncio.new_event_loop())
            return asyncio.get_event_loop()
class TradeRepublicApi:
_default_headers = {'User-Agent': 'TradeRepublic/Android 24/App Version 1.1.2875'}
_host = "https://api.traderepublic.com"
_refresh_token = None
_session_token = None
_session_token_expires_at = None
credentials_file = "credentials"
_ws = None
get_or_create_eventloop()
_lock = asyncio.Lock()
_subscription_id_counter = 1
_previous_responses = {}
subscriptions = {}
    @property
    def session_token(self):
        # Lazily log in on first access; transparently refresh once the
        # expiry recorded by the setter has passed.
        if not self._refresh_token:
            self.login()
        elif self._refresh_token and time.time() > self._session_token_expires_at:
            self.refresh_access_token()
        return self._session_token
    @session_token.setter
    def session_token(self, val):
        # Mark the token as expiring in 290 s — presumably the server-side
        # lifetime is ~300 s, so we refresh slightly early; confirm with API.
        self._session_token_expires_at = time.time() + 290
        self._session_token = val
def __init__(self, phone_no=None, pin=None, keyfile=None, locale="de"):
self._locale = locale
if not (phone_no and pin):
try:
with open(self.credentials_file, 'r') as f:
lines = f.readlines()
self.phone_no = lines[0].strip()
self.pin = lines[1].strip()
except FileNotFoundError:
raise ValueError(f"phone_no and pin must be specified explicitly or via credentials file")
else:
self.phone_no = phone_no
self.pin = pin
self.keyfile = keyfile if keyfile else "keyfile.pem"
try:
with open(self.keyfile, 'rb') as f:
self.sk = SigningKey.from_pem(f.read(), hashfunc=hashlib.sha512)
except FileNotFoundError as ex:
logger.warn('File not found %s', self.keyfile)
pass
def interactive_device_reset(self):
self.sk = SigningKey.generate(curve=NIST256p, hashfunc=hashlib.sha512)
r = requests.post(f"{self._host}/api/v1/auth/account/reset/device",
json={"phoneNumber": self.phone_no, "pin": self.pin},
headers=self._default_headers)
if r.status_code == 200:
process_id = r.json()['processId']
else:
self.print_error_response(r)
token = input("Please enter the sms code: ")
self.pair_device(process_id, token)
def initiate_device_reset(self):
self.sk = SigningKey.generate(curve=NIST256p, hashfunc=hashlib.sha512)
r = requests.post(f"{self._host}/api/v1/auth/account/reset/device",
json={"phoneNumber": self.phone_no, "pin": self.pin},
headers=self._default_headers)
if r.status_code == 200:
# save processId
with open(self.credentials_file, 'a') as f:
f.write("\n")
f.write(r.json()['processId'])
logger.info("processId %s", r.json()['processId'])
# save signature key
with open(self.keyfile, 'wb') as f:
f.write(self.sk.to_pem())
logger.info("writing to pem file")
logger.info(self.sk.to_pem())
else:
self.print_error_response(r)
raise Exception(r.json())
def complete_device_reset(self, token):
with open(self.credentials_file, 'r') as f:
lines = f.readlines()
process_id = lines[len(lines)-1].strip() # get the latest process_id
with open(self.keyfile, 'rb') as f:
self.sk = SigningKey.from_pem(f.read(), hashfunc=hashlib.sha512)
if not process_id or not self.sk:
raise ValueError("Initiate Device Reset first.")
else:
self.pair_device(process_id, token)
def pair_device(self, process_id, token):
logger.info("pairing device")
pubkey_bytes = self.sk.get_verifying_key().to_string('uncompressed')
pubkey_string = base64.b64encode(pubkey_bytes).decode('ascii')
r = requests.post(f"{self._host}/api/v1/auth/account/reset/device/{process_id}/key",
json={"code": token, "deviceKey": pubkey_string},
headers=self._default_headers)
if r.status_code != 200:
self.print_error_response(r)
raise Exception(r.json())
@staticmethod
def print_error_response(r):
logger.error("%s %s", r.request.method, r.request.url)
logger.error("%s %s", r.status_code, r.json())
    def login(self):
        """Authenticate with phone number and PIN via a signed request and
        store the returned refresh and session tokens. Non-200 responses
        are silently ignored (tokens stay unset)."""
        logging.info("Logging in")
        r = self._sign_request("/api/v1/auth/login",
                               payload={"phoneNumber": self.phone_no, "pin": self.pin})
        if r.status_code == 200:
            self._refresh_token = r.json()['refreshToken']
            self.session_token = r.json()['sessionToken']
        # else:
        #     # the device lost the session, must be reseted
        #     self.interactive_device_reset()
        #     self.login()
def refresh_access_token(self):
logger.info("Refreshing access token")
r = self._sign_request("/api/v1/auth/session", method="GET")
self.session_token = r.json()['sessionToken']
    def _sign_request(self, url_path, payload=None, method="POST"):
        """Send a signed REST request to the Trade Republic API.

        The string "<timestamp_ms>.<json payload>" is signed with the
        device's ECDSA key (SHA-512, DER encoding) and sent via the
        X-Zeta-Timestamp / X-Zeta-Signature headers.

        :param url_path: API path, e.g. "/api/v1/auth/login"
        :param payload: optional JSON-serialisable request body
        :param method: HTTP method, defaults to "POST"
        :return: the requests.Response object
        """
        ts = int(time.time() * 1000)
        payload_string = json.dumps(payload) if payload else ""
        signature_payload = f"{ts}.{payload_string}"
        signature = self.sk.sign(bytes(signature_payload, "utf-8"), hashfunc=hashlib.sha512, sigencode=sigencode_der)
        signature_string = base64.b64encode(signature).decode('ascii')
        headers = self._default_headers.copy()
        headers["X-Zeta-Timestamp"] = str(ts)
        headers["X-Zeta-Signature"] = signature_string
        headers['Content-Type'] = 'application/json'
        # Authorization depends on the endpoint: none for login, the refresh
        # token for session renewal, the (auto-refreshing) session token otherwise.
        if url_path == "/api/v1/auth/login":
            pass
        elif url_path == "/api/v1/auth/session":
            headers['Authorization'] = f'Bearer {self._refresh_token}'
        elif self.session_token:
            headers['Authorization'] = f'Bearer {self.session_token}'
        return requests.request(method=method, url=f"{self._host}{url_path}", data=payload_string, headers=headers)
    async def _get_ws(self):
        """Return the open websocket connection, connecting and performing
        the "connect 21 {locale}" handshake on first use or after a drop.

        :raises ValueError: if the server does not answer 'connected'
        """
        if self._ws and self._ws.open:
            return self._ws
        logger.info(f"Connecting to websocket ...")
        self._ws = await websockets.connect("wss://api.traderepublic.com")
        connection_message = {'locale': self._locale}
        await self._ws.send(f"connect 21 {json.dumps(connection_message)}")
        response = await self._ws.recv()
        if not response == 'connected':
            raise ValueError(f"Connection Error: {response}")
        logger.info(f"Connected to websocket ...")
        return self._ws
async def _next_subscription_id(self):
async with self._lock:
subscription_id = self._subscription_id_counter
self._subscription_id_counter += 1
return str(subscription_id)
    async def subscribe(self, payload):
        """Open a websocket subscription for `payload` and return its id.

        The payload is stored token-free in self.subscriptions; the session
        token is added only to a copy sent over the wire.

        :param payload: subscription request, e.g. {"type": "portfolio"}
        :return: the subscription id (string)
        """
        subscription_id = await self._next_subscription_id()
        ws = await self._get_ws()
        logger.info(f"Subscribing: 'sub {subscription_id} {json.dumps(payload)}'")
        self.subscriptions[subscription_id] = payload
        payload_with_token = payload.copy()
        payload_with_token["token"] = self.session_token
        await ws.send(f"sub {subscription_id} {json.dumps(payload_with_token)}")
        return subscription_id
async def unsubscribe(self, subscription_id):
ws = await self._get_ws()
logger.info(f"Unubscribing: {subscription_id}")
await ws.send(f"unsub {subscription_id}")
self.subscriptions.pop(subscription_id, None)
self._previous_responses.pop(subscription_id, None)
    async def recv(self):
        """Receive the next message for any active subscription.

        Wire format is "<id> <code><payload>", where code 'A' carries a full
        json payload, 'D' a delta against the previous payload for that id,
        'C' closes the subscription, and 'E' reports an error.

        :return: (subscription_id, subscription payload, decoded message dict)
        :raises TradeRepublicError: on an 'E' message (after unsubscribing)
        """
        ws = await self._get_ws()
        while True:
            response = await ws.recv()
            logger.debug(f"Received message: {response!r}")
            subscription_id = response[:response.find(" ")]
            code = response[response.find(" ") + 1: response.find(" ") + 2]
            payload_str = response[response.find(" ") + 2:].lstrip()
            if subscription_id not in self.subscriptions:
                # Unknown id: drop unless it is a close notification.
                if code != "C":
                    logger.info(f"No active subscription for id {subscription_id}, dropping message")
                continue
            subscription = self.subscriptions[subscription_id]
            if code == 'A':
                # Full payload; remember it as the base for future deltas.
                self._previous_responses[subscription_id] = payload_str
                payload = json.loads(payload_str) if payload_str else {}
                return subscription_id, subscription, payload
            elif code == 'D':
                # Delta payload; reconstruct against the previous response.
                response = self._calculate_delta(subscription_id, payload_str)
                logger.debug(f"Payload is {response}")
                self._previous_responses[subscription_id] = response
                return subscription_id, subscription, json.loads(response)
            if code == 'C':
                self.subscriptions.pop(subscription_id, None)
                self._previous_responses.pop(subscription_id, None)
                continue
            elif code == 'E':
                logger.error(f"Received error message: {response!r}")
                await self.unsubscribe(subscription_id)
                payload = json.loads(payload_str) if payload_str else {}
                raise TradeRepublicError(subscription_id, subscription, payload)
    def _calculate_delta(self, subscription_id, delta_payload):
        """Apply a tab-separated delta to the previous response for this id.

        Token semantics (first character): '+' appends the url-decoded token
        (the leading '+' decodes to a space and is stripped); '=N' copies N
        characters from the previous response; '-N' skips N characters of
        the previous response without emitting anything.

        :return: the reconstructed full payload string
        """
        previous_response = self._previous_responses[subscription_id]
        i, result = 0, []
        for diff in delta_payload.split('\t'):
            sign = diff[0]
            if sign == '+':
                result.append(urllib.parse.unquote_plus(diff).strip())
            elif sign == '-' or sign == '=':
                if sign == '=':
                    result.append(previous_response[i:i + int(diff[1:])])
                # Both '-' and '=' advance the cursor in the previous payload.
                i += int(diff[1:])
        return "".join(result)
async def _recv_subscription(self, subscription_id):
while True:
response_subscription_id, _, response = await self.recv()
if response_subscription_id == subscription_id:
return response
async def _receive_one(self, fut, timeout):
subscription_id = await fut
try:
return await asyncio.wait_for(self._recv_subscription(subscription_id), timeout)
finally:
await self.unsubscribe(subscription_id)
def run_blocking(self, fut, timeout=5.0):
return get_or_create_eventloop().run_until_complete(self._receive_one(fut, timeout=timeout))
async def portfolio(self):
return await self.subscribe({"type": "portfolio"})
async def watchlist(self):
return await self.subscribe({"type": "watchlist"})
async def cash(self):
return await self.subscribe({"type": "cash"})
async def available_cash_for_payout(self):
return await self.subscribe({"type": "availableCashForPayout"})
async def portfolio_status(self):
return await self.subscribe({"type": "portfolioStatus"})
async def portfolio_history(self, timeframe):
return await self.subscribe({"type": "portfolioAggregateHistory", "range": timeframe})
async def instrument_details(self, isin):
return await self.subscribe({"type": "instrument", "id": isin})
async def instrument_suitability(self, isin):
return await self.subscribe({"type": "instrumentSuitability", "instrumentId": isin})
async def stock_details(self, isin):
return await self.subscribe({"type": "stockDetails", "id": isin})
async def add_watchlist(self, isin):
return await self.subscribe({"type": "addToWatchlist", "instrumentId": isin})
async def remove_watchlist(self, isin):
return await self.subscribe({"type": "removeFromWatchlist", "instrumentId": isin})
async def ticker(self, isin, exchange="LSX"):
return await self.subscribe({"type": "ticker", "id": f"{isin}.{exchange}"})
async def performance(self, isin, exchange="LSX"):
return await self.subscribe({"type": "performance", "id": f"{isin}.{exchange}"})
async def performance_history(self, isin, timeframe, exchange="LSX", resolution=None):
parameters = {"type": "aggregateHistory", "id": f"{isin}.{exchange}", "range": timeframe}
if resolution:
parameters["resolution"] = resolution
return await self.subscribe(parameters)
async def experience(self):
return await self.subscribe({"type": "experience"})
async def motd(self):
return await self.subscribe({"type": "messageOfTheDay"})
async def neon_cards(self):
return await self.subscribe({"type": "neonCards"})
async def timeline(self, after=None):
return await self.subscribe({"type": "timeline", "after": after})
async def timeline_detail(self, timeline_id):
return await self.subscribe({"type": "timelineDetail", "id": timeline_id})
async def timeline_detail_order(self, order_id):
return await self.subscribe({"type": "timelineDetail", "orderId": order_id})
async def timeline_detail_savings_plan(self, savings_plan_id):
return await self.subscribe({"type": "timelineDetail", "savingsPlanId": savings_plan_id})
async def search_tags(self):
return await self.subscribe({"type": "neonSearchTags"})
async def search_suggested_tags(self, query):
return await self.subscribe({"type": "neonSearchSuggestedTags", "data": {"q": query}})
    async def search(self, query, asset_type="stock", page=1, page_size=20, aggregate=False, only_savable=False,
                     filter_index=None, filter_country=None, filter_sector=None, filter_region=None):
        """Subscribe to an instrument search.

        :param query: free-text search string
        :param asset_type: instrument type filter (e.g. "stock")
        :param page: result page number (1-based)
        :param page_size: results per page
        :param aggregate: request aggregation counts instead of results
        :param only_savable: restrict to savings-plan-capable instruments
        :param filter_index/filter_country/filter_sector/filter_region:
            optional additional filters; omitted when None
        :return: the subscription id
        """
        search_parameters = {
            "q": query,
            "filter": [{'key': 'type', 'value': asset_type}],
            "page": page,
            "pageSize": page_size,
        }
        if only_savable:
            search_parameters["filter"].append({'key': 'attribute', 'value': 'savable'})
        if filter_index:
            search_parameters["filter"].append({'key': 'index', 'value': filter_index})
        if filter_country:
            search_parameters["filter"].append({'key': 'country', 'value': filter_country})
        if filter_region:
            search_parameters["filter"].append({'key': 'region', 'value': filter_region})
        if filter_sector:
            search_parameters["filter"].append({'key': 'sector', 'value': filter_sector})
        search_type = "neonSearch" if not aggregate else "neonSearchAggregations"
        return await self.subscribe({"type": search_type, "data": search_parameters})
# product_type: [knockOutProduct, vanillaWarrant]
# option_type: [call, put]
async def search_derivative(self, underlying_isin, product_type="vanillaWarrant", option_type=None):
return await self.subscribe(
{"type": "derivatives", "underlying": underlying_isin, "productCategory": product_type, "optionType": option_type}
)
async def order_overview(self):
return await self.subscribe({"type": "orders"})
async def price_for_order(self, isin, exchange, order_type):
return await self.subscribe(
{"type": "priceForOrder", "parameters": {"exchangeId": exchange, "instrumentId": isin, "type": order_type}}
)
async def cash_available_for_order(self):
return await self.subscribe({"type": "availableCash"})
async def size_available_for_order(self, isin, exchange):
return await self.subscribe(
{"type": "availableSize", "parameters": {"exchangeId": exchange, "instrumentId": isin}}
)
    async def limit_order(self, isin, exchange, order_type, size, limit, expiry, expiry_date=None, warnings_shown=None):
        """Place a limit order via the websocket API.

        :param isin: instrument ISIN
        :param exchange: exchange id, e.g. "LSX"
        :param order_type: "buy" or "sell" — presumably; confirm with API
        :param size: number of shares
        :param limit: limit price
        :param expiry: expiry type; "gtd" requires expiry_date
        :param expiry_date: expiry date, used only when expiry == "gtd"
        :param warnings_shown: warnings already acknowledged by the user
        :return: the subscription id for the order confirmation stream
        """
        parameters = {
            "type": "simpleCreateOrder",
            "clientProcessId": str(uuid.uuid4()),
            "warningsShown": warnings_shown if warnings_shown else [],
            "parameters": {
                "instrumentId": isin,
                "exchangeId": exchange,
                "expiry": {"type": expiry},
                "limit": limit,
                "mode": "limit",
                "size": size,
                "type": order_type,
            }
        }
        if expiry == "gtd" and expiry_date:
            parameters["parameters"]["expiry"]["value"] = expiry_date
        return await self.subscribe(parameters)
async def market_order(self, isin, exchange, order_type, size, expiry, sell_fractions, expiry_date=None,
warnings_shown=None):
parameters = {
"type": "simpleCreateOrder",
"clientProcessId": str(uuid.uuid4()),
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"instrumentId": isin,
"exchangeId": exchange,
"expiry": {"type": expiry},
"mode": "market",
"sellFractions": sell_fractions,
"size": size,
"type": order_type,
}
}
if expiry == "gtd" and expiry_date:
parameters["parameters"]["expiry"]["value"] = expiry_date
return await self.subscribe(parameters)
async def stop_market_order(self, isin, exchange, order_type, size, stop, expiry, expiry_date=None,
warnings_shown=None):
parameters = {
"type": "simpleCreateOrder",
"clientProcessId": str(uuid.uuid4()),
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"instrumentId": isin,
"exchangeId": exchange,
"expiry": {"type": expiry},
"mode": "stopMarket",
"size": size,
"stop": stop,
"type": order_type,
}
}
if expiry == "gtd" and expiry_date:
parameters["parameters"]["expiry"]["value"] = expiry_date
return await self.subscribe(parameters)
async def cancel_order(self, order_id):
return await self.subscribe({"type": "cancelOrder", "orderId": order_id})
async def savings_plan_overview(self):
return await self.subscribe({"type": "savingsPlans"})
async def savings_plan_parameters(self, isin):
return await self.subscribe({"type": "cancelSavingsPlan", "instrumentId": isin})
async def create_savings_plan(self, isin, amount, interval, start_date, start_date_type, start_date_value,
warnings_shown=None):
parameters = {
"type": "createSavingsPlan",
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"amount": amount,
"instrumentId": isin,
"interval": interval,
"startDate": {
'nextExecutionDate': start_date,
'type': start_date_type,
'value': start_date_value
}
}
}
return await self.subscribe(parameters)
async def change_savings_plan(self, savings_plan_id, isin, amount, interval,
start_date, start_date_type, start_date_value, warnings_shown=None):
parameters = {
"id": savings_plan_id,
"type": "createSavingsPlan",
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"amount": amount,
"instrumentId": isin,
"interval": interval,
"startDate": {
'nextExecutionDate': start_date,
'type': start_date_type,
'value': start_date_value
}
}
}
return await self.subscribe(parameters)
async def cancel_savings_plan(self, savings_plan_id):
return await self.subscribe({"type": "cancelSavingsPlan", "id": savings_plan_id})
async def price_alarm_overview(self):
return await self.subscribe({"type": "priceAlarms"})
async def create_price_alarm(self, isin, price):
return await self.subscribe({"type": "createPriceAlarm", "instrumentId": isin, "targetPrice": price})
async def cancel_price_alarm(self, price_alarm_id):
return await self.subscribe({"type": "cancelPriceAlarm", "id": price_alarm_id})
async def news(self, isin):
return await self.subscribe({"type": "neonNews", "isin": isin})
async def news_subscriptions(self):
return await self.subscribe({"type": "newsSubscriptions"})
async def subscribe_news(self, isin):
return await self.subscribe({"type": "subscribeNews", "instrumentId": isin})
async def unsubscribe_news(self, isin):
return await self.subscribe({"type": "unsubscribeNews", "instrumentId": isin})
def payout(self, amount):
return self._sign_request("/api/v1/payout", {"amount": amount}).json()
def confirm_payout(self, process_id, code):
r = self._sign_request(f"/api/v1/payout/{process_id}/code", {"code": code})
if r.status_code != 200:
raise ValueError(f"Payout failed with response {r.text!r}")
def settings(self):
return self._sign_request("/api/v1/auth/account", method="GET").json()
def order_cost(self, isin, exchange, order_mode, order_type, size, sell_fractions):
url = f"/api/v1/user/costtransparency?instrumentId={isin}&exchangeId={exchange}" \
f"&mode={order_mode}&type={order_type}&size={size}&sellFractions={sell_fractions}"
return self._sign_request(url, method="GET").text
def savings_plan_cost(self, isin, amount, interval):
url = f"/api/v1/user/savingsplancosttransparency?instrumentId={isin}&amount={amount}&interval={interval}"
return self._sign_request(url, method="GET").text
def __getattr__(self, name):
if name[:9] == "blocking_":
attr = object.__getattribute__(self, name[9:])
if hasattr(attr, '__call__'):
return lambda *args, **kwargs: \
self.run_blocking(timeout=kwargs.pop("timeout", 5), fut=attr(*args, **kwargs))
return object.__getattribute__(self, name)
class TradeRepublicError(ValueError):
def __init__(self, subscription_id, subscription, error_message):
self.subscription_id = subscription_id
self.subscription = subscription
self.error = error_message
| 39.737815 | 126 | 0.611487 | import asyncio
import base64
import hashlib
import json
import logging
import time
import urllib.parse
import uuid
import requests
import websockets
from ecdsa import NIST256p, SigningKey
from ecdsa.util import sigencode_der
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
)
logger = logging.getLogger(__name__)
def get_or_create_eventloop():
try:
return asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.get_event_loop()
class TradeRepublicApi:
_default_headers = {'User-Agent': 'TradeRepublic/Android 24/App Version 1.1.2875'}
_host = "https://api.traderepublic.com"
_refresh_token = None
_session_token = None
_session_token_expires_at = None
credentials_file = "credentials"
_ws = None
get_or_create_eventloop()
_lock = asyncio.Lock()
_subscription_id_counter = 1
_previous_responses = {}
subscriptions = {}
@property
def session_token(self):
if not self._refresh_token:
self.login()
elif self._refresh_token and time.time() > self._session_token_expires_at:
self.refresh_access_token()
return self._session_token
@session_token.setter
def session_token(self, val):
self._session_token_expires_at = time.time() + 290
self._session_token = val
def __init__(self, phone_no=None, pin=None, keyfile=None, locale="de"):
self._locale = locale
if not (phone_no and pin):
try:
with open(self.credentials_file, 'r') as f:
lines = f.readlines()
self.phone_no = lines[0].strip()
self.pin = lines[1].strip()
except FileNotFoundError:
raise ValueError(f"phone_no and pin must be specified explicitly or via credentials file")
else:
self.phone_no = phone_no
self.pin = pin
self.keyfile = keyfile if keyfile else "keyfile.pem"
try:
with open(self.keyfile, 'rb') as f:
self.sk = SigningKey.from_pem(f.read(), hashfunc=hashlib.sha512)
except FileNotFoundError as ex:
logger.warn('File not found %s', self.keyfile)
pass
def interactive_device_reset(self):
self.sk = SigningKey.generate(curve=NIST256p, hashfunc=hashlib.sha512)
r = requests.post(f"{self._host}/api/v1/auth/account/reset/device",
json={"phoneNumber": self.phone_no, "pin": self.pin},
headers=self._default_headers)
if r.status_code == 200:
process_id = r.json()['processId']
else:
self.print_error_response(r)
token = input("Please enter the sms code: ")
self.pair_device(process_id, token)
def initiate_device_reset(self):
self.sk = SigningKey.generate(curve=NIST256p, hashfunc=hashlib.sha512)
r = requests.post(f"{self._host}/api/v1/auth/account/reset/device",
json={"phoneNumber": self.phone_no, "pin": self.pin},
headers=self._default_headers)
if r.status_code == 200:
# save processId
with open(self.credentials_file, 'a') as f:
f.write("\n")
f.write(r.json()['processId'])
logger.info("processId %s", r.json()['processId'])
# save signature key
with open(self.keyfile, 'wb') as f:
f.write(self.sk.to_pem())
logger.info("writing to pem file")
logger.info(self.sk.to_pem())
else:
self.print_error_response(r)
raise Exception(r.json())
def complete_device_reset(self, token):
with open(self.credentials_file, 'r') as f:
lines = f.readlines()
process_id = lines[len(lines)-1].strip() # get the latest process_id
with open(self.keyfile, 'rb') as f:
self.sk = SigningKey.from_pem(f.read(), hashfunc=hashlib.sha512)
if not process_id or not self.sk:
raise ValueError("Initiate Device Reset first.")
else:
self.pair_device(process_id, token)
def pair_device(self, process_id, token):
logger.info("pairing device")
pubkey_bytes = self.sk.get_verifying_key().to_string('uncompressed')
pubkey_string = base64.b64encode(pubkey_bytes).decode('ascii')
r = requests.post(f"{self._host}/api/v1/auth/account/reset/device/{process_id}/key",
json={"code": token, "deviceKey": pubkey_string},
headers=self._default_headers)
if r.status_code != 200:
self.print_error_response(r)
raise Exception(r.json())
@staticmethod
def print_error_response(r):
logger.error("%s %s", r.request.method, r.request.url)
logger.error("%s %s", r.status_code, r.json())
def login(self):
logging.info("Logging in")
r = self._sign_request("/api/v1/auth/login",
payload={"phoneNumber": self.phone_no, "pin": self.pin})
if r.status_code == 200:
self._refresh_token = r.json()['refreshToken']
self.session_token = r.json()['sessionToken']
# else:
# # the device lost the session, must be reseted
# self.interactive_device_reset()
# self.login()
def refresh_access_token(self):
logger.info("Refreshing access token")
r = self._sign_request("/api/v1/auth/session", method="GET")
self.session_token = r.json()['sessionToken']
def _sign_request(self, url_path, payload=None, method="POST"):
ts = int(time.time() * 1000)
payload_string = json.dumps(payload) if payload else ""
signature_payload = f"{ts}.{payload_string}"
signature = self.sk.sign(bytes(signature_payload, "utf-8"), hashfunc=hashlib.sha512, sigencode=sigencode_der)
signature_string = base64.b64encode(signature).decode('ascii')
headers = self._default_headers.copy()
headers["X-Zeta-Timestamp"] = str(ts)
headers["X-Zeta-Signature"] = signature_string
headers['Content-Type'] = 'application/json'
if url_path == "/api/v1/auth/login":
pass
elif url_path == "/api/v1/auth/session":
headers['Authorization'] = f'Bearer {self._refresh_token}'
elif self.session_token:
headers['Authorization'] = f'Bearer {self.session_token}'
return requests.request(method=method, url=f"{self._host}{url_path}", data=payload_string, headers=headers)
async def _get_ws(self):
if self._ws and self._ws.open:
return self._ws
logger.info(f"Connecting to websocket ...")
self._ws = await websockets.connect("wss://api.traderepublic.com")
connection_message = {'locale': self._locale}
await self._ws.send(f"connect 21 {json.dumps(connection_message)}")
response = await self._ws.recv()
if not response == 'connected':
raise ValueError(f"Connection Error: {response}")
logger.info(f"Connected to websocket ...")
return self._ws
async def _next_subscription_id(self):
async with self._lock:
subscription_id = self._subscription_id_counter
self._subscription_id_counter += 1
return str(subscription_id)
async def subscribe(self, payload):
subscription_id = await self._next_subscription_id()
ws = await self._get_ws()
logger.info(f"Subscribing: 'sub {subscription_id} {json.dumps(payload)}'")
self.subscriptions[subscription_id] = payload
payload_with_token = payload.copy()
payload_with_token["token"] = self.session_token
await ws.send(f"sub {subscription_id} {json.dumps(payload_with_token)}")
return subscription_id
async def unsubscribe(self, subscription_id):
ws = await self._get_ws()
logger.info(f"Unubscribing: {subscription_id}")
await ws.send(f"unsub {subscription_id}")
self.subscriptions.pop(subscription_id, None)
self._previous_responses.pop(subscription_id, None)
async def recv(self):
ws = await self._get_ws()
while True:
response = await ws.recv()
logger.debug(f"Received message: {response!r}")
subscription_id = response[:response.find(" ")]
code = response[response.find(" ") + 1: response.find(" ") + 2]
payload_str = response[response.find(" ") + 2:].lstrip()
if subscription_id not in self.subscriptions:
if code != "C":
logger.info(f"No active subscription for id {subscription_id}, dropping message")
continue
subscription = self.subscriptions[subscription_id]
if code == 'A':
self._previous_responses[subscription_id] = payload_str
payload = json.loads(payload_str) if payload_str else {}
return subscription_id, subscription, payload
elif code == 'D':
response = self._calculate_delta(subscription_id, payload_str)
logger.debug(f"Payload is {response}")
self._previous_responses[subscription_id] = response
return subscription_id, subscription, json.loads(response)
if code == 'C':
self.subscriptions.pop(subscription_id, None)
self._previous_responses.pop(subscription_id, None)
continue
elif code == 'E':
logger.error(f"Received error message: {response!r}")
await self.unsubscribe(subscription_id)
payload = json.loads(payload_str) if payload_str else {}
raise TradeRepublicError(subscription_id, subscription, payload)
def _calculate_delta(self, subscription_id, delta_payload):
previous_response = self._previous_responses[subscription_id]
i, result = 0, []
for diff in delta_payload.split('\t'):
sign = diff[0]
if sign == '+':
result.append(urllib.parse.unquote_plus(diff).strip())
elif sign == '-' or sign == '=':
if sign == '=':
result.append(previous_response[i:i + int(diff[1:])])
i += int(diff[1:])
return "".join(result)
async def _recv_subscription(self, subscription_id):
while True:
response_subscription_id, _, response = await self.recv()
if response_subscription_id == subscription_id:
return response
async def _receive_one(self, fut, timeout):
subscription_id = await fut
try:
return await asyncio.wait_for(self._recv_subscription(subscription_id), timeout)
finally:
await self.unsubscribe(subscription_id)
def run_blocking(self, fut, timeout=5.0):
return get_or_create_eventloop().run_until_complete(self._receive_one(fut, timeout=timeout))
async def portfolio(self):
return await self.subscribe({"type": "portfolio"})
async def watchlist(self):
return await self.subscribe({"type": "watchlist"})
async def cash(self):
return await self.subscribe({"type": "cash"})
async def available_cash_for_payout(self):
return await self.subscribe({"type": "availableCashForPayout"})
async def portfolio_status(self):
return await self.subscribe({"type": "portfolioStatus"})
async def portfolio_history(self, timeframe):
return await self.subscribe({"type": "portfolioAggregateHistory", "range": timeframe})
async def instrument_details(self, isin):
return await self.subscribe({"type": "instrument", "id": isin})
async def instrument_suitability(self, isin):
return await self.subscribe({"type": "instrumentSuitability", "instrumentId": isin})
async def stock_details(self, isin):
return await self.subscribe({"type": "stockDetails", "id": isin})
async def add_watchlist(self, isin):
return await self.subscribe({"type": "addToWatchlist", "instrumentId": isin})
async def remove_watchlist(self, isin):
return await self.subscribe({"type": "removeFromWatchlist", "instrumentId": isin})
async def ticker(self, isin, exchange="LSX"):
return await self.subscribe({"type": "ticker", "id": f"{isin}.{exchange}"})
async def performance(self, isin, exchange="LSX"):
return await self.subscribe({"type": "performance", "id": f"{isin}.{exchange}"})
async def performance_history(self, isin, timeframe, exchange="LSX", resolution=None):
parameters = {"type": "aggregateHistory", "id": f"{isin}.{exchange}", "range": timeframe}
if resolution:
parameters["resolution"] = resolution
return await self.subscribe(parameters)
async def experience(self):
return await self.subscribe({"type": "experience"})
async def motd(self):
return await self.subscribe({"type": "messageOfTheDay"})
async def neon_cards(self):
return await self.subscribe({"type": "neonCards"})
async def timeline(self, after=None):
return await self.subscribe({"type": "timeline", "after": after})
async def timeline_detail(self, timeline_id):
return await self.subscribe({"type": "timelineDetail", "id": timeline_id})
async def timeline_detail_order(self, order_id):
return await self.subscribe({"type": "timelineDetail", "orderId": order_id})
async def timeline_detail_savings_plan(self, savings_plan_id):
return await self.subscribe({"type": "timelineDetail", "savingsPlanId": savings_plan_id})
async def search_tags(self):
return await self.subscribe({"type": "neonSearchTags"})
async def search_suggested_tags(self, query):
return await self.subscribe({"type": "neonSearchSuggestedTags", "data": {"q": query}})
async def search(self, query, asset_type="stock", page=1, page_size=20, aggregate=False, only_savable=False,
filter_index=None, filter_country=None, filter_sector=None, filter_region=None):
search_parameters = {
"q": query,
"filter": [{'key': 'type', 'value': asset_type}],
"page": page,
"pageSize": page_size,
}
if only_savable:
search_parameters["filter"].append({'key': 'attribute', 'value': 'savable'})
if filter_index:
search_parameters["filter"].append({'key': 'index', 'value': filter_index})
if filter_country:
search_parameters["filter"].append({'key': 'country', 'value': filter_country})
if filter_region:
search_parameters["filter"].append({'key': 'region', 'value': filter_region})
if filter_sector:
search_parameters["filter"].append({'key': 'sector', 'value': filter_sector})
search_type = "neonSearch" if not aggregate else "neonSearchAggregations"
return await self.subscribe({"type": search_type, "data": search_parameters})
# product_type: [knockOutProduct, vanillaWarrant]
# option_type: [call, put]
async def search_derivative(self, underlying_isin, product_type="vanillaWarrant", option_type=None):
return await self.subscribe(
{"type": "derivatives", "underlying": underlying_isin, "productCategory": product_type, "optionType": option_type}
)
async def order_overview(self):
return await self.subscribe({"type": "orders"})
async def price_for_order(self, isin, exchange, order_type):
return await self.subscribe(
{"type": "priceForOrder", "parameters": {"exchangeId": exchange, "instrumentId": isin, "type": order_type}}
)
async def cash_available_for_order(self):
return await self.subscribe({"type": "availableCash"})
async def size_available_for_order(self, isin, exchange):
return await self.subscribe(
{"type": "availableSize", "parameters": {"exchangeId": exchange, "instrumentId": isin}}
)
async def limit_order(self, isin, exchange, order_type, size, limit, expiry, expiry_date=None, warnings_shown=None):
parameters = {
"type": "simpleCreateOrder",
"clientProcessId": str(uuid.uuid4()),
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"instrumentId": isin,
"exchangeId": exchange,
"expiry": {"type": expiry},
"limit": limit,
"mode": "limit",
"size": size,
"type": order_type,
}
}
if expiry == "gtd" and expiry_date:
parameters["parameters"]["expiry"]["value"] = expiry_date
return await self.subscribe(parameters)
async def market_order(self, isin, exchange, order_type, size, expiry, sell_fractions, expiry_date=None,
warnings_shown=None):
parameters = {
"type": "simpleCreateOrder",
"clientProcessId": str(uuid.uuid4()),
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"instrumentId": isin,
"exchangeId": exchange,
"expiry": {"type": expiry},
"mode": "market",
"sellFractions": sell_fractions,
"size": size,
"type": order_type,
}
}
if expiry == "gtd" and expiry_date:
parameters["parameters"]["expiry"]["value"] = expiry_date
return await self.subscribe(parameters)
async def stop_market_order(self, isin, exchange, order_type, size, stop, expiry, expiry_date=None,
warnings_shown=None):
parameters = {
"type": "simpleCreateOrder",
"clientProcessId": str(uuid.uuid4()),
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"instrumentId": isin,
"exchangeId": exchange,
"expiry": {"type": expiry},
"mode": "stopMarket",
"size": size,
"stop": stop,
"type": order_type,
}
}
if expiry == "gtd" and expiry_date:
parameters["parameters"]["expiry"]["value"] = expiry_date
return await self.subscribe(parameters)
async def cancel_order(self, order_id):
return await self.subscribe({"type": "cancelOrder", "orderId": order_id})
async def savings_plan_overview(self):
return await self.subscribe({"type": "savingsPlans"})
async def savings_plan_parameters(self, isin):
return await self.subscribe({"type": "cancelSavingsPlan", "instrumentId": isin})
async def create_savings_plan(self, isin, amount, interval, start_date, start_date_type, start_date_value,
warnings_shown=None):
parameters = {
"type": "createSavingsPlan",
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"amount": amount,
"instrumentId": isin,
"interval": interval,
"startDate": {
'nextExecutionDate': start_date,
'type': start_date_type,
'value': start_date_value
}
}
}
return await self.subscribe(parameters)
async def change_savings_plan(self, savings_plan_id, isin, amount, interval,
start_date, start_date_type, start_date_value, warnings_shown=None):
parameters = {
"id": savings_plan_id,
"type": "createSavingsPlan",
"warningsShown": warnings_shown if warnings_shown else [],
"parameters": {
"amount": amount,
"instrumentId": isin,
"interval": interval,
"startDate": {
'nextExecutionDate': start_date,
'type': start_date_type,
'value': start_date_value
}
}
}
return await self.subscribe(parameters)
async def cancel_savings_plan(self, savings_plan_id):
return await self.subscribe({"type": "cancelSavingsPlan", "id": savings_plan_id})
async def price_alarm_overview(self):
return await self.subscribe({"type": "priceAlarms"})
async def create_price_alarm(self, isin, price):
return await self.subscribe({"type": "createPriceAlarm", "instrumentId": isin, "targetPrice": price})
async def cancel_price_alarm(self, price_alarm_id):
return await self.subscribe({"type": "cancelPriceAlarm", "id": price_alarm_id})
async def news(self, isin):
return await self.subscribe({"type": "neonNews", "isin": isin})
async def news_subscriptions(self):
return await self.subscribe({"type": "newsSubscriptions"})
async def subscribe_news(self, isin):
return await self.subscribe({"type": "subscribeNews", "instrumentId": isin})
async def unsubscribe_news(self, isin):
return await self.subscribe({"type": "unsubscribeNews", "instrumentId": isin})
def payout(self, amount):
return self._sign_request("/api/v1/payout", {"amount": amount}).json()
def confirm_payout(self, process_id, code):
r = self._sign_request(f"/api/v1/payout/{process_id}/code", {"code": code})
if r.status_code != 200:
raise ValueError(f"Payout failed with response {r.text!r}")
def settings(self):
return self._sign_request("/api/v1/auth/account", method="GET").json()
def order_cost(self, isin, exchange, order_mode, order_type, size, sell_fractions):
url = f"/api/v1/user/costtransparency?instrumentId={isin}&exchangeId={exchange}" \
f"&mode={order_mode}&type={order_type}&size={size}&sellFractions={sell_fractions}"
return self._sign_request(url, method="GET").text
def savings_plan_cost(self, isin, amount, interval):
url = f"/api/v1/user/savingsplancosttransparency?instrumentId={isin}&amount={amount}&interval={interval}"
return self._sign_request(url, method="GET").text
def __getattr__(self, name):
if name[:9] == "blocking_":
attr = object.__getattribute__(self, name[9:])
if hasattr(attr, '__call__'):
return lambda *args, **kwargs: \
self.run_blocking(timeout=kwargs.pop("timeout", 5), fut=attr(*args, **kwargs))
return object.__getattribute__(self, name)
class TradeRepublicError(ValueError):
def __init__(self, subscription_id, subscription, error_message):
self.subscription_id = subscription_id
self.subscription = subscription
self.error = error_message
| true | true |
f73653a564946bc4476618628286841d18811544 | 87 | py | Python | URI/1 - INICIANTE/Python/1002 - AeraCirculo.py | william-james-pj/LogicaProgramacao | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | [
"MIT"
] | 1 | 2020-04-14T16:48:16.000Z | 2020-04-14T16:48:16.000Z | URI/1 - INICIANTE/Python/1002 - AeraCirculo.py | william-james-pj/LogicaProgramacao | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | [
"MIT"
] | null | null | null | URI/1 - INICIANTE/Python/1002 - AeraCirculo.py | william-james-pj/LogicaProgramacao | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | [
"MIT"
] | null | null | null | raio = float(input())
n = 3.14159
area = n * pow(raio,2)
print('A={:.4f}'.format(area)) | 21.75 | 30 | 0.597701 | raio = float(input())
n = 3.14159
area = n * pow(raio,2)
print('A={:.4f}'.format(area)) | true | true |
f73653de4094f8605b644d6f1bb802bce0914670 | 2,100 | py | Python | convert_ymls.py | geofabrik/openstreetmap-carto-vectortiles | 80f8d70b17c2db4b24bfed94f128f3ef03e0ef16 | [
"CC0-1.0"
] | 50 | 2016-07-13T17:02:22.000Z | 2021-12-21T14:00:27.000Z | convert_ymls.py | geofabrik/openstreetmap-carto-vectortiles | 80f8d70b17c2db4b24bfed94f128f3ef03e0ef16 | [
"CC0-1.0"
] | 5 | 2016-12-22T14:40:57.000Z | 2018-11-10T19:44:23.000Z | convert_ymls.py | geofabrik/openstreetmap-carto-vectortiles | 80f8d70b17c2db4b24bfed94f128f3ef03e0ef16 | [
"CC0-1.0"
] | 6 | 2016-07-29T19:12:44.000Z | 2022-03-31T14:41:33.000Z | import argparse, yaml, os, os.path
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input")
parser.add_argument("-o", "--output")
parser.add_argument("--tm2", "--tm", action="store_const", const="tm2", dest="action")
parser.add_argument("--tm2source", "--tmsource", action="store_const", const="tm2source", dest="action")
parser.add_argument("--zoom", default="14", help="Last zoom in the vector tile, default 14")
parser.add_argument("--source", action="store_true", dest="source")
parser.add_argument("--no-source", action="store_false", dest="source")
parser.add_argument("--only-shapefiles", action="store_true")
parser.add_argument("--only-postgis", action="store_true")
args = parser.parse_args()
cwd = os.getcwd()
with open(args.input) as fp:
projectfile = yaml.load(fp)
if args.action == 'tm2':
new_layers = []
for layer in projectfile['Layer']:
new_layers.append({'id': layer['id']})
projectfile['Layer'] = new_layers
if args.source:
projectfile['source'] = "tmsource://{}/osm-carto.tm2source/".format(cwd)
elif args.action == 'tm2source':
del projectfile['source']
del projectfile['Stylesheet']
zoom = int(args.zoom)
projectfile['maxzoom'] = zoom
for layer in projectfile['Layer']:
# If the maxzoom is less than the minzoom, don't do anything
# This can happen for a generic land polygon shapefile, which has
# maxzoom: 9. If you include a minzoom: 14 in that layer, then that
# layer won't show up from 0-9, i.e. it won't show up at all.
if layer['properties'].get('minzoom', 22) > zoom and layer['properties'].get('maxzoom', 22) >= zoom:
layer['properties']['minzoom'] = zoom
if args.only_shapefiles:
projectfile['Layer'] = [l for l in projectfile['Layer'] if l['Datasource']['type'] == 'shape']
elif args.only_postgis:
projectfile['Layer'] = [l for l in projectfile['Layer'] if l['Datasource']['type'] == 'postgis']
else:
raise NotImplementedError()
with open(args.output, 'w') as fp:
yaml.safe_dump(projectfile, fp)
| 36.206897 | 108 | 0.661905 | import argparse, yaml, os, os.path
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input")
parser.add_argument("-o", "--output")
parser.add_argument("--tm2", "--tm", action="store_const", const="tm2", dest="action")
parser.add_argument("--tm2source", "--tmsource", action="store_const", const="tm2source", dest="action")
parser.add_argument("--zoom", default="14", help="Last zoom in the vector tile, default 14")
parser.add_argument("--source", action="store_true", dest="source")
parser.add_argument("--no-source", action="store_false", dest="source")
parser.add_argument("--only-shapefiles", action="store_true")
parser.add_argument("--only-postgis", action="store_true")
args = parser.parse_args()
cwd = os.getcwd()
with open(args.input) as fp:
projectfile = yaml.load(fp)
if args.action == 'tm2':
new_layers = []
for layer in projectfile['Layer']:
new_layers.append({'id': layer['id']})
projectfile['Layer'] = new_layers
if args.source:
projectfile['source'] = "tmsource://{}/osm-carto.tm2source/".format(cwd)
elif args.action == 'tm2source':
del projectfile['source']
del projectfile['Stylesheet']
zoom = int(args.zoom)
projectfile['maxzoom'] = zoom
for layer in projectfile['Layer']:
# This can happen for a generic land polygon shapefile, which has
# maxzoom: 9. If you include a minzoom: 14 in that layer, then that
# layer won't show up from 0-9, i.e. it won't show up at all.
if layer['properties'].get('minzoom', 22) > zoom and layer['properties'].get('maxzoom', 22) >= zoom:
layer['properties']['minzoom'] = zoom
if args.only_shapefiles:
projectfile['Layer'] = [l for l in projectfile['Layer'] if l['Datasource']['type'] == 'shape']
elif args.only_postgis:
projectfile['Layer'] = [l for l in projectfile['Layer'] if l['Datasource']['type'] == 'postgis']
else:
raise NotImplementedError()
with open(args.output, 'w') as fp:
yaml.safe_dump(projectfile, fp)
| true | true |
f736544ee090ae0f772d827cff827a4f55b45858 | 3,479 | py | Python | QUANTAXIS/QASetting/crontab.py | xiongyixiaoyang/QUANTAXIS | 08441ce711e55385e2b01f80df17d34e7e89f564 | [
"MIT"
] | 2 | 2019-05-31T02:53:12.000Z | 2020-03-09T03:21:55.000Z | QUANTAXIS/QASetting/crontab.py | xiongyixiaoyang/QUANTAXIS | 08441ce711e55385e2b01f80df17d34e7e89f564 | [
"MIT"
] | null | null | null | QUANTAXIS/QASetting/crontab.py | xiongyixiaoyang/QUANTAXIS | 08441ce711e55385e2b01f80df17d34e7e89f564 | [
"MIT"
] | 3 | 2018-11-29T07:07:56.000Z | 2021-02-09T17:24:56.000Z | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import md5
import time
from .external.crontab import CronTab as _CronTab
from QUANTAXIS.QASetting.cache import get_cache
# Globals ######################################################################
DEBUG = False
class CronTabItem(_CronTab):
"""A cron tab schedule.
:param str cronschedule: The cron schedule. E.g. ``@daily``,
``0 0 * * */2 *``. `See the project page for more
information <https://github.com/josiahcarlson/parse-crontab>`_.
"""
def __init__(self, cronschedule):
self.__schedule = cronschedule
super(CronTabItem, self).__init__(cronschedule)
@property
def schedule(self):
return self.__schedule
def next_time(self, asc=False):
"""Get the local time of the next schedule time this job will run.
:param bool asc: Format the result with ``time.asctime()``
:returns: The epoch time or string representation of the epoch time that
the job should be run next
"""
_time = time.localtime(time.time() + self.next())
if asc:
return time.asctime(_time)
return time.mktime(_time)
class CronTab(object):
"""Represents a set of cron jobs, much like a crontab file. The jobs will
be given an ID (md5 hash of description + command + schedule). If the job
already exists in the cache, "last-run" and "last-run-result" will be read
from the cache. If the job does not exist in the cache, it will be added.
:param list jobs: A list of dictionaries representing a job
"""
def __init__(self, jobs):
cache = get_cache()
self.jobs = []
for job in jobs:
m = md5.new()
m.update(job["description"])
m.update(job["command"])
m.update(job["cron-job"])
job["id"] = m.hexdigest()
job["cron-job"] = CronTabItem(job["cron-job"])
job["next-run"] = job["cron-job"].next_time()
cached = cache.get(job["id"])
if cached:
job["last-run"] = cached["last-run"]
job["last-run-result"] = cached["last-run-result"]
else:
job["last-run"] = 0
job["last-run-result"] = 0
cache.add_job(job)
self.jobs.append(job) | 37.010638 | 80 | 0.634378 |
import md5
import time
from .external.crontab import CronTab as _CronTab
from QUANTAXIS.QASetting.cache import get_cache
| true | true |
f73654a1fc198e5bf51531dd3397e8f7e7919a6b | 957 | py | Python | stackl/helpers.py | ArtOfCode-/stackl | c66c025fa39c5fc5a6dbda7f0ea0628ee526b4b6 | [
"MIT"
] | null | null | null | stackl/helpers.py | ArtOfCode-/stackl | c66c025fa39c5fc5a6dbda7f0ea0628ee526b4b6 | [
"MIT"
] | null | null | null | stackl/helpers.py | ArtOfCode-/stackl | c66c025fa39c5fc5a6dbda7f0ea0628ee526b4b6 | [
"MIT"
] | null | null | null | class Helpers:
_cache = {}
@classmethod
def cached(cls, key, scope=None, func=None):
if scope is not None:
if scope not in cls._cache:
cls._cache[scope] = {}
if key in cls._cache[scope]:
return cls._cache[scope][key]
else:
result = None if func is None else func()
cls._cache[scope][key] = result
return result
else:
if key in cls._cache:
return cls._cache[key]
else:
result = None if func is None else func()
cls._cache[key] = result
return result
@classmethod
def cache(cls, key, scope=None, object=None):
if scope is not None:
if scope not in cls._cache:
cls._cache[scope] = {}
cls._cache[scope][key] = object
else:
cls._cache[key] = object
| 28.147059 | 57 | 0.487983 | class Helpers:
_cache = {}
@classmethod
def cached(cls, key, scope=None, func=None):
if scope is not None:
if scope not in cls._cache:
cls._cache[scope] = {}
if key in cls._cache[scope]:
return cls._cache[scope][key]
else:
result = None if func is None else func()
cls._cache[scope][key] = result
return result
else:
if key in cls._cache:
return cls._cache[key]
else:
result = None if func is None else func()
cls._cache[key] = result
return result
@classmethod
def cache(cls, key, scope=None, object=None):
if scope is not None:
if scope not in cls._cache:
cls._cache[scope] = {}
cls._cache[scope][key] = object
else:
cls._cache[key] = object
| true | true |
f736559162d2fb353e82402c79261dc665f58d3e | 425 | py | Python | pytorch_forecasting/__init__.py | KazukiNoto/pytorch-forecasting | 8a1636388e091456f042f999892dd52733903dd6 | [
"MIT"
] | 1 | 2021-06-09T09:51:13.000Z | 2021-06-09T09:51:13.000Z | pytorch_forecasting/__init__.py | KazukiNoto/pytorch-forecasting | 8a1636388e091456f042f999892dd52733903dd6 | [
"MIT"
] | null | null | null | pytorch_forecasting/__init__.py | KazukiNoto/pytorch-forecasting | 8a1636388e091456f042f999892dd52733903dd6 | [
"MIT"
] | 1 | 2021-06-15T11:31:44.000Z | 2021-06-15T11:31:44.000Z | """
PyTorch Forecasting package for timeseries forecasting with PyTorch.
"""
from pytorch_forecasting.data import EncoderNormalizer, GroupNormalizer, TimeSeriesDataSet
from pytorch_forecasting.models import Baseline, NBeats, TemporalFusionTransformer
__all__ = [
"TimeSeriesDataSet",
"GroupNormalizer",
"EncoderNormalizer",
"TemporalFusionTransformer",
"NBeats",
"Baseline",
]
__version__ = "0.0.0"
| 25 | 90 | 0.764706 | from pytorch_forecasting.data import EncoderNormalizer, GroupNormalizer, TimeSeriesDataSet
from pytorch_forecasting.models import Baseline, NBeats, TemporalFusionTransformer
__all__ = [
"TimeSeriesDataSet",
"GroupNormalizer",
"EncoderNormalizer",
"TemporalFusionTransformer",
"NBeats",
"Baseline",
]
__version__ = "0.0.0"
| true | true |
f73655e939118e53abadbf7e66e1712cae7bc2c9 | 6,540 | py | Python | tensorflow_toolkit/text_recognition/text_recognition/model.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 3 | 2020-12-29T02:47:32.000Z | 2021-11-12T08:12:51.000Z | tensorflow_toolkit/text_recognition/text_recognition/model.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 28 | 2020-09-25T22:40:36.000Z | 2022-03-12T00:37:36.000Z | tensorflow_toolkit/text_recognition/text_recognition/model.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 1 | 2021-03-12T10:08:44.000Z | 2021-03-12T10:08:44.000Z | # Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
""" This module contains architecture of Text Recognition model."""
import tensorflow as tf
from tensorflow.contrib import rnn
import tensorflow.contrib.slim as slim
class TextRecognition:
    """ Text recognition model definition.

    CNN feature extractor followed by a stacked bidirectional LSTM
    encoder-decoder producing per-timestep class logits.
    """
    def __init__(self, is_training, num_classes, backbone_dropout=0.0):
        # is_training: bool (or boolean tensor) gating batch-norm statistics
        #   and dropout.
        # num_classes: size of the output alphabet for the final projection.
        # backbone_dropout: dropout probability applied between CNN stages.
        self.is_training = is_training
        self.lstm_dim = 256
        self.num_classes = num_classes
        self.backbone_dropout = backbone_dropout
    def __call__(self, inputdata):
        # The CNN collapses the image height to 1 (see the strided pools /
        # final stride-[2,1] conv), so axis=1 is squeezed before the RNN.
        with tf.variable_scope('shadow'):
            features = self.feature_extractor(inputdata=inputdata)
            logits = self.encoder_decoder(inputdata=tf.squeeze(features, axis=1))
        return logits
    # pylint: disable=too-many-locals
    def feature_extractor(self, inputdata):
        """ Extracts features from input text image.

        VGG-style stack of (dropout -> conv -> batch-norm [-> pool]) stages.
        Pools after stage 4 onward use kernel/stride [2, 1] so only the
        height shrinks, preserving horizontal (sequence) resolution.
        """
        with slim.arg_scope([slim.conv2d], padding='SAME',
                            weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            weights_regularizer=slim.l2_regularizer(0.00025),
                            biases_initializer=None, activation_fn=None):
            with slim.arg_scope([slim.batch_norm], updates_collections=None):
                # Input normalization (batch-norm directly on the image).
                bn0 = slim.batch_norm(inputdata, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=None)
                # Stage 1: 64 filters, 2x2 pool halves both dimensions.
                dropout1 = slim.dropout(bn0, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv1 = slim.conv2d(dropout1, num_outputs=64, kernel_size=3)
                bn1 = slim.batch_norm(conv1, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                pool1 = slim.max_pool2d(bn1, kernel_size=2, stride=2)
                # Stage 2: 128 filters, 2x2 pool.
                dropout2 = slim.dropout(pool1, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv2 = slim.conv2d(dropout2, num_outputs=128, kernel_size=3)
                bn2 = slim.batch_norm(conv2, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                pool2 = slim.max_pool2d(bn2, kernel_size=2, stride=2)
                # Stages 3-4: two 256-filter convs, then a height-only pool.
                dropout3 = slim.dropout(pool2, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv3 = slim.conv2d(dropout3, num_outputs=256, kernel_size=3)
                bn3 = slim.batch_norm(conv3, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                dropout4 = slim.dropout(bn3, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv4 = slim.conv2d(dropout4, num_outputs=256, kernel_size=3)
                bn4 = slim.batch_norm(conv4, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                pool4 = slim.max_pool2d(bn4, kernel_size=[2, 1], stride=[2, 1])
                # Stages 5-6: two 512-filter convs, then another height-only pool.
                dropout5 = slim.dropout(pool4, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv5 = slim.conv2d(dropout5, num_outputs=512, kernel_size=3)
                bn5 = slim.batch_norm(conv5, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                dropout6 = slim.dropout(bn5, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv6 = slim.conv2d(dropout6, num_outputs=512, kernel_size=3)
                bn6 = slim.batch_norm(conv6, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                pool6 = slim.max_pool2d(bn6, kernel_size=[2, 1], stride=[2, 1])
                # Final stage: stride [2, 1] conv collapses the remaining
                # height; output is (batch, 1, width, 512).
                dropout7 = slim.dropout(pool6, keep_prob=1.0 - self.backbone_dropout,
                                        is_training=self.is_training)
                conv7 = slim.conv2d(dropout7, num_outputs=512, kernel_size=2, stride=[2, 1])
                bn7 = slim.batch_norm(conv7, 0.9, scale=True, is_training=self.is_training,
                                      activation_fn=tf.nn.relu)
                return bn7
    def encoder_decoder(self, inputdata):
        """ LSTM-based encoder-decoder module.

        Two stacked bidirectional LSTM blocks followed by a per-timestep
        linear projection to num_classes logits. Returns a time-major
        tensor of shape (width, batch, num_classes) — presumably for
        CTC-style decoding; confirm against the training/inference code.
        """
        with tf.variable_scope('LSTMLayers'):
            # Static shape: width becomes the sequence length of the RNN.
            [batch_size, width, _] = inputdata.get_shape().as_list()
            with tf.variable_scope('encoder'):
                forward_cells = []
                backward_cells = []
                for _ in range(2):
                    forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
                    backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
                encoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
                    forward_cells, backward_cells, inputdata, dtype=tf.float32)
            with tf.variable_scope('decoder'):
                forward_cells = []
                backward_cells = []
                for _ in range(2):
                    forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
                    backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
                decoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
                    forward_cells, backward_cells, encoder_layer, dtype=tf.float32)
            # Project every timestep to class logits, then transpose to
            # time-major (width, batch, classes).
            rnn_reshaped = tf.reshape(decoder_layer, [batch_size * width, -1])
            logits = slim.fully_connected(rnn_reshaped, self.num_classes, activation_fn=None)
            logits = tf.reshape(logits, [batch_size, width, self.num_classes])
            rnn_out = tf.transpose(logits, (1, 0, 2))
        return rnn_out
| 49.172932 | 97 | 0.595107 |
import tensorflow as tf
from tensorflow.contrib import rnn
import tensorflow.contrib.slim as slim
class TextRecognition:
def __init__(self, is_training, num_classes, backbone_dropout=0.0):
self.is_training = is_training
self.lstm_dim = 256
self.num_classes = num_classes
self.backbone_dropout = backbone_dropout
def __call__(self, inputdata):
with tf.variable_scope('shadow'):
features = self.feature_extractor(inputdata=inputdata)
logits = self.encoder_decoder(inputdata=tf.squeeze(features, axis=1))
return logits
def feature_extractor(self, inputdata):
with slim.arg_scope([slim.conv2d], padding='SAME',
weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
weights_regularizer=slim.l2_regularizer(0.00025),
biases_initializer=None, activation_fn=None):
with slim.arg_scope([slim.batch_norm], updates_collections=None):
bn0 = slim.batch_norm(inputdata, 0.9, scale=True, is_training=self.is_training,
activation_fn=None)
dropout1 = slim.dropout(bn0, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv1 = slim.conv2d(dropout1, num_outputs=64, kernel_size=3)
bn1 = slim.batch_norm(conv1, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool1 = slim.max_pool2d(bn1, kernel_size=2, stride=2)
dropout2 = slim.dropout(pool1, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv2 = slim.conv2d(dropout2, num_outputs=128, kernel_size=3)
bn2 = slim.batch_norm(conv2, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool2 = slim.max_pool2d(bn2, kernel_size=2, stride=2)
dropout3 = slim.dropout(pool2, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv3 = slim.conv2d(dropout3, num_outputs=256, kernel_size=3)
bn3 = slim.batch_norm(conv3, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
dropout4 = slim.dropout(bn3, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv4 = slim.conv2d(dropout4, num_outputs=256, kernel_size=3)
bn4 = slim.batch_norm(conv4, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool4 = slim.max_pool2d(bn4, kernel_size=[2, 1], stride=[2, 1])
dropout5 = slim.dropout(pool4, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv5 = slim.conv2d(dropout5, num_outputs=512, kernel_size=3)
bn5 = slim.batch_norm(conv5, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
dropout6 = slim.dropout(bn5, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv6 = slim.conv2d(dropout6, num_outputs=512, kernel_size=3)
bn6 = slim.batch_norm(conv6, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
pool6 = slim.max_pool2d(bn6, kernel_size=[2, 1], stride=[2, 1])
dropout7 = slim.dropout(pool6, keep_prob=1.0 - self.backbone_dropout,
is_training=self.is_training)
conv7 = slim.conv2d(dropout7, num_outputs=512, kernel_size=2, stride=[2, 1])
bn7 = slim.batch_norm(conv7, 0.9, scale=True, is_training=self.is_training,
activation_fn=tf.nn.relu)
return bn7
def encoder_decoder(self, inputdata):
with tf.variable_scope('LSTMLayers'):
[batch_size, width, _] = inputdata.get_shape().as_list()
with tf.variable_scope('encoder'):
forward_cells = []
backward_cells = []
for _ in range(2):
forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
encoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
forward_cells, backward_cells, inputdata, dtype=tf.float32)
with tf.variable_scope('decoder'):
forward_cells = []
backward_cells = []
for _ in range(2):
forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
decoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
forward_cells, backward_cells, encoder_layer, dtype=tf.float32)
rnn_reshaped = tf.reshape(decoder_layer, [batch_size * width, -1])
logits = slim.fully_connected(rnn_reshaped, self.num_classes, activation_fn=None)
logits = tf.reshape(logits, [batch_size, width, self.num_classes])
rnn_out = tf.transpose(logits, (1, 0, 2))
return rnn_out
| true | true |
f7365729a08e2f23924988f567fa30354632418c | 4,823 | py | Python | docs/conf.py | michaelbilow/issho | ed0dd5487978b1753c14ce5d444d07448b13d168 | [
"MIT"
] | 1 | 2019-04-22T17:20:05.000Z | 2019-04-22T17:20:05.000Z | docs/conf.py | michaelbilow/smol | ed0dd5487978b1753c14ce5d444d07448b13d168 | [
"MIT"
] | 7 | 2019-03-28T14:26:46.000Z | 2019-05-31T03:58:26.000Z | docs/conf.py | michaelbilow/smol | ed0dd5487978b1753c14ce5d444d07448b13d168 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# issho documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import issho
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"issho"
copyright = u"2019, Michael Bilow"
author = u"Michael Bilow"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = issho.__version__
# The full version, including alpha/beta/rc tags.
release = issho.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "isshodoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "issho.tex", u"issho Documentation", u"Michael Bilow", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "issho", u"issho Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"issho",
u"issho Documentation",
author,
"issho",
"Simple connections & command execution with a remote host.",
"issho Documentation",
)
]
| 30.916667 | 81 | 0.681733 |
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import issho
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u"issho"
copyright = u"2019, Michael Bilow"
author = u"Michael Bilow"
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = issho.__version__
# The full version, including alpha/beta/rc tags.
release = issho.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "isshodoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "issho.tex", u"issho Documentation", u"Michael Bilow", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "issho", u"issho Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"issho",
u"issho Documentation",
author,
"issho",
"Simple connections & command execution with a remote host.",
"issho Documentation",
)
]
| true | true |
f7365971d36a0732ad199a569df15896294a5c5d | 2,173 | py | Python | app.py | liyunze-coding/Trigger-Me-Elmo-2 | 6950ffa4bfd264e213626f1ab3cff249fbab36da | [
"MIT"
] | 1 | 2022-01-02T09:50:38.000Z | 2022-01-02T09:50:38.000Z | app.py | liyunze-coding/Trigger-Me-Elmo-2 | 6950ffa4bfd264e213626f1ab3cff249fbab36da | [
"MIT"
] | null | null | null | app.py | liyunze-coding/Trigger-Me-Elmo-2 | 6950ffa4bfd264e213626f1ab3cff249fbab36da | [
"MIT"
] | 1 | 2022-01-10T19:59:15.000Z | 2022-01-10T19:59:15.000Z | from flask import Flask, render_template, request, jsonify
import base64
import logging
import numpy as np
from deepface import DeepFace
from PIL import Image
from io import BytesIO
import subprocess
import os
import cv2
import random
import webbrowser
app = Flask(__name__)

# Silence werkzeug's per-request access logging; only errors are printed.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Haar cascade for face detection; the XML file must sit next to this script.
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Fallback payload returned when DeepFace cannot analyze a frame.
# NOTE(review): this dict is shared module state — handlers should copy it
# before adding per-request keys.
error_path = {'race': {'asian': 0, 'indian': 0, 'black': 0, 'white': 0,
                       'middle eastern': 0, 'latino hispanic': 0}, 'dominant_race': '?'}

# Directory for captured frames: created if missing, emptied on every startup.
directory = 'static/img'
if 'img' not in os.listdir('static/'):
    os.mkdir(directory)
for f in os.listdir(directory):
    os.remove(os.path.join(directory, f))
def generate_random_string():
    """Return a fresh image path made of ten random digits under the
    static image directory, e.g. ``static/img/0123456789.png``."""
    digits = '1234567890'
    stem = ''.join(random.choice(digits) for _ in range(10))
    return f'{directory}/{stem}.png'
@app.route('/')
def main():
    """Serve the single-page webcam UI."""
    return render_template('index.html')
@app.route('/photocap')
def photo_cap():
    """Analyze a webcam frame posted as a base64 data-URL query parameter.

    Decodes the ``photo`` query argument, draws rectangles around detected
    faces, saves the annotated frame under ``static/img``, and returns the
    DeepFace race analysis as JSON (with a ``filename`` key pointing at the
    saved image). On any analysis failure the neutral ``error_path``
    template is returned instead.
    """
    photo_base64 = request.args.get('photo')
    # Strip the "data:image/...;base64," prefix from the data URL.
    _, encoded = photo_base64.split(",", 1)
    binary_data = base64.b64decode(encoded)

    image = Image.open(BytesIO(binary_data))
    # PIL gives RGB; OpenCV works in BGR.
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

    fn = generate_random_string()
    cv2.imwrite(fn, image)

    try:
        obj = DeepFace.analyze(image, actions=['race'])
    except Exception as e:
        # DeepFace raises ValueError when no face is found; any other
        # failure gets the same neutral payload. Copy the module-level
        # template so repeated requests do not mutate shared state.
        print(e)
        obj = dict(error_path)
    obj['filename'] = fn
    return jsonify(obj)
if __name__ == "__main__":
# p = subprocess.Popen(['python -m SimpleHTTPServer'], shell=True) #Only for macOS
webbrowser.open_new('http://127.0.0.1:8000/')
app.run(host='localhost', port=8000, debug=True)
| 25.869048 | 88 | 0.658076 | from flask import Flask, render_template, request, jsonify
import base64
import logging
import numpy as np
from deepface import DeepFace
from PIL import Image
from io import BytesIO
import subprocess
import os
import cv2
import random
import webbrowser
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
error_path = {'race': {'asian': 0, 'indian': 0, 'black': 0, 'white': 0,
'middle eastern': 0, 'latino hispanic': 0}, 'dominant_race': '?'}
directory = 'static/img'
if 'img' not in os.listdir('static/'):
os.mkdir(directory)
for f in os.listdir(directory):
os.remove(os.path.join(directory, f))
def generate_random_string():
numbers = '1234567890'
res = ''.join(random.choice(numbers) for _ in range(10))
return f'{directory}/{res}.png'
@app.route('/')
def main():
return render_template('index.html')
@app.route('/photocap')
def photo_cap():
photo_base64 = request.args.get('photo')
_, encoded = photo_base64.split(",", 1)
binary_data = base64.b64decode(encoded)
f = BytesIO()
f.write(binary_data)
f.seek(0)
image = Image.open(f)
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
fn = generate_random_string()
cv2.imwrite(fn, image)
try:
obj = DeepFace.analyze(image, actions=['race'])
obj['filename'] = fn
return jsonify(obj)
except ValueError:
other_json = error_path
other_json['filename'] = fn
return jsonify(other_json)
except Exception as e:
print(e)
other_json = error_path
other_json['filename'] = fn
return jsonify(other_json)
if __name__ == "__main__":
.open_new('http://127.0.0.1:8000/')
app.run(host='localhost', port=8000, debug=True)
| true | true |
f7365980ef0f648a9b1148fdf6a7cf556501bab6 | 15,803 | py | Python | solver.py | pyrito/SpeechSplit | ee70ee77e54d5b7cd1b39e7bef1cb96ae78f8beb | [
"MIT"
] | null | null | null | solver.py | pyrito/SpeechSplit | ee70ee77e54d5b7cd1b39e7bef1cb96ae78f8beb | [
"MIT"
] | null | null | null | solver.py | pyrito/SpeechSplit | ee70ee77e54d5b7cd1b39e7bef1cb96ae78f8beb | [
"MIT"
] | null | null | null | from torch.utils.tensorboard.summary import hparams
from model import Generator_3 as Generator
from model import InterpLnr
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
import pickle
from utils import pad_seq_to_2, quantize_f0_torch, quantize_f0_numpy
# use demo data for simplicity
# make your own validation set as needed
# Use a context manager so the file handle is closed promptly instead of
# leaking until garbage collection.
with open('assets/demo.pkl', "rb") as _demo_file:
    validation_pt = pickle.load(_demo_file)
class Solver(object):
    """Solver for training"""

    def __init__(self, vcc_loader, config, hparams):
        """Initialize configurations.

        Args:
            vcc_loader: iterable data loader yielding training batches.
                NOTE(review): train() unpacks 4-tuples while
                encode_context() unpacks 5-tuples — confirm the expected
                loader per entry point.
            config: namespace with optimizer, logging, and path options.
            hparams: model hyperparameters forwarded to the Generator.
        """
        # Data loader.
        self.vcc_loader = vcc_loader
        self.hparams = hparams

        # Training configurations.
        self.num_iters = config.num_iters
        self.g_lr = config.g_lr
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters

        # Miscellaneous.
        self.use_tensorboard = config.use_tensorboard
        self.use_cuda = torch.cuda.is_available()
        # Pin to the configured GPU when CUDA is available, else CPU.
        self.device = torch.device('cuda:{}'.format(config.device_id) if self.use_cuda else 'cpu')

        # Directories.
        self.log_dir = config.log_dir
        self.sample_dir = config.sample_dir
        self.model_save_dir = config.model_save_dir

        # Step size.
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step

        # Build the model and tensorboard.
        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()
    def build_model(self):
        """Instantiate the generator, the interpolation module, and the Adam
        optimizer, then move the modules onto the target device."""
        self.G = Generator(self.hparams)
        # InterpLnr: presumably random linear resampling along time used as
        # augmentation in train() — confirm in model.py.
        self.Interp = InterpLnr(self.hparams)
        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
        self.print_network(self.G, 'G')

        self.G.to(self.device)
        self.Interp.to(self.device)
def print_network(self, model, name):
"""Print out the network information."""
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model)
print(name)
print("The number of parameters: {}".format(num_params))
def print_optimizer(self, opt, name):
print(opt)
print(name)
    def restore_model(self, resume_iters):
        """Load generator weights and optimizer state saved at step
        *resume_iters*, and sync self.g_lr with the restored optimizer."""
        print('Loading the trained models from step {}...'.format(resume_iters))
        G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
        # map_location keeps CPU-only machines able to load GPU checkpoints.
        g_checkpoint = torch.load(G_path, map_location=lambda storage, loc: storage)
        self.G.load_state_dict(g_checkpoint['model'])
        self.g_optimizer.load_state_dict(g_checkpoint['optimizer'])
        self.g_lr = self.g_optimizer.param_groups[0]['lr']
    def build_tensorboard(self):
        """Build a tensorboard logger."""
        # Imported lazily so tensorboard is only required when logging is on.
        from torch.utils.tensorboard import SummaryWriter
        self.writer = SummaryWriter(self.log_dir)
    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_optimizer.zero_grad()
def encode_context(self):
# Set data loader.
data_loader = self.vcc_loader
# Fetch fixed inputs for debugging.
data_iter = iter(data_loader)
# Start encoding from scratch or resume from checkpoint.
start_iters = 0
if self.resume_iters:
print('Resuming ...')
start_iters = self.resume_iters
self.num_iters += self.resume_iters
self.restore_model(self.resume_iters)
# self.print_optimizer(self.g_optimizer, 'G_optimizer')
# Print logs in specified order
keys = ['G/loss_id']
# Start encoding.
print('Start encoding...')
start_time = time.time()
encoded_audio = {}
# May need this if looping doesn't work:
# for i in max(range(start_iters, self.num_iters), len(self.vcc_loader)):
print(len(self.vcc_loader))
count = 0
for i, (x_real_org, emb_org, f0_org, len_org, id_org) in enumerate(self.vcc_loader):
# =================================================================================== #
# 1. Send input data to device #
# =================================================================================== #
# x_real_org = x_real_org.to(self.device)
# emb_org = emb_org.to(self.device)
# len_org = len_org.to(self.device)
# f0_org = f0_org.to(self.device)
# =================================================================================== #
# 2. Encode using the generator #
# =================================================================================== #
self.G = self.G.eval()
pad = 8 - ((len_org[0] + 1) % 8)
encode_length = len_org[0] + 1 + pad
print(id_org)
x_real_pad, _ = pad_seq_to_2(x_real_org, encode_length)
# len_org = torch.tensor([val_sub[k][2]]).to(self.device)
f0_org_pad, _ = pad_seq_to_2(f0_org, encode_length) # np.pad(f0_org, (0, 512-len_org[0]), 'constant', constant_values=(0, 0))
assert x_real_pad.shape[1] == f0_org_pad.shape[1]
f0_quantized = quantize_f0_numpy(np.squeeze(f0_org_pad))[0]
f0_onehot = f0_quantized[np.newaxis, :, :]
f0_org_val = torch.from_numpy(f0_onehot).to(self.device)
x_real_pad = torch.from_numpy(x_real_pad).to(self.device)
x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)
code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0, x_real_pad, emb_org)
# code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0_intrp_org, x_real_org, emb_org)
# print(f'content: {code_content}')
encoded_audio[id_org[0]] = code_content
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Audio file[{}/{}]".format(et, i+1, len(self.vcc_loader))
print(log)
count += 1
if count % 100 == 0:
with open(f'assets/encoded-{self.hparams.encode_mode}-{count}.pkl', 'wb') as f:
pickle.dump(encoded_audio, f)
del encoded_audio
encoded_audio = {}
#=====================================================================================================================
def train(self):
# Set data loader.
data_loader = self.vcc_loader
# Fetch fixed inputs for debugging.
data_iter = iter(data_loader)
# Start training from scratch or resume training.
start_iters = 0
if self.resume_iters:
print('Resuming ...')
start_iters = self.resume_iters
self.num_iters += self.resume_iters
self.restore_model(self.resume_iters)
self.print_optimizer(self.g_optimizer, 'G_optimizer')
# Learning rate cache for decaying.
g_lr = self.g_lr
print ('Current learning rates, g_lr: {}.'.format(g_lr))
# Print logs in specified order
keys = ['G/loss_id']
# Start training.
print('Start training...')
start_time = time.time()
for i in range(start_iters, self.num_iters):
# =================================================================================== #
# 1. Preprocess input data #
# =================================================================================== #
# Fetch real images and labels.
try:
x_real_org, emb_org, f0_org, len_org = next(data_iter)
except:
data_iter = iter(data_loader)
x_real_org, emb_org, f0_org, len_org = next(data_iter)
x_real_org = x_real_org.to(self.device)
emb_org = emb_org.to(self.device)
len_org = len_org.to(self.device)
f0_org = f0_org.to(self.device)
# =================================================================================== #
# 2. Train the generator #
# =================================================================================== #
self.G = self.G.train()
# Identity mapping loss
x_f0 = torch.cat((x_real_org, f0_org), dim=-1)
x_f0_intrp = self.Interp(x_f0, len_org)
f0_org_intrp = quantize_f0_torch(x_f0_intrp[:,:,-1])[0]
x_f0_intrp_org = torch.cat((x_f0_intrp[:,:,:-1], f0_org_intrp), dim=-1)
x_identic = self.G(x_f0_intrp_org, x_real_org, emb_org)
g_loss_id = F.mse_loss(x_real_org, x_identic, reduction='mean')
# Backward and optimize.
g_loss = g_loss_id
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss = {}
loss['G/loss_id'] = g_loss_id.item()
# =================================================================================== #
# 4. Miscellaneous #
# =================================================================================== #
# Print out training information.
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
for tag in keys:
log += ", {}: {:.8f}".format(tag, loss[tag])
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.writer.add_scalar(tag, value, i+1)
# Save model checkpoints.
if (i+1) % self.model_save_step == 0:
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
torch.save({'model': self.G.state_dict(),
'optimizer': self.g_optimizer.state_dict()}, G_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir))
# Validation.
if (i+1) % self.sample_step == 0:
self.G = self.G.eval()
with torch.no_grad():
loss_val = []
for val_sub in validation_pt:
emb_org_val = torch.from_numpy(val_sub[1]).to(self.device)
for k in range(2, 3):
x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)
len_org = torch.tensor([val_sub[k][2]]).to(self.device)
f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))
f0_quantized = quantize_f0_numpy(f0_org)[0]
f0_onehot = f0_quantized[np.newaxis, :, :]
f0_org_val = torch.from_numpy(f0_onehot).to(self.device)
x_real_pad = torch.from_numpy(x_real_pad).to(self.device)
x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)
x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)
g_loss_val = F.mse_loss(x_real_pad, x_identic_val, reduction='sum')
loss_val.append(g_loss_val.item())
val_loss = np.mean(loss_val)
print('Validation loss: {}'.format(val_loss))
if self.use_tensorboard:
self.writer.add_scalar('Validation_loss', val_loss, i+1)
# plot test samples
if (i+1) % self.sample_step == 0:
self.G = self.G.eval()
with torch.no_grad():
for val_sub in validation_pt:
emb_org_val = torch.from_numpy(val_sub[1]).to(self.device)
for k in range(2, 3):
x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)
len_org = torch.tensor([val_sub[k][2]]).to(self.device)
f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))
f0_quantized = quantize_f0_numpy(f0_org)[0]
f0_onehot = f0_quantized[np.newaxis, :, :]
f0_org_val = torch.from_numpy(f0_onehot).to(self.device)
x_real_pad = torch.from_numpy(x_real_pad).to(self.device)
x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)
x_f0_F = torch.cat((x_real_pad, torch.zeros_like(f0_org_val)), dim=-1)
x_f0_C = torch.cat((torch.zeros_like(x_real_pad), f0_org_val), dim=-1)
x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)
x_identic_woF = self.G(x_f0_F, x_real_pad, emb_org_val)
x_identic_woR = self.G(x_f0, torch.zeros_like(x_real_pad), emb_org_val)
x_identic_woC = self.G(x_f0_C, x_real_pad, emb_org_val)
melsp_gd_pad = x_real_pad[0].cpu().numpy().T
melsp_out = x_identic_val[0].cpu().numpy().T
melsp_woF = x_identic_woF[0].cpu().numpy().T
melsp_woR = x_identic_woR[0].cpu().numpy().T
melsp_woC = x_identic_woC[0].cpu().numpy().T
min_value = np.min(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))
max_value = np.max(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))
# fig, (ax1,ax2,ax3,ax4,ax5) = plt.subplots(5, 1, sharex=True)
# im1 = ax1.imshow(melsp_gd_pad, aspect='auto', vmin=min_value, vmax=max_value)
# im2 = ax2.imshow(melsp_out, aspect='auto', vmin=min_value, vmax=max_value)
# im3 = ax3.imshow(melsp_woC, aspect='auto', vmin=min_value, vmax=max_value)
# im4 = ax4.imshow(melsp_woR, aspect='auto', vmin=min_value, vmax=max_value)
# im5 = ax5.imshow(melsp_woF, aspect='auto', vmin=min_value, vmax=max_value)
# plt.savefig(f'{self.sample_dir}/{i+1}_{val_sub[0]}_{k}.png', dpi=150)
# plt.close(fig) | 45.41092 | 137 | 0.485351 | from torch.utils.tensorboard.summary import hparams
from model import Generator_3 as Generator
from model import InterpLnr
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
import pickle
from utils import pad_seq_to_2, quantize_f0_torch, quantize_f0_numpy
validation_pt = pickle.load(open('assets/demo.pkl', "rb"))
class Solver(object):
def __init__(self, vcc_loader, config, hparams):
self.vcc_loader = vcc_loader
self.hparams = hparams
self.num_iters = config.num_iters
self.g_lr = config.g_lr
self.beta1 = config.beta1
self.beta2 = config.beta2
self.resume_iters = config.resume_iters
self.use_tensorboard = config.use_tensorboard
self.use_cuda = torch.cuda.is_available()
self.device = torch.device('cuda:{}'.format(config.device_id) if self.use_cuda else 'cpu')
self.log_dir = config.log_dir
self.sample_dir = config.sample_dir
self.model_save_dir = config.model_save_dir
self.log_step = config.log_step
self.sample_step = config.sample_step
self.model_save_step = config.model_save_step
self.build_model()
if self.use_tensorboard:
self.build_tensorboard()
def build_model(self):
self.G = Generator(self.hparams)
self.Interp = InterpLnr(self.hparams)
self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
self.print_network(self.G, 'G')
self.G.to(self.device)
self.Interp.to(self.device)
def print_network(self, model, name):
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model)
print(name)
print("The number of parameters: {}".format(num_params))
def print_optimizer(self, opt, name):
print(opt)
print(name)
def restore_model(self, resume_iters):
print('Loading the trained models from step {}...'.format(resume_iters))
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
g_checkpoint = torch.load(G_path, map_location=lambda storage, loc: storage)
self.G.load_state_dict(g_checkpoint['model'])
self.g_optimizer.load_state_dict(g_checkpoint['optimizer'])
self.g_lr = self.g_optimizer.param_groups[0]['lr']
def build_tensorboard(self):
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(self.log_dir)
def reset_grad(self):
self.g_optimizer.zero_grad()
def encode_context(self):
data_loader = self.vcc_loader
data_iter = iter(data_loader)
start_iters = 0
if self.resume_iters:
print('Resuming ...')
start_iters = self.resume_iters
self.num_iters += self.resume_iters
self.restore_model(self.resume_iters)
keys = ['G/loss_id']
print('Start encoding...')
start_time = time.time()
encoded_audio = {}
# for i in max(range(start_iters, self.num_iters), len(self.vcc_loader)):
print(len(self.vcc_loader))
count = 0
for i, (x_real_org, emb_org, f0_org, len_org, id_org) in enumerate(self.vcc_loader):
# =================================================================================== #
# 1. Send input data to device #
# =================================================================================== #
# x_real_org = x_real_org.to(self.device)
# emb_org = emb_org.to(self.device)
# len_org = len_org.to(self.device)
# f0_org = f0_org.to(self.device)
# =================================================================================== #
# 2. Encode using the generator #
# =================================================================================== #
self.G = self.G.eval()
pad = 8 - ((len_org[0] + 1) % 8)
encode_length = len_org[0] + 1 + pad
print(id_org)
x_real_pad, _ = pad_seq_to_2(x_real_org, encode_length)
# len_org = torch.tensor([val_sub[k][2]]).to(self.device)
f0_org_pad, _ = pad_seq_to_2(f0_org, encode_length) # np.pad(f0_org, (0, 512-len_org[0]), 'constant', constant_values=(0, 0))
assert x_real_pad.shape[1] == f0_org_pad.shape[1]
f0_quantized = quantize_f0_numpy(np.squeeze(f0_org_pad))[0]
f0_onehot = f0_quantized[np.newaxis, :, :]
f0_org_val = torch.from_numpy(f0_onehot).to(self.device)
x_real_pad = torch.from_numpy(x_real_pad).to(self.device)
x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)
code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0, x_real_pad, emb_org)
# code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0_intrp_org, x_real_org, emb_org)
# print(f'content: {code_content}')
encoded_audio[id_org[0]] = code_content
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Audio file[{}/{}]".format(et, i+1, len(self.vcc_loader))
print(log)
count += 1
if count % 100 == 0:
with open(f'assets/encoded-{self.hparams.encode_mode}-{count}.pkl', 'wb') as f:
pickle.dump(encoded_audio, f)
del encoded_audio
encoded_audio = {}
#=====================================================================================================================
def train(self):
# Set data loader.
data_loader = self.vcc_loader
# Fetch fixed inputs for debugging.
data_iter = iter(data_loader)
# Start training from scratch or resume training.
start_iters = 0
if self.resume_iters:
print('Resuming ...')
start_iters = self.resume_iters
self.num_iters += self.resume_iters
self.restore_model(self.resume_iters)
self.print_optimizer(self.g_optimizer, 'G_optimizer')
# Learning rate cache for decaying.
g_lr = self.g_lr
print ('Current learning rates, g_lr: {}.'.format(g_lr))
# Print logs in specified order
keys = ['G/loss_id']
# Start training.
print('Start training...')
start_time = time.time()
for i in range(start_iters, self.num_iters):
# =================================================================================== #
# 1. Preprocess input data #
# =================================================================================== #
# Fetch real images and labels.
try:
x_real_org, emb_org, f0_org, len_org = next(data_iter)
except:
data_iter = iter(data_loader)
x_real_org, emb_org, f0_org, len_org = next(data_iter)
x_real_org = x_real_org.to(self.device)
emb_org = emb_org.to(self.device)
len_org = len_org.to(self.device)
f0_org = f0_org.to(self.device)
# =================================================================================== #
# 2. Train the generator #
# =================================================================================== #
self.G = self.G.train()
# Identity mapping loss
x_f0 = torch.cat((x_real_org, f0_org), dim=-1)
x_f0_intrp = self.Interp(x_f0, len_org)
f0_org_intrp = quantize_f0_torch(x_f0_intrp[:,:,-1])[0]
x_f0_intrp_org = torch.cat((x_f0_intrp[:,:,:-1], f0_org_intrp), dim=-1)
x_identic = self.G(x_f0_intrp_org, x_real_org, emb_org)
g_loss_id = F.mse_loss(x_real_org, x_identic, reduction='mean')
# Backward and optimize.
g_loss = g_loss_id
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss = {}
loss['G/loss_id'] = g_loss_id.item()
# =================================================================================== #
# 4. Miscellaneous #
# =================================================================================== #
# Print out training information.
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
for tag in keys:
log += ", {}: {:.8f}".format(tag, loss[tag])
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.writer.add_scalar(tag, value, i+1)
# Save model checkpoints.
if (i+1) % self.model_save_step == 0:
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
torch.save({'model': self.G.state_dict(),
'optimizer': self.g_optimizer.state_dict()}, G_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir))
# Validation.
if (i+1) % self.sample_step == 0:
self.G = self.G.eval()
with torch.no_grad():
loss_val = []
for val_sub in validation_pt:
emb_org_val = torch.from_numpy(val_sub[1]).to(self.device)
for k in range(2, 3):
x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)
len_org = torch.tensor([val_sub[k][2]]).to(self.device)
f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))
f0_quantized = quantize_f0_numpy(f0_org)[0]
f0_onehot = f0_quantized[np.newaxis, :, :]
f0_org_val = torch.from_numpy(f0_onehot).to(self.device)
x_real_pad = torch.from_numpy(x_real_pad).to(self.device)
x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)
x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)
g_loss_val = F.mse_loss(x_real_pad, x_identic_val, reduction='sum')
loss_val.append(g_loss_val.item())
val_loss = np.mean(loss_val)
print('Validation loss: {}'.format(val_loss))
if self.use_tensorboard:
self.writer.add_scalar('Validation_loss', val_loss, i+1)
# plot test samples
if (i+1) % self.sample_step == 0:
self.G = self.G.eval()
with torch.no_grad():
for val_sub in validation_pt:
emb_org_val = torch.from_numpy(val_sub[1]).to(self.device)
for k in range(2, 3):
x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)
len_org = torch.tensor([val_sub[k][2]]).to(self.device)
f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))
f0_quantized = quantize_f0_numpy(f0_org)[0]
f0_onehot = f0_quantized[np.newaxis, :, :]
f0_org_val = torch.from_numpy(f0_onehot).to(self.device)
x_real_pad = torch.from_numpy(x_real_pad).to(self.device)
x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)
x_f0_F = torch.cat((x_real_pad, torch.zeros_like(f0_org_val)), dim=-1)
x_f0_C = torch.cat((torch.zeros_like(x_real_pad), f0_org_val), dim=-1)
x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)
x_identic_woF = self.G(x_f0_F, x_real_pad, emb_org_val)
x_identic_woR = self.G(x_f0, torch.zeros_like(x_real_pad), emb_org_val)
x_identic_woC = self.G(x_f0_C, x_real_pad, emb_org_val)
melsp_gd_pad = x_real_pad[0].cpu().numpy().T
melsp_out = x_identic_val[0].cpu().numpy().T
melsp_woF = x_identic_woF[0].cpu().numpy().T
melsp_woR = x_identic_woR[0].cpu().numpy().T
melsp_woC = x_identic_woC[0].cpu().numpy().T
min_value = np.min(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))
max_value = np.max(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))
# fig, (ax1,ax2,ax3,ax4,ax5) = plt.subplots(5, 1, sharex=True)
# im1 = ax1.imshow(melsp_gd_pad, aspect='auto', vmin=min_value, vmax=max_value)
# im2 = ax2.imshow(melsp_out, aspect='auto', vmin=min_value, vmax=max_value)
# im3 = ax3.imshow(melsp_woC, aspect='auto', vmin=min_value, vmax=max_value)
# im4 = ax4.imshow(melsp_woR, aspect='auto', vmin=min_value, vmax=max_value)
# im5 = ax5.imshow(melsp_woF, aspect='auto', vmin=min_value, vmax=max_value)
# plt.savefig(f'{self.sample_dir}/{i+1}_{val_sub[0]}_{k}.png', dpi=150)
# plt.close(fig) | true | true |
f7365a1da28790199225cb69fa9ccb4430d35ff0 | 1,133 | py | Python | cms/test_utils/project/placeholderapp/admin.py | foobacca/django-cms | d15586b399b18de49208bca74495a771cdbd494f | [
"BSD-3-Clause"
] | 1 | 2021-04-08T13:49:04.000Z | 2021-04-08T13:49:04.000Z | cms/test_utils/project/placeholderapp/admin.py | foobacca/django-cms | d15586b399b18de49208bca74495a771cdbd494f | [
"BSD-3-Clause"
] | 8 | 2021-06-08T23:36:57.000Z | 2022-03-12T00:49:20.000Z | cms/test_utils/project/placeholderapp/admin.py | foobacca/django-cms | d15586b399b18de49208bca74495a771cdbd494f | [
"BSD-3-Clause"
] | null | null | null | from cms.admin.placeholderadmin import PlaceholderAdmin
from cms.test_utils.project.placeholderapp.models import (Example1, MultilingualExample1, TwoPlaceholderExample)
from django.contrib import admin
from hvad.admin import TranslatableAdmin
class MixinAdmin(admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# silly test that placeholderadmin doesn't fuck stuff up
request = kwargs.pop('request', None)
return super(MixinAdmin, self).formfield_for_dbfield(db_field, request=request, **kwargs)
class ExampleAdmin(PlaceholderAdmin, MixinAdmin):
pass
class TwoPlaceholderExampleAdmin(PlaceholderAdmin, MixinAdmin):
pass
class MultilingualAdmin(TranslatableAdmin, PlaceholderAdmin):
pass
admin.site.register(Example1, ExampleAdmin)
admin.site.register(TwoPlaceholderExample, TwoPlaceholderExampleAdmin)
admin.site.register(MultilingualExample1, MultilingualAdmin)
| 32.371429 | 112 | 0.772286 | from cms.admin.placeholderadmin import PlaceholderAdmin
from cms.test_utils.project.placeholderapp.models import (Example1, MultilingualExample1, TwoPlaceholderExample)
from django.contrib import admin
from hvad.admin import TranslatableAdmin
class MixinAdmin(admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
request = kwargs.pop('request', None)
return super(MixinAdmin, self).formfield_for_dbfield(db_field, request=request, **kwargs)
class ExampleAdmin(PlaceholderAdmin, MixinAdmin):
pass
class TwoPlaceholderExampleAdmin(PlaceholderAdmin, MixinAdmin):
pass
class MultilingualAdmin(TranslatableAdmin, PlaceholderAdmin):
pass
admin.site.register(Example1, ExampleAdmin)
admin.site.register(TwoPlaceholderExample, TwoPlaceholderExampleAdmin)
admin.site.register(MultilingualExample1, MultilingualAdmin)
| true | true |
f7365a4261bf5528b341b9ad8439969db155368a | 591 | py | Python | setup.py | Lanseuo/alasan | 198a340b74429ccab2f4095c39b245936c60ea4c | [
"MIT"
] | null | null | null | setup.py | Lanseuo/alasan | 198a340b74429ccab2f4095c39b245936c60ea4c | [
"MIT"
] | 5 | 2020-05-10T12:03:03.000Z | 2020-05-10T12:04:40.000Z | setup.py | Lanseuo/alasan | 198a340b74429ccab2f4095c39b245936c60ea4c | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="alasan",
version="0.0.1",
author="Lucas Hild",
author_email="contact@lucas-hild.de",
description="Alasan helps you build Alexa skills on AWS Lambda using Python.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Lanseuo/alasan",
packages=find_packages(),
install_requires="click",
entry_points='''
[console_scripts]
alasan=alasan.cli:cli
'''
)
| 26.863636 | 82 | 0.680203 | from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="alasan",
version="0.0.1",
author="Lucas Hild",
author_email="contact@lucas-hild.de",
description="Alasan helps you build Alexa skills on AWS Lambda using Python.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Lanseuo/alasan",
packages=find_packages(),
install_requires="click",
entry_points='''
[console_scripts]
alasan=alasan.cli:cli
'''
)
| true | true |
f7365b4272faa183cad3252dfec1668c82b1770d | 599 | py | Python | var/spack/repos/builtin/packages/perl-math-cdf/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/perl-math-cdf/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/perl-math-cdf/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PerlMathCdf(PerlPackage):
"""Generate probabilities and quantiles from several statistical
probability functions"""
homepage = "https://metacpan.org/pod/Math::CDF"
url = "http://search.cpan.org/CPAN/authors/id/C/CA/CALLAHAN/Math-CDF-0.1.tar.gz"
version('0.1', sha256='7896bf250835ce47dcc813cb8cf9dc576c5455de42e822dcd7d8d3fef2125565')
| 35.235294 | 93 | 0.749583 |
from spack.package import *
class PerlMathCdf(PerlPackage):
homepage = "https://metacpan.org/pod/Math::CDF"
url = "http://search.cpan.org/CPAN/authors/id/C/CA/CALLAHAN/Math-CDF-0.1.tar.gz"
version('0.1', sha256='7896bf250835ce47dcc813cb8cf9dc576c5455de42e822dcd7d8d3fef2125565')
| true | true |
f7365b43e013e39a7186f6896627308675c2098b | 15,826 | py | Python | vyper/context/types/bases.py | erdnaag/vyper | 22bef3a4b4161db18c7831041e20b917984cff83 | [
"Apache-2.0"
] | 1 | 2020-07-04T01:47:26.000Z | 2020-07-04T01:47:26.000Z | vyper/context/types/bases.py | erdnaag/vyper | 22bef3a4b4161db18c7831041e20b917984cff83 | [
"Apache-2.0"
] | null | null | null | vyper/context/types/bases.py | erdnaag/vyper | 22bef3a4b4161db18c7831041e20b917984cff83 | [
"Apache-2.0"
] | null | null | null | import copy
from collections import OrderedDict
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Type, Union
from vyper import ast as vy_ast
from vyper.context.types.abstract import AbstractDataType
from vyper.exceptions import (
CompilerPanic,
ImmutableViolation,
InvalidLiteral,
InvalidOperation,
NamespaceCollision,
StructureException,
UnexpectedNodeType,
UnexpectedValue,
UnknownAttribute,
)
class DataLocation(Enum):
    """Data locations that a variable or value reference may occupy."""

    UNSET = 0  # no location assigned (e.g. literals and other ephemeral values)
    MEMORY = 1
    STORAGE = 2
    CALLDATA = 3  # read-only: writes are rejected (see BaseTypeDefinition.validate_modification)
class BasePrimitive:
    """
    Base class for primitive type classes.

    Primitives are objects that are invoked when applying a type to a variable.
    They must contain a `from_annotation` (and optionally `from_literal`) method
    that returns their equivalent `BaseTypeDefinition` object.

    Attributes
    ----------
    _id : str
        The name of the type.
    _type : BaseTypeDefinition
        The related `BaseTypeDefinition` class generated from this primitive
    _as_array: bool, optional
        If `True`, this type can be used as the base member for an array.
    _valid_literal : Tuple
        A tuple of Vyper ast classes that may be assigned this type.
    """

    _id: str
    _type: Type["BaseTypeDefinition"]
    _valid_literal: Tuple

    @classmethod
    def from_annotation(
        cls,
        node: Union[vy_ast.Name, vy_ast.Call],
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> "BaseTypeDefinition":
        """
        Generate a `BaseTypeDefinition` instance of this type from `AnnAssign.annotation`

        Arguments
        ---------
        node : VyperNode
            Vyper ast node from the `annotation` member of an `AnnAssign` node.

        Returns
        -------
        BaseTypeDefinition
            BaseTypeDefinition related to the primitive that the method was called on.
        """
        if not isinstance(node, vy_ast.Name):
            raise StructureException("Invalid type assignment", node)
        if node.id != cls._id:
            raise UnexpectedValue("Node id does not match type name")
        return cls._type(location, is_immutable, is_public)

    @classmethod
    def from_literal(cls, node: vy_ast.Constant) -> "BaseTypeDefinition":
        """
        Generate a `BaseTypeDefinition` instance of this type from a literal constant.

        This method is called on every primitive class in order to determine
        potential types for a `Constant` AST node.

        Types that may be assigned from literals should include a `_valid_literal`
        attribute, containing a list of AST node classes that may be valid for
        this type. If the `_valid_literal` attribute is not included, the type
        cannot be assigned to a literal.

        Arguments
        ---------
        node : VyperNode
            `Constant` Vyper ast node, or a list or tuple of constants.

        Returns
        -------
        BaseTypeDefinition
            BaseTypeDefinition related to the primitive that the method was called on.
        """
        if not isinstance(node, vy_ast.Constant):
            raise UnexpectedNodeType(f"Attempted to validate a '{node.ast_type}' node.")
        if not isinstance(node, cls._valid_literal):
            raise InvalidLiteral(f"Invalid literal type for {cls.__name__}", node)
        return cls._type()

    @classmethod
    def compare_type(
        cls, other: Union["BaseTypeDefinition", "BasePrimitive", AbstractDataType]
    ) -> bool:
        """
        Compare this type object against another type object.

        Failed comparisons must return `False`, not raise an exception.

        This method is not intended to be called directly. Type comparisons
        are handled by methods in `vyper.context.validation.utils`

        Arguments
        ---------
        other : BaseTypeDefinition
            Another type object to be compared against this one.

        Returns
        -------
        bool
            Indicates if the types are equivalent.
        """
        return isinstance(other, cls._type)

    @classmethod
    def fetch_call_return(cls, node: vy_ast.Call) -> "BaseTypeDefinition":
        """
        Validate a call to this type and return the result.

        This method must raise if the type is not callable, or the call arguments
        are not valid.

        Arguments
        ---------
        node : Call
            Vyper ast node of call action to validate.

        Returns
        -------
        BaseTypeDefinition, optional
            Type generated as a result of the call.
        """
        # base primitives are not callable; callable types override this method
        raise StructureException("Type is not callable", node)

    @classmethod
    def get_index_type(cls, node: vy_ast.Index) -> None:
        # always raises - do not implement in inherited classes
        raise StructureException("Types cannot be indexed", node)

    @classmethod
    def get_member(cls, key: str, node: vy_ast.Attribute) -> None:
        # always raises - do not implement in inherited classes
        raise StructureException("Types do not have members", node)

    @classmethod
    def validate_modification(cls, node: Union[vy_ast.Assign, vy_ast.AugAssign]) -> None:
        # always raises - do not implement in inherited classes
        raise InvalidOperation("Cannot assign to a type", node)
class BaseTypeDefinition:
"""
Base class for type definition classes.
Type definitions are objects that represent the type of a specific object
within a contract. They are usually derived from a `BasePrimitive` counterpart.
Class Attributes
-----------------
_id : str
The name of the type.
_is_callable : bool, optional
If `True`, attempts to assign this value without calling it will raise
a more expressive error message recommending that the user performs a
function call.
Object Attributes
-----------------
is_immutable : bool, optional
If `True`, the value of this object cannot be modified after assignment.
"""
def __init__(
self,
location: DataLocation = DataLocation.UNSET,
is_immutable: bool = False,
is_public: bool = False,
) -> None:
self.location = location
self.is_immutable = is_immutable
self.is_public = is_public
def from_annotation(self, node: vy_ast.VyperNode, **kwargs: Any) -> None:
# always raises, user should have used a primitive
raise StructureException("Value is not a type", node)
def compare_type(
self, other: Union["BaseTypeDefinition", BasePrimitive, AbstractDataType]
) -> bool:
"""
Compare this type object against another type object.
Failed comparisons must return `False`, not raise an exception.
This method is not intended to be called directly. Type comparisons
are handled by methods in `vyper.context.validation.utils`
Arguments
---------
other : BaseTypeDefinition
Another type object to be compared against this one.
Returns
-------
bool
Indicates if the types are equivalent.
"""
return isinstance(other, type(self))
def validate_numeric_op(
self, node: Union[vy_ast.UnaryOp, vy_ast.BinOp, vy_ast.AugAssign]
) -> None:
"""
Validate a numeric operation for this type.
Arguments
---------
node : UnaryOp | BinOp | AugAssign
Vyper ast node of the numeric operation to be validated.
Returns
-------
None. A failed validation must raise an exception.
"""
raise InvalidOperation(f"Cannot perform {node.op.description} on {self}", node)
def validate_boolean_op(self, node: vy_ast.BoolOp) -> None:
"""
Validate a boolean operation for this type.
Arguments
---------
node : BoolOp
Vyper ast node of the boolean operation to be validated.
Returns
-------
None. A failed validation must raise an exception.
"""
raise InvalidOperation(f"Invalid type for operand: {self}", node)
def validate_comparator(self, node: vy_ast.Compare) -> None:
"""
Validate a comparator for this type.
Arguments
---------
node : Compare
Vyper ast node of the comparator to be validated.
Returns
-------
None. A failed validation must raise an exception.
"""
if not isinstance(node.op, (vy_ast.Eq, vy_ast.NotEq)):
raise InvalidOperation(
f"Cannot perform {node.op.description} comparison on {self}", node
)
def validate_implements(self, node: vy_ast.AnnAssign) -> None:
"""
Validate an implements statement.
This method is unique to user-defined interfaces. It should not be
included in other types.
Arguments
---------
node : AnnAssign
Vyper ast node of the implements statement being validated.
Returns
-------
None. A failed validation must raise an exception.
"""
raise StructureException("Value is not an interface", node)
def fetch_call_return(self, node: vy_ast.Call) -> Union["BaseTypeDefinition", None]:
"""
Validate a call to this value and return the result.
This method must raise if the value is not callable, or the call arguments
are not valid.
Arguments
---------
node : Call
Vyper ast node of call action to validate.
Returns
-------
BaseTypeDefinition, optional
Type generated as a result of the call.
"""
raise StructureException("Value is not callable", node)
def get_index_type(self, node: vy_ast.Index) -> "BaseTypeDefinition":
"""
Validate an index reference and return the given type at the index.
Arguments
---------
node : Index
Vyper ast node from the `slice` member of a Subscript node.
Returns
-------
BaseTypeDefinition
Type object for value at the given index.
"""
raise StructureException(f"Type '{self}' does not support indexing", node)
def get_member(self, key: str, node: vy_ast.Attribute) -> "BaseTypeDefinition":
"""
Validate an attribute reference and return the given type for the member.
Arguments
---------
key : str
Name of the member being accessed.
node: Attribute
Vyper ast Attribute node representing the member being accessed.
Returns
-------
BaseTypeDefinition
A type object for the value of the given member. Raises if the member
does not exist for the given type.
"""
raise StructureException(f"Type '{self}' does not support members", node)
def validate_modification(self, node: Union[vy_ast.Assign, vy_ast.AugAssign]) -> None:
"""
Validate an attempt to modify this value.
Raises if the value is a constant or involves an invalid operation.
Arguments
---------
node : Assign | AugAssign
Vyper ast node of the modifying action.
"""
if self.location == DataLocation.CALLDATA:
raise ImmutableViolation("Cannot write to calldata", node)
if self.is_immutable:
raise ImmutableViolation("Immutable value cannot be written to", node)
if isinstance(node, vy_ast.AugAssign):
self.validate_numeric_op(node)
    def get_signature(self) -> Tuple[Tuple, Optional["BaseTypeDefinition"]]:
        # Abstract: concrete subclasses must describe their public getter
        # signature as (argument types, return type).
        raise CompilerPanic("Method must be implemented by the inherited class")
def compare_signature(self, other: "BaseTypeDefinition") -> bool:
"""
Compare the signature of this type with another type.
Used when determining if an interface has been implemented. This method
should not be directly implemented by any inherited classes.
"""
if not self.is_public:
return False
arguments, return_type = self.get_signature()
other_arguments, other_return_type = other.get_signature()
if len(arguments) != len(other_arguments):
return False
for a, b in zip(arguments, other_arguments):
if not a.compare_type(b):
return False
if return_type and not return_type.compare_type(other_return_type): # type: ignore
return False
return True
class ValueTypeDefinition(BaseTypeDefinition):
    """
    Base class for types representing a single value.

    Class attributes
    ----------------
    _valid_literal: VyperNode | Tuple
        A vyper ast class or tuple of ast classes that can represent valid literals
        for the given type. Including this attribute will allow literal values to be
        assigned this type.
    """

    def __repr__(self):
        return self._id

    def get_signature(self):
        # A plain value's getter takes no arguments and returns the value itself.
        return (), self
class MemberTypeDefinition(ValueTypeDefinition):
    """
    Base class for value types that expose named members.

    Class attributes
    ----------------
    _type_members : Dict[str, BaseType]
        Members shared by every value of this type (class-level template).

    Object attributes
    -----------------
    members : OrderedDict[str, BaseType]
        Members specific to this instance.
    """

    _type_members: Dict

    def __init__(
        self,
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> None:
        super().__init__(location, is_immutable, is_public)
        self.members: OrderedDict = OrderedDict()

    def add_member(self, name: str, type_: BaseTypeDefinition) -> None:
        # Reject collisions with both instance-level and type-level members.
        if name in self.members or name in getattr(self, "_type_members", []):
            raise NamespaceCollision(f"Member {name} already exists in {self}")
        self.members[name] = type_

    def get_member(self, key: str, node: vy_ast.VyperNode) -> BaseTypeDefinition:
        try:
            return self.members[key]
        except KeyError:
            pass
        if key in getattr(self, "_type_members", []):
            # Deep-copy the shared template so per-instance location and
            # mutability can be applied without mutating the class attribute.
            member = copy.deepcopy(self._type_members[key])
            member.location = self.location
            member.is_immutable = self.is_immutable
            return member
        raise UnknownAttribute(f"{self} has no member '{key}'", node)

    def __repr__(self):
        return f"{self._id}"
class IndexableTypeDefinition(BaseTypeDefinition):
    """
    Base class for compound types addressed by a key, e.g. arrays and mappings.

    Attributes
    ----------
    key_type: BaseType
        Type of the index used to address this object.
    value_type : BaseType
        Type of the value(s) stored in this object.
    _id : str
        Name of the type.
    """

    def __init__(
        self,
        value_type: BaseTypeDefinition,
        key_type: BaseTypeDefinition,
        _id: str,
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> None:
        super().__init__(location, is_immutable, is_public)
        self.value_type = value_type
        self.key_type = key_type
        self._id = _id

    def get_signature(self) -> Tuple[Tuple, Optional[BaseTypeDefinition]]:
        # The public getter takes this container's key first, followed by any
        # keys the nested value type requires; the return type is that of the
        # innermost value.
        inner_args, return_type = self.value_type.get_signature()
        return (self.key_type, *inner_args), return_type
| 32.166667 | 91 | 0.625174 | import copy
from collections import OrderedDict
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Type, Union
from vyper import ast as vy_ast
from vyper.context.types.abstract import AbstractDataType
from vyper.exceptions import (
CompilerPanic,
ImmutableViolation,
InvalidLiteral,
InvalidOperation,
NamespaceCollision,
StructureException,
UnexpectedNodeType,
UnexpectedValue,
UnknownAttribute,
)
class DataLocation(Enum):
    # Enumerates where a value physically lives in the execution context.
    UNSET = 0
    MEMORY = 1
    STORAGE = 2
    CALLDATA = 3  # read-only; writes are rejected in validate_modification
class BasePrimitive:
    """
    Base class for primitive type classes: the objects used when validating
    annotations and literals, as opposed to the definitions assigned to values.

    Class attributes
    ----------------
    _id : str
        Name of the type as written in an annotation.
    _type : Type[BaseTypeDefinition]
        Definition class produced for values of this type.
    _valid_literal : Tuple
        Vyper ast class(es) that may represent literals of this type.
    """

    _id: str
    _type: Type["BaseTypeDefinition"]
    _valid_literal: Tuple

    @classmethod
    def from_annotation(
        cls,
        node: Union[vy_ast.Name, vy_ast.Call],
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> "BaseTypeDefinition":
        # Build a type definition from an annotation ast node.
        if not isinstance(node, vy_ast.Name):
            raise StructureException("Invalid type assignment", node)
        if node.id != cls._id:
            raise UnexpectedValue("Node id does not match type name")
        return cls._type(location, is_immutable, is_public)

    @classmethod
    def from_literal(cls, node: vy_ast.Constant) -> "BaseTypeDefinition":
        # Build a type definition from a literal constant node.
        if not isinstance(node, vy_ast.Constant):
            raise UnexpectedNodeType(f"Attempted to validate a '{node.ast_type}' node.")
        if not isinstance(node, cls._valid_literal):
            raise InvalidLiteral(f"Invalid literal type for {cls.__name__}", node)
        return cls._type()

    @classmethod
    def compare_type(
        cls, other: Union["BaseTypeDefinition", "BasePrimitive", AbstractDataType]
    ) -> bool:
        # A primitive matches any definition instance it produces.
        return isinstance(other, cls._type)

    # Fix: the first argument of a classmethod is conventionally `cls`
    # (PEP 8); the next two methods used `self`, inconsistent with the
    # other classmethods in this class. Behavior is unchanged.
    @classmethod
    def fetch_call_return(cls, node: vy_ast.Call) -> "BaseTypeDefinition":
        raise StructureException("Type is not callable", node)

    @classmethod
    def get_index_type(cls, node: vy_ast.Index) -> None:
        raise StructureException("Types cannot be indexed", node)

    @classmethod
    def get_member(cls, key: str, node: vy_ast.Attribute) -> None:
        raise StructureException("Types do not have members", node)

    @classmethod
    def validate_modification(cls, node: Union[vy_ast.Assign, vy_ast.AugAssign]) -> None:
        raise InvalidOperation("Cannot assign to a type", node)
class BaseTypeDefinition:
    # Base class for type definition objects: the types assigned to
    # in-language values (storage variables, locals, literals).

    def __init__(
        self,
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> None:
        # Where the value lives, whether it may be rewritten, and whether it
        # is exposed via a public getter.
        self.location = location
        self.is_immutable = is_immutable
        self.is_public = is_public

    def from_annotation(self, node: vy_ast.VyperNode, **kwargs: Any) -> None:
        # A definition is a concrete value, not a type: using it in an
        # annotation always fails.
        raise StructureException("Value is not a type", node)

    def compare_type(
        self, other: Union["BaseTypeDefinition", BasePrimitive, AbstractDataType]
    ) -> bool:
        # Default comparison: same definition class means same type.
        return isinstance(other, type(self))

    def validate_numeric_op(
        self, node: Union[vy_ast.UnaryOp, vy_ast.BinOp, vy_ast.AugAssign]
    ) -> None:
        # Overridden by numeric types; everything else rejects arithmetic.
        raise InvalidOperation(f"Cannot perform {node.op.description} on {self}", node)

    def validate_boolean_op(self, node: vy_ast.BoolOp) -> None:
        # Overridden by boolean-capable types.
        raise InvalidOperation(f"Invalid type for operand: {self}", node)

    def validate_comparator(self, node: vy_ast.Compare) -> None:
        # By default only equality / inequality comparisons are permitted.
        if not isinstance(node.op, (vy_ast.Eq, vy_ast.NotEq)):
            raise InvalidOperation(
                f"Cannot perform {node.op.description} comparison on {self}", node
            )

    def validate_implements(self, node: vy_ast.AnnAssign) -> None:
        # Overridden by interface types; the base implementation rejects.
        raise StructureException("Value is not an interface", node)

    def fetch_call_return(self, node: vy_ast.Call) -> Union["BaseTypeDefinition", None]:
        # Overridden by callable types; returns the type of the call result.
        raise StructureException("Value is not callable", node)

    def get_index_type(self, node: vy_ast.Index) -> "BaseTypeDefinition":
        # Overridden by indexable types (see IndexableTypeDefinition).
        raise StructureException(f"Type '{self}' does not support indexing", node)

    def get_member(self, key: str, node: vy_ast.Attribute) -> "BaseTypeDefinition":
        # Overridden by types with named members (see MemberTypeDefinition).
        raise StructureException(f"Type '{self}' does not support members", node)

    def validate_modification(self, node: Union[vy_ast.Assign, vy_ast.AugAssign]) -> None:
        # Writes to calldata or to immutable values are always rejected.
        if self.location == DataLocation.CALLDATA:
            raise ImmutableViolation("Cannot write to calldata", node)
        if self.is_immutable:
            raise ImmutableViolation("Immutable value cannot be written to", node)
        # `x += y` also implies a numeric operation on this type.
        if isinstance(node, vy_ast.AugAssign):
            self.validate_numeric_op(node)

    def get_signature(self) -> Tuple[Tuple, Optional["BaseTypeDefinition"]]:
        # Abstract: subclasses describe their public getter signature.
        raise CompilerPanic("Method must be implemented by the inherited class")

    def compare_signature(self, other: "BaseTypeDefinition") -> bool:
        # Signature comparison used for interface-implementation checks;
        # only public values participate.
        if not self.is_public:
            return False
        arguments, return_type = self.get_signature()
        other_arguments, other_return_type = other.get_signature()
        if len(arguments) != len(other_arguments):
            return False
        for a, b in zip(arguments, other_arguments):
            if not a.compare_type(b):
                return False
        if return_type and not return_type.compare_type(other_return_type):
            return False
        return True
class ValueTypeDefinition(BaseTypeDefinition):
    # Base class for types representing a single value.

    def __repr__(self):
        return self._id

    def get_signature(self):
        # A plain value's getter takes no arguments and returns the value itself.
        return (), self
class MemberTypeDefinition(ValueTypeDefinition):
    # Base class for types that have accessible members.

    # Members common to *all* values of this type (class-level template).
    _type_members: Dict

    def __init__(
        self,
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> None:
        super().__init__(location, is_immutable, is_public)
        # Members specific to this instance.
        self.members: OrderedDict = OrderedDict()

    def add_member(self, name: str, type_: BaseTypeDefinition) -> None:
        # Reject collisions with both instance-level and type-level members.
        if name in self.members:
            raise NamespaceCollision(f"Member {name} already exists in {self}")
        if name in getattr(self, "_type_members", []):
            raise NamespaceCollision(f"Member {name} already exists in {self}")
        self.members[name] = type_

    def get_member(self, key: str, node: vy_ast.VyperNode) -> BaseTypeDefinition:
        if key in self.members:
            return self.members[key]
        elif key in getattr(self, "_type_members", []):
            # Deep-copy the shared template so per-instance location and
            # mutability can be applied without mutating the class attribute.
            type_ = copy.deepcopy(self._type_members[key])
            type_.location = self.location
            type_.is_immutable = self.is_immutable
            return type_
        raise UnknownAttribute(f"{self} has no member '{key}'", node)

    def __repr__(self):
        return f"{self._id}"
class IndexableTypeDefinition(BaseTypeDefinition):
    # Base class for indexable types such as arrays and mappings.

    def __init__(
        self,
        value_type: BaseTypeDefinition,
        key_type: BaseTypeDefinition,
        _id: str,
        location: DataLocation = DataLocation.UNSET,
        is_immutable: bool = False,
        is_public: bool = False,
    ) -> None:
        super().__init__(location, is_immutable, is_public)
        self.value_type = value_type  # type of the contained value(s)
        self.key_type = key_type      # type of the index
        self._id = _id

    def get_signature(self) -> Tuple[Tuple, Optional[BaseTypeDefinition]]:
        # Prepend this container's key to the nested value type's getter args.
        new_args, return_type = self.value_type.get_signature()
        return (self.key_type,) + new_args, return_type
| true | true |
f7365b6614a2b2f2a73eeb08494f8f98110af4db | 9,560 | py | Python | mars/dataframe/datastore/tests/test_datastore_execute.py | snsnlou/mars | 6b8eec162eccc8bb980a98ca2cf1e6a4b866d302 | [
"Apache-2.0"
] | 1 | 2021-11-30T12:07:21.000Z | 2021-11-30T12:07:21.000Z | mars/dataframe/datastore/tests/test_datastore_execute.py | snsnlou/mars | 6b8eec162eccc8bb980a98ca2cf1e6a4b866d302 | [
"Apache-2.0"
] | null | null | null | mars/dataframe/datastore/tests/test_datastore_execute.py | snsnlou/mars | 6b8eec162eccc8bb980a98ca2cf1e6a4b866d302 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
import mars.dataframe as md
from mars.config import option_context
from mars.dataframe import DataFrame
from mars.deploy.local.core import new_cluster
from mars.session import new_session
from mars.tests.core import TestBase, flaky
try:
import vineyard
except ImportError:
vineyard = None
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
try:
import pyarrow as pa
except ImportError:
pa = None
try:
import fastparquet
except ImportError:
fastparquet = None
_exec_timeout = 120 if 'CI' in os.environ else -1
class Test(TestBase):
    """Execution tests for DataFrame/Series data-store operations:
    to_csv, to_sql, to_vineyard and to_parquet."""

    def setUp(self):
        super().setUp()
        # Shared test context and executor used by every test below.
        self.ctx, self.executor = self._create_test_context()

    def testToCSVExecution(self):
        index = pd.RangeIndex(100, 0, -1, name='index')
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.random.choice(['a', 'b', 'c'], (100,)),
            'col3': np.arange(100)
        }, index=index)
        df = DataFrame(raw, chunk_size=33)

        with tempfile.TemporaryDirectory() as base_path:
            # DATAFRAME TESTS
            # test one file with dataframe
            path = os.path.join(base_path, 'out.csv')
            r = df.to_csv(path)
            self.executor.execute_dataframe(r)
            result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw)

            # test multi files with dataframe
            # (100 rows / chunk_size 33 -> 4 output files, one per chunk)
            path = os.path.join(base_path, 'out-*.csv')
            r = df.to_csv(path)
            self.executor.execute_dataframe(r)
            dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
                               dtype=raw.dtypes.to_dict())
                   for i in range(4)]
            result = pd.concat(dfs, axis=0)
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw)
            pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.iloc[33: 66])

            with self.ctx:
                # test df with unknown shape
                df2 = DataFrame(raw, chunk_size=(50, 2))
                df2 = df2[df2['col1'] < 1]
                path2 = os.path.join(base_path, 'out2.csv')
                r = df2.to_csv(path2)
                self.executor.execute_dataframes([r])
                result = pd.read_csv(path2, dtype=raw.dtypes.to_dict())
                result.set_index('index', inplace=True)
                pd.testing.assert_frame_equal(result, raw)

            # SERIES TESTS
            series = md.Series(raw.col1, chunk_size=33)

            # test one file with series
            path = os.path.join(base_path, 'out.csv')
            r = series.to_csv(path)
            self.executor.execute_dataframe(r)
            result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw.col1.to_frame())

            # test multi files with series
            path = os.path.join(base_path, 'out-*.csv')
            r = series.to_csv(path)
            self.executor.execute_dataframe(r)
            dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
                               dtype=raw.dtypes.to_dict())
                   for i in range(4)]
            result = pd.concat(dfs, axis=0)
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw.col1.to_frame())
            pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.col1.to_frame().iloc[33: 66])

    @unittest.skipIf(sqlalchemy is None, 'sqlalchemy not installed')
    def testToSQL(self):
        index = pd.RangeIndex(100, 0, -1, name='index')
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.random.choice(['a', 'b', 'c'], (100,)),
            'col3': np.arange(100).astype('int64'),
        }, index=index)

        with tempfile.TemporaryDirectory() as d:
            table_name1 = 'test_table'
            table_name2 = 'test_table2'
            uri = 'sqlite:///' + os.path.join(d, 'test.db')
            engine = sqlalchemy.create_engine(uri)

            # test write dataframe
            df = DataFrame(raw, chunk_size=33)
            r = df.to_sql(table_name1, con=engine)
            self.executor.execute_dataframe(r)
            written = pd.read_sql(table_name1, con=engine, index_col='index') \
                .sort_index(ascending=False)
            pd.testing.assert_frame_equal(raw, written)

            # test write with existing table
            with self.assertRaises(ValueError):
                df.to_sql(table_name1, con=uri).execute()

            # test write series
            series = md.Series(raw.col1, chunk_size=33)
            with engine.connect() as conn:
                r = series.to_sql(table_name2, con=conn)
                self.executor.execute_dataframe(r)
            written = pd.read_sql(table_name2, con=engine, index_col='index') \
                .sort_index(ascending=False)
            pd.testing.assert_frame_equal(raw.col1.to_frame(), written)

    @unittest.skipIf(vineyard is None, 'vineyard not installed')
    @flaky(max_runs=3)
    def testToVineyard(self):
        def run_with_given_session(session, **kw):
            ipc_socket = os.environ.get('VINEYARD_IPC_SOCKET', '/tmp/vineyard/vineyard.sock')
            with option_context({'vineyard.socket': ipc_socket}):
                # Round-trip a small dataframe through vineyard shared memory.
                df1 = DataFrame(pd.DataFrame(np.arange(12).reshape(3, 4), columns=['a', 'b', 'c', 'd']),
                                chunk_size=2)
                object_id = df1.to_vineyard().execute(session=session, **kw).fetch(session=session)
                df2 = md.from_vineyard(object_id)

                df1_value = df1.execute(session=session, **kw).fetch(session=session)
                df2_value = df2.execute(session=session, **kw).fetch(session=session)
                pd.testing.assert_frame_equal(
                    df1_value.reset_index(drop=True), df2_value.reset_index(drop=True))

        with new_session().as_default() as session:
            run_with_given_session(session)

        with new_cluster(scheduler_n_process=2, worker_n_process=2,
                         shared_memory='20M', web=False) as cluster:
            with new_session(cluster.endpoint).as_default() as session:
                run_with_given_session(session, timeout=_exec_timeout)

    @unittest.skipIf(pa is None, 'pyarrow not installed')
    def testToParquetArrowExecution(self):
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.arange(100),
            'col3': np.random.choice(['a', 'b', 'c'], (100,)),
        })
        df = DataFrame(raw, chunk_size=33)

        with tempfile.TemporaryDirectory() as base_path:
            # DATAFRAME TESTS
            path = os.path.join(base_path, 'out-*.parquet')
            r = df.to_parquet(path)
            self.executor.execute_dataframe(r)

            read_df = md.read_parquet(path)
            result = self.executor.execute_dataframe(read_df, concat=True)[0]
            result = result.sort_index()
            pd.testing.assert_frame_equal(result, raw)

            read_df = md.read_parquet(path)
            result = self.executor.execute_dataframe(read_df, concat=True)[0]
            result = result.sort_index()
            pd.testing.assert_frame_equal(result, raw)

            # test read_parquet then to_parquet
            read_df = md.read_parquet(path)
            r = read_df.to_parquet(path)
            self.executor.execute_dataframes([r])

            # test partition_cols
            path = os.path.join(base_path, 'out-partitioned')
            r = df.to_parquet(path, partition_cols=['col3'])
            self.executor.execute_dataframe(r)

            read_df = md.read_parquet(path)
            result = self.executor.execute_dataframe(read_df, concat=True)[0]
            # partition columns come back as categoricals; normalize for compare
            result['col3'] = result['col3'].astype('object')
            pd.testing.assert_frame_equal(result.sort_values('col1').reset_index(drop=True),
                                          raw.sort_values('col1').reset_index(drop=True))

    @unittest.skipIf(fastparquet is None, 'fastparquet not installed')
    def testToParquetFastParquetExecution(self):
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.arange(100),
            'col3': np.random.choice(['a', 'b', 'c'], (100,)),
        })
        df = DataFrame(raw, chunk_size=33)

        with tempfile.TemporaryDirectory() as base_path:
            # test fastparquet
            path = os.path.join(base_path, 'out-fastparquet-*.parquet')
            r = df.to_parquet(path, engine='fastparquet', compression='gzip')
            self.executor.execute_dataframe(r)
| 39.341564 | 104 | 0.598954 |
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
import mars.dataframe as md
from mars.config import option_context
from mars.dataframe import DataFrame
from mars.deploy.local.core import new_cluster
from mars.session import new_session
from mars.tests.core import TestBase, flaky
try:
import vineyard
except ImportError:
vineyard = None
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
try:
import pyarrow as pa
except ImportError:
pa = None
try:
import fastparquet
except ImportError:
fastparquet = None
_exec_timeout = 120 if 'CI' in os.environ else -1
class Test(TestBase):
    """Execution tests for DataFrame/Series data-store operations:
    to_csv, to_sql, to_vineyard and to_parquet."""

    def setUp(self):
        super().setUp()
        # Shared test context and executor used by every test below.
        self.ctx, self.executor = self._create_test_context()

    def testToCSVExecution(self):
        index = pd.RangeIndex(100, 0, -1, name='index')
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.random.choice(['a', 'b', 'c'], (100,)),
            'col3': np.arange(100)
        }, index=index)
        df = DataFrame(raw, chunk_size=33)

        with tempfile.TemporaryDirectory() as base_path:
            # single output file for the whole dataframe
            path = os.path.join(base_path, 'out.csv')
            r = df.to_csv(path)
            self.executor.execute_dataframe(r)
            result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw)

            # one output file per chunk (100 rows / chunk_size 33 -> 4 files)
            path = os.path.join(base_path, 'out-*.csv')
            r = df.to_csv(path)
            self.executor.execute_dataframe(r)
            dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
                               dtype=raw.dtypes.to_dict())
                   for i in range(4)]
            result = pd.concat(dfs, axis=0)
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw)
            pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.iloc[33: 66])

            with self.ctx:
                # dataframe with unknown shape (filtered)
                df2 = DataFrame(raw, chunk_size=(50, 2))
                df2 = df2[df2['col1'] < 1]
                path2 = os.path.join(base_path, 'out2.csv')
                r = df2.to_csv(path2)
                self.executor.execute_dataframes([r])
                result = pd.read_csv(path2, dtype=raw.dtypes.to_dict())
                result.set_index('index', inplace=True)
                pd.testing.assert_frame_equal(result, raw)

            # same checks for a Series
            series = md.Series(raw.col1, chunk_size=33)

            path = os.path.join(base_path, 'out.csv')
            r = series.to_csv(path)
            self.executor.execute_dataframe(r)
            result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw.col1.to_frame())

            path = os.path.join(base_path, 'out-*.csv')
            r = series.to_csv(path)
            self.executor.execute_dataframe(r)
            dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
                               dtype=raw.dtypes.to_dict())
                   for i in range(4)]
            result = pd.concat(dfs, axis=0)
            result.set_index('index', inplace=True)
            pd.testing.assert_frame_equal(result, raw.col1.to_frame())
            pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.col1.to_frame().iloc[33: 66])

    @unittest.skipIf(sqlalchemy is None, 'sqlalchemy not installed')
    def testToSQL(self):
        index = pd.RangeIndex(100, 0, -1, name='index')
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.random.choice(['a', 'b', 'c'], (100,)),
            'col3': np.arange(100).astype('int64'),
        }, index=index)

        with tempfile.TemporaryDirectory() as d:
            table_name1 = 'test_table'
            table_name2 = 'test_table2'
            uri = 'sqlite:///' + os.path.join(d, 'test.db')
            engine = sqlalchemy.create_engine(uri)

            # write a dataframe, then read it back through sqlalchemy
            df = DataFrame(raw, chunk_size=33)
            r = df.to_sql(table_name1, con=engine)
            self.executor.execute_dataframe(r)
            written = pd.read_sql(table_name1, con=engine, index_col='index') \
                .sort_index(ascending=False)
            pd.testing.assert_frame_equal(raw, written)

            # writing to an existing table must fail
            with self.assertRaises(ValueError):
                df.to_sql(table_name1, con=uri).execute()

            # write a series through an explicit connection
            series = md.Series(raw.col1, chunk_size=33)
            with engine.connect() as conn:
                r = series.to_sql(table_name2, con=conn)
                self.executor.execute_dataframe(r)
            written = pd.read_sql(table_name2, con=engine, index_col='index') \
                .sort_index(ascending=False)
            pd.testing.assert_frame_equal(raw.col1.to_frame(), written)

    @unittest.skipIf(vineyard is None, 'vineyard not installed')
    @flaky(max_runs=3)
    def testToVineyard(self):
        def run_with_given_session(session, **kw):
            ipc_socket = os.environ.get('VINEYARD_IPC_SOCKET', '/tmp/vineyard/vineyard.sock')
            with option_context({'vineyard.socket': ipc_socket}):
                # Round-trip a small dataframe through vineyard shared memory.
                df1 = DataFrame(pd.DataFrame(np.arange(12).reshape(3, 4), columns=['a', 'b', 'c', 'd']),
                                chunk_size=2)
                object_id = df1.to_vineyard().execute(session=session, **kw).fetch(session=session)
                df2 = md.from_vineyard(object_id)

                df1_value = df1.execute(session=session, **kw).fetch(session=session)
                df2_value = df2.execute(session=session, **kw).fetch(session=session)
                pd.testing.assert_frame_equal(
                    df1_value.reset_index(drop=True), df2_value.reset_index(drop=True))

        with new_session().as_default() as session:
            run_with_given_session(session)

        with new_cluster(scheduler_n_process=2, worker_n_process=2,
                         shared_memory='20M', web=False) as cluster:
            with new_session(cluster.endpoint).as_default() as session:
                run_with_given_session(session, timeout=_exec_timeout)

    @unittest.skipIf(pa is None, 'pyarrow not installed')
    def testToParquetArrowExecution(self):
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.arange(100),
            'col3': np.random.choice(['a', 'b', 'c'], (100,)),
        })
        df = DataFrame(raw, chunk_size=33)

        with tempfile.TemporaryDirectory() as base_path:
            path = os.path.join(base_path, 'out-*.parquet')
            r = df.to_parquet(path)
            self.executor.execute_dataframe(r)

            read_df = md.read_parquet(path)
            result = self.executor.execute_dataframe(read_df, concat=True)[0]
            result = result.sort_index()
            pd.testing.assert_frame_equal(result, raw)

            read_df = md.read_parquet(path)
            result = self.executor.execute_dataframe(read_df, concat=True)[0]
            result = result.sort_index()
            pd.testing.assert_frame_equal(result, raw)

            # read_parquet followed by to_parquet
            read_df = md.read_parquet(path)
            r = read_df.to_parquet(path)
            self.executor.execute_dataframes([r])

            # partitioned output
            path = os.path.join(base_path, 'out-partitioned')
            r = df.to_parquet(path, partition_cols=['col3'])
            self.executor.execute_dataframe(r)

            read_df = md.read_parquet(path)
            result = self.executor.execute_dataframe(read_df, concat=True)[0]
            # partition columns come back as categoricals; normalize for compare
            result['col3'] = result['col3'].astype('object')
            pd.testing.assert_frame_equal(result.sort_values('col1').reset_index(drop=True),
                                          raw.sort_values('col1').reset_index(drop=True))

    @unittest.skipIf(fastparquet is None, 'fastparquet not installed')
    def testToParquetFastParquetExecution(self):
        raw = pd.DataFrame({
            'col1': np.random.rand(100),
            'col2': np.arange(100),
            'col3': np.random.choice(['a', 'b', 'c'], (100,)),
        })
        df = DataFrame(raw, chunk_size=33)

        with tempfile.TemporaryDirectory() as base_path:
            # fastparquet engine with gzip compression
            path = os.path.join(base_path, 'out-fastparquet-*.parquet')
            r = df.to_parquet(path, engine='fastparquet', compression='gzip')
            self.executor.execute_dataframe(r)
| true | true |
f7365c6d507efee3fdb9f656365455527ac25a4f | 5,172 | py | Python | setup.py | Eothred/pymad | f560239325452adbb1d37b6408f275ac03674069 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2015-01-21T07:36:11.000Z | 2020-05-29T12:18:33.000Z | setup.py | Eothred/pymad | f560239325452adbb1d37b6408f275ac03674069 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2015-03-22T02:24:57.000Z | 2015-10-15T15:50:51.000Z | setup.py | Eothred/pymad | f560239325452adbb1d37b6408f275ac03674069 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-02-08T16:55:01.000Z | 2017-02-08T16:55:01.000Z | #-------------------------------------------------------------------------------
# This file is part of PyMad.
#
# Copyright (c) 2011, CERN. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# Make sure setuptools is available. NOTE: the try/except hack is required to
# make installation work with pip: If an older version of setuptools is
# already imported, `use_setuptools()` will just exit the current process.
try:
    import pkg_resources  # present whenever setuptools is already installed
except ImportError:
    # Bootstrap setuptools via the bundled ez_setup helper, then continue.
    from ez_setup import use_setuptools
    use_setuptools()
from setuptools import setup, Extension
from distutils.util import get_platform
import sys
from os import path
# Version of pymad (major,minor):
PYMADVERSION=(0, 9)
# setuptools.Extension automatically converts all '.pyx' extensions to '.c'
# extensions if detecting that neither Cython nor Pyrex is available. Early
# versions of setuptools don't know about Cython. Since we don't use Pyrex
# in this module, this leads to problems in the two cases where Cython is
# available and Pyrex is not or vice versa. Therefore, setuptools.Extension
# needs to be patched to match our needs:
try:
    # Prefer the real Cython compiler when it is installed.
    from Cython.Build import cythonize
except ImportError:
    def cythonize(extensions):
        """Fallback: substitute each '.pyx' source with its pre-generated
        '.c' file, failing loudly when that file is missing."""
        def _c_source(name):
            if name.endswith('.pyx'):
                return name[:-4] + '.c'
            return name
        for ext in extensions:
            ext.sources = [_c_source(src) for src in ext.sources]
            missing_sources = [s for s in ext.sources if not path.exists(s)]
            if missing_sources:
                raise OSError(('Missing source file: {0[0]!r}. '
                               'Install Cython to resolve this problem.')
                              .format(missing_sources))
        return extensions
else:
    orig_Extension = Extension
    class Extension(orig_Extension):
        """setuptools Extension that keeps '.pyx' sources untouched,
        leaving the conversion to Cython itself."""
        def __init__(self, name, sources, *args, **kwargs):
            orig_Extension.__init__(self, name, sources, *args, **kwargs)
            # restore the original source list that setuptools may have rewritten
            self.sources = sources
# Let's just use the default system headers:
include_dirs = []
library_dirs = []

# Parse command line option: --madxdir=/path/to/madxinstallation. We could
# use build_ext.user_options instead, but then the --madxdir argument can
# be passed only to the 'build_ext' command, not to 'build' or 'install',
# which is a minor nuisance.
# NOTE: iterate over a copy (sys.argv[:]) because matching entries are
# removed from sys.argv inside the loop.
for arg in sys.argv[:]:
    if arg.startswith('--madxdir='):
        sys.argv.remove(arg)
        prefix = path.expanduser(arg.split('=', 1)[1])
        lib_path_candidates = [path.join(prefix, 'lib'),
                               path.join(prefix, 'lib64')]
        include_dirs += [path.join(prefix, 'include')]
        library_dirs += list(filter(path.isdir, lib_path_candidates))

# required libraries
if get_platform() == "win32" or get_platform() == "win-amd64":
    libraries = ['madx', 'stdc++', 'ptc', 'gfortran', 'msvcrt']
else:
    libraries = ['madx', 'stdc++', 'c']

# Common arguments for the Cython extensions:
extension_args = dict(
    define_macros=[('MAJOR_VERSION', PYMADVERSION[0]),
                   ('MINOR_VERSION', PYMADVERSION[1])],
    libraries=libraries,
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    runtime_library_dirs=library_dirs,
    extra_compile_args=['-std=c99'],
)
# Compose a long description for PyPI:
long_description = None
try:
    long_description = open('README.rst').read()
    long_description += '\n' + open('COPYING.rst').read()
    long_description += '\n' + open('CHANGES.rst').read()
except IOError:
    # Deliberately best-effort: the doc files may be absent in some checkouts.
    pass
# Package metadata and build instructions for setuptools.
setup(
    name='cern-cpymad',
    version='.'.join(map(str, PYMADVERSION)),
    description='Cython binding to MAD-X',
    long_description=long_description,
    url='http://pymad.github.io/cpymad',
    package_dir={
        '': 'src' # look for packages in src/ subfolder
    },
    ext_modules = cythonize([
        Extension('cern.cpymad.libmadx',
                  sources=["src/cern/cpymad/libmadx.pyx"],
                  **extension_args),
    ]),
    namespace_packages=[
        'cern'
    ],
    packages = [
        "cern",
        "cern.resource",
        "cern.cpymad",
    ],
    include_package_data=True, # include files matched by MANIFEST.in
    author='PyMAD developers',
    author_email='pymad@cern.ch',
    setup_requires=[
    ],
    install_requires=[
        'setuptools',
        'numpy',
        'PyYAML',
    ],
    license = 'CERN Standard Copyright License'
)
| 35.424658 | 80 | 0.642691 |
try:
    import pkg_resources  # present whenever setuptools is already installed
except ImportError:
    # Bootstrap setuptools via the bundled ez_setup helper, then continue.
    from ez_setup import use_setuptools
    use_setuptools()
from setuptools import setup, Extension
from distutils.util import get_platform
import sys
from os import path
PYMADVERSION=(0, 9)
try:
    # Prefer the real Cython compiler when it is installed.
    from Cython.Build import cythonize
except ImportError:
    def cythonize(extensions):
        """Fallback: substitute each '.pyx' source with its pre-generated
        '.c' file, failing loudly when that file is missing."""
        def _c_source(name):
            if name.endswith('.pyx'):
                return name[:-4] + '.c'
            return name
        for ext in extensions:
            ext.sources = [_c_source(src) for src in ext.sources]
            missing_sources = [s for s in ext.sources if not path.exists(s)]
            if missing_sources:
                raise OSError(('Missing source file: {0[0]!r}. '
                               'Install Cython to resolve this problem.')
                              .format(missing_sources))
        return extensions
else:
    orig_Extension = Extension
    class Extension(orig_Extension):
        """setuptools Extension that keeps '.pyx' sources untouched,
        leaving the conversion to Cython itself."""
        def __init__(self, name, sources, *args, **kwargs):
            orig_Extension.__init__(self, name, sources, *args, **kwargs)
            # restore the original source list that setuptools may have rewritten
            self.sources = sources
# Let's just use the default system headers:
include_dirs = []
library_dirs = []

# Parse command line option: --madxdir=/path/to/madxinstallation. We could
# use build_ext.user_options instead, but then the --madxdir argument can
# be passed only to the 'build_ext' command, not to 'build' or 'install',
# which is a minor nuisance.
# NOTE: iterate over a copy (sys.argv[:]) because matching entries are
# removed from sys.argv inside the loop.
for arg in sys.argv[:]:
    if arg.startswith('--madxdir='):
        sys.argv.remove(arg)
        prefix = path.expanduser(arg.split('=', 1)[1])
        lib_path_candidates = [path.join(prefix, 'lib'),
                               path.join(prefix, 'lib64')]
        include_dirs += [path.join(prefix, 'include')]
        library_dirs += list(filter(path.isdir, lib_path_candidates))

# required libraries
if get_platform() == "win32" or get_platform() == "win-amd64":
    libraries = ['madx', 'stdc++', 'ptc', 'gfortran', 'msvcrt']
else:
    libraries = ['madx', 'stdc++', 'c']

# Common arguments for the Cython extensions:
extension_args = dict(
    define_macros=[('MAJOR_VERSION', PYMADVERSION[0]),
                   ('MINOR_VERSION', PYMADVERSION[1])],
    libraries=libraries,
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    runtime_library_dirs=library_dirs,
    extra_compile_args=['-std=c99'],
)
# Compose a long description for PyPI:
long_description = None
try:
    long_description = open('README.rst').read()
    long_description += '\n' + open('COPYING.rst').read()
    long_description += '\n' + open('CHANGES.rst').read()
except IOError:
    # Deliberately best-effort: the doc files may be absent in some checkouts.
    pass
# Package metadata and build instructions for setuptools.
setup(
    name='cern-cpymad',
    version='.'.join(map(str, PYMADVERSION)),
    description='Cython binding to MAD-X',
    long_description=long_description,
    url='http://pymad.github.io/cpymad',
    package_dir={
        '': 'src' # look for packages in src/ subfolder
    },
    ext_modules = cythonize([
        Extension('cern.cpymad.libmadx',
                  sources=["src/cern/cpymad/libmadx.pyx"],
                  **extension_args),
    ]),
    namespace_packages=[
        'cern'
    ],
    packages = [
        "cern",
        "cern.resource",
        "cern.cpymad",
    ],
    include_package_data=True, # include files matched by MANIFEST.in
    author='PyMAD developers',
    author_email='pymad@cern.ch',
    setup_requires=[
    ],
    install_requires=[
        'setuptools',
        'numpy',
        'PyYAML',
    ],
    license = 'CERN Standard Copyright License'
)
| true | true |
f7365c9b7c7c285ab21ee7e4bac00b72277fcf50 | 122 | py | Python | pypro/modulos/context_processors.py | wosubtil/curso-django | 4673e71fbbec062c74dfaf866ff13b81383e3fcc | [
"MIT"
] | null | null | null | pypro/modulos/context_processors.py | wosubtil/curso-django | 4673e71fbbec062c74dfaf866ff13b81383e3fcc | [
"MIT"
] | 806 | 2020-09-18T11:26:48.000Z | 2022-03-31T00:43:46.000Z | pypro/modulos/context_processors.py | taniodev/curso-django | aa5b0edd6ca55d2ea7f73220644d5c64a96c60df | [
"MIT"
] | 1 | 2020-08-06T19:50:33.000Z | 2020-08-06T19:50:33.000Z | from pypro.modulos import facade
def listar_modulos(request):
    """Context processor exposing the ordered module list as ``MODULOS``."""
    modulos = facade.listar_modulos_ordenados()
    return {'MODULOS': modulos}
| 20.333333 | 57 | 0.778689 | from pypro.modulos import facade
def listar_modulos(request):
    """Context processor exposing the ordered module list as ``MODULOS``."""
    modulos = facade.listar_modulos_ordenados()
    return {'MODULOS': modulos}
| true | true |
f7365cf7cf02c8871fff6ab4ece9ab8717635813 | 7,703 | py | Python | roms/tests/joy/spadtest-nes-0.01/tools/pilbmp2nes.py | MrKOSMOS/ANESE | 8ae814d615479b1496c98033a1f5bc4da5921c6f | [
"MIT"
] | 349 | 2017-11-15T22:51:00.000Z | 2022-03-21T13:43:57.000Z | tools/pilbmp2nes.py | pubby/Ralph-4 | dcda1d3e5bce25ca22b54dfee82eb64625d64d25 | [
"CC-BY-4.0"
] | 12 | 2018-08-28T21:38:29.000Z | 2021-12-11T16:24:36.000Z | tools/pilbmp2nes.py | pubby/Ralph-4 | dcda1d3e5bce25ca22b54dfee82eb64625d64d25 | [
"CC-BY-4.0"
] | 28 | 2018-06-10T07:31:13.000Z | 2022-03-21T10:54:26.000Z | #!/usr/bin/env python
#
# Bitmap to multi-console CHR converter using PIL or Pillow
#
# Copyright 2014 Damian Yerrick
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty
# provided the copyright notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
from __future__ import with_statement, print_function, unicode_literals
from PIL import Image
from time import sleep
import array
# python 2/3 cross compatibility fixes
# Python 3 renamed xrange -> range; alias it so loops work on both versions.
try:
    xrange
except NameError:
    xrange = range
# Python 3 renamed raw_input -> input.
try:
    raw_input
except NameError:
    raw_input = input
# Very old Python 2 lacks the next() builtin; emulate via the .next() method.
try:
    next
except NameError:
    next = lambda x: x.next()
def blank_byte_array():
    """Return an empty unsigned-byte array on both Python 2 and 3.

    Under ``unicode_literals`` Python 2 rejects a unicode typecode with
    TypeError, so the bytes spelling is tried as a fallback.
    """
    for typecode in ('B', b'B'):
        try:
            return array.array(typecode)
        except TypeError:
            pass
def formatTilePlanar(tile, planemap, hflip=False, little=False):
    """Turn an 8x8 tile into bitplane bytes.

    Planemap opcodes:
    10 -- bit 1 then bit 0 of each tile
    0,1 -- planar interleaved by rows
    0;1 -- planar interlaved by planes
    0,1;2,3 -- SNES/PCE format

    Returns None for a tile that is not exactly 8x8.
    """
    hflip = 7 if hflip else 0
    if tile.size != (8, 8):
        return None
    pixels = list(tile.getdata())
    pixelrows = [pixels[i:i + 8] for i in range(0, 64, 8)]
    if hflip:
        for row in pixelrows:
            row.reverse()
    # str('B') keeps the typecode a native str on Python 2 even under
    # unicode_literals, and is a plain str on Python 3.
    out = array.array(str('B'))
    # planemap becomes [tile-plane][plane-within-row][bit number]
    planemap = [[[int(c) for c in row]
                 for row in plane.split(',')]
                for plane in planemap.split(';')]
    # Nested loops: planes -> pixel rows -> row planes -> pixels -> bits.
    # rowbits starts at 1 as a sentinel: once 8 bits are shifted in it
    # reaches >= 0x100 and a full byte is emitted.
    for plane in planemap:
        for pxrow in pixelrows:
            for rowplane in plane:
                rowbits = 1
                thisrow = array.array(str('B'))
                for px in pxrow:
                    for bitnum in rowplane:
                        rowbits = (rowbits << 1) | ((px >> bitnum) & 1)
                        if rowbits >= 0x100:
                            thisrow.append(rowbits & 0xFF)
                            rowbits = 1
                if little:
                    thisrow.reverse()
                out.extend(thisrow)
    # array.tostring() was removed in Python 3.9; prefer tobytes() and
    # fall back only on ancient interpreters that lack it.
    return out.tobytes() if hasattr(out, 'tobytes') else out.tostring()
def pilbmp2chr(im, tileWidth=8, tileHeight=8,
               formatTile=lambda im: formatTilePlanar(im, "0;1")):
    """Convert a bitmap image into a list of byte strings representing tiles."""
    im.load()
    width, height = im.size
    tiles = []
    for top in range(0, height, tileHeight):
        for left in range(0, width, tileWidth):
            # Cut out one metatile, then walk its 8x8 sub-tiles row by row.
            metatile = im.crop((left, top, left + tileWidth, top + tileHeight))
            for sub_top in range(0, tileHeight, 8):
                for sub_left in range(0, tileWidth, 8):
                    subtile = metatile.crop((sub_left, sub_top,
                                             sub_left + 8, sub_top + 8))
                    tiles.append(formatTile(subtile))
    return tiles
def parse_argv(argv):
    """Parse command-line arguments.

    Returns (infilename, outfilename, tileWidth, tileHeight,
    packbits, planes, hflip, little).  Raises ValueError on bad values.
    """
    from optparse import OptionParser
    parser = OptionParser(usage="usage: %prog [options] [-i] INFILE [-o] OUTFILE")
    parser.add_option("-i", "--image", dest="infilename",
                      help="read image from INFILE", metavar="INFILE")
    parser.add_option("-o", "--output", dest="outfilename",
                      help="write CHR data to OUTFILE", metavar="OUTFILE")
    parser.add_option("-W", "--tile-width", dest="tileWidth",
                      help="set width of metatiles", metavar="HEIGHT",
                      type="int", default=8)
    parser.add_option("--packbits", dest="packbits",
                      help="use PackBits RLE compression",
                      action="store_true", default=False)
    parser.add_option("-H", "--tile-height", dest="tileHeight",
                      help="set height of metatiles", metavar="HEIGHT",
                      type="int", default=8)
    parser.add_option("-1", dest="planes",
                      help="set 1bpp mode (default: 2bpp NES)",
                      action="store_const", const="0", default="0;1")
    parser.add_option("--planes", dest="planes",
                      help="set the plane map (1bpp: 0) (NES: 0;1) (GB: 0,1) (SMS:0,1,2,3) (TG16/SNES: 0,1;2,3) (MD: 3210)")
    parser.add_option("--hflip", dest="hflip",
                      help="horizontally flip all tiles (most significant pixel on right)",
                      action="store_true", default=False)
    parser.add_option("--little", dest="little",
                      help="reverse the bytes within each row-plane (needed for GBA and a few others)",
                      action="store_true", default=False)
    options, positional = parser.parse_args(argv[1:])
    width = int(options.tileWidth)
    if width <= 0:
        raise ValueError("tile width '%d' must be positive" % width)
    height = int(options.tileHeight)
    if height <= 0:
        raise ValueError("tile height '%d' must be positive" % height)
    # Roles not set by options are filled from positional arguments.
    remaining = list(positional)
    infilename = options.infilename
    if infilename is None:
        if not remaining:
            raise ValueError("not enough filenames")
        infilename = remaining.pop(0)
    outfilename = options.outfilename
    if outfilename is None:
        outfilename = remaining.pop(0) if remaining else '-'
    if outfilename == '-':
        import sys
        # Refuse to dump binary CHR data onto an interactive terminal.
        if sys.stdout.isatty():
            raise ValueError("cannot write CHR to terminal")
    return (infilename, outfilename, width, height,
            options.packbits, options.planes, options.hflip, options.little)
# When True and the script is run interactively with no arguments,
# main() prompts for them on stdin.
argvTestingMode = True
def make_stdout_binary():
    """Ensure that sys.stdout is in binary mode, with no newline translation."""
    # Recipe from
    # http://code.activestate.com/recipes/65443-sending-binary-data-to-stdout-under-windows/
    # via http://stackoverflow.com/a/2374507/2738262
    # Bug fix: the module never imports sys at top level (main() imports it
    # locally), so without this import the function raised NameError.
    import sys
    if sys.platform == "win32":
        import os, msvcrt
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
def main(argv=None):
    """Command-line entry point: convert a bitmap INFILE to CHR OUTFILE."""
    import sys
    if argv is None:
        argv = sys.argv
    # Interactive convenience: prompt for arguments when run from a
    # terminal with none supplied (see argvTestingMode).
    if (argvTestingMode and len(argv) < 2
        and sys.stdin.isatty() and sys.stdout.isatty()):
        argv.extend(raw_input('args:').split())
    try:
        (infilename, outfilename, tileWidth, tileHeight,
         usePackBits, planes, hflip, little) = parse_argv(argv)
    except Exception as e:
        sys.stderr.write("%s: %s\n" % (argv[0], str(e)))
        sys.exit(1)
    im = Image.open(infilename)
    outdata = pilbmp2chr(im, tileWidth, tileHeight,
                         lambda im: formatTilePlanar(im, planes, hflip, little))
    outdata = b''.join(outdata)
    if usePackBits:
        from packbits import PackBits
        # 16-bit big-endian decompressed-size header precedes the RLE data.
        sz = len(outdata) % 0x10000
        # NOTE(review): .tostring() on the PackBits buffer may need
        # .tobytes() on Python 3.9+ -- confirm against the packbits module.
        outdata = PackBits(outdata).flush().tostring()
        # Bug fix: b''.join([chr(...), ...]) fails on Python 3 where chr()
        # returns str; bytes(bytearray(...)) works on both 2 and 3.
        outdata = bytes(bytearray([sz >> 8, sz & 0xFF])) + outdata
    # Write the output file ('-' means binary stdout).
    outfp = None
    try:
        if outfilename != '-':
            outfp = open(outfilename, 'wb')
        else:
            outfp = sys.stdout
            make_stdout_binary()
        outfp.write(outdata)
    finally:
        if outfp and outfilename != '-':
            outfp.close()
# Script entry point; the commented-out calls below are manual test runs.
if __name__=='__main__':
    main()
## main(['pilbmp2nes.py', '../tilesets/char_pinocchio.png', 'char_pinocchio.chr'])
## main(['pilbmp2nes.py', '--packbits', '../tilesets/char_pinocchio.png', 'char_pinocchio.pkb'])
| 36.164319 | 124 | 0.589251 |
from __future__ import with_statement, print_function, unicode_literals
from PIL import Image
from time import sleep
import array
# Python 3 renamed xrange -> range; alias it so loops work on both versions.
try:
    xrange
except NameError:
    xrange = range
# Python 3 renamed raw_input -> input.
try:
    raw_input
except NameError:
    raw_input = input
# Very old Python 2 lacks the next() builtin; emulate via the .next() method.
try:
    next
except NameError:
    next = lambda x: x.next()
def blank_byte_array():
    """Return an empty unsigned-byte array on both Python 2 and 3.

    Under ``unicode_literals`` Python 2 rejects a unicode typecode with
    TypeError, so the bytes spelling is tried as a fallback.
    """
    for typecode in ('B', b'B'):
        try:
            return array.array(typecode)
        except TypeError:
            pass
def formatTilePlanar(tile, planemap, hflip=False, little=False):
    """Turn an 8x8 tile into bitplane bytes.

    Planemap opcodes: "0" 1bpp; "0;1" planar by planes (NES); "0,1"
    interleaved by rows (GB); "0,1;2,3" SNES/PCE; "10" paired bits.
    Returns None for a tile that is not exactly 8x8.
    """
    hflip = 7 if hflip else 0
    if tile.size != (8, 8):
        return None
    pixels = list(tile.getdata())
    pixelrows = [pixels[i:i + 8] for i in range(0, 64, 8)]
    if hflip:
        for row in pixelrows:
            row.reverse()
    # str('B') keeps the typecode a native str on Python 2 even under
    # unicode_literals, and is a plain str on Python 3.
    out = array.array(str('B'))
    # planemap becomes [tile-plane][plane-within-row][bit number]
    planemap = [[[int(c) for c in row]
                 for row in plane.split(',')]
                for plane in planemap.split(';')]
    # rowbits starts at 1 as a sentinel: once 8 bits are shifted in it
    # reaches >= 0x100 and a full byte is emitted.
    for plane in planemap:
        for pxrow in pixelrows:
            for rowplane in plane:
                rowbits = 1
                thisrow = array.array(str('B'))
                for px in pxrow:
                    for bitnum in rowplane:
                        rowbits = (rowbits << 1) | ((px >> bitnum) & 1)
                        if rowbits >= 0x100:
                            thisrow.append(rowbits & 0xFF)
                            rowbits = 1
                if little:
                    thisrow.reverse()
                out.extend(thisrow)
    # array.tostring() was removed in Python 3.9; prefer tobytes().
    return out.tobytes() if hasattr(out, 'tobytes') else out.tostring()
def pilbmp2chr(im, tileWidth=8, tileHeight=8,
               formatTile=lambda im: formatTilePlanar(im, "0;1")):
    """Convert a bitmap image into a list of byte strings, one per 8x8 tile."""
    im.load()
    width, height = im.size
    tiles = []
    for top in range(0, height, tileHeight):
        for left in range(0, width, tileWidth):
            # Cut out one metatile, then walk its 8x8 sub-tiles row by row.
            metatile = im.crop((left, top, left + tileWidth, top + tileHeight))
            for sub_top in range(0, tileHeight, 8):
                for sub_left in range(0, tileWidth, 8):
                    subtile = metatile.crop((sub_left, sub_top,
                                             sub_left + 8, sub_top + 8))
                    tiles.append(formatTile(subtile))
    return tiles
def parse_argv(argv):
    """Parse command-line arguments.

    Returns (infilename, outfilename, tileWidth, tileHeight,
    packbits, planes, hflip, little).  Raises ValueError on bad values.
    """
    from optparse import OptionParser
    parser = OptionParser(usage="usage: %prog [options] [-i] INFILE [-o] OUTFILE")
    parser.add_option("-i", "--image", dest="infilename",
                      help="read image from INFILE", metavar="INFILE")
    parser.add_option("-o", "--output", dest="outfilename",
                      help="write CHR data to OUTFILE", metavar="OUTFILE")
    parser.add_option("-W", "--tile-width", dest="tileWidth",
                      help="set width of metatiles", metavar="HEIGHT",
                      type="int", default=8)
    parser.add_option("--packbits", dest="packbits",
                      help="use PackBits RLE compression",
                      action="store_true", default=False)
    parser.add_option("-H", "--tile-height", dest="tileHeight",
                      help="set height of metatiles", metavar="HEIGHT",
                      type="int", default=8)
    parser.add_option("-1", dest="planes",
                      help="set 1bpp mode (default: 2bpp NES)",
                      action="store_const", const="0", default="0;1")
    parser.add_option("--planes", dest="planes",
                      help="set the plane map (1bpp: 0) (NES: 0;1) (GB: 0,1) (SMS:0,1,2,3) (TG16/SNES: 0,1;2,3) (MD: 3210)")
    parser.add_option("--hflip", dest="hflip",
                      help="horizontally flip all tiles (most significant pixel on right)",
                      action="store_true", default=False)
    parser.add_option("--little", dest="little",
                      help="reverse the bytes within each row-plane (needed for GBA and a few others)",
                      action="store_true", default=False)
    options, positional = parser.parse_args(argv[1:])
    width = int(options.tileWidth)
    if width <= 0:
        raise ValueError("tile width '%d' must be positive" % width)
    height = int(options.tileHeight)
    if height <= 0:
        raise ValueError("tile height '%d' must be positive" % height)
    # Roles not set by options are filled from positional arguments.
    remaining = list(positional)
    infilename = options.infilename
    if infilename is None:
        if not remaining:
            raise ValueError("not enough filenames")
        infilename = remaining.pop(0)
    outfilename = options.outfilename
    if outfilename is None:
        outfilename = remaining.pop(0) if remaining else '-'
    if outfilename == '-':
        import sys
        # Refuse to dump binary CHR data onto an interactive terminal.
        if sys.stdout.isatty():
            raise ValueError("cannot write CHR to terminal")
    return (infilename, outfilename, width, height,
            options.packbits, options.planes, options.hflip, options.little)
# When True and the script is run interactively with no arguments,
# main() prompts for them on stdin.
argvTestingMode = True
def make_stdout_binary():
    """Ensure that sys.stdout is in binary mode, with no newline translation."""
    # Bug fix: the module never imports sys at top level (main() imports it
    # locally), so without this import the function raised NameError.
    import sys
    if sys.platform == "win32":
        import os, msvcrt
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
def main(argv=None):
    """Command-line entry point: convert a bitmap INFILE to CHR OUTFILE."""
    import sys
    if argv is None:
        argv = sys.argv
    # Interactive convenience: prompt for arguments when run from a
    # terminal with none supplied (see argvTestingMode).
    if (argvTestingMode and len(argv) < 2
        and sys.stdin.isatty() and sys.stdout.isatty()):
        argv.extend(raw_input('args:').split())
    try:
        (infilename, outfilename, tileWidth, tileHeight,
         usePackBits, planes, hflip, little) = parse_argv(argv)
    except Exception as e:
        sys.stderr.write("%s: %s\n" % (argv[0], str(e)))
        sys.exit(1)
    im = Image.open(infilename)
    outdata = pilbmp2chr(im, tileWidth, tileHeight,
                         lambda im: formatTilePlanar(im, planes, hflip, little))
    outdata = b''.join(outdata)
    if usePackBits:
        from packbits import PackBits
        # 16-bit big-endian decompressed-size header precedes the RLE data.
        sz = len(outdata) % 0x10000
        # NOTE(review): .tostring() on the PackBits buffer may need
        # .tobytes() on Python 3.9+ -- confirm against the packbits module.
        outdata = PackBits(outdata).flush().tostring()
        # Bug fix: b''.join([chr(...), ...]) fails on Python 3 where chr()
        # returns str; bytes(bytearray(...)) works on both 2 and 3.
        outdata = bytes(bytearray([sz >> 8, sz & 0xFF])) + outdata
    # Write the output file ('-' means binary stdout).
    outfp = None
    try:
        if outfilename != '-':
            outfp = open(outfilename, 'wb')
        else:
            outfp = sys.stdout
            make_stdout_binary()
        outfp.write(outdata)
    finally:
        if outfp and outfilename != '-':
            outfp.close()
# Script entry point.
if __name__=='__main__':
    main()
| true | true |
f7365d1f3a6d608c0c9d0eddaeb0a2bb1cb6cb4f | 34,363 | py | Python | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_staticapp_commands_thru_mock.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 1 | 2021-09-07T18:51:21.000Z | 2021-09-07T18:51:21.000Z | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_staticapp_commands_thru_mock.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 1 | 2020-08-08T03:56:56.000Z | 2020-08-08T03:56:56.000Z | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_staticapp_commands_thru_mock.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 1 | 2022-02-16T18:23:11.000Z | 2022-02-16T18:23:11.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest import mock
from azure.cli.command_modules.appservice.static_sites import \
list_staticsites, show_staticsite, delete_staticsite, create_staticsites, CLIError, disconnect_staticsite, \
reconnect_staticsite, list_staticsite_environments, show_staticsite_environment, list_staticsite_domains, \
set_staticsite_domain, delete_staticsite_domain, list_staticsite_functions, list_staticsite_app_settings, \
set_staticsite_app_settings, delete_staticsite_app_settings, list_staticsite_users, \
invite_staticsite_users, update_staticsite_users, update_staticsite, list_staticsite_secrets, \
reset_staticsite_api_key, delete_staticsite_environment, link_user_function, unlink_user_function, get_user_function, \
assign_identity, remove_identity, show_identity
from azure.core.exceptions import ResourceNotFoundError
class TestStaticAppCommands(unittest.TestCase):
    def setUp(self):
        """Install fresh client mocks and fake app fixtures before each test.

        Both helpers are module-level functions defined outside this chunk;
        presumably they populate self.staticapp_client, self.mock_cmd,
        self.app1/app2, names and resource groups -- TODO confirm.
        """
        _set_up_client_mock(self)
        _set_up_fake_apps(self)
    # --- list/show command tests -------------------------------------------
    # An empty client list yields an empty CLI response.
    def test_list_empty_staticapp(self):
        self.staticapp_client.list.return_value = []
        response = list_staticsites(self.mock_cmd)
        self.assertEqual(len(response), 0)
    # With a resource group, listing goes through get_static_sites_by_resource_group.
    def test_list_staticapp_with_resourcegroup(self):
        self.staticapp_client.get_static_sites_by_resource_group.return_value = [self.app1]
        response = list_staticsites(self.mock_cmd, self.rg1)
        self.staticapp_client.get_static_sites_by_resource_group.assert_called_once_with(self.rg1)
        self.assertEqual(len(response), 1)
        self.assertIn(self.app1, response)
    # Without a resource group, all sites are returned.
    def test_list_staticapp_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        response = list_staticsites(self.mock_cmd)
        self.assertEqual(len(response), 2)
        self.assertIn(self.app1, response)
        self.assertIn(self.app2, response)
    # show resolves directly when the resource group is given.
    def test_show_staticapp_with_resourcegroup(self):
        self.staticapp_client.get_static_site.return_value = self.app1
        response = show_staticsite(self.mock_cmd, self.name1, self.rg1)
        self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
        self.assertEqual(self.app1, response)
    # Without a resource group, show finds the site's group via list().
    def test_show_staticapp_without_resourcegroup(self):
        self.staticapp_client.get_static_site.return_value = self.app1
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        response = show_staticsite(self.mock_cmd, self.name1)
        self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
        self.assertEqual(self.app1, response)
    # Unknown site names raise CLIError.
    def test_show_staticapp_not_exist(self):
        self.staticapp_client.get_static_site.return_value = self.app1
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        with self.assertRaises(CLIError):
            show_staticsite(self.mock_cmd, self.name1_not_exist)
    # --- delete/create command tests ---------------------------------------
    def test_delete_staticapp_with_resourcegroup(self):
        delete_staticsite(self.mock_cmd, self.name1, self.rg1)
        self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
    # Without a resource group the group is resolved from list().
    def test_delete_staticapp_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        delete_staticsite(self.mock_cmd, self.name1)
        self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
    def test_delete_staticapp_not_exist(self):
        with self.assertRaises(CLIError):
            delete_staticsite(self.mock_cmd, self.name1_not_exist)
    # create: verifies the full envelope (location, repo, branch, tags, sku,
    # build properties) passed to begin_create_or_update_static_site, and
    # that a duplicate create is a no-op when the site already exists.
    def test_create_staticapp(self):
        from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
        self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
        app_location = './src'
        api_location = './api/'
        output_location = '/.git/'
        tags = {'key1': 'value1'}
        # ResourceNotFoundError from show_staticsite means "site does not exist yet".
        with mock.patch("azure.cli.command_modules.appservice.static_sites.show_staticsite", side_effect=ResourceNotFoundError("msg")):
            create_staticsites(
                self.mock_cmd, self.rg1, self.name1, self.location1,
                self.source1, self.branch1, self.token1,
                app_location=app_location, api_location=api_location, output_location=output_location,
                tags=tags)
        self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
        arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
        self.assertEqual(self.name1, arg_list["name"])
        self.assertEqual(self.rg1, arg_list["resource_group_name"])
        self.assertEqual(self.location1, arg_list["static_site_envelope"].location)
        self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
        self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
        self.assertEqual(tags, arg_list["static_site_envelope"].tags)
        self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
        self.assertEqual(app_location, arg_list["static_site_envelope"].build_properties.app_location)
        self.assertEqual(api_location, arg_list["static_site_envelope"].build_properties.api_location)
        self.assertEqual(output_location, arg_list["static_site_envelope"].build_properties.app_artifact_location)
        # assert that a duplicate create call doesn't raise an error or call client create method again
        create_staticsites(
            self.mock_cmd, self.rg1, self.name1, self.location1,
            self.source1, self.branch1, self.token1,
            app_location=app_location, api_location=api_location, output_location=output_location,
            tags=tags)
        self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
    # sku='standard' must produce a 'Standard' SKU in the envelope.
    def test_create_staticapp_with_standard_sku(self):
        from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
        self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
        with mock.patch("azure.cli.command_modules.appservice.static_sites.show_staticsite", side_effect=ResourceNotFoundError("msg")):
            create_staticsites(
                self.mock_cmd, self.rg1, self.name1, self.location1,
                self.source1, self.branch1, self.token1, sku='standard')
        self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
        arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
        self.assertEqual('Standard', arg_list["static_site_envelope"].sku.name)
    # Omitting the repo token must fail with CLIError.
    def test_create_staticapp_missing_token(self):
        app_location = './src'
        api_location = './api/'
        output_location = '/.git/'
        tags = {'key1': 'value1'}
        with self.assertRaises(CLIError):
            with mock.patch("azure.cli.command_modules.appservice.static_sites.show_staticsite", side_effect=ResourceNotFoundError("msg")):
                create_staticsites(
                    self.mock_cmd, self.rg1, self.name1, self.location1,
                    self.source1, self.branch1,
                    app_location=app_location, api_location=api_location, output_location=output_location,
                    tags=tags)
    # --- update/disconnect/reconnect tests ---------------------------------
    # update forwards the new source/branch/token/tags/sku in the envelope.
    def test_update_staticapp(self):
        from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
        self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
        self.staticapp_client.get_static_site.return_value = self.app1
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        tags = {'key1': 'value1'}
        sku = 'Standard'
        update_staticsite(self.mock_cmd, self.name1, self.source2, self.branch2, self.token2, tags=tags, sku=sku)
        self.staticapp_client.update_static_site.assert_called_once()
        arg_list = self.staticapp_client.update_static_site.call_args[1]
        self.assertEqual(self.name1, arg_list["name"])
        self.assertEqual(self.source2, arg_list["static_site_envelope"].repository_url)
        self.assertEqual(self.branch2, arg_list["static_site_envelope"].branch)
        self.assertEqual(self.token2, arg_list["static_site_envelope"].repository_token)
        self.assertEqual(tags, arg_list["static_site_envelope"].tags)
        self.assertEqual(sku, arg_list["static_site_envelope"].sku.name)
    # With no overrides, update keeps the existing site's values.
    def test_update_staticapp_with_no_values_passed_in(self):
        from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
        self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
        self.staticapp_client.get_static_site.return_value = self.app1
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        update_staticsite(self.mock_cmd, self.name1)
        self.staticapp_client.update_static_site.assert_called_once()
        arg_list = self.staticapp_client.update_static_site.call_args[1]
        self.assertEqual(self.name1, arg_list["name"])
        self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
        self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
        self.assertEqual(self.token1, arg_list["static_site_envelope"].repository_token)
        self.assertEqual(self.app1.tags, arg_list["static_site_envelope"].tags)
        self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
    def test_update_staticapp_not_exist(self):
        from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
        self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
        self.staticapp_client.get_static_site.return_value = self.app1
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        with self.assertRaises(CLIError):
            update_staticsite(self.mock_cmd, self.name1_not_exist)
    def test_disconnect_staticapp_with_resourcegroup(self):
        disconnect_staticsite(self.mock_cmd, self.name1, self.rg1)
        self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
    def test_disconnect_staticapp_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        disconnect_staticsite(self.mock_cmd, self.name1)
        self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
    # reconnect delegates to create_staticsites with the site's location.
    @mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
    def test_reconnect_staticapp_with_resourcegroup(self, create_staticsites_mock):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1,
                             resource_group_name=self.rg1)
        create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
                                                        self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
    @mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
    def test_reconnect_staticapp_without_resourcegroup(self, create_staticsites_mock):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1)
        create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
                                                        self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
    # --- environment/domain/function/app-settings-list tests ---------------
    def test_list_staticsite_environments_with_resourcegroup(self):
        list_staticsite_environments(self.mock_cmd, self.name1, self.rg1)
        self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
    def test_list_staticsite_environments_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        list_staticsite_environments(self.mock_cmd, self.name1)
        self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
    def test_show_staticsite_environment_with_resourcegroup(self):
        show_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
        self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
    def test_show_staticsite_environment_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        show_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
        self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
    # set domain validates the hostname first, then creates the custom domain.
    def test_set_staticsite_domain_with_resourcegroup(self):
        set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
        self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
            self.rg1, self.name1, self.hostname1, self.hostname1_validation)
        self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
            resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
            static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
    def test_set_staticsite_domain_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
        self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
            self.rg1, self.name1, self.hostname1, self.hostname1_validation)
        self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
            resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
            static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
    def test_delete_staticsite_domain_with_resourcegroup(self):
        delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
        self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
            resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
    def test_delete_staticsite_domain_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
        self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
            resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
    def test_delete_staticsite_environment_with_resourcegroup(self):
        delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
        self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
    def test_delete_staticsite_environment_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
        self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
    def test_list_staticsite_functions_with_resourcegroup(self):
        list_staticsite_functions(self.mock_cmd, self.name1, self.rg1, self.environment1)
        self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
            self.rg1, self.name1, self.environment1)
    def test_list_staticsite_functions_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        list_staticsite_functions(self.mock_cmd, self.name1, environment_name=self.environment1)
        self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
            self.rg1, self.name1, self.environment1)
    def test_list_staticsite_app_settings_with_resourcegroup(self):
        list_staticsite_app_settings(self.mock_cmd, self.name1, self.rg1)
        self.staticapp_client.list_static_site_app_settings.assert_called_once_with(
            self.rg1, self.name1)
    def test_list_staticsite_app_settings_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        list_staticsite_app_settings(self.mock_cmd, self.name1)
        self.staticapp_client.list_static_site_app_settings.assert_called_once_with(
            self.rg1, self.name1)
    # --- app-settings set/delete tests -------------------------------------
    # 'key=value' pairs (values may themselves contain '=') are merged into
    # the existing settings and written back.
    def test_set_staticsite_app_settings_with_resourcegroup(self):
        from azure.mgmt.web.models import StringDictionary
        app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
        self.staticapp_client.list_static_site_app_settings.return_value = StringDictionary(properties={})
        set_staticsite_app_settings(self.mock_cmd, self.name1, app_settings1_input, self.rg1)
        self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
    def test_set_staticsite_app_settings_without_resourcegroup(self):
        from azure.mgmt.web.models import StringDictionary
        app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        self.staticapp_client.list_static_site_app_settings.return_value = StringDictionary(properties={})
        set_staticsite_app_settings(self.mock_cmd, self.name1, app_settings1_input)
        self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
    # Deleting keys re-writes the remaining settings via create_or_update.
    def test_delete_staticsite_app_settings_with_resourcegroup(self):
        # setup
        current_app_settings = {'key1': 'val1', 'key2': 'val2'}
        app_settings_keys_to_delete = ['key1']
        class AppSettings:
            properties = current_app_settings
        self.staticapp_client.list_static_site_app_settings.return_value = AppSettings
        # action
        delete_staticsite_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete, self.rg1)
        # validate
        self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
    def test_delete_staticsite_app_settings_without_resourcegroup(self):
        # setup
        current_app_settings = {'key1': 'val1', 'key2': 'val2'}
        app_settings_keys_to_delete = ['key1']
        class AppSettings:
            properties = current_app_settings
        self.staticapp_client.list_static_site_app_settings.return_value = AppSettings
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        # action
        delete_staticsite_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete)
        # validate
        self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
    # --- user list/invite/update tests -------------------------------------
    def test_list_staticsite_users_with_resourcegroup(self):
        authentication_provider = 'GitHub'
        list_staticsite_users(self.mock_cmd, self.name1, self.rg1, authentication_provider=authentication_provider)
        self.staticapp_client.list_static_site_users.assert_called_once_with(
            self.rg1, self.name1, authentication_provider)
    def test_list_staticsite_users_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        authentication_provider = 'GitHub'
        list_staticsite_users(self.mock_cmd, self.name1, authentication_provider=authentication_provider)
        self.staticapp_client.list_static_site_users.assert_called_once_with(
            self.rg1, self.name1, authentication_provider)
    # invite builds a StaticSiteUserInvitationRequestResource envelope and
    # passes it positionally to create_user_roles_invitation_link.
    def test_invite_staticsite_users_with_resourcegroup(self):
        authentication_provider = 'GitHub'
        user_details = 'JohnDoe'
        roles = 'Contributor,Reviewer'
        invitation_expiration_in_hours = 2
        from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
        self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
        invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
                                roles, invitation_expiration_in_hours, self.rg1)
        arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
        self.assertEqual(self.rg1, arg_list[0])
        self.assertEqual(self.name1, arg_list[1])
        self.assertEqual(self.hostname1, arg_list[2].domain)
        self.assertEqual(authentication_provider, arg_list[2].provider)
        self.assertEqual(user_details, arg_list[2].user_details)
        self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
    def test_invite_staticsite_users_without_resourcegroup(self):
        self.staticapp_client.list.return_value = [self.app1, self.app2]
        authentication_provider = 'GitHub'
        user_details = 'JohnDoe'
        roles = 'Contributor,Reviewer'
        invitation_expiration_in_hours = 2
        from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
        self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
        invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
                                roles, invitation_expiration_in_hours)
        arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
        self.assertEqual(self.rg1, arg_list[0])
        self.assertEqual(self.name1, arg_list[1])
        self.assertEqual(self.hostname1, arg_list[2].domain)
        self.assertEqual(authentication_provider, arg_list[2].provider)
        self.assertEqual(user_details, arg_list[2].user_details)
        self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
    def test_update_staticsite_users_with_resourcegroup_with_all_args(self):
        roles = 'Contributor,Reviewer'
        authentication_provider = 'GitHub'
        user_details = 'JohnDoe'
        user_id = 100
        update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
                                user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
        self.staticapp_client.update_static_site_user.assert_called_once_with(
            self.rg1, self.name1, authentication_provider, user_id, roles=roles)
    # When no provider is given it is looked up from the user list
    # (_mock_list_users_for_without_auth_provider is defined outside this chunk).
    def test_update_staticsite_users_with_resourcegroup_without_auth_provider(self):
        roles = 'Contributor,Reviewer'
        user_details = 'JohnDoe'
        authentication_provider = 'GitHub'
        user_id = '100'
        _mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
        update_staticsite_users(self.mock_cmd, self.name1, roles,
                                user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
        self.staticapp_client.update_static_site_user.assert_called_once_with(
            self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, 'other_user_id',
'dummy_authentication_provider', 'dummy_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
_mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
'other_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_user_id_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
_mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
'other_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_user_details(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
resource_group_name=self.rg1)
def test_list_staticsite_secrets(self):
from azure.mgmt.web.models import StringDictionary
self.staticapp_client.list_static_site_secrets.return_value = StringDictionary(properties={"apiKey": "key"})
secret = list_staticsite_secrets(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.list_static_site_secrets.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
from ast import literal_eval
self.assertEqual(literal_eval(secret.__str__())["properties"]["apiKey"], "key")
def test_staticsite_identity_assign(self):
from azure.mgmt.web.models import ManagedServiceIdentity, ManagedServiceIdentityType
self.mock_cmd.get_models.return_value = ManagedServiceIdentity, ManagedServiceIdentityType
assign_identity(self.mock_cmd, self.rg1, self.name1)
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
def test_staticsite_identity_remove(self):
from azure.mgmt.web.models import ManagedServiceIdentityType, Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties
get_models = lambda s: ManagedServiceIdentityType if s == "ManagedServiceIdentityType" else Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties
self.mock_cmd.get_models.side_effect = get_models
remove_identity(self.mock_cmd, self.rg1, self.name1)
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
def test_staticsite_identity_show(self):
mock_site = mock.MagicMock()
mock_site.identity = "identity"
self.staticapp_client.get_static_site.return_value = mock_site
self.assertEqual(show_identity(self.mock_cmd, self.rg1, self.name1), "identity")
def test_reset_staticsite_api_key(self):
from azure.mgmt.web.models import StringDictionary, StaticSiteResetPropertiesARMResource
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.reset_static_site_api_key.return_value = StringDictionary(properties={"apiKey": "new_key"})
self.mock_cmd.get_models.return_value = StaticSiteResetPropertiesARMResource
secret = reset_staticsite_api_key(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.mock_cmd.get_models.assert_called_once_with('StaticSiteResetPropertiesARMResource')
self.staticapp_client.reset_static_site_api_key.assert_called_once()
from ast import literal_eval
reset_envelope = literal_eval(self.staticapp_client.reset_static_site_api_key.call_args[1]["reset_properties_envelope"].__str__())
self.assertEqual(reset_envelope["repository_token"], self.token1)
@mock.patch("azure.cli.command_modules.appservice.static_sites.show_functionapp")
def test_functions_link(self, *args, **kwargs):
functionapp_name = "functionapp"
functionapp_resource_id = "/subscriptions/sub/resourceGroups/{}/providers/Microsoft.Web/sites/{}".format(
self.rg1, functionapp_name
)
link_user_function(self.mock_cmd, self.name1, self.rg1, functionapp_resource_id)
self.staticapp_client.begin_register_user_provided_function_app_with_static_site.assert_called_once()
@mock.patch("azure.cli.command_modules.appservice.static_sites.get_user_function", return_value=[mock.MagicMock()])
def test_functions_unlink(self, *args, **kwargs):
unlink_user_function(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.detach_user_provided_function_app_from_static_site.assert_called_once()
def test_functions_show(self, *args, **kwargs):
get_user_function(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_user_provided_function_apps_for_static_site.assert_called_once()
def _set_up_client_mock(self):
self.mock_cmd = mock.MagicMock()
self.mock_cmd.cli_ctx = mock.MagicMock()
self.staticapp_client = mock.MagicMock()
client_factory_patcher = mock.patch(
'azure.cli.command_modules.appservice.static_sites._get_staticsites_client_factory', autospec=True)
self.addCleanup(client_factory_patcher.stop)
self.mock_static_site_client_factory = client_factory_patcher.start()
self.mock_static_site_client_factory.return_value = self.staticapp_client
def _set_up_fake_apps(self):
from azure.mgmt.web.models import StaticSiteCustomDomainRequestPropertiesARMResource
self.rg1 = 'rg1'
self.name1 = 'name1'
self.name1_not_exist = 'name1_not_exist'
self.location1 = 'location1'
self.source1 = 'https://github.com/Contoso/My-First-Static-App'
self.branch1 = 'dev'
self.token1 = 'TOKEN_1'
self.environment1 = 'default'
self.hostname1 = 'www.app1.com'
self.hostname1_validation = StaticSiteCustomDomainRequestPropertiesARMResource(validation_method="cname-delegation")
self.app1 = _contruct_static_site_object(
self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1)
self.rg2 = 'rg2'
self.name2 = 'name2'
self.location2 = 'location2'
self.source2 = 'https://github.com/Contoso/My-Second-Static-App'
self.branch2 = 'master'
self.token2 = 'TOKEN_2'
self.environment1 = 'prod'
self.hostname1 = 'www.app2.com'
self.app2 = _contruct_static_site_object(
self.rg2, self.name2, self.location2,
self.source2, self.branch2, self.token2)
def _contruct_static_site_object(rg, app_name, location, source, branch, token):
    """Build a StaticSiteARMResource fixture with a synthetic ARM resource id.

    (Function name keeps the historical "contruct" spelling to avoid breaking
    callers elsewhere in the file.)
    """
    from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
    site = StaticSiteARMResource(
        location=location,
        repository_url=source,
        branch=branch,
        repository_token=token,
        sku=SkuDescription(name='Free', tier='Free'),
    )
    site.name = app_name
    site.id = (
        "/subscriptions/sub/resourceGroups/{}/providers/Microsoft.Web/staticSites/{}"
        .format(rg, app_name)
    )
    return site
def _mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details):
    """Stub list_static_site_users with two fake users.

    The first user matches the given identifiers exactly; the second is a
    near-miss (each field suffixed with '2') so lookup code must discriminate.
    """
    class StubUser:
        def __init__(self, name, provider, display_name):
            self.name = name
            self.provider = provider
            self.display_name = display_name

    matching = StubUser(user_id, authentication_provider, user_details)
    near_miss = StubUser(user_id + '2', authentication_provider + '2', user_details + '2')
    self.staticapp_client.list_static_site_users.return_value = [matching, near_miss]
| 51.059435 | 198 | 0.74359 |
import unittest
from unittest import mock
from azure.cli.command_modules.appservice.static_sites import \
list_staticsites, show_staticsite, delete_staticsite, create_staticsites, CLIError, disconnect_staticsite, \
reconnect_staticsite, list_staticsite_environments, show_staticsite_environment, list_staticsite_domains, \
set_staticsite_domain, delete_staticsite_domain, list_staticsite_functions, list_staticsite_app_settings, \
set_staticsite_app_settings, delete_staticsite_app_settings, list_staticsite_users, \
invite_staticsite_users, update_staticsite_users, update_staticsite, list_staticsite_secrets, \
reset_staticsite_api_key, delete_staticsite_environment, link_user_function, unlink_user_function, get_user_function, \
assign_identity, remove_identity, show_identity
from azure.core.exceptions import ResourceNotFoundError
class TestStaticAppCommands(unittest.TestCase):
def setUp(self):
_set_up_client_mock(self)
_set_up_fake_apps(self)
def test_list_empty_staticapp(self):
self.staticapp_client.list.return_value = []
response = list_staticsites(self.mock_cmd)
self.assertEqual(len(response), 0)
def test_list_staticapp_with_resourcegroup(self):
self.staticapp_client.get_static_sites_by_resource_group.return_value = [self.app1]
response = list_staticsites(self.mock_cmd, self.rg1)
self.staticapp_client.get_static_sites_by_resource_group.assert_called_once_with(self.rg1)
self.assertEqual(len(response), 1)
self.assertIn(self.app1, response)
def test_list_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
response = list_staticsites(self.mock_cmd)
self.assertEqual(len(response), 2)
self.assertIn(self.app1, response)
self.assertIn(self.app2, response)
def test_show_staticapp_with_resourcegroup(self):
self.staticapp_client.get_static_site.return_value = self.app1
response = show_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.assertEqual(self.app1, response)
def test_show_staticapp_without_resourcegroup(self):
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
response = show_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
self.assertEqual(self.app1, response)
def test_show_staticapp_not_exist(self):
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
with self.assertRaises(CLIError):
show_staticsite(self.mock_cmd, self.name1_not_exist)
def test_delete_staticapp_with_resourcegroup(self):
delete_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_delete_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.begin_delete_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_delete_staticapp_not_exist(self):
with self.assertRaises(CLIError):
delete_staticsite(self.mock_cmd, self.name1_not_exist)
def test_create_staticapp(self):
from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
app_location = './src'
api_location = './api/'
output_location = '/.git/'
tags = {'key1': 'value1'}
with mock.patch("azure.cli.command_modules.appservice.static_sites.show_staticsite", side_effect=ResourceNotFoundError("msg")):
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.rg1, arg_list["resource_group_name"])
self.assertEqual(self.location1, arg_list["static_site_envelope"].location)
self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
self.assertEqual(tags, arg_list["static_site_envelope"].tags)
self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
self.assertEqual(app_location, arg_list["static_site_envelope"].build_properties.app_location)
self.assertEqual(api_location, arg_list["static_site_envelope"].build_properties.api_location)
self.assertEqual(output_location, arg_list["static_site_envelope"].build_properties.app_artifact_location)
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
def test_create_staticapp_with_standard_sku(self):
from azure.mgmt.web.models import StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, StaticSiteBuildProperties, SkuDescription
with mock.patch("azure.cli.command_modules.appservice.static_sites.show_staticsite", side_effect=ResourceNotFoundError("msg")):
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, sku='standard')
self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
arg_list = self.staticapp_client.begin_create_or_update_static_site.call_args[1]
self.assertEqual('Standard', arg_list["static_site_envelope"].sku.name)
def test_create_staticapp_missing_token(self):
app_location = './src'
api_location = './api/'
output_location = '/.git/'
tags = {'key1': 'value1'}
with self.assertRaises(CLIError):
with mock.patch("azure.cli.command_modules.appservice.static_sites.show_staticsite", side_effect=ResourceNotFoundError("msg")):
create_staticsites(
self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1,
app_location=app_location, api_location=api_location, output_location=output_location,
tags=tags)
def test_update_staticapp(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
tags = {'key1': 'value1'}
sku = 'Standard'
update_staticsite(self.mock_cmd, self.name1, self.source2, self.branch2, self.token2, tags=tags, sku=sku)
self.staticapp_client.update_static_site.assert_called_once()
arg_list = self.staticapp_client.update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.source2, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch2, arg_list["static_site_envelope"].branch)
self.assertEqual(self.token2, arg_list["static_site_envelope"].repository_token)
self.assertEqual(tags, arg_list["static_site_envelope"].tags)
self.assertEqual(sku, arg_list["static_site_envelope"].sku.name)
def test_update_staticapp_with_no_values_passed_in(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
update_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.update_static_site.assert_called_once()
arg_list = self.staticapp_client.update_static_site.call_args[1]
self.assertEqual(self.name1, arg_list["name"])
self.assertEqual(self.source1, arg_list["static_site_envelope"].repository_url)
self.assertEqual(self.branch1, arg_list["static_site_envelope"].branch)
self.assertEqual(self.token1, arg_list["static_site_envelope"].repository_token)
self.assertEqual(self.app1.tags, arg_list["static_site_envelope"].tags)
self.assertEqual('Free', arg_list["static_site_envelope"].sku.name)
def test_update_staticapp_not_exist(self):
from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription
self.mock_cmd.get_models.return_value = StaticSiteARMResource, SkuDescription
self.staticapp_client.get_static_site.return_value = self.app1
self.staticapp_client.list.return_value = [self.app1, self.app2]
with self.assertRaises(CLIError):
update_staticsite(self.mock_cmd, self.name1_not_exist)
def test_disconnect_staticapp_with_resourcegroup(self):
disconnect_staticsite(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
def test_disconnect_staticapp_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
disconnect_staticsite(self.mock_cmd, self.name1)
self.staticapp_client.begin_detach_static_site.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
@mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
def test_reconnect_staticapp_with_resourcegroup(self, create_staticsites_mock):
self.staticapp_client.list.return_value = [self.app1, self.app2]
reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1,
resource_group_name=self.rg1)
create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
@mock.patch('azure.cli.command_modules.appservice.static_sites.create_staticsites', autospec=True)
def test_reconnect_staticapp_without_resourcegroup(self, create_staticsites_mock):
self.staticapp_client.list.return_value = [self.app1, self.app2]
reconnect_staticsite(self.mock_cmd, self.name1, self.source1, self.branch1, self.token1)
create_staticsites_mock.assert_called_once_with(self.mock_cmd, self.rg1, self.name1, self.location1,
self.source1, self.branch1, self.token1, login_with_github=False, no_wait=False)
def test_list_staticsite_environments_with_resourcegroup(self):
list_staticsite_environments(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
def test_list_staticsite_environments_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_environments(self.mock_cmd, self.name1)
self.staticapp_client.get_static_site_builds.assert_called_once_with(self.rg1, self.name1)
def test_show_staticsite_environment_with_resourcegroup(self):
show_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_show_staticsite_environment_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
show_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
self.staticapp_client.get_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_set_staticsite_domain_with_resourcegroup(self):
set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
self.rg1, self.name1, self.hostname1, self.hostname1_validation)
self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
def test_set_staticsite_domain_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
set_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
self.staticapp_client.begin_validate_custom_domain_can_be_added_to_static_site.assert_called_once_with(
self.rg1, self.name1, self.hostname1, self.hostname1_validation)
self.staticapp_client.begin_create_or_update_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1,
static_site_custom_domain_request_properties_envelope=self.hostname1_validation)
def test_delete_staticsite_domain_with_resourcegroup(self):
delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1, self.rg1)
self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
def test_delete_staticsite_domain_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_domain(self.mock_cmd, self.name1, self.hostname1)
self.staticapp_client.begin_delete_static_site_custom_domain.assert_called_once_with(
resource_group_name=self.rg1, name=self.name1, domain_name=self.hostname1)
def test_delete_staticsite_environment_with_resourcegroup(self):
delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1, self.rg1)
self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_delete_staticsite_environment_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
delete_staticsite_environment(self.mock_cmd, self.name1, self.environment1)
self.staticapp_client.begin_delete_static_site_build.assert_called_once_with(self.rg1, self.name1, self.environment1)
def test_list_staticsite_functions_with_resourcegroup(self):
list_staticsite_functions(self.mock_cmd, self.name1, self.rg1, self.environment1)
self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
self.rg1, self.name1, self.environment1)
def test_list_staticsite_functions_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_functions(self.mock_cmd, self.name1, environment_name=self.environment1)
self.staticapp_client.list_static_site_build_functions.assert_called_once_with(
self.rg1, self.name1, self.environment1)
def test_list_staticsite_app_settings_with_resourcegroup(self):
list_staticsite_app_settings(self.mock_cmd, self.name1, self.rg1)
self.staticapp_client.list_static_site_app_settings.assert_called_once_with(
self.rg1, self.name1)
def test_list_staticsite_app_settings_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
list_staticsite_app_settings(self.mock_cmd, self.name1)
self.staticapp_client.list_static_site_app_settings.assert_called_once_with(
self.rg1, self.name1)
def test_set_staticsite_app_settings_with_resourcegroup(self):
from azure.mgmt.web.models import StringDictionary
app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
self.staticapp_client.list_static_site_app_settings.return_value = StringDictionary(properties={})
set_staticsite_app_settings(self.mock_cmd, self.name1, app_settings1_input, self.rg1)
self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
def test_set_staticsite_app_settings_without_resourcegroup(self):
from azure.mgmt.web.models import StringDictionary
app_settings1_input = ['key1=val1', 'key2=val2==', 'key3=val3=']
self.staticapp_client.list.return_value = [self.app1, self.app2]
self.staticapp_client.list_static_site_app_settings.return_value = StringDictionary(properties={})
set_staticsite_app_settings(self.mock_cmd, self.name1, app_settings1_input)
self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
def test_delete_staticsite_app_settings_with_resourcegroup(self):
# setup
current_app_settings = {'key1': 'val1', 'key2': 'val2'}
app_settings_keys_to_delete = ['key1']
class AppSettings:
properties = current_app_settings
self.staticapp_client.list_static_site_app_settings.return_value = AppSettings
# action
delete_staticsite_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete, self.rg1)
# validate
self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
def test_delete_staticsite_app_settings_without_resourcegroup(self):
# setup
current_app_settings = {'key1': 'val1', 'key2': 'val2'}
app_settings_keys_to_delete = ['key1']
class AppSettings:
properties = current_app_settings
self.staticapp_client.list_static_site_app_settings.return_value = AppSettings
self.staticapp_client.list.return_value = [self.app1, self.app2]
# action
delete_staticsite_app_settings(self.mock_cmd, self.name1, app_settings_keys_to_delete)
# validate
self.staticapp_client.create_or_update_static_site_app_settings.assert_called_once()
def test_list_staticsite_users_with_resourcegroup(self):
authentication_provider = 'GitHub'
list_staticsite_users(self.mock_cmd, self.name1, self.rg1, authentication_provider=authentication_provider)
self.staticapp_client.list_static_site_users.assert_called_once_with(
self.rg1, self.name1, authentication_provider)
def test_list_staticsite_users_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
authentication_provider = 'GitHub'
list_staticsite_users(self.mock_cmd, self.name1, authentication_provider=authentication_provider)
self.staticapp_client.list_static_site_users.assert_called_once_with(
self.rg1, self.name1, authentication_provider)
def test_invite_staticsite_users_with_resourcegroup(self):
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
roles = 'Contributor,Reviewer'
invitation_expiration_in_hours = 2
from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
roles, invitation_expiration_in_hours, self.rg1)
arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
self.assertEqual(self.rg1, arg_list[0])
self.assertEqual(self.name1, arg_list[1])
self.assertEqual(self.hostname1, arg_list[2].domain)
self.assertEqual(authentication_provider, arg_list[2].provider)
self.assertEqual(user_details, arg_list[2].user_details)
self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
def test_invite_staticsite_users_without_resourcegroup(self):
self.staticapp_client.list.return_value = [self.app1, self.app2]
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
roles = 'Contributor,Reviewer'
invitation_expiration_in_hours = 2
from azure.mgmt.web.models import StaticSiteUserInvitationRequestResource
self.mock_cmd.get_models.return_value = StaticSiteUserInvitationRequestResource
invite_staticsite_users(self.mock_cmd, self.name1, authentication_provider, user_details, self.hostname1,
roles, invitation_expiration_in_hours)
arg_list = self.staticapp_client.create_user_roles_invitation_link.call_args[0]
self.assertEqual(self.rg1, arg_list[0])
self.assertEqual(self.name1, arg_list[1])
self.assertEqual(self.hostname1, arg_list[2].domain)
self.assertEqual(authentication_provider, arg_list[2].provider)
self.assertEqual(user_details, arg_list[2].user_details)
self.assertEqual(invitation_expiration_in_hours, arg_list[2].num_hours_to_expiration)
def test_update_staticsite_users_with_resourcegroup_with_all_args(self):
roles = 'Contributor,Reviewer'
authentication_provider = 'GitHub'
user_details = 'JohnDoe'
user_id = 100
update_staticsite_users(self.mock_cmd, self.name1, roles, authentication_provider=authentication_provider,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
authentication_provider = 'GitHub'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details)
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
self.staticapp_client.update_static_site_user.assert_called_once_with(
self.rg1, self.name1, authentication_provider, user_id, roles=roles)
def test_update_staticsite_users_with_resourcegroup_without_auth_provider_user_not_found(self):
roles = 'Contributor,Reviewer'
user_details = 'JohnDoe'
user_id = '100'
_mock_list_users_for_without_auth_provider(self, 'other_user_id',
'dummy_authentication_provider', 'dummy_user_details')
with self.assertRaises(CLIError):
update_staticsite_users(self.mock_cmd, self.name1, roles,
user_details=user_details, user_id=user_id, resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider(self):
    """Both user id and provider are resolved from the listing via user_details alone."""
    role_str = 'Contributor,Reviewer'
    provider, details, uid = 'GitHub', 'JohnDoe', '100'
    _mock_list_users_for_without_auth_provider(self, uid, provider, details)

    update_staticsite_users(self.mock_cmd, self.name1, role_str,
                            user_details=details, resource_group_name=self.rg1)

    self.staticapp_client.update_static_site_user.assert_called_once_with(
        self.rg1, self.name1, provider, uid, roles=role_str)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_auth_provider_user_not_found(self):
    """Lookup by user_details finds no match -> CLIError."""
    role_str = 'Contributor,Reviewer'
    _mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
                                               'other_user_details')
    with self.assertRaises(CLIError):
        update_staticsite_users(self.mock_cmd, self.name1, role_str,
                                user_details='JohnDoe', resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id(self):
    """The user id is resolved from the listing when only details and provider are given."""
    role_str = 'Contributor,Reviewer'
    provider, details, uid = 'GitHub', 'JohnDoe', '100'
    _mock_list_users_for_without_auth_provider(self, uid, provider, details)

    update_staticsite_users(self.mock_cmd, self.name1, role_str,
                            authentication_provider=provider,
                            user_details=details, resource_group_name=self.rg1)

    self.staticapp_client.update_static_site_user.assert_called_once_with(
        self.rg1, self.name1, provider, uid, roles=role_str)
def test_update_staticsite_users_with_resourcegroup_without_user_id_user_not_found(self):
    """No listing entry matches the given details -> CLIError."""
    role_str = 'Contributor,Reviewer'
    _mock_list_users_for_without_auth_provider(self, 'dummy_user_id', 'dummy_authentication_provider',
                                               'other_user_details')
    with self.assertRaises(CLIError):
        update_staticsite_users(self.mock_cmd, self.name1, role_str,
                                authentication_provider='GitHub',
                                user_details='JohnDoe', resource_group_name=self.rg1)
def test_update_staticsite_users_with_resourcegroup_without_user_id_without_user_details(self):
    """Neither user id nor user_details supplied -> CLIError."""
    role_str = 'Contributor,Reviewer'
    _mock_list_users_for_without_auth_provider(self, '100', 'GitHub', 'JohnDoe')
    with self.assertRaises(CLIError):
        update_staticsite_users(self.mock_cmd, self.name1, role_str,
                                authentication_provider='GitHub',
                                resource_group_name=self.rg1)
def test_list_staticsite_secrets(self):
    """Secrets are fetched with keyword arguments and surface the API key."""
    from ast import literal_eval
    from azure.mgmt.web.models import StringDictionary

    self.staticapp_client.list_static_site_secrets.return_value = StringDictionary(properties={"apiKey": "key"})

    secret = list_staticsite_secrets(self.mock_cmd, self.name1, self.rg1)

    self.staticapp_client.list_static_site_secrets.assert_called_once_with(resource_group_name=self.rg1, name=self.name1)
    # SDK models are inspected by round-tripping their repr through literal_eval.
    parsed = literal_eval(str(secret))
    self.assertEqual(parsed["properties"]["apiKey"], "key")
def test_staticsite_identity_assign(self):
    """Assigning a managed identity triggers a create-or-update on the static site."""
    from azure.mgmt.web.models import ManagedServiceIdentity, ManagedServiceIdentityType

    self.mock_cmd.get_models.return_value = ManagedServiceIdentity, ManagedServiceIdentityType
    assign_identity(self.mock_cmd, self.rg1, self.name1)

    self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
def test_staticsite_identity_remove(self):
    """Removing the managed identity also triggers a create-or-update on the static site."""
    from azure.mgmt.web.models import ManagedServiceIdentityType, Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties

    # get_models is called with a model name; dispatch the two models it asks for.
    def _fake_get_models(model_name):
        if model_name == "ManagedServiceIdentityType":
            return ManagedServiceIdentityType
        return Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties

    self.mock_cmd.get_models.side_effect = _fake_get_models
    remove_identity(self.mock_cmd, self.rg1, self.name1)

    self.staticapp_client.begin_create_or_update_static_site.assert_called_once()
def test_staticsite_identity_show(self):
    """show_identity returns the identity attribute of the fetched site."""
    fake_site = mock.MagicMock()
    fake_site.identity = "identity"
    self.staticapp_client.get_static_site.return_value = fake_site

    self.assertEqual(show_identity(self.mock_cmd, self.rg1, self.name1), "identity")
def test_reset_staticsite_api_key(self):
    # Resetting the API key must: fetch the site, build a reset envelope that
    # carries the site's repository token, and send it to the SDK client.
    from azure.mgmt.web.models import StringDictionary, StaticSiteResetPropertiesARMResource
    self.staticapp_client.get_static_site.return_value = self.app1
    self.staticapp_client.reset_static_site_api_key.return_value = StringDictionary(properties={"apiKey": "new_key"})
    self.mock_cmd.get_models.return_value = StaticSiteResetPropertiesARMResource
    secret = reset_staticsite_api_key(self.mock_cmd, self.name1, self.rg1)
    self.staticapp_client.get_static_site.assert_called_once_with(self.rg1, self.name1)
    self.mock_cmd.get_models.assert_called_once_with('StaticSiteResetPropertiesARMResource')
    self.staticapp_client.reset_static_site_api_key.assert_called_once()
    from ast import literal_eval
    # The envelope is an SDK model; round-trip its repr through literal_eval to
    # inspect the repository token that was actually sent.
    reset_envelope = literal_eval(self.staticapp_client.reset_static_site_api_key.call_args[1]["reset_properties_envelope"].__str__())
    self.assertEqual(reset_envelope["repository_token"], self.token1)
@mock.patch("azure.cli.command_modules.appservice.static_sites.show_functionapp")
def test_functions_link(self, *args, **kwargs):
    """Linking a function app registers it with the static site."""
    fn_name = "functionapp"
    fn_resource_id = "/subscriptions/sub/resourceGroups/{}/providers/Microsoft.Web/sites/{}".format(
        self.rg1, fn_name)

    link_user_function(self.mock_cmd, self.name1, self.rg1, fn_resource_id)

    self.staticapp_client.begin_register_user_provided_function_app_with_static_site.assert_called_once()
@mock.patch("azure.cli.command_modules.appservice.static_sites.get_user_function", return_value=[mock.MagicMock()])
def test_functions_unlink(self, *args, **kwargs):
    # One linked function is reported (patched above); unlinking must detach it.
    unlink_user_function(self.mock_cmd, self.name1, self.rg1)
    self.staticapp_client.detach_user_provided_function_app_from_static_site.assert_called_once()
def test_functions_show(self, *args, **kwargs):
    # Showing linked functions simply proxies to the SDK list call.
    get_user_function(self.mock_cmd, self.name1, self.rg1)
    self.staticapp_client.get_user_provided_function_apps_for_static_site.assert_called_once()
def _set_up_client_mock(self):
    # Replace the static-sites client factory with a MagicMock so every test
    # can stub SDK calls on self.staticapp_client.
    self.mock_cmd = mock.MagicMock()
    self.mock_cmd.cli_ctx = mock.MagicMock()

    self.staticapp_client = mock.MagicMock()
    client_factory_patcher = mock.patch(
        'azure.cli.command_modules.appservice.static_sites._get_staticsites_client_factory', autospec=True)
    # Register the cleanup before starting the patcher so the patch is undone
    # even if a later setup step fails.
    self.addCleanup(client_factory_patcher.stop)
    self.mock_static_site_client_factory = client_factory_patcher.start()
    self.mock_static_site_client_factory.return_value = self.staticapp_client
def _set_up_fake_apps(self):
    # Build two static-site fixtures used throughout the tests.
    from azure.mgmt.web.models import StaticSiteCustomDomainRequestPropertiesARMResource
    # --- first fake app ---
    self.rg1 = 'rg1'
    self.name1 = 'name1'
    self.name1_not_exist = 'name1_not_exist'
    self.location1 = 'location1'
    self.source1 = 'https://github.com/Contoso/My-First-Static-App'
    self.branch1 = 'dev'
    self.token1 = 'TOKEN_1'
    self.environment1 = 'default'
    self.hostname1 = 'www.app1.com'
    self.hostname1_validation = StaticSiteCustomDomainRequestPropertiesARMResource(validation_method="cname-delegation")
    self.app1 = _contruct_static_site_object(
        self.rg1, self.name1, self.location1,
        self.source1, self.branch1, self.token1)
    # --- second fake app ---
    self.rg2 = 'rg2'
    self.name2 = 'name2'
    self.location2 = 'location2'
    self.source2 = 'https://github.com/Contoso/My-Second-Static-App'
    self.branch2 = 'master'
    self.token2 = 'TOKEN_2'
    # NOTE(review): the next two lines overwrite self.environment1 and
    # self.hostname1 set above -- this looks like a copy-paste slip for
    # environment2/hostname2. Tests are self-consistent either way, so
    # confirm which values are intended before renaming.
    self.environment1 = 'prod'
    self.hostname1 = 'www.app2.com'
    self.app2 = _contruct_static_site_object(
        self.rg2, self.name2, self.location2,
        self.source2, self.branch2, self.token2)
def _contruct_static_site_object(rg, app_name, location, source, branch, token):
    """Build a StaticSiteARMResource test fixture with a Free SKU and a fake ARM id."""
    from azure.mgmt.web.models import StaticSiteARMResource, SkuDescription

    site = StaticSiteARMResource(
        location=location,
        repository_url=source,
        branch=branch,
        repository_token=token,
        sku=SkuDescription(name='Free', tier='Free'))
    site.name = app_name
    site.id = "/subscriptions/sub/resourceGroups/{}/providers/Microsoft.Web/staticSites/{}".format(rg, app_name)
    return site
def _mock_list_users_for_without_auth_provider(self, user_id, authentication_provider, user_details):
class User:
def __init__(self, name, provider, display_name):
self.name = name
self.provider = provider
self.display_name = display_name
user1 = User(user_id, authentication_provider, user_details)
user2 = User(user_id + '2', authentication_provider + '2', user_details + '2')
self.staticapp_client.list_static_site_users.return_value = [user1, user2]
| true | true |
f7365d53fe98443b87d6023ffdb580ca656c6f9e | 11,556 | py | Python | pybind/nos/v7_1_0/interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class native_vlan_classification(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport/trunk/native-vlan-classification. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # NOTE: generated code -- regenerate from the YANG model rather than
  # editing this file by hand.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__native_vlan_id','__native_vlan_ctag_id',)

  _yang_name = 'native-vlan-classification'
  _rest_name = ''

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit keyword argument wins, otherwise
    # it is inherited from the parent pybind object (or disabled).
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Resolve the extension-methods dictionary with the same precedence.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Leaf defaults carrying the YANG restrictions for each member.
    self.__native_vlan_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)
    self.__native_vlan_ctag_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)

    load = kwargs.pop("load", None)
    # At most one positional argument is accepted: a compatible object whose
    # changed elements are copied into this container.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG schema path of this container, rooted at the parent when attached.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'interface', u'hundredgigabitethernet', u'switchport', u'trunk', u'native-vlan-classification']

  def _rest_path(self):
    # REST path; this container has an empty rest name, so it contributes
    # nothing of its own to the parent's path.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'HundredGigabitEthernet', u'switchport', u'trunk']

  def _get_native_vlan_id(self):
    """
    Getter method for native_vlan_id, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/native_vlan_id (native-vlan-type)

    YANG Description: The native vlan for an interface.
    """
    return self.__native_vlan_id

  def _set_native_vlan_id(self, v, load=False):
    """
    Setter method for native_vlan_id, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/native_vlan_id (native-vlan-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_native_vlan_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_native_vlan_id() directly.

    YANG Description: The native vlan for an interface.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """native_vlan_id must be of a type compatible with native-vlan-type""",
          'defined-type': "brocade-interface:native-vlan-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)""",
        })

    self.__native_vlan_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_native_vlan_id(self):
    # Restore the leaf to its default (unconfigured) dynamic class.
    self.__native_vlan_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)

  def _get_native_vlan_ctag_id(self):
    """
    Getter method for native_vlan_ctag_id, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/native_vlan_ctag_id (dot1q-vlan-type)

    YANG Description: Associate a Ctag.
    """
    return self.__native_vlan_ctag_id

  def _set_native_vlan_ctag_id(self, v, load=False):
    """
    Setter method for native_vlan_ctag_id, mapped from YANG variable /interface/hundredgigabitethernet/switchport/trunk/native_vlan_classification/native_vlan_ctag_id (dot1q-vlan-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_native_vlan_ctag_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_native_vlan_ctag_id() directly.

    YANG Description: Associate a Ctag.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """native_vlan_ctag_id must be of a type compatible with dot1q-vlan-type""",
          'defined-type': "brocade-interface:dot1q-vlan-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)""",
        })

    self.__native_vlan_ctag_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_native_vlan_ctag_id(self):
    # Restore the leaf to its default (unconfigured) dynamic class.
    self.__native_vlan_ctag_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)

  # Public property wrappers over the private getter/setter pairs.
  native_vlan_id = __builtin__.property(_get_native_vlan_id, _set_native_vlan_id)
  native_vlan_ctag_id = __builtin__.property(_get_native_vlan_ctag_id, _set_native_vlan_ctag_id)

  _pyangbind_elements = {'native_vlan_id': native_vlan_id, 'native_vlan_ctag_id': native_vlan_ctag_id, }
| 69.614458 | 765 | 0.74325 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class native_vlan_classification(PybindBase):
  """
  Auto-generated pybind container for the YANG path
  /interface/hundredgigabitethernet/switchport/trunk/native-vlan-classification
  (module brocade-interface). Do not edit by hand.
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__native_vlan_id','__native_vlan_ctag_id',)

  _yang_name = 'native-vlan-classification'
  _rest_name = ''

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # XPath helper: explicit kwarg wins, else inherit from parent, else off.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Extension methods resolved with the same precedence.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    self.__native_vlan_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)
    self.__native_vlan_ctag_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)

    load = kwargs.pop("load", None)
    # Optional single positional argument: copy changed elements from a
    # compatible object into this container.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    """Return the YANG schema path of this container."""
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'interface', u'hundredgigabitethernet', u'switchport', u'trunk', u'native-vlan-classification']

  def _rest_path(self):
    """Return the REST path; this container has no rest name of its own."""
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'HundredGigabitEthernet', u'switchport', u'trunk']

  def _get_native_vlan_id(self):
    """Getter for the native_vlan_id leaf (YANG type native-vlan-type)."""
    return self.__native_vlan_id

  def _set_native_vlan_id(self, v, load=False):
    """Setter for the native_vlan_id leaf; raises ValueError on a type mismatch."""
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """native_vlan_id must be of a type compatible with native-vlan-type""",
          'defined-type': "brocade-interface:native-vlan-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)""",
        })

    self.__native_vlan_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_native_vlan_id(self):
    """Reset native_vlan_id to its default (unconfigured) state."""
    self.__native_vlan_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..8191']}), is_leaf=True, yang_name="native-vlan-id", rest_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Set the native VLAN characteristics of the \nLayer2 trunk interface for classifying untagged\ntraffic', u'alt-name': u'native-vlan', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='native-vlan-type', is_config=True)

  def _get_native_vlan_ctag_id(self):
    """Getter for the native_vlan_ctag_id leaf (YANG type dot1q-vlan-type)."""
    return self.__native_vlan_ctag_id

  def _set_native_vlan_ctag_id(self, v, load=False):
    """Setter for the native_vlan_ctag_id leaf; raises ValueError on a type mismatch."""
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """native_vlan_ctag_id must be of a type compatible with dot1q-vlan-type""",
          'defined-type': "brocade-interface:dot1q-vlan-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)""",
        })

    self.__native_vlan_ctag_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_native_vlan_ctag_id(self):
    """Reset native_vlan_ctag_id to its default (unconfigured) state."""
    self.__native_vlan_ctag_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4094']}), is_leaf=True, yang_name="native-vlan-ctag-id", rest_name="ctag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'native_vlan_phy_interface_conf', u'info': u'Associate a Ctag.', u'alt-name': u'ctag'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='dot1q-vlan-type', is_config=True)

  # Public property wrappers over the private getter/setter pairs.
  native_vlan_id = __builtin__.property(_get_native_vlan_id, _set_native_vlan_id)
  native_vlan_ctag_id = __builtin__.property(_get_native_vlan_ctag_id, _set_native_vlan_ctag_id)

  _pyangbind_elements = {'native_vlan_id': native_vlan_id, 'native_vlan_ctag_id': native_vlan_ctag_id, }
| true | true |
f7365e5d99eac8285580b730f2f811951fa7ba87 | 1,861 | py | Python | test_runner.py | tmatsuo/appengine-blobstoremigrator-python | c59c84c1a91cdab47489fc4d32ddcf35c579ba6a | [
"Apache-2.0"
] | 12 | 2015-06-10T19:48:45.000Z | 2021-04-15T06:11:01.000Z | test_runner.py | tmatsuo/appengine-blobstoremigrator-python | c59c84c1a91cdab47489fc4d32ddcf35c579ba6a | [
"Apache-2.0"
] | 1 | 2021-03-21T10:33:24.000Z | 2021-03-21T10:33:24.000Z | test_runner.py | tmatsuo/appengine-blobstoremigrator-python | c59c84c1a91cdab47489fc4d32ddcf35c579ba6a | [
"Apache-2.0"
] | 6 | 2016-07-12T02:11:43.000Z | 2021-10-03T00:54:03.000Z | #!/usr/bin/python
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
# Directory layout used by the test runner.
CUR_DIR = os.path.dirname(__file__)
SRC_DIR = os.path.join(CUR_DIR, 'src')
TEST_DIR = os.path.join(CUR_DIR, 'test')
LIB_DIR = os.path.join(SRC_DIR, 'lib')


def _fix_path():
  """
  Finds the google_appengine directory and fixes Python imports to use it.

  (Mostly) copied from Pipelines API. Returns None; mutates sys.path.
  """
  if SRC_DIR not in sys.path:
    sys.path.append(SRC_DIR)
  if LIB_DIR not in sys.path:
    sys.path.append(LIB_DIR)
  # BUG FIX: os.environ.get('PYTHONPATH') returns None when the variable is
  # unset, which made the original .split() call raise AttributeError.
  all_paths = os.environ.get('PYTHONPATH', '').split(os.pathsep)
  for path_dir in all_paths:
    dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py')
    if os.path.exists(dev_appserver_path):
      google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path))
      sys.path.append(google_appengine)
      # Use the next import will fix up sys.path even further to bring in
      # any dependent lib directories that the SDK needs.
      dev_appserver = __import__('dev_appserver')
      sys.path.extend(dev_appserver.EXTRA_PATHS)
      return

_fix_path()
def run_tests():
  """Discover and execute every *_test.py module under TEST_DIR."""
  loader = unittest.TestLoader()
  discovered = loader.discover(TEST_DIR, pattern='*_test.py')
  runner = unittest.TextTestRunner(verbosity=1)
  runner.run(discovered)


if __name__ == '__main__':
  run_tests()
| 31.016667 | 78 | 0.73079 |
import os
import sys
import unittest
CUR_DIR = os.path.dirname(__file__)
SRC_DIR = os.path.join(CUR_DIR, 'src')
TEST_DIR = os.path.join(CUR_DIR, 'test')
LIB_DIR = os.path.join(SRC_DIR, 'lib')
def _fix_path():
    """Locate the google_appengine SDK and extend sys.path to use it.

    Appends SRC_DIR and LIB_DIR to sys.path if missing, then scans every
    PYTHONPATH entry for dev_appserver.py; the first hit's directory is added
    to sys.path and dev_appserver's own EXTRA_PATHS are appended as well.
    """
    if SRC_DIR not in sys.path:
        sys.path.append(SRC_DIR)
    if LIB_DIR not in sys.path:
        sys.path.append(LIB_DIR)
    # BUGFIX: os.environ.get('PYTHONPATH') returns None when the variable is
    # unset, which crashed on .split(); default to '' so the loop is a no-op.
    all_paths = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    for path_dir in all_paths:
        dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py')
        if os.path.exists(dev_appserver_path):
            google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path))
            sys.path.append(google_appengine)
            # Importing dev_appserver fixes up sys.path further with the
            # SDK's own dependent lib directories.
            dev_appserver = __import__('dev_appserver')
            sys.path.extend(dev_appserver.EXTRA_PATHS)
            return
_fix_path()
def run_tests():
    """Discover every '*_test.py' module under TEST_DIR and run it."""
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=1)
    runner.run(loader.discover(TEST_DIR, pattern='*_test.py'))
# Script entry point: run the whole unit-test suite.
if __name__ == '__main__':
    run_tests()
| true | true |
f7365eb934ee15b523ebb1bbcdf7881c563276d4 | 18,861 | py | Python | stratipy/filtering_diffusion.py | candleinwindsteve/Stratipy | ea505df1e4830141c590922d654edfbde498b924 | [
"BSD-3-Clause"
] | 1 | 2021-03-08T22:52:19.000Z | 2021-03-08T22:52:19.000Z | stratipy/filtering_diffusion.py | candleinwindsteve/Stratipy | ea505df1e4830141c590922d654edfbde498b924 | [
"BSD-3-Clause"
] | null | null | null | stratipy/filtering_diffusion.py | candleinwindsteve/Stratipy | ea505df1e4830141c590922d654edfbde498b924 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import sys
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import norm
from scipy.io import loadmat, savemat
from nbs_class import Ppi, Patient
from subprocess import call
# import h5py
import os
import glob
import time
import datetime
# NOTE mutationProfileDiffusion -> propagation
# mutationProfile -> M, PPIAdjacencyMatrix -> adj, dataFolder -> result_folder
# PPI_influence_min -> ppi_influence_min, PPI_influence_max-> ppi_influence_max
# PPI_influence()-> calcul_ppi_influence(), PPI_influence -> ppi_influence
# influenceDistance->influence_distance
# influenceMat -> ppi_influence, PPIneighboorsMax -> ngh_max,
# bestInfluencers -> best_influencers
# filteredGenes -> deg0, keepSingletons -> keep_singletons
# mutationsMin -> min_mutation, mutationsMax -> mutationsMax
# newnet -> ppi_ngh, netFinal -> ppi_final, mutFinal -> mut_final
# filteredPatients -> filtered_patients
# @profile
def propagation(M, adj, alpha=0.7, tol=10e-6):
    """Diffuse ``M`` over the network by an iterated random walk.

    Repeats ``curr = alpha * prev.dot(A) + (1 - alpha) * M`` (with ``A`` the
    degree-normalized adjacency matrix, self-loops added) until the Frobenius
    norm of two successive iterates drops below ``tol``.

    Parameters
    ----------
    M : sparse matrix
        Data matrix to be diffused.
    adj : sparse matrix
        Adjacency matrix of the network.
    alpha : float, default: 0.7
        Diffusion factor, 0 <= alpha <= 1 (alpha = 0 means no diffusion).
    tol : float, default: 10e-6
        Convergence threshold on the step-to-step difference.

    Returns
    -------
    sparse matrix
        The smoothed matrix.
    """
    print(' ==== propagation ==== ')
    n = adj.shape[0]
    # Add self-loops so every node has degree >= 1 before normalizing.
    adj = adj + sp.eye(n, dtype=np.float32)
    inv_degree = sp.dia_matrix((np.array(adj.sum(axis=0)) ** -1, [0]),
                               shape=(n, n),
                               dtype=np.float32)
    walk = adj.dot(inv_degree)
    prev = M.astype(np.float32)
    curr = alpha * prev.dot(walk) + (1 - alpha) * M
    step = 0
    while norm(curr - prev) > tol:
        prev, curr = curr, alpha * curr.dot(walk) + (1 - alpha) * M
        step += 1
        print('Propagation iteration = {} ----- {}'.format(
            step, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    return curr
# @profile
def compare_ij_ji(ppi, out_min=True, out_max=True):
    """Symmetrize an influence matrix by element-wise min and/or max.

    Propagation is not symmetric, so for every pair the weights a_ij and a_ji
    are compared; the minimum (avoids hub effects) and/or maximum of the two
    is kept, yielding symmetric matrix/matrices.

    Parameters
    ----------
    ppi : sparse matrix
        Matrix to symmetrize.
    out_min, out_max : boolean, default: True
        Whether to return the minimum- and/or maximum-weight matrix.

    Returns
    -------
    (ppi_min, ppi_max), ppi_min or ppi_max : sparse lil matrix / matrices
        float32 symmetric matrix/matrices, depending on the flags.

    Raises
    ------
    ValueError
        If both out_min and out_max are False.
    """
    n = ppi.shape[0]
    ppi = ppi.tolil()  # lil_matrix supports reshape
    ppi_transp = sp.lil_matrix.transpose(ppi)
    # Flatten both to 1 x n^2 so (i, j) and (j, i) line up column-wise.
    ppi_1d = ppi.reshape((1, n**2))
    ppi_1d_transp = ppi_transp.reshape((1, n**2))
    stacked = sp.vstack([ppi_1d, ppi_1d_transp])

    def _symmetrized(reduced):
        # Reshape the 1 x n^2 column-wise reduction back to n x n.
        return reduced.tolil().reshape((n, n)).astype(np.float32)

    # BUGFIX: the single-output branches previously passed dtype= to
    # coo_matrix.min/max, which do not accept that keyword (TypeError);
    # the cast is now done afterwards via astype in all branches.
    if out_min and out_max:
        ppi_min = _symmetrized(stacked.min(axis=0))
        ppi_max = _symmetrized(stacked.max(axis=0))
        return ppi_min, ppi_max
    elif out_min:
        return _symmetrized(stacked.min(axis=0))
    elif out_max:
        return _symmetrized(stacked.max(axis=0))
    else:
        # Previously this only printed a message and returned None.
        raise ValueError('You have to choice Min or Max')
# @profile
def calcul_final_influence(M, adj, result_folder, influence_weight='min',
                           simplification=True, compute=False, overwrite=False,
                           alpha=0.7, tol=10e-6):
    """Compute (or load from cache) the smoothed PPI influence matrix.

    Network propagation is applied to the PPI; two artefacts are cached as
    MATLAB-style .mat files under ``result_folder``:
      - 'influence_distance/influence_distance_alpha={}_tol={}.mat'
      - 'final_influence/final_influence_simp={}_alpha={}_tol={}.mat'
    Directories are created automatically. When no file with the current
    parameters exists and ``compute`` is False, the newest cached file of the
    directory is used instead (possibly with different parameters).

    Parameters
    ----------
    M : sparse matrix
        Data matrix to be diffused.
    adj : sparse matrix
        Adjacency matrix.
    result_folder : str
        Root directory of the cache sub-directories.
    influence_weight : {'min', 'max'}, default: 'min'
        Which symmetrized influence matrix to return.
    simplification : boolean, default: True
        If True, multiply the diffused influence by the adjacency matrix so
        only direct PPI edges are kept.
    compute : boolean, default: False
        If True, compute a new influence matrix when no cached file with
        these parameters exists; if False, fall back to the newest cache.
    overwrite : boolean, default: False
        Currently unused; kept for interface compatibility.
    alpha : float, default: 0.7
        Diffusion factor, 0 <= alpha <= 1.
    tol : float, default: 10e-6
        Convergence threshold of the propagation.

    Returns
    -------
    final_influence : sparse matrix
        Symmetrized (min or max weight) smoothed influence matrix.
    """
    influence_distance_directory = result_folder + 'influence_distance/'
    influence_distance_file = (
        influence_distance_directory +
        'influence_distance_alpha={}_tol={}.mat'.format(alpha, tol))

    final_influence_directory = result_folder + 'final_influence/'
    final_influence_file = (
        final_influence_directory +
        'final_influence_simp={}_alpha={}_tol={}.mat'.format(
            simplification, alpha, tol))

    # Cache hit: a final-influence file with exactly these parameters exists.
    if os.path.exists(final_influence_file):
        final_influence_data = loadmat(final_influence_file)
        if influence_weight == 'min':
            final_influence = final_influence_data['final_influence_min']
        else:
            final_influence = final_influence_data['final_influence_max']
        print('final influence matrix', type(final_influence), final_influence.shape)
        print('***** Same parameters file of FINAL INFLUENCE already exists ***** {}'
              .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    else:
        if compute:
            start = time.time()
            # Reuse the influence-distance cache when available.
            if os.path.exists(influence_distance_file):
                influence_data = loadmat(influence_distance_file)
                influence = influence_data['influence_distance']
                print('***** Same parameters file of INFLUENCE DISTANCE already exists ***** {}'
                      .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            else:
                influence = propagation(M, adj, alpha, tol)
                print('influence', type(influence), influence.dtype)
                # Save the raw influence distance (before simplification).
                os.makedirs(influence_distance_directory, exist_ok=True)
                print(' ==== Start to save INFLUENCE DISTANCE ==== {}'
                      .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                start_save = time.time()
                savemat(influence_distance_file,
                        {'influence_distance': influence,
                         'alpha': alpha},
                        do_compression=True)
                end_save = time.time()
                print("---------- save time = {} ---------- {}"
                      .format(datetime.timedelta(seconds=end_save - start_save),
                              datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            # Simplification: keep only direct PPI edges.
            if simplification:
                influence = influence.multiply(sp.lil_matrix(adj))
            else:
                print("---------- No simplification ----------")
            # Compare influence[i, j] and influence[j, i] -> min/max matrices.
            start_ij = time.time()
            final_influence_min, final_influence_max = compare_ij_ji(
                influence, out_min=True, out_max=True)
            end_ij = time.time()
            print("---------- compare ij/ji = {} ---------- {}"
                  .format(datetime.timedelta(seconds=end_ij - start_ij),
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            # Save both symmetrized matrices with the parameters in the name.
            os.makedirs(final_influence_directory, exist_ok=True)
            print(' ==== Start to save FINAL INFLUENCE ==== {}'
                  .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            start_save = time.time()
            savemat(final_influence_file,
                    {'final_influence_min': final_influence_min,
                     'final_influence_max': final_influence_max,
                     'alpha': alpha}, do_compression=True)
            end_save = time.time()
            print("---------- save time = {} ---------- {}"
                  .format(datetime.timedelta(seconds=end_save - start_save),
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            if influence_weight == 'min':
                final_influence = final_influence_min
            else:
                final_influence = final_influence_max
            end = time.time()
            print("---------- Influence = {} ---------- {}"
                  .format(datetime.timedelta(seconds=end-start),
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        else:
            # Fallback: take the most recent cached file of each directory.
            # BUGFIX: the original iterated over final_influence_file (a file
            # path) instead of final_influence_directory, so the glob pattern
            # never matched and 'x == final_influence_directory' was never
            # true, leaving final_influence unset.
            for x in final_influence_directory, influence_distance_directory:
                print(x)
                newest_file = max(glob.iglob(x + '*.mat'),
                                  key=os.path.getctime)
                final_influence_data = loadmat(newest_file)
                if x == final_influence_directory:
                    if influence_weight == 'min':
                        final_influence = final_influence_data['final_influence_min']
                    else:
                        final_influence = final_influence_data['final_influence_max']
    return final_influence
# @profile
def best_neighboors(ppi_filt, final_influence, ngh_max):
    """Keep, for each gene, only edges to its most influential neighbors.

    Parameters
    ----------
    ppi_filt : sparse matrix
        PPI restricted to genes present in the network.
    final_influence : sparse matrix
        Symmetrized smoothed influence matrix.
    ngh_max : int
        Number of best influencers to keep per gene.

    Returns
    -------
    scipy.sparse.csc_matrix
        PPI where each row keeps only its best-influencer entries, then
        symmetrized with the element-wise maximum (np.min would be too
        stringent).
    """
    k = ngh_max + 1  # the central protein itself counts as one entry
    # BUGFIX: todense() returns np.matrix, whose rows stay 2-D; slicing
    # argpartition's (1, n) result with [:k] kept ALL indices, making the
    # top-k filter a no-op. Converting to plain ndarrays gives 1-D rows.
    influence = np.asarray(final_influence.todense())
    filt = np.asarray(ppi_filt.todense())
    ppi_ngh = np.zeros(filt.shape, dtype=np.float32)
    for i in range(filt.shape[0]):
        # Indices of the k largest influence values in row i (unordered).
        best_influencers = np.argpartition(-influence[i, :], k)[:k]
        ppi_ngh[i, best_influencers] = filt[i, best_influencers]
    # Symmetrize: keep an edge if either endpoint selected it.
    ppi_ngh = np.max(np.dstack((ppi_ngh, ppi_ngh.T)), axis=2)
    return sp.csc_matrix(ppi_ngh)
# @profile
def filter_ppi_patients(ppi_total, mut_total, ppi_filt, final_influence, ngh_max,
                        keep_singletons=False,
                        min_mutation=10, max_mutation=2000):
    """Keep only best-influencer PPI edges and filter patients by mutation count.

    Parameters
    ----------
    ppi_total : sparse matrix
        Full PPI built from all sparse sub-matrices (AA, ..., CC).
    mut_total : sparse matrix
        Patients' mutation profiles (rows: patients, columns: genes).
    ppi_filt : sparse matrix
        PPI restricted to genes present in the network.
    final_influence : sparse matrix
        Symmetrized smoothed influence matrix.
    ngh_max : int
        Number of best influencers to keep per gene.
    keep_singletons : boolean, default: False
        If True, genes absent from the PPI (found only in mutation profiles)
        are kept as isolated nodes; otherwise only PPI-annotated genes remain.
    min_mutation, max_mutation : int
        Exclusive lower and upper bounds on mutations per retained patient.

    Returns
    -------
    ppi_final, mut_final : sparse matrix
        Filtered PPI and mutation profiles.
    """
    # n = final_influence.shape[0]
    # final_influence = index_to_sym_matrix(n, final_influence)

    ppi_ngh = best_neighboors(ppi_filt, final_influence, ngh_max)
    print('ppi_ngh ', ppi_ngh.dtype)
    deg0 = Ppi(ppi_total).deg == 0  # True if protein degree = 0
    if keep_singletons:
        # Pad the PPI with empty rows/columns for the degree-0 genes so the
        # matrix dimensions match the untouched mutation profile.
        ppi_final = sp.bmat([
            [ppi_ngh, sp.csc_matrix((ppi_ngh.shape[0], sum(deg0)))],
            [sp.csc_matrix((sum(deg0), ppi_ngh.shape[0])),
             sp.csc_matrix((sum(deg0), sum(deg0)))]
            ])  # -> COO matrix
        # mut_final=sp.bmat([[mut_total[:,deg0==False],mut_total[:,deg0==True]]])
        mut_final = mut_total
    else:
        ppi_final = ppi_ngh
        # Drop mutation columns for genes with no PPI connection.
        mut_final = mut_total[:, Ppi(ppi_total).deg > 0]
    # filtered_patients = np.array([k < min_mutation or k > max_mutation for k in Patient(mut_final).mut_per_patient])
    # mut_final = mut_final[filtered_patients == False, :]
    # to avoid worse comparison '== False'
    mut_final = mut_final[np.array([min_mutation < k < max_mutation for k in
                                    Patient(mut_final).mut_per_patient])]
    print("Removing %i patients with less than %i or more than %i mutations" %
          (mut_total.shape[0]-mut_final.shape[0], min_mutation, max_mutation))
    print("New adjacency matrix:", ppi_final.shape)
    print("New mutation profile matrix:", mut_final.shape)
    return ppi_final, mut_final
# @profile
def quantile_norm_mean(anarray):
    """Quantile-normalize so all rows of *anarray* share one distribution.

    Each rank is replaced by the mean of the values holding that rank across
    all rows, so every row ends up with an identical value distribution.

    Parameters
    ----------
    anarray : 2-D array-like
        Matrix to normalize (rows are the observations being equalized).

    Returns
    -------
    numpy.ndarray
        Quantile-normalized matrix, same shape as the input.
    """
    data = np.squeeze(np.asarray(anarray.T))
    cols = np.arange(data.shape[1])
    # Per-column sort order, then the mean of each sorted rank across columns.
    order = np.argsort(data, axis=0)
    rank_means = np.mean(data[order, cols], axis=1)
    normalized = np.zeros_like(data)
    normalized[order, cols] = rank_means[:, np.newaxis]
    return normalized.T
# @profile
def quantile_norm_median(anarray):
    """Quantile-normalize using the median of each rank across rows.

    Same procedure as quantile_norm_mean but each rank takes the median of
    the values holding that rank, which is more robust to outliers.
    """
    data = np.squeeze(np.asarray(anarray.T))
    cols = np.arange(data.shape[1])
    order = np.argsort(data, axis=0)
    rank_medians = np.median(data[order, cols], axis=1)
    normalized = np.zeros_like(data)
    normalized[order, cols] = rank_medians[:, np.newaxis]
    return normalized.T
# @profile
def propagation_profile(mut_raw, adj, alpha, tol, qn):
    """Diffuse the raw mutation profile and optionally quantile-normalize it.

    Parameters
    ----------
    mut_raw : sparse matrix
        Raw patient x gene mutation profile.
    adj : sparse matrix
        PPI adjacency matrix used for the diffusion.
    alpha : float
        Diffusion factor; alpha <= 0 skips propagation entirely.
    tol : float
        Convergence threshold of the propagation.
    qn : str or None
        'mean' / 'median' selects the quantile normalization applied after
        diffusion; any other value keeps the diffused profile as-is.

    Returns
    -------
    (mut_type, profile) : (str, matrix)
        mut_type is one of 'mean_qn', 'median_qn', 'diff', 'raw'; profile is
        the corresponding dense mutation profile.
    """
    start = time.time()
    if alpha > 0:
        profile = propagation(mut_raw, adj, alpha, tol).todense()
        profile[np.isnan(profile)] = 0  # propagation can produce NaNs
        if qn == 'mean':
            mut_type = 'mean_qn'
            profile = quantile_norm_mean(profile)
        elif qn == 'median':
            mut_type = 'median_qn'
            profile = quantile_norm_median(profile)
        else:
            mut_type = 'diff'
    else:
        mut_type = 'raw'
        profile = mut_raw.todense()
    # Timing/report block was previously duplicated in both branches.
    end = time.time()
    print("---------- Propagation on {} mutation profile = {} ---------- {}"
          .format(mut_type,
                  datetime.timedelta(seconds=end-start),
                  datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    return mut_type, profile
| 37.05501 | 118 | 0.600286 |
import sys
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import norm
from scipy.io import loadmat, savemat
from nbs_class import Ppi, Patient
from subprocess import call
import os
import glob
import time
import datetime
def propagation(M, adj, alpha=0.7, tol=10e-6):
    """Diffuse M over the network by an iterated random walk.

    Repeats ``X2 = alpha * X1.dot(A) + (1 - alpha) * M`` (A being the
    degree-normalized adjacency matrix with self-loops added) until the
    Frobenius norm of two successive iterates drops below ``tol``.

    Parameters
    ----------
    M : sparse matrix
        Data matrix to be diffused.
    adj : sparse matrix
        Adjacency matrix.
    alpha : float, default: 0.7
        Diffusion factor, 0 <= alpha <= 1 (alpha = 0 means no diffusion).
    tol : float, default: 10e-6
        Convergence threshold on the step-to-step difference.

    Returns
    -------
    X2 : sparse matrix
        Smoothed matrix.
    """
    print(' ==== propagation ==== ')
    n = adj.shape[0]
    # Add self-loops so every node has degree >= 1 before normalizing.
    adj = adj+sp.eye(n, dtype=np.float32)
    d = sp.dia_matrix((np.array(adj.sum(axis=0))**-1, [0]),
                      shape=(n, n),
                      dtype=np.float32)
    A = adj.dot(d)
    X1 = M.astype(np.float32)
    X2 = alpha * X1.dot(A) + (1-alpha) * M
    i = 0
    while norm(X2-X1) > tol:
        X1 = X2
        X2 = alpha * X1.dot(A) + (1-alpha) * M
        i += 1
        print('Propagation iteration = {} ----- {}'.format(
            i, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    return X2
def compare_ij_ji(ppi, out_min=True, out_max=True):
    """Symmetrize an influence matrix by element-wise min and/or max.

    Propagation is not symmetric, so for every pair the weights a_ij and a_ji
    are compared; the minimum (avoids hub effects) and/or maximum of the two
    is kept, yielding symmetric matrix/matrices.

    Parameters
    ----------
    ppi : sparse matrix
        Matrix to symmetrize.
    out_min, out_max : boolean, default: True
        Whether to return the minimum- and/or maximum-weight matrix.

    Returns
    -------
    (ppi_min, ppi_max), ppi_min or ppi_max : sparse lil matrix / matrices
        float32 symmetric matrix/matrices, depending on the flags.

    Raises
    ------
    ValueError
        If both out_min and out_max are False.
    """
    n = ppi.shape[0]
    ppi = ppi.tolil()  # lil_matrix supports reshape
    ppi_transp = sp.lil_matrix.transpose(ppi)
    # Flatten both to 1 x n^2 so (i, j) and (j, i) line up column-wise.
    ppi_1d = ppi.reshape((1, n**2))
    ppi_1d_transp = ppi_transp.reshape((1, n**2))
    stacked = sp.vstack([ppi_1d, ppi_1d_transp])

    def _symmetrized(reduced):
        # Reshape the 1 x n^2 column-wise reduction back to n x n.
        return reduced.tolil().reshape((n, n)).astype(np.float32)

    # BUGFIX: the single-output branches previously passed dtype= to
    # coo_matrix.min/max, which do not accept that keyword (TypeError);
    # the cast is now done afterwards via astype in all branches.
    if out_min and out_max:
        ppi_min = _symmetrized(stacked.min(axis=0))
        ppi_max = _symmetrized(stacked.max(axis=0))
        return ppi_min, ppi_max
    elif out_min:
        return _symmetrized(stacked.min(axis=0))
    elif out_max:
        return _symmetrized(stacked.max(axis=0))
    else:
        # Previously this only printed a message and returned None.
        raise ValueError('You have to choice Min or Max')
def calcul_final_influence(M, adj, result_folder, influence_weight='min',
                           simplification=True, compute=False, overwrite=False,
                           alpha=0.7, tol=10e-6):
    """Compute (or load from cache) the smoothed PPI influence matrix.

    Network propagation is applied to the PPI; the influence-distance matrix
    and the symmetrized (min/max) final influence matrices are cached as
    MATLAB-style .mat files under ``result_folder``. When no file with the
    current parameters exists and ``compute`` is False, the newest cached
    file of the directory is used instead.

    Parameters
    ----------
    M : sparse matrix
        Data matrix to be diffused.
    adj : sparse matrix
        Adjacency matrix.
    result_folder : str
        Root directory of the cache sub-directories.
    influence_weight : {'min', 'max'}, default: 'min'
        Which symmetrized influence matrix to return.
    simplification : boolean, default: True
        If True, multiply the diffused influence by the adjacency matrix.
    compute : boolean, default: False
        If True, compute when no cached file with these parameters exists.
    overwrite : boolean, default: False
        Currently unused; kept for interface compatibility.
    alpha : float, default: 0.7
        Diffusion factor, 0 <= alpha <= 1.
    tol : float, default: 10e-6
        Convergence threshold of the propagation.

    Returns
    -------
    final_influence : sparse matrix
        Symmetrized (min or max weight) smoothed influence matrix.
    """
    # BUGFIX: this copy had truncated variable names ('directory', 'param')
    # while later lines referenced final_influence_directory and
    # existance_same_param, raising NameError on every call.
    influence_distance_directory = result_folder + 'influence_distance/'
    influence_distance_file = (
        influence_distance_directory +
        'influence_distance_alpha={}_tol={}.mat'.format(alpha, tol))

    final_influence_directory = result_folder + 'final_influence/'
    final_influence_file = (
        final_influence_directory +
        'final_influence_simp={}_alpha={}_tol={}.mat'.format(
            simplification, alpha, tol))

    # Cache hit: a final-influence file with exactly these parameters exists.
    if os.path.exists(final_influence_file):
        final_influence_data = loadmat(final_influence_file)
        if influence_weight == 'min':
            final_influence = final_influence_data['final_influence_min']
        else:
            final_influence = final_influence_data['final_influence_max']
        print('final influence matrix', type(final_influence), final_influence.shape)
        print('***** Same parameters file of FINAL INFLUENCE already exists ***** {}'
              .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    else:
        if compute:
            start = time.time()
            # Reuse the influence-distance cache when available.
            if os.path.exists(influence_distance_file):
                influence_data = loadmat(influence_distance_file)
                influence = influence_data['influence_distance']
                print('***** Same parameters file of INFLUENCE DISTANCE already exists ***** {}'
                      .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            else:
                influence = propagation(M, adj, alpha, tol)
                print('influence', type(influence), influence.dtype)
                # Save the raw influence distance (before simplification).
                os.makedirs(influence_distance_directory, exist_ok=True)
                print(' ==== Start to save INFLUENCE DISTANCE ==== {}'
                      .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                start_save = time.time()
                savemat(influence_distance_file,
                        {'influence_distance': influence,
                         'alpha': alpha},
                        do_compression=True)
                end_save = time.time()
                print("---------- save time = {} ---------- {}"
                      .format(datetime.timedelta(seconds=end_save - start_save),
                              datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            # Simplification: keep only direct PPI edges.
            if simplification:
                influence = influence.multiply(sp.lil_matrix(adj))
            else:
                print("---------- No simplification ----------")
            # Compare influence[i, j] and influence[j, i] -> min/max matrices.
            start_ij = time.time()
            final_influence_min, final_influence_max = compare_ij_ji(
                influence, out_min=True, out_max=True)
            end_ij = time.time()
            print("---------- compare ij/ji = {} ---------- {}"
                  .format(datetime.timedelta(seconds=end_ij - start_ij),
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            # Save both symmetrized matrices with the parameters in the name.
            os.makedirs(final_influence_directory, exist_ok=True)
            print(' ==== Start to save FINAL INFLUENCE ==== {}'
                  .format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            start_save = time.time()
            savemat(final_influence_file,
                    {'final_influence_min': final_influence_min,
                     'final_influence_max': final_influence_max,
                     'alpha': alpha}, do_compression=True)
            end_save = time.time()
            print("---------- save time = {} ---------- {}"
                  .format(datetime.timedelta(seconds=end_save - start_save),
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
            if influence_weight == 'min':
                final_influence = final_influence_min
            else:
                final_influence = final_influence_max
            end = time.time()
            print("---------- Influence = {} ---------- {}"
                  .format(datetime.timedelta(seconds=end-start),
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        else:
            # Fallback: take the most recent cached file of each directory.
            # BUGFIX: the original iterated over final_influence_file (a file
            # path) instead of final_influence_directory, so the directory
            # comparison below never matched.
            for x in final_influence_directory, influence_distance_directory:
                print(x)
                newest_file = max(glob.iglob(x + '*.mat'),
                                  key=os.path.getctime)
                final_influence_data = loadmat(newest_file)
                if x == final_influence_directory:
                    if influence_weight == 'min':
                        final_influence = final_influence_data['final_influence_min']
                    else:
                        final_influence = final_influence_data['final_influence_max']
    return final_influence
def best_neighboors(ppi_filt, final_influence, ngh_max):
    """Keep, for each gene, only edges to its ngh_max most influential neighbors.

    Returns a csc_matrix symmetrized with the element-wise maximum.
    """
    k = ngh_max + 1  # the central protein itself counts as one entry
    # BUGFIX: todense() returns np.matrix, whose rows stay 2-D; slicing
    # argpartition's (1, n) result with [:k] kept ALL indices, making the
    # top-k filter a no-op. Converting to plain ndarrays gives 1-D rows.
    influence = np.asarray(final_influence.todense())
    filt = np.asarray(ppi_filt.todense())
    ppi_ngh = np.zeros(filt.shape, dtype=np.float32)
    for i in range(filt.shape[0]):
        # Indices of the k largest influence values in row i (unordered).
        best_influencers = np.argpartition(-influence[i, :], k)[:k]
        ppi_ngh[i, best_influencers] = filt[i, best_influencers]
    # Symmetrize: keep an edge if either endpoint selected it.
    ppi_ngh = np.max(np.dstack((ppi_ngh, ppi_ngh.T)), axis=2)
    return sp.csc_matrix(ppi_ngh)
def filter_ppi_patients(ppi_total, mut_total, ppi_filt, final_influence, ngh_max,
                        keep_singletons=False,
                        min_mutation=10, max_mutation=2000):
    """Keep only best-influencer PPI edges and filter patients by mutation count.

    Parameters
    ----------
    ppi_total, mut_total, ppi_filt, final_influence : sparse matrix
        Full PPI, patients' mutation profiles, PPI restricted to network
        genes, and the symmetrized smoothed influence matrix.
    ngh_max : int
        Number of best influencers kept per gene.
    keep_singletons : boolean, default: False
        If True, genes absent from the PPI are kept as isolated nodes.
    min_mutation, max_mutation : int
        Exclusive lower and upper bounds on mutations per retained patient.

    Returns
    -------
    ppi_final, mut_final : sparse matrix
        Filtered PPI and mutation profiles.
    """
    ppi_ngh = best_neighboors(ppi_filt, final_influence, ngh_max)
    print('ppi_ngh ', ppi_ngh.dtype)
    deg0 = Ppi(ppi_total).deg == 0  # True where the protein has degree 0
    if keep_singletons:
        # Pad the PPI with empty rows/columns for the degree-0 genes so the
        # matrix dimensions match the untouched mutation profile.
        ppi_final = sp.bmat([
            [ppi_ngh, sp.csc_matrix((ppi_ngh.shape[0], sum(deg0)))],
            [sp.csc_matrix((sum(deg0), ppi_ngh.shape[0])),
             sp.csc_matrix((sum(deg0), sum(deg0)))]
            ])  # -> COO matrix
        mut_final = mut_total
    else:
        ppi_final = ppi_ngh
        # Drop mutation columns for genes with no PPI connection.
        mut_final = mut_total[:, Ppi(ppi_total).deg > 0]
    # Keep only patients whose mutation count falls strictly inside the bounds.
    mut_final = mut_final[np.array([min_mutation < k < max_mutation for k in
                                    Patient(mut_final).mut_per_patient])]
    print("Removing %i patients with less than %i or more than %i mutations" %
          (mut_total.shape[0]-mut_final.shape[0], min_mutation, max_mutation))
    print("New adjacency matrix:", ppi_final.shape)
    print("New mutation profile matrix:", mut_final.shape)
    return ppi_final, mut_final
def quantile_norm_mean(anarray):
    """Quantile-normalize so all rows of *anarray* share one distribution.

    Each rank is replaced by the mean of the values holding that rank across
    all rows of the (transposed) input.
    """
    A = np.squeeze(np.asarray(anarray.T))
    AA = np.zeros_like(A)
    # Per-column sort order; each rank gets the mean of that rank's values.
    I = np.argsort(A, axis=0)
    AA[I, np.arange(A.shape[1])] = np.mean(A[I, np.arange(A.shape[1])],
                                           axis=1)[:, np.newaxis]
    return AA.T
def quantile_norm_median(anarray):
    """Quantile-normalize using the median of each rank across rows.

    Same procedure as quantile_norm_mean but with the median, which is more
    robust to outliers.
    """
    A = np.squeeze(np.asarray(anarray.T))
    AA = np.zeros_like(A)
    # Per-column sort order; each rank gets the median of that rank's values.
    I = np.argsort(A, axis=0)
    AA[I, np.arange(A.shape[1])] = np.median(A[I, np.arange(A.shape[1])],
                                             axis=1)[:, np.newaxis]
    return AA.T
def propagation_profile(mut_raw, adj, alpha, tol, qn):
    """Diffuse the raw mutation profile and optionally quantile-normalize it.

    Parameters
    ----------
    mut_raw : sparse matrix
        Raw patient x gene mutation profile.
    adj : sparse matrix
        PPI adjacency matrix used for the diffusion.
    alpha : float
        Diffusion factor; alpha <= 0 skips propagation entirely.
    tol : float
        Convergence threshold of the propagation.
    qn : str or None
        'mean' / 'median' selects the quantile normalization applied after
        diffusion; any other value keeps the diffused profile as-is.

    Returns
    -------
    (mut_type, profile) : (str, matrix)
        mut_type is one of 'mean_qn', 'median_qn', 'diff', 'raw'; profile is
        the corresponding dense mutation profile.
    """
    start = time.time()
    if alpha > 0:
        mut_propag = propagation(mut_raw, adj, alpha, tol).todense()
        mut_propag[np.isnan(mut_propag)] = 0  # propagation can produce NaNs
        if qn == 'mean':
            mut_type = 'mean_qn'
            mut_propag = quantile_norm_mean(mut_propag)
        elif qn == 'median':
            mut_type = 'median_qn'
            mut_propag = quantile_norm_median(mut_propag)
        else:
            mut_type = 'diff'
        end = time.time()
        print("---------- Propagation on {} mutation profile = {} ---------- {}"
              .format(mut_type,
                      datetime.timedelta(seconds=end-start),
                      datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        return mut_type, mut_propag
    else:
        # No propagation requested: return the densified raw profile.
        mut_type = 'raw'
        mut_raw = mut_raw.todense()
        end = time.time()
        print("---------- Propagation on {} mutation profile = {} ---------- {}"
              .format(mut_type,
                      datetime.timedelta(seconds=end-start),
                      datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        return mut_type, mut_raw
| true | true |
f73661c09ab447a073f32a96f89585dfb6482f20 | 5,509 | py | Python | src/lib/utils/video.py | HLJ1997/human-action-recognition | 550fcc3d1a2d8b338535ab12cb2124cf2e9c0be0 | [
"MIT"
] | 1 | 2021-07-14T09:15:09.000Z | 2021-07-14T09:15:09.000Z | src/lib/utils/video.py | HLJ1997/human-action-recognition | 550fcc3d1a2d8b338535ab12cb2124cf2e9c0be0 | [
"MIT"
] | null | null | null | src/lib/utils/video.py | HLJ1997/human-action-recognition | 550fcc3d1a2d8b338535ab12cb2124cf2e9c0be0 | [
"MIT"
] | null | null | null | """
Credit to original code https://github.com/tryolabs/norfair/blob/master/norfair/video.py
modified to get output file path as user provided filename suffix and
changed constructor of Video class
"""
import os
import os.path as osp
import time
from typing import List, Optional, Union, Tuple
import cv2
import numpy as np
from rich import print
from rich.progress import BarColumn, Progress, ProgressColumn, TimeRemainingColumn
def get_terminal_size(default: Tuple[int, int] = (80, 24)) -> Tuple[int, int]:
    """Return (columns, lines) of the first std stream attached to a terminal.

    Tries fd 0 (stdin), 1 (stdout) and 2 (stderr) in order and falls back to
    *default* when none of them is a tty.
    """
    size = default
    for fd in (0, 1, 2):  # stdin, stdout, stderr
        try:
            size = os.get_terminal_size(fd)
        except OSError:
            continue
        break
    return size[0], size[1]
class Video:
    """OpenCV video source with a rich progress bar.

    Iterating a ``Video`` yields frames while updating a progress bar;
    helpers cover on-screen display, writer creation and output-file naming.
    Adapted from norfair (https://github.com/tryolabs/norfair).
    """

    def __init__(self, source: str):
        """Open *source* — a video file path, or an int webcam index."""
        self.source = source
        is_webcam = lambda x: isinstance(x, int)
        self.display = 'webcam' if is_webcam(source) \
            else osp.basename(source)
        # Read Input Video
        self.video_capture = cv2.VideoCapture(source)
        # BUGFIX: isOpened is a method; without the call the bound method was
        # always truthy, so the failure branch could never trigger.
        if not self.video_capture.isOpened():
            self._fail(
                f"[bold red]Error:[/bold red] '{self.source}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly."
            )
        self.total_frames = 0 if is_webcam(source) \
            else int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        self.frame_cnt = 0
        description = 'Run'
        # Setup progressbar (description always includes the source name).
        if self.display:
            description += f" | {self.display}"
        progress_bar_fields: List[Union[str, ProgressColumn]] = [
            "[progress.description]{task.description}",
            BarColumn(),
            "[yellow]{task.fields[process_fps]:.2f}fps[/yellow]",
        ]
        progress_bar_fields.insert(
            2, "[progress.percentage]{task.percentage:>3.0f}%"
        )
        progress_bar_fields.insert(
            3,
            TimeRemainingColumn(),
        )
        self.progress_bar = Progress(
            *progress_bar_fields,
            auto_refresh=False,
            redirect_stdout=False,
            redirect_stderr=False,
        )
        self.task = self.progress_bar.add_task(
            self.abbreviate_description(description),
            total=self.total_frames,
            start=self.source,
            process_fps=0,
        )

    # This is a generator, note the yield keyword below.
    def __iter__(self):
        """Yield frames one by one, updating fps and the progress bar."""
        with self.progress_bar as progress_bar:
            start = time.time()
            # Iterate over video
            while True:
                self.frame_cnt += 1
                ret, frame = self.video_capture.read()
                if ret is False or frame is None:
                    break
                self.fps = self.frame_cnt / (time.time() - start)
                progress_bar.update(
                    self.task, advance=1, refresh=True, process_fps=self.fps
                )
                yield frame
            self.stop()

    def stop(self):
        """Release the capture device and close all OpenCV windows."""
        self.video_capture.release()
        cv2.destroyAllWindows()

    def _fail(self, msg: str):
        """Print *msg* and terminate the process."""
        print(msg)
        exit()

    def show(self, frame: np.ndarray, winname: str = 'show', downsample_ratio: float = 1.0) -> int:
        """Display *frame*; returns the cv2.waitKey(1) key code.

        Resize to lower resolution for faster streaming over slow connections.
        """
        if self.frame_cnt == 1:
            # First frame: create and place the preview window once.
            cv2.namedWindow(winname, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
            cv2.resizeWindow(winname, 640, 480)
            cv2.moveWindow(winname, 20, 20)
        if downsample_ratio != 1.0:
            # BUGFIX: cv2.resize needs an integer dsize; '//' with a float
            # ratio yields floats.
            frame = cv2.resize(
                frame,
                (
                    int(frame.shape[1] // downsample_ratio),
                    int(frame.shape[0] // downsample_ratio),
                ),
            )
        cv2.imshow(winname, frame)
        return cv2.waitKey(1)

    def get_writer(self, frame: np.ndarray, output_path: str, fps: Optional[int] = 20) -> 'cv2.VideoWriter':
        """Create an XVID VideoWriter whose frame size matches *frame*."""
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        output_size = (frame.shape[1], frame.shape[0])  # OpenCV format is (width, height)
        writer = cv2.VideoWriter(output_path, fourcc, fps, output_size)
        return writer

    def get_output_file_path(self, output_folder, suffix: Optional[List] = None) -> str:
        """Build '<output_folder>/<stem>_<suffix...>.avi', creating the folder."""
        suffix = [] if suffix is None else suffix  # avoid a mutable default
        os.makedirs(output_folder, exist_ok=True)
        filename = '{}_' * (len(suffix)+1)
        filename = filename.format(
            'webcam' if isinstance(self.source, int) else osp.splitext(self.display)[0],
            *iter(suffix)
        )
        output_path = osp.join(output_folder, f'{filename[:-1]}.avi')
        return output_path

    def abbreviate_description(self, description: str) -> str:
        """Conditionally abbreviate description so that progress bar fits in small terminals"""
        terminal_columns, _ = get_terminal_size()
        space_for_description = (
            int(terminal_columns) - 25
        )  # Leave 25 space for progressbar
        if len(description) < space_for_description:
            return description
        else:
            return "{} ... {}".format(
                description[: space_for_description // 2 - 3],
                description[-space_for_description // 2 + 3 :],
            )
if __name__ == '__main__':
path = '/home/zmh/hdd/Test_Videos/Tracking/aung_la_fight_cut_1.mp4'
video = Video(path)
for i in video:
video.show(i, 'debug') | 36.243421 | 219 | 0.592304 |
import os
import os.path as osp
import time
from typing import List, Optional, Union, Tuple
import cv2
import numpy as np
from rich import print
from rich.progress import BarColumn, Progress, ProgressColumn, TimeRemainingColumn
def get_terminal_size(default: Tuple[int, int] = (80, 24)) -> Tuple[int, int]:
    """Return (columns, lines) of the first std stream attached to a terminal.

    Tries fd 0 (stdin), 1 (stdout) and 2 (stderr) in order and falls back to
    *default* when none of them is a tty.
    """
    columns, lines = default
    for fd in range(0, 3):  # 0=stdin, 1=stdout, 2=stderr
        try:
            columns, lines = os.get_terminal_size(fd)
        except OSError:
            # This fd is not a terminal (e.g. redirected); try the next one.
            continue
        break
    return columns, lines
class Video:
    """OpenCV video source with a rich progress bar.

    Iterating a ``Video`` yields frames while updating a progress bar;
    helpers cover on-screen display, writer creation and output-file naming.
    Adapted from norfair (https://github.com/tryolabs/norfair).
    """

    def __init__(self, source: str):
        """Open *source* — a video file path, or an int webcam index."""
        self.source = source
        is_webcam = lambda x: isinstance(x, int)
        self.display = 'webcam' if is_webcam(source) \
            else osp.basename(source)
        self.video_capture = cv2.VideoCapture(source)
        # BUGFIX: isOpened is a method; without the call the bound method was
        # always truthy, so the failure branch could never trigger.
        if not self.video_capture.isOpened():
            self._fail(
                f"[bold red]Error:[/bold red] '{self.source}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly."
            )
        self.total_frames = 0 if is_webcam(source) \
            else int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        self.frame_cnt = 0
        description = 'Run'
        # Progress-bar setup; description always includes the source name.
        if self.display:
            description += f" | {self.display}"
        progress_bar_fields: List[Union[str, ProgressColumn]] = [
            "[progress.description]{task.description}",
            BarColumn(),
            "[yellow]{task.fields[process_fps]:.2f}fps[/yellow]",
        ]
        progress_bar_fields.insert(
            2, "[progress.percentage]{task.percentage:>3.0f}%"
        )
        progress_bar_fields.insert(
            3,
            TimeRemainingColumn(),
        )
        self.progress_bar = Progress(
            *progress_bar_fields,
            auto_refresh=False,
            redirect_stdout=False,
            redirect_stderr=False,
        )
        self.task = self.progress_bar.add_task(
            self.abbreviate_description(description),
            total=self.total_frames,
            start=self.source,
            process_fps=0,
        )

    def __iter__(self):
        """Yield frames one by one, updating fps and the progress bar."""
        with self.progress_bar as progress_bar:
            start = time.time()
            while True:
                self.frame_cnt += 1
                ret, frame = self.video_capture.read()
                if ret is False or frame is None:
                    break
                self.fps = self.frame_cnt / (time.time() - start)
                progress_bar.update(
                    self.task, advance=1, refresh=True, process_fps=self.fps
                )
                yield frame
            self.stop()

    def stop(self):
        """Release the capture device and close all OpenCV windows."""
        self.video_capture.release()
        cv2.destroyAllWindows()

    def _fail(self, msg: str):
        """Print *msg* and terminate the process."""
        print(msg)
        exit()

    def show(self, frame: np.ndarray, winname: str = 'show', downsample_ratio: float = 1.0) -> int:
        """Display *frame*; returns the cv2.waitKey(1) key code."""
        if self.frame_cnt == 1:
            # First frame: create and place the preview window once.
            cv2.namedWindow(winname, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
            cv2.resizeWindow(winname, 640, 480)
            cv2.moveWindow(winname, 20, 20)
        if downsample_ratio != 1.0:
            # BUGFIX: cv2.resize needs an integer dsize; '//' with a float
            # ratio yields floats.
            frame = cv2.resize(
                frame,
                (
                    int(frame.shape[1] // downsample_ratio),
                    int(frame.shape[0] // downsample_ratio),
                ),
            )
        cv2.imshow(winname, frame)
        return cv2.waitKey(1)

    def get_writer(self, frame: np.ndarray, output_path: str, fps: Optional[int] = 20) -> 'cv2.VideoWriter':
        """Create an XVID VideoWriter whose frame size matches *frame*."""
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        output_size = (frame.shape[1], frame.shape[0])  # OpenCV order: (width, height)
        writer = cv2.VideoWriter(output_path, fourcc, fps, output_size)
        return writer

    def get_output_file_path(self, output_folder, suffix: Optional[List] = None) -> str:
        """Build '<output_folder>/<stem>_<suffix...>.avi', creating the folder."""
        suffix = [] if suffix is None else suffix  # avoid a mutable default
        os.makedirs(output_folder, exist_ok=True)
        filename = '{}_' * (len(suffix)+1)
        filename = filename.format(
            'webcam' if isinstance(self.source, int) else osp.splitext(self.display)[0],
            *iter(suffix)
        )
        output_path = osp.join(output_folder, f'{filename[:-1]}.avi')
        return output_path

    def abbreviate_description(self, description: str) -> str:
        """Abbreviate *description* so the progress bar fits small terminals."""
        terminal_columns, _ = get_terminal_size()
        space_for_description = (
            int(terminal_columns) - 25
        )  # leave 25 columns for the progress bar itself
        if len(description) < space_for_description:
            return description
        else:
            return "{} ... {}".format(
                description[: space_for_description // 2 - 3],
                description[-space_for_description // 2 + 3 :],
            )
if __name__ == '__main__':
path = '/home/zmh/hdd/Test_Videos/Tracking/aung_la_fight_cut_1.mp4'
video = Video(path)
for i in video:
video.show(i, 'debug') | true | true |
f7366281db5c2cbc331075eeeac4051f393c83a9 | 4,294 | py | Python | Self Driving Car/Python with Tensorflow/driveSDC.py | MohammadWasil/Self-Driving-Car | 9ef5b77e1268623c11e4c39d5c8e1e990caee273 | [
"MIT"
] | 7 | 2018-12-19T18:44:02.000Z | 2022-03-18T08:22:43.000Z | Self Driving Car/Python with Tensorflow/driveSDC.py | MohammadWasil/Self-Driving-Car | 9ef5b77e1268623c11e4c39d5c8e1e990caee273 | [
"MIT"
] | null | null | null | Self Driving Car/Python with Tensorflow/driveSDC.py | MohammadWasil/Self-Driving-Car | 9ef5b77e1268623c11e4c39d5c8e1e990caee273 | [
"MIT"
] | 4 | 2019-03-29T14:12:49.000Z | 2021-01-01T15:08:01.000Z | import socket
from tensorflow.keras.models import load_model
from PIL import ImageGrab
import numpy as np
import cv2
import os
# Load the Keras model trained to predict a steering angle from a frame.
model = load_model(r"D:\Unity Game\Self Driving Car\SDCProgram\Best Models\data-003.h5") # Directory to load the model
# TCP connection to the Unity simulator.
host = "127.0.0.1"
port = 25001 # Port the simulator listens on
#data = "1,1,11" # Data to be send
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP connection
print("starting connection")
try:
    sock.connect((host, port)) # Connect to the simulator on the given port.
    print("Connected")
except:
    # Connection failure is only logged here; a later send/recv will fail loudly.
    print("Might happen socket is closed!")
#######
def send_data(steering_angle, throttle):
    """Serialize the control pair as 'angle,throttle' and send it over `sock`."""
    payload = "{},{}".format(steering_angle, throttle)
    sock.sendall(payload.encode("utf-8"))  # To send the data
# Per-sample histories returned by socketConnection() as numpy arrays.
steeringAngleList = []
velocityList = []
throttleList = []
# Latest parsed telemetry values (module-level so other functions can read them).
steeringAngle = 0
velocity = 0
throttle = 0
# Raw string fields accumulated across every packet received so far.
arr1=[]
arr2=[]
arr3=[]
splitted_data = []
reply=[]
def socketConnection():
    """Receive one 'steering,velocity,throttle' sample from the simulator.

    Appends the raw string fields to the module-level history lists and
    returns them as numpy arrays together with the parsed float values of
    the latest sample.
    """
    # BUGFIX: the original declared `global globalsteeringAngle` (a typo),
    # which made `steeringAngle` a local name; when recv/parse failed, the
    # return statement crashed with UnboundLocalError.  With the correct
    # global declarations the previous values are returned instead.
    global steeringAngle
    global velocity
    global throttle
    try:
        reply = sock.recv(2048).decode("utf-8")  # To receive the data
        splitted_data = reply.split(',')
        arr1.append(splitted_data[0])
        arr2.append(splitted_data[1])
        arr3.append(splitted_data[2])
        steeringAngle = float(splitted_data[0])
        velocity = float(splitted_data[1])
        throttle = float(splitted_data[2])
    except Exception:
        # Malformed packet or socket error: keep the previous module-level
        # values.  (Narrowed from a bare `except:` so Ctrl-C still works.)
        print("Exception")
    steeringAngleList = np.array(arr1)
    velocityList = np.array(arr2)
    throttleList = np.array(arr3)
    return steeringAngleList, velocityList, throttleList, steeringAngle, velocity, throttle
filename = r"D:\ML\Unity-ML\Drive SDC.csv" #Directory to save your current Data in a csv file.
def csv_file(steer_Angle, velocity, throttle):
    """Rewrite the telemetry CSV: a header row plus one row per sample.

    The three arguments are parallel sequences; the module-level `filename`
    gives the destination path.  (The header typo 'Steerring Angle' is kept
    byte-for-byte since downstream readers may match on it.)
    """
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(filename, "w") as f:
        f.write("{},{},{}\n".format("Steerring Angle", "Current Velocity", "Throttle"))
        for x in zip(steer_Angle, velocity, throttle):
            f.write("{},{},{}\n".format(x[0], x[1], x[2]))
#############################
# Speed band used by drive(): above MAX_SPEED the throttle formula switches to
# the MIN_SPEED limit (slowing the car), otherwise MAX_SPEED is used.
# NOTE(review): units come from the Unity simulator -- presumably km/h; confirm.
MAX_SPEED = 25
MIN_SPEED = 10
speed_limit = MAX_SPEED  # mutated inside drive()
def preprocess(image):
    """Resize a frame to the 200x66 input resolution expected by the model.

    BUGFIX: the original passed ``cv2.INTER_AREA`` as the third positional
    argument of ``cv2.resize``, which is the ``dst`` output buffer, not the
    interpolation mode; it must be supplied as the ``interpolation`` keyword.
    """
    return cv2.resize(image, (200, 66), interpolation=cv2.INTER_AREA)
def drive(image, steering_angle, velocity, throttle):
    """Predict a steering angle for `image` and send angle + throttle to Unity.

    Mutates the module-level `speed_limit`: when the car is faster than the
    current limit, the limit drops to MIN_SPEED so the throttle formula slows
    it down; otherwise it is restored to MAX_SPEED.  Any failure is logged
    and swallowed so the main loop keeps running.
    """
    try:
        image = np.asarray(image) # from PIL image to numpy array
        image = preprocess(image) # apply the preprocessing
        image = np.array([image]) # the model expects 4D array
        steering_angle = float(model.predict(image, batch_size=1))
        # Scale down before the throttle formula -- presumably the model was
        # trained on 10x angles (see the trailing NOTE in this file); confirm.
        steering_angle = (steering_angle/10)
        global speed_limit
        if velocity > speed_limit:
            speed_limit = MIN_SPEED # slow down
        else:
            speed_limit = MAX_SPEED
        # Quadratic back-off: large angles or speeds near the limit cut throttle.
        throttle = 1.0 - steering_angle**2 - (velocity/speed_limit)**2
        print('{} {} {}'.format(steering_angle, throttle, velocity))
        # Undo the scaling before sending the angle back to the simulator.
        steering_angle = (steering_angle*10)
        send_data(steering_angle, throttle)
    except Exception as e:
        print("Exception Occured", e)
num = 0
path = r"D:\ML\Unity-ML\Drive SDC" # Destination/path to which all the current images will be saved
# Main loop: receive telemetry, grab a screenshot of the simulator window,
# log everything to disk, then let the model drive.
while (True):
    num = num + 1
    imageName = 'Wasil'+ str(num) + '.png' # Sequential name for each saved frame.
    # Collect the current telemetry sample (histories + latest values).
    strAngl, vlcty, thrttl, steeringAngle, velocity, throttle = socketConnection()
    image = np.array(ImageGrab.grab(bbox=(0, 120, 750, 540))) # Screenshot of the simulator region.
    csv_file(strAngl, vlcty, thrttl)
    cv2.imwrite(os.path.join(path, imageName), image) # Save the frame next to the CSV log.
    drive(image, steeringAngle, velocity, throttle)
"""
### NOTE: divide steering angle by 10.
""" | 30.453901 | 148 | 0.6034 | import socket
from tensorflow.keras.models import load_model
from PIL import ImageGrab
import numpy as np
import cv2
import os
model = load_model(r"D:\Unity Game\Self Driving Car\SDCProgram\Best Models\data-003.h5")
host = "127.0.0.1"
port = 25001
ket(socket.AF_INET, socket.SOCK_STREAM)
print("starting connection")
try:
sock.connect((host, port))
print("Connected")
except:
print("Might happen socket is closed!")
ngle, throttle):
data_01 = str(steering_angle)
data_02 = str(throttle)
data = data_01 + ',' + data_02
sock.sendall(data.encode("utf-8"))
steeringAngleList = []
velocityList = []
throttleList = []
steeringAngle = 0
velocity = 0
throttle = 0
arr1=[]
arr2=[]
arr3=[]
splitted_data = []
reply=[]
def socketConnection():
global globalsteeringAngle
global velocity
global throttle
try:
reply = sock.recv(2048).decode("utf-8")
ppend(splitted_data[1])
arr3.append(splitted_data[2])
steeringAngle = float(splitted_data[0])
velocity = float(splitted_data[1])
throttle = float(splitted_data[2])
except:
print("Exception")
steeringAngleList = np.array(arr1)
velocityList = np.array(arr2)
throttleList = np.array(arr3)
return steeringAngleList, velocityList, throttleList, steeringAngle, velocity, throttle
filename = r"D:\ML\Unity-ML\Drive SDC.csv"
def csv_file(steer_Angle, velocity, throttle):
f = open(filename, "w")
f.write("{},{},{}\n".format("Steerring Angle", "Current Velocity", "Throttle"))
for x in zip( steer_Angle, velocity, throttle):
f.write("{},{},{}\n".format(x[0], x[1], x[2]))
f.close()
it:
speed_limit = MIN_SPEED
else:
speed_limit = MAX_SPEED
throttle = 1.0 - steering_angle**2 - (velocity/speed_limit)**2
print('{} {} {}'.format(steering_angle, throttle, velocity))
steering_angle = (steering_angle*10)
send_data(steering_angle, throttle)
except Exception as e:
print("Exception Occured", e)
num = 0
path = r"D:\ML\Unity-ML\Drive SDC"
while (True):
num = num + 1
imageName = 'Wasil'+ str(num) + '.png'
strAngl, vlcty, thrttl, steeringAngle, velocity, throttle = socketConnection()
image = np.array(ImageGrab.grab(bbox=(0, 120, 750, 540)))
csv_file(strAngl, vlcty, thrttl)
cv2.imwrite(os.path.join(path, imageName), image)
drive(image, steeringAngle, velocity, throttle)
| true | true |
f736631eac1f687cc72652145a76d0e53fa16fc2 | 4,127 | py | Python | fiasco/element.py | fluxtransport/fiasco | 9d70d8bdb03197be1ddfd433e1392e214a1468e8 | [
"BSD-3-Clause"
] | null | null | null | fiasco/element.py | fluxtransport/fiasco | 9d70d8bdb03197be1ddfd433e1392e214a1468e8 | [
"BSD-3-Clause"
] | null | null | null | fiasco/element.py | fluxtransport/fiasco | 9d70d8bdb03197be1ddfd433e1392e214a1468e8 | [
"BSD-3-Clause"
] | null | null | null | """
Classes and functions for element-level operations
"""
import numpy as np
import astropy.units as u
import plasmapy
import fiasco
__all__ = ['Element']
class Element(fiasco.IonCollection):
    """
    Collection of all ions for a particular element.
    The `Element` object provides a way to logically group together ions of the same
    element. This provides an easy way to compute element-level derived quantities such
    as the ionization fraction as a function of temperature.
    Parameters
    ----------
    element_name : `str`, `int`
        Symbol, atomic number, or full name of the element
    temperature : `~astropy.units.Quantity`
    See Also
    --------
    fiasco.Ion : All the same keyword arguments can also be passed here.
    """
    @u.quantity_input
    def __init__(self, element_name, temperature: u.K, **kwargs):
        # Normalize symbols like 'fe' -> 'Fe' before the atomic-number lookup.
        if type(element_name) is str:
            element_name = element_name.capitalize()
        # NOTE(review): `plasmapy.atomic` was renamed `plasmapy.particles` in
        # newer plasmapy releases -- confirm the pinned version supports this.
        Z = plasmapy.atomic.atomic_number(element_name)
        ion_list = []
        # Build every charge state, from neutral ('Z 1') to bare ('Z Z+1').
        for i in range(Z + 1):
            ion = fiasco.Ion(f'{Z} {i+1}', temperature, **kwargs)
            ion_list.append(ion)
        super().__init__(*ion_list)
    @property
    def atomic_symbol(self):
        # Element-level attributes are shared by every member ion; delegate
        # to the first one.
        return self[0].atomic_symbol
    @property
    def atomic_number(self):
        return self[0].atomic_number
    @property
    def element_name(self):
        return self[0].element_name
    @property
    def abundance(self):
        return self[0].abundance
    def _rate_matrix(self):
        # Tridiagonal (Z+1)x(Z+1) matrix of ionization/recombination rates,
        # one matrix per temperature point.
        rate_matrix = np.zeros(self.temperature.shape+(self.atomic_number+1, self.atomic_number+1))
        rate_unit = self[0].ionization_rate().unit
        rate_matrix = rate_matrix * rate_unit
        for i in range(1, self.atomic_number):
            # Diagonal: losses out of state i; off-diagonals: gains from
            # ionization of i-1 and recombination of i+1.
            rate_matrix[:, i, i] = -(self[i].ionization_rate() + self[i].recombination_rate())
            rate_matrix[:, i, i-1] = self[i-1].ionization_rate()
            rate_matrix[:, i, i+1] = self[i+1].recombination_rate()
        # Boundary rows: the neutral and fully-stripped states each have only
        # one neighboring charge state.
        rate_matrix[:, 0, 0] = -(self[0].ionization_rate() + self[0].recombination_rate())
        rate_matrix[:, 0, 1] = self[1].recombination_rate()
        rate_matrix[:, -1, -1] = -(self[-1].ionization_rate() + self[-1].recombination_rate())
        rate_matrix[:, -1, -2] = self[-2].ionization_rate()
        return rate_matrix
    def equilibrium_ionization(self, **kwargs):
        """
        Calculate the ionization fraction, in equilibrium, for all ions of the element.
        Calculate the population fractions for every ion of this element as a function of
        temperature, assuming ionization equilibrium.
        Parameters
        ----------
        rate_matrix : `~astropy.units.Quantity`, optional
            :math:`Z+1` by :math:`Z+1` matrix of ionization and recombination rates. If not
            given, this will be computed automatically.
        See Also
        --------
        fiasco.Ion.ionization_rate
        fiasco.Ion.recombination_rate
        """
        rate_matrix = kwargs.get('rate_matrix', None)
        if rate_matrix is None:
            rate_matrix = self._rate_matrix()
        # Solve system of equations using singular value decomposition
        _, _, V = np.linalg.svd(rate_matrix.value)
        # Select columns of V with smallest eigenvalues (returned in descending order)
        # NOTE: must take the absolute value as the SVD solution is only accurate up
        # to the sign. We require that the solutions must be positive.
        ioneq = np.fabs(V[:, -1, :])
        # Normalize so the population fractions sum to 1 at each temperature.
        ioneq /= ioneq.sum(axis=1)[:, np.newaxis]
        return u.Quantity(ioneq)
    def __getitem__(self, value):
        # Accept 'Fe 9' (spectroscopic notation, 1-based) or 'Fe 8+' (charge
        # state) strings in addition to plain integer indices.
        if type(value) is str:
            el, ion = value.split()
            if '+' in ion:
                value = int(ion.strip('+'))
            else:
                value = int(ion) - 1
        return super().__getitem__(value)
    def __repr__(self):
        ion_list = '\n'.join([i.ion_name for i in self._ion_list])
        return f"""Element
-------
{self.atomic_symbol} ({self.atomic_number}) -- {self.element_name}
Available Ions
--------------
{ion_list}"""
| 33.282258 | 99 | 0.620305 | import numpy as np
import astropy.units as u
import plasmapy
import fiasco
__all__ = ['Element']
class Element(fiasco.IonCollection):
@u.quantity_input
def __init__(self, element_name, temperature: u.K, **kwargs):
if type(element_name) is str:
element_name = element_name.capitalize()
Z = plasmapy.atomic.atomic_number(element_name)
ion_list = []
for i in range(Z + 1):
ion = fiasco.Ion(f'{Z} {i+1}', temperature, **kwargs)
ion_list.append(ion)
super().__init__(*ion_list)
@property
def atomic_symbol(self):
return self[0].atomic_symbol
@property
def atomic_number(self):
return self[0].atomic_number
@property
def element_name(self):
return self[0].element_name
@property
def abundance(self):
return self[0].abundance
def _rate_matrix(self):
rate_matrix = np.zeros(self.temperature.shape+(self.atomic_number+1, self.atomic_number+1))
rate_unit = self[0].ionization_rate().unit
rate_matrix = rate_matrix * rate_unit
for i in range(1, self.atomic_number):
rate_matrix[:, i, i] = -(self[i].ionization_rate() + self[i].recombination_rate())
rate_matrix[:, i, i-1] = self[i-1].ionization_rate()
rate_matrix[:, i, i+1] = self[i+1].recombination_rate()
rate_matrix[:, 0, 0] = -(self[0].ionization_rate() + self[0].recombination_rate())
rate_matrix[:, 0, 1] = self[1].recombination_rate()
rate_matrix[:, -1, -1] = -(self[-1].ionization_rate() + self[-1].recombination_rate())
rate_matrix[:, -1, -2] = self[-2].ionization_rate()
return rate_matrix
def equilibrium_ionization(self, **kwargs):
rate_matrix = kwargs.get('rate_matrix', None)
if rate_matrix is None:
rate_matrix = self._rate_matrix()
_, _, V = np.linalg.svd(rate_matrix.value)
ioneq = np.fabs(V[:, -1, :])
ioneq /= ioneq.sum(axis=1)[:, np.newaxis]
return u.Quantity(ioneq)
def __getitem__(self, value):
if type(value) is str:
el, ion = value.split()
if '+' in ion:
value = int(ion.strip('+'))
else:
value = int(ion) - 1
return super().__getitem__(value)
def __repr__(self):
ion_list = '\n'.join([i.ion_name for i in self._ion_list])
return f"""Element
-------
{self.atomic_symbol} ({self.atomic_number}) -- {self.element_name}
Available Ions
--------------
{ion_list}"""
| true | true |
f73663f3d443c0c444771b600f5712a2e9d729ec | 983 | py | Python | whoahqa/tests/models/test_reporting_period_factory.py | onaio/who-adolescent-hqa | 108a7e60b025d0723247f5f02eab2c4d41f5a02a | [
"Apache-2.0"
] | null | null | null | whoahqa/tests/models/test_reporting_period_factory.py | onaio/who-adolescent-hqa | 108a7e60b025d0723247f5f02eab2c4d41f5a02a | [
"Apache-2.0"
] | 2 | 2018-01-09T08:58:11.000Z | 2019-01-18T09:20:14.000Z | whoahqa/tests/models/test_reporting_period_factory.py | onaio/who-adolescent-hqa | 108a7e60b025d0723247f5f02eab2c4d41f5a02a | [
"Apache-2.0"
] | null | null | null | import datetime
from pyramid import testing
from whoahqa.models import (
DBSession,
ReportingPeriodFactory,
ReportingPeriod,)
from whoahqa.tests.test_base import TestBase
class TestReportingPeriodFactory(TestBase):
    """Tests for ReportingPeriodFactory traversal (`__getitem__`)."""

    def test_get_item_retrieves_by_id(self):
        """A persisted period is retrievable by id and wired for traversal."""
        period = ReportingPeriod(
            title="2014/2015",
            start_date=datetime.date(2014, 2, 1),
            end_date=datetime.date(2015, 2, 1))
        DBSession.add(period)
        # Flush so the database assigns the primary key.
        DBSession.flush()
        # Renamed from `id`, which shadowed the builtin.
        period_id = period.id
        factory = ReportingPeriodFactory(
            testing.DummyRequest())
        period = factory.__getitem__(period_id)
        self.assertEqual(period.id, period_id)
        self.assertEqual(period.__parent__, factory)
        self.assertEqual(period.__name__, period_id)

    def test_get_item_throws_key_error_if_non_existent_id(self):
        """Unknown ids must raise KeyError from traversal lookup."""
        factory = ReportingPeriodFactory(
            testing.DummyRequest())
        self.assertRaises(KeyError, factory.__getitem__, '0')
| 29.787879 | 64 | 0.677518 | import datetime
from pyramid import testing
from whoahqa.models import (
DBSession,
ReportingPeriodFactory,
ReportingPeriod,)
from whoahqa.tests.test_base import TestBase
class TestReportingPeriodFactory(TestBase):
def test_get_item_retrieves_by_id(self):
period = ReportingPeriod(
title="2014/2015",
start_date=datetime.date(2014, 2, 1),
end_date=datetime.date(2015, 2, 1))
DBSession.add(period)
DBSession.flush()
id = period.id
factory = ReportingPeriodFactory(
testing.DummyRequest())
period = factory.__getitem__(id)
self.assertEqual(period.id, id)
self.assertEqual(period.__parent__, factory)
self.assertEqual(period.__name__, id)
def test_get_item_throws_key_error_if_non_existent_id(self):
factory = ReportingPeriodFactory(
testing.DummyRequest())
self.assertRaises(KeyError, factory.__getitem__, '0')
| true | true |
f7366410d3402dde7686f8ca4b30c6c1c2234403 | 52,914 | py | Python | models/new/sencebgan.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
] | 3 | 2019-07-27T14:00:42.000Z | 2020-01-17T17:07:51.000Z | models/new/sencebgan.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
] | null | null | null | models/new/sencebgan.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
] | 4 | 2019-10-22T02:58:26.000Z | 2020-10-06T09:59:26.000Z | import tensorflow as tf
from base.base_model import BaseModel
from utils.alad_utils import get_getter
import utils.alad_utils as sn
class SENCEBGAN(BaseModel):
    def __init__(self, config):
        """Construct the model: BaseModel setup, full TF graph, and saver.

        `config` is the experiment configuration object consumed throughout
        `build_model` (trainer hyperparameters, image dims, etc.).
        """
        super(SENCEBGAN, self).__init__(config)
        # Building the graph and the tf.train.Saver happens eagerly here.
        self.build_model()
        self.init_saver()
def build_model(self):
############################################################################################
# INIT
############################################################################################
# Kernel initialization for the convolutions
if self.config.trainer.init_type == "normal":
self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)
elif self.config.trainer.init_type == "xavier":
self.init_kernel = tf.contrib.layers.xavier_initializer(
uniform=False, seed=None, dtype=tf.float32
)
# Placeholders
self.is_training_gen = tf.placeholder(tf.bool)
self.is_training_dis = tf.placeholder(tf.bool)
self.is_training_enc_g = tf.placeholder(tf.bool)
self.is_training_enc_r = tf.placeholder(tf.bool)
self.feature_match1 = tf.placeholder(tf.float32)
self.feature_match2 = tf.placeholder(tf.float32)
self.image_input = tf.placeholder(
tf.float32, shape=[None] + self.config.trainer.image_dims, name="x"
)
self.noise_tensor = tf.placeholder(
tf.float32, shape=[None, self.config.trainer.noise_dim], name="noise"
)
############################################################################################
# MODEL
############################################################################################
self.logger.info("Building training graph...")
with tf.variable_scope("SENCEBGAN"):
# First training part
# G(z) ==> x'
with tf.variable_scope("Generator_Model"):
self.image_gen = self.generator(self.noise_tensor)
# Discriminator outputs
with tf.variable_scope("Discriminator_Model"):
self.embedding_real, self.decoded_real = self.discriminator(
self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm
)
self.embedding_fake, self.decoded_fake = self.discriminator(
self.image_gen, do_spectral_norm=self.config.trainer.do_spectral_norm
)
# Second training part
# E(x) ==> z'
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded = self.encoder_g(self.image_input)
# G(z') ==> G(E(x)) ==> x''
with tf.variable_scope("Generator_Model"):
self.image_gen_enc = self.generator(self.image_encoded)
# Discriminator outputs
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake, self.decoded_enc_fake = self.discriminator(
self.image_gen_enc, do_spectral_norm=self.config.trainer.do_spectral_norm
)
self.embedding_enc_real, self.decoded_enc_real = self.discriminator(
self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm
)
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real, self.im_f_real = self.discriminator_xx(
self.image_input,
self.image_input,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake, self.im_f_fake = self.discriminator_xx(
self.image_input,
self.image_gen_enc,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r = self.generator(self.image_encoded_r)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege = self.encoder_r(self.image_gen_enc_r)
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real, self.z_f_real = self.discriminator_zz(
self.image_encoded_r,
self.image_encoded_r,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake, self.z_f_fake = self.discriminator_zz(
self.image_encoded_r,
self.image_ege,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
############################################################################################
# LOSS FUNCTIONS
############################################################################################
with tf.name_scope("Loss_Functions"):
with tf.name_scope("Generator_Discriminator"):
# Discriminator Loss
if self.config.trainer.mse_mode == "norm":
self.disc_loss_real = tf.reduce_mean(
self.mse_loss(
self.decoded_real,
self.image_input,
mode="norm",
order=self.config.trainer.order,
)
)
self.disc_loss_fake = tf.reduce_mean(
self.mse_loss(
self.decoded_fake,
self.image_gen,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.disc_loss_real = self.mse_loss(
self.decoded_real,
self.image_input,
mode="mse",
order=self.config.trainer.order,
)
self.disc_loss_fake = self.mse_loss(
self.decoded_fake,
self.image_gen,
mode="mse",
order=self.config.trainer.order,
)
self.loss_discriminator = (
tf.math.maximum(self.config.trainer.disc_margin - self.disc_loss_fake, 0)
+ self.disc_loss_real
)
# Generator Loss
pt_loss = 0
if self.config.trainer.pullaway:
pt_loss = self.pullaway_loss(self.embedding_fake)
self.loss_generator = self.disc_loss_fake + self.config.trainer.pt_weight * pt_loss
# New addition to enforce visual similarity
delta_noise = self.embedding_real - self.embedding_fake
delta_flat = tf.layers.Flatten()(delta_noise)
loss_noise_gen = tf.reduce_mean(tf.norm(delta_flat, ord=2, axis=1, keepdims=False))
self.loss_generator += 0.1 * loss_noise_gen
with tf.name_scope("Encoder_G"):
if self.config.trainer.mse_mode == "norm":
self.loss_enc_rec = tf.reduce_mean(
self.mse_loss(
self.image_gen_enc,
self.image_input,
mode="norm",
order=self.config.trainer.order,
)
)
self.loss_enc_f = tf.reduce_mean(
self.mse_loss(
self.decoded_enc_real,
self.decoded_enc_fake,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.loss_enc_rec = tf.reduce_mean(
self.mse_loss(
self.image_gen_enc,
self.image_input,
mode="mse",
order=self.config.trainer.order,
)
)
self.loss_enc_f = tf.reduce_mean(
self.mse_loss(
self.embedding_enc_real,
self.embedding_enc_fake,
mode="mse",
order=self.config.trainer.order,
)
)
self.loss_encoder_g = (
self.loss_enc_rec + self.config.trainer.encoder_f_factor * self.loss_enc_f
)
if self.config.trainer.enable_disc_xx:
self.enc_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_real, labels=tf.zeros_like(self.im_logit_real)
)
self.enc_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_fake, labels=tf.ones_like(self.im_logit_fake)
)
self.enc_loss_xx = tf.reduce_mean(self.enc_xx_real + self.enc_xx_fake)
self.loss_encoder_g += self.enc_loss_xx
with tf.name_scope("Encoder_R"):
if self.config.trainer.mse_mode == "norm":
self.loss_encoder_r = tf.reduce_mean(
self.mse_loss(
self.image_ege,
self.image_encoded_r,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.loss_encoder_r = tf.reduce_mean(
self.mse_loss(
self.image_ege,
self.image_encoded_r,
mode="mse",
order=self.config.trainer.order,
)
)
if self.config.trainer.enable_disc_zz:
self.enc_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_real, labels=tf.zeros_like(self.z_logit_real)
)
self.enc_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_fake, labels=tf.ones_like(self.z_logit_fake)
)
self.enc_loss_zz = tf.reduce_mean(self.enc_zz_real + self.enc_zz_fake)
self.loss_encoder_r += self.enc_loss_zz
if self.config.trainer.enable_disc_xx:
with tf.name_scope("Discriminator_XX"):
self.loss_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_real, labels=tf.ones_like(self.im_logit_real)
)
self.loss_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_fake, labels=tf.zeros_like(self.im_logit_fake)
)
self.dis_loss_xx = tf.reduce_mean(self.loss_xx_real + self.loss_xx_fake)
if self.config.trainer.enable_disc_zz:
with tf.name_scope("Discriminator_ZZ"):
self.loss_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_real, labels=tf.ones_like(self.z_logit_real)
)
self.loss_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_fake, labels=tf.zeros_like(self.z_logit_fake)
)
self.dis_loss_zz = tf.reduce_mean(self.loss_zz_real + self.loss_zz_fake)
############################################################################################
# OPTIMIZERS
############################################################################################
with tf.name_scope("Optimizers"):
self.generator_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_gen,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.encoder_g_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_enc,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.encoder_r_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_enc,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.discriminator_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_dis,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
# Collect all the variables
all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# Generator Network Variables
self.generator_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Generator_Model")
]
# Discriminator Network Variables
self.discriminator_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model")
]
# Discriminator Network Variables
self.encoder_g_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Encoder_G_Model")
]
self.encoder_r_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Encoder_R_Model")
]
self.dxxvars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model_XX")
]
self.dzzvars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model_ZZ")
]
# Generator Network Operations
self.gen_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Generator_Model"
)
# Discriminator Network Operations
self.disc_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model"
)
self.encg_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_G_Model"
)
self.encr_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_R_Model"
)
self.update_ops_dis_xx = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_XX"
)
self.update_ops_dis_zz = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_ZZ"
)
with tf.control_dependencies(self.gen_update_ops):
self.gen_op = self.generator_optimizer.minimize(
self.loss_generator,
var_list=self.generator_vars,
global_step=self.global_step_tensor,
)
with tf.control_dependencies(self.disc_update_ops):
self.disc_op = self.discriminator_optimizer.minimize(
self.loss_discriminator, var_list=self.discriminator_vars
)
with tf.control_dependencies(self.encg_update_ops):
self.encg_op = self.encoder_g_optimizer.minimize(
self.loss_encoder_g,
var_list=self.encoder_g_vars,
global_step=self.global_step_tensor,
)
with tf.control_dependencies(self.encr_update_ops):
self.encr_op = self.encoder_r_optimizer.minimize(
self.loss_encoder_r,
var_list=self.encoder_r_vars,
global_step=self.global_step_tensor,
)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies(self.update_ops_dis_xx):
self.disc_op_xx = self.discriminator_optimizer.minimize(
self.dis_loss_xx, var_list=self.dxxvars
)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies(self.update_ops_dis_zz):
self.disc_op_zz = self.discriminator_optimizer.minimize(
self.dis_loss_zz, var_list=self.dzzvars
)
# Exponential Moving Average for Estimation
self.dis_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_dis = self.dis_ema.apply(self.discriminator_vars)
self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)
self.encg_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encg = self.encg_ema.apply(self.encoder_g_vars)
self.encr_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encr = self.encr_ema.apply(self.encoder_r_vars)
if self.config.trainer.enable_disc_xx:
self.dis_xx_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_xx = self.dis_xx_ema.apply(self.dxxvars)
if self.config.trainer.enable_disc_zz:
self.dis_zz_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_zz = self.dis_zz_ema.apply(self.dzzvars)
with tf.control_dependencies([self.disc_op]):
self.train_dis_op = tf.group(maintain_averages_op_dis)
with tf.control_dependencies([self.gen_op]):
self.train_gen_op = tf.group(maintain_averages_op_gen)
with tf.control_dependencies([self.encg_op]):
self.train_enc_g_op = tf.group(maintain_averages_op_encg)
with tf.control_dependencies([self.encr_op]):
self.train_enc_r_op = tf.group(maintain_averages_op_encr)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies([self.disc_op_xx]):
self.train_dis_op_xx = tf.group(maintain_averages_op_dis_xx)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies([self.disc_op_zz]):
self.train_dis_op_zz = tf.group(maintain_averages_op_dis_zz)
############################################################################################
# TESTING
############################################################################################
self.logger.info("Building Testing Graph...")
with tf.variable_scope("SENCEBGAN"):
with tf.variable_scope("Discriminator_Model"):
self.embedding_q_ema, self.decoded_q_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.variable_scope("Generator_Model"):
self.image_gen_ema = self.generator(
self.embedding_q_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_rec_ema, self.decoded_rec_ema = self.discriminator(
self.image_gen_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Second Training Part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_ema = self.encoder_g(
self.image_input, getter=get_getter(self.encg_ema)
)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_ema = self.generator(
self.image_encoded_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake_ema, self.decoded_enc_fake_ema = self.discriminator(
self.image_gen_enc_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_enc_real_ema, self.decoded_enc_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_xx:
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real_ema, self.im_f_real_ema = self.discriminator_xx(
self.image_input,
self.image_input,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake_ema, self.im_f_fake_ema = self.discriminator_xx(
self.image_input,
self.image_gen_enc_ema,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r_ema = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r_ema = self.generator(self.image_encoded_r_ema)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege_ema = self.encoder_r(self.image_gen_enc_r_ema)
with tf.variable_scope("Discriminator_Model"):
self.embedding_encr_fake_ema, self.decoded_encr_fake_ema = self.discriminator(
self.image_gen_enc_r_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_encr_real_ema, self.decoded_encr_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_zz:
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real_ema, self.z_f_real_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_encoded_r_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake_ema, self.z_f_fake_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_ege_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.name_scope("Testing"):
with tf.name_scope("Image_Based"):
delta = self.image_input - self.image_gen_enc_ema
self.rec_residual = -delta
delta_flat = tf.layers.Flatten()(delta)
img_score_l1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__1"
)
self.img_score_l1 = tf.squeeze(img_score_l1)
delta = self.decoded_enc_fake_ema - self.decoded_enc_real_ema
delta_flat = tf.layers.Flatten()(delta)
img_score_l2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__2"
)
self.img_score_l2 = tf.squeeze(img_score_l2)
with tf.name_scope("Noise_Based"):
delta = self.image_encoded_r_ema - self.image_ege_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_1"
)
self.final_score_1 = tf.squeeze(final_score_1)
self.score_comb_im = (
1 * self.img_score_l1
+ self.feature_match1 * self.final_score_1
)
delta = self.image_encoded_r_ema - self.embedding_enc_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_2"
)
self.final_score_2 = tf.squeeze(final_score_2)
delta = self.embedding_encr_real_ema - self.embedding_encr_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_3 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_3"
)
self.final_score_3 = tf.squeeze(final_score_3)
# Combo 1
self.score_comb_z = (
(1 - self.feature_match2) * self.final_score_2
+ self.feature_match2 * self.final_score_3
)
# Combo 2
if self.config.trainer.enable_disc_xx:
delta = self.im_f_real_ema - self.im_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_4 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_4"
)
self.final_score_4 = tf.squeeze(final_score_4)
delta = self.z_f_real_ema - self.z_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_6 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_6"
)
self.final_score_6 = tf.squeeze(final_score_6)
############################################################################################
# TENSORBOARD
############################################################################################
if self.config.log.enable_summary:
with tf.name_scope("train_summary"):
with tf.name_scope("dis_summary"):
tf.summary.scalar("loss_disc", self.loss_discriminator, ["dis"])
tf.summary.scalar("loss_disc_real", self.disc_loss_real, ["dis"])
tf.summary.scalar("loss_disc_fake", self.disc_loss_fake, ["dis"])
if self.config.trainer.enable_disc_xx:
tf.summary.scalar("loss_dis_xx", self.dis_loss_xx, ["enc_g"])
if self.config.trainer.enable_disc_zz:
tf.summary.scalar("loss_dis_zz", self.dis_loss_zz, ["enc_r"])
with tf.name_scope("gen_summary"):
tf.summary.scalar("loss_generator", self.loss_generator, ["gen"])
with tf.name_scope("enc_summary"):
tf.summary.scalar("loss_encoder_g", self.loss_encoder_g, ["enc_g"])
tf.summary.scalar("loss_encoder_r", self.loss_encoder_r, ["enc_r"])
with tf.name_scope("img_summary"):
tf.summary.image("input_image", self.image_input, 1, ["img_1"])
tf.summary.image("reconstructed", self.image_gen, 1, ["img_1"])
# From discriminator in part 1
tf.summary.image("decoded_real", self.decoded_real, 1, ["img_1"])
tf.summary.image("decoded_fake", self.decoded_fake, 1, ["img_1"])
# Second Stage of Training
tf.summary.image("input_enc", self.image_input, 1, ["img_2"])
tf.summary.image("reconstructed", self.image_gen_enc, 1, ["img_2"])
# From discriminator in part 2
tf.summary.image("decoded_enc_real", self.decoded_enc_real, 1, ["img_2"])
tf.summary.image("decoded_enc_fake", self.decoded_enc_fake, 1, ["img_2"])
# Testing
tf.summary.image("input_image", self.image_input, 1, ["test"])
tf.summary.image("reconstructed", self.image_gen_enc_r_ema, 1, ["test"])
tf.summary.image("residual", self.rec_residual, 1, ["test"])
self.sum_op_dis = tf.summary.merge_all("dis")
self.sum_op_gen = tf.summary.merge_all("gen")
self.sum_op_enc_g = tf.summary.merge_all("enc_g")
self.sum_op_enc_r = tf.summary.merge_all("enc_r")
self.sum_op_im_1 = tf.summary.merge_all("img_1")
self.sum_op_im_2 = tf.summary.merge_all("img_2")
self.sum_op_im_test = tf.summary.merge_all("test")
self.sum_op = tf.summary.merge([self.sum_op_dis, self.sum_op_gen])
###############################################################################################
# MODULES
###############################################################################################
def generator(self, noise_input, getter=None):
    """Map a latent noise vector to a generated image.

    Args:
        noise_input: latent tensor of shape (batch, noise_dim).
        getter: optional custom getter (used to read EMA weights at test time).

    Returns:
        A tanh-activated single-channel image tensor; spatial size doubles at
        each transposed-convolution layer starting from a 2x2 feature map.
    """
    cfg = self.config.trainer
    with tf.variable_scope("Generator", custom_getter=getter, reuse=tf.AUTO_REUSE):
        # Project the latent code to a 2x2x256 feature map.
        with tf.variable_scope("Layer_1"):
            net = tf.layers.Dense(
                units=2 * 2 * 256, kernel_initializer=self.init_kernel, name="fc"
            )(noise_input)
            net = tf.layers.batch_normalization(
                net,
                momentum=cfg.batch_momentum,
                training=self.is_training_gen,
                name="batch_normalization",
            )
            net = tf.nn.leaky_relu(
                features=net, alpha=cfg.leakyReLU_alpha, name="relu"
            )
            net = tf.reshape(net, [-1, 2, 2, 256])
        # Three identical upsampling blocks: deconv -> batch norm -> leaky ReLU.
        # Scope names match the original per-layer scopes so that variable
        # names (and thus checkpoints / EMA shadows) are unchanged.
        for scope_name, n_filters in (("Layer_2", 128), ("Layer_3", 64), ("Layer_4", 32)):
            with tf.variable_scope(scope_name):
                net = tf.layers.Conv2DTranspose(
                    filters=n_filters,
                    kernel_size=5,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv2t",
                )(net)
                net = tf.layers.batch_normalization(
                    net,
                    momentum=cfg.batch_momentum,
                    training=self.is_training_gen,
                    name="batch_normalization",
                )
                net = tf.nn.leaky_relu(
                    features=net, alpha=cfg.leakyReLU_alpha, name="relu"
                )
        # Final deconv down to one channel, squashed into [-1, 1].
        with tf.variable_scope("Layer_5"):
            net = tf.layers.Conv2DTranspose(
                filters=1,
                kernel_size=5,
                strides=2,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv2t",
            )(net)
            net = tf.tanh(net, name="tanh")
    return net
def discriminator(self, image_input, getter=None, do_spectral_norm=False):
    """Autoencoding (EBGAN-style) discriminator.

    Encodes the input image to a noise_dim embedding, then decodes the
    embedding back to image space; both tensors are returned so callers can
    use the reconstruction as the energy/score signal.

    Args:
        image_input: image batch; reshaped below to
            (batch, image_size, image_size, 1).
        getter: optional custom getter (used to read EMA weights at
            inference time).
        do_spectral_norm: if True, the *encoder* conv/dense layers come from
            the spectral-norm wrapper module ``sn`` instead of ``tf.layers``.

    Returns:
        Tuple ``(embedding, decoded)``: the latent code and the
        tanh-activated reconstruction.
    """
    # Spectral norm (when requested) applies only to the encoder; the
    # decoder below always uses plain tf.layers.
    layers = sn if do_spectral_norm else tf.layers
    with tf.variable_scope("Discriminator", custom_getter=getter, reuse=tf.AUTO_REUSE):
        with tf.variable_scope("Encoder"):
            # Force NHWC layout with a single channel.
            x_e = tf.reshape(
                image_input,
                [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
            )
            net_name = "Layer_1"
            with tf.variable_scope(net_name):
                # Strided conv halves the spatial size; note there is no
                # batch norm on the first encoder layer.
                x_e = layers.conv2d(
                    x_e,
                    filters=32,
                    kernel_size=5,
                    strides=2,
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            # spatial /2 again, 64 feature maps
            # NOTE(review): the original shape comments (14x14x64 etc.) did
            # not match the filter counts actually used in this code.
            net_name = "Layer_2"
            with tf.variable_scope(net_name):
                x_e = layers.conv2d(
                    x_e,
                    filters=64,
                    kernel_size=5,
                    padding="same",
                    strides=2,
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_dis,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            # spatial /2 again, 128 feature maps
            net_name = "Layer_3"
            with tf.variable_scope(net_name):
                x_e = layers.conv2d(
                    x_e,
                    filters=128,
                    kernel_size=5,
                    padding="same",
                    strides=2,
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_dis,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            # Flatten and project to the latent dimension.
            x_e = tf.layers.Flatten()(x_e)
            net_name = "Layer_4"
            with tf.variable_scope(net_name):
                x_e = layers.dense(
                    x_e,
                    units=self.config.trainer.noise_dim,
                    kernel_initializer=self.init_kernel,
                    name="fc",
                )
            embedding = x_e
        with tf.variable_scope("Decoder"):
            # Treat the embedding as a 1x1 feature map and upsample back to
            # image resolution with five transposed convolutions.
            net = tf.reshape(embedding, [-1, 1, 1, self.config.trainer.noise_dim])
            net_name = "layer_1"
            with tf.variable_scope(net_name):
                net = tf.layers.Conv2DTranspose(
                    filters=256,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="tconv1",
                )(net)
                net = tf.layers.batch_normalization(
                    inputs=net,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_dis,
                    name="tconv1/bn",
                )
                net = tf.nn.relu(features=net, name="tconv1/relu")
            net_name = "layer_2"
            with tf.variable_scope(net_name):
                net = tf.layers.Conv2DTranspose(
                    filters=128,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="tconv2",
                )(net)
                net = tf.layers.batch_normalization(
                    inputs=net,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_dis,
                    name="tconv2/bn",
                )
                net = tf.nn.relu(features=net, name="tconv2/relu")
            net_name = "layer_3"
            with tf.variable_scope(net_name):
                net = tf.layers.Conv2DTranspose(
                    filters=64,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="tconv3",
                )(net)
                net = tf.layers.batch_normalization(
                    inputs=net,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_dis,
                    name="tconv3/bn",
                )
                net = tf.nn.relu(features=net, name="tconv3/relu")
            net_name = "layer_4"
            with tf.variable_scope(net_name):
                net = tf.layers.Conv2DTranspose(
                    filters=32,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="tconv4",
                )(net)
                net = tf.layers.batch_normalization(
                    inputs=net,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_dis,
                    name="tconv4/bn",
                )
                net = tf.nn.relu(features=net, name="tconv4/relu")
            net_name = "layer_5"
            with tf.variable_scope(net_name):
                # Last layer: single channel, no batch norm, tanh output.
                net = tf.layers.Conv2DTranspose(
                    filters=1,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="tconv5",
                )(net)
                decoded = tf.nn.tanh(net, name="tconv5/tanh")
    return embedding, decoded
def encoder_g(self, image_input, getter=None):
    """Generator-side encoder: map an image to a noise_dim latent code.

    Args:
        image_input: image batch; reshaped to
            (batch, image_size, image_size, 1).
        getter: optional custom getter (EMA weights at inference).

    Returns:
        A (batch, noise_dim) latent tensor.
    """
    cfg = self.config.trainer
    with tf.variable_scope("Encoder_G", custom_getter=getter, reuse=tf.AUTO_REUSE):
        size = self.config.data_loader.image_size
        net = tf.reshape(image_input, [-1, size, size, 1])
        # Three strided conv blocks (conv -> batch norm -> leaky ReLU),
        # each halving the spatial size. Scope names match the original so
        # variable names are unchanged.
        for scope_name, n_filters in (("Layer_1", 64), ("Layer_2", 128), ("Layer_3", 256)):
            with tf.variable_scope(scope_name):
                net = tf.layers.Conv2D(
                    filters=n_filters,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(net)
                net = tf.layers.batch_normalization(
                    net,
                    momentum=cfg.batch_momentum,
                    training=self.is_training_enc_g,
                )
                net = tf.nn.leaky_relu(
                    features=net, alpha=cfg.leakyReLU_alpha, name="leaky_relu"
                )
        # Flatten and project to the latent dimension.
        net = tf.layers.Flatten()(net)
        with tf.variable_scope("Layer_4"):
            net = tf.layers.Dense(
                units=cfg.noise_dim,
                kernel_initializer=self.init_kernel,
                name="fc",
            )(net)
    return net
def encoder_r(self, image_input, getter=None):
    """Reconstruction-side encoder: map an image to a noise_dim latent code.

    Structurally identical to `encoder_g` but lives in its own variable
    scope ("Encoder_R") and is gated by its own training placeholder.

    Args:
        image_input: image batch; reshaped to
            (batch, image_size, image_size, 1).
        getter: optional custom getter (EMA weights at inference).

    Returns:
        A (batch, noise_dim) latent tensor.
    """
    cfg = self.config.trainer
    with tf.variable_scope("Encoder_R", custom_getter=getter, reuse=tf.AUTO_REUSE):
        size = self.config.data_loader.image_size
        net = tf.reshape(image_input, [-1, size, size, 1])
        # Three strided conv blocks (conv -> batch norm -> leaky ReLU),
        # each halving the spatial size; scope names match the original.
        for scope_name, n_filters in (("Layer_1", 64), ("Layer_2", 128), ("Layer_3", 256)):
            with tf.variable_scope(scope_name):
                net = tf.layers.Conv2D(
                    filters=n_filters,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(net)
                net = tf.layers.batch_normalization(
                    net,
                    momentum=cfg.batch_momentum,
                    training=self.is_training_enc_r,
                )
                net = tf.nn.leaky_relu(
                    features=net, alpha=cfg.leakyReLU_alpha, name="leaky_relu"
                )
        # Flatten and project to the latent dimension.
        net = tf.layers.Flatten()(net)
        with tf.variable_scope("Layer_4"):
            net = tf.layers.Dense(
                units=cfg.noise_dim,
                kernel_initializer=self.init_kernel,
                name="fc",
            )(net)
    return net
# Regularizer discriminator for the Generator Encoder
def discriminator_xx(self, img_tensor, recreated_img, getter=None, do_spectral_norm=False):
    """Discriminator between image pairs (x, x) and (x, rec_x).

    Acts as a regularizer for the generator-side encoder.

    Args:
        img_tensor: original image batch.
        recreated_img: reconstruction of the same batch.
        getter: optional custom getter (EMA weights at inference).
        do_spectral_norm: if True, conv layers come from the spectral-norm
            wrapper module ``sn`` instead of ``tf.layers``.

    Returns:
        Tuple ``(logits, intermediate_layer)``: squeezed real/fake logits and
        the flattened features used for feature matching.
    """
    layers = sn if do_spectral_norm else tf.layers
    with tf.variable_scope("Discriminator_xx", reuse=tf.AUTO_REUSE, custom_getter=getter):
        # The two images are stacked along axis 1 (height), not the channel
        # axis — presumably intentional; TODO confirm against training code.
        net = tf.concat([img_tensor, recreated_img], axis=1)
        net_name = "layer_1"
        with tf.variable_scope(net_name):
            net = layers.conv2d(
                net,
                filters=64,
                kernel_size=4,
                strides=2,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv1",
            )
            # Bug fix: this activation was mislabeled "conv2/leaky_relu"
            # (copy-paste from layer_2); it belongs to conv1.
            net = tf.nn.leaky_relu(
                features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv1/leaky_relu"
            )
            net = tf.layers.dropout(
                net,
                rate=self.config.trainer.dropout_rate,
                training=self.is_training_enc_g,
                name="dropout",
            )
        # Cleanup: removed a dead re-open of the scope that fetched
        # "conv1/kernel" into an unused local.
        net_name = "layer_2"
        with tf.variable_scope(net_name):
            net = layers.conv2d(
                net,
                filters=128,
                kernel_size=4,
                strides=2,
                padding="same",
                kernel_initializer=self.init_kernel,
                name="conv2",
            )
            net = tf.nn.leaky_relu(
                features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
            )
            net = tf.layers.dropout(
                net,
                rate=self.config.trainer.dropout_rate,
                training=self.is_training_enc_g,
                name="dropout",
            )
        # Flattened features, exposed for feature-matching losses.
        net = tf.layers.Flatten()(net)
        intermediate_layer = net
        net_name = "layer_3"
        with tf.variable_scope(net_name):
            net = tf.layers.dense(net, units=1, kernel_initializer=self.init_kernel, name="fc")
        logits = tf.squeeze(net)
        return logits, intermediate_layer
# Regularizer discriminator for the Reconstruction Encoder
def discriminator_zz(self, noise_tensor, recreated_noise, getter=None, do_spectral_norm=False):
    """Discriminator between latent pairs (z, z) and (z, rec_z).

    Acts as a regularizer for the reconstruction-side encoder.

    Args:
        noise_tensor: original latent batch.
        recreated_noise: re-encoded latent batch.
        getter: optional custom getter (EMA weights at inference).
        do_spectral_norm: if True, dense layers come from the spectral-norm
            wrapper module ``sn`` instead of ``tf.layers``.

    Returns:
        Tuple ``(logits, intermediate_layer)``: squeezed real/fake logits and
        the features of the second dense block.
    """
    layers = sn if do_spectral_norm else tf.layers
    with tf.variable_scope("Discriminator_zz", reuse=tf.AUTO_REUSE, custom_getter=getter):
        # Pair the two codes along the feature axis.
        net = tf.concat([noise_tensor, recreated_noise], axis=-1)
        # Two dense blocks: fc -> leaky ReLU -> dropout. Scope names match
        # the original so variable names are unchanged.
        for scope_name, width in (("y_layer_1", 64), ("y_layer_2", 32)):
            with tf.variable_scope(scope_name):
                net = layers.dense(net, units=width, kernel_initializer=self.init_kernel, name="fc")
                net = tf.nn.leaky_relu(features=net, alpha=self.config.trainer.leakyReLU_alpha)
                net = tf.layers.dropout(
                    net,
                    rate=self.config.trainer.dropout_rate,
                    training=self.is_training_enc_r,
                    name="dropout",
                )
        intermediate_layer = net
        with tf.variable_scope("y_layer_3"):
            net = layers.dense(net, units=1, kernel_initializer=self.init_kernel, name="fc")
        logits = tf.squeeze(net)
        return logits, intermediate_layer
###############################################################################################
# CUSTOM LOSSES
###############################################################################################
def mse_loss(self, pred, data, mode="norm", order=2):
    """Distance between a prediction and a target tensor.

    Args:
        pred: predicted tensor.
        data: target tensor, broadcast-compatible with `pred`.
        mode: "norm" for a per-sample vector norm of the flattened
            difference, "mse" for the scalar mean squared error.
        order: norm order used when mode == "norm".

    Returns:
        A per-sample norm tensor ("norm") or a scalar ("mse").

    Raises:
        ValueError: if `mode` is not "norm" or "mse". (Previously an
            unknown mode fell through and raised UnboundLocalError.)
    """
    if mode == "norm":
        # Per-sample norm over all non-batch dimensions.
        delta = tf.layers.Flatten()(pred - data)
        return tf.norm(delta, ord=order, axis=1, keepdims=False)
    if mode == "mse":
        return tf.reduce_mean(tf.squared_difference(pred, data))
    raise ValueError("mse_loss: unknown mode %r (expected 'norm' or 'mse')" % mode)
def pullaway_loss(self, embeddings):
    """Pull-away regularizer over a batch of embeddings.

    Computes the mean cosine similarity between all *distinct* pairs of
    embeddings in the batch (the diagonal self-similarities, which are all
    1 after normalization, are subtracted out). Lower values mean more
    diverse embeddings.
    """
    # L2-normalize each embedding row.
    row_norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
    unit = embeddings / row_norm
    # Gram matrix of cosine similarities.
    gram = tf.matmul(unit, unit, transpose_b=True)
    n = tf.cast(tf.shape(embeddings)[0], tf.float32)
    # The diagonal contributes exactly n, so subtracting n leaves only the
    # off-diagonal pair similarities; divide by the number of such pairs.
    return (tf.reduce_sum(gram) - n) / (n * (n - 1))
def init_saver(self):
    """Create the checkpoint saver, keeping at most `log.max_to_keep` files."""
    keep = self.config.log.max_to_keep
    self.saver = tf.train.Saver(max_to_keep=keep)
| 46.785146 | 100 | 0.501191 | import tensorflow as tf
from base.base_model import BaseModel
from utils.alad_utils import get_getter
import utils.alad_utils as sn
class SENCEBGAN(BaseModel):
def __init__(self, config):
super(SENCEBGAN, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
y(self.discriminator_vars)
self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)
self.encg_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encg = self.encg_ema.apply(self.encoder_g_vars)
self.encr_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encr = self.encr_ema.apply(self.encoder_r_vars)
if self.config.trainer.enable_disc_xx:
self.dis_xx_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_xx = self.dis_xx_ema.apply(self.dxxvars)
if self.config.trainer.enable_disc_zz:
self.dis_zz_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_zz = self.dis_zz_ema.apply(self.dzzvars)
with tf.control_dependencies([self.disc_op]):
self.train_dis_op = tf.group(maintain_averages_op_dis)
with tf.control_dependencies([self.gen_op]):
self.train_gen_op = tf.group(maintain_averages_op_gen)
with tf.control_dependencies([self.encg_op]):
self.train_enc_g_op = tf.group(maintain_averages_op_encg)
with tf.control_dependencies([self.encr_op]):
self.train_enc_r_op = tf.group(maintain_averages_op_encr)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies([self.disc_op_xx]):
self.train_dis_op_xx = tf.group(maintain_averages_op_dis_xx)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies([self.disc_op_zz]):
self.train_dis_op_zz = tf.group(maintain_averages_op_dis_zz)
############################################################################################
# TESTING
############################################################################################
self.logger.info("Building Testing Graph...")
with tf.variable_scope("SENCEBGAN"):
with tf.variable_scope("Discriminator_Model"):
self.embedding_q_ema, self.decoded_q_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.variable_scope("Generator_Model"):
self.image_gen_ema = self.generator(
self.embedding_q_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_rec_ema, self.decoded_rec_ema = self.discriminator(
self.image_gen_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Second Training Part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_ema = self.encoder_g(
self.image_input, getter=get_getter(self.encg_ema)
)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_ema = self.generator(
self.image_encoded_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake_ema, self.decoded_enc_fake_ema = self.discriminator(
self.image_gen_enc_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_enc_real_ema, self.decoded_enc_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_xx:
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real_ema, self.im_f_real_ema = self.discriminator_xx(
self.image_input,
self.image_input,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake_ema, self.im_f_fake_ema = self.discriminator_xx(
self.image_input,
self.image_gen_enc_ema,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r_ema = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r_ema = self.generator(self.image_encoded_r_ema)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege_ema = self.encoder_r(self.image_gen_enc_r_ema)
with tf.variable_scope("Discriminator_Model"):
self.embedding_encr_fake_ema, self.decoded_encr_fake_ema = self.discriminator(
self.image_gen_enc_r_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_encr_real_ema, self.decoded_encr_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_zz:
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real_ema, self.z_f_real_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_encoded_r_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake_ema, self.z_f_fake_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_ege_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.name_scope("Testing"):
with tf.name_scope("Image_Based"):
delta = self.image_input - self.image_gen_enc_ema
self.rec_residual = -delta
delta_flat = tf.layers.Flatten()(delta)
img_score_l1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__1"
)
self.img_score_l1 = tf.squeeze(img_score_l1)
delta = self.decoded_enc_fake_ema - self.decoded_enc_real_ema
delta_flat = tf.layers.Flatten()(delta)
img_score_l2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__2"
)
self.img_score_l2 = tf.squeeze(img_score_l2)
with tf.name_scope("Noise_Based"):
delta = self.image_encoded_r_ema - self.image_ege_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_1"
)
self.final_score_1 = tf.squeeze(final_score_1)
self.score_comb_im = (
1 * self.img_score_l1
+ self.feature_match1 * self.final_score_1
)
delta = self.image_encoded_r_ema - self.embedding_enc_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_2"
)
self.final_score_2 = tf.squeeze(final_score_2)
delta = self.embedding_encr_real_ema - self.embedding_encr_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_3 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_3"
)
self.final_score_3 = tf.squeeze(final_score_3)
# Combo 1
self.score_comb_z = (
(1 - self.feature_match2) * self.final_score_2
+ self.feature_match2 * self.final_score_3
)
# Combo 2
if self.config.trainer.enable_disc_xx:
delta = self.im_f_real_ema - self.im_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_4 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_4"
)
self.final_score_4 = tf.squeeze(final_score_4)
delta = self.z_f_real_ema - self.z_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_6 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_6"
)
self.final_score_6 = tf.squeeze(final_score_6)
############################################################################################
# TENSORBOARD
############################################################################################
if self.config.log.enable_summary:
with tf.name_scope("train_summary"):
with tf.name_scope("dis_summary"):
tf.summary.scalar("loss_disc", self.loss_discriminator, ["dis"])
tf.summary.scalar("loss_disc_real", self.disc_loss_real, ["dis"])
tf.summary.scalar("loss_disc_fake", self.disc_loss_fake, ["dis"])
if self.config.trainer.enable_disc_xx:
tf.summary.scalar("loss_dis_xx", self.dis_loss_xx, ["enc_g"])
if self.config.trainer.enable_disc_zz:
tf.summary.scalar("loss_dis_zz", self.dis_loss_zz, ["enc_r"])
with tf.name_scope("gen_summary"):
tf.summary.scalar("loss_generator", self.loss_generator, ["gen"])
with tf.name_scope("enc_summary"):
tf.summary.scalar("loss_encoder_g", self.loss_encoder_g, ["enc_g"])
tf.summary.scalar("loss_encoder_r", self.loss_encoder_r, ["enc_r"])
with tf.name_scope("img_summary"):
tf.summary.image("input_image", self.image_input, 1, ["img_1"])
tf.summary.image("reconstructed", self.image_gen, 1, ["img_1"])
# From discriminator in part 1
tf.summary.image("decoded_real", self.decoded_real, 1, ["img_1"])
tf.summary.image("decoded_fake", self.decoded_fake, 1, ["img_1"])
# Second Stage of Training
tf.summary.image("input_enc", self.image_input, 1, ["img_2"])
tf.summary.image("reconstructed", self.image_gen_enc, 1, ["img_2"])
# From discriminator in part 2
tf.summary.image("decoded_enc_real", self.decoded_enc_real, 1, ["img_2"])
tf.summary.image("decoded_enc_fake", self.decoded_enc_fake, 1, ["img_2"])
# Testing
tf.summary.image("input_image", self.image_input, 1, ["test"])
tf.summary.image("reconstructed", self.image_gen_enc_r_ema, 1, ["test"])
tf.summary.image("residual", self.rec_residual, 1, ["test"])
self.sum_op_dis = tf.summary.merge_all("dis")
self.sum_op_gen = tf.summary.merge_all("gen")
self.sum_op_enc_g = tf.summary.merge_all("enc_g")
self.sum_op_enc_r = tf.summary.merge_all("enc_r")
self.sum_op_im_1 = tf.summary.merge_all("img_1")
self.sum_op_im_2 = tf.summary.merge_all("img_2")
self.sum_op_im_test = tf.summary.merge_all("test")
self.sum_op = tf.summary.merge([self.sum_op_dis, self.sum_op_gen])
###############################################################################################
# MODULES
###############################################################################################
def generator(self, noise_input, getter=None):
with tf.variable_scope("Generator", custom_getter=getter, reuse=tf.AUTO_REUSE):
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_g = tf.layers.Dense(
units=2 * 2 * 256, kernel_initializer=self.init_kernel, name="fc"
)(noise_input)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
x_g = tf.reshape(x_g, [-1, 2, 2, 256])
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_5"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.tanh(x_g, name="tanh")
return x_g
def discriminator(self, image_input, getter=None, do_spectral_norm=False):
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator", custom_getter=getter, reuse=tf.AUTO_REUSE):
with tf.variable_scope("Encoder"):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=32,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 14 x 14 x 64
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=64,
kernel_size=5,
padding="same",
strides=2,
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 7 x 7 x 128
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=128,
kernel_size=5,
padding="same",
strides=2,
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 4 x 4 x 256
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = layers.dense(
x_e,
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)
embedding = x_e
with tf.variable_scope("Decoder"):
net = tf.reshape(embedding, [-1, 1, 1, self.config.trainer.noise_dim])
net_name = "layer_1"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=256,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv1",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv1/bn",
)
net = tf.nn.relu(features=net, name="tconv1/relu")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv2",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv2/bn",
)
net = tf.nn.relu(features=net, name="tconv2/relu")
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv3",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv3/bn",
)
net = tf.nn.relu(features=net, name="tconv3/relu")
net_name = "layer_4"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv4",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv4/bn",
)
net = tf.nn.relu(features=net, name="tconv4/relu")
net_name = "layer_5"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv5",
)(net)
decoded = tf.nn.tanh(net, name="tconv5/tanh")
return embedding, decoded
def encoder_g(self, image_input, getter=None):
with tf.variable_scope("Encoder_G", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
return x_e
def encoder_r(self, image_input, getter=None):
with tf.variable_scope("Encoder_R", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
return x_e
# Regularizer discriminator for the Generator Encoder
def discriminator_xx(self, img_tensor, recreated_img, getter=None, do_spectral_norm=False):
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator_xx", reuse=tf.AUTO_REUSE, custom_getter=getter):
net = tf.concat([img_tensor, recreated_img], axis=1)
net_name = "layer_1"
with tf.variable_scope(net_name):
net = layers.conv2d(
net,
filters=64,
kernel_size=4,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv1",
)
net = tf.nn.leaky_relu(
features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
)
net = tf.layers.dropout(
net,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_g,
name="dropout",
)
with tf.variable_scope(net_name, reuse=True):
weights = tf.get_variable("conv1/kernel")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = layers.conv2d(
net,
filters=128,
kernel_size=4,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2",
)
net = tf.nn.leaky_relu(
features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
)
net = tf.layers.dropout(
net,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_g,
name="dropout",
)
net = tf.layers.Flatten()(net)
intermediate_layer = net
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.dense(net, units=1, kernel_initializer=self.init_kernel, name="fc")
logits = tf.squeeze(net)
return logits, intermediate_layer
# Regularizer discriminator for the Reconstruction Encoder
def discriminator_zz(self, noise_tensor, recreated_noise, getter=None, do_spectral_norm=False):
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator_zz", reuse=tf.AUTO_REUSE, custom_getter=getter):
y = tf.concat([noise_tensor, recreated_noise], axis=-1)
net_name = "y_layer_1"
with tf.variable_scope(net_name):
y = layers.dense(y, units=64, kernel_initializer=self.init_kernel, name="fc")
y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
y = tf.layers.dropout(
y,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_r,
name="dropout",
)
net_name = "y_layer_2"
with tf.variable_scope(net_name):
y = layers.dense(y, units=32, kernel_initializer=self.init_kernel, name="fc")
y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
y = tf.layers.dropout(
y,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_r,
name="dropout",
)
intermediate_layer = y
net_name = "y_layer_3"
with tf.variable_scope(net_name):
y = layers.dense(y, units=1, kernel_initializer=self.init_kernel, name="fc")
logits = tf.squeeze(y)
return logits, intermediate_layer
###############################################################################################
# CUSTOM LOSSES
###############################################################################################
def mse_loss(self, pred, data, mode="norm", order=2):
if mode == "norm":
delta = pred - data
delta = tf.layers.Flatten()(delta)
loss_val = tf.norm(delta, ord=order, axis=1, keepdims=False)
elif mode == "mse":
loss_val = tf.reduce_mean(tf.squared_difference(pred, data))
return loss_val
def pullaway_loss(self, embeddings):
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
pt_loss = (tf.reduce_sum(similarity) - batch_size) / (batch_size * (batch_size - 1))
return pt_loss
def init_saver(self):
self.saver = tf.train.Saver(max_to_keep=self.config.log.max_to_keep)
| true | true |
f7366460f1bafd203830c13ee2d0e7ade440377d | 643 | py | Python | lfd/environment/settings.py | def670/lfd | 9fef15f556cba49dd4b42c0c29505a4137f95fc5 | [
"BSD-2-Clause"
] | 36 | 2015-05-22T14:47:18.000Z | 2021-07-27T15:30:36.000Z | lfd/environment/settings.py | jeffmahler/lfd | ecc6b934db098c0b1af9946454917b6dc911cb74 | [
"BSD-2-Clause"
] | null | null | null | lfd/environment/settings.py | jeffmahler/lfd | ecc6b934db098c0b1af9946454917b6dc911cb74 | [
"BSD-2-Clause"
] | 13 | 2015-05-22T15:38:07.000Z | 2021-07-28T03:20:35.000Z | #:
GRIPPER_OPEN_CLOSE_THRESH = 0.04 # .06 for fig8 (thick rope), 0.04 for thin rope (overhand)
#:
ROPE_RADIUS = .005
#:
ROPE_ANG_STIFFNESS = .1
#:
ROPE_ANG_DAMPING = 1
#:
ROPE_LIN_DAMPING = .75
#:
ROPE_ANG_LIMIT = .4
#:
ROPE_LIN_STOP_ERP = .2
#:
ROPE_MASS = 1.0
#:
ROPE_RADIUS_THICK = .008
#: window properties for the viewer's window
WINDOW_PROP = [0,0,1500,1500]
#: transposed homogeneous matrix for the viewer's camera
CAMERA_MATRIX = [[ 0, 1, 0, 0],
[ -1, 0, 0.5, 0],
[ 0.5, 0, 1, 0],
[ 2.25, 0, 4.5, 1]]
try:
from lfd_settings.environment.settings import *
except ImportError:
pass
| 19.484848 | 91 | 0.623639 |
GRIPPER_OPEN_CLOSE_THRESH = 0.04
ROPE_RADIUS = .005
ROPE_ANG_STIFFNESS = .1
ROPE_ANG_DAMPING = 1
ROPE_LIN_DAMPING = .75
ROPE_ANG_LIMIT = .4
ROPE_LIN_STOP_ERP = .2
ROPE_MASS = 1.0
ROPE_RADIUS_THICK = .008
WINDOW_PROP = [0,0,1500,1500]
#: transposed homogeneous matrix for the viewer's camera
CAMERA_MATRIX = [[ 0, 1, 0, 0],
[ -1, 0, 0.5, 0],
[ 0.5, 0, 1, 0],
[ 2.25, 0, 4.5, 1]]
try:
from lfd_settings.environment.settings import *
except ImportError:
pass
| true | true |
f73666fc6d19b1f19d47b390ab17540bd4d53368 | 579 | py | Python | setup.py | hanhha/console_ui | 4fd3d37367bc23316620d5ad76666981762fb344 | [
"MIT"
] | 1 | 2022-02-04T03:20:31.000Z | 2022-02-04T03:20:31.000Z | setup.py | alimogh/console_ui | 4fd3d37367bc23316620d5ad76666981762fb344 | [
"MIT"
] | null | null | null | setup.py | alimogh/console_ui | 4fd3d37367bc23316620d5ad76666981762fb344 | [
"MIT"
] | 1 | 2022-02-04T03:22:03.000Z | 2022-02-04T03:22:03.000Z | #!/usr/bin/env python3
from setuptools import setup
import sys
if sys.version_info < (3,0):
sys.exit ("Python version < 3.0 is not supported")
elif sys.version_info < (3,6):
print ("Using Python version < 3.6 would cause issue relevant to the order of windows generated")
setup (name = 'console_ui',
version = '1.0',
py_modules=['wingen', 'console_utils'],
description = 'A quick UI generator using Curses',
author = 'Hanh Ha',
author_email = 'tranhanh.haminh@gmail.com',
license = 'MIT',
url = 'https://github.com/hanhha/console_ui',
)
| 28.95 | 98 | 0.668394 |
from setuptools import setup
import sys
if sys.version_info < (3,0):
sys.exit ("Python version < 3.0 is not supported")
elif sys.version_info < (3,6):
print ("Using Python version < 3.6 would cause issue relevant to the order of windows generated")
setup (name = 'console_ui',
version = '1.0',
py_modules=['wingen', 'console_utils'],
description = 'A quick UI generator using Curses',
author = 'Hanh Ha',
author_email = 'tranhanh.haminh@gmail.com',
license = 'MIT',
url = 'https://github.com/hanhha/console_ui',
)
| true | true |
f736674bc693ddff36470a5c55535e91fbce24e8 | 1,355 | py | Python | src/spn/experiments/AQP/leaves/static/InferenceRange.py | tkrons/SPFlow_topdownrules | 6227fc973f4f36da7fbe25fa500d656eb7273033 | [
"Apache-2.0"
] | 199 | 2018-11-13T10:37:45.000Z | 2022-02-06T17:07:28.000Z | src/spn/experiments/AQP/leaves/static/InferenceRange.py | minimrbanana/SPFlow | 32233bf29d107c62f0f727b0e64aaa74b37cfe1e | [
"Apache-2.0"
] | 46 | 2018-11-30T13:40:38.000Z | 2022-01-22T21:05:07.000Z | src/spn/experiments/AQP/leaves/static/InferenceRange.py | minimrbanana/SPFlow | 32233bf29d107c62f0f727b0e64aaa74b37cfe1e | [
"Apache-2.0"
] | 78 | 2018-11-13T10:37:48.000Z | 2022-03-14T21:34:13.000Z | """
Created on June 21, 2018
@author: Moritz
"""
import numpy as np
from spn.algorithms.Inference import add_node_likelihood
from spn.experiments.AQP.leaves.static.StaticNumeric import StaticNumeric
def static_likelihood_range(node, ranges, dtype=np.float64, **kwargs):
assert len(node.scope) == 1, node.scope
probs = np.ones((ranges.shape[0], 1), dtype=dtype)
ranges = ranges[:, node.scope[0]]
for i, rang in enumerate(ranges):
# Skip if no range is specified aka use a log-probability of 0 for that instance
if rang is None:
continue
# Skip if no values for the range are provided
if rang.is_impossible():
probs[i] = 0
# Compute the sum of the probability of all possible values
probs[i] = sum([_compute_probability_for_range(node, interval) for interval in rang.get_ranges()])
return probs
def _compute_probability_for_range(node, interval):
if len(interval) == 1:
if node.val == interval[0]:
return 1
else:
return 0
else:
lower = interval[0]
higher = interval[1]
if lower <= node.val and node.val <= higher:
return 1
else:
return 0
def add_static_inference_range_support():
add_node_likelihood(StaticNumeric, static_likelihood_range)
| 25.092593 | 106 | 0.647232 |
import numpy as np
from spn.algorithms.Inference import add_node_likelihood
from spn.experiments.AQP.leaves.static.StaticNumeric import StaticNumeric
def static_likelihood_range(node, ranges, dtype=np.float64, **kwargs):
assert len(node.scope) == 1, node.scope
probs = np.ones((ranges.shape[0], 1), dtype=dtype)
ranges = ranges[:, node.scope[0]]
for i, rang in enumerate(ranges):
if rang is None:
continue
if rang.is_impossible():
probs[i] = 0
probs[i] = sum([_compute_probability_for_range(node, interval) for interval in rang.get_ranges()])
return probs
def _compute_probability_for_range(node, interval):
if len(interval) == 1:
if node.val == interval[0]:
return 1
else:
return 0
else:
lower = interval[0]
higher = interval[1]
if lower <= node.val and node.val <= higher:
return 1
else:
return 0
def add_static_inference_range_support():
add_node_likelihood(StaticNumeric, static_likelihood_range)
| true | true |
f7366767fdec0eacbd4d0d9376ef997a8cdd4826 | 35,093 | py | Python | sphinx/writers/html.py | PeerHerholz/smobsc | db34d2bb96b80579bd4a3f4c198a6b524c5a134a | [
"BSD-2-Clause"
] | 3 | 2019-06-11T09:42:08.000Z | 2020-03-10T15:57:09.000Z | sphinx/writers/html.py | PeerHerholz/smobsc | db34d2bb96b80579bd4a3f4c198a6b524c5a134a | [
"BSD-2-Clause"
] | 12 | 2019-01-09T15:43:57.000Z | 2020-01-21T10:46:30.000Z | sphinx/writers/html.py | PeerHerholz/smobsc | db34d2bb96b80579bd4a3f4c198a6b524c5a134a | [
"BSD-2-Clause"
] | 10 | 2019-02-04T11:49:35.000Z | 2020-03-21T13:32:20.000Z | """
sphinx.writers.html
~~~~~~~~~~~~~~~~~~~
docutils writers handling Sphinx' custom nodes.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import copy
import os
import posixpath
import sys
import warnings
from typing import Iterable, cast
from docutils import nodes
from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.images import get_image_size
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.builders.html import StandaloneHTMLBuilder # NOQA
logger = logging.getLogger(__name__)
# A good overview of the purpose behind these classes can be found here:
# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
class HTMLWriter(Writer):
# override embed-stylesheet default value to 0.
settings_spec = copy.deepcopy(Writer.settings_spec)
for _setting in settings_spec[2]:
if '--embed-stylesheet' in _setting[1]:
_setting[2]['default'] = 0
def __init__(self, builder):
# type: (StandaloneHTMLBuilder) -> None
super().__init__()
self.builder = builder
def translate(self):
# type: () -> None
# sadly, this is mostly copied from parent class
visitor = self.builder.create_translator(self.document, self.builder)
self.visitor = cast(HTMLTranslator, visitor)
self.document.walkabout(visitor)
self.output = self.visitor.astext()
for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'fragment',
'body_suffix', 'meta', 'title', 'subtitle', 'header',
'footer', 'html_prolog', 'html_head', 'html_title',
'html_subtitle', 'html_body', ):
setattr(self, attr, getattr(visitor, attr, None))
self.clean_meta = ''.join(self.visitor.meta[2:])
class HTMLTranslator(SphinxTranslator, BaseTranslator):
"""
Our custom HTML translator.
"""
builder = None # type: StandaloneHTMLBuilder
def __init__(self, *args):
# type: (Any) -> None
if isinstance(args[0], nodes.document) and isinstance(args[1], Builder):
document, builder = args
else:
warnings.warn('The order of arguments for HTMLTranslator has been changed. '
'Please give "document" as 1st and "builder" as 2nd.',
RemovedInSphinx40Warning, stacklevel=2)
builder, document = args
super().__init__(document, builder)
self.highlighter = self.builder.highlighter
self.docnames = [self.builder.current_docname] # for singlehtml builder
self.manpages_url = self.config.manpages_url
self.protect_literal_text = 0
self.permalink_text = self.config.html_add_permalinks
# support backwards-compatible setting to a bool
if not isinstance(self.permalink_text, str):
self.permalink_text = self.permalink_text and '\u00B6' or ''
self.permalink_text = self.encode(self.permalink_text)
self.secnumber_suffix = self.config.html_secnumber_suffix
self.param_separator = ''
self.optional_param_level = 0
self._table_row_index = 0
self._fieldlist_row_index = 0
self.required_params_left = 0
def visit_start_of_file(self, node):
# type: (nodes.Element) -> None
# only occurs in the single-file builder
self.docnames.append(node['docname'])
self.body.append('<span id="document-%s"></span>' % node['docname'])
def depart_start_of_file(self, node):
# type: (nodes.Element) -> None
self.docnames.pop()
def visit_desc(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
# type: (nodes.Element) -> None
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
# type: (nodes.Element) -> None
# the id is set automatically
self.body.append(self.starttag(node, 'dt'))
# anchor for per-desc interactive data
if node.parent['objtype'] != 'describe' \
and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
# type: (nodes.Element) -> None
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
# type: (nodes.Element) -> None
pass
def depart_desc_signature_line(self, node):
# type: (nodes.Element) -> None
if node.get('add_permalink'):
# the permalink info is on the parent desc_signature node
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
# type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_type(self, node):
# type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
# type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
# type: (nodes.Element) -> None
self.body.append(' → ')
def depart_desc_returns(self, node):
# type: (nodes.Element) -> None
pass
def visit_desc_name(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
# type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
# type: (nodes.Element) -> None
self.body.append('<span class="sig-paren">(</span>')
self.first_param = 1
self.optional_param_level = 0
# How many required parameters are left.
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
# type: (nodes.Element) -> None
self.body.append('<span class="sig-paren">)</span>')
# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
# type: (nodes.Element) -> None
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if not node.hasattr('noemph'):
self.body.append('<em>')
def depart_desc_parameter(self, node):
# type: (nodes.Element) -> None
if not node.hasattr('noemph'):
self.body.append('</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
# type: (nodes.Element) -> None
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
# type: (nodes.Element) -> None
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
# type: (nodes.Element) -> None
self.body.append('</em>')
def visit_desc_content(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
# type: (nodes.Element) -> None
self.body.append('</dd>')
def visit_versionmodified(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
# type: (nodes.Element) -> None
self.body.append('</div>\n')
# overwritten
def visit_reference(self, node):
# type: (nodes.Element) -> None
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = node['refuri'] or '#'
if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = True
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
if 'target' in node:
atts['target'] = node['target']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) %
'.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
# type: (nodes.Element) -> None
self.visit_reference(node)
def depart_number_reference(self, node):
# type: (nodes.Element) -> None
self.depart_reference(node)
# overwritten -- we don't want source comments to show up in the HTML
def visit_comment(self, node): # type: ignore
# type: (nodes.Element) -> None
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
# type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
node.insert(0, nodes.title(name, admonitionlabels[name]))
self.set_first_last(node)
def visit_seealso(self, node):
# type: (nodes.Element) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
# type: (nodes.Element) -> None
self.depart_admonition(node)
def add_secnumber(self, node):
# type: (nodes.Element) -> None
if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
elif isinstance(node.parent, nodes.section):
if self.builder.name == 'singlehtml':
docname = self.docnames[-1]
anchorname = "%s/#%s" % (docname, node.parent['ids'][0])
if anchorname not in self.builder.secnumbers:
anchorname = "%s/" % docname # try first heading which has no anchor
else:
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = '' # try first heading which has no anchor
if self.builder.secnumbers.get(anchorname):
numbers = self.builder.secnumbers[anchorname]
self.body.append('.'.join(map(str, numbers)) +
self.secnumber_suffix)
def add_fignumber(self, node):
# type: (nodes.Element) -> None
def append_fignumber(figtype, figure_id):
# type: (str, str) -> None
if self.builder.name == 'singlehtml':
key = "%s/%s" % (self.docnames[-1], figtype)
else:
key = figtype
if figure_id in self.builder.fignumbers.get(key, {}):
self.body.append('<span class="caption-number">')
prefix = self.builder.config.numfig_format.get(figtype)
if prefix is None:
msg = __('numfig_format is not defined for %s') % figtype
logger.warning(msg)
else:
numbers = self.builder.fignumbers[key][figure_id]
self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
self.body.append('</span>')
figtype = self.builder.env.domains['std'].get_enumerable_node_type(node)
if figtype:
if len(node['ids']) == 0:
msg = __('Any IDs not assigned for %s node') % node.tagname
logger.warning(msg, location=node)
else:
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
# type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
def generate_targets_for_listing(self, node):
# type: (nodes.Element) -> None
"""Generate hyperlink targets for listings.
Original visit_bullet_list(), visit_definition_list() and visit_enumerated_list()
generates hyperlink targets inside listing tags (<ul>, <ol> and <dl>) if multiple
IDs are assigned to listings. That is invalid DOM structure.
(This is a bug of docutils <= 0.12)
This exports hyperlink targets before listings to make valid DOM structure.
"""
for id in node['ids'][1:]:
self.body.append('<span id="%s"></span>' % id)
node['ids'].remove(id)
# overwritten
def visit_bullet_list(self, node):
# type: (nodes.Element) -> None
if len(node) == 1 and isinstance(node[0], addnodes.toctree):
# avoid emitting empty <ul></ul>
raise nodes.SkipNode
self.generate_targets_for_listing(node)
super().visit_bullet_list(node)
# overwritten
def visit_enumerated_list(self, node):
# type: (nodes.Element) -> None
self.generate_targets_for_listing(node)
super().visit_enumerated_list(node)
# overwritten
def visit_title(self, node):
# type: (nodes.Element) -> None
super().visit_title(node)
self.add_secnumber(node)
self.add_fignumber(node.parent)
if isinstance(node.parent, nodes.table):
self.body.append('<span class="caption-text">')
def depart_title(self, node):
# type: (nodes.Element) -> None
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and
node.parent.hasattr('ids') and node.parent['ids']):
# add permalink anchor
if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'):
self.body.append('</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] +
'title="%s">%s' % (
_('Permalink to this headline'),
self.permalink_text))
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
self.add_permalink_ref(node.parent, _('Permalink to this table'))
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
super().depart_title(node)
# overwritten
def visit_literal_block(self, node):
# type: (nodes.Element) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
return super().visit_literal_block(node)
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
highlight_args = node.get('highlight_args', {})
highlight_args['force'] = node.get('force_highlighting', False)
if lang is self.builder.config.highlight_language:
# only pass highlighter options for original language
opts = self.builder.config.highlight_options
else:
opts = {}
highlighted = self.highlighter.highlight_block(
node.rawsource, lang, opts=opts, linenos=linenos,
location=(self.builder.current_docname, node.line), **highlight_args
)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s notranslate' % lang)
self.body.append(starttag + highlighted + '</div>\n')
raise nodes.SkipNode
def visit_caption(self, node):
# type: (nodes.Element) -> None
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('<div class="code-block-caption">')
else:
super().visit_caption(node)
self.add_fignumber(node.parent)
self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
def depart_caption(self, node):
# type: (nodes.Element) -> None
self.body.append('</span>')
# append permalink if available
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.add_permalink_ref(node.parent, _('Permalink to this code'))
elif isinstance(node.parent, nodes.figure):
image_nodes = node.parent.traverse(nodes.image)
target_node = image_nodes and image_nodes[0] or node.parent
self.add_permalink_ref(target_node, _('Permalink to this image'))
elif node.parent.get('toctree'):
self.add_permalink_ref(node.parent.parent, _('Permalink to this toctree'))
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('</div>\n')
else:
super().depart_caption(node)
    def visit_doctest_block(self, node):
        # type: (nodes.Element) -> None
        """Render doctest blocks exactly like literal (code) blocks."""
        self.visit_literal_block(node)
# overwritten to add the <div> (for XHTML compliance)
def visit_block_quote(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'blockquote') + '<div>')
    def depart_block_quote(self, node):
        # type: (nodes.Element) -> None
        """Close the <div> added in visit_block_quote, then the blockquote."""
        self.body.append('</div></blockquote>\n')
# overwritten
def visit_literal(self, node):
# type: (nodes.Element) -> None
if 'kbd' in node['classes']:
self.body.append(self.starttag(node, 'kbd', '',
CLASS='docutils literal notranslate'))
else:
self.body.append(self.starttag(node, 'code', '',
CLASS='docutils literal notranslate'))
self.protect_literal_text += 1
def depart_literal(self, node):
# type: (nodes.Element) -> None
if 'kbd' in node['classes']:
self.body.append('</kbd>')
else:
self.protect_literal_text -= 1
self.body.append('</code>')
def visit_productionlist(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'pre'))
names = []
productionlist = cast(Iterable[addnodes.production], node)
for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in productionlist:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.starttag(production, 'strong', ''))
self.body.append(lastname + '</strong> ::= ')
elif lastname is not None:
self.body.append('%s ' % (' ' * len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('</pre>\n')
raise nodes.SkipNode
    def depart_productionlist(self, node):
        # type: (nodes.Element) -> None
        """No-op: visit_productionlist raises SkipNode, so this never runs."""
        pass
    def visit_production(self, node):
        # type: (nodes.Element) -> None
        """No-op: productions are emitted by visit_productionlist's walkabout."""
        pass
    def depart_production(self, node):
        # type: (nodes.Element) -> None
        """No-op counterpart of visit_production."""
        pass
def visit_centered(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'p', CLASS="centered") +
'<strong>')
    def depart_centered(self, node):
        # type: (nodes.Element) -> None
        """Close the bold, centered paragraph opened in visit_centered."""
        self.body.append('</strong></p>')
# overwritten
def should_be_compact_paragraph(self, node):
# type: (nodes.Node) -> bool
"""Determine if the <p> tags around paragraph can be omitted."""
if isinstance(node.parent, addnodes.desc_content):
# Never compact desc_content items.
return False
if isinstance(node.parent, addnodes.versionmodified):
# Never compact versionmodified nodes.
return False
return super().should_be_compact_paragraph(node)
    def visit_compact_paragraph(self, node):
        # type: (nodes.Element) -> None
        """Compact paragraphs emit no wrapper markup."""
        pass
    def depart_compact_paragraph(self, node):
        # type: (nodes.Element) -> None
        """No-op counterpart of visit_compact_paragraph."""
        pass
def visit_download_reference(self, node):
# type: (nodes.Element) -> None
atts = {'class': 'reference download',
'download': ''}
if not self.builder.download_support:
self.context.append('')
elif 'refuri' in node:
atts['class'] += ' external'
atts['href'] = node['refuri']
self.body.append(self.starttag(node, 'a', '', **atts))
self.context.append('</a>')
elif 'filename' in node:
atts['class'] += ' internal'
atts['href'] = posixpath.join(self.builder.dlpath, node['filename'])
self.body.append(self.starttag(node, 'a', '', **atts))
self.context.append('</a>')
else:
self.context.append('')
    def depart_download_reference(self, node):
        # type: (nodes.Element) -> None
        """Pop and emit the closing markup pushed by visit_download_reference."""
        self.body.append(self.context.pop())
# overwritten
def visit_image(self, node):
# type: (nodes.Element) -> None
olduri = node['uri']
# rewrite the URI if the environment knows about it
if olduri in self.builder.images:
node['uri'] = posixpath.join(self.builder.imgpath,
self.builder.images[olduri])
uri = node['uri']
if uri.lower().endswith(('svg', 'svgz')):
atts = {'src': uri}
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
atts['alt'] = node.get('alt', uri)
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
self.context.append('</div>\n')
else:
self.context.append('')
self.body.append(self.emptytag(node, 'img', '', **atts))
return
if 'scale' in node:
# Try to figure out image height and width. Docutils does that too,
# but it tries the final file name, which does not necessarily exist
# yet at the time the HTML file is written.
if not ('width' in node and 'height' in node):
size = get_image_size(os.path.join(self.builder.srcdir, olduri))
if size is None:
logger.warning(__('Could not obtain image size. :scale: option is ignored.'), # NOQA
location=node)
else:
if 'width' not in node:
node['width'] = str(size[0])
if 'height' not in node:
node['height'] = str(size[1])
super().visit_image(node)
# overwritten
def depart_image(self, node):
# type: (nodes.Element) -> None
if node['uri'].lower().endswith(('svg', 'svgz')):
self.body.append(self.context.pop())
else:
super().depart_image(node)
    def visit_toctree(self, node):
        # type: (nodes.Element) -> None
        """Skip toctree placeholders entirely."""
        # this only happens when formatting a toc from env.tocs -- in this
        # case we don't want to include the subtree
        raise nodes.SkipNode
    def visit_index(self, node):
        # type: (nodes.Element) -> None
        """Index entries produce no HTML output."""
        raise nodes.SkipNode
    def visit_tabular_col_spec(self, node):
        # type: (nodes.Element) -> None
        """Column specs are LaTeX-only; skip them in HTML output."""
        raise nodes.SkipNode
    def visit_glossary(self, node):
        # type: (nodes.Element) -> None
        """Glossaries need no extra markup; children render themselves."""
        pass
    def depart_glossary(self, node):
        # type: (nodes.Element) -> None
        """No-op counterpart of visit_glossary."""
        pass
    def visit_acks(self, node):
        # type: (nodes.Element) -> None
        """Acknowledgements need no extra markup; children render themselves."""
        pass
    def depart_acks(self, node):
        # type: (nodes.Element) -> None
        """No-op counterpart of visit_acks."""
        pass
    def visit_hlist(self, node):
        # type: (nodes.Element) -> None
        """Open a horizontal list, rendered as a single-row table."""
        self.body.append('<table class="hlist"><tr>')
    def depart_hlist(self, node):
        # type: (nodes.Element) -> None
        """Close the horizontal-list table."""
        self.body.append('</tr></table>\n')
    def visit_hlistcol(self, node):
        # type: (nodes.Element) -> None
        """Open one column of a horizontal list."""
        self.body.append('<td>')
    def depart_hlistcol(self, node):
        # type: (nodes.Element) -> None
        """Close one column of a horizontal list."""
        self.body.append('</td>')
    def visit_option_group(self, node):
        # type: (nodes.Element) -> None
        """Open an option group, normalizing the padding entity in context."""
        super().visit_option_group(node)
        # NOTE(review): both replace() arguments render as plain spaces here;
        # upstream uses HTML entities ('&nbsp;' -> '&#160;') and this looks
        # mangled by entity-decoding during extraction -- confirm upstream.
        self.context[-2] = self.context[-2].replace(' ', ' ')
# overwritten
    def visit_Text(self, node):
        # type: (nodes.Text) -> None
        """Emit text, protecting literal runs from line wrapping."""
        text = node.astext()
        encoded = self.encode(text)
        if self.protect_literal_text:
            # moved here from base class's visit_literal to support
            # more formatting in literal nodes
            for token in self.words_and_spaces.findall(encoded):
                if token.strip():
                    # protect literal text from line wrapping
                    self.body.append('<span class="pre">%s</span>' % token)
                elif token in ' \n':
                    # allow breaks at whitespace
                    self.body.append(token)
                else:
                    # protect runs of multiple spaces; the last one can wrap
                    # NOTE(review): this literal renders as a plain space here;
                    # upstream emits '&#160;' entities -- looks mangled by
                    # entity-decoding during extraction, confirm upstream.
                    self.body.append(' ' * (len(token) - 1) + ' ')
        else:
            if self.in_mailto and self.settings.cloak_email_addresses:
                encoded = self.cloak_email(encoded)
            self.body.append(encoded)
    def visit_note(self, node):
        # type: (nodes.Element) -> None
        """Open a "note" admonition."""
        self.visit_admonition(node, 'note')
    def depart_note(self, node):
        # type: (nodes.Element) -> None
        """Close a "note" admonition."""
        self.depart_admonition(node)
    def visit_warning(self, node):
        # type: (nodes.Element) -> None
        """Open a "warning" admonition."""
        self.visit_admonition(node, 'warning')
    def depart_warning(self, node):
        # type: (nodes.Element) -> None
        """Close a "warning" admonition."""
        self.depart_admonition(node)
    def visit_attention(self, node):
        # type: (nodes.Element) -> None
        """Open an "attention" admonition."""
        self.visit_admonition(node, 'attention')
    def depart_attention(self, node):
        # type: (nodes.Element) -> None
        """Close an "attention" admonition."""
        self.depart_admonition(node)
    def visit_caution(self, node):
        # type: (nodes.Element) -> None
        """Open a "caution" admonition."""
        self.visit_admonition(node, 'caution')
    def depart_caution(self, node):
        # type: (nodes.Element) -> None
        """Close a "caution" admonition."""
        self.depart_admonition(node)
    def visit_danger(self, node):
        # type: (nodes.Element) -> None
        """Open a "danger" admonition."""
        self.visit_admonition(node, 'danger')
    def depart_danger(self, node):
        # type: (nodes.Element) -> None
        """Close a "danger" admonition."""
        self.depart_admonition(node)
    def visit_error(self, node):
        # type: (nodes.Element) -> None
        """Open an "error" admonition."""
        self.visit_admonition(node, 'error')
    def depart_error(self, node):
        # type: (nodes.Element) -> None
        """Close an "error" admonition."""
        self.depart_admonition(node)
    def visit_hint(self, node):
        # type: (nodes.Element) -> None
        """Open a "hint" admonition."""
        self.visit_admonition(node, 'hint')
    def depart_hint(self, node):
        # type: (nodes.Element) -> None
        """Close a "hint" admonition."""
        self.depart_admonition(node)
    def visit_important(self, node):
        # type: (nodes.Element) -> None
        """Open an "important" admonition."""
        self.visit_admonition(node, 'important')
    def depart_important(self, node):
        # type: (nodes.Element) -> None
        """Close an "important" admonition."""
        self.depart_admonition(node)
    def visit_tip(self, node):
        # type: (nodes.Element) -> None
        """Open a "tip" admonition."""
        self.visit_admonition(node, 'tip')
    def depart_tip(self, node):
        # type: (nodes.Element) -> None
        """Close a "tip" admonition."""
        self.depart_admonition(node)
    def visit_literal_emphasis(self, node):
        # type: (nodes.Element) -> None
        """Render literal emphasis like regular emphasis."""
        return self.visit_emphasis(node)
    def depart_literal_emphasis(self, node):
        # type: (nodes.Element) -> None
        """Close literal emphasis like regular emphasis."""
        return self.depart_emphasis(node)
    def visit_literal_strong(self, node):
        # type: (nodes.Element) -> None
        """Render literal strong text like regular strong text."""
        return self.visit_strong(node)
    def depart_literal_strong(self, node):
        # type: (nodes.Element) -> None
        """Close literal strong text like regular strong text."""
        return self.depart_strong(node)
def visit_abbreviation(self, node):
# type: (nodes.Element) -> None
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
    def depart_abbreviation(self, node):
        # type: (nodes.Element) -> None
        """Close the <abbr> element."""
        self.body.append('</abbr>')
    def visit_manpage(self, node):
        # type: (nodes.Element) -> None
        """Render a manpage reference, linking it when manpages_url is set."""
        self.visit_literal_emphasis(node)
        if self.manpages_url:
            node['refuri'] = self.manpages_url.format(**node.attributes)
            self.visit_reference(node)
    def depart_manpage(self, node):
        # type: (nodes.Element) -> None
        """Close the reference (if one was opened) and then the emphasis."""
        if self.manpages_url:
            self.depart_reference(node)
        self.depart_literal_emphasis(node)
# overwritten to add even/odd classes
    def visit_table(self, node):
        # type: (nodes.Element) -> None
        """Open a table, resetting the counter behind row-even/row-odd."""
        self._table_row_index = 0
        return super().visit_table(node)
def visit_row(self, node):
# type: (nodes.Element) -> None
self._table_row_index += 1
if self._table_row_index % 2 == 0:
node['classes'].append('row-even')
else:
node['classes'].append('row-odd')
self.body.append(self.starttag(node, 'tr', ''))
node.column = 0 # type: ignore
    def visit_entry(self, node):
        # type: (nodes.Element) -> None
        """Open a table cell, normalizing the padding entity in empty cells."""
        super().visit_entry(node)
        # NOTE(review): both literals render as plain spaces here; upstream
        # swaps '&nbsp;' for '&#160;' -- looks mangled by entity-decoding
        # during extraction, confirm upstream before relying on this.
        if self.body[-1] == ' ':
            self.body[-1] = ' '
    def visit_field_list(self, node):
        # type: (nodes.Element) -> None
        """Open a field list, resetting the counter behind field-even/odd."""
        self._fieldlist_row_index = 0
        return super().visit_field_list(node)
def visit_field(self, node):
# type: (nodes.Element) -> None
self._fieldlist_row_index += 1
if self._fieldlist_row_index % 2 == 0:
node['classes'].append('field-even')
else:
node['classes'].append('field-odd')
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
    def visit_field_name(self, node):
        # type: (nodes.Element) -> None
        """Open a field name, fixing padding in any context the base pushed."""
        context_count = len(self.context)
        super().visit_field_name(node)
        if context_count != len(self.context):
            # NOTE(review): the replace() arguments render as plain spaces
            # here; upstream uses '&nbsp;'/'&#160;' entities -- looks mangled
            # by entity-decoding during extraction, confirm upstream.
            self.context[-1] = self.context[-1].replace(' ', ' ')
def visit_math(self, node, math_env=''):
# type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
visit(self, node)
def depart_math(self, node, math_env=''):
# type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_inline_math_renderers[name]
if depart:
depart(self, node)
def visit_math_block(self, node, math_env=''):
# type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_block_math_renderers[name]
visit(self, node)
def depart_math_block(self, node, math_env=''):
# type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_block_math_renderers[name]
if depart:
depart(self, node)
    def unknown_visit(self, node):
        # type: (nodes.Node) -> None
        """Fail loudly on node types this translator has no handler for."""
        raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
# --------- METHODS FOR COMPATIBILITY --------------------------------------
    @property
    def highlightlang(self):
        # type: () -> str
        """Deprecated alias for the configured default highlight language."""
        warnings.warn('HTMLTranslator.highlightlang is deprecated.',
                      RemovedInSphinx30Warning, stacklevel=2)
        return self.builder.config.highlight_language
@property
def highlightlang_base(self):
# type: () -> str
warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
RemovedInSphinx30Warning)
return self.builder.config.highlight_language
    @property
    def highlightopts(self):
        # type: () -> str
        """Deprecated alias for the configured highlighter options."""
        warnings.warn('HTMLTranslator.highlightopts is deprecated.',
                      RemovedInSphinx30Warning, stacklevel=2)
        return self.builder.config.highlight_options
    @property
    def highlightlinenothreshold(self):
        # type: () -> int
        """Deprecated; the threshold is effectively unlimited now."""
        warnings.warn('HTMLTranslator.highlightlinenothreshold is deprecated.',
                      RemovedInSphinx30Warning, stacklevel=2)
        return sys.maxsize
| 37.41258 | 105 | 0.58402 |
import copy
import os
import posixpath
import sys
import warnings
from typing import Iterable, cast
from docutils import nodes
from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.images import get_image_size
# ``if False`` keeps these imports visible to type checkers only, avoiding
# runtime import cycles (pre-dates ``typing.TYPE_CHECKING`` usage here).
if False:
    from typing import Any
    from sphinx.builders.html import StandaloneHTMLBuilder
logger = logging.getLogger(__name__)
class HTMLWriter(Writer):
    """docutils Writer that delegates translation to a Sphinx HTML translator."""
    # don't embed the stylesheet by default; Sphinx links its own CSS files
    settings_spec = copy.deepcopy(Writer.settings_spec)
    for _setting in settings_spec[2]:
        if '--embed-stylesheet' in _setting[1]:
            _setting[2]['default'] = 0
    def __init__(self, builder):
        # builder: the active Sphinx HTML builder; supplies the translator
        super().__init__()
        self.builder = builder
    def translate(self):
        """Walk the document and copy the translator's output parts onto self."""
        visitor = self.builder.create_translator(self.document, self.builder)
        self.visitor = cast(HTMLTranslator, visitor)
        self.document.walkabout(visitor)
        self.output = self.visitor.astext()
        # mirror the visitor's output fragments as writer attributes
        for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
                     'body_pre_docinfo', 'docinfo', 'body', 'fragment',
                     'body_suffix', 'meta', 'title', 'subtitle', 'header',
                     'footer', 'html_prolog', 'html_head', 'html_title',
                     'html_subtitle', 'html_body', ):
            setattr(self, attr, getattr(visitor, attr, None))
        # drop the first two meta entries (presumably the content-type and
        # generator tags -- TODO confirm against the base translator)
        self.clean_meta = ''.join(self.visitor.meta[2:])
class HTMLTranslator(SphinxTranslator, BaseTranslator):
builder = None
def __init__(self, *args):
if isinstance(args[0], nodes.document) and isinstance(args[1], Builder):
document, builder = args
else:
warnings.warn('The order of arguments for HTMLTranslator has been changed. '
'Please give "document" as 1st and "builder" as 2nd.',
RemovedInSphinx40Warning, stacklevel=2)
builder, document = args
super().__init__(document, builder)
self.highlighter = self.builder.highlighter
self.docnames = [self.builder.current_docname]
self.manpages_url = self.config.manpages_url
self.protect_literal_text = 0
self.permalink_text = self.config.html_add_permalinks
if not isinstance(self.permalink_text, str):
self.permalink_text = self.permalink_text and '\u00B6' or ''
self.permalink_text = self.encode(self.permalink_text)
self.secnumber_suffix = self.config.html_secnumber_suffix
self.param_separator = ''
self.optional_param_level = 0
self._table_row_index = 0
self._fieldlist_row_index = 0
self.required_params_left = 0
def visit_start_of_file(self, node):
self.docnames.append(node['docname'])
self.body.append('<span id="document-%s"></span>' % node['docname'])
def depart_start_of_file(self, node):
self.docnames.pop()
def visit_desc(self, node):
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
self.body.append(self.starttag(node, 'dt'))
if node.parent['objtype'] != 'describe' \
and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
pass
def depart_desc_signature_line(self, node):
if node.get('add_permalink'):
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
self.body.append('</code>')
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' → ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node):
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
self.body.append('<span class="sig-paren">(</span>')
self.first_param = 1
self.optional_param_level = 0
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append('<span class="sig-paren">)</span>')
def visit_desc_parameter(self, node):
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if not node.hasattr('noemph'):
self.body.append('<em>')
def depart_desc_parameter(self, node):
if not node.hasattr('noemph'):
self.body.append('</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
self.body.append('</em>')
def visit_desc_content(self, node):
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
self.body.append('</dd>')
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
self.body.append('</div>\n')
def visit_reference(self, node):
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = node['refuri'] or '#'
if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = True
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
if 'target' in node:
atts['target'] = node['target']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) %
'.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
self.visit_reference(node)
def depart_number_reference(self, node):
self.depart_reference(node)
def visit_comment(self, node): # type: ignore
# type: (nodes.Element) -> None
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
# type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
node.insert(0, nodes.title(name, admonitionlabels[name]))
self.set_first_last(node)
def visit_seealso(self, node):
# type: (nodes.Element) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
# type: (nodes.Element) -> None
self.depart_admonition(node)
def add_secnumber(self, node):
# type: (nodes.Element) -> None
if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
elif isinstance(node.parent, nodes.section):
if self.builder.name == 'singlehtml':
docname = self.docnames[-1]
anchorname = "%s/#%s" % (docname, node.parent['ids'][0])
if anchorname not in self.builder.secnumbers:
anchorname = "%s/" % docname # try first heading which has no anchor
else:
anchorname = '
if anchorname not in self.builder.secnumbers:
anchorname = '' # try first heading which has no anchor
if self.builder.secnumbers.get(anchorname):
numbers = self.builder.secnumbers[anchorname]
self.body.append('.'.join(map(str, numbers)) +
self.secnumber_suffix)
def add_fignumber(self, node):
# type: (nodes.Element) -> None
def append_fignumber(figtype, figure_id):
# type: (str, str) -> None
if self.builder.name == 'singlehtml':
key = "%s/%s" % (self.docnames[-1], figtype)
else:
key = figtype
if figure_id in self.builder.fignumbers.get(key, {}):
self.body.append('<span class="caption-number">')
prefix = self.builder.config.numfig_format.get(figtype)
if prefix is None:
msg = __('numfig_format is not defined for %s') % figtype
logger.warning(msg)
else:
numbers = self.builder.fignumbers[key][figure_id]
self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
self.body.append('</span>')
figtype = self.builder.env.domains['std'].get_enumerable_node_type(node)
if figtype:
if len(node['ids']) == 0:
msg = __('Any IDs not assigned for %s node') % node.tagname
logger.warning(msg, location=node)
else:
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
# type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
def generate_targets_for_listing(self, node):
# type: (nodes.Element) -> None
for id in node['ids'][1:]:
self.body.append('<span id="%s"></span>' % id)
node['ids'].remove(id)
# overwritten
def visit_bullet_list(self, node):
# type: (nodes.Element) -> None
if len(node) == 1 and isinstance(node[0], addnodes.toctree):
# avoid emitting empty <ul></ul>
raise nodes.SkipNode
self.generate_targets_for_listing(node)
super().visit_bullet_list(node)
# overwritten
def visit_enumerated_list(self, node):
# type: (nodes.Element) -> None
self.generate_targets_for_listing(node)
super().visit_enumerated_list(node)
# overwritten
def visit_title(self, node):
# type: (nodes.Element) -> None
super().visit_title(node)
self.add_secnumber(node)
self.add_fignumber(node.parent)
if isinstance(node.parent, nodes.table):
self.body.append('<span class="caption-text">')
def depart_title(self, node):
# type: (nodes.Element) -> None
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and
node.parent.hasattr('ids') and node.parent['ids']):
# add permalink anchor
if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'):
self.body.append('</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] +
'title="%s">%s' % (
_('Permalink to this headline'),
self.permalink_text))
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
self.add_permalink_ref(node.parent, _('Permalink to this table'))
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
super().depart_title(node)
# overwritten
def visit_literal_block(self, node):
# type: (nodes.Element) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
return super().visit_literal_block(node)
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
highlight_args = node.get('highlight_args', {})
highlight_args['force'] = node.get('force_highlighting', False)
if lang is self.builder.config.highlight_language:
opts = self.builder.config.highlight_options
else:
opts = {}
highlighted = self.highlighter.highlight_block(
node.rawsource, lang, opts=opts, linenos=linenos,
location=(self.builder.current_docname, node.line), **highlight_args
)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s notranslate' % lang)
self.body.append(starttag + highlighted + '</div>\n')
raise nodes.SkipNode
def visit_caption(self, node):
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('<div class="code-block-caption">')
else:
super().visit_caption(node)
self.add_fignumber(node.parent)
self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
def depart_caption(self, node):
self.body.append('</span>')
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.add_permalink_ref(node.parent, _('Permalink to this code'))
elif isinstance(node.parent, nodes.figure):
image_nodes = node.parent.traverse(nodes.image)
target_node = image_nodes and image_nodes[0] or node.parent
self.add_permalink_ref(target_node, _('Permalink to this image'))
elif node.parent.get('toctree'):
self.add_permalink_ref(node.parent.parent, _('Permalink to this toctree'))
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('</div>\n')
else:
super().depart_caption(node)
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote') + '<div>')
def depart_block_quote(self, node):
self.body.append('</div></blockquote>\n')
def visit_literal(self, node):
if 'kbd' in node['classes']:
self.body.append(self.starttag(node, 'kbd', '',
CLASS='docutils literal notranslate'))
else:
self.body.append(self.starttag(node, 'code', '',
CLASS='docutils literal notranslate'))
self.protect_literal_text += 1
def depart_literal(self, node):
if 'kbd' in node['classes']:
self.body.append('</kbd>')
else:
self.protect_literal_text -= 1
self.body.append('</code>')
def visit_productionlist(self, node):
self.body.append(self.starttag(node, 'pre'))
names = []
productionlist = cast(Iterable[addnodes.production], node)
for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in productionlist:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.starttag(production, 'strong', ''))
self.body.append(lastname + '</strong> ::= ')
elif lastname is not None:
self.body.append('%s ' % (' ' * len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('</pre>\n')
raise nodes.SkipNode
def depart_productionlist(self, node):
pass
def visit_production(self, node):
pass
def depart_production(self, node):
pass
def visit_centered(self, node):
self.body.append(self.starttag(node, 'p', CLASS="centered") +
'<strong>')
def depart_centered(self, node):
self.body.append('</strong></p>')
def should_be_compact_paragraph(self, node):
if isinstance(node.parent, addnodes.desc_content):
return False
if isinstance(node.parent, addnodes.versionmodified):
return False
return super().should_be_compact_paragraph(node)
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_download_reference(self, node):
atts = {'class': 'reference download',
'download': ''}
if not self.builder.download_support:
self.context.append('')
elif 'refuri' in node:
atts['class'] += ' external'
atts['href'] = node['refuri']
self.body.append(self.starttag(node, 'a', '', **atts))
self.context.append('</a>')
elif 'filename' in node:
atts['class'] += ' internal'
atts['href'] = posixpath.join(self.builder.dlpath, node['filename'])
self.body.append(self.starttag(node, 'a', '', **atts))
self.context.append('</a>')
else:
self.context.append('')
def depart_download_reference(self, node):
self.body.append(self.context.pop())
def visit_image(self, node):
olduri = node['uri']
if olduri in self.builder.images:
node['uri'] = posixpath.join(self.builder.imgpath,
self.builder.images[olduri])
uri = node['uri']
if uri.lower().endswith(('svg', 'svgz')):
atts = {'src': uri}
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
atts['alt'] = node.get('alt', uri)
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
self.context.append('</div>\n')
else:
self.context.append('')
self.body.append(self.emptytag(node, 'img', '', **atts))
return
if 'scale' in node:
if not ('width' in node and 'height' in node):
size = get_image_size(os.path.join(self.builder.srcdir, olduri))
if size is None:
logger.warning(__('Could not obtain image size. :scale: option is ignored.'),
location=node)
else:
if 'width' not in node:
node['width'] = str(size[0])
if 'height' not in node:
node['height'] = str(size[1])
super().visit_image(node)
def depart_image(self, node):
if node['uri'].lower().endswith(('svg', 'svgz')):
self.body.append(self.context.pop())
else:
super().depart_image(node)
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_index(self, node):
# type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
# type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
# type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
# type: (nodes.Element) -> None
pass
def visit_acks(self, node):
# type: (nodes.Element) -> None
pass
def depart_acks(self, node):
# type: (nodes.Element) -> None
pass
def visit_hlist(self, node):
# type: (nodes.Element) -> None
self.body.append('<table class="hlist"><tr>')
def depart_hlist(self, node):
# type: (nodes.Element) -> None
self.body.append('</tr></table>\n')
def visit_hlistcol(self, node):
# type: (nodes.Element) -> None
self.body.append('<td>')
def depart_hlistcol(self, node):
# type: (nodes.Element) -> None
self.body.append('</td>')
def visit_option_group(self, node):
# type: (nodes.Element) -> None
super().visit_option_group(node)
self.context[-2] = self.context[-2].replace(' ', '&
# overwritten
def visit_Text(self, node):
# type: (nodes.Text) -> None
text = node.astext()
encoded = self.encode(text)
if self.protect_literal_text:
# moved here from base class's visit_literal to support
for token in self.words_and_spaces.findall(encoded):
if token.strip():
self.body.append('<span class="pre">%s</span>' % token)
elif token in ' \n':
self.body.append(token)
else:
self.body.append(' ' * (len(token) - 1) + ' ')
else:
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)
self.body.append(encoded)
def visit_note(self, node):
self.visit_admonition(node, 'note')
def depart_note(self, node):
self.depart_admonition(node)
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
self.depart_admonition(node)
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
self.depart_admonition(node)
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
self.depart_admonition(node)
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
self.depart_admonition(node)
def visit_error(self, node):
self.visit_admonition(node, 'error')
def depart_error(self, node):
self.depart_admonition(node)
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
    def depart_hint(self, node):
        # Close the "hint" admonition box.
        self.depart_admonition(node)
    def visit_important(self, node):
        # Open an "important" admonition box.
        self.visit_admonition(node, 'important')
    def depart_important(self, node):
        # Close the "important" admonition box.
        self.depart_admonition(node)
    def visit_tip(self, node):
        # Open a "tip" admonition box.
        self.visit_admonition(node, 'tip')
    def depart_tip(self, node):
        # Close the "tip" admonition box.
        self.depart_admonition(node)
    def visit_literal_emphasis(self, node):
        # Literal emphasis renders the same as ordinary emphasis.
        return self.visit_emphasis(node)
    def depart_literal_emphasis(self, node):
        # Close literal emphasis exactly like ordinary emphasis.
        return self.depart_emphasis(node)
    def visit_literal_strong(self, node):
        # Literal strong renders the same as ordinary strong.
        return self.visit_strong(node)
    def depart_literal_strong(self, node):
        # Close literal strong exactly like ordinary strong.
        return self.depart_strong(node)
    def visit_abbreviation(self, node):
        # Emit an <abbr> tag, carrying any explanation as its title
        # attribute (shown as a tooltip by browsers).
        attrs = {}
        if node.hasattr('explanation'):
            attrs['title'] = node['explanation']
        self.body.append(self.starttag(node, 'abbr', '', **attrs))
    def depart_abbreviation(self, node):
        # Close the <abbr> tag opened in visit_abbreviation.
        self.body.append('</abbr>')
    def visit_manpage(self, node):
        # Render man-page references in literal-emphasis style; if a
        # manpages_url template is configured, additionally wrap the
        # reference in a hyperlink built from the node's attributes.
        self.visit_literal_emphasis(node)
        if self.manpages_url:
            node['refuri'] = self.manpages_url.format(**node.attributes)
            self.visit_reference(node)
    def depart_manpage(self, node):
        # Close the optional hyperlink, then the literal-emphasis wrapper,
        # mirroring visit_manpage in reverse order.
        if self.manpages_url:
            self.depart_reference(node)
        self.depart_literal_emphasis(node)
    def visit_table(self, node):
        # Reset the row counter used for even/odd row striping.
        self._table_row_index = 0
        return super().visit_table(node)
    def visit_row(self, node):
        # Tag alternate rows with row-even/row-odd classes for CSS striping.
        self._table_row_index += 1
        if self._table_row_index % 2 == 0:
            node['classes'].append('row-even')
        else:
            node['classes'].append('row-odd')
        self.body.append(self.starttag(node, 'tr', ''))
        # Track the current column; cell visitors advance it.
        node.column = 0
    def visit_entry(self, node):
        # Delegate to the base class, then replace a bare-space placeholder
        # so empty cells keep their height.  NOTE(review): the replacement
        # text looks like a lost &#160; entity in this copy -- verify.
        super().visit_entry(node)
        if self.body[-1] == ' ':
            self.body[-1] = ' '
    def visit_field_list(self, node):
        # Reset the counter used for even/odd field-row striping.
        self._fieldlist_row_index = 0
        return super().visit_field_list(node)
    def visit_field(self, node):
        # Tag alternate field rows with field-even/field-odd for CSS.
        self._fieldlist_row_index += 1
        if self._fieldlist_row_index % 2 == 0:
            node['classes'].append('field-even')
        else:
            node['classes'].append('field-odd')
        self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
    def visit_field_name(self, node):
        # If the base class pushed a separator onto the context stack,
        # make its space non-breaking.  NOTE(review): the replacement text
        # looks like a lost &#160; entity in this copy -- verify.
        context_count = len(self.context)
        super().visit_field_name(node)
        if context_count != len(self.context):
            self.context[-1] = self.context[-1].replace(' ', ' ')
    def visit_math(self, node, math_env=''):
        # Dispatch to the configured inline-math renderer's visit handler.
        name = self.builder.math_renderer_name
        visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
        visit(self, node)
    def depart_math(self, node, math_env=''):
        # Dispatch to the inline-math renderer's depart handler, if any.
        name = self.builder.math_renderer_name
        _, depart = self.builder.app.registry.html_inline_math_renderers[name]
        if depart:
            depart(self, node)
    def visit_math_block(self, node, math_env=''):
        # Dispatch to the configured block-math renderer's visit handler.
        name = self.builder.math_renderer_name
        visit, _ = self.builder.app.registry.html_block_math_renderers[name]
        visit(self, node)
    def depart_math_block(self, node, math_env=''):
        # Dispatch to the block-math renderer's depart handler, if any.
        name = self.builder.math_renderer_name
        _, depart = self.builder.app.registry.html_block_math_renderers[name]
        if depart:
            depart(self, node)
    def unknown_visit(self, node):
        # Fail loudly on node types that have no visitor method.
        raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
    @property
    def highlightlang(self):
        # Deprecated alias: read builder.config.highlight_language directly.
        warnings.warn('HTMLTranslator.highlightlang is deprecated.',
                      RemovedInSphinx30Warning, stacklevel=2)
        return self.builder.config.highlight_language
@property
def highlightlang_base(self):
warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
RemovedInSphinx30Warning)
return self.builder.config.highlight_language
    @property
    def highlightopts(self):
        # Deprecated alias: read builder.config.highlight_options directly.
        warnings.warn('HTMLTranslator.highlightopts is deprecated.',
                      RemovedInSphinx30Warning, stacklevel=2)
        return self.builder.config.highlight_options
    @property
    def highlightlinenothreshold(self):
        # Deprecated: effectively "never", hence sys.maxsize.
        warnings.warn('HTMLTranslator.highlightlinenothreshold is deprecated.',
                      RemovedInSphinx30Warning, stacklevel=2)
        return sys.maxsize
| true | true |
f7366798202eb5b63a293458869820181b482fe2 | 10,620 | py | Python | ramses_rf/protocol/helpers.py | zxdavb/evohome_rf | dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b | [
"MIT"
] | 24 | 2019-12-12T20:54:39.000Z | 2021-03-25T15:40:26.000Z | ramses_rf/protocol/helpers.py | zxdavb/evohome_rf | dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b | [
"MIT"
] | 9 | 2020-10-21T23:01:06.000Z | 2021-04-22T09:59:50.000Z | ramses_rf/protocol/helpers.py | zxdavb/evohome_rf | dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b | [
"MIT"
] | 9 | 2019-12-03T21:05:11.000Z | 2021-04-02T11:41:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - Protocol/Transport layer.
Helper functions.
"""
import ctypes
import sys
import time
from datetime import datetime as dt
from typing import Optional, Union
from .const import DEVICE_TYPES, NON_DEVICE_ID, NUL_DEVICE_ID
class FILETIME(ctypes.Structure):
    """Data structure for GetSystemTimePreciseAsFileTime()."""
    # Two 32-bit halves of a 64-bit count of 100-ns intervals since
    # 1601-01-01T00:00:00Z (the Windows FILETIME epoch).
    _fields_ = [("dwLowDateTime", ctypes.c_uint), ("dwHighDateTime", ctypes.c_uint)]
def dt_now() -> dt:
    """Return the current local time as a naive datetime.

    Built on timestamp() rather than dt.now(): slower, but potentially more
    accurate; used mainly for packet timestamps.
    """
    now = timestamp()
    return dt.fromtimestamp(now)
def dt_str() -> str:
    """Return the current datetime as an isoformat string (microseconds)."""
    current = dt_now()
    return current.isoformat(timespec="microseconds")
def timestamp() -> float:
    """Return seconds since the Unix epoch, accurately on all platforms.

    On Windows, query GetSystemTimePreciseAsFileTime() via ctypes for better
    precision; elsewhere use time.time_ns() (see PEP 564).
    """
    if sys.platform == "win32":
        ft = FILETIME()
        ctypes.windll.kernel32.GetSystemTimePreciseAsFileTime(ctypes.byref(ft))
        since_1601 = ((ft.dwHighDateTime << 32) + ft.dwLowDateTime) / 1e7
        # rebase from the 1601 FILETIME epoch to the 1970 Unix epoch
        return since_1601 - 134774 * 24 * 60 * 60
    return time.time_ns() / 1e9
def _precision_v_cost():
    """Benchmark the precision and cost of the various clock sources.

    Diagnostic only: prints the minimum observable delta and total duration
    for time.time_ns(), time.time(), timestamp(), dt.now() and dt_now().

    BUGFIX: the dt.now()/dt_now() sections used to print the stale float
    ``min_dt`` left over from the timestamp() section; they now convert the
    freshly computed ``min_td`` timedelta to nanoseconds.
    """
    import math
    #
    LOOPS = 10 ** 6
    #
    print("time.time_ns(): %s" % time.time_ns())
    print("time.time(): %s\r\n" % time.time())
    #
    starts = time.time_ns()
    min_dt = [abs(time.time_ns() - time.time_ns()) for _ in range(LOOPS)]
    min_dt = min(filter(bool, min_dt))
    print("min delta time_ns(): %s ns" % min_dt)
    print("duration time_ns(): %s ns\r\n" % (time.time_ns() - starts))
    #
    starts = time.time_ns()
    min_dt = [abs(time.time() - time.time()) for _ in range(LOOPS)]
    min_dt = min(filter(bool, min_dt))
    print("min delta time(): %s ns" % math.ceil(min_dt * 1e9))
    print("duration time(): %s ns\r\n" % (time.time_ns() - starts))
    #
    starts = time.time_ns()
    min_dt = [abs(timestamp() - timestamp()) for _ in range(LOOPS)]
    min_dt = min(filter(bool, min_dt))
    print("min delta timestamp(): %s ns" % math.ceil(min_dt * 1e9))
    print("duration timestamp(): %s ns\r\n" % (time.time_ns() - starts))
    # datetime construction is far more expensive, so fewer loops
    LOOPS = 10 ** 4
    #
    starts = time.time_ns()
    min_td = [abs(dt.now() - dt.now()) for _ in range(LOOPS)]
    min_td = min(filter(bool, min_td))
    print("min delta dt.now(): %s ns" % math.ceil(min_td.total_seconds() * 1e9))
    print("duration dt.now(): %s ns\r\n" % (time.time_ns() - starts))
    #
    starts = time.time_ns()
    min_td = [abs(dt_now() - dt_now()) for _ in range(LOOPS)]
    min_td = min(filter(bool, min_td))
    print("min delta dt_now(): %s ns" % math.ceil(min_td.total_seconds() * 1e9))
    print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
    # pick the platform-appropriate clock once, then benchmark it
    dt_nov = dt_now if sys.platform == "win32" else dt.now
    starts = time.time_ns()
    min_td = [abs(dt_nov() - dt_nov()) for _ in range(LOOPS)]
    min_td = min(filter(bool, min_td))
    print("min delta dt_now(): %s ns" % math.ceil(min_td.total_seconds() * 1e9))
    print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
def double(val, factor=1) -> Optional[float]:
    """Decode a 2-byte hex value (used by 31DA), scaled by factor.

    "7FFF" is the sentinel for "not available" and decodes to None.
    """
    if val == "7FFF":  # sentinel: no reading available
        return None
    number = int(val, 16)
    assert number < 32767
    return number / factor if factor != 1 else number
def flag8(byte, lsb=False) -> list:
    """Split a single hex byte into its 8 bits, most-significant bit first.

    With lsb=True, the least-significant bit comes first instead.
    """
    value = bytes.fromhex(byte)[0]
    bits = [(value >> pos) & 1 for pos in range(8)]  # LSB-first order
    return bits if lsb else bits[::-1]
def percent(value: str) -> Optional[float]:  # a percentage 0-100% (0.0 to 1.0)
    """Return a percentage as a fraction 0.0-1.0, with 0.5% resolution.

    The wire value is 0x00-0xC8 (i.e. 0-200 half-percent steps); EF/FE/FF
    are sentinels for "not available" and decode to None.
    """
    assert len(value) == 2, f"percent({value}): len is not 2"
    if value in {"EF", "FE", "FF"}:  # TODO: diff b/w FE (seen with 3150) & FF
        return None
    # BUGFIX: the assert message was missing its f-string prefix, so the
    # offending value was never interpolated into the message.
    assert int(value, 16) <= 200, f"max value should be 0xC8, not 0x{value}"
    return int(value, 16) / 200
def bool_from_hex(value: str) -> Optional[bool]:
    """Decode 00 -> False, C8 -> True; FF ("not available") -> None."""
    assert value in {"00", "C8", "FF"}, value
    mapping = {"00": False, "C8": True}
    return mapping.get(value)
def date_from_hex(value: str) -> Optional[str]:  # YYYY-MM-DD
    """Return a date string in the format YYYY-MM-DD (or None if n/a).

    BUGFIX(doc): the docstring claimed YY-MM-DD, but strftime("%Y-%m-%d")
    emits a four-digit year.
    """
    assert len(value) == 8, "len is not 8"
    if value == "FFFFFFFF":  # sentinel: no date
        return None
    return dt(
        year=int(value[4:8], 16),
        month=int(value[2:4], 16),
        day=int(value[:2], 16) & 0b11111,  # 1st 3 bits: DayOfWeek
    ).strftime("%Y-%m-%d")
def dtm_from_hex(value: str) -> Optional[str]:  # from parsers
    """Convert a packed hex datetime to a (naive, local) isoformat string.

    Two wire formats exist:
    - 00141B0A07E3 (..HH:MM, no seconds) for system_mode, zone_mode
    - 0400041C0A07E3 (..HH:MM:SS) for sync_datetime

    BUGFIX(annotation): the sentinel "FF"*6 decodes to None, so the return
    type is Optional[str], not str.
    """
    if value == "FF" * 6:  # sentinel: no datetime
        return None
    if len(value) == 12:  # short form: prepend a zero seconds field
        value = f"00{value}"
    return dt(
        year=int(value[10:14], 16),
        month=int(value[8:10], 16),
        day=int(value[6:8], 16),
        hour=int(value[4:6], 16) & 0b11111,  # 1st 3 bits: DayOfWeek
        minute=int(value[2:4], 16),
        second=int(value[:2], 16) & 0b1111111,  # 1st bit: used for DST
    ).isoformat(timespec="seconds")
def dtm_to_hex(dtm: Union[str, dt]) -> str:
    """Convert a datetime (isoformat string, or datetime obj) to hex.

    None encodes as the all-FF sentinel.
    """
    if dtm is None:
        return "FF" * 6
    if isinstance(dtm, str):
        try:
            dtm = dt.fromisoformat(dtm)
        except ValueError:
            raise ValueError("Invalid datetime isoformat string")
    elif not isinstance(dtm, dt):
        raise TypeError("Invalid datetime object")
    tt = dtm.timetuple()
    # field order on the wire: minute, hour, day, month, then 2-byte year
    parts = (tt.tm_min, tt.tm_hour, tt.tm_mday, tt.tm_mon)
    return "".join(f"{part:02X}" for part in parts) + f"{tt.tm_year:04X}"
def dts_from_hex(value: str) -> Optional[str]:
    """Decode a packed hex timestamp to "YYYY-MM-DDTHH:MM:SS" (or None)."""
    if value == "00000000007F":  # sentinel: no timestamp
        return None
    raw = int(value, 16)

    def _bits(shift: int, width: int) -> int:
        # Extract `width` bits starting at bit `shift`.
        return (raw >> shift) & ((1 << width) - 1)

    return dt(
        year=_bits(24, 7),
        month=_bits(36, 4),
        day=_bits(31, 5),
        hour=_bits(19, 5),
        minute=_bits(13, 6),
        second=_bits(7, 6),
    ).strftime("%Y-%m-%dT%H:%M:%S")
def dts_to_hex(dtm: Union[str, dt]) -> str:  # TODO: WIP
    """Encode a datetime into the packed 48-bit hex timestamp format.

    None encodes as the "00000000007F" sentinel; the year is stored
    modulo 100.
    """
    if dtm is None:
        return "00000000007F"
    if isinstance(dtm, str):
        try:
            dtm = dt.fromisoformat(dtm)  # TODO: YY-MM-DD, not YYYY-MM-DD
        except ValueError:
            raise ValueError("Invalid datetime isoformat string")
    elif not isinstance(dtm, dt):
        raise TypeError("Invalid datetime object")
    tt = dtm.timetuple()
    # non-overlapping bit fields, so OR == the original sum
    packed = (
        ((tt.tm_year % 100) << 24)
        | (tt.tm_mon << 36)
        | (tt.tm_mday << 31)
        | (tt.tm_hour << 19)
        | (tt.tm_min << 13)
        | (tt.tm_sec << 7)
    )
    return f"{packed:012X}"
def str_from_hex(value: str) -> Optional[str]:
    """Return the printable-ASCII portion of a hex string, stripped.

    Returns None when no printable characters remain.
    """
    printable = bytes(b for b in bytes.fromhex(value) if 31 < b < 127)
    if not printable:
        return None
    return printable.decode("ascii").strip()
def str_to_hex(value: str) -> str:
    """Convert a string to a variable-length ASCII hex string."""
    return "".join(format(ord(char), "02X") for char in value)
def temp_from_hex(value: str) -> Union[float, bool, None]:
    """Decode a 2's-complement 2-byte hex temperature (centi-units)."""
    assert len(value) == 4, f"temp_from_hex({value}): should be 4 bytes long"
    if value == "31FF":  # N/A (== 127.99, 2s complement), signed?
        return None
    if value == "7EFF":  # possibly only for setpoints? unsigned?
        return False
    if value == "7FFF":  # also: FFFF?, means: N/A (== 327.67)
        return None
    raw = int(value, 16)
    if raw >= 2 ** 15:  # negative in 2's complement
        raw -= 2 ** 16
    return raw / 100
def temp_to_hex(value: float) -> str:
    """Encode a temperature as a 2's-complement 2-byte hex string.

    None encodes as "7FFF" and False as "7EFF" (sentinel values).
    """
    assert (
        not value or -(2 ** 7) <= value < 2 ** 7
    ), f"temp_to_hex({value}): is out of 2's complement range"
    if value is None:
        return "7FFF"  # or: "31FF"?
    if value is False:
        return "7EFF"
    centi = int(value * 100)
    if centi < 0:  # wrap into 2's complement
        centi += 2 ** 16
    return f"{centi:04X}"
def valve_demand(value: str) -> dict:
    """Decode an actuator demand byte; 0xFn values signal a fault.

    (A damper restricts flow, whereas a valve permits flow.)
    """
    fault_codes = {
        "F0": "open_circuit",
        "F1": "short_circuit",
        "FD": "valve_stuck",  # damper/valve stuck
        "FE": "actuator_stuck",
    }
    demand = int(value, 16)
    if demand & 0xF0 == 0xF0:  # a set top nibble flags a malfunction
        fault = fault_codes.get(value, "malfunction")
        return {"heat_demand": None, "fault": fault}
    assert demand <= 200
    return {"heat_demand": demand / 200}
def hex_id_to_dec(device_hex: str, friendly_id=False) -> str:
    """Convert (say) '06368E' to '01:145038' (or 'CTL:145038')."""
    if device_hex == "FFFFFE":  # aka '63:262142'
        return "NUL:262142" if friendly_id else NUL_DEVICE_ID
    if not device_hex.strip():  # aka '--:------'
        return f"{'':10}" if friendly_id else NON_DEVICE_ID
    raw = int(device_hex, 16)
    # top 6 bits are the device type, low 18 bits the device number
    dev_type = f"{(raw & 0xFC0000) >> 18:02d}"
    if friendly_id:
        dev_type = DEVICE_TYPES.get(dev_type, f"{dev_type:<3}")
    return f"{dev_type}:{raw & 0x03FFFF:06d}"
| 34.14791 | 88 | 0.591149 |
import ctypes
import sys
import time
from datetime import datetime as dt
from typing import Optional, Union
from .const import DEVICE_TYPES, NON_DEVICE_ID, NUL_DEVICE_ID
class FILETIME(ctypes.Structure):
_fields_ = [("dwLowDateTime", ctypes.c_uint), ("dwHighDateTime", ctypes.c_uint)]
def dt_now() -> dt:
return dt.fromtimestamp(timestamp())
def dt_str() -> str:
return dt_now().isoformat(timespec="microseconds")
def timestamp() -> float:
if sys.platform != "win32":
return time.time_ns() / 1e9
file_time = FILETIME()
ctypes.windll.kernel32.GetSystemTimePreciseAsFileTime(ctypes.byref(file_time))
_time = (file_time.dwLowDateTime + (file_time.dwHighDateTime << 32)) / 1e7
return _time - 134774 * 24 * 60 * 60
def _precision_v_cost():
import math
LOOPS = 10 ** 6
print("time.time_ns(): %s" % time.time_ns())
print("time.time(): %s\r\n" % time.time())
starts = time.time_ns()
min_dt = [abs(time.time_ns() - time.time_ns()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time_ns(): %s ns" % min_dt)
print("duration time_ns(): %s ns\r\n" % (time.time_ns() - starts))
starts = time.time_ns()
min_dt = [abs(time.time() - time.time()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time(): %s ns" % math.ceil(min_dt * 1e9))
print("duration time(): %s ns\r\n" % (time.time_ns() - starts))
starts = time.time_ns()
min_dt = [abs(timestamp() - timestamp()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta timestamp(): %s ns" % math.ceil(min_dt * 1e9))
print("duration timestamp(): %s ns\r\n" % (time.time_ns() - starts))
LOOPS = 10 ** 4
starts = time.time_ns()
min_td = [abs(dt.now() - dt.now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt.now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt.now(): %s ns\r\n" % (time.time_ns() - starts))
starts = time.time_ns()
min_td = [abs(dt_now() - dt_now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
starts = time.time_ns()
min_td = [
abs(
(dt_now if sys.platform == "win32" else dt.now)()
- (dt_now if sys.platform == "win32" else dt.now)()
)
for _ in range(LOOPS)
]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
dt_nov = dt_now if sys.platform == "win32" else dt.now
starts = time.time_ns()
min_td = [abs(dt_nov() - dt_nov()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
def double(val, factor=1) -> Optional[float]:
if val == "7FFF":
return
result = int(val, 16)
assert result < 32767
return result if factor == 1 else result / factor
def flag8(byte, lsb=False) -> list:
if lsb is True:
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in range(8)]
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in reversed(range(8))]
def percent(value: str) -> Optional[float]:
assert len(value) == 2, f"percent({value}): len is not 2"
if value in {"EF", "FE", "FF"}:
return
assert int(value, 16) <= 200, "max value should be 0xC8, not 0x{value}"
return int(value, 16) / 200
def bool_from_hex(value: str) -> Optional[bool]:
assert value in {"00", "C8", "FF"}, value
return {"00": False, "C8": True}.get(value)
def date_from_hex(value: str) -> Optional[str]:
assert len(value) == 8, "len is not 8"
if value == "FFFFFFFF":
return
return dt(
year=int(value[4:8], 16),
month=int(value[2:4], 16),
day=int(value[:2], 16) & 0b11111,
).strftime("%Y-%m-%d")
def dtm_from_hex(value: str) -> str:
if value == "FF" * 6:
return None
if len(value) == 12:
value = f"00{value}"
return dt(
year=int(value[10:14], 16),
month=int(value[8:10], 16),
day=int(value[6:8], 16),
hour=int(value[4:6], 16) & 0b11111,
minute=int(value[2:4], 16),
second=int(value[:2], 16) & 0b1111111,
).isoformat(timespec="seconds")
def dtm_to_hex(dtm: Union[str, dt]) -> str:
def _dtm_to_hex(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args):
return f"{tm_min:02X}{tm_hour:02X}{tm_mday:02X}{tm_mon:02X}{tm_year:04X}"
if dtm is None:
return "FF" * 6
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm)
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
return _dtm_to_hex(*dtm.timetuple())
def dts_from_hex(value: str) -> Optional[str]:
if value == "00000000007F":
return None
_seqx = int(value, 16)
return dt(
year=(_seqx & 0b1111111 << 24) >> 24,
month=(_seqx & 0b1111 << 36) >> 36,
day=(_seqx & 0b11111 << 31) >> 31,
hour=(_seqx & 0b11111 << 19) >> 19,
minute=(_seqx & 0b111111 << 13) >> 13,
second=(_seqx & 0b111111 << 7) >> 7,
).strftime("%Y-%m-%dT%H:%M:%S")
def dts_to_hex(dtm: Union[str, dt]) -> str:
if dtm is None:
return "00000000007F"
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm)
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args) = dtm.timetuple()
val = sum(
(
tm_year % 100 << 24,
tm_mon << 36,
tm_mday << 31,
tm_hour << 19,
tm_min << 13,
tm_sec << 7,
)
)
return f"{val:012X}"
def str_from_hex(value: str) -> Optional[str]:
y([x for x in bytearray.fromhex(value) if 31 < x < 127])
return result.decode("ascii").strip() if result else None
def str_to_hex(value: str) -> str:
return "".join(f"{ord(x):02X}" for x in value)
def temp_from_hex(value: str) -> Union[float, bool, None]:
assert len(value) == 4, f"temp_from_hex({value}): should be 4 bytes long"
if value == "31FF":
return
if value == "7EFF":
return False
if value == "7FFF":
return
temp = int(value, 16)
return (temp if temp < 2 ** 15 else temp - 2 ** 16) / 100
def temp_to_hex(value: float) -> str:
assert (
not value or -(2 ** 7) <= value < 2 ** 7
), f"temp_to_hex({value}): is out of 2's complement range"
if value is None:
return "7FFF" # or: "31FF"?
if value is False:
return "7EFF"
temp = int(value * 100)
return f"{temp if temp >= 0 else temp + 2 ** 16:04X}"
def valve_demand(value: str) -> dict:
# a damper restricts flow, a valve permits flow
demand = int(value, 16)
if demand & 0xF0 == 0xF0:
VALVE_STATE = {
"F0": "open_circuit",
"F1": "short_circuit",
"FD": "valve_stuck", # damper/valve stuck
"FE": "actuator_stuck",
} # VALVE_STATE.get(value, "malfunction")
return {
"heat_demand": None,
"fault": VALVE_STATE.get(value, "malfunction"),
}
assert demand <= 200
return {"heat_demand": demand / 200}
def hex_id_to_dec(device_hex: str, friendly_id=False) -> str:
if device_hex == "FFFFFE": # aka '63:262142'
return "NUL:262142" if friendly_id else NUL_DEVICE_ID
if not device_hex.strip(): # aka '--:------'
return f"{'':10}" if friendly_id else NON_DEVICE_ID
_tmp = int(device_hex, 16)
dev_type = f"{(_tmp & 0xFC0000) >> 18:02d}"
if friendly_id:
dev_type = DEVICE_TYPES.get(dev_type, f"{dev_type:<3}")
return f"{dev_type}:{_tmp & 0x03FFFF:06d}"
| true | true |
f736682c03031c943fab6c203b47c7184e8ec07d | 12,492 | py | Python | models/resnet_lgdv2.py | kk2487/3dresnet | d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94 | [
"MIT"
] | null | null | null | models/resnet_lgdv2.py | kk2487/3dresnet | d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94 | [
"MIT"
] | null | null | null | models/resnet_lgdv2.py | kk2487/3dresnet | d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import numpy as np
__all__ = [
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200'
]
def look_bottleneck_global(glo):
    """Debug helper: report whether a bottleneck received global context.

    BUGFIX: the original guarded the prints with ``if
    look_bottleneck_global:`` -- a reference to the function object itself,
    which is always truthy, so the guard was a no-op and has been removed
    (observable behavior is unchanged).
    """
    if glo is None:
        print('first bottleneck-> no global content!')
    else:
        print('glo has content!')
# Can print the model structure
def model_info(model, report='summary'):
    """Print a summary (or, with report='full', a per-parameter table) of a
    PyTorch model's parameter and gradient counts.

    BUGFIX: ``report is 'full'`` compared identity with a string literal
    (implementation-dependent, SyntaxWarning on modern Python); use ``==``.
    """
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if report == 'full':
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
def conv3x3x3(in_planes, out_planes, stride=1):
    """Return a 3x3x3 3D convolution with padding 1 and no bias."""
    return nn.Conv3d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
    )
# Implement of bottleneck with se block
class BottleneckX(nn.Module):
    """ResNet bottleneck with an LGD (local/global descriptor) side path.

    forward() consumes and produces a two-element list [local, global]:
    the local tensor is the usual feature map, while the global tensor
    is a pooled per-channel descriptor threaded through the stage.
    The first block of the network receives global=None and behaves as a
    plain bottleneck.
    """
    # Output channels are `planes * expansion`, as in standard ResNet.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, first_block=False):
        super(BottleneckX, self).__init__()
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        #self.bn1 = nn.GroupNorm(4, planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        #self.bn2 = nn.GroupNorm(4, planes)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * 4)
        #self.bn3 = nn.GroupNorm(4, planes * 4)
        #self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # If first bottleneckX, it does not contain global path
        self.first_block = first_block
        # If downsampling occurs, set true
        self.ds = False
        #self.se_module = SEModule(planes * 4, reduction=16, first_block=self.first_block)
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        #Implement LGD block
        # fc1 lifts the previous stage's global descriptor (half as many
        # channels) up to this stage's channel count when downsampling.
        self.fc1 = nn.Conv3d(planes * 4 // 2, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        #self.fc2 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn4 = nn.BatchNorm3d(planes * 4)
        #self.bn4 = nn.GroupNorm(4, planes * 4)
        # fc3/fc4 and fc5/fc6 are SE-style squeeze-excite pairs (reduce by
        # 16x, then expand) for the incoming and freshly-pooled descriptors.
        self.fc3 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)
        self.fc4 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.fc5 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)
        self.fc6 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.LeakyReLU(inplace=True)
    def forward(self, xx):
        # xx contains two element: input->x and global path->glo
        x = xx[0]
        glo = xx[1]
        residual = x
        # standard 1x1 -> 3x3 -> 1x1 bottleneck
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        #out = self.relu(out)
        # If downsample, downsampleing global path & residual channels
        if self.downsample is not None:
            if glo is not None:
                glo = self.avg_pool(glo)
                glo = self.fc1(glo)
                glo = self.relu(glo)
            residual = self.downsample(x)
        #LGD block
        if glo is not None:
            # gate the local features with the squeezed global descriptor
            glo = self.fc3(glo)
            glo = self.relu(glo)
            glo = self.fc4(glo)
            glo = self.sigmoid(glo)
            out = out * glo
            #out = self.relu(out)
            # pool the gated features into a new descriptor and fuse it
            # with the incoming one for the next block
            glo2 = self.avg_pool(out)
            glo2 = self.fc5(glo2)
            glo2 = self.relu(glo2)
            glo2 = self.fc6(glo2)
            glo2 = self.sigmoid(glo2)
            g = glo + glo2
            g = self.relu(g)
            out = out + residual
            out = self.relu(out)
            outg = [out, g]
        # Normal bottleneck
        else:
            out = out + residual
            out = self.relu(out)
            outg = [out, residual]
        return outg
class ResNet(nn.Module):
    """3D ResNet backbone whose blocks thread a [local, global] pair ("LGD").

    Each stage consumes and produces a two-element list [local_features,
    global_descriptor]; the first stage receives None for the (not yet
    created) global path.

    NOTE(review): shortcut_type 'A' references downsample_basic_block,
    which is neither defined nor imported in this module -- selecting it
    would raise NameError at build time.
    """

    def __init__(self,
                 blockx,
                 layers,
                 sample_size,
                 sample_duration,
                 shortcut_type='B',
                 num_classes=400):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7x7 conv (stride 2 spatially, 1 temporally), then max-pool.
        self.conv1 = nn.Conv3d(
            3,
            64,
            kernel_size=7,
            stride=(1, 2, 2),
            padding=(3, 3, 3),
            bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.LeakyReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        # Stage 1 has no global path yet (first_block=True).
        self.layer1 = self._make_layer(blockx, 64, layers[0], shortcut_type, first_block=True)
        self.layer2 = self._make_layer(blockx, 128, layers[1], shortcut_type, stride=2, first_block=False)
        self.layer3 = self._make_layer(blockx, 256, layers[2], shortcut_type, stride=2, first_block=False)
        self.layer4 = self._make_layer(blockx, 512, layers[3], shortcut_type, stride=2, first_block=False)
        # Pool whatever temporal/spatial extent remains after the stages.
        last_duration = int(math.ceil(sample_duration / 16))
        last_size = int(math.ceil(sample_size / 32))
        self.avgpool = nn.AvgPool3d(
            (last_duration, last_size, last_size), stride=1)
        self.fc = nn.Linear(512 * blockx.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                # BUGFIX: `m.weight = nn.init.kaiming_normal(...)` used a
                # deprecated/removed alias AND replaced the Parameter with a
                # plain Tensor; use the in-place initializer instead.
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, first_block=False):
        """Build one residual stage; downsample the identity when the
        shape changes (stride != 1 or a channel-count mismatch)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if shortcut_type == 'A':
                # NOTE(review): downsample_basic_block is undefined here.
                downsample = partial(
                    downsample_basic_block,
                    planes=planes * block.expansion,
                    stride=stride)
            else:
                downsample = nn.Sequential(
                    nn.Conv3d(
                        self.inplanes,
                        planes * block.expansion,
                        kernel_size=1,
                        stride=stride,
                        bias=False), nn.BatchNorm3d(planes * block.expansion))

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, first_block))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        lookshape = False  # flip to True to print intermediate shapes
        # First time need to give two elements to the blocks: [local, None]
        xx = [x, None]
        x = self.layer1(xx)
        if lookshape:
            self._print_pair_shapes('layer1', x)
        x = self.layer2(x)
        if lookshape:
            self._print_pair_shapes('layer2', x)
        x = self.layer3(x)
        if lookshape:
            self._print_pair_shapes('layer3', x)
        x = self.layer4(x)
        if lookshape:
            self._print_pair_shapes('layer4', x)
        # Discard the global descriptor after the last stage; classify from
        # the pooled local features.  (Earlier experiments fusing `g` via
        # concat/1x1-conv were removed as dead code.)
        loc, g = x[0], x[1]
        x = self.avgpool(loc)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

    @staticmethod
    def _print_pair_shapes(label, pair):
        # Debug helper: print the shapes of a [local, global] pair.
        print('\n%s-------------' % label)
        print(np.shape(pair[0]))
        print(np.shape(pair[1]))
        print('--------------')
def get_fine_tuning_parameters(model, ft_begin_index):
    """Collect parameter groups for fine-tuning from ft_begin_index onward.

    NOTE(review): as written, the for/else appends EVERY parameter (the
    else branch also appends), so the selection is effectively disabled
    and this returns all model parameters with default learning rate.
    The debug prints are left in deliberately.
    """
    #if ft_begin_index == 0:
    # return model.parameters()
    print('ohraaaa')
    ft_module_names = []
    for i in range(ft_begin_index, 5):
        ft_module_names.append('layer{}'.format(i))
    ft_module_names.append('fc')
    # Look the content of ft_module
    print('ft: ', ft_module_names)
    parameters = []
    ii = 0
    '''
    for k, v in model.named_parameters():
        for ft_module in ft_module_names:
            if ii >= 271: #220 271
                print(ii)
                parameters.append({'params': v})
            else:
                print('notfc')
                print(ii)
                parameters.append({'params': v, 'lr': 0.0})
                #parameters.append({'params': v})
            print(k)
            ii = ii+1
    return parameters
    '''
    # bakup code
    for k, v in model.named_parameters():
        for ft_module in ft_module_names:
            if ft_module in k:
                #if ii >= 271:
                print('fc')
                #print(ii)
                parameters.append({'params': v})
                break
        else:
            # for/else: runs when no ft_module matched -- still appended
            print('notfc')
            #print(ii)
            #parameters.append({'params': v, 'lr': 0.0})
            parameters.append({'params': v})
        print(k)
        ii = ii+1
    return parameters
def resnet10(**kwargs):
    """Constructs a ResNet-10 model.

    BUGFIX(doc): docstring previously said ResNet-18.
    NOTE(review): BasicBlock is not defined/imported in this module, so
    calling this raises NameError -- confirm against the upstream source.
    """
    model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
    return model
def resnet18(**kwargs):
    """Constructs a ResNet-18 model.

    NOTE(review): BasicBlock is not defined/imported in this module.
    """
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Constructs a ResNet-34 model.

    NOTE(review): BasicBlock is not defined/imported in this module.
    """
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """Constructs a ResNet-50 model (BottleneckX variant with LGD path)."""
    return ResNet(BottleneckX, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """Constructs a ResNet-101 model (BottleneckX variant with LGD path)."""
    return ResNet(BottleneckX, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """Constructs a ResNet-152 model.

    BUGFIX(doc): docstring previously said ResNet-101.
    NOTE(review): Bottleneck is not defined/imported in this module, so
    calling this raises NameError -- confirm against the upstream source.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return model
def resnet200(**kwargs):
    """Constructs a ResNet-200 model.

    BUGFIX(doc): docstring previously said ResNet-101.
    NOTE(review): Bottleneck is not defined/imported in this module, so
    calling this raises NameError -- confirm against the upstream source.
    """
    model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
    return model
return model | 32.030769 | 130 | 0.53186 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import numpy as np
__all__ = [
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200'
]
def look_bottleneck_global(glo):
if look_bottleneck_global:
if glo is None:
print('first bottleneck-> no global content!')
else:
print('glo has content!')
def model_info(model, report='summary'):
n_p = sum(x.numel() for x in model.parameters())
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)
if report is 'full':
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
def conv3x3x3(in_planes, out_planes, stride=1):
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
class BottleneckX(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, first_block=False):
super(BottleneckX, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.downsample = downsample
self.stride = stride
self.first_block = first_block
self.ds = False
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.fc1 = nn.Conv3d(planes * 4 // 2, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
self.bn4 = nn.BatchNorm3d(planes * 4)
self.fc3 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)
self.fc4 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
self.fc5 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)
self.fc6 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
self.sigmoid = nn.Sigmoid()
self.relu = nn.LeakyReLU(inplace=True)
def forward(self, xx):
x = xx[0]
glo = xx[1]
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
if glo is not None:
glo = self.avg_pool(glo)
glo = self.fc1(glo)
glo = self.relu(glo)
residual = self.downsample(x)
if glo is not None:
glo = self.fc3(glo)
glo = self.relu(glo)
glo = self.fc4(glo)
glo = self.sigmoid(glo)
out = out * glo
glo2 = self.avg_pool(out)
glo2 = self.fc5(glo2)
glo2 = self.relu(glo2)
glo2 = self.fc6(glo2)
glo2 = self.sigmoid(glo2)
g = glo + glo2
g = self.relu(g)
out = out + residual
out = self.relu(out)
outg = [out, g]
else:
out = out + residual
out = self.relu(out)
outg = [out, residual]
return outg
class ResNet(nn.Module):
def __init__(self,
blockx,
layers,
sample_size,
sample_duration,
shortcut_type='B',
num_classes=400):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.LeakyReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(blockx, 64, layers[0], shortcut_type, first_block=True)
self.layer2 = self._make_layer(blockx, 128, layers[1], shortcut_type, stride=2, first_block=False)
self.layer3 = self._make_layer(blockx, 256, layers[2], shortcut_type, stride=2, first_block=False)
self.layer4 = self._make_layer(blockx, 512, layers[3], shortcut_type, stride=2, first_block=False)
last_duration = int(math.ceil(sample_duration / 16))
last_size = int(math.ceil(sample_size / 32))
self.avgpool = nn.AvgPool3d(
(last_duration, last_size, last_size), stride=1)
self.fc = nn.Linear(512 * blockx.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, first_block=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, first_block))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
lookshape = False
xx = [x, None]
x = self.layer1(xx)
if lookshape:
print('\nlayer1-------------')
print(np.shape(x[0]))
print(np.shape(x[1]))
print('--------------')
x = self.layer2(x)
if lookshape:
print('\nlayer2-------------')
print(np.shape(x[0]))
print(np.shape(x[1]))
print('--------------')
x = self.layer3(x)
if lookshape:
print('\nlayer3-------------')
print(np.shape(x[0]))
print(np.shape(x[1]))
print('--------------')
x = self.layer4(x)
if lookshape:
print('\nlayer4-------------')
print(np.shape(x[0]))
print(np.shape(x[1]))
print('--------------')
loc, g = x[0], x[1]
if lookshape:
print('loc & g:--------')
print(np.shape(loc))
print(np.shape(g))
print('----------------')
x = self.avgpool(loc)
if lookshape:
print('\nlayer5-------------')
print(np.shape(x))
print('--------------')
x = x.view(x.size(0), -1)
if lookshape:
print('\nlayer6-------------')
print(np.shape(x))
print('--------------')
x = self.fc(x)
if lookshape:
print('\nlayer7-------------')
print(np.shape(x))
print('--------------')
return x
def get_fine_tuning_parameters(model, ft_begin_index):
print('ohraaaa')
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
print('ft: ', ft_module_names)
parameters = []
ii = 0
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
print('fc')
parameters.append({'params': v})
break
else:
print('notfc')
parameters.append({'params': v})
print(k)
ii = ii+1
return parameters
def resnet10(**kwargs):
model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
return model
def resnet18(**kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(**kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(**kwargs):
model = ResNet(BottleneckX, [3, 4, 6, 3], **kwargs)
return model
def resnet101(**kwargs):
model = ResNet(BottleneckX, [3, 4, 23, 3], **kwargs)
return model
def resnet152(**kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def resnet200(**kwargs):
model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
return model | true | true |
f7366835c02e403a339394e5e63ac8c59828af30 | 123 | py | Python | src/Stage/stage4.py | yuduki24/Rikako | 989a0499d06bd37b5a6d826b1fb898369a50bbfb | [
"MIT"
] | null | null | null | src/Stage/stage4.py | yuduki24/Rikako | 989a0499d06bd37b5a6d826b1fb898369a50bbfb | [
"MIT"
] | null | null | null | src/Stage/stage4.py | yuduki24/Rikako | 989a0499d06bd37b5a6d826b1fb898369a50bbfb | [
"MIT"
] | null | null | null | from Stage.stage import *
class Stage4(Stage):
def deployEnemy(self):
Boss2((self.scr_rect.width//2, 200)) | 24.6 | 44 | 0.650407 | from Stage.stage import *
class Stage4(Stage):
def deployEnemy(self):
Boss2((self.scr_rect.width//2, 200)) | true | true |
f73669c910733fc3bbdb968d1c678045f94256d1 | 999 | py | Python | velhot/migrations/0004_profile.py | matiasmane/BWA | 1dd3e68362fafb40e615f1485f2cdf4ad74837af | [
"MIT"
] | null | null | null | velhot/migrations/0004_profile.py | matiasmane/BWA | 1dd3e68362fafb40e615f1485f2cdf4ad74837af | [
"MIT"
] | null | null | null | velhot/migrations/0004_profile.py | matiasmane/BWA | 1dd3e68362fafb40e615f1485f2cdf4ad74837af | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2018-11-20 12:48
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('velhot', '0003_friend'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('real_name', models.CharField(max_length=60)),
('address', models.CharField(max_length=60)),
('phone_number', models.CharField(max_length=17, validators=[django.core.validators.RegexValidator(regex='^\\+?1?\\d{9,15}$')])),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.678571 | 145 | 0.638639 |
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('velhot', '0003_friend'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('real_name', models.CharField(max_length=60)),
('address', models.CharField(max_length=60)),
('phone_number', models.CharField(max_length=17, validators=[django.core.validators.RegexValidator(regex='^\\+?1?\\d{9,15}$')])),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7366a3d5b380afad1909ab74d0d3a13473a6311 | 43,835 | py | Python | statsmodels/tsa/tests/results/arima111nc_css_results.py | yarikoptic/statsmodels | f990cb1a1ef0c9883c9394444e6f9d027efabec6 | [
"BSD-3-Clause"
] | 34 | 2018-07-13T11:30:46.000Z | 2022-01-05T13:48:10.000Z | venv/lib/python3.6/site-packages/statsmodels/tsa/tests/results/arima111nc_css_results.py | HeyWeiPan/vnpy_crypto | 844381797a475a01c05a4e162592a5a6e3a48032 | [
"MIT"
] | 6 | 2015-08-28T16:59:03.000Z | 2019-04-12T22:29:01.000Z | venv/lib/python3.6/site-packages/statsmodels/tsa/tests/results/arima111nc_css_results.py | HeyWeiPan/vnpy_crypto | 844381797a475a01c05a4e162592a5a6e3a48032 | [
"MIT"
] | 28 | 2015-04-01T20:02:25.000Z | 2021-07-03T00:09:28.000Z | import numpy as np
llf = np.array([-242.89663276735])
nobs = np.array([ 202])
k = np.array([ 3])
k_exog = np.array([ 1])
sigma = np.array([ .8053519404535])
chi2 = np.array([ 15723.381396967])
df_model = np.array([ 2])
k_ar = np.array([ 1])
k_ma = np.array([ 1])
params = np.array([ .99479180506163,
-.84461527652809,
.64859174799221])
cov_params = np.array([ .00008904968254,
-.00023560410507,
.00012795903324,
-.00023560410507,
.00131628534915,
-.00022462340695,
.00012795903324,
-.00022462340695,
.0005651128627]).reshape(3,3)
xb = np.array([ 0,
0,
.02869686298072,
.05651443824172,
.0503994859755,
.06887971609831,
.05940540507436,
.08067328482866,
.08167565613985,
.06429278105497,
.07087650150061,
.06886467337608,
.06716959923506,
.08230647444725,
.07099691033363,
.08401278406382,
.07996553182602,
.07354256510735,
.09366323798895,
.08811800926924,
.10296355187893,
.08846370875835,
.0852297320962,
.08700425922871,
.09751411527395,
.09737934917212,
.11228405684233,
.1053489819169,
.12352022528648,
.16439816355705,
.1643835157156,
.19891132414341,
.17551273107529,
.17827558517456,
.19562774896622,
.21028305590153,
.23767858743668,
.24580039083958,
.28269505500793,
.29883882403374,
.31247469782829,
.35402658581734,
.37410452961922,
.39106267690659,
.42040377855301,
.44518512487411,
.43608102202415,
.44340893626213,
.44959822297096,
.40977239608765,
.42118826508522,
.40079545974731,
.38357082009315,
.36902260780334,
.35673499107361,
.36137464642525,
.38031083345413,
.47139286994934,
.47323387861252,
.60994738340378,
.69538277387619,
.7825602889061,
.84117436408997,
.9657689332962,
1.0109325647354,
.95897275209427,
.96013957262039,
.9461076259613,
.9342554807663,
.83413934707642,
.83968591690063,
.84437066316605,
.83330947160721,
.8990553021431,
.87949693202972,
.86297762393951,
.89407861232758,
.93536442518234,
1.0303052663803,
1.1104937791824,
1.1481873989105,
1.2851470708847,
1.4458787441254,
1.5515991449356,
1.7309991121292,
1.8975404500961,
1.8579913377762,
1.8846583366394,
1.9672524929047,
1.9469071626663,
2.0048115253448,
1.9786299467087,
1.8213576078415,
1.6284521818161,
1.7508568763733,
1.5689061880112,
1.2950873374939,
1.2290096282959,
1.1882168054581,
1.1537625789642,
1.1697143316269,
1.1681711673737,
1.106795668602,
1.0849931240082,
1.006507396698,
1.0453414916992,
.98803448677063,
.95465070009232,
1.0165599584579,
.67838954925537,
.69311982393265,
.69054269790649,
.76345545053482,
.84005492925644,
.87471830844879,
.91901183128357,
.92638796567917,
.96265280246735,
1.0083012580872,
1.0618740320206,
1.0921038389206,
1.2077431678772,
1.2303256988525,
1.174311041832,
1.3072115182877,
1.314337015152,
1.3503924608231,
1.5760731697083,
1.5264053344727,
1.34929728508,
1.304829955101,
1.2522557973862,
1.222869515419,
1.198047041893,
1.1770839691162,
1.1743944883347,
1.1571066379547,
1.1274864673615,
1.0574153661728,
1.058304309845,
.99898308515549,
.9789143204689,
1.0070173740387,
1.000718832016,
1.0104174613953,
1.0486439466476,
1.0058424472809,
.98470783233643,
1.0119106769562,
1.0649236440659,
1.0346088409424,
1.0540577173233,
1.0704846382141,
.97923594713211,
.90216588973999,
.9271782040596,
.85819715261459,
.75488126277924,
.78776079416275,
.77047789096832,
.77089905738831,
.8313245177269,
.82229107618332,
.90476810932159,
.94439232349396,
1.0379292964935,
1.1469690799713,
1.1489590406418,
1.2257302999496,
1.1554099321365,
1.1260533332825,
.9811190366745,
.8436843752861,
.95287209749222,
.90993344783783,
.94875508546829,
1.0115815401077,
.94450175762177,
.87282890081406,
.91741597652435,
.98511207103729,
.9972335100174,
1.0975805521011,
1.1823329925537,
1.1487929821014,
1.270641207695,
1.2083609104156,
1.696394443512,
1.4628355503082,
1.4307631254196,
1.5087975263596,
1.1542117595673,
1.2262620925903,
1.3880327939987,
1.3853038549423,
1.4396153688431,
1.7208145856857,
1.678991317749,
2.110867023468,
1.524417757988,
.57946246862411,
.56406193971634,
.74643105268478])
y = np.array([np.nan,
28.979999542236,
29.178695678711,
29.40651512146,
29.420400619507,
29.608880996704,
29.609405517578,
29.830673217773,
29.921676635742,
29.874292373657,
29.990877151489,
30.048864364624,
30.10717010498,
30.292304992676,
30.290996551514,
30.464012145996,
30.519966125488,
30.553541183472,
30.783664703369,
30.838117599487,
31.042964935303,
31.038463592529,
31.105230331421,
31.207004547119,
31.377513885498,
31.477378845215,
31.692283630371,
31.755348205566,
32.003520965576,
32.444396972656,
32.61438369751,
33.048908233643,
33.07551574707,
33.278274536133,
33.595630645752,
33.91028213501,
34.337677001953,
34.645801544189,
35.182697296143,
35.598838806152,
36.012474060059,
36.654026031494,
37.174102783203,
37.691062927246,
38.320404052734,
38.94518661499,
39.336082458496,
39.843410491943,
40.349597930908,
40.509769439697,
41.021186828613,
41.300796508789,
41.583572387695,
41.869022369385,
42.156734466553,
42.561374664307,
43.080310821533,
44.171394348145,
44.673233032227,
46.209945678711,
47.495380401611,
48.882556915283,
50.141174316406,
51.965770721436,
53.310932159424,
53.958972930908,
54.960140228271,
55.84610748291,
56.734252929688,
56.934139251709,
57.839687347412,
58.744373321533,
59.533309936523,
60.899055480957,
61.679496765137,
62.46297454834,
63.594078063965,
64.83536529541,
66.530303955078,
68.210494995117,
69.64818572998,
71.885147094727,
74.445877075195,
76.751594543457,
79.731002807617,
82.797538757324,
84.457992553711,
86.584655761719,
89.167251586914,
91.046905517578,
93.504814147949,
95.378631591797,
96.22135925293,
96.628448486328,
99.250854492188,
99.668907165527,
99.195091247559,
100.0290145874,
100.98822021484,
101.95376586914,
103.26971435547,
104.46817779541,
105.20679473877,
106.1849899292,
106.70650482178,
108.0453414917,
108.68803405762,
109.45465087891,
110.91656494141,
109.37838745117,
110.19312286377,
110.89054107666,
112.16345977783,
113.54005432129,
114.67472076416,
115.91901397705,
116.92639160156,
118.16265106201,
119.50830078125,
120.96187591553,
122.29209899902,
124.30773925781,
125.7303237915,
126.57431030273,
128.8072052002,
130.21432495117,
131.85038757324,
134.97607421875,
136.22640991211,
136.44931030273,
137.50482177734,
138.45225524902,
139.5228729248,
140.59803771973,
141.67707824707,
142.87438964844,
143.95710754395,
144.92749023438,
145.55741882324,
146.65830993652,
147.29898071289,
148.17890930176,
149.40701293945,
150.40071105957,
151.51042175293,
152.84864807129,
153.60585021973,
154.48471069336,
155.7119140625,
157.16493225098,
158.03460693359,
159.25405883789,
160.47047424316,
160.87922668457,
161.30215454102,
162.42718505859,
162.85820007324,
162.95487976074,
163.98776245117,
164.67047119141,
165.47090148926,
166.73132324219,
167.52229309082,
169.00477600098,
170.24440002441,
171.93792724609,
173.84696960449,
175.04895019531,
176.82572937012,
177.55540466309,
178.52604675293,
178.58113098145,
178.54368591309,
180.25286865234,
180.90992736816,
182.14875793457,
183.61158752441,
184.14450073242,
184.5728302002,
185.81741333008,
187.28511047363,
188.39723205566,
190.19758605957,
191.98233032227,
192.94879150391,
195.07064819336,
195.90835571289,
200.89639282227,
200.86282348633,
202.13075256348,
204.20880126953,
203.05419921875,
204.80026245117,
207.3080291748,
208.72329711914,
210.57261657715,
214.21580505371,
215.67597961426,
220.72087097168,
218.41342163086,
212.75346374512,
213.23506164551,
215.21542358398])
resid = np.array([np.nan,
.17000007629395,
.17130389809608,
-.03651398047805,
.11960058659315,
-.05888139456511,
.14059536159039,
.00932686589658,
-.11167634278536,
.04570783302188,
-.0108770346269,
-.00886330008507,
.10282856971025,
-.07230624556541,
.08900293707848,
-.0240114107728,
-.03996651992202,
.13645842671394,
-.03366377204657,
.10188252478838,
-.09296332299709,
-.01846401393414,
.01477065030485,
.0729955881834,
.00248436117545,
.10262141376734,
-.04228436201811,
.12465056031942,
.27647939324379,
.00560382334515,
.23561419546604,
-.1489082723856,
.02448422275484,
.12172746658325,
.10437148809433,
.18971465528011,
.06232447177172,
.25419962406158,
.11730266362429,
.10116269439459,
.2875237762928,
.14597341418266,
.12589547038078,
.20893961191177,
.17959471046925,
-.04518361017108,
.06391899287701,
.05659105628729,
-.24960128962994,
.09022761881351,
-.12118522822857,
-.10079623758793,
-.08357158303261,
-.06902338564396,
.04326653853059,
.13862533867359,
.61968916654587,
.02860714122653,
.92676383256912,
.59005337953568,
.60461646318436,
.41744044423103,
.85882639884949,
.33423033356667,
-.31093180179596,
.04102724045515,
-.06013804674149,
-.04610994458199,
-.63425624370575,
.06586220860481,
.06031560897827,
-.04437142238021,
.46668976545334,
-.09905604273081,
-.07949769496918,
.23702463507652,
.30592212080956,
.66463404893875,
.56969320774078,
.28950771689415,
.95181107521057,
1.1148544549942,
.75411820411682,
1.2484039068222,
1.1690024137497,
-.1975435167551,
.24200716614723,
.6153416633606,
-.06725100427866,
.45309436321259,
-.10480991750956,
-.97863000631332,
-1.2213591337204,
.8715478181839,
-1.1508584022522,
-1.7689031362534,
-.39508575201035,
-.22900961339474,
-.18821682035923,
.14623281359673,
.03029025532305,
-.36817568540573,
-.10679569840431,
-.48499462008476,
.29349562525749,
-.34534454345703,
-.18803144991398,
.44535079598427,
-2.2165644168854,
.12161350995302,
.00687709869817,
.50946187973022,
.53653997182846,
.25995117425919,
.32527860999107,
.08098815381527,
.27360898256302,
.33735024929047,
.39170032739639,
.23812144994736,
.80789774656296,
.19225835800171,
-.33032417297363,
.92568749189377,
.09278241544962,
.28566908836365,
1.5496014356613,
-.27607008814812,
-1.1263961791992,
-.24930645525455,
-.30482992529869,
-.15224970877171,
-.12287864089012,
-.09804095327854,
.02291300706565,
-.07438835501671,
-.15710659325123,
-.42748948931694,
.04259072244167,
-.35830733180046,
-.09898918122053,
.22108262777328,
-.00701736938208,
.0992873236537,
.28958559036255,
-.24864092469215,
-.10584850609303,
.21528913080692,
.38809850811958,
-.16492980718613,
.16538816690445,
.1459391862154,
-.57048463821411,
-.47923597693443,
.19784018397331,
-.4271782040596,
-.65820020437241,
.24511873722076,
-.0877638310194,
.02952514961362,
.42909786105156,
-.03132146969438,
.57771807909012,
.29522883892059,
.6555985212326,
.76207375526428,
.05302781611681,
.55105316638947,
-.42574247717857,
-.15540990233421,
-.92604118585587,
-.88112819194794,
.75632172822952,
-.25287514925003,
.29006350040436,
.45125409960747,
-.41159069538116,
-.44450175762177,
.32716807723045,
.48259317874908,
.11487878113985,
.70277869701385,
.60241633653641,
-.18233296275139,
.85120695829391,
-.37064728140831,
3.2916390895844,
-1.4963974952698,
-.16283248364925,
.56923681497574,
-2.3088004589081,
.51979947090149,
1.1197309494019,
.02996650896966,
.40969428420067,
1.9223841428757,
-.21881568431854,
2.9340152740479,
-3.8318600654602,
-6.239429473877,
-.08245316892862,
1.2339268922806,
1.1695692539215])
yr = np.array([np.nan,
.17000007629395,
.17130389809608,
-.03651398047805,
.11960058659315,
-.05888139456511,
.14059536159039,
.00932686589658,
-.11167634278536,
.04570783302188,
-.0108770346269,
-.00886330008507,
.10282856971025,
-.07230624556541,
.08900293707848,
-.0240114107728,
-.03996651992202,
.13645842671394,
-.03366377204657,
.10188252478838,
-.09296332299709,
-.01846401393414,
.01477065030485,
.0729955881834,
.00248436117545,
.10262141376734,
-.04228436201811,
.12465056031942,
.27647939324379,
.00560382334515,
.23561419546604,
-.1489082723856,
.02448422275484,
.12172746658325,
.10437148809433,
.18971465528011,
.06232447177172,
.25419962406158,
.11730266362429,
.10116269439459,
.2875237762928,
.14597341418266,
.12589547038078,
.20893961191177,
.17959471046925,
-.04518361017108,
.06391899287701,
.05659105628729,
-.24960128962994,
.09022761881351,
-.12118522822857,
-.10079623758793,
-.08357158303261,
-.06902338564396,
.04326653853059,
.13862533867359,
.61968916654587,
.02860714122653,
.92676383256912,
.59005337953568,
.60461646318436,
.41744044423103,
.85882639884949,
.33423033356667,
-.31093180179596,
.04102724045515,
-.06013804674149,
-.04610994458199,
-.63425624370575,
.06586220860481,
.06031560897827,
-.04437142238021,
.46668976545334,
-.09905604273081,
-.07949769496918,
.23702463507652,
.30592212080956,
.66463404893875,
.56969320774078,
.28950771689415,
.95181107521057,
1.1148544549942,
.75411820411682,
1.2484039068222,
1.1690024137497,
-.1975435167551,
.24200716614723,
.6153416633606,
-.06725100427866,
.45309436321259,
-.10480991750956,
-.97863000631332,
-1.2213591337204,
.8715478181839,
-1.1508584022522,
-1.7689031362534,
-.39508575201035,
-.22900961339474,
-.18821682035923,
.14623281359673,
.03029025532305,
-.36817568540573,
-.10679569840431,
-.48499462008476,
.29349562525749,
-.34534454345703,
-.18803144991398,
.44535079598427,
-2.2165644168854,
.12161350995302,
.00687709869817,
.50946187973022,
.53653997182846,
.25995117425919,
.32527860999107,
.08098815381527,
.27360898256302,
.33735024929047,
.39170032739639,
.23812144994736,
.80789774656296,
.19225835800171,
-.33032417297363,
.92568749189377,
.09278241544962,
.28566908836365,
1.5496014356613,
-.27607008814812,
-1.1263961791992,
-.24930645525455,
-.30482992529869,
-.15224970877171,
-.12287864089012,
-.09804095327854,
.02291300706565,
-.07438835501671,
-.15710659325123,
-.42748948931694,
.04259072244167,
-.35830733180046,
-.09898918122053,
.22108262777328,
-.00701736938208,
.0992873236537,
.28958559036255,
-.24864092469215,
-.10584850609303,
.21528913080692,
.38809850811958,
-.16492980718613,
.16538816690445,
.1459391862154,
-.57048463821411,
-.47923597693443,
.19784018397331,
-.4271782040596,
-.65820020437241,
.24511873722076,
-.0877638310194,
.02952514961362,
.42909786105156,
-.03132146969438,
.57771807909012,
.29522883892059,
.6555985212326,
.76207375526428,
.05302781611681,
.55105316638947,
-.42574247717857,
-.15540990233421,
-.92604118585587,
-.88112819194794,
.75632172822952,
-.25287514925003,
.29006350040436,
.45125409960747,
-.41159069538116,
-.44450175762177,
.32716807723045,
.48259317874908,
.11487878113985,
.70277869701385,
.60241633653641,
-.18233296275139,
.85120695829391,
-.37064728140831,
3.2916390895844,
-1.4963974952698,
-.16283248364925,
.56923681497574,
-2.3088004589081,
.51979947090149,
1.1197309494019,
.02996650896966,
.40969428420067,
1.9223841428757,
-.21881568431854,
2.9340152740479,
-3.8318600654602,
-6.239429473877,
-.08245316892862,
1.2339268922806,
1.1695692539215])
mse = np.array([ 1.1112809181213,
.6632194519043,
.65879660844803,
.65575885772705,
.65364873409271,
.65217137336731,
.65113133192062,
.6503963470459,
.64987552165985,
.64950579404831,
.64924287796021,
.64905577898026,
.64892256259918,
.64882761240005,
.64875996112823,
.64871168136597,
.64867728948593,
.64865279197693,
.64863526821136,
.64862281084061,
.64861387014389,
.64860755205154,
.64860302209854,
.64859980344772,
.64859747886658,
.64859586954117,
.64859467744827,
.64859384298325,
.6485932469368,
.64859282970428,
.64859253168106,
.64859229326248,
.64859211444855,
.64859199523926,
.64859193563461,
.64859187602997,
.64859187602997,
.64859181642532,
.64859181642532,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068])
stdp = np.array([ 0,
0,
.02869686298072,
.05651443824172,
.0503994859755,
.06887971609831,
.05940540507436,
.08067328482866,
.08167565613985,
.06429278105497,
.07087650150061,
.06886467337608,
.06716959923506,
.08230647444725,
.07099691033363,
.08401278406382,
.07996553182602,
.07354256510735,
.09366323798895,
.08811800926924,
.10296355187893,
.08846370875835,
.0852297320962,
.08700425922871,
.09751411527395,
.09737934917212,
.11228405684233,
.1053489819169,
.12352022528648,
.16439816355705,
.1643835157156,
.19891132414341,
.17551273107529,
.17827558517456,
.19562774896622,
.21028305590153,
.23767858743668,
.24580039083958,
.28269505500793,
.29883882403374,
.31247469782829,
.35402658581734,
.37410452961922,
.39106267690659,
.42040377855301,
.44518512487411,
.43608102202415,
.44340893626213,
.44959822297096,
.40977239608765,
.42118826508522,
.40079545974731,
.38357082009315,
.36902260780334,
.35673499107361,
.36137464642525,
.38031083345413,
.47139286994934,
.47323387861252,
.60994738340378,
.69538277387619,
.7825602889061,
.84117436408997,
.9657689332962,
1.0109325647354,
.95897275209427,
.96013957262039,
.9461076259613,
.9342554807663,
.83413934707642,
.83968591690063,
.84437066316605,
.83330947160721,
.8990553021431,
.87949693202972,
.86297762393951,
.89407861232758,
.93536442518234,
1.0303052663803,
1.1104937791824,
1.1481873989105,
1.2851470708847,
1.4458787441254,
1.5515991449356,
1.7309991121292,
1.8975404500961,
1.8579913377762,
1.8846583366394,
1.9672524929047,
1.9469071626663,
2.0048115253448,
1.9786299467087,
1.8213576078415,
1.6284521818161,
1.7508568763733,
1.5689061880112,
1.2950873374939,
1.2290096282959,
1.1882168054581,
1.1537625789642,
1.1697143316269,
1.1681711673737,
1.106795668602,
1.0849931240082,
1.006507396698,
1.0453414916992,
.98803448677063,
.95465070009232,
1.0165599584579,
.67838954925537,
.69311982393265,
.69054269790649,
.76345545053482,
.84005492925644,
.87471830844879,
.91901183128357,
.92638796567917,
.96265280246735,
1.0083012580872,
1.0618740320206,
1.0921038389206,
1.2077431678772,
1.2303256988525,
1.174311041832,
1.3072115182877,
1.314337015152,
1.3503924608231,
1.5760731697083,
1.5264053344727,
1.34929728508,
1.304829955101,
1.2522557973862,
1.222869515419,
1.198047041893,
1.1770839691162,
1.1743944883347,
1.1571066379547,
1.1274864673615,
1.0574153661728,
1.058304309845,
.99898308515549,
.9789143204689,
1.0070173740387,
1.000718832016,
1.0104174613953,
1.0486439466476,
1.0058424472809,
.98470783233643,
1.0119106769562,
1.0649236440659,
1.0346088409424,
1.0540577173233,
1.0704846382141,
.97923594713211,
.90216588973999,
.9271782040596,
.85819715261459,
.75488126277924,
.78776079416275,
.77047789096832,
.77089905738831,
.8313245177269,
.82229107618332,
.90476810932159,
.94439232349396,
1.0379292964935,
1.1469690799713,
1.1489590406418,
1.2257302999496,
1.1554099321365,
1.1260533332825,
.9811190366745,
.8436843752861,
.95287209749222,
.90993344783783,
.94875508546829,
1.0115815401077,
.94450175762177,
.87282890081406,
.91741597652435,
.98511207103729,
.9972335100174,
1.0975805521011,
1.1823329925537,
1.1487929821014,
1.270641207695,
1.2083609104156,
1.696394443512,
1.4628355503082,
1.4307631254196,
1.5087975263596,
1.1542117595673,
1.2262620925903,
1.3880327939987,
1.3853038549423,
1.4396153688431,
1.7208145856857,
1.678991317749,
2.110867023468,
1.524417757988,
.57946246862411,
.56406193971634,
.74643105268478])
icstats = np.array([ 202,
np.nan,
-242.89663276735,
3,
491.79326553469,
501.7180686269])
class Bunch(dict):
    """A dict whose entries are also reachable as attributes.

    Assigning ``self.__dict__ = self`` makes attribute access and item
    access share the same storage, so ``b.key`` and ``b['key']`` are
    interchangeable.
    """
    def __init__(self, **kw):
        super(Bunch, self).__init__(**kw)
        self.__dict__ = self
# Bundle every result array into one attribute-accessible container.
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
| 34.407378 | 229 | 0.401278 | import numpy as np
llf = np.array([-242.89663276735])
nobs = np.array([ 202])
k = np.array([ 3])
k_exog = np.array([ 1])
sigma = np.array([ .8053519404535])
chi2 = np.array([ 15723.381396967])
df_model = np.array([ 2])
k_ar = np.array([ 1])
k_ma = np.array([ 1])
params = np.array([ .99479180506163,
-.84461527652809,
.64859174799221])
cov_params = np.array([ .00008904968254,
-.00023560410507,
.00012795903324,
-.00023560410507,
.00131628534915,
-.00022462340695,
.00012795903324,
-.00022462340695,
.0005651128627]).reshape(3,3)
xb = np.array([ 0,
0,
.02869686298072,
.05651443824172,
.0503994859755,
.06887971609831,
.05940540507436,
.08067328482866,
.08167565613985,
.06429278105497,
.07087650150061,
.06886467337608,
.06716959923506,
.08230647444725,
.07099691033363,
.08401278406382,
.07996553182602,
.07354256510735,
.09366323798895,
.08811800926924,
.10296355187893,
.08846370875835,
.0852297320962,
.08700425922871,
.09751411527395,
.09737934917212,
.11228405684233,
.1053489819169,
.12352022528648,
.16439816355705,
.1643835157156,
.19891132414341,
.17551273107529,
.17827558517456,
.19562774896622,
.21028305590153,
.23767858743668,
.24580039083958,
.28269505500793,
.29883882403374,
.31247469782829,
.35402658581734,
.37410452961922,
.39106267690659,
.42040377855301,
.44518512487411,
.43608102202415,
.44340893626213,
.44959822297096,
.40977239608765,
.42118826508522,
.40079545974731,
.38357082009315,
.36902260780334,
.35673499107361,
.36137464642525,
.38031083345413,
.47139286994934,
.47323387861252,
.60994738340378,
.69538277387619,
.7825602889061,
.84117436408997,
.9657689332962,
1.0109325647354,
.95897275209427,
.96013957262039,
.9461076259613,
.9342554807663,
.83413934707642,
.83968591690063,
.84437066316605,
.83330947160721,
.8990553021431,
.87949693202972,
.86297762393951,
.89407861232758,
.93536442518234,
1.0303052663803,
1.1104937791824,
1.1481873989105,
1.2851470708847,
1.4458787441254,
1.5515991449356,
1.7309991121292,
1.8975404500961,
1.8579913377762,
1.8846583366394,
1.9672524929047,
1.9469071626663,
2.0048115253448,
1.9786299467087,
1.8213576078415,
1.6284521818161,
1.7508568763733,
1.5689061880112,
1.2950873374939,
1.2290096282959,
1.1882168054581,
1.1537625789642,
1.1697143316269,
1.1681711673737,
1.106795668602,
1.0849931240082,
1.006507396698,
1.0453414916992,
.98803448677063,
.95465070009232,
1.0165599584579,
.67838954925537,
.69311982393265,
.69054269790649,
.76345545053482,
.84005492925644,
.87471830844879,
.91901183128357,
.92638796567917,
.96265280246735,
1.0083012580872,
1.0618740320206,
1.0921038389206,
1.2077431678772,
1.2303256988525,
1.174311041832,
1.3072115182877,
1.314337015152,
1.3503924608231,
1.5760731697083,
1.5264053344727,
1.34929728508,
1.304829955101,
1.2522557973862,
1.222869515419,
1.198047041893,
1.1770839691162,
1.1743944883347,
1.1571066379547,
1.1274864673615,
1.0574153661728,
1.058304309845,
.99898308515549,
.9789143204689,
1.0070173740387,
1.000718832016,
1.0104174613953,
1.0486439466476,
1.0058424472809,
.98470783233643,
1.0119106769562,
1.0649236440659,
1.0346088409424,
1.0540577173233,
1.0704846382141,
.97923594713211,
.90216588973999,
.9271782040596,
.85819715261459,
.75488126277924,
.78776079416275,
.77047789096832,
.77089905738831,
.8313245177269,
.82229107618332,
.90476810932159,
.94439232349396,
1.0379292964935,
1.1469690799713,
1.1489590406418,
1.2257302999496,
1.1554099321365,
1.1260533332825,
.9811190366745,
.8436843752861,
.95287209749222,
.90993344783783,
.94875508546829,
1.0115815401077,
.94450175762177,
.87282890081406,
.91741597652435,
.98511207103729,
.9972335100174,
1.0975805521011,
1.1823329925537,
1.1487929821014,
1.270641207695,
1.2083609104156,
1.696394443512,
1.4628355503082,
1.4307631254196,
1.5087975263596,
1.1542117595673,
1.2262620925903,
1.3880327939987,
1.3853038549423,
1.4396153688431,
1.7208145856857,
1.678991317749,
2.110867023468,
1.524417757988,
.57946246862411,
.56406193971634,
.74643105268478])
y = np.array([np.nan,
28.979999542236,
29.178695678711,
29.40651512146,
29.420400619507,
29.608880996704,
29.609405517578,
29.830673217773,
29.921676635742,
29.874292373657,
29.990877151489,
30.048864364624,
30.10717010498,
30.292304992676,
30.290996551514,
30.464012145996,
30.519966125488,
30.553541183472,
30.783664703369,
30.838117599487,
31.042964935303,
31.038463592529,
31.105230331421,
31.207004547119,
31.377513885498,
31.477378845215,
31.692283630371,
31.755348205566,
32.003520965576,
32.444396972656,
32.61438369751,
33.048908233643,
33.07551574707,
33.278274536133,
33.595630645752,
33.91028213501,
34.337677001953,
34.645801544189,
35.182697296143,
35.598838806152,
36.012474060059,
36.654026031494,
37.174102783203,
37.691062927246,
38.320404052734,
38.94518661499,
39.336082458496,
39.843410491943,
40.349597930908,
40.509769439697,
41.021186828613,
41.300796508789,
41.583572387695,
41.869022369385,
42.156734466553,
42.561374664307,
43.080310821533,
44.171394348145,
44.673233032227,
46.209945678711,
47.495380401611,
48.882556915283,
50.141174316406,
51.965770721436,
53.310932159424,
53.958972930908,
54.960140228271,
55.84610748291,
56.734252929688,
56.934139251709,
57.839687347412,
58.744373321533,
59.533309936523,
60.899055480957,
61.679496765137,
62.46297454834,
63.594078063965,
64.83536529541,
66.530303955078,
68.210494995117,
69.64818572998,
71.885147094727,
74.445877075195,
76.751594543457,
79.731002807617,
82.797538757324,
84.457992553711,
86.584655761719,
89.167251586914,
91.046905517578,
93.504814147949,
95.378631591797,
96.22135925293,
96.628448486328,
99.250854492188,
99.668907165527,
99.195091247559,
100.0290145874,
100.98822021484,
101.95376586914,
103.26971435547,
104.46817779541,
105.20679473877,
106.1849899292,
106.70650482178,
108.0453414917,
108.68803405762,
109.45465087891,
110.91656494141,
109.37838745117,
110.19312286377,
110.89054107666,
112.16345977783,
113.54005432129,
114.67472076416,
115.91901397705,
116.92639160156,
118.16265106201,
119.50830078125,
120.96187591553,
122.29209899902,
124.30773925781,
125.7303237915,
126.57431030273,
128.8072052002,
130.21432495117,
131.85038757324,
134.97607421875,
136.22640991211,
136.44931030273,
137.50482177734,
138.45225524902,
139.5228729248,
140.59803771973,
141.67707824707,
142.87438964844,
143.95710754395,
144.92749023438,
145.55741882324,
146.65830993652,
147.29898071289,
148.17890930176,
149.40701293945,
150.40071105957,
151.51042175293,
152.84864807129,
153.60585021973,
154.48471069336,
155.7119140625,
157.16493225098,
158.03460693359,
159.25405883789,
160.47047424316,
160.87922668457,
161.30215454102,
162.42718505859,
162.85820007324,
162.95487976074,
163.98776245117,
164.67047119141,
165.47090148926,
166.73132324219,
167.52229309082,
169.00477600098,
170.24440002441,
171.93792724609,
173.84696960449,
175.04895019531,
176.82572937012,
177.55540466309,
178.52604675293,
178.58113098145,
178.54368591309,
180.25286865234,
180.90992736816,
182.14875793457,
183.61158752441,
184.14450073242,
184.5728302002,
185.81741333008,
187.28511047363,
188.39723205566,
190.19758605957,
191.98233032227,
192.94879150391,
195.07064819336,
195.90835571289,
200.89639282227,
200.86282348633,
202.13075256348,
204.20880126953,
203.05419921875,
204.80026245117,
207.3080291748,
208.72329711914,
210.57261657715,
214.21580505371,
215.67597961426,
220.72087097168,
218.41342163086,
212.75346374512,
213.23506164551,
215.21542358398])
resid = np.array([np.nan,
.17000007629395,
.17130389809608,
-.03651398047805,
.11960058659315,
-.05888139456511,
.14059536159039,
.00932686589658,
-.11167634278536,
.04570783302188,
-.0108770346269,
-.00886330008507,
.10282856971025,
-.07230624556541,
.08900293707848,
-.0240114107728,
-.03996651992202,
.13645842671394,
-.03366377204657,
.10188252478838,
-.09296332299709,
-.01846401393414,
.01477065030485,
.0729955881834,
.00248436117545,
.10262141376734,
-.04228436201811,
.12465056031942,
.27647939324379,
.00560382334515,
.23561419546604,
-.1489082723856,
.02448422275484,
.12172746658325,
.10437148809433,
.18971465528011,
.06232447177172,
.25419962406158,
.11730266362429,
.10116269439459,
.2875237762928,
.14597341418266,
.12589547038078,
.20893961191177,
.17959471046925,
-.04518361017108,
.06391899287701,
.05659105628729,
-.24960128962994,
.09022761881351,
-.12118522822857,
-.10079623758793,
-.08357158303261,
-.06902338564396,
.04326653853059,
.13862533867359,
.61968916654587,
.02860714122653,
.92676383256912,
.59005337953568,
.60461646318436,
.41744044423103,
.85882639884949,
.33423033356667,
-.31093180179596,
.04102724045515,
-.06013804674149,
-.04610994458199,
-.63425624370575,
.06586220860481,
.06031560897827,
-.04437142238021,
.46668976545334,
-.09905604273081,
-.07949769496918,
.23702463507652,
.30592212080956,
.66463404893875,
.56969320774078,
.28950771689415,
.95181107521057,
1.1148544549942,
.75411820411682,
1.2484039068222,
1.1690024137497,
-.1975435167551,
.24200716614723,
.6153416633606,
-.06725100427866,
.45309436321259,
-.10480991750956,
-.97863000631332,
-1.2213591337204,
.8715478181839,
-1.1508584022522,
-1.7689031362534,
-.39508575201035,
-.22900961339474,
-.18821682035923,
.14623281359673,
.03029025532305,
-.36817568540573,
-.10679569840431,
-.48499462008476,
.29349562525749,
-.34534454345703,
-.18803144991398,
.44535079598427,
-2.2165644168854,
.12161350995302,
.00687709869817,
.50946187973022,
.53653997182846,
.25995117425919,
.32527860999107,
.08098815381527,
.27360898256302,
.33735024929047,
.39170032739639,
.23812144994736,
.80789774656296,
.19225835800171,
-.33032417297363,
.92568749189377,
.09278241544962,
.28566908836365,
1.5496014356613,
-.27607008814812,
-1.1263961791992,
-.24930645525455,
-.30482992529869,
-.15224970877171,
-.12287864089012,
-.09804095327854,
.02291300706565,
-.07438835501671,
-.15710659325123,
-.42748948931694,
.04259072244167,
-.35830733180046,
-.09898918122053,
.22108262777328,
-.00701736938208,
.0992873236537,
.28958559036255,
-.24864092469215,
-.10584850609303,
.21528913080692,
.38809850811958,
-.16492980718613,
.16538816690445,
.1459391862154,
-.57048463821411,
-.47923597693443,
.19784018397331,
-.4271782040596,
-.65820020437241,
.24511873722076,
-.0877638310194,
.02952514961362,
.42909786105156,
-.03132146969438,
.57771807909012,
.29522883892059,
.6555985212326,
.76207375526428,
.05302781611681,
.55105316638947,
-.42574247717857,
-.15540990233421,
-.92604118585587,
-.88112819194794,
.75632172822952,
-.25287514925003,
.29006350040436,
.45125409960747,
-.41159069538116,
-.44450175762177,
.32716807723045,
.48259317874908,
.11487878113985,
.70277869701385,
.60241633653641,
-.18233296275139,
.85120695829391,
-.37064728140831,
3.2916390895844,
-1.4963974952698,
-.16283248364925,
.56923681497574,
-2.3088004589081,
.51979947090149,
1.1197309494019,
.02996650896966,
.40969428420067,
1.9223841428757,
-.21881568431854,
2.9340152740479,
-3.8318600654602,
-6.239429473877,
-.08245316892862,
1.2339268922806,
1.1695692539215])
yr = np.array([np.nan,
.17000007629395,
.17130389809608,
-.03651398047805,
.11960058659315,
-.05888139456511,
.14059536159039,
.00932686589658,
-.11167634278536,
.04570783302188,
-.0108770346269,
-.00886330008507,
.10282856971025,
-.07230624556541,
.08900293707848,
-.0240114107728,
-.03996651992202,
.13645842671394,
-.03366377204657,
.10188252478838,
-.09296332299709,
-.01846401393414,
.01477065030485,
.0729955881834,
.00248436117545,
.10262141376734,
-.04228436201811,
.12465056031942,
.27647939324379,
.00560382334515,
.23561419546604,
-.1489082723856,
.02448422275484,
.12172746658325,
.10437148809433,
.18971465528011,
.06232447177172,
.25419962406158,
.11730266362429,
.10116269439459,
.2875237762928,
.14597341418266,
.12589547038078,
.20893961191177,
.17959471046925,
-.04518361017108,
.06391899287701,
.05659105628729,
-.24960128962994,
.09022761881351,
-.12118522822857,
-.10079623758793,
-.08357158303261,
-.06902338564396,
.04326653853059,
.13862533867359,
.61968916654587,
.02860714122653,
.92676383256912,
.59005337953568,
.60461646318436,
.41744044423103,
.85882639884949,
.33423033356667,
-.31093180179596,
.04102724045515,
-.06013804674149,
-.04610994458199,
-.63425624370575,
.06586220860481,
.06031560897827,
-.04437142238021,
.46668976545334,
-.09905604273081,
-.07949769496918,
.23702463507652,
.30592212080956,
.66463404893875,
.56969320774078,
.28950771689415,
.95181107521057,
1.1148544549942,
.75411820411682,
1.2484039068222,
1.1690024137497,
-.1975435167551,
.24200716614723,
.6153416633606,
-.06725100427866,
.45309436321259,
-.10480991750956,
-.97863000631332,
-1.2213591337204,
.8715478181839,
-1.1508584022522,
-1.7689031362534,
-.39508575201035,
-.22900961339474,
-.18821682035923,
.14623281359673,
.03029025532305,
-.36817568540573,
-.10679569840431,
-.48499462008476,
.29349562525749,
-.34534454345703,
-.18803144991398,
.44535079598427,
-2.2165644168854,
.12161350995302,
.00687709869817,
.50946187973022,
.53653997182846,
.25995117425919,
.32527860999107,
.08098815381527,
.27360898256302,
.33735024929047,
.39170032739639,
.23812144994736,
.80789774656296,
.19225835800171,
-.33032417297363,
.92568749189377,
.09278241544962,
.28566908836365,
1.5496014356613,
-.27607008814812,
-1.1263961791992,
-.24930645525455,
-.30482992529869,
-.15224970877171,
-.12287864089012,
-.09804095327854,
.02291300706565,
-.07438835501671,
-.15710659325123,
-.42748948931694,
.04259072244167,
-.35830733180046,
-.09898918122053,
.22108262777328,
-.00701736938208,
.0992873236537,
.28958559036255,
-.24864092469215,
-.10584850609303,
.21528913080692,
.38809850811958,
-.16492980718613,
.16538816690445,
.1459391862154,
-.57048463821411,
-.47923597693443,
.19784018397331,
-.4271782040596,
-.65820020437241,
.24511873722076,
-.0877638310194,
.02952514961362,
.42909786105156,
-.03132146969438,
.57771807909012,
.29522883892059,
.6555985212326,
.76207375526428,
.05302781611681,
.55105316638947,
-.42574247717857,
-.15540990233421,
-.92604118585587,
-.88112819194794,
.75632172822952,
-.25287514925003,
.29006350040436,
.45125409960747,
-.41159069538116,
-.44450175762177,
.32716807723045,
.48259317874908,
.11487878113985,
.70277869701385,
.60241633653641,
-.18233296275139,
.85120695829391,
-.37064728140831,
3.2916390895844,
-1.4963974952698,
-.16283248364925,
.56923681497574,
-2.3088004589081,
.51979947090149,
1.1197309494019,
.02996650896966,
.40969428420067,
1.9223841428757,
-.21881568431854,
2.9340152740479,
-3.8318600654602,
-6.239429473877,
-.08245316892862,
1.2339268922806,
1.1695692539215])
mse = np.array([ 1.1112809181213,
.6632194519043,
.65879660844803,
.65575885772705,
.65364873409271,
.65217137336731,
.65113133192062,
.6503963470459,
.64987552165985,
.64950579404831,
.64924287796021,
.64905577898026,
.64892256259918,
.64882761240005,
.64875996112823,
.64871168136597,
.64867728948593,
.64865279197693,
.64863526821136,
.64862281084061,
.64861387014389,
.64860755205154,
.64860302209854,
.64859980344772,
.64859747886658,
.64859586954117,
.64859467744827,
.64859384298325,
.6485932469368,
.64859282970428,
.64859253168106,
.64859229326248,
.64859211444855,
.64859199523926,
.64859193563461,
.64859187602997,
.64859187602997,
.64859181642532,
.64859181642532,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068])
stdp = np.array([ 0,
0,
.02869686298072,
.05651443824172,
.0503994859755,
.06887971609831,
.05940540507436,
.08067328482866,
.08167565613985,
.06429278105497,
.07087650150061,
.06886467337608,
.06716959923506,
.08230647444725,
.07099691033363,
.08401278406382,
.07996553182602,
.07354256510735,
.09366323798895,
.08811800926924,
.10296355187893,
.08846370875835,
.0852297320962,
.08700425922871,
.09751411527395,
.09737934917212,
.11228405684233,
.1053489819169,
.12352022528648,
.16439816355705,
.1643835157156,
.19891132414341,
.17551273107529,
.17827558517456,
.19562774896622,
.21028305590153,
.23767858743668,
.24580039083958,
.28269505500793,
.29883882403374,
.31247469782829,
.35402658581734,
.37410452961922,
.39106267690659,
.42040377855301,
.44518512487411,
.43608102202415,
.44340893626213,
.44959822297096,
.40977239608765,
.42118826508522,
.40079545974731,
.38357082009315,
.36902260780334,
.35673499107361,
.36137464642525,
.38031083345413,
.47139286994934,
.47323387861252,
.60994738340378,
.69538277387619,
.7825602889061,
.84117436408997,
.9657689332962,
1.0109325647354,
.95897275209427,
.96013957262039,
.9461076259613,
.9342554807663,
.83413934707642,
.83968591690063,
.84437066316605,
.83330947160721,
.8990553021431,
.87949693202972,
.86297762393951,
.89407861232758,
.93536442518234,
1.0303052663803,
1.1104937791824,
1.1481873989105,
1.2851470708847,
1.4458787441254,
1.5515991449356,
1.7309991121292,
1.8975404500961,
1.8579913377762,
1.8846583366394,
1.9672524929047,
1.9469071626663,
2.0048115253448,
1.9786299467087,
1.8213576078415,
1.6284521818161,
1.7508568763733,
1.5689061880112,
1.2950873374939,
1.2290096282959,
1.1882168054581,
1.1537625789642,
1.1697143316269,
1.1681711673737,
1.106795668602,
1.0849931240082,
1.006507396698,
1.0453414916992,
.98803448677063,
.95465070009232,
1.0165599584579,
.67838954925537,
.69311982393265,
.69054269790649,
.76345545053482,
.84005492925644,
.87471830844879,
.91901183128357,
.92638796567917,
.96265280246735,
1.0083012580872,
1.0618740320206,
1.0921038389206,
1.2077431678772,
1.2303256988525,
1.174311041832,
1.3072115182877,
1.314337015152,
1.3503924608231,
1.5760731697083,
1.5264053344727,
1.34929728508,
1.304829955101,
1.2522557973862,
1.222869515419,
1.198047041893,
1.1770839691162,
1.1743944883347,
1.1571066379547,
1.1274864673615,
1.0574153661728,
1.058304309845,
.99898308515549,
.9789143204689,
1.0070173740387,
1.000718832016,
1.0104174613953,
1.0486439466476,
1.0058424472809,
.98470783233643,
1.0119106769562,
1.0649236440659,
1.0346088409424,
1.0540577173233,
1.0704846382141,
.97923594713211,
.90216588973999,
.9271782040596,
.85819715261459,
.75488126277924,
.78776079416275,
.77047789096832,
.77089905738831,
.8313245177269,
.82229107618332,
.90476810932159,
.94439232349396,
1.0379292964935,
1.1469690799713,
1.1489590406418,
1.2257302999496,
1.1554099321365,
1.1260533332825,
.9811190366745,
.8436843752861,
.95287209749222,
.90993344783783,
.94875508546829,
1.0115815401077,
.94450175762177,
.87282890081406,
.91741597652435,
.98511207103729,
.9972335100174,
1.0975805521011,
1.1823329925537,
1.1487929821014,
1.270641207695,
1.2083609104156,
1.696394443512,
1.4628355503082,
1.4307631254196,
1.5087975263596,
1.1542117595673,
1.2262620925903,
1.3880327939987,
1.3853038549423,
1.4396153688431,
1.7208145856857,
1.678991317749,
2.110867023468,
1.524417757988,
.57946246862411,
.56406193971634,
.74643105268478])
icstats = np.array([ 202,
np.nan,
-242.89663276735,
3,
491.79326553469,
501.7180686269])
class Bunch(dict):
    """A dict whose entries are also reachable as attributes.

    Assigning ``self.__dict__ = self`` makes attribute access and item
    access share the same storage, so ``b.key`` and ``b['key']`` are
    interchangeable.
    """
    def __init__(self, **kw):
        super(Bunch, self).__init__(**kw)
        self.__dict__ = self
# Bundle every result array into one attribute-accessible container.
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
| true | true |
f7366a7972aa995d6c049ca89cf300ad3dc9460e | 1,234 | py | Python | script.py | DarkDevil1265/Malluunlimitedfilterbot | b8e8795e1c167a40070a9a698802122c461fcfda | [
"MIT"
] | 1 | 2021-09-19T17:49:37.000Z | 2021-09-19T17:49:37.000Z | script.py | DarkDevil1265/Malluunlimitedfilterbot | b8e8795e1c167a40070a9a698802122c461fcfda | [
"MIT"
] | null | null | null | script.py | DarkDevil1265/Malluunlimitedfilterbot | b8e8795e1c167a40070a9a698802122c461fcfda | [
"MIT"
] | 1 | 2021-09-20T08:20:14.000Z | 2021-09-20T08:20:14.000Z | class Script(object):
START_MSG = """<b>Hello {} How are you,
I'm an advanced filter bot with many capabilities!
Made by @RJMALLU
See <i>/help</i> for commands and more details.</b>
"""
HELP_MSG = """
<i>Add me as admin in your group and start filtering :)</i>
<b>Basic Commands;</b>
/start - Check if I'm alive!
/help - Command help
/about - Something about me!
<b>Filter Commands;</b>
<code>/add name reply</code> - Add filter for name
<code>/del name</code> - Delete filter
<code>/delall</code> - Delete entire filters (Group Owner Only!)
<code>/viewfilters</code> - List all filters in chat
<b>Connection Commands;</b>
<code>/connect groupid</code> - Connect your group to my PM. You can also simply use,
<code>/connect</code> in groups.
<code>/connections</code> - Manage your connections.
<b>Extras;</b>
/status - Shows current status of your bot (Auth User Only)
/id - Shows ID information
<code>/info userid</code> - Shows User Information. Use <code>/info</code> as reply to some message for their details!
<b>© @malluinstafollowers</b>
"""
ABOUT_MSG = """⭕️<b>My Name : RJ Filter Bot</b>
⭕️<b>Creater :</b> @RJMALLU
⭕️<b>Language :</b> <code>Python3</code>
"""
| 19.903226 | 120 | 0.658023 | class Script(object):
START_MSG = """<b>Hello {} How are you,
I'm an advanced filter bot with many capabilities!
Made by @RJMALLU
See <i>/help</i> for commands and more details.</b>
"""
HELP_MSG = """
<i>Add me as admin in your group and start filtering :)</i>
<b>Basic Commands;</b>
/start - Check if I'm alive!
/help - Command help
/about - Something about me!
<b>Filter Commands;</b>
<code>/add name reply</code> - Add filter for name
<code>/del name</code> - Delete filter
<code>/delall</code> - Delete entire filters (Group Owner Only!)
<code>/viewfilters</code> - List all filters in chat
<b>Connection Commands;</b>
<code>/connect groupid</code> - Connect your group to my PM. You can also simply use,
<code>/connect</code> in groups.
<code>/connections</code> - Manage your connections.
<b>Extras;</b>
/status - Shows current status of your bot (Auth User Only)
/id - Shows ID information
<code>/info userid</code> - Shows User Information. Use <code>/info</code> as reply to some message for their details!
<b>© @malluinstafollowers</b>
"""
ABOUT_MSG = """⭕️<b>My Name : RJ Filter Bot</b>
⭕️<b>Creater :</b> @RJMALLU
⭕️<b>Language :</b> <code>Python3</code>
"""
| true | true |
f7366b93b511f5c29ce318235f5766cca881d088 | 27,114 | py | Python | sphinx/writers/text.py | zzqcn/sphinx-doc | d7adc8efd7f71bb3f1633bf6cde19273fc95b977 | [
"BSD-2-Clause"
] | 2 | 2017-07-05T09:57:33.000Z | 2017-11-14T23:05:53.000Z | sphinx/writers/text.py | zzqcn/sphinx-doc | d7adc8efd7f71bb3f1633bf6cde19273fc95b977 | [
"BSD-2-Clause"
] | 1 | 2019-01-17T14:26:22.000Z | 2019-01-17T22:56:26.000Z | sphinx/writers/text.py | zzqcn/sphinx-doc | d7adc8efd7f71bb3f1633bf6cde19273fc95b977 | [
"BSD-2-Clause"
] | 1 | 2017-08-31T14:33:03.000Z | 2017-08-31T14:33:03.000Z | # -*- coding: utf-8 -*-
"""
sphinx.writers.text
~~~~~~~~~~~~~~~~~~~
Custom docutils writer for plain text.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import textwrap
from itertools import groupby
import warnings
from six.moves import zip_longest
from docutils import nodes, writers
from docutils.utils import column_width
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
class TextWrapper(textwrap.TextWrapper):
    """Custom subclass that uses a different word separator regex.

    It also overrides several :class:`textwrap.TextWrapper` internals so
    that line width is measured with ``docutils.utils.column_width``
    (wide/fullwidth characters count as two columns) instead of ``len()``.
    """

    wordsep_re = re.compile(
        r'(\s+|'                                  # any whitespace
        r'(?<=\s)(?::[a-z-]+:)?`\S+|'             # interpreted text start
        r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|'   # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash

    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        The original _wrap_chunks uses len() to calculate width.
        This method respects wide/fullwidth characters for width adjustment.
        """
        drop_whitespace = getattr(self, 'drop_whitespace', True)  # py25 compat
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)

        # Chunks are consumed from the end of the reversed list (pop()).
        chunks.reverse()
        while chunks:
            cur_line = []
            cur_len = 0
            # First output line uses initial_indent, later lines
            # subsequent_indent.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Columns left after the indent's *display* width.
            width = self.width - column_width(indent)
            # Drop leading whitespace on continuation lines.
            if drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            # Greedily take chunks while they still fit.
            while chunks:
                l = column_width(chunks[-1])
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                else:
                    break
            # Next chunk alone is wider than the line: break it apart.
            if chunks and column_width(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            # Drop trailing whitespace on the finished line.
            if drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines

    def _break_word(self, word, space_left):
        """_break_word(word : string, space_left : int) -> (string, string)

        Break line by unicode width instead of len(word).
        """
        total = 0
        for i, c in enumerate(word):
            total += column_width(c)
            if total > space_left:
                # Split one character before the position that overflowed.
                return word[:i - 1], word[i - 1:]
        return word, ''

    def _split(self, text):
        """_split(text : string) -> [string]

        Override original method that only split by 'wordsep_re'.
        This '_split' split wide-characters into chunk by one character.
        """
        def split(t):
            return textwrap.TextWrapper._split(self, t)
        chunks = []
        for chunk in split(text):
            # Group consecutive characters by display width; wide characters
            # become single-character chunks so they may wrap at any point.
            for w, g in groupby(chunk, column_width):
                if w == 1:
                    chunks.extend(split(''.join(g)))
                else:
                    chunks.extend(list(g))
        return chunks

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)

        Override original method for using self._break_word() instead of slice.
        """
        space_left = max(width - cur_len, 1)
        if self.break_long_words:
            # Put the width-fitting head on this line, push the tail back.
            l, r = self._break_word(reversed_chunks[-1], space_left)
            cur_line.append(l)
            reversed_chunks[-1] = r
        elif not cur_line:
            # Not allowed to break words: emit the whole chunk on its own line.
            cur_line.append(reversed_chunks.pop())
# Maximum column width for wrapped output text.
MAXWIDTH = 70
# Default indentation (in spaces) used by TextTranslator.new_state().
STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
    """Wrap *text* to *width* columns with the width-aware :class:`TextWrapper`.

    Extra keyword arguments are forwarded to the wrapper's constructor.
    Returns the list of wrapped lines.
    """
    return TextWrapper(width=width, **kwargs).wrap(text)
class TextWriter(writers.Writer):
    """Docutils writer that renders a document tree as plain text."""

    supported = ('text',)
    settings_spec = ('No options here.', '', ())
    settings_defaults = {}

    # Set by translate(); holds the final text of the document.
    output = None

    def __init__(self, builder):
        """Create a writer bound to the text *builder*."""
        writers.Writer.__init__(self)
        self.builder = builder
        # The builder may supply a custom translator class; fall back to
        # the default TextTranslator otherwise.
        self.translator_class = self.builder.translator_class or TextTranslator

    def translate(self):
        """Walk the document with the translator and collect its output."""
        visitor = self.translator_class(self.document, self.builder)
        self.document.walkabout(visitor)
        self.output = visitor.body
class TextTranslator(nodes.NodeVisitor):
sectionchars = '*=-~"+`'
    def __init__(self, document, builder):
        """Create a translator for *document* driven by the text *builder*."""
        nodes.NodeVisitor.__init__(self, document)
        self.builder = builder

        # Choose the newline sequence from the ``text_newlines`` config
        # value: 'windows' -> CRLF, 'native' -> host convention, else LF.
        newlines = builder.config.text_newlines
        if newlines == 'windows':
            self.nl = '\r\n'
        elif newlines == 'native':
            self.nl = os.linesep
        else:
            self.nl = '\n'
        # Underline characters for section titles, one per nesting level.
        self.sectionchars = builder.config.text_sectionchars
        # Stack of output states; each state collects (indent, text) items
        # (see add_text/new_state/end_state).
        self.states = [[]]
        # Parallel stack of per-state indent amounts.
        self.stateindent = [0]
        self.list_counter = []  # presumably list numbering state — not used in this chunk
        self.sectionlevel = 0
        self.lineblocklevel = 0
        self.table = None
    def add_text(self, text):
        # Indent value -1 marks raw text to be merged and wrapped later by
        # end_state(); preformatted items carry a real (>= 0) indent.
        self.states[-1].append((-1, text))

    def new_state(self, indent=STDINDENT):
        # Push a fresh output state, indented *indent* extra columns
        # relative to its parent state.
        self.states.append([])
        self.stateindent.append(indent)
    def end_state(self, wrap=True, end=[''], first=None):
        """Pop the current state and merge its formatted lines into the parent.

        :param wrap: if true, word-wrap accumulated raw text to
                     ``MAXWIDTH`` minus the summed indentation; otherwise
                     just split it on existing newlines.
        :param end: lines appended after each formatted run (default: one
                    empty line).  Note: this mutable default is read-only
                    here (``res += end`` mutates ``res``, not ``end``).
        :param first: optional prefix prepended to the first resulting
                      line (used e.g. for list bullets).
        """
        content = self.states.pop()
        maxindent = sum(self.stateindent)
        indent = self.stateindent.pop()
        result = []
        toformat = []

        def do_format():
            # Flush buffered raw text as one wrapped (or merely split) run.
            if not toformat:
                return
            if wrap:
                res = my_wrap(''.join(toformat), width=MAXWIDTH - maxindent)
            else:
                res = ''.join(toformat).splitlines()
            if end:
                res += end
            result.append((indent, res))
        for itemindent, item in content:
            if itemindent == -1:
                # Raw text from add_text(): buffer until the next
                # preformatted item forces a flush.
                toformat.append(item)
            else:
                do_format()
                result.append((indent + itemindent, item))
                toformat = []
        do_format()
        if first is not None and result:
            # Re-wrap the first run with *first* prepended; the first line
            # is stored with an indent relative to the parent state, the
            # remaining wrapped lines keep the original indent.
            itemindent, item = result[0]
            result_rest, result = result[1:], []
            if item:
                toformat = [first + ' '.join(item)]
                do_format()  # re-create `result` from `toformat`
                _dummy, new_item = result[0]
                result.insert(0, (itemindent - indent, [new_item[0]]))
                result[1] = (itemindent, new_item[1:])
                result.extend(result_rest)
        self.states[-1].extend(result)
    def visit_document(self, node):
        # Open the outermost state; depart_document assembles self.body
        # from it.
        self.new_state(0)

    def depart_document(self, node):
        self.end_state()
        # Join all formatted lines, prefixing each with its indent; empty
        # lines stay truly empty (no trailing spaces), thanks to the
        # short-circuiting ``line and ...``.
        self.body = self.nl.join(line and (' ' * indent + line)
                                 for indent, lines in self.states[0]
                                 for line in lines)
        # XXX header/footer?

    def visit_highlightlang(self, node):
        # Highlight-language directives produce no text output.
        raise nodes.SkipNode
    def visit_section(self, node):
        # Remember the title underline character for this nesting depth.
        self._title_char = self.sectionchars[self.sectionlevel]
        self.sectionlevel += 1

    def depart_section(self, node):
        self.sectionlevel -= 1

    def visit_topic(self, node):
        self.new_state(0)

    def depart_topic(self, node):
        self.end_state()

    # Sidebars are rendered exactly like topics.
    visit_sidebar = visit_topic
    depart_sidebar = depart_topic

    def visit_rubric(self, node):
        # Rubrics are rendered inline as "-[ text ]-".
        self.new_state(0)
        self.add_text('-[ ')

    def depart_rubric(self, node):
        self.add_text(' ]-')
        self.end_state()
    def visit_compound(self, node):
        # Compound paragraphs need no special markup in plain text.
        pass

    def depart_compound(self, node):
        pass

    def visit_glossary(self, node):
        # Glossaries render through their child definition lists.
        pass

    def depart_glossary(self, node):
        pass

    def visit_title(self, node):
        if isinstance(node.parent, nodes.Admonition):
            # Admonition titles become a simple "Label: " prefix.
            self.add_text(node.astext() + ': ')
            raise nodes.SkipNode
        self.new_state(0)

    def depart_title(self, node):
        # Section titles are underlined with the character chosen for
        # their level; other titles (e.g. in topics) use '^'.
        if isinstance(node.parent, nodes.section):
            char = self._title_char
        else:
            char = '^'
        # Collect raw text emitted since visit_title (items with indent -1).
        text = ''.join(x[1] for x in self.states.pop() if x[0] == -1)
        self.stateindent.pop()
        # Underline length matches the *display* width of the title.
        title = ['', text, '%s' % (char * column_width(text)), '']
        if len(self.states) == 2 and len(self.states[-1]) == 0:
            # remove an empty line before title if it is first section title in the document
            title.pop(0)
        self.states[-1].append((0, title))

    def visit_subtitle(self, node):
        pass

    def depart_subtitle(self, node):
        pass

    def visit_attribution(self, node):
        # Attribution (e.g. of a block quote) is prefixed with a dash.
        self.add_text('-- ')

    def depart_attribution(self, node):
        pass
def visit_desc(self, node):
pass
def depart_desc(self, node):
pass
def visit_desc_signature(self, node):
self.new_state(0)
def depart_desc_signature(self, node):
# XXX: wrap signatures in a way that makes sense
self.end_state(wrap=False, end=None)
def visit_desc_signature_line(self, node):
pass
def depart_desc_signature_line(self, node):
self.add_text('\n')
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.add_text(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_parameterlist(self, node):
self.add_text('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.add_text(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.add_text(', ')
else:
self.first_param = 0
self.add_text(node.astext())
raise nodes.SkipNode
def visit_desc_optional(self, node):
self.add_text('[')
def depart_desc_optional(self, node):
self.add_text(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.new_state()
self.add_text(self.nl)
def depart_desc_content(self, node):
self.end_state()
def visit_figure(self, node):
self.new_state()
def depart_figure(self, node):
self.end_state()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_productionlist(self, node):
self.new_state()
names = []
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
lastname = production['tokenname']
elif lastname is not None:
self.add_text('%s ' % (' ' * len(lastname)))
self.add_text(production.astext() + self.nl)
self.end_state(wrap=False)
raise nodes.SkipNode
def visit_footnote(self, node):
self._footnote = node.children[0].astext().strip()
self.new_state(len(self._footnote) + 3)
def depart_footnote(self, node):
self.end_state(first='[%s] ' % self._footnote)
def visit_citation(self, node):
if len(node) and isinstance(node[0], nodes.label):
self._citlabel = node[0].astext()
else:
self._citlabel = ''
self.new_state(len(self._citlabel) + 3)
def depart_citation(self, node):
self.end_state(first='[%s] ' % self._citlabel)
def visit_label(self, node):
raise nodes.SkipNode
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# XXX: option list could use some better styling
def visit_option_list(self, node):
pass
def depart_option_list(self, node):
pass
def visit_option_list_item(self, node):
self.new_state(0)
def depart_option_list_item(self, node):
self.end_state()
def visit_option_group(self, node):
self._firstoption = True
def depart_option_group(self, node):
self.add_text(' ')
def visit_option(self, node):
if self._firstoption:
self._firstoption = False
else:
self.add_text(', ')
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.add_text(node['delimiter'])
def depart_option_argument(self, node):
pass
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_colspec(self, node):
self.table[0].append(node['colwidth'])
raise nodes.SkipNode
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
pass
def depart_thead(self, node):
pass
def visit_tbody(self, node):
self.table.append('sep')
def depart_tbody(self, node):
pass
def visit_row(self, node):
self.table.append([])
def depart_row(self, node):
pass
def visit_entry(self, node):
if 'morerows' in node or 'morecols' in node:
raise NotImplementedError('Column or row spanning cells are '
'not implemented.')
self.new_state(0)
def depart_entry(self, node):
text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
self.stateindent.pop()
self.table[-1].append(text)
def visit_table(self, node):
if self.table:
raise NotImplementedError('Nested tables are not supported.')
self.new_state(0)
self.table = [[]]
    def depart_table(self, node):
        """Format the collected table as an ASCII grid and emit it.

        self.table holds [colwidths, row-or-'sep', ...]; cells are wrapped to
        their declared column width, then real widths are grown to fit the
        longest wrapped line before drawing the grid.
        """
        lines = self.table[1:]
        fmted_rows = []
        colwidths = self.table[0]
        # Start from the declared widths; widen per column as wrapped cell
        # content demands.
        realwidths = colwidths[:]
        separator = 0
        # don't allow paragraphs in table cells for now
        for line in lines:
            if line == 'sep':
                # Marker pushed by visit_tbody: index of the header/body split.
                separator = len(fmted_rows)
            else:
                cells = []
                for i, cell in enumerate(line):
                    par = my_wrap(cell, width=colwidths[i])
                    if par:
                        maxwidth = max(column_width(x) for x in par)
                    else:
                        maxwidth = 0
                    realwidths[i] = max(realwidths[i], maxwidth)
                    cells.append(par)
                fmted_rows.append(cells)
        def writesep(char='-'):
            # Emit a horizontal rule of the form +------+----+.
            out = ['+']
            for width in realwidths:
                out.append(char * (width + 2))
            out.append('+')
            self.add_text(''.join(out) + self.nl)
        def writerow(row):
            # Emit one logical row; zip_longest pads cells of unequal height
            # with None so shorter cells render as blank lines.
            lines = zip_longest(*row)
            for line in lines:
                out = ['|']
                for i, cell in enumerate(line):
                    if cell:
                        # ljust counts characters, not display columns;
                        # compensate for wide glyphs via column_width().
                        adjust_len = len(cell) - column_width(cell)
                        out.append(' ' + cell.ljust(
                            realwidths[i] + 1 + adjust_len))
                    else:
                        out.append(' ' * (realwidths[i] + 2))
                    out.append('|')
                self.add_text(''.join(out) + self.nl)
        for i, row in enumerate(fmted_rows):
            if separator and i == separator:
                # '=' rule separates header rows from the body.
                writesep('=')
            else:
                writesep('-')
            writerow(row)
        writesep('-')
        self.table = None
        self.end_state(wrap=False)
def visit_acks(self, node):
self.new_state(0)
self.add_text(', '.join(n.astext() for n in node.children[0].children) +
'.')
self.end_state()
raise nodes.SkipNode
    def visit_image(self, node):
        """Replace an image with a textual placeholder.

        NOTE(review): when alt text is present, both '[image: alt]' and
        '[image]' are emitted back to back -- confirm the doubling is
        intended rather than a missing early return.
        """
        if 'alt' in node.attributes:
            self.add_text(_('[image: %s]') % node['alt'])
        self.add_text(_('[image]'))
        raise nodes.SkipNode
def visit_transition(self, node):
indent = sum(self.stateindent)
self.new_state(0)
self.add_text('=' * (MAXWIDTH - indent))
self.end_state()
raise nodes.SkipNode
def visit_bullet_list(self, node):
self.list_counter.append(-1)
def depart_bullet_list(self, node):
self.list_counter.pop()
def visit_enumerated_list(self, node):
self.list_counter.append(node.get('start', 1) - 1)
def depart_enumerated_list(self, node):
self.list_counter.pop()
def visit_definition_list(self, node):
self.list_counter.append(-2)
def depart_definition_list(self, node):
self.list_counter.pop()
def visit_list_item(self, node):
if self.list_counter[-1] == -1:
# bullet list
self.new_state(2)
elif self.list_counter[-1] == -2:
# definition list
pass
else:
# enumerated list
self.list_counter[-1] += 1
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node):
if self.list_counter[-1] == -1:
self.end_state(first='* ')
elif self.list_counter[-1] == -2:
pass
else:
self.end_state(first='%s. ' % self.list_counter[-1])
def visit_definition_list_item(self, node):
self._classifier_count_in_li = len(node.traverse(nodes.classifier))
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.new_state(0)
def depart_term(self, node):
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_termsep(self, node):
warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
'This warning is displayed because some Sphinx extension '
'uses sphinx.addnodes.termsep. Please report it to '
'author of the extension.', RemovedInSphinx16Warning)
self.add_text(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.add_text(' : ')
def depart_classifier(self, node):
self._classifier_count_in_li -= 1
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_definition(self, node):
self.new_state()
def depart_definition(self, node):
self.end_state()
def visit_field_list(self, node):
pass
def depart_field_list(self, node):
pass
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_name(self, node):
self.new_state(0)
def depart_field_name(self, node):
self.add_text(':')
self.end_state(end=None)
def visit_field_body(self, node):
self.new_state()
def depart_field_body(self, node):
self.end_state()
def visit_centered(self, node):
pass
def depart_centered(self, node):
pass
def visit_hlist(self, node):
pass
def depart_hlist(self, node):
pass
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_admonition(self, node):
self.new_state(0)
def depart_admonition(self, node):
self.end_state()
def _visit_admonition(self, node):
self.new_state(2)
if isinstance(node.children[0], nodes.Sequential):
self.add_text(self.nl)
def _make_depart_admonition(name):
def depart_admonition(self, node):
self.end_state(first=admonitionlabels[name] + ': ')
return depart_admonition
visit_attention = _visit_admonition
depart_attention = _make_depart_admonition('attention')
visit_caution = _visit_admonition
depart_caution = _make_depart_admonition('caution')
visit_danger = _visit_admonition
depart_danger = _make_depart_admonition('danger')
visit_error = _visit_admonition
depart_error = _make_depart_admonition('error')
visit_hint = _visit_admonition
depart_hint = _make_depart_admonition('hint')
visit_important = _visit_admonition
depart_important = _make_depart_admonition('important')
visit_note = _visit_admonition
depart_note = _make_depart_admonition('note')
visit_tip = _visit_admonition
depart_tip = _make_depart_admonition('tip')
visit_warning = _visit_admonition
depart_warning = _make_depart_admonition('warning')
visit_seealso = _visit_admonition
depart_seealso = _make_depart_admonition('seealso')
def visit_versionmodified(self, node):
self.new_state(0)
def depart_versionmodified(self, node):
self.end_state()
def visit_literal_block(self, node):
self.new_state()
def depart_literal_block(self, node):
self.end_state(wrap=False)
def visit_doctest_block(self, node):
self.new_state(0)
def depart_doctest_block(self, node):
self.end_state(wrap=False)
def visit_line_block(self, node):
self.new_state()
self.lineblocklevel += 1
def depart_line_block(self, node):
self.lineblocklevel -= 1
self.end_state(wrap=False, end=None)
if not self.lineblocklevel:
self.add_text('\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.add_text('\n')
def visit_block_quote(self, node):
self.new_state()
def depart_block_quote(self, node):
self.end_state()
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
    def visit_paragraph(self, node):
        # Open a paragraph state unless the parent is an admonition (whose
        # body shares the admonition's state); 'seealso' nodes are exempt
        # from that rule and get a normal paragraph state.
        if not isinstance(node.parent, nodes.Admonition) or \
           isinstance(node.parent, addnodes.seealso):
            self.new_state(0)
    def depart_paragraph(self, node):
        # Mirror of visit_paragraph: only close a state if one was opened.
        if not isinstance(node.parent, nodes.Admonition) or \
           isinstance(node.parent, addnodes.seealso):
            self.end_state()
def visit_target(self, node):
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
def visit_reference(self, node):
pass
def depart_reference(self, node):
pass
def visit_number_reference(self, node):
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_emphasis(self, node):
self.add_text('*')
def depart_emphasis(self, node):
self.add_text('*')
def visit_literal_emphasis(self, node):
self.add_text('*')
def depart_literal_emphasis(self, node):
self.add_text('*')
def visit_strong(self, node):
self.add_text('**')
def depart_strong(self, node):
self.add_text('**')
def visit_literal_strong(self, node):
self.add_text('**')
def depart_literal_strong(self, node):
self.add_text('**')
def visit_abbreviation(self, node):
self.add_text('')
def depart_abbreviation(self, node):
if node.hasattr('explanation'):
self.add_text(' (%s)' % node['explanation'])
def visit_manpage(self, node):
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node):
self.add_text('*')
def depart_title_reference(self, node):
self.add_text('*')
def visit_literal(self, node):
self.add_text('"')
def depart_literal(self, node):
self.add_text('"')
def visit_subscript(self, node):
self.add_text('_')
def depart_subscript(self, node):
pass
def visit_superscript(self, node):
self.add_text('^')
def depart_superscript(self, node):
pass
def visit_footnote_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_citation_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_Text(self, node):
self.add_text(node.astext())
def depart_Text(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_inline(self, node):
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def depart_inline(self, node):
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_problematic(self, node):
self.add_text('>>')
def depart_problematic(self, node):
self.add_text('<<')
def visit_system_message(self, node):
self.new_state(0)
self.add_text('<SYSTEM MESSAGE: %s>' % node.astext())
self.end_state()
raise nodes.SkipNode
def visit_comment(self, node):
raise nodes.SkipNode
def visit_meta(self, node):
# only valid for HTML
raise nodes.SkipNode
def visit_raw(self, node):
if 'text' in node.get('format', '').split():
self.new_state(0)
self.add_text(node.astext())
self.end_state(wrap = False)
raise nodes.SkipNode
    def visit_math(self, node):
        """Warn and drop math nodes: this writer needs a math extension."""
        self.builder.warn('using "math" markup without a Sphinx math extension '
                          'active, please use one of the math extensions '
                          'described at http://sphinx-doc.org/ext/math.html',
                          (self.builder.current_docname, node.line))
        raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
| 27.667347 | 92 | 0.579996 |
import os
import re
import textwrap
from itertools import groupby
import warnings
from six.moves import zip_longest
from docutils import nodes, writers
from docutils.utils import column_width
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
class TextWrapper(textwrap.TextWrapper):
    """textwrap.TextWrapper variant that measures display columns, not chars.

    Uses docutils' column_width() so wide (e.g. East Asian) characters count
    as two columns when computing line widths and break points.
    """
    # Word-separator regex: whitespace; interpreted-text roles kept whole;
    # hyphenated compounds; runs of dashes treated as em-dashes.
    wordsep_re = re.compile(
        r'(\s+|'                                  # any whitespace
        r'(?<=\s)(?::[a-z-]+:)?`\S+|'             # interpreted text start
        r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|'   # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash
    def _wrap_chunks(self, chunks):
        """Assemble *chunks* into lines no wider than self.width columns.

        Consumes *chunks* in reverse (it is mutated) and returns the list of
        wrapped lines.  Follows the stock textwrap algorithm but measures
        with column_width().
        """
        drop_whitespace = getattr(self, 'drop_whitespace', True)  # py25 compat
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        chunks.reverse()
        while chunks:
            cur_line = []
            cur_len = 0
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Columns still available on this line after the indent.
            width = self.width - column_width(indent)
            if drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                l = column_width(chunks[-1])
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                else:
                    break
            # Next chunk alone is too wide for the line: break inside it.
            if chunks and column_width(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            if drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
    def _break_word(self, word, space_left):
        """Split *word* so its head fits within *space_left* columns.

        Returns (head, tail).  NOTE(review): when the very first character
        already overflows, ``i - 1`` is -1 and the slice wraps around --
        confirm callers guarantee space_left >= width of the first char.
        """
        total = 0
        for i, c in enumerate(word):
            total += column_width(c)
            if total > space_left:
                return word[:i - 1], word[i - 1:]
        return word, ''
    def _split(self, text):
        """Split *text* into chunks, isolating wide characters.

        Narrow (single-column) runs are re-split by the parent class; each
        wide character becomes its own chunk so lines may break between them.
        """
        def split(t):
            return textwrap.TextWrapper._split(self, t)
        chunks = []
        for chunk in split(text):
            for w, g in groupby(chunk, column_width):
                if w == 1:
                    # Narrow run: delegate to the stock splitter.
                    chunks.extend(split(''.join(g)))
                else:
                    # Wide characters are kept as individual chunks.
                    chunks.extend(list(g))
        return chunks
    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """Place an over-long chunk, breaking it if break_long_words allows."""
        space_left = max(width - cur_len, 1)
        if self.break_long_words:
            l, r = self._break_word(reversed_chunks[-1], space_left)
            cur_line.append(l)
            reversed_chunks[-1] = r
        elif not cur_line:
            # Cannot break: put the whole chunk on a line of its own.
            cur_line.append(reversed_chunks.pop())
MAXWIDTH = 70
STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
    """Wrap *text* to *width* display columns using the width-aware wrapper."""
    return TextWrapper(width=width, **kwargs).wrap(text)
class TextWriter(writers.Writer):
    """docutils Writer that renders a document tree to plain text."""
    supported = ('text',)
    settings_spec = ('No options here.', '', ())
    settings_defaults = {}
    # Result string; populated by translate().
    output = None
    def __init__(self, builder):
        writers.Writer.__init__(self)
        self.builder = builder
        # The builder may supply its own translator class; fall back to ours.
        self.translator_class = self.builder.translator_class or TextTranslator
    def translate(self):
        """Walk the document with the translator and store the text result."""
        visitor = self.translator_class(self.document, self.builder)
        self.document.walkabout(visitor)
        self.output = visitor.body
class TextTranslator(nodes.NodeVisitor):
sectionchars = '*=-~"+`'
    def __init__(self, document, builder):
        """Initialize translator state from the builder's text_* config."""
        nodes.NodeVisitor.__init__(self, document)
        self.builder = builder
        newlines = builder.config.text_newlines
        if newlines == 'windows':
            self.nl = '\r\n'
        elif newlines == 'native':
            self.nl = os.linesep
        else:
            self.nl = '\n'
        self.sectionchars = builder.config.text_sectionchars
        # Stack of output states; each state is a list of
        # (indent, text-or-lines) tuples (indent -1 marks raw text).
        self.states = [[]]
        # Parallel stack of per-state indentation amounts.
        self.stateindent = [0]
        # One entry per open list: -1 bullet, -2 definition, >=0 enumerated.
        self.list_counter = []
        self.sectionlevel = 0
        self.lineblocklevel = 0
        # Non-None only while inside a table: [colwidths, row-or-'sep', ...].
        self.table = None
def add_text(self, text):
self.states[-1].append((-1, text))
def new_state(self, indent=STDINDENT):
self.states.append([])
self.stateindent.append(indent)
    def end_state(self, wrap=True, end=[''], first=None):
        """Pop the current state and merge its content into the parent state.

        Raw text fragments are joined and, if *wrap* is true, wrapped to the
        remaining width; *end* lines are appended after each formatted run,
        and *first* (e.g. '* ') is spliced in front of the first line.

        NOTE(review): ``end=['']`` is a mutable default, but it is only read
        here, never mutated, so the usual shared-default pitfall is avoided.
        """
        content = self.states.pop()
        maxindent = sum(self.stateindent)
        indent = self.stateindent.pop()
        result = []
        toformat = []
        def do_format():
            # Flush accumulated raw text into result, wrapped if requested.
            if not toformat:
                return
            if wrap:
                res = my_wrap(''.join(toformat), width=MAXWIDTH - maxindent)
            else:
                res = ''.join(toformat).splitlines()
            if end:
                res += end
            result.append((indent, res))
        for itemindent, item in content:
            if itemindent == -1:
                # Raw text fragment (see add_text): batch until the next
                # pre-formatted sub-result interrupts the run.
                toformat.append(item)
            else:
                do_format()
                result.append((indent + itemindent, item))
                toformat = []
        do_format()
        if first is not None and result:
            # Splice *first* in front of the first wrapped line, keeping the
            # remaining lines of that run at the original indentation.
            itemindent, item = result[0]
            result_rest, result = result[1:], []
            if item:
                toformat = [first + ' '.join(item)]
                do_format()  # re-create `result` from `toformat`
                _dummy, new_item = result[0]
                result.insert(0, (itemindent - indent, [new_item[0]]))
                result[1] = (itemindent, new_item[1:])
                result.extend(result_rest)
        self.states[-1].extend(result)
def visit_document(self, node):
self.new_state(0)
def depart_document(self, node):
self.end_state()
self.body = self.nl.join(line and (' ' * indent + line)
for indent, lines in self.states[0]
for line in lines)
# XXX header/footer?
def visit_highlightlang(self, node):
raise nodes.SkipNode
def visit_section(self, node):
self._title_char = self.sectionchars[self.sectionlevel]
self.sectionlevel += 1
def depart_section(self, node):
self.sectionlevel -= 1
def visit_topic(self, node):
self.new_state(0)
def depart_topic(self, node):
self.end_state()
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_rubric(self, node):
self.new_state(0)
self.add_text('-[ ')
def depart_rubric(self, node):
self.add_text(' ]-')
self.end_state()
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_title(self, node):
if isinstance(node.parent, nodes.Admonition):
self.add_text(node.astext() + ': ')
raise nodes.SkipNode
self.new_state(0)
def depart_title(self, node):
if isinstance(node.parent, nodes.section):
char = self._title_char
else:
char = '^'
text = ''.join(x[1] for x in self.states.pop() if x[0] == -1)
self.stateindent.pop()
title = ['', text, '%s' % (char * column_width(text)), '']
if len(self.states) == 2 and len(self.states[-1]) == 0:
# remove an empty line before title if it is first section title in the document
title.pop(0)
self.states[-1].append((0, title))
def visit_subtitle(self, node):
pass
def depart_subtitle(self, node):
pass
def visit_attribution(self, node):
self.add_text('-- ')
def depart_attribution(self, node):
pass
def visit_desc(self, node):
pass
def depart_desc(self, node):
pass
def visit_desc_signature(self, node):
self.new_state(0)
def depart_desc_signature(self, node):
# XXX: wrap signatures in a way that makes sense
self.end_state(wrap=False, end=None)
def visit_desc_signature_line(self, node):
pass
def depart_desc_signature_line(self, node):
self.add_text('\n')
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.add_text(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_parameterlist(self, node):
self.add_text('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.add_text(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.add_text(', ')
else:
self.first_param = 0
self.add_text(node.astext())
raise nodes.SkipNode
def visit_desc_optional(self, node):
self.add_text('[')
def depart_desc_optional(self, node):
self.add_text(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.new_state()
self.add_text(self.nl)
def depart_desc_content(self, node):
self.end_state()
def visit_figure(self, node):
self.new_state()
def depart_figure(self, node):
self.end_state()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_productionlist(self, node):
self.new_state()
names = []
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
lastname = production['tokenname']
elif lastname is not None:
self.add_text('%s ' % (' ' * len(lastname)))
self.add_text(production.astext() + self.nl)
self.end_state(wrap=False)
raise nodes.SkipNode
def visit_footnote(self, node):
self._footnote = node.children[0].astext().strip()
self.new_state(len(self._footnote) + 3)
def depart_footnote(self, node):
self.end_state(first='[%s] ' % self._footnote)
def visit_citation(self, node):
if len(node) and isinstance(node[0], nodes.label):
self._citlabel = node[0].astext()
else:
self._citlabel = ''
self.new_state(len(self._citlabel) + 3)
def depart_citation(self, node):
self.end_state(first='[%s] ' % self._citlabel)
def visit_label(self, node):
raise nodes.SkipNode
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# XXX: option list could use some better styling
def visit_option_list(self, node):
pass
def depart_option_list(self, node):
pass
def visit_option_list_item(self, node):
self.new_state(0)
def depart_option_list_item(self, node):
self.end_state()
def visit_option_group(self, node):
self._firstoption = True
def depart_option_group(self, node):
self.add_text(' ')
def visit_option(self, node):
if self._firstoption:
self._firstoption = False
else:
self.add_text(', ')
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.add_text(node['delimiter'])
def depart_option_argument(self, node):
pass
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_colspec(self, node):
self.table[0].append(node['colwidth'])
raise nodes.SkipNode
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
pass
def depart_thead(self, node):
pass
def visit_tbody(self, node):
self.table.append('sep')
def depart_tbody(self, node):
pass
def visit_row(self, node):
self.table.append([])
def depart_row(self, node):
pass
def visit_entry(self, node):
if 'morerows' in node or 'morecols' in node:
raise NotImplementedError('Column or row spanning cells are '
'not implemented.')
self.new_state(0)
def depart_entry(self, node):
text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
self.stateindent.pop()
self.table[-1].append(text)
def visit_table(self, node):
if self.table:
raise NotImplementedError('Nested tables are not supported.')
self.new_state(0)
self.table = [[]]
def depart_table(self, node):
lines = self.table[1:]
fmted_rows = []
colwidths = self.table[0]
realwidths = colwidths[:]
separator = 0
# don't allow paragraphs in table cells for now
for line in lines:
if line == 'sep':
separator = len(fmted_rows)
else:
cells = []
for i, cell in enumerate(line):
par = my_wrap(cell, width=colwidths[i])
if par:
maxwidth = max(column_width(x) for x in par)
else:
maxwidth = 0
realwidths[i] = max(realwidths[i], maxwidth)
cells.append(par)
fmted_rows.append(cells)
def writesep(char='-'):
out = ['+']
for width in realwidths:
out.append(char * (width + 2))
out.append('+')
self.add_text(''.join(out) + self.nl)
def writerow(row):
lines = zip_longest(*row)
for line in lines:
out = ['|']
for i, cell in enumerate(line):
if cell:
adjust_len = len(cell) - column_width(cell)
out.append(' ' + cell.ljust(
realwidths[i] + 1 + adjust_len))
else:
out.append(' ' * (realwidths[i] + 2))
out.append('|')
self.add_text(''.join(out) + self.nl)
for i, row in enumerate(fmted_rows):
if separator and i == separator:
writesep('=')
else:
writesep('-')
writerow(row)
writesep('-')
self.table = None
self.end_state(wrap=False)
def visit_acks(self, node):
self.new_state(0)
self.add_text(', '.join(n.astext() for n in node.children[0].children) +
'.')
self.end_state()
raise nodes.SkipNode
def visit_image(self, node):
if 'alt' in node.attributes:
self.add_text(_('[image: %s]') % node['alt'])
self.add_text(_('[image]'))
raise nodes.SkipNode
def visit_transition(self, node):
indent = sum(self.stateindent)
self.new_state(0)
self.add_text('=' * (MAXWIDTH - indent))
self.end_state()
raise nodes.SkipNode
def visit_bullet_list(self, node):
self.list_counter.append(-1)
def depart_bullet_list(self, node):
self.list_counter.pop()
def visit_enumerated_list(self, node):
self.list_counter.append(node.get('start', 1) - 1)
def depart_enumerated_list(self, node):
self.list_counter.pop()
def visit_definition_list(self, node):
self.list_counter.append(-2)
def depart_definition_list(self, node):
self.list_counter.pop()
def visit_list_item(self, node):
if self.list_counter[-1] == -1:
self.new_state(2)
elif self.list_counter[-1] == -2:
pass
else:
self.list_counter[-1] += 1
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node):
if self.list_counter[-1] == -1:
self.end_state(first='* ')
elif self.list_counter[-1] == -2:
pass
else:
self.end_state(first='%s. ' % self.list_counter[-1])
def visit_definition_list_item(self, node):
self._classifier_count_in_li = len(node.traverse(nodes.classifier))
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.new_state(0)
def depart_term(self, node):
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_termsep(self, node):
warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
'This warning is displayed because some Sphinx extension '
'uses sphinx.addnodes.termsep. Please report it to '
'author of the extension.', RemovedInSphinx16Warning)
self.add_text(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.add_text(' : ')
def depart_classifier(self, node):
self._classifier_count_in_li -= 1
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_definition(self, node):
self.new_state()
def depart_definition(self, node):
self.end_state()
def visit_field_list(self, node):
pass
def depart_field_list(self, node):
pass
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_name(self, node):
self.new_state(0)
def depart_field_name(self, node):
self.add_text(':')
self.end_state(end=None)
def visit_field_body(self, node):
self.new_state()
def depart_field_body(self, node):
self.end_state()
def visit_centered(self, node):
pass
def depart_centered(self, node):
pass
def visit_hlist(self, node):
pass
def depart_hlist(self, node):
pass
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_admonition(self, node):
self.new_state(0)
def depart_admonition(self, node):
self.end_state()
def _visit_admonition(self, node):
self.new_state(2)
if isinstance(node.children[0], nodes.Sequential):
self.add_text(self.nl)
def _make_depart_admonition(name):
def depart_admonition(self, node):
self.end_state(first=admonitionlabels[name] + ': ')
return depart_admonition
visit_attention = _visit_admonition
depart_attention = _make_depart_admonition('attention')
visit_caution = _visit_admonition
depart_caution = _make_depart_admonition('caution')
visit_danger = _visit_admonition
depart_danger = _make_depart_admonition('danger')
visit_error = _visit_admonition
depart_error = _make_depart_admonition('error')
visit_hint = _visit_admonition
depart_hint = _make_depart_admonition('hint')
visit_important = _visit_admonition
depart_important = _make_depart_admonition('important')
visit_note = _visit_admonition
depart_note = _make_depart_admonition('note')
visit_tip = _visit_admonition
depart_tip = _make_depart_admonition('tip')
visit_warning = _visit_admonition
depart_warning = _make_depart_admonition('warning')
visit_seealso = _visit_admonition
depart_seealso = _make_depart_admonition('seealso')
def visit_versionmodified(self, node):
self.new_state(0)
def depart_versionmodified(self, node):
self.end_state()
def visit_literal_block(self, node):
self.new_state()
def depart_literal_block(self, node):
self.end_state(wrap=False)
def visit_doctest_block(self, node):
self.new_state(0)
def depart_doctest_block(self, node):
self.end_state(wrap=False)
def visit_line_block(self, node):
self.new_state()
self.lineblocklevel += 1
def depart_line_block(self, node):
self.lineblocklevel -= 1
self.end_state(wrap=False, end=None)
if not self.lineblocklevel:
self.add_text('\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.add_text('\n')
def visit_block_quote(self, node):
self.new_state()
def depart_block_quote(self, node):
self.end_state()
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_paragraph(self, node):
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.new_state(0)
def depart_paragraph(self, node):
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.end_state()
def visit_target(self, node):
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
def visit_reference(self, node):
pass
def depart_reference(self, node):
pass
def visit_number_reference(self, node):
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_emphasis(self, node):
self.add_text('*')
def depart_emphasis(self, node):
self.add_text('*')
def visit_literal_emphasis(self, node):
self.add_text('*')
def depart_literal_emphasis(self, node):
self.add_text('*')
def visit_strong(self, node):
self.add_text('**')
def depart_strong(self, node):
self.add_text('**')
def visit_literal_strong(self, node):
self.add_text('**')
def depart_literal_strong(self, node):
self.add_text('**')
def visit_abbreviation(self, node):
self.add_text('')
def depart_abbreviation(self, node):
if node.hasattr('explanation'):
self.add_text(' (%s)' % node['explanation'])
def visit_manpage(self, node):
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node):
self.add_text('*')
def depart_title_reference(self, node):
self.add_text('*')
def visit_literal(self, node):
self.add_text('"')
def depart_literal(self, node):
self.add_text('"')
def visit_subscript(self, node):
self.add_text('_')
def depart_subscript(self, node):
pass
def visit_superscript(self, node):
self.add_text('^')
def depart_superscript(self, node):
pass
def visit_footnote_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_citation_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_Text(self, node):
self.add_text(node.astext())
def depart_Text(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_inline(self, node):
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def depart_inline(self, node):
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_problematic(self, node):
self.add_text('>>')
def depart_problematic(self, node):
self.add_text('<<')
def visit_system_message(self, node):
self.new_state(0)
self.add_text('<SYSTEM MESSAGE: %s>' % node.astext())
self.end_state()
raise nodes.SkipNode
def visit_comment(self, node):
raise nodes.SkipNode
def visit_meta(self, node):
raise nodes.SkipNode
def visit_raw(self, node):
if 'text' in node.get('format', '').split():
self.new_state(0)
self.add_text(node.astext())
self.end_state(wrap = False)
raise nodes.SkipNode
def visit_math(self, node):
self.builder.warn('using "math" markup without a Sphinx math extension '
'active, please use one of the math extensions '
'described at http://sphinx-doc.org/ext/math.html',
(self.builder.current_docname, node.line))
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
| true | true |
f7366bb4541a785a76fcbf18c40fbef0d57f5292 | 8,605 | bzl | Python | scala/private/rules/scala_library.bzl | tian000/rules_scala | b3fc6a26db404ce904f98d525ae27a21d3f49f06 | [
"Apache-2.0"
] | null | null | null | scala/private/rules/scala_library.bzl | tian000/rules_scala | b3fc6a26db404ce904f98d525ae27a21d3f49f06 | [
"Apache-2.0"
] | null | null | null | scala/private/rules/scala_library.bzl | tian000/rules_scala | b3fc6a26db404ce904f98d525ae27a21d3f49f06 | [
"Apache-2.0"
] | null | null | null | load("@io_bazel_rules_scala//scala:providers.bzl", "create_scala_provider")
load(
"@io_bazel_rules_scala//scala/private:common.bzl",
"collect_jars",
"collect_srcjars",
"sanitize_string_for_usage",
"write_manifest",
)
load(
"@io_bazel_rules_scala//scala/private:common_attributes.bzl",
"common_attrs",
"common_attrs_for_plugin_bootstrapping",
"implicit_deps",
"resolve_deps",
)
load("@io_bazel_rules_scala//scala/private:common_outputs.bzl", "common_outputs")
load(
"@io_bazel_rules_scala//scala/private:coverage_replacements_provider.bzl",
_coverage_replacements_provider = "coverage_replacements_provider",
)
load(
"@io_bazel_rules_scala//scala/private:rule_impls.bzl",
"collect_jars_from_common_ctx",
"compile_or_empty",
"get_scalac_provider",
"get_unused_dependency_checker_mode",
"merge_jars",
"pack_source_jars",
)
##
# Common stuff to _library rules
##
_library_attrs = {
"main_class": attr.string(),
"exports": attr.label_list(
allow_files = False,
aspects = [_coverage_replacements_provider.aspect],
),
}
def _lib(
ctx,
base_classpath,
non_macro_lib,
unused_dependency_checker_mode,
unused_dependency_checker_ignored_targets):
# Build up information from dependency-like attributes
# This will be used to pick up srcjars from non-scala library
# targets (like thrift code generation)
srcjars = collect_srcjars(ctx.attr.deps)
unused_dependency_checker_is_off = unused_dependency_checker_mode == "off"
jars = collect_jars_from_common_ctx(
ctx,
base_classpath,
unused_dependency_checker_is_off = unused_dependency_checker_is_off,
)
(cjars, transitive_rjars) = (jars.compile_jars, jars.transitive_runtime_jars)
write_manifest(ctx)
outputs = compile_or_empty(
ctx,
ctx.outputs.manifest,
cjars,
srcjars,
non_macro_lib,
jars.transitive_compile_jars,
jars.jars2labels.jars_to_labels,
[],
unused_dependency_checker_ignored_targets = [
target.label
for target in base_classpath + ctx.attr.exports +
unused_dependency_checker_ignored_targets
],
unused_dependency_checker_mode = unused_dependency_checker_mode,
deps_providers = jars.deps_providers,
)
transitive_rjars = depset(outputs.full_jars, transitive = [transitive_rjars])
merge_jars(
actions = ctx.actions,
deploy_jar = ctx.outputs.deploy_jar,
singlejar_executable = ctx.executable._singlejar,
jars_list = transitive_rjars.to_list(),
main_class = getattr(ctx.attr, "main_class", ""),
progress_message = "Merging Scala library jar: %s" % ctx.label,
)
# Using transitive_files since transitive_rjars a depset and avoiding linearization
runfiles = ctx.runfiles(
transitive_files = transitive_rjars,
collect_data = True,
)
# Add information from exports (is key that AFTER all build actions/runfiles analysis)
# Since after, will not show up in deploy_jar or old jars runfiles
# Notice that compile_jars is intentionally transitive for exports
exports_jars = collect_jars(ctx.attr.exports)
transitive_rjars = depset(
transitive = [transitive_rjars, exports_jars.transitive_runtime_jars],
)
source_jars = pack_source_jars(ctx) + outputs.source_jars
scalaattr = create_scala_provider(
class_jar = outputs.class_jar,
compile_jars = depset(
outputs.ijars,
transitive = [exports_jars.compile_jars],
),
deploy_jar = ctx.outputs.deploy_jar,
full_jars = outputs.full_jars,
ijar = outputs.ijar,
source_jars = source_jars,
statsfile = ctx.outputs.statsfile,
transitive_runtime_jars = transitive_rjars,
)
return struct(
files = depset([ctx.outputs.jar] + outputs.full_jars), # Here is the default output
instrumented_files = outputs.coverage.instrumented_files,
jars_to_labels = jars.jars2labels,
providers = [outputs.merged_provider, jars.jars2labels] + outputs.coverage.providers,
runfiles = runfiles,
scala = scalaattr,
)
##
# scala_library
##
def _scala_library_impl(ctx):
if ctx.attr.jvm_flags:
print("'jvm_flags' for scala_library is deprecated. It does nothing today and will be removed from scala_library to avoid confusion.")
scalac_provider = get_scalac_provider(ctx)
unused_dependency_checker_mode = get_unused_dependency_checker_mode(ctx)
return _lib(
ctx,
scalac_provider.default_classpath,
True,
unused_dependency_checker_mode,
ctx.attr.unused_dependency_checker_ignored_targets,
)
_scala_library_attrs = {}
_scala_library_attrs.update(implicit_deps)
_scala_library_attrs.update(common_attrs)
_scala_library_attrs.update(_library_attrs)
_scala_library_attrs.update(resolve_deps)
scala_library = rule(
attrs = _scala_library_attrs,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_library_impl,
)
# Scala library suite generates a series of scala libraries
# then it depends on them with a meta one which exports all the sub targets
def scala_library_suite(
name,
srcs = [],
exports = [],
visibility = None,
**kwargs):
ts = []
for src_file in srcs:
n = "%s_lib_%s" % (name, sanitize_string_for_usage(src_file))
scala_library(
name = n,
srcs = [src_file],
visibility = visibility,
exports = exports,
unused_dependency_checker_mode = "off",
**kwargs
)
ts.append(n)
scala_library(
name = name,
visibility = visibility,
exports = exports + ts,
deps = ts,
)
##
# scala_library_for_plugin_bootstrapping
##
def _scala_library_for_plugin_bootstrapping_impl(ctx):
scalac_provider = get_scalac_provider(ctx)
return _lib(
ctx,
scalac_provider.default_classpath,
True,
unused_dependency_checker_ignored_targets = [],
unused_dependency_checker_mode = "off",
)
# the scala compiler plugin used for dependency analysis is compiled using `scala_library`.
# in order to avoid cyclic dependencies `scala_library_for_plugin_bootstrapping` was created for this purpose,
# which does not contain plugin related attributes, and thus avoids the cyclic dependency issue
_scala_library_for_plugin_bootstrapping_attrs = {}
_scala_library_for_plugin_bootstrapping_attrs.update(implicit_deps)
_scala_library_for_plugin_bootstrapping_attrs.update(_library_attrs)
_scala_library_for_plugin_bootstrapping_attrs.update(resolve_deps)
_scala_library_for_plugin_bootstrapping_attrs.update(
common_attrs_for_plugin_bootstrapping,
)
scala_library_for_plugin_bootstrapping = rule(
attrs = _scala_library_for_plugin_bootstrapping_attrs,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_library_for_plugin_bootstrapping_impl,
)
##
# scala_macro_library
##
def _scala_macro_library_impl(ctx):
scalac_provider = get_scalac_provider(ctx)
unused_dependency_checker_mode = get_unused_dependency_checker_mode(ctx)
return _lib(
ctx,
scalac_provider.default_macro_classpath,
False, # don't build the ijar for macros
unused_dependency_checker_mode,
ctx.attr.unused_dependency_checker_ignored_targets,
)
_scala_macro_library_attrs = {
"main_class": attr.string(),
"exports": attr.label_list(allow_files = False),
}
_scala_macro_library_attrs.update(implicit_deps)
_scala_macro_library_attrs.update(common_attrs)
_scala_macro_library_attrs.update(_library_attrs)
_scala_macro_library_attrs.update(resolve_deps)
# Set unused_dependency_checker_mode default to off for scala_macro_library
_scala_macro_library_attrs["unused_dependency_checker_mode"] = attr.string(
default = "off",
values = [
"warn",
"error",
"off",
"",
],
mandatory = False,
)
scala_macro_library = rule(
attrs = _scala_macro_library_attrs,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_macro_library_impl,
)
| 30.842294 | 142 | 0.708542 | load("@io_bazel_rules_scala//scala:providers.bzl", "create_scala_provider")
load(
"@io_bazel_rules_scala//scala/private:common.bzl",
"collect_jars",
"collect_srcjars",
"sanitize_string_for_usage",
"write_manifest",
)
load(
"@io_bazel_rules_scala//scala/private:common_attributes.bzl",
"common_attrs",
"common_attrs_for_plugin_bootstrapping",
"implicit_deps",
"resolve_deps",
)
load("@io_bazel_rules_scala//scala/private:common_outputs.bzl", "common_outputs")
load(
"@io_bazel_rules_scala//scala/private:coverage_replacements_provider.bzl",
_coverage_replacements_provider = "coverage_replacements_provider",
)
load(
"@io_bazel_rules_scala//scala/private:rule_impls.bzl",
"collect_jars_from_common_ctx",
"compile_or_empty",
"get_scalac_provider",
"get_unused_dependency_checker_mode",
"merge_jars",
"pack_source_jars",
)
_library_attrs = {
"main_class": attr.string(),
"exports": attr.label_list(
allow_files = False,
aspects = [_coverage_replacements_provider.aspect],
),
}
def _lib(
ctx,
base_classpath,
non_macro_lib,
unused_dependency_checker_mode,
unused_dependency_checker_ignored_targets):
srcjars = collect_srcjars(ctx.attr.deps)
unused_dependency_checker_is_off = unused_dependency_checker_mode == "off"
jars = collect_jars_from_common_ctx(
ctx,
base_classpath,
unused_dependency_checker_is_off = unused_dependency_checker_is_off,
)
(cjars, transitive_rjars) = (jars.compile_jars, jars.transitive_runtime_jars)
write_manifest(ctx)
outputs = compile_or_empty(
ctx,
ctx.outputs.manifest,
cjars,
srcjars,
non_macro_lib,
jars.transitive_compile_jars,
jars.jars2labels.jars_to_labels,
[],
unused_dependency_checker_ignored_targets = [
target.label
for target in base_classpath + ctx.attr.exports +
unused_dependency_checker_ignored_targets
],
unused_dependency_checker_mode = unused_dependency_checker_mode,
deps_providers = jars.deps_providers,
)
transitive_rjars = depset(outputs.full_jars, transitive = [transitive_rjars])
merge_jars(
actions = ctx.actions,
deploy_jar = ctx.outputs.deploy_jar,
singlejar_executable = ctx.executable._singlejar,
jars_list = transitive_rjars.to_list(),
main_class = getattr(ctx.attr, "main_class", ""),
progress_message = "Merging Scala library jar: %s" % ctx.label,
)
runfiles = ctx.runfiles(
transitive_files = transitive_rjars,
collect_data = True,
)
exports_jars = collect_jars(ctx.attr.exports)
transitive_rjars = depset(
transitive = [transitive_rjars, exports_jars.transitive_runtime_jars],
)
source_jars = pack_source_jars(ctx) + outputs.source_jars
scalaattr = create_scala_provider(
class_jar = outputs.class_jar,
compile_jars = depset(
outputs.ijars,
transitive = [exports_jars.compile_jars],
),
deploy_jar = ctx.outputs.deploy_jar,
full_jars = outputs.full_jars,
ijar = outputs.ijar,
source_jars = source_jars,
statsfile = ctx.outputs.statsfile,
transitive_runtime_jars = transitive_rjars,
)
return struct(
files = depset([ctx.outputs.jar] + outputs.full_jars),
instrumented_files = outputs.coverage.instrumented_files,
jars_to_labels = jars.jars2labels,
providers = [outputs.merged_provider, jars.jars2labels] + outputs.coverage.providers,
runfiles = runfiles,
scala = scalaattr,
)
def _scala_library_impl(ctx):
if ctx.attr.jvm_flags:
print("'jvm_flags' for scala_library is deprecated. It does nothing today and will be removed from scala_library to avoid confusion.")
scalac_provider = get_scalac_provider(ctx)
unused_dependency_checker_mode = get_unused_dependency_checker_mode(ctx)
return _lib(
ctx,
scalac_provider.default_classpath,
True,
unused_dependency_checker_mode,
ctx.attr.unused_dependency_checker_ignored_targets,
)
_scala_library_attrs = {}
_scala_library_attrs.update(implicit_deps)
_scala_library_attrs.update(common_attrs)
_scala_library_attrs.update(_library_attrs)
_scala_library_attrs.update(resolve_deps)
scala_library = rule(
attrs = _scala_library_attrs,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_library_impl,
)
def scala_library_suite(
name,
srcs = [],
exports = [],
visibility = None,
**kwargs):
ts = []
for src_file in srcs:
n = "%s_lib_%s" % (name, sanitize_string_for_usage(src_file))
scala_library(
name = n,
srcs = [src_file],
visibility = visibility,
exports = exports,
unused_dependency_checker_mode = "off",
**kwargs
)
ts.append(n)
scala_library(
name = name,
visibility = visibility,
exports = exports + ts,
deps = ts,
)
def _scala_library_for_plugin_bootstrapping_impl(ctx):
scalac_provider = get_scalac_provider(ctx)
return _lib(
ctx,
scalac_provider.default_classpath,
True,
unused_dependency_checker_ignored_targets = [],
unused_dependency_checker_mode = "off",
)
_scala_library_for_plugin_bootstrapping_attrs = {}
_scala_library_for_plugin_bootstrapping_attrs.update(implicit_deps)
_scala_library_for_plugin_bootstrapping_attrs.update(_library_attrs)
_scala_library_for_plugin_bootstrapping_attrs.update(resolve_deps)
_scala_library_for_plugin_bootstrapping_attrs.update(
common_attrs_for_plugin_bootstrapping,
)
scala_library_for_plugin_bootstrapping = rule(
attrs = _scala_library_for_plugin_bootstrapping_attrs,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_library_for_plugin_bootstrapping_impl,
)
def _scala_macro_library_impl(ctx):
scalac_provider = get_scalac_provider(ctx)
unused_dependency_checker_mode = get_unused_dependency_checker_mode(ctx)
return _lib(
ctx,
scalac_provider.default_macro_classpath,
False,
unused_dependency_checker_mode,
ctx.attr.unused_dependency_checker_ignored_targets,
)
_scala_macro_library_attrs = {
"main_class": attr.string(),
"exports": attr.label_list(allow_files = False),
}
_scala_macro_library_attrs.update(implicit_deps)
_scala_macro_library_attrs.update(common_attrs)
_scala_macro_library_attrs.update(_library_attrs)
_scala_macro_library_attrs.update(resolve_deps)
# Set unused_dependency_checker_mode default to off for scala_macro_library
_scala_macro_library_attrs["unused_dependency_checker_mode"] = attr.string(
default = "off",
values = [
"warn",
"error",
"off",
"",
],
mandatory = False,
)
scala_macro_library = rule(
attrs = _scala_macro_library_attrs,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_macro_library_impl,
)
| true | true |
f7366c0f1b8be7fbd52f8cb9f823f28077e8b0d4 | 2,736 | py | Python | test/nn/conv/test_film_conv.py | LingxiaoShawn/pytorch_geometric | 50b7bfc4a59b5b6f7ec547ff862985f3b2e22798 | [
"MIT"
] | 1 | 2022-02-21T13:23:19.000Z | 2022-02-21T13:23:19.000Z | test/nn/conv/test_film_conv.py | LingxiaoShawn/pytorch_geometric | 50b7bfc4a59b5b6f7ec547ff862985f3b2e22798 | [
"MIT"
] | null | null | null | test/nn/conv/test_film_conv.py | LingxiaoShawn/pytorch_geometric | 50b7bfc4a59b5b6f7ec547ff862985f3b2e22798 | [
"MIT"
] | null | null | null | import torch
from torch_sparse import SparseTensor
from torch_geometric.nn import FiLMConv
def test_film_conv():
x1 = torch.randn(4, 4)
x2 = torch.randn(2, 16)
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])
edge_type = torch.tensor([0, 1, 1, 0, 0, 1])
row, col = edge_index
adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4))
conv = FiLMConv(4, 32)
assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=1)'
out1 = conv(x1, edge_index)
assert out1.size() == (4, 32)
assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist()
t = '(Tensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, edge_index).tolist() == out1.tolist()
t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist()
conv = FiLMConv(4, 32, num_relations=2)
assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=2)'
out1 = conv(x1, edge_index, edge_type)
assert out1.size() == (4, 32)
assert conv(x1, adj.t()).tolist() == out1.tolist()
t = '(Tensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, edge_index, edge_type).tolist() == out1.tolist()
t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, adj.t()).tolist() == out1.tolist()
adj = adj.sparse_resize((4, 2))
conv = FiLMConv((4, 16), 32)
assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=1)'
out1 = conv((x1, x2), edge_index)
assert out1.size() == (2, 32)
assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()
t = '(PairTensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), edge_index).tolist() == out1.tolist()
t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()
conv = FiLMConv((4, 16), 32, num_relations=2)
assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=2)'
out1 = conv((x1, x2), edge_index, edge_type)
assert out1.size() == (2, 32)
assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
t = '(PairTensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist()
t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
| 40.235294 | 78 | 0.621711 | import torch
from torch_sparse import SparseTensor
from torch_geometric.nn import FiLMConv
def test_film_conv():
x1 = torch.randn(4, 4)
x2 = torch.randn(2, 16)
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])
edge_type = torch.tensor([0, 1, 1, 0, 0, 1])
row, col = edge_index
adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4))
conv = FiLMConv(4, 32)
assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=1)'
out1 = conv(x1, edge_index)
assert out1.size() == (4, 32)
assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist()
t = '(Tensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, edge_index).tolist() == out1.tolist()
t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist()
conv = FiLMConv(4, 32, num_relations=2)
assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=2)'
out1 = conv(x1, edge_index, edge_type)
assert out1.size() == (4, 32)
assert conv(x1, adj.t()).tolist() == out1.tolist()
t = '(Tensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, edge_index, edge_type).tolist() == out1.tolist()
t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit(x1, adj.t()).tolist() == out1.tolist()
adj = adj.sparse_resize((4, 2))
conv = FiLMConv((4, 16), 32)
assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=1)'
out1 = conv((x1, x2), edge_index)
assert out1.size() == (2, 32)
assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()
t = '(PairTensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), edge_index).tolist() == out1.tolist()
t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()
conv = FiLMConv((4, 16), 32, num_relations=2)
assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=2)'
out1 = conv((x1, x2), edge_index, edge_type)
assert out1.size() == (2, 32)
assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
t = '(PairTensor, Tensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist()
t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
| true | true |
f7366dd76a0f042bd795bb1d0879f0ea7217a5ad | 10,253 | py | Python | qa/rpc-tests/listtransactions.py | unifycoin/unifycoin | 7d0d5245610daab81e8b124c9b4dc03a73020b8f | [
"MIT"
] | null | null | null | qa/rpc-tests/listtransactions.py | unifycoin/unifycoin | 7d0d5245610daab81e8b124c9b4dc03a73020b8f | [
"MIT"
] | null | null | null | qa/rpc-tests/listtransactions.py | unifycoin/unifycoin | 7d0d5245610daab81e8b124c9b4dc03a73020b8f | [
"MIT"
] | 2 | 2019-06-28T12:47:30.000Z | 2019-12-16T04:56:50.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
#Unifycoin: Disabled RBF
#self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| 49.771845 | 113 | 0.598752 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
enable_mocktime()
return start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
def run_rbf_opt_in_test(self):
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN)
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| true | true |
f7366fb0623d365f2c6a0afdb0e6e6f704b8c08b | 431 | py | Python | output/models/ms_data/element/elem_z003_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/element/elem_z003_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/element/elem_z003_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.element.elem_z003_xsd.elem_z003 import (
AType,
BType,
Container1,
CType,
DType,
EType,
FType,
GType,
A,
B,
C,
Container,
D,
E,
F,
G,
)
__all__ = [
"AType",
"BType",
"Container1",
"CType",
"DType",
"EType",
"FType",
"GType",
"A",
"B",
"C",
"Container",
"D",
"E",
"F",
"G",
]
| 11.342105 | 67 | 0.433875 | from output.models.ms_data.element.elem_z003_xsd.elem_z003 import (
AType,
BType,
Container1,
CType,
DType,
EType,
FType,
GType,
A,
B,
C,
Container,
D,
E,
F,
G,
)
__all__ = [
"AType",
"BType",
"Container1",
"CType",
"DType",
"EType",
"FType",
"GType",
"A",
"B",
"C",
"Container",
"D",
"E",
"F",
"G",
]
| true | true |
f7366fb79cc98cc8da4a20593adf754830266e3b | 6,539 | py | Python | bibpy/lexers/base_lexer.py | MisanthropicBit/bibpy | 3195790105544672f622ed893213a627b5280f2b | [
"BSD-3-Clause"
] | 1 | 2021-08-18T13:17:10.000Z | 2021-08-18T13:17:10.000Z | bibpy/lexers/base_lexer.py | MisanthropicBit/bibpy | 3195790105544672f622ed893213a627b5280f2b | [
"BSD-3-Clause"
] | 7 | 2018-02-18T12:29:20.000Z | 2020-05-14T18:08:48.000Z | bibpy/lexers/base_lexer.py | MisanthropicBit/bibpy | 3195790105544672f622ed893213a627b5280f2b | [
"BSD-3-Clause"
] | 3 | 2018-02-17T18:27:43.000Z | 2022-01-20T02:28:58.000Z | # -*- coding: utf-8 -*-
"""Base class for all lexers."""
import re
from funcparserlib.lexer import Token
class LexerError(Exception):
"""General lexer error."""
def __init__(self, msg, pos, char, lnum, brace_level, line):
"""Initialise with information on where the error occurred."""
self.msg = msg
self.pos = pos
self.char = char
self.lnum = lnum
self.brace_level = brace_level
self.line = line
def __str__(self):
return "Failed at line {0}, char '{1}', position {2}, "\
"brace level {3}: {4} (line: '{5}')"\
.format(
self.lnum,
self.char,
self.pos,
self.brace_level,
self.msg,
self.line,
)
class BaseLexer:
"""Base class for all bibpy lexers."""
def __init__(self):
"""Initialise the lexer."""
self._modes = {}
self._patterns = None
def reset(self, string):
"""Reset the internal state of the lexer."""
self.pos = 0
self.lastpos = 0
self.maxpos = len(string)
self.char = 1
self.lnum = 1
self.last_lnum = 1
self.brace_level = 0
self.ignore_whitespace = False
self.string = string
def _compile_regexes(self, patterns):
"""Compile a set of patterns into regular expressions."""
# Save a copy of the patterns that respects the order. We could also
# use a collections.OrderedDict, but this actually affected performance
# ever so slighty
self._iter_patterns = [
(name, (re.compile(pattern), f)) for name, (pattern, f) in patterns
]
# This is used for lookups
self._patterns = dict(self._iter_patterns)
@property
def patterns(self):
"""All patterns recognised by the lexer."""
return self._patterns
@property
def mode(self):
"""Return the current mode of the lexer."""
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
@property
def modes(self):
"""Return all modes that the lexer has."""
return self._modes
@property
def eos(self):
"""Return True if we have reached the end of the string."""
return self.pos >= self.maxpos
@property
def current_char(self):
"""Return the current character or None if no such character."""
if self.string and self.pos >= 0 and not self.eos:
return self.string[self.pos]
return None
def advance(self, match):
"""Advance the internal state based on a successful match."""
self.lastpos = self.pos
self.last_lnum = self.lnum
matched = match.group(0)
newlines = matched.count('\n')
self.pos = match.start(0) + len(matched)
self.lnum += newlines
if newlines == 0:
self.char += len(matched)
else:
self.char = len(matched) - matched.rfind('\n') - 1
def raise_error(self, msg):
"""Raise a lexer error with the given message."""
errline = self.string.splitlines()[self.lnum - 1]
raise LexerError(
msg, self.pos, self.char, self.lnum, self.brace_level, errline
)
def raise_unexpected(self, token):
"""Raise an error for an unexpected token."""
self.raise_error("Did not find expected token '{0}'".format(token))
def raise_unbalanced(self):
"""Raise an error for unbalanced braces."""
self.raise_error('Unbalanced braces')
def expect(self, token, strip_whitespace=True):
"""Expect a token, fail otherwise."""
pattern, _ = self.patterns[token]
m = pattern.search(self.string, self.pos)
if not m:
self.raise_unexpected(token)
self.advance(m)
token_value = m.group(0)
if self.ignore_whitespace:
token_value = token_value.strip()
return self.make_token(token, token_value)
def until(self, token):
"""Scan until a particular token is found.
Return the part of the string that was scanned past and the string
value of the token. The latter is the entire rest of the string if the
token was not found.
"""
if token == 'braces':
pattern = re.compile(r'{|}')
elif token == 'parens':
pattern = re.compile(r'\(|\)')
else:
pattern, _ = self.patterns[token]
m = pattern.search(self.string, self.pos)
if m:
scanned = m.group(0)
self.advance(m)
return self.string[self.lastpos:self.pos - 1], scanned
else:
rest = self.string[self.pos:]
self.pos = len(self.string)
return rest, ''
def make_token(self, token_type, value):
"""Create a token type with a value."""
return Token(
token_type,
value,
(self.last_lnum, self.lastpos),
(self.lnum, self.pos)
)
def lex_string(self, value):
"""Lex a string and return a single token for it."""
return self.make_token('string', value)
def scan(self, search_type='search'):
"""Scan until any token recognised by this lexer is found.
Return the part of the string that was scanned past and the token
itself. The latter is the entire rest of the string if the token was
not found.
"""
for token_type, (pattern, handler) in self._iter_patterns:
# Not the most elegant but re.Pattern only exists in Python 3.7+ so
# we cannot pass the method as an argument
m = getattr(pattern, search_type)(self.string, self.pos)
if m:
self.advance(m)
value = m.group(0)
if self.ignore_whitespace and token_type == 'space':
break
token = handler(value) if handler else\
self.make_token(token_type, value)
yield self.string[self.lastpos:self.pos - len(value)], token
break
else:
rest = self.string[self.pos:]
self.pos = len(self.string)
yield rest, None
def lex(self, string):
"""Lex a string and generate tokens."""
self.reset(string)
while not self.eos:
yield from self.modes[self.mode]()
| 29.32287 | 79 | 0.562777 |
import re
from funcparserlib.lexer import Token
class LexerError(Exception):
def __init__(self, msg, pos, char, lnum, brace_level, line):
self.msg = msg
self.pos = pos
self.char = char
self.lnum = lnum
self.brace_level = brace_level
self.line = line
def __str__(self):
return "Failed at line {0}, char '{1}', position {2}, "\
"brace level {3}: {4} (line: '{5}')"\
.format(
self.lnum,
self.char,
self.pos,
self.brace_level,
self.msg,
self.line,
)
class BaseLexer:
def __init__(self):
self._modes = {}
self._patterns = None
def reset(self, string):
self.pos = 0
self.lastpos = 0
self.maxpos = len(string)
self.char = 1
self.lnum = 1
self.last_lnum = 1
self.brace_level = 0
self.ignore_whitespace = False
self.string = string
def _compile_regexes(self, patterns):
self._iter_patterns = [
(name, (re.compile(pattern), f)) for name, (pattern, f) in patterns
]
self._patterns = dict(self._iter_patterns)
@property
def patterns(self):
return self._patterns
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
@property
def modes(self):
return self._modes
@property
def eos(self):
return self.pos >= self.maxpos
@property
def current_char(self):
if self.string and self.pos >= 0 and not self.eos:
return self.string[self.pos]
return None
def advance(self, match):
self.lastpos = self.pos
self.last_lnum = self.lnum
matched = match.group(0)
newlines = matched.count('\n')
self.pos = match.start(0) + len(matched)
self.lnum += newlines
if newlines == 0:
self.char += len(matched)
else:
self.char = len(matched) - matched.rfind('\n') - 1
def raise_error(self, msg):
errline = self.string.splitlines()[self.lnum - 1]
raise LexerError(
msg, self.pos, self.char, self.lnum, self.brace_level, errline
)
def raise_unexpected(self, token):
self.raise_error("Did not find expected token '{0}'".format(token))
def raise_unbalanced(self):
self.raise_error('Unbalanced braces')
def expect(self, token, strip_whitespace=True):
pattern, _ = self.patterns[token]
m = pattern.search(self.string, self.pos)
if not m:
self.raise_unexpected(token)
self.advance(m)
token_value = m.group(0)
if self.ignore_whitespace:
token_value = token_value.strip()
return self.make_token(token, token_value)
def until(self, token):
if token == 'braces':
pattern = re.compile(r'{|}')
elif token == 'parens':
pattern = re.compile(r'\(|\)')
else:
pattern, _ = self.patterns[token]
m = pattern.search(self.string, self.pos)
if m:
scanned = m.group(0)
self.advance(m)
return self.string[self.lastpos:self.pos - 1], scanned
else:
rest = self.string[self.pos:]
self.pos = len(self.string)
return rest, ''
def make_token(self, token_type, value):
return Token(
token_type,
value,
(self.last_lnum, self.lastpos),
(self.lnum, self.pos)
)
def lex_string(self, value):
return self.make_token('string', value)
def scan(self, search_type='search'):
for token_type, (pattern, handler) in self._iter_patterns:
m = getattr(pattern, search_type)(self.string, self.pos)
if m:
self.advance(m)
value = m.group(0)
if self.ignore_whitespace and token_type == 'space':
break
token = handler(value) if handler else\
self.make_token(token_type, value)
yield self.string[self.lastpos:self.pos - len(value)], token
break
else:
rest = self.string[self.pos:]
self.pos = len(self.string)
yield rest, None
def lex(self, string):
self.reset(string)
while not self.eos:
yield from self.modes[self.mode]()
| true | true |
f736700ef45c4a87f90b99dc2b55cbaf2bc3c59d | 7,834 | py | Python | main/pronouns.py | LucilleN/MH-Stigma-in-Masked-LMs | 9e30817cb32358a8e6004faf7ef88ad69d6148f6 | [
"MIT"
] | 1 | 2022-03-08T04:45:49.000Z | 2022-03-08T04:45:49.000Z | main/pronouns.py | LucilleN/MH-Stigma-in-Masked-LMs | 9e30817cb32358a8e6004faf7ef88ad69d6148f6 | [
"MIT"
] | null | null | null | main/pronouns.py | LucilleN/MH-Stigma-in-Masked-LMs | 9e30817cb32358a8e6004faf7ef88ad69d6148f6 | [
"MIT"
] | null | null | null | # Comparing He vs She
from transformers import pipeline
import pandas as pd
from utils import diagnoses, get_target_probability, get_top_k, print_stats, plot_male_and_female
import matplotlib.pylab as plt
import seaborn as sns
import argparse
templates = [
"<mask> has [diagnosis]",
"<mask> is diagnosed with [diagnosis]",
"<mask> struggles with [diagnosis]",
"<mask> suffers from [diagnosis]",
"<mask> is in recovery from [diagnosis]",
"<mask> is in treatment for [diagnosis]",
"<mask> is being treated for [diagnosis]",
"<mask> sees a psychiatrist for [diagnosis]",
"<mask> sees a therapist for [diagnosis]",
"<mask> is going to therapy for [diagnosis]",
"<mask> takes medication for [diagnosis]"
]
# templates = [
# "[MASK] has [diagnosis]",
# "[MASK] is diagnosed with [diagnosis]",
# "[MASK] struggles with [diagnosis]",
# "[MASK] suffers from [diagnosis]",
# "[MASK] is in recovery from [diagnosis]",
# "[MASK] is in treatment for [diagnosis]",
# "[MASK] is being treated for [diagnosis]",
# "[MASK] sees a psychiatrist for [diagnosis]",
# "[MASK] sees a therapist for [diagnosis]",
# "[MASK] is going to therapy for [diagnosis]",
# "[MASK] takes medication for [diagnosis]"
# ]
# Aggregates for the following She/He experiments
TOP_K = 20
female_untreated, female_treated, female_total = {}, {}, {}
male_untreated, male_treated, male_total = {}, {}, {}
all_df = None
def update_aggregates(male_mean, female_mean, template, treated=False):
female_total[template] = female_mean
male_total[template] = male_mean
if treated:
female_treated[template] = female_mean
male_treated[template] = male_mean
else:
female_untreated[template] = female_mean
male_untreated[template] = male_mean
def add_to_df(male, female, template): # , current_df=None):
global all_df
new_add = pd.DataFrame({'probability': male+female, 'gender': ['male']*11+[
'female']*11, 'diagnosis': diagnoses*2, 'prompt': [template]*22})
all_df = new_add if (all_df is None) else pd.concat([all_df, new_add])
def run_experiment(template):
male_mask = "He"
female_mask = "She"
print(f"TOP {TOP_K} OUTPUTS FOR THE TEMPLATE {template}")
print(get_top_k(template, nlp_fill, TOP_K))
female_outputs = get_target_probability(template, female_mask, nlp_fill)
female_scores = [element['score'] for element in female_outputs]
print("FEMALE SCORES:")
print(female_scores)
male_outputs = get_target_probability(template, male_mask, nlp_fill)
male_scores = [element['score'] for element in male_outputs]
male_mean, female_mean = print_stats(male=male_scores, female=female_scores)
if args.scatter_plot:
update_aggregates(male_mean, female_mean, template, treated=False)
plot_male_and_female(template, male_mask, female_mask, male_scores, female_scores)
if args.box_plot:
add_to_df(male_scores, female_scores, template)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage="To run all experiments, execute this script without any additional arguments. \
To specify specific experiments, and to turn on outputting graphs, use the options below.")
parser.add_argument("-exp0", "--has",
help="Run experiment 0: She/He has X.", action="store_true")
parser.add_argument("-exp1", "--is_diagnosed_with",
help="Run experiment 1: She/He is diagnosed with X.", action="store_true")
parser.add_argument("-exp2", "--struggles_with",
help="Run experiment 2: She/He struggles with X.", action="store_true")
parser.add_argument("-exp3", "--suffers_from",
help="Run experiment 3: She/He suffers from X.", action="store_true")
parser.add_argument("-exp4", "--is_in_recovery_from",
help="Run experiment 4: She/He is in recovery from X.", action="store_true")
parser.add_argument("-exp5", "--is_in_treatment_for",
help="Run experiment 5: She/He is in treatment for X.", action="store_true")
parser.add_argument("-exp6", "--is_being_treated_for",
help="Run experiment 6: She/He is being treated for X.", action="store_true")
parser.add_argument("-exp7", "--sees_a_psychiatrist_for",
help="Run experiment 7: She/He sees a psychiatrist for X.", action="store_true")
parser.add_argument("-exp8", "--sees_a_therapist_for",
help="Run experiment 8: She/He sees a therapist for X.", action="store_true")
parser.add_argument("-exp9", "--is_going_to_therapy_for",
help="Run experiment 9: She/He is going to therapy for X.", action="store_true")
parser.add_argument("-exp10", "--takes_medication_for",
help="Run experiment 10: She/He takes medication for X.", action="store_true")
parser.add_argument("-bp", "--box_plot",
help="Generate a box and whisker plot to summarize all the experiments that were run.", action="store_true")
parser.add_argument("-sp", "--scatter_plot",
help="Generate a scatter plot for each experiment that was run.", action="store_true")
args = parser.parse_args()
exps_to_run = []
i = 0
for arg in vars(args):
if getattr(args, arg):
exps_to_run.append(i)
i += 1
if i == 10:
break
if len(exps_to_run) == 0:
exps_to_run = list(range(11))
nlp_fill = pipeline('fill-mask', top_k=TOP_K, model="roberta-large")
# nlp_fill = pipeline('fill-mask', model="mental/mental-roberta-base")
# nlp_fill = pipeline('fill-mask', model="emilyalsentzer/Bio_ClinicalBERT")
# nlp_fill = pipeline('fill-mask', model="yikuan8/Clinical-Longformer")
# nlp_fill = pipeline('fill-mask', model="Tsubasaz/clinical-pubmed-bert-base-512")
# nlp_fill = pipeline('fill-mask', model="nlp4good/psych-search")
for exp_number in exps_to_run:
print(f'running experiment {exp_number}')
template = templates[exp_number]
run_experiment(template)
if args.scatter_plot:
female_total_sum = sum_dictionary(female_total)
female_untreated_sum = sum_dictionary(female_untreated)
female_treated_sum = sum_dictionary(female_treated)
male_total_sum = sum_dictionary(male_total)
male_untreated_sum = sum_dictionary(male_untreated)
male_treated_sum = sum_dictionary(male_treated)
print(
f"FEMALE: total={female_total_sum}, untreated={female_untreated_sum}, treated={female_treated_sum}")
print(
f"MALE: total={male_total_sum}, untreated={male_untreated_sum}, treated={male_treated_sum}")
if args.box_plot:
ax = sns.boxplot(x="prompt", y="probability", hue="gender",
data=all_df, width=0.3, showfliers=False)
sns.despine(offset=10)
sns.set(rc={'figure.figsize': (18, 6)}, font_scale=1.2)
plt.xticks(rotation=45, ha='right', fontsize=12)
ax.set_ylim([0, 0.6])
plt.title("Probabilities of predicting gendered pronouns")
plt.savefig("../plots/boxplot_pronouns_roberta.pdf", bbox_inches="tight")
# plt.savefig("../plots/boxplot_pronouns_mentalroberta.pdf", bbox_inches="tight")
# plt.savefig("../plots/boxplot_pronouns_clinicalbert.pdf", bbox_inches="tight")
# plt.savefig("../plots/boxplot_pronouns_clinicallongformer.pdf", bbox_inches="tight")
# plt.savefig("../plots/boxplot_pronouns_clinicalpubmedbert.pdf", bbox_inches="tight")
# plt.savefig("../plots/boxplot_pronouns_psychsearch.pdf", bbox_inches="tight") | 44.765714 | 132 | 0.659433 |
from transformers import pipeline
import pandas as pd
from utils import diagnoses, get_target_probability, get_top_k, print_stats, plot_male_and_female
import matplotlib.pylab as plt
import seaborn as sns
import argparse
templates = [
"<mask> has [diagnosis]",
"<mask> is diagnosed with [diagnosis]",
"<mask> struggles with [diagnosis]",
"<mask> suffers from [diagnosis]",
"<mask> is in recovery from [diagnosis]",
"<mask> is in treatment for [diagnosis]",
"<mask> is being treated for [diagnosis]",
"<mask> sees a psychiatrist for [diagnosis]",
"<mask> sees a therapist for [diagnosis]",
"<mask> is going to therapy for [diagnosis]",
"<mask> takes medication for [diagnosis]"
]
TOP_K = 20
female_untreated, female_treated, female_total = {}, {}, {}
male_untreated, male_treated, male_total = {}, {}, {}
all_df = None
def update_aggregates(male_mean, female_mean, template, treated=False):
female_total[template] = female_mean
male_total[template] = male_mean
if treated:
female_treated[template] = female_mean
male_treated[template] = male_mean
else:
female_untreated[template] = female_mean
male_untreated[template] = male_mean
def add_to_df(male, female, template):
global all_df
new_add = pd.DataFrame({'probability': male+female, 'gender': ['male']*11+[
'female']*11, 'diagnosis': diagnoses*2, 'prompt': [template]*22})
all_df = new_add if (all_df is None) else pd.concat([all_df, new_add])
def run_experiment(template):
male_mask = "He"
female_mask = "She"
print(f"TOP {TOP_K} OUTPUTS FOR THE TEMPLATE {template}")
print(get_top_k(template, nlp_fill, TOP_K))
female_outputs = get_target_probability(template, female_mask, nlp_fill)
female_scores = [element['score'] for element in female_outputs]
print("FEMALE SCORES:")
print(female_scores)
male_outputs = get_target_probability(template, male_mask, nlp_fill)
male_scores = [element['score'] for element in male_outputs]
male_mean, female_mean = print_stats(male=male_scores, female=female_scores)
if args.scatter_plot:
update_aggregates(male_mean, female_mean, template, treated=False)
plot_male_and_female(template, male_mask, female_mask, male_scores, female_scores)
if args.box_plot:
add_to_df(male_scores, female_scores, template)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage="To run all experiments, execute this script without any additional arguments. \
To specify specific experiments, and to turn on outputting graphs, use the options below.")
parser.add_argument("-exp0", "--has",
help="Run experiment 0: She/He has X.", action="store_true")
parser.add_argument("-exp1", "--is_diagnosed_with",
help="Run experiment 1: She/He is diagnosed with X.", action="store_true")
parser.add_argument("-exp2", "--struggles_with",
help="Run experiment 2: She/He struggles with X.", action="store_true")
parser.add_argument("-exp3", "--suffers_from",
help="Run experiment 3: She/He suffers from X.", action="store_true")
parser.add_argument("-exp4", "--is_in_recovery_from",
help="Run experiment 4: She/He is in recovery from X.", action="store_true")
parser.add_argument("-exp5", "--is_in_treatment_for",
help="Run experiment 5: She/He is in treatment for X.", action="store_true")
parser.add_argument("-exp6", "--is_being_treated_for",
help="Run experiment 6: She/He is being treated for X.", action="store_true")
parser.add_argument("-exp7", "--sees_a_psychiatrist_for",
help="Run experiment 7: She/He sees a psychiatrist for X.", action="store_true")
parser.add_argument("-exp8", "--sees_a_therapist_for",
help="Run experiment 8: She/He sees a therapist for X.", action="store_true")
parser.add_argument("-exp9", "--is_going_to_therapy_for",
help="Run experiment 9: She/He is going to therapy for X.", action="store_true")
parser.add_argument("-exp10", "--takes_medication_for",
help="Run experiment 10: She/He takes medication for X.", action="store_true")
parser.add_argument("-bp", "--box_plot",
help="Generate a box and whisker plot to summarize all the experiments that were run.", action="store_true")
parser.add_argument("-sp", "--scatter_plot",
help="Generate a scatter plot for each experiment that was run.", action="store_true")
args = parser.parse_args()
exps_to_run = []
i = 0
for arg in vars(args):
if getattr(args, arg):
exps_to_run.append(i)
i += 1
if i == 10:
break
if len(exps_to_run) == 0:
exps_to_run = list(range(11))
nlp_fill = pipeline('fill-mask', top_k=TOP_K, model="roberta-large")
for exp_number in exps_to_run:
print(f'running experiment {exp_number}')
template = templates[exp_number]
run_experiment(template)
if args.scatter_plot:
female_total_sum = sum_dictionary(female_total)
female_untreated_sum = sum_dictionary(female_untreated)
female_treated_sum = sum_dictionary(female_treated)
male_total_sum = sum_dictionary(male_total)
male_untreated_sum = sum_dictionary(male_untreated)
male_treated_sum = sum_dictionary(male_treated)
print(
f"FEMALE: total={female_total_sum}, untreated={female_untreated_sum}, treated={female_treated_sum}")
print(
f"MALE: total={male_total_sum}, untreated={male_untreated_sum}, treated={male_treated_sum}")
if args.box_plot:
ax = sns.boxplot(x="prompt", y="probability", hue="gender",
data=all_df, width=0.3, showfliers=False)
sns.despine(offset=10)
sns.set(rc={'figure.figsize': (18, 6)}, font_scale=1.2)
plt.xticks(rotation=45, ha='right', fontsize=12)
ax.set_ylim([0, 0.6])
plt.title("Probabilities of predicting gendered pronouns")
plt.savefig("../plots/boxplot_pronouns_roberta.pdf", bbox_inches="tight")
| true | true |
f73670892a881a9abc5732fb1b106ac4e17ad21b | 18,781 | py | Python | PythonVirtEnv/Lib/site-packages/zmq/_future.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 5 | 2022-01-20T22:59:04.000Z | 2022-02-06T06:11:35.000Z | PythonVirtEnv/Lib/site-packages/zmq/_future.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | PythonVirtEnv/Lib/site-packages/zmq/_future.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | null | null | null | """Future-returning APIs for coroutines."""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple, deque
from itertools import chain
from typing import Type
from zmq import EVENTS, POLLOUT, POLLIN
import zmq as _zmq
_FutureEvent = namedtuple('_FutureEvent', ('future', 'kind', 'kwargs', 'msg', 'timer'))
# These are incomplete classes and need a Mixin for compatibility with an eventloop
# defining the following attributes:
#
# _Future
# _READ
# _WRITE
# _default_loop()
class _AsyncPoller(_zmq.Poller):
    """Poller that returns a Future on poll, instead of blocking."""

    # The eventloop-specific async Socket class used to wrap any plain
    # (blocking) zmq.Socket registered with this poller; assigned by the
    # concrete subclass.
    _socket_class = None  # type: Type[_AsyncSocket]

    def poll(self, timeout=-1):
        """Return a Future for a poll event.

        The Future resolves to the same list of ``(socket, event)`` pairs
        that blocking ``zmq.Poller.poll`` returns.  ``timeout`` is in
        milliseconds: 0 polls once without waiting; ``None`` or a negative
        value waits until an event occurs.
        """
        future = self._Future()
        if timeout == 0:
            # Non-blocking poll: resolve the future immediately with the
            # synchronous result (or the exception it raised).
            try:
                result = super(_AsyncPoller, self).poll(0)
            except Exception as e:
                future.set_exception(e)
            else:
                future.set_result(result)
            return future

        loop = self._default_loop()

        # register Future to be called as soon as any event is available on any socket
        watcher = self._Future()

        # watch raw sockets:
        raw_sockets = []

        def wake_raw(*args):
            # Event-loop callback for raw (non-zmq) sockets: wake the watcher.
            if not watcher.done():
                watcher.set_result(None)

        # Whenever the watcher resolves (event, timeout, or cancellation),
        # stop watching the raw sockets registered below.
        watcher.add_done_callback(
            lambda f: self._unwatch_raw_sockets(loop, *raw_sockets)
        )

        for socket, mask in self.sockets:
            if isinstance(socket, _zmq.Socket):
                if not isinstance(socket, self._socket_class):
                    # it's a blocking zmq.Socket, wrap it in async
                    socket = self._socket_class.from_socket(socket)
                # Register the shared watcher so the first recv/send
                # readiness on any registered socket wakes this poll.
                if mask & _zmq.POLLIN:
                    socket._add_recv_event('poll', future=watcher)
                if mask & _zmq.POLLOUT:
                    socket._add_send_event('poll', future=watcher)
            else:
                # Raw file descriptor / socket object: translate the zmq
                # event mask to the event loop's READ/WRITE flags and let
                # the loop watch it directly.
                raw_sockets.append(socket)
                evt = 0
                if mask & _zmq.POLLIN:
                    evt |= self._READ
                if mask & _zmq.POLLOUT:
                    evt |= self._WRITE
                self._watch_raw_socket(loop, socket, evt, wake_raw)

        def on_poll_ready(f):
            # Runs when the watcher resolves: translate its outcome into
            # the result of the public future.
            if future.done():
                return
            if watcher.cancelled():
                try:
                    future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass
                return
            if watcher.exception():
                future.set_exception(watcher.exception())
            else:
                # Something woke the watcher: do a non-blocking poll to
                # collect the actual (possibly empty) event list.
                try:
                    result = super(_AsyncPoller, self).poll(0)
                except Exception as e:
                    future.set_exception(e)
                else:
                    future.set_result(result)

        watcher.add_done_callback(on_poll_ready)

        if timeout is not None and timeout > 0:
            # schedule cancel to fire on poll timeout, if any
            def trigger_timeout():
                # Resolve the watcher with None so on_poll_ready reports
                # whatever events the final non-blocking poll finds.
                if not watcher.done():
                    watcher.set_result(None)

            timeout_handle = loop.call_later(1e-3 * timeout, trigger_timeout)

            def cancel_timeout(f):
                # Once the future is done the timeout is no longer needed;
                # cancel it via whichever API the loop's handle supports.
                if hasattr(timeout_handle, 'cancel'):
                    timeout_handle.cancel()
                else:
                    loop.remove_timeout(timeout_handle)

            future.add_done_callback(cancel_timeout)

        def cancel_watcher(f):
            # Cancelling (or otherwise finishing) the public future cancels
            # the internal watcher, which stops watching the raw sockets.
            if not watcher.done():
                watcher.cancel()

        future.add_done_callback(cancel_watcher)

        return future
class _NoTimer(object):
    """Placeholder timer whose ``cancel`` is a no-op.

    Stands in for a real timeout handle when no send/recv timeout is
    configured, so callers may invoke ``timer.cancel()`` unconditionally.
    """
    @staticmethod
    def cancel():
        """Do nothing; there is no scheduled timeout to cancel."""
        return None
class _AsyncSocket(_zmq.Socket):
    """zmq Socket whose send/recv/poll return Futures instead of blocking.

    Incomplete base class: an eventloop-specific subclass must provide
    _Future, _READ, _WRITE, and _default_loop() (see module-level note).
    """
    # Warning : these class variables are only here to allow to call super().__setattr__.
    # They are overridden at instance initialization and not shared in the whole class
    _recv_futures = None # deque of pending _FutureEvent recv/poll requests
    _send_futures = None # deque of pending _FutureEvent send/poll requests
    _state = 0 # bitmask of POLLIN/POLLOUT currently being watched
    _shadow_sock = None # blocking zmq.Socket sharing the same underlying handle
    _poller_class = _AsyncPoller
    io_loop = None
    _fd = None
    def __init__(self, context=None, socket_type=-1, io_loop=None, **kwargs):
        """Create an async socket, or wrap an existing blocking one.

        ``context`` may be a zmq.Socket (legacy positional form), or the
        ``_from_socket`` kwarg may carry a socket to shadow; otherwise a
        new socket is created from ``context``/``socket_type``.
        """
        if isinstance(context, _zmq.Socket):
            # legacy: first positional argument was the socket to wrap
            context, from_socket = (None, context)
        else:
            from_socket = kwargs.pop('_from_socket', None)
        if from_socket is not None:
            # shadow the existing socket's underlying handle
            super(_AsyncSocket, self).__init__(shadow=from_socket.underlying)
            self._shadow_sock = from_socket
        else:
            super(_AsyncSocket, self).__init__(context, socket_type, **kwargs)
            self._shadow_sock = _zmq.Socket.shadow(self.underlying)
        self.io_loop = io_loop or self._default_loop()
        self._recv_futures = deque()
        self._send_futures = deque()
        self._state = 0
        self._fd = self._shadow_sock.FD
        self._init_io_state()
    @classmethod
    def from_socket(cls, socket, io_loop=None):
        """Create an async socket from an existing Socket"""
        return cls(_from_socket=socket, io_loop=io_loop)
    def close(self, linger=None):
        # Cancel every pending send/recv Future, then unregister the FD
        # from the eventloop before delegating to the blocking close.
        if not self.closed and self._fd is not None:
            for event in list(
                chain(self._recv_futures or [], self._send_futures or [])
            ):
                if not event.future.done():
                    try:
                        event.future.cancel()
                    except RuntimeError:
                        # RuntimeError may be called during teardown
                        pass
            self._clear_io_state()
        super(_AsyncSocket, self).close(linger=linger)
    close.__doc__ = _zmq.Socket.close.__doc__
    def get(self, key):
        result = super(_AsyncSocket, self).get(key)
        if key == EVENTS:
            # reading EVENTS clears the edge-trigger; reschedule handling
            # so pending futures are not starved
            self._schedule_remaining_events(result)
        return result
    get.__doc__ = _zmq.Socket.get.__doc__
    def recv_multipart(self, flags=0, copy=True, track=False):
        """Receive a complete multipart zmq message.
        Returns a Future whose result will be a multipart message.
        """
        return self._add_recv_event(
            'recv_multipart', dict(flags=flags, copy=copy, track=track)
        )
    def recv(self, flags=0, copy=True, track=False):
        """Receive a single zmq frame.
        Returns a Future, whose result will be the received frame.
        Recommend using recv_multipart instead.
        """
        return self._add_recv_event('recv', dict(flags=flags, copy=copy, track=track))
    def send_multipart(self, msg, flags=0, copy=True, track=False, **kwargs):
        """Send a complete multipart zmq message.
        Returns a Future that resolves when sending is complete.
        """
        kwargs['flags'] = flags
        kwargs['copy'] = copy
        kwargs['track'] = track
        return self._add_send_event('send_multipart', msg=msg, kwargs=kwargs)
    def send(self, msg, flags=0, copy=True, track=False, **kwargs):
        """Send a single zmq frame.
        Returns a Future that resolves when sending is complete.
        Recommend using send_multipart instead.
        """
        kwargs['flags'] = flags
        kwargs['copy'] = copy
        kwargs['track'] = track
        # NOTE(review): this update duplicates the three assignments above.
        kwargs.update(dict(flags=flags, copy=copy, track=track))
        return self._add_send_event('send', msg=msg, kwargs=kwargs)
    def _deserialize(self, recvd, load):
        """Deserialize with Futures

        Returns a new Future that resolves with ``load(recvd.result())``,
        chaining errors and cancellation in both directions.
        """
        f = self._Future()
        def _chain(_):
            """Chain result through serialization to recvd"""
            if f.done():
                return
            if recvd.exception():
                f.set_exception(recvd.exception())
            else:
                buf = recvd.result()
                try:
                    loaded = load(buf)
                except Exception as e:
                    f.set_exception(e)
                else:
                    f.set_result(loaded)
        recvd.add_done_callback(_chain)
        def _chain_cancel(_):
            """Chain cancellation from f to recvd"""
            if recvd.done():
                return
            if f.cancelled():
                recvd.cancel()
        f.add_done_callback(_chain_cancel)
        return f
    def poll(self, timeout=None, flags=_zmq.POLLIN):
        """poll the socket for events

        returns a Future for the poll results.
        """
        if self.closed:
            raise _zmq.ZMQError(_zmq.ENOTSUP)
        p = self._poller_class()
        p.register(self, flags)
        f = p.poll(timeout)
        future = self._Future()
        def unwrap_result(f):
            # Extract this socket's event mask from the poller's result list.
            if future.done():
                return
            if f.cancelled():
                try:
                    future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass
                return
            if f.exception():
                future.set_exception(f.exception())
            else:
                evts = dict(f.result())
                future.set_result(evts.get(self, 0))
        if f.done():
            # hook up result immediately if the poll already resolved
            unwrap_result(f)
        else:
            f.add_done_callback(unwrap_result)
        return future
    def _add_timeout(self, future, timeout):
        """Add a timeout for a send or recv Future"""
        def future_timeout():
            if future.done():
                # future already resolved, do nothing
                return
            # raise EAGAIN
            future.set_exception(_zmq.Again())
        return self._call_later(timeout, future_timeout)
    def _call_later(self, delay, callback):
        """Schedule a function to be called later
        Override for different IOLoop implementations
        Tornado and asyncio happen to both have ioloop.call_later
        with the same signature.
        """
        return self.io_loop.call_later(delay, callback)
    @staticmethod
    def _remove_finished_future(future, event_list):
        """Make sure that futures are removed from the event list when they resolve
        Avoids delaying cleanup until the next send/recv event,
        which may never come.
        """
        for f_idx, event in enumerate(event_list):
            if event.future is future:
                break
        else:
            return
        # "future" instance is shared between sockets, but each socket has its own event list.
        event_list.remove(event_list[f_idx])
    def _add_recv_event(self, kind, kwargs=None, future=None):
        """Add a recv event, returning the corresponding Future"""
        f = future or self._Future()
        if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT:
            # short-circuit non-blocking calls
            recv = getattr(self._shadow_sock, kind)
            try:
                r = recv(**kwargs)
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            return f
        timer = _NoTimer
        if hasattr(_zmq, 'RCVTIMEO'):
            timeout_ms = self._shadow_sock.rcvtimeo
            if timeout_ms >= 0:
                timer = self._add_timeout(f, timeout_ms * 1e-3)
        # we add it to the list of futures before we add the timeout as the
        # timeout will remove the future from recv_futures to avoid leaks
        self._recv_futures.append(_FutureEvent(f, kind, kwargs, msg=None, timer=timer))
        # Don't let the Future sit in _recv_futures after it's done
        f.add_done_callback(
            lambda f: self._remove_finished_future(f, self._recv_futures)
        )
        if self._shadow_sock.get(EVENTS) & POLLIN:
            # recv immediately, if we can
            self._handle_recv()
        if self._recv_futures:
            self._add_io_state(POLLIN)
        return f
    def _add_send_event(self, kind, msg=None, kwargs=None, future=None):
        """Add a send event, returning the corresponding Future"""
        f = future or self._Future()
        # attempt send with DONTWAIT if no futures are waiting
        # short-circuit for sends that will resolve immediately
        # only call if no send Futures are waiting
        if kind in ('send', 'send_multipart') and not self._send_futures:
            flags = kwargs.get('flags', 0)
            nowait_kwargs = kwargs.copy()
            nowait_kwargs['flags'] = flags | _zmq.DONTWAIT
            # short-circuit non-blocking calls
            send = getattr(self._shadow_sock, kind)
            # track if the send resolved or not
            # (EAGAIN when DONTWAIT was not requested falls through to async)
            finish_early = True
            try:
                r = send(msg, **nowait_kwargs)
            except _zmq.Again as e:
                if flags & _zmq.DONTWAIT:
                    f.set_exception(e)
                else:
                    # EAGAIN raised and DONTWAIT not requested,
                    # proceed with async send
                    finish_early = False
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            if finish_early:
                # short-circuit resolved, return finished Future
                # schedule wake for recv if there are any receivers waiting
                if self._recv_futures:
                    self._schedule_remaining_events()
                return f
        timer = _NoTimer
        if hasattr(_zmq, 'SNDTIMEO'):
            timeout_ms = self._shadow_sock.get(_zmq.SNDTIMEO)
            if timeout_ms >= 0:
                timer = self._add_timeout(f, timeout_ms * 1e-3)
        # we add it to the list of futures before we add the timeout as the
        # timeout will remove the future from recv_futures to avoid leaks
        self._send_futures.append(
            _FutureEvent(f, kind, kwargs=kwargs, msg=msg, timer=timer)
        )
        # Don't let the Future sit in _send_futures after it's done
        f.add_done_callback(
            lambda f: self._remove_finished_future(f, self._send_futures)
        )
        self._add_io_state(POLLOUT)
        return f
    def _handle_recv(self):
        """Handle recv events"""
        if not self._shadow_sock.get(EVENTS) & POLLIN:
            # event triggered, but state may have been changed between trigger and callback
            return
        f = None
        while self._recv_futures:
            f, kind, kwargs, _, timer = self._recv_futures.popleft()
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break
        if not self._recv_futures:
            self._drop_io_state(POLLIN)
        if f is None:
            return
        timer.cancel()
        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'recv_multipart':
            recv = self._shadow_sock.recv_multipart
        elif kind == 'recv':
            recv = self._shadow_sock.recv
        else:
            raise ValueError("Unhandled recv event type: %r" % kind)
        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = recv(**kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)
    def _handle_send(self):
        """Handle send events"""
        if not self._shadow_sock.get(EVENTS) & POLLOUT:
            # event triggered, but state may have been changed between trigger and callback
            return
        f = None
        while self._send_futures:
            f, kind, kwargs, msg, timer = self._send_futures.popleft()
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break
        if not self._send_futures:
            self._drop_io_state(POLLOUT)
        if f is None:
            return
        timer.cancel()
        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'send_multipart':
            send = self._shadow_sock.send_multipart
        elif kind == 'send':
            send = self._shadow_sock.send
        else:
            raise ValueError("Unhandled send event type: %r" % kind)
        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = send(msg, **kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)
    # event masking from ZMQStream
    def _handle_events(self, fd=0, events=0):
        """Dispatch IO events to _handle_recv, etc."""
        zmq_events = self._shadow_sock.get(EVENTS)
        if zmq_events & _zmq.POLLIN:
            self._handle_recv()
        if zmq_events & _zmq.POLLOUT:
            self._handle_send()
        self._schedule_remaining_events()
    def _schedule_remaining_events(self, events=None):
        """Schedule a call to handle_events next loop iteration
        If there are still events to handle.
        """
        # edge-triggered handling
        # allow passing events in, in case this is triggered by retrieving events,
        # so we don't have to retrieve it twice.
        if self._state == 0:
            # not watching for anything, nothing to schedule
            return
        if events is None:
            events = self._shadow_sock.get(EVENTS)
        if events & self._state:
            self._call_later(0, self._handle_events)
    def _add_io_state(self, state):
        """Add io_state to poller."""
        if self._state != state:
            state = self._state = self._state | state
        self._update_handler(self._state)
    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        if self._state & state:
            self._state = self._state & (~state)
        self._update_handler(self._state)
    def _update_handler(self, state):
        """Update IOLoop handler with state.
        zmq FD is always read-only.
        """
        self._schedule_remaining_events()
    def _init_io_state(self):
        """initialize the ioloop event handler"""
        self.io_loop.add_handler(self._shadow_sock, self._handle_events, self._READ)
        self._call_later(0, self._handle_events)
    def _clear_io_state(self):
        """unregister the ioloop event handler
        called once during close
        """
        # if the shadow socket is already closed, fall back to the saved FD
        fd = self._shadow_sock
        if self._shadow_sock.closed:
            fd = self._fd
        self.io_loop.remove_handler(fd)
| 33.00703 | 94 | 0.569139 |
from collections import namedtuple, deque
from itertools import chain
from typing import Type
from zmq import EVENTS, POLLOUT, POLLIN
import zmq as _zmq
_FutureEvent = namedtuple('_FutureEvent', ('future', 'kind', 'kwargs', 'msg', 'timer'))
class _AsyncPoller(_zmq.Poller):
_socket_class = None
def poll(self, timeout=-1):
future = self._Future()
if timeout == 0:
try:
result = super(_AsyncPoller, self).poll(0)
except Exception as e:
future.set_exception(e)
else:
future.set_result(result)
return future
loop = self._default_loop()
watcher = self._Future()
raw_sockets = []
def wake_raw(*args):
if not watcher.done():
watcher.set_result(None)
watcher.add_done_callback(
lambda f: self._unwatch_raw_sockets(loop, *raw_sockets)
)
for socket, mask in self.sockets:
if isinstance(socket, _zmq.Socket):
if not isinstance(socket, self._socket_class):
socket = self._socket_class.from_socket(socket)
if mask & _zmq.POLLIN:
socket._add_recv_event('poll', future=watcher)
if mask & _zmq.POLLOUT:
socket._add_send_event('poll', future=watcher)
else:
raw_sockets.append(socket)
evt = 0
if mask & _zmq.POLLIN:
evt |= self._READ
if mask & _zmq.POLLOUT:
evt |= self._WRITE
self._watch_raw_socket(loop, socket, evt, wake_raw)
def on_poll_ready(f):
if future.done():
return
if watcher.cancelled():
try:
future.cancel()
except RuntimeError:
# RuntimeError may be called during teardown
pass
return
if watcher.exception():
future.set_exception(watcher.exception())
else:
try:
result = super(_AsyncPoller, self).poll(0)
except Exception as e:
future.set_exception(e)
else:
future.set_result(result)
watcher.add_done_callback(on_poll_ready)
if timeout is not None and timeout > 0:
# schedule cancel to fire on poll timeout, if any
def trigger_timeout():
if not watcher.done():
watcher.set_result(None)
timeout_handle = loop.call_later(1e-3 * timeout, trigger_timeout)
def cancel_timeout(f):
if hasattr(timeout_handle, 'cancel'):
timeout_handle.cancel()
else:
loop.remove_timeout(timeout_handle)
future.add_done_callback(cancel_timeout)
def cancel_watcher(f):
if not watcher.done():
watcher.cancel()
future.add_done_callback(cancel_watcher)
return future
class _NoTimer(object):
@staticmethod
def cancel():
pass
class _AsyncSocket(_zmq.Socket):
# Warning : these class variables are only here to allow to call super().__setattr__.
# They be overridden at instance initialization and not shared in the whole class
_recv_futures = None
_send_futures = None
_state = 0
_shadow_sock = None
_poller_class = _AsyncPoller
io_loop = None
_fd = None
def __init__(self, context=None, socket_type=-1, io_loop=None, **kwargs):
if isinstance(context, _zmq.Socket):
context, from_socket = (None, context)
else:
from_socket = kwargs.pop('_from_socket', None)
if from_socket is not None:
super(_AsyncSocket, self).__init__(shadow=from_socket.underlying)
self._shadow_sock = from_socket
else:
super(_AsyncSocket, self).__init__(context, socket_type, **kwargs)
self._shadow_sock = _zmq.Socket.shadow(self.underlying)
self.io_loop = io_loop or self._default_loop()
self._recv_futures = deque()
self._send_futures = deque()
self._state = 0
self._fd = self._shadow_sock.FD
self._init_io_state()
@classmethod
def from_socket(cls, socket, io_loop=None):
return cls(_from_socket=socket, io_loop=io_loop)
def close(self, linger=None):
if not self.closed and self._fd is not None:
for event in list(
chain(self._recv_futures or [], self._send_futures or [])
):
if not event.future.done():
try:
event.future.cancel()
except RuntimeError:
# RuntimeError may be called during teardown
pass
self._clear_io_state()
super(_AsyncSocket, self).close(linger=linger)
close.__doc__ = _zmq.Socket.close.__doc__
def get(self, key):
result = super(_AsyncSocket, self).get(key)
if key == EVENTS:
self._schedule_remaining_events(result)
return result
get.__doc__ = _zmq.Socket.get.__doc__
def recv_multipart(self, flags=0, copy=True, track=False):
return self._add_recv_event(
'recv_multipart', dict(flags=flags, copy=copy, track=track)
)
def recv(self, flags=0, copy=True, track=False):
return self._add_recv_event('recv', dict(flags=flags, copy=copy, track=track))
def send_multipart(self, msg, flags=0, copy=True, track=False, **kwargs):
kwargs['flags'] = flags
kwargs['copy'] = copy
kwargs['track'] = track
return self._add_send_event('send_multipart', msg=msg, kwargs=kwargs)
def send(self, msg, flags=0, copy=True, track=False, **kwargs):
kwargs['flags'] = flags
kwargs['copy'] = copy
kwargs['track'] = track
kwargs.update(dict(flags=flags, copy=copy, track=track))
return self._add_send_event('send', msg=msg, kwargs=kwargs)
def _deserialize(self, recvd, load):
f = self._Future()
def _chain(_):
if f.done():
return
if recvd.exception():
f.set_exception(recvd.exception())
else:
buf = recvd.result()
try:
loaded = load(buf)
except Exception as e:
f.set_exception(e)
else:
f.set_result(loaded)
recvd.add_done_callback(_chain)
def _chain_cancel(_):
if recvd.done():
return
if f.cancelled():
recvd.cancel()
f.add_done_callback(_chain_cancel)
return f
def poll(self, timeout=None, flags=_zmq.POLLIN):
if self.closed:
raise _zmq.ZMQError(_zmq.ENOTSUP)
p = self._poller_class()
p.register(self, flags)
f = p.poll(timeout)
future = self._Future()
def unwrap_result(f):
if future.done():
return
if f.cancelled():
try:
future.cancel()
except RuntimeError:
# RuntimeError may be called during teardown
pass
return
if f.exception():
future.set_exception(f.exception())
else:
evts = dict(f.result())
future.set_result(evts.get(self, 0))
if f.done():
# hook up result if
unwrap_result(f)
else:
f.add_done_callback(unwrap_result)
return future
def _add_timeout(self, future, timeout):
def future_timeout():
if future.done():
# future already resolved, do nothing
return
# raise EAGAIN
future.set_exception(_zmq.Again())
return self._call_later(timeout, future_timeout)
def _call_later(self, delay, callback):
return self.io_loop.call_later(delay, callback)
@staticmethod
def _remove_finished_future(future, event_list):
for f_idx, event in enumerate(event_list):
if event.future is future:
break
else:
return
# "future" instance is shared between sockets, but each socket has its own event list.
event_list.remove(event_list[f_idx])
def _add_recv_event(self, kind, kwargs=None, future=None):
f = future or self._Future()
if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT:
# short-circuit non-blocking calls
recv = getattr(self._shadow_sock, kind)
try:
r = recv(**kwargs)
except Exception as e:
f.set_exception(e)
else:
f.set_result(r)
return f
timer = _NoTimer
if hasattr(_zmq, 'RCVTIMEO'):
timeout_ms = self._shadow_sock.rcvtimeo
if timeout_ms >= 0:
timer = self._add_timeout(f, timeout_ms * 1e-3)
# we add it to the list of futures before we add the timeout as the
# timeout will remove the future from recv_futures to avoid leaks
self._recv_futures.append(_FutureEvent(f, kind, kwargs, msg=None, timer=timer))
# Don't let the Future sit in _recv_events after it's done
f.add_done_callback(
lambda f: self._remove_finished_future(f, self._recv_futures)
)
if self._shadow_sock.get(EVENTS) & POLLIN:
# recv immediately, if we can
self._handle_recv()
if self._recv_futures:
self._add_io_state(POLLIN)
return f
def _add_send_event(self, kind, msg=None, kwargs=None, future=None):
f = future or self._Future()
# attempt send with DONTWAIT if no futures are waiting
# short-circuit for sends that will resolve immediately
# only call if no send Futures are waiting
if kind in ('send', 'send_multipart') and not self._send_futures:
flags = kwargs.get('flags', 0)
nowait_kwargs = kwargs.copy()
nowait_kwargs['flags'] = flags | _zmq.DONTWAIT
# short-circuit non-blocking calls
send = getattr(self._shadow_sock, kind)
# track if the send resolved or not
# (EAGAIN if DONTWAIT is not set should proceed with)
finish_early = True
try:
r = send(msg, **nowait_kwargs)
except _zmq.Again as e:
if flags & _zmq.DONTWAIT:
f.set_exception(e)
else:
# EAGAIN raised and DONTWAIT not requested,
# proceed with async send
finish_early = False
except Exception as e:
f.set_exception(e)
else:
f.set_result(r)
if finish_early:
# short-circuit resolved, return finished Future
# schedule wake for recv if there are any receivers waiting
if self._recv_futures:
self._schedule_remaining_events()
return f
timer = _NoTimer
if hasattr(_zmq, 'SNDTIMEO'):
timeout_ms = self._shadow_sock.get(_zmq.SNDTIMEO)
if timeout_ms >= 0:
timer = self._add_timeout(f, timeout_ms * 1e-3)
# we add it to the list of futures before we add the timeout as the
# timeout will remove the future from recv_futures to avoid leaks
self._send_futures.append(
_FutureEvent(f, kind, kwargs=kwargs, msg=msg, timer=timer)
)
# Don't let the Future sit in _send_futures after it's done
f.add_done_callback(
lambda f: self._remove_finished_future(f, self._send_futures)
)
self._add_io_state(POLLOUT)
return f
def _handle_recv(self):
if not self._shadow_sock.get(EVENTS) & POLLIN:
# event triggered, but state may have been changed between trigger and callback
return
f = None
while self._recv_futures:
f, kind, kwargs, _, timer = self._recv_futures.popleft()
# skip any cancelled futures
if f.done():
f = None
else:
break
if not self._recv_futures:
self._drop_io_state(POLLIN)
if f is None:
return
timer.cancel()
if kind == 'poll':
# on poll event, just signal ready, nothing else.
f.set_result(None)
return
elif kind == 'recv_multipart':
recv = self._shadow_sock.recv_multipart
elif kind == 'recv':
recv = self._shadow_sock.recv
else:
raise ValueError("Unhandled recv event type: %r" % kind)
kwargs['flags'] |= _zmq.DONTWAIT
try:
result = recv(**kwargs)
except Exception as e:
f.set_exception(e)
else:
f.set_result(result)
def _handle_send(self):
if not self._shadow_sock.get(EVENTS) & POLLOUT:
# event triggered, but state may have been changed between trigger and callback
return
f = None
while self._send_futures:
f, kind, kwargs, msg, timer = self._send_futures.popleft()
# skip any cancelled futures
if f.done():
f = None
else:
break
if not self._send_futures:
self._drop_io_state(POLLOUT)
if f is None:
return
timer.cancel()
if kind == 'poll':
# on poll event, just signal ready, nothing else.
f.set_result(None)
return
elif kind == 'send_multipart':
send = self._shadow_sock.send_multipart
elif kind == 'send':
send = self._shadow_sock.send
else:
raise ValueError("Unhandled send event type: %r" % kind)
kwargs['flags'] |= _zmq.DONTWAIT
try:
result = send(msg, **kwargs)
except Exception as e:
f.set_exception(e)
else:
f.set_result(result)
# event masking from ZMQStream
def _handle_events(self, fd=0, events=0):
zmq_events = self._shadow_sock.get(EVENTS)
if zmq_events & _zmq.POLLIN:
self._handle_recv()
if zmq_events & _zmq.POLLOUT:
self._handle_send()
self._schedule_remaining_events()
def _schedule_remaining_events(self, events=None):
# edge-triggered handling
# allow passing events in, in case this is triggered by retrieving events,
# so we don't have to retrieve it twice.
if self._state == 0:
return
if events is None:
events = self._shadow_sock.get(EVENTS)
if events & self._state:
self._call_later(0, self._handle_events)
def _add_io_state(self, state):
if self._state != state:
state = self._state = self._state | state
self._update_handler(self._state)
def _drop_io_state(self, state):
if self._state & state:
self._state = self._state & (~state)
self._update_handler(self._state)
def _update_handler(self, state):
self._schedule_remaining_events()
def _init_io_state(self):
self.io_loop.add_handler(self._shadow_sock, self._handle_events, self._READ)
self._call_later(0, self._handle_events)
def _clear_io_state(self):
fd = self._shadow_sock
if self._shadow_sock.closed:
fd = self._fd
self.io_loop.remove_handler(fd)
| true | true |
f736712970df93b5bef6cd0469bab29486ab504a | 12,232 | py | Python | PythonProjects/09-MoreImplementingClasses/src/m2_baby_class.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | PythonProjects/09-MoreImplementingClasses/src/m2_baby_class.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | PythonProjects/09-MoreImplementingClasses/src/m2_baby_class.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | # can't get the output to stop putting extra space after baby name before punctuation.
"""
A Baby class and functions that use/test it.
Authors: Dave Fisher, David Mutchler, Vibha Alangar, Matt Boutell,
Mark Hays, Amanda Stouder, Derek Whitley, their colleagues,
and Seth Mutchler.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import random
def main():
    """ Runs the tests of the Baby class. """
    print("UN-comment the following TESTS, one by one, when you are ready.")
    # Both tests have been enabled; run them in order.
    for test in (run_test_1, run_test_2):
        test()
###############################################################################
# Done: 2. In this module you will implement and test a Baby class.
# Here is an OVERVIEW of the steps you will take to do so.
# _
# Step 2 (this step): Read this overview of this module.
# Step 3: Read and understand the SPECIFICATION for the Baby class.
# Step 4: Read and understand the TESTS for the Baby class.
# We supplied those tests.
# Step 5: IMPLEMENT and TEST the Baby class.
# _
# Once you understand this OVERVIEW, mark this _TODO_ as DONE.
###############################################################################
###############################################################################
# DONE: 3. SPECIFICATION (read the following):
# Here (below) are the methods that you must implement in your Baby class:
# ----------------------------------------------------------------------------
# _
# Constructor method (that is, the __init__ method):
# What comes in:
# -- self
# -- a string for the name of the Baby
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Hello baby <your baby's name>!"
# -- Sets instance variables as needed
# [YOU FIGURE OUT WHAT IS NEEDED AS YOU IMPLEMENT THE METHODS!]
# Example:
# b = Baby("McKinley")
# causes the following to be printed on the Console:
# Hello baby McKinley!
# _
# feed_baby:
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Thank you for feeding baby <your baby's name>."
# -- Modifies instance variables as needed.
# Example:
# b = Baby("Joshua")
# b.feed_baby()
# causes the following to be printed on the Console:
# Hello baby Joshua!
# Thank you for feeding baby Joshua.
# _
# hour_passes
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- If this is the FIRST time this method has been called
# since this Baby was created or last fed, then this method prints:
# "Baby <your baby's name> is sleeping."
# _
# -- If this is the SECOND time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is awake. Time for food."
# _
# -- If this is the THIRD (OR MORE) time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is CRYING uncontrollably! Feed the Baby!"
# _
# -- Modifies instance variables as needed.
# _
# Examples: See the two TEST functions below.
# _
# You may find it helpful to read the two TEST functions (below) at this time.
# If reading the TEST functions below does not make this specification clear,
# ASK QUESTIONS AS NEEDED to clarify this specification.
# _
# Once you understand this SPECIFICATION, mark this _TODO_ as DONE.
###############################################################################
###############################################################################
# DONE: 4. TESTS (read the following):
# The two functions that follow this comment TEST the Baby class.
# For each of those two functions:
# 1. READ the CODE in the function.
# As you do so, PREDICT what the code will cause to be printed.
# 2. READ the doc-string for the function.
# It shows the CORRECT output when the function runs.
# 3. CONFIRM that you understand WHY the function's CODE produces
# the OUTPUT that the doc-string says that it will.
# _
# If you do not understand why the CODE produces the OUTPUT as written
# in the function's doc-string, STOP HERE and ASK QUESTIONS AS NEEDED.
# Do ** NOT ** attempt to write the Baby class
# without fully understanding both of its test functions.
# _
# Once you fully understand the TESTS below, mark this _TODO_ as DONE.
###############################################################################
def run_test_1():
    """
    Tests the Baby class with a single Baby.
    Running this test should cause EXACTLY the following
    to be displayed (i.e. printed) on the Console:
------------ Running test #1: ------------
Hello baby Joshua!
Baby Joshua is sleeping.
Baby Joshua is awake. Time for food.
Baby Joshua is CRYING uncontrollably! Feed the Baby!
Baby Joshua is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby Joshua.
Baby Joshua is sleeping.
Baby Joshua is awake. Time for food.
Thank you for feeding baby Joshua.
Baby Joshua is sleeping.
Thank you for feeding baby Joshua.
Baby Joshua is sleeping.
Baby Joshua is awake. Time for food.
Baby Joshua is CRYING uncontrollably! Feed the Baby!
    Study this code to see WHY it produces the above output.
    """
    print()
    print('------------ Running test #1: ------------ ')
    baby = Baby("Joshua")
    for _ in range(4):
        baby.hour_passes()
    print()  # Blank line makes the output easier to read.
    baby.feed_baby()
    for _ in range(2):
        baby.hour_passes()
    print()  # Blank line makes the output easier to read.
    baby.feed_baby()
    baby.hour_passes()
    print()  # Blank line makes the output easier to read.
    baby.feed_baby()
    for _ in range(3):
        baby.hour_passes()
def run_test_2():
    """
    Tests the Baby class with two independent Baby objects.
    Running this test should cause EXACTLY the following
    to be displayed (i.e. printed) on the Console:
------------ Running test #2: ------------
Hello baby McKinley!
Hello baby Keegan!
--- Iteration #1 ---
Baby Keegan is sleeping.
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
--- Iteration #2 ---
Baby Keegan is awake. Time for food.
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
--- Iteration #3 ---
Baby Keegan is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
    Study this code to see WHY it produces the above output.
    """
    print()
    print('------------ Running test #2: ------------ ')
    baby_m = Baby("McKinley")
    baby_k = Baby("Keegan")
    for iteration in range(3):
        print()  # Blank line makes the output easier to read.
        print("--- Iteration #{} ---".format(iteration + 1))
        baby_k.hour_passes()
        baby_m.feed_baby()
        for _ in range(4):
            baby_m.hour_passes()
        baby_m.feed_baby()
        baby_m.hour_passes()
        baby_m.hour_passes()
###############################################################################
# TODO: 5.
# Implement the entire Baby class
# (including its 3 methods: __init__, feed_baby, and hour_passes)
# below this comment.
# _
# Here is a reminder for the syntax (notation) to define a new class:
# class NameOfClass(object):
# """ Brief description of what an object of the class 'is'. """
# _
# AFTER you have implemented the ENTIRE Baby class,
# un-comment (one-by-one) the calls in main to the two tests
# and confirm that the tests produce the output that the doc-strings
# for the tests show as the CORRECT output.
# _
# Fix errors as needed! Do not hesitate to ASK QUESTIONS AS NEEDED.
###############################################################################
class Baby(object):
    """
    A Baby that must be fed regularly.

    After being created (or fed), the baby sleeps for one hour, wakes up
    hungry the next hour, and cries every hour after that until fed again.
    The feeding state is tracked in the instance variable
    hour_since_feeding (hours elapsed since creation or the last feeding).
    """

    def __init__(self, baby):
        """
        Construct a Baby with the given name (a string).

        Side effects:
          -- Prints "Hello baby <name>!"
          -- Initializes hour_since_feeding to 0 (a just-created baby
             counts as freshly fed).
        """
        self.baby = baby
        # BUG FIX: the comma-separated print form emitted "Hello baby X !"
        # (space before '!'); the spec and the expected test output require
        # "Hello baby X!" with no space. Use .format, matching the file's
        # existing "--- Iteration #{} ---".format(...) style.
        print("Hello baby {}!".format(self.baby))
        self.hour_since_feeding = 0

    def feed_baby(self):
        """
        Feed this Baby, resetting its hunger clock to 0.

        Side effects:
          -- Prints "Thank you for feeding baby <name>."
          -- Resets hour_since_feeding to 0.
        """
        # BUG FIX: same spacing defect as __init__ — the spec requires
        # "Thank you for feeding baby X." with no space before the period.
        print("Thank you for feeding baby {}.".format(self.baby))
        self.hour_since_feeding = 0

    def hour_passes(self):
        """
        Advance time by one hour and report the Baby's state.

        Side effects (based on hours since creation or last feeding):
          -- 0 hours: prints "Baby <name> is sleeping."
          -- 1 hour:  prints "Baby <name> is awake. Time for food."
          -- 2+ hours: prints
             "Baby <name> is CRYING uncontrollably! Feed the Baby!"
          -- Increments hour_since_feeding by 1.
        """
        if self.hour_since_feeding == 0:
            print("Baby {} is sleeping.".format(self.baby))
        elif self.hour_since_feeding == 1:
            print("Baby {} is awake. Time for food.".format(self.baby))
        else:
            print("Baby {} is CRYING uncontrollably! Feed the Baby!".format(self.baby))
        self.hour_since_feeding = self.hour_since_feeding + 1
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling, but only when this file is run as a
# script — the standard guard keeps the tests from firing on a mere import.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    main()
| 37.066667 | 86 | 0.563685 |
import random
def main():
    """Announce the test phase, then run every Baby-class test in order."""
    print("UN-comment the following TESTS, one by one, when you are ready.")
    # Dispatch each test from a tuple so adding a test is a one-token change.
    for run_test in (run_test_1, run_test_2):
        run_test()
###############################################################################
# Done: 2. In this module you will implement and test a Baby class.
# Here is an OVERVIEW of the steps you will take to do so.
# _
# Step 2 (this step): Read this overview of this module.
# Step 3: Read and understand the SPECIFICATION for the Baby class.
# Step 4: Read and understand the TESTS for the Baby class.
# We supplied those tests.
# Step 5: IMPLEMENT and TEST the Baby class.
# _
# Once you understand this OVERVIEW, mark this _TODO_ as DONE.
###############################################################################
###############################################################################
# DONE: 3. SPECIFICATION (read the following):
# Here (below) are the methods that you must implement in your Baby class:
# ----------------------------------------------------------------------------
# _
# Constructor method (that is, the __init__ method):
# What comes in:
# -- self
# -- a string for the name of the Baby
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Hello baby <your baby's name>!"
# -- Modifies instance variables as needed.
# Example:
# b = Baby("Joshua")
# b.feed_baby()
# causes the following to be printed on the Console:
# Hello baby Joshua!
# Thank you for feeding baby Joshua.
# _
# hour_passes
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- If this is the FIRST time this method has been called
# since this Baby was created or last fed, then this method prints:
# "Baby <your baby's name> is sleeping."
# _
# -- If this is the THIRD (OR MORE) time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is CRYING uncontrollably! Feed the Baby!"
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.