hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a08f17904d9037f2d71ad0f48a5328f02383132
| 7,645
|
py
|
Python
|
src/largest50/models/t5/abln_prott5.py
|
vam-sin/CATHe
|
230d64b3a1268650e6c4d0ec88df0a9bd729cad2
|
[
"MIT"
] | 2
|
2021-11-29T16:05:03.000Z
|
2022-02-18T08:32:51.000Z
|
src/largest50/models/t5/abln_prott5.py
|
vam-sin/CATHe
|
230d64b3a1268650e6c4d0ec88df0a9bd729cad2
|
[
"MIT"
] | null | null | null |
src/largest50/models/t5/abln_prott5.py
|
vam-sin/CATHe
|
230d64b3a1268650e6c4d0ec88df0a9bd729cad2
|
[
"MIT"
] | null | null | null |
#libraries
import pandas as pd
from sklearn import preprocessing
import numpy as np
import pickle
from sklearn.preprocessing import OneHotEncoder
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import StandardScaler, LabelEncoder, normalize
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, KFold, cross_val_score, StratifiedKFold
import math
from keras.utils import to_categorical
import tensorflow as tf
from keras.models import Model
from keras.models import load_model
from keras import optimizers
from keras.layers import Dense, Dropout, BatchNormalization, Conv1D, Flatten, Input, Add, LSTM, Bidirectional, Reshape
from keras_self_attention import SeqSelfAttention
from keras.regularizers import l2
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, classification_report, matthews_corrcoef, balanced_accuracy_score
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from keras import regularizers
from keras import backend as K
import keras
from sklearn.model_selection import KFold
# GPU config for Vamsi's Laptop
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
tf.keras.backend.clear_session()
# TF1-style session config: grow GPU memory on demand rather than grabbing it all.
config = ConfigProto()
config.gpu_options.allow_growth = True
# Separate session capped at a third of GPU memory.
# NOTE(review): `config` above is never passed to this Session — the
# allow_growth setting appears unused; confirm which config is intended.
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
# Additionally cap the first physical GPU with a 3 GiB virtual device (TF2 API).
LIMIT = 3 * 1024
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT)])
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Virtual devices must be set before GPUs have been initialized
        print(e)
# dataset import
# train
ds_train = pd.read_csv('Y_Train_SF.csv')
y_train = list(ds_train["SF"])
filename = 'SF_Train_ProtT5.npz'
X_train = np.load(filename)['arr_0']
X_train = np.expand_dims(X_train, axis = 1)
# val
ds_val = pd.read_csv('Y_Val_SF.csv')
y_val = list(ds_val["SF"])
filename = 'SF_Val_ProtT5.npz'
X_val = np.load(filename)['arr_0']
X_val = np.expand_dims(X_val, axis = 1)
# test
ds_test = pd.read_csv('Y_Test_SF.csv')
y_test = list(ds_test["SF"])
filename = 'SF_Test_ProtT5.npz'
X_test = np.load(filename)['arr_0']
X_test = np.expand_dims(X_test, axis = 1)
# y process
y_tot = []
for i in range(len(y_train)):
y_tot.append(y_train[i])
for i in range(len(y_val)):
y_tot.append(y_val[i])
for i in range(len(y_test)):
y_tot.append(y_test[i])
le = preprocessing.LabelEncoder()
le.fit(y_tot)
y_train = np.asarray(le.transform(y_train))
y_val = np.asarray(le.transform(y_val))
y_test = np.asarray(le.transform(y_test))
num_classes = len(np.unique(y_tot))
print(num_classes)
print("Loaded X and y")
# X, y = shuffle(X, y, random_state=42)
# print("Shuffled")
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# print("Conducted Train-Test Split")
# num_classes_train = len(np.unique(y_train))
# num_classes_test = len(np.unique(y_test))
# print(num_classes_train, num_classes_test)
# assert num_classes_test == num_classes_train, "Split not conducted correctly"
# generator
def bm_generator(X_t, y_t, batch_size):
    """Endless batch generator over (X_t, y_t).

    Yields (features, one-hot labels) batches of *batch_size*, cycling
    through the data indefinitely and wrapping around mid-batch when the
    end is reached. One-hot width comes from the module-level `num_classes`.
    """
    cursor = 0
    while True:
        feature_batch, label_batch = [], []
        for _ in range(batch_size):
            if cursor == len(X_t):
                cursor = 0  # wrap around, exactly like the original
            feature_batch.append(X_t[cursor])
            onehot = np.zeros((num_classes))
            onehot[y_t[cursor]] = 1
            label_batch.append(onehot)
            cursor += 1
        yield np.asarray(feature_batch), np.asarray(label_batch)
# batch size
bs = 256
# test and train generators
# train_gen = bm_generator(X_train, y_train, bs)
# test_gen = bm_generator(X_test, y_test, bs)
# num_classes = 1707
# sensitivity metric
def sensitivity(y_true, y_pred):
    """Sensitivity (recall) metric for Keras: TP / (condition positives + eps).

    Rounds clipped products to count true positives; epsilon avoids
    division by zero when a batch contains no positives.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
# Keras NN Model
def create_model():
    """Build the ABLN classifier.

    Three stacked bidirectional LSTM(256) layers (each followed by 20%
    dropout), then sigmoid self-attention, flatten, and a softmax head
    sized by the module-level `num_classes`.
    """
    inputs = Input(shape = (3,100,))
    net = inputs
    # Three identical BiLSTM + Dropout stages.
    for _ in range(3):
        net = Bidirectional(LSTM(256, activation = 'tanh', return_sequences = True))(net)
        net = Dropout(0.2)(net)
    net = SeqSelfAttention(attention_activation='sigmoid')(net)
    net = Flatten()(net)
    outputs = Dense(num_classes, activation = 'softmax')(net)
    return Model(inputs, outputs)
# training
num_epochs = 200

with tf.device('/gpu:0'):
    # model
    model = create_model()

    # Adam optimizer.
    # FIX: the original compiled with the string "adam", which silently
    # discarded this configured 1e-5 learning rate (Keras would use the
    # default). Pass the instance so the configured LR is actually used.
    opt = keras.optimizers.Adam(learning_rate = 1e-5)
    model.compile(optimizer = opt, loss = "categorical_crossentropy", metrics=['accuracy'])

    # callbacks: checkpoint the best model by val accuracy, decay LR on plateau
    mcp_save = keras.callbacks.ModelCheckpoint('saved_models/abln_prott5.h5', save_best_only=True, monitor='val_accuracy', verbose=1)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=10, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
    callbacks_list = [reduce_lr, mcp_save]

    # test and train generators
    train_gen = bm_generator(X_train, y_train, bs)
    val_gen = bm_generator(X_val, y_val, bs)
    test_gen = bm_generator(X_test, y_test, bs)  # kept for parity with original; unused below
    history = model.fit_generator(
        train_gen,
        epochs = num_epochs,
        steps_per_epoch = math.ceil(len(X_train) / bs),
        verbose = 1,
        validation_data = val_gen,
        # FIX: was len(X_val)/bs (a float); step counts must be whole numbers.
        validation_steps = math.ceil(len(X_val) / bs),
        workers = 0,
        shuffle = True,
        callbacks = callbacks_list,
    )

    # Reload the best checkpoint; the custom attention layer must be
    # registered for deserialization.
    model = load_model('saved_models/abln_prott5.h5', custom_objects=SeqSelfAttention.get_custom_objects())

    print("Validation")
    y_pred_val = model.predict(X_val)
    f1_score_val = f1_score(y_val, y_pred_val.argmax(axis=1), average = 'weighted')
    acc_score_val = accuracy_score(y_val, y_pred_val.argmax(axis=1))
    print("F1 Score: ", f1_score_val)
    print("Acc Score", acc_score_val)

    print("Testing")
    y_pred_test = model.predict(X_test)
    f1_score_test = f1_score(y_test, y_pred_test.argmax(axis=1), average = 'weighted')
    acc_score_test = accuracy_score(y_test, y_pred_test.argmax(axis=1))
    mcc_score = matthews_corrcoef(y_test, y_pred_test.argmax(axis=1))
    bal_acc = balanced_accuracy_score(y_test, y_pred_test.argmax(axis=1))
    print("F1 Score: ", f1_score_test)
    print("Acc Score: ", acc_score_test)
    print("MCC: ", mcc_score)
    print("Bal Acc: ", bal_acc)

with tf.device('/cpu:0'):
    # Full classification report on the test set, written to disk.
    y_pred = model.predict(X_test)
    print("Classification Report Validation")
    cr = classification_report(y_test, y_pred.argmax(axis=1), output_dict = True)
    df = pd.DataFrame(cr).transpose()
    # NOTE(review): file name says CNN_BioVec but this script trains the
    # ABLN/ProtT5 model — path left unchanged so downstream tooling still
    # finds it; confirm and rename deliberately.
    df.to_csv('results/CR_CNN_BioVec.csv')
    print("Confusion Matrix")
    matrix = confusion_matrix(y_test, y_pred.argmax(axis=1))
    print(matrix)
    print("F1 Score")
    print(f1_score(y_test, y_pred.argmax(axis=1), average = 'weighted'))
'''
/saved_models/abln_protbert.h5 (Beaker)
Validation
F1 Score: 0.9609832563891313
Acc Score 0.9609223929485433
Testing
F1 Score: 0.9608121715284709
Acc Score: 0.9607542448975473
MCC: 0.9590013496263621
Bal Acc: 0.9417163143702381
'''
| 32.811159
| 242
| 0.722956
|
4a08f32339df3e9822a06eca358e0c96d052b087
| 18,113
|
py
|
Python
|
platform/psvita/cg_builders.py
|
ppiecuch/godot
|
ff2098b324b814a0d1bd9d5722aa871fc5214fab
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
platform/psvita/cg_builders.py
|
ppiecuch/godot
|
ff2098b324b814a0d1bd9d5722aa871fc5214fab
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
platform/psvita/cg_builders.py
|
ppiecuch/godot
|
ff2098b324b814a0d1bd9d5722aa871fc5214fab
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
"""Functions used to generate source files during build time
All such functions are invoked in a subprocess on Windows to prevent build flakiness.
"""
import errno
from subprocess import Popen, PIPE, STDOUT
from platform_methods import subprocess_main
excluded_cond = ["GL_ES"]
def runexe(exe, **kwargs):
    """Run *exe*, forwarding **kwargs to communicate() (e.g. input=...).

    Returns the stripped stdout+stderr as a list of lines on success,
    None when the executable does not exist (ENOENT), and an empty list
    for any other OSError.
    """
    try:
        proc = Popen(exe, stdout=PIPE, stdin=PIPE, stderr=STDOUT, universal_newlines=True)
        captured = proc.communicate(**kwargs)[0]
        return captured.strip().splitlines()
    except OSError as err:
        if err.errno != errno.ENOENT:
            return []
        return None
class LegacyCGHeaderStruct:
    """Mutable accumulator for everything parsed out of a legacy-GL shader
    file: raw per-stage source lines, uniforms, attributes, conditionals,
    enum groups, texture-unit bindings, the set of already-included files,
    and the parser's current stage/line bookkeeping.
    """

    def __init__(self):
        # All list-valued collections start empty.
        for collection in ("vertex_lines", "fragment_lines",
                           "uniforms", "attributes", "conditionals",
                           "texunits", "texunit_names",
                           "vertex_included_files", "fragment_included_files"):
            setattr(self, collection, [])
        # Enum base name -> list of enum conditional defines.
        self.enums = {}
        # Parser state: which stage is being read ("" / "vertex" / "fragment")
        # and line-number offsets for error reporting in generated code.
        self.reading = ""
        self.line_offset = 0
        self.vertex_offset = 0
        self.fragment_offset = 0
def include_file_in_legacygl_header(filename, header_data, depth):
    """ Parse one legacy-GL shader file into *header_data* (a
    LegacyCGHeaderStruct), recursively following #include directives.

    Recognizes: [vertex]/[fragment] section markers, #ifdef conditionals
    (including the _EN_ enum-group convention), uniform declarations
    (with optional `texunit:` hints), and `attrib:`-annotated attributes.
    Returns header_data.
    """
    fs = open(filename, "r")
    line = fs.readline()

    while line:
        # Stage markers switch the parser and record the line offset where
        # each stage's code starts (used in the generated header).
        if line.find("[vertex]") != -1:
            header_data.reading = "vertex"
            line = fs.readline()
            header_data.line_offset += 1
            header_data.vertex_offset = header_data.line_offset
            continue

        if line.find("[fragment]") != -1:
            header_data.reading = "fragment"
            line = fs.readline()
            header_data.line_offset += 1
            header_data.fragment_offset = header_data.line_offset
            continue

        # Inline-expand #include directives (once per file per stage).
        while line.find("#include ") != -1:
            includeline = line.replace("#include ", "").strip()[1:-1]

            import os.path

            included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
            if not included_file in header_data.vertex_included_files and header_data.reading == "vertex":
                header_data.vertex_included_files += [included_file]
                # NOTE(review): this function never returns None — a missing
                # include raises from open() instead, so this error branch
                # looks unreachable; confirm against upstream.
                if include_file_in_legacygl_header(included_file, header_data, depth + 1) is None:
                    print("Error in file '" + filename + "': #include " + includeline + "could not be found!")
            elif not included_file in header_data.fragment_included_files and header_data.reading == "fragment":
                header_data.fragment_included_files += [included_file]
                if include_file_in_legacygl_header(included_file, header_data, depth + 1) is None:
                    print("Error in file '" + filename + "': #include " + includeline + "could not be found!")

            line = fs.readline()

        # Conditionals: #ifdef FOO becomes a runtime-toggleable define.
        # Names containing _EN_ are grouped into enums keyed by the prefix.
        if line.find("#ifdef ") != -1:
            if line.find("#ifdef ") != -1:
                ifdefline = line.replace("#ifdef ", "").strip()

                if line.find("_EN_") != -1:
                    enumbase = ifdefline[: ifdefline.find("_EN_")]
                    ifdefline = ifdefline.replace("_EN_", "_")
                    line = line.replace("_EN_", "_")
                    if enumbase not in header_data.enums:
                        header_data.enums[enumbase] = []
                    if ifdefline not in header_data.enums[enumbase]:
                        header_data.enums[enumbase].append(ifdefline)
                elif not ifdefline in header_data.conditionals:
                    # Skip conditionals blacklisted for this platform.
                    if not ifdefline in excluded_cond:
                        header_data.conditionals += [ifdefline]

        # Uniforms carrying an explicit texture-unit hint ("texunit: N" or
        # "texunit: auto" in a trailing comment).
        if line.find("uniform") != -1 and line.lower().find("texunit:") != -1:
            # texture unit
            texunitstr = line[line.find(":") + 1 :].strip()
            if texunitstr == "auto":
                texunit = "-1"
            else:
                texunit = str(int(texunitstr))
            uline = line[: line.lower().find("//")]
            uline = uline.replace("uniform", "")
            uline = uline.replace("highp", "")
            uline = uline.replace(";", "")
            lines = uline.split(",")
            for x in lines:
                x = x.strip()
                x = x[x.rfind(" ") + 1 :]
                if x.find("[") != -1:
                    # unfiorm array
                    x = x[: x.find("[")]

                if not x in header_data.texunit_names:
                    header_data.texunits += [(x, texunit)]
                    header_data.texunit_names += [x]

        # Plain uniform declarations (not uniform blocks — no "{").
        elif line.find("uniform") != -1 and line.find("{") == -1 and line.find(";") != -1:
            uline = line.replace("uniform", "")
            uline = uline.replace(";", "")
            lines = uline.split(",")
            for x in lines:
                x = x.strip()
                x = x[x.rfind(" ") + 1 :]
                if x.find("[") != -1:
                    # unfiorm array
                    x = x[: x.find("[")]

                if not x in header_data.uniforms:
                    header_data.uniforms += [x]

        # Vertex attributes annotated with a "// attrib:N" binding comment.
        if line.strip().find("attribute ") == 0 and line.find("attrib:") != -1:
            uline = line.replace("in ", "")
            uline = uline.replace("attribute ", "")
            uline = uline.replace("highp ", "")
            uline = uline.replace(";", "")
            uline = uline[uline.find(" ") :].strip()

            if uline.find("//") != -1:
                name, bind = uline.split("//")
                if bind.find("attrib:") != -1:
                    name = name.strip()
                    bind = bind.replace("attrib:", "").strip()
                    header_data.attributes += [(name, bind)]

        # Strip line endings and append the line to the current stage.
        line = line.replace("\r", "")
        line = line.replace("\n", "")

        if header_data.reading == "vertex":
            header_data.vertex_lines += [line]
        if header_data.reading == "fragment":
            header_data.fragment_lines += [line]

        line = fs.readline()
        header_data.line_offset += 1

    fs.close()

    return header_data
def build_legacycg_header(filename, include, class_suffix, output_attribs):
    """ Generate a C++ shader-wrapper header (<name>.cg.gen.h) from a .glsl
    file: parses the GLSL, runs the PSVita shader_compiler (glsl2hlsl) over
    each stage, and writes a Shader<class_suffix> subclass exposing the
    parsed conditionals, uniforms, attributes, texture units and the raw
    source as byte arrays. The converted CG output is appended as a comment.
    """
    header_data = LegacyCGHeaderStruct()
    include_file_in_legacygl_header(filename, header_data, 0)

    # Run the external converter on each stage; runexe returns None when
    # the compiler binary is missing (handled at the bottom of this function).
    cg_vert_src = runexe(
        ["platform/psvita/video/tools/shader_compiler", "glsl2hlsl"], input="\n".join(header_data.vertex_lines)
    )
    cg_frag_src = runexe(
        ["platform/psvita/video/tools/shader_compiler", "glsl2hlsl"], input="\n".join(header_data.fragment_lines)
    )

    out_file = filename.replace(".glsl", ".cg") + ".gen.h"
    fd = open(out_file, "w")

    enum_constants = []

    fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")

    # Derive the include guard and class name from the output file's basename.
    out_file_base = out_file
    out_file_base = out_file_base[out_file_base.rfind("/") + 1 :]
    out_file_base = out_file_base[out_file_base.rfind("\\") + 1 :]
    out_file_ifdef = out_file_base.replace(".", "_").upper()
    fd.write("#ifndef " + out_file_ifdef + "\n")
    fd.write("#define " + out_file_ifdef + "\n")

    out_file_class = (
        out_file_base.replace(".cg.gen.h", "").title().replace("_", "").replace(".", "") + "Shader" + class_suffix
    )
    fd.write("\n\n")
    fd.write('#include "' + include + '"\n\n\n')
    fd.write("class " + out_file_class + " : public Shader" + class_suffix + " {\n\n")
    fd.write('\t virtual String get_shader_name() const { return "' + out_file_class + '"; }\n')
    fd.write("public:\n\n")

    # Emit enums for the runtime-toggleable conditionals and the uniforms.
    if header_data.conditionals:
        fd.write("\tenum Conditionals {\n")
        for x in header_data.conditionals:
            fd.write("\t\t" + x.upper() + ",\n")
        fd.write("\t};\n\n")
    if header_data.uniforms:
        fd.write("\tenum Uniforms {\n")
        for x in header_data.uniforms:
            fd.write("\t\t" + x.upper() + ",\n")
        fd.write("\t};\n\n")

    fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n")
    if header_data.conditionals:
        fd.write(
            "\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n"
        )

    # _FU macro: bail out of a setter when the uniform is absent (plus extra
    # validity checks in debug builds).
    fd.write("\t#ifdef DEBUG_ENABLED\n ")
    fd.write(
        "\t#define _FU if (get_uniform(p_uniform)<0) return; if (!is_version_valid()) return; ERR_FAIL_COND( get_active()!=this ); \n\n "
    )
    fd.write("\t#else\n ")
    fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; \n\n ")
    fd.write("\t#endif\n")

    # Typed set_uniform overloads for every scalar/vector type used by callers.
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Size2i& p_vec2) { _FU GLint vec2[2]={p_vec2.x,p_vec2.y}; glUniform2iv(get_uniform(p_uniform),1,vec2); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n"
    )
    fd.write(
        "\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n"
    )
    fd.write(
        """\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
GLfloat matrix[16]={ /* build a 16x16 matrix */
tr.basis.elements[0][0],
tr.basis.elements[1][0],
tr.basis.elements[2][0],
0,
tr.basis.elements[0][1],
tr.basis.elements[1][1],
tr.basis.elements[2][1],
0,
tr.basis.elements[0][2],
tr.basis.elements[1][2],
tr.basis.elements[2][2],
0,
tr.origin.x,
tr.origin.y,
tr.origin.z,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
"""
    )
    fd.write(
        """_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform2D& p_transform) { _FU
const Transform2D &tr = p_transform;
GLfloat matrix[16]={ /* build a 16x16 matrix */
tr.elements[0][0],
tr.elements[0][1],
0,
0,
tr.elements[1][0],
tr.elements[1][1],
0,
0,
0,
0,
1,
0,
tr.elements[2][0],
tr.elements[2][1],
0,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
"""
    )
    fd.write(
        """_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
GLfloat matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[i][j];
}
}
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}"""
    )
    fd.write("\n\n#undef _FU\n\n\n")
    fd.write("\tvirtual void init() {\n\n")

    enum_value_count = 0

    # Enum groups: pack each group's alternatives into a shared bitfield
    # starting after the plain conditionals' bits.
    if header_data.enums:
        fd.write("\t\t//Written using math, given nonstandarity of 64 bits integer constants..\n")
        fd.write("\t\tstatic const Enum _enums[]={\n")

        bitofs = len(header_data.conditionals)
        enum_vals = []

        for xv in header_data.enums:
            x = header_data.enums[xv]
            bits = 1
            amt = len(x)
            # Smallest bit width that can index all alternatives.
            while 2 ** bits < amt:
                bits += 1
            strs = "{"
            for i in range(amt):
                strs += '"#define ' + x[i] + '\\n",'

                c = {}
                c["set_mask"] = "uint64_t(" + str(i) + ")<<" + str(bitofs)
                c["clear_mask"] = (
                    "((uint64_t(1)<<40)-1) ^ (((uint64_t(1)<<" + str(bits) + ") - 1)<<" + str(bitofs) + ")"
                )
                enum_vals.append(c)
                enum_constants.append(x[i])

            strs += "NULL}"

            fd.write(
                "\t\t\t{(uint64_t(1<<" + str(bits) + ")-1)<<" + str(bitofs) + "," + str(bitofs) + "," + strs + "},\n"
            )
            bitofs += bits

        fd.write("\t\t};\n\n")

        fd.write("\t\tstatic const EnumValue _enum_values[]={\n")

        enum_value_count = len(enum_vals)
        for x in enum_vals:
            fd.write("\t\t\t{" + x["set_mask"] + "," + x["clear_mask"] + "},\n")

        fd.write("\t\t};\n\n")

    # String tables handed to setup(): conditionals, uniforms, attribute and
    # texunit pairs. NOTE(review): `conditionals_found` is filled but never
    # read afterwards — looks vestigial; confirm before removing.
    conditionals_found = []
    if header_data.conditionals:
        fd.write("\t\tstatic const char* _conditional_strings[]={\n")
        if header_data.conditionals:
            for x in header_data.conditionals:
                fd.write('\t\t\t"#define ' + x + '\\n",\n')
                conditionals_found.append(x)
        fd.write("\t\t};\n\n")
    else:
        fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")

    if header_data.uniforms:
        fd.write("\t\tstatic const char* _uniform_strings[]={\n")
        if header_data.uniforms:
            for x in header_data.uniforms:
                fd.write('\t\t\t"' + x + '",\n')
        fd.write("\t\t};\n\n")
    else:
        fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")

    if output_attribs:
        if header_data.attributes:
            fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n")
            for x in header_data.attributes:
                fd.write('\t\t\t{"' + x[0] + '",' + x[1] + "},\n")
            fd.write("\t\t};\n\n")
        else:
            fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n")

    if header_data.texunits:
        fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n")
        for x in header_data.texunits:
            fd.write('\t\t\t{"' + x[0] + '",' + x[1] + "},\n")
        fd.write("\t\t};\n\n")
    else:
        fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n")

    # Embed both stages' source as NUL-terminated byte arrays.
    fd.write("\t\tstatic const char _vertex_code[]={\n")
    for x in header_data.vertex_lines:
        for c in x:
            fd.write(str(ord(c)) + ",")
        fd.write(str(ord("\n")) + ",")
    fd.write("\t\t0};\n\n")

    fd.write("\t\tstatic const int _vertex_code_start=" + str(header_data.vertex_offset) + ";\n")

    fd.write("\t\tstatic const char _fragment_code[]={\n")
    for x in header_data.fragment_lines:
        for c in x:
            fd.write(str(ord(c)) + ",")
        fd.write(str(ord("\n")) + ",")
    fd.write("\t\t0};\n\n")

    fd.write("\t\tstatic const int _fragment_code_start=" + str(header_data.fragment_offset) + ";\n")

    # Two setup() signatures exist: the attribute-based one and the
    # enum-based one — which gets called is decided by output_attribs.
    if output_attribs:
        fd.write(
            "\t\tsetup(_conditional_strings,"
            + str(len(header_data.conditionals))
            + ",_uniform_strings,"
            + str(len(header_data.uniforms))
            + ",_attribute_pairs,"
            + str(len(header_data.attributes))
            + ", _texunit_pairs,"
            + str(len(header_data.texunits))
            + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n"
        )
    else:
        fd.write(
            "\t\tsetup(_conditional_strings,"
            + str(len(header_data.conditionals))
            + ",_uniform_strings,"
            + str(len(header_data.uniforms))
            + ",_texunit_pairs,"
            + str(len(header_data.texunits))
            + ",_enums,"
            + str(len(header_data.enums))
            + ",_enum_values,"
            + str(enum_value_count)
            + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n"
        )

    fd.write("\t}\n\n")

    if enum_constants:
        fd.write("\tenum EnumConditionals {\n")
        for x in enum_constants:
            fd.write("\t\t" + x.upper() + ",\n")
        fd.write("\t};\n\n")
        fd.write("\tvoid set_enum_conditional(EnumConditionals p_cond) { _set_enum_conditional(p_cond); }\n")

    fd.write("};\n\n")

    # Append the converter output as a comment; both are None (falsy) when
    # the shader_compiler binary was not found, so this is skipped cleanly.
    if cg_vert_src or cg_frag_src:
        fd.write("/* Automatic conversion results:\n")
        fd.write("================================\n")
        line = 1
        for ln in cg_vert_src:
            fd.write("%d: %s\n" % (line, ln))
            line = line + 1
        fd.write("--------------------------------\n")
        line = 1
        for ln in cg_frag_src:
            fd.write("%d: %s\n" % (line, ln))
            line = line + 1
        fd.write("*/\n\n")

    fd.write("#endif\n\n")

    fd.close()
def build_cg_headers(target, source, env):
    """SCons build action: emit a GLES2 shader-wrapper .cg.gen.h for every
    .glsl file in *source* (target/env are part of the SCons signature and
    are unused here)."""
    for shader_source in source:
        build_legacycg_header(
            str(shader_source), include="drivers/gles2/shader_gles2.h", class_suffix="GLES2", output_attribs=True
        )
if __name__ == "__main__":
subprocess_main(globals())
| 34.435361
| 199
| 0.565064
|
4a08f37e24e84e96806ff9d54c1c4dd992d40b5b
| 10,631
|
py
|
Python
|
clu/fs/abc.py
|
fish2000/CLU
|
80bc2df5f001b5639d79ba979e19ec77a9931425
|
[
"BSD-3-Clause"
] | 1
|
2019-07-02T08:17:59.000Z
|
2019-07-02T08:17:59.000Z
|
clu/fs/abc.py
|
fish2000/CLU
|
80bc2df5f001b5639d79ba979e19ec77a9931425
|
[
"BSD-3-Clause"
] | 13
|
2019-12-17T02:28:30.000Z
|
2021-11-17T03:46:10.000Z
|
clu/fs/abc.py
|
fish2000/CLU
|
80bc2df5f001b5639d79ba979e19ec77a9931425
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
from tempfile import _TemporaryFileWrapper as TemporaryFileWrapperBase
import abc
import clu.abstract
import collections.abc
import contextlib
import sys, os
import weakref
abstract = abc.abstractmethod
from clu.constants.consts import λ, ENCODING
from clu.constants.exceptions import FilesystemError
from clu.fs.misc import differentfile, temporary, u8str
from clu.predicates import ancestral_union
from clu.repr import strfields
from clu.typology import isnotpath
from clu.exporting import Exporter
exporter = Exporter(path=__file__)
export = exporter.decorator()
@export
class TypeLocker(abc.ABCMeta):
    """ `clu.fs.abc.TypeLocker` is a metaclass that does two things
    with the types for whom it is designated as meta:
    1) It keeps an index of those types in a dictionary member of
    the `TypeLocker` metaclass itself; and
    2) During class creation – the call to `TypeLocker.__new__(…)` –
    it installs a class method called “directory(…)” that will,
    when invoked, always return a new `Directory` instance that has
    been initialized with the one provided argument “pth” (if one
    was passed).
    … The point of this is to allow any of the classes throughout the
    “clu.fs” subpackage, regardless of where they are defined or from
    whom they inherit, to make use of cheaply-constructed `Directory`
    instances wherever convenient.
    Because the “directory(…)” method installed by `TypeLocker` performs
    a lazy-lookup of the `Directory` class, using its own type index dict,
    the order of definition does not matter i.e. the `TemporaryName` class
    (q.v. definition immediately sub.) can use Directories despite its
    definition occuring before `Directory` – in fact `TemporaryName` itself
    is utilized within at least one `Directory` method – sans any issues.
    """

    # The metaclass-internal dictionary of descendant type weakrefs:
    types = weakref.WeakValueDictionary()

    def __new__(metacls, name, bases, attributes, **kwargs):
        """ All classes are initialized with a “directory(…)”
        static method, lazily returning an instance of the
        `clu.fs.filesystem.Directory(…)` class.
        A read-only descriptor shadows the “types” attribute,
        to block access to the metaclass type-registry dict
        from generated subtypes, as well.
        """
        # Fill in the “types” attribute to prevent the metaclass’
        # registry dict from leaking into subtypes:
        attributes['types'] = clu.abstract.ValueDescriptor(tuple())
        # Always replace the “directory” method anew — the lambda looks up
        # 'Directory' in the registry only at call time:
        directory = lambda pth=None: metacls.types['Directory'](pth=pth)
        directory.__name__ = 'directory'
        directory.__qualname__ = f'{name}.directory'
        directory.__lambda_name__ = λ
        attributes['directory'] = staticmethod(directory)
        # Call up (using a vanilla attributes dict):
        cls = super(TypeLocker, metacls).__new__(metacls, name,
                                                          bases,
                                                          attributes,
                                                        **kwargs)
        # Register with `clu.fs.TypeLocker` and `os.PathLike`:
        metacls.types[name] = cls
        os.PathLike.register(cls)
        # Return the new type:
        return cls
@export
class BaseFSName(collections.abc.Hashable,
                 clu.abstract.ReprWrapper,
                 os.PathLike,
                 metaclass=TypeLocker):

    # Attribute names surfaced by to_string()/inner_repr — merged with any
    # subclass-defined `fields` via ancestral_union (q.v. to_string, sub.):
    fields = ('name', 'exists')

    @property
    @abstract
    def name(self):
        """ The instances’ target path. """
        ...

    @property
    def basename(self):
        """ The basename (aka the name of the instance, like as opposed to the
        entire fucking absolute path) of the target instance.
        """
        return os.path.basename(self.name)

    @property
    def dirname(self):
        """ The dirname (aka the path of the enclosing directory) of the target
        instance, wrapped in a new Directory instance.
        """
        return self.parent()

    @property
    def exists(self):
        """ Whether or not the instances’ target path exists as a directory. """
        return os.path.exists(self.name)

    def split(self):
        """ Return a two-tuple containing `(dirname, basename)` – like e.g.
        for `/yo/dogg/i/heard/youlike`, your return value will be like
        `(Directory("/yo/dogg/i/heard"), "youlike")`.
        """
        return self.dirname, self.basename

    def realpath(self, source=None):
        """ Sugar for calling `os.path.realpath(self.name)` with additional
        assurances that the path string in question will be UTF-8 Unicode
        data and not a byte-string type.
        """
        return u8str(
            os.path.realpath(
                os.fspath(source or self.name)))

    def parent(self):
        """ Sugar for `self.directory(os.path.abspath(os.path.dirname(self.name)))`
        …which, if you are curious, gets you the parent directory of the target
        instance, wrapped in a new `Directory` instance.
        """
        return self.directory(os.path.abspath(os.path.dirname(self.name)))

    def relparent(self, path):
        """ Relativize a path, relative to its directory parent, and return it
        as a string.
        """
        return os.path.relpath(path, start=os.path.abspath(os.path.dirname(self.name)))

    def relprefix(self, path, separator='_'):
        """ Return a “prefix” string based on a file path – the actual path
        separators are replaced with underscores, with which individual
        path segments are joined, creating a single long string that is
        unique to the original filesystem path.
        """
        return (self.relparent(path) + os.sep).replace(os.sep, separator)

    def symlink(self, destination, source=None):
        """ Create a symlink at `destination`, pointing to this instances’
        path location (or an alternative source path, if specified).
        The `destination` argument can be anything path-like: instances of
        `str`, `unicode`, `bytes`, `bytearray`, `pathlib.Path`, `os.PathLike`,
        or anything with an `__fspath__(…)` method – which this includes
        `clu.fs.filesystem.TemporaryName` and `clu.fs.filesystem.Directory`
        instances and relevant derived-type instances thereof.
        """
        if destination is None:
            raise FilesystemError("“symlink(…)” destination path cannot be None")
        target = source or self.name
        os.symlink(os.fspath(target),
                   os.fspath(destination),
                   target_is_directory=os.path.isdir(target))
        # Returns self so symlink calls can be chained:
        return self

    def close(self):
        """ Stub method -- always returns True: """
        return True

    def to_string(self):
        # Render the fields declared on this class and all ancestors:
        return strfields(self,
                         ancestral_union('fields', type(self)),
                         try_callables=False)

    def inner_repr(self):
        return self.to_string()

    def __str__(self):
        # Resolved (real) path when it exists on disk, raw name otherwise:
        if self.exists:
            return os.path.realpath(self.name)
        return self.name

    def __bytes__(self):
        return bytes(str(self), encoding=ENCODING)

    def __fspath__(self):
        # os.PathLike protocol — lets instances be passed anywhere a path goes:
        return self.name

    def __bool__(self):
        return self.exists

    def __eq__(self, other):
        # Equality means “same file on disk” (os.path.samefile), not string
        # equality; non-path operands defer via NotImplemented:
        if isnotpath(other):
            return NotImplemented
        try:
            return os.path.samefile(self.name,
                                    os.fspath(other))
        except FileNotFoundError:
            return False

    def __ne__(self, other):
        if isnotpath(other):
            return NotImplemented
        try:
            return differentfile(self.name,
                                 os.fspath(other))
        except FileNotFoundError:
            return True

    def __hash__(self):
        # Hash covers both the path and its current existence — so the hash
        # of an instance changes if the file appears or disappears:
        return hash((self.name, self.exists))
@export
class TemporaryFileWrapper(TemporaryFileWrapperBase,
                           collections.abc.Iterable,
                           contextlib.AbstractContextManager,
                           os.PathLike,
                           metaclass=TypeLocker):
    """ Local subclass of `tempfile._TemporaryFileWrapper`.
    We also inherit from both `contextlib.AbstractContextManager`
    and the `os.PathLike` abstract bases -- the latter requires
    that we implement an `__fspath__(…)` method (q.v. implementation,
    sub.) -- and additionally, `clu.fs.abc.TypeLocker` is named as
    the metaclass (q.v. metaclass `__new__(…)` implementation supra.)
    to cache its type and register it as an os.PathLike subclass.
    … Basically a better deal than the original ancestor, like
    all-around. Plus it does not have a name prefixed with an
    underscore, which if it’s not your implementation dogg that
    can be a bit lexically irritating.
    """

    def __fspath__(self):
        # os.PathLike protocol: expose the wrapped temporary file’s path.
        return self.name
# Assign the modules’ `__all__` and `__dir__` using the exporter:
__all__, __dir__ = exporter.all_and_dir()
def test():
    # Inline self-test harness for this module (uses clu's own test runner).
    # NOTE(review): indentation below was reconstructed from a flattened
    # source dump -- confirm the assert placement against the canonical file.
    from clu.testing.utils import inline
    
    @inline
    def test_one():
        """ Subclass BaseFSName and TemporaryFileWrapper """
        # Minimal concrete combination of the two abstract-ish bases:
        class TemporaryFileName(TemporaryFileWrapper, BaseFSName):
            
            def __init__(self, prefix='', suffix='tmp', mode='wb', delete=True):
                file = open(temporary(prefix=prefix, suffix=suffix), mode=mode)
                super().__init__(file,
                                 file.name,
                                 delete=delete)
            
            @property
            def name(self):
                return self.file.name
            
            @name.setter
            def name(self, value):
                # name is derived from the wrapped file; ignore assignment
                pass
            
            def to_string(self):
                return self.name
        
        with TemporaryFileName() as tf:
            # File exists while the context is open…
            assert os.path.isfile(tf.name)
        # …and is deleted on exit (delete=True).
        assert not os.path.exists(tf.name)
    
    return inline.test(100)
# Run the inline tests when executed as a script.
if __name__ == '__main__':
    sys.exit(test())
| 36.913194
| 87
| 0.594582
|
4a08f41d3ce27c636cd989a2ad7a694ad4ad4ade
| 155
|
py
|
Python
|
Tests/runlibregr.py
|
amaeckelberghe/Pyjion
|
cdf8fbd3f3808d398a71fca085420f71c7dff106
|
[
"MIT"
] | null | null | null |
Tests/runlibregr.py
|
amaeckelberghe/Pyjion
|
cdf8fbd3f3808d398a71fca085420f71c7dff106
|
[
"MIT"
] | null | null | null |
Tests/runlibregr.py
|
amaeckelberghe/Pyjion
|
cdf8fbd3f3808d398a71fca085420f71c7dff106
|
[
"MIT"
] | null | null | null |
# Run the CPython regression suite under the Pyjion JIT, then report stats.
import pyjion
import gc
# Enable the JIT before the test suite is imported/executed.
pyjion.enable()
from test.libregrtest import main
main()
# Force a collection so JIT-held garbage is released before disabling.
gc.collect()
pyjion.disable()
print("Disabling JIT")
print(pyjion.stats())
| 17.222222
| 33
| 0.774194
|
4a08f692f4091f2d1ebdeb8daf337c9b16fc8f47
| 1,112
|
py
|
Python
|
mri_works/NodeEditor/modules/Matlab/MP3_ANTS.py
|
montigno/mri_works
|
8ec6ff1500aa34d3540e44e4b0148023cf821f61
|
[
"CECILL-B"
] | 2
|
2020-08-20T21:00:53.000Z
|
2021-08-16T15:28:51.000Z
|
mri_works/NodeEditor/modules/Matlab/MP3_ANTS.py
|
montigno/mri_works
|
8ec6ff1500aa34d3540e44e4b0148023cf821f61
|
[
"CECILL-B"
] | 3
|
2020-09-24T06:50:43.000Z
|
2020-12-15T11:02:04.000Z
|
mri_works/NodeEditor/modules/Matlab/MP3_ANTS.py
|
montigno/mri_works
|
8ec6ff1500aa34d3540e44e4b0148023cf821f61
|
[
"CECILL-B"
] | 1
|
2020-08-20T21:00:59.000Z
|
2020-08-20T21:00:59.000Z
|
class MP3_Atlas_ANTS():
    """ Node wrapper that invokes MP3's `Module_Atlas_ANTs` MATLAB module
    through a live MATLAB engine handle.

    Parameters use 'path' as a sentinel default meaning "not provided".
    The engine call is made eagerly in __init__; outputs are exposed via
    `file_out1`/`file_out2`.
    """
    def __init__(self,
                 mat_eng='',
                 Nifti_in='path',
                 Ref_in='path',
                 Mask_in='path',
                 file_Atlas_name='path',
                 file_Label_name='path',
                 **options):
        import matlab.engine
        import os
        files_in, files_out = {}, {}
        # flag_test=0 tells the MP3 module to actually run (not dry-run).
        options['flag_test'] = 0
        options['Table_in'] = {}
        base = os.path.basename(Nifti_in)
        options['Table_in']['Filename'] = os.path.splitext(base)[0]
        files_in['In1'] = [Nifti_in]
        files_in['In2'] = [Ref_in]
        # The mask input is optional -- 'path' is the "unset" sentinel.
        if Mask_in != 'path':
            files_in['In3'] = [Mask_in]
        files_out['In1'] = [file_Atlas_name]
        files_out['In2'] = [file_Label_name]
        mat_eng.Module_Atlas_ANTs(files_in, files_out, options)
        # NOTE(review): this instance attribute shadows the identically
        # named method below -- after __init__, `self.mat_eng` resolves to
        # the engine handle, not the accessor. Looks like generated code;
        # confirm against the node-editor framework's conventions.
        self.mat_eng = mat_eng
        self.map = [file_Atlas_name, file_Label_name]
    # The 'str'/'path' annotations on `self` below appear to be framework
    # markers for output-port types, not real type hints.
    def mat_eng(self: 'str'):
        return self.mat_eng
    def file_out1(self: 'path'):
        # First output: the atlas file path.
        return self.map[0]
    def file_out2(self: 'path'):
        # Second output: the label file path.
        return self.map[1]
| 31.771429
| 67
| 0.528777
|
4a08f6c876bacb96603d1d6db09016f70b3becb9
| 479
|
py
|
Python
|
output/models/nist_data/list_pkg/double/schema_instance/nistschema_sv_iv_list_double_min_length_5_xsd/nistschema_sv_iv_list_double_min_length_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/double/schema_instance/nistschema_sv_iv_list_double_min_length_5_xsd/nistschema_sv_iv_list_double_min_length_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/double/schema_instance/nistschema_sv_iv_list_double_min_length_5_xsd/nistschema_sv_iv_list_double_min_length_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-double-minLength-5-NS"
@dataclass
class NistschemaSvIvListDoubleMinLength5:
    """Generated xsdata binding for the NISTSchema SV-IV list-double
    minLength test type (xs:list of xs:double)."""
    class Meta:
        name = "NISTSchema-SV-IV-list-double-minLength-5"
        namespace = "NISTSchema-SV-IV-list-double-minLength-5-NS"

    # NOTE(review): the type name says "minLength-5" but the facet here is
    # 10 -- presumably the "-5" is a test-case index, not the facet value;
    # confirm against the source XSD before assuming a generator bug.
    value: List[float] = field(
        default_factory=list,
        metadata={
            "min_length": 10,
            "tokens": True,
        }
    )
| 23.95
| 65
| 0.651357
|
4a08f7753a8b83ce1b69a2977096c0d1ecd463f5
| 10,749
|
py
|
Python
|
edith/app/ingest/ingestProcesses.py
|
BAM-PFA/edith
|
a9bca397a7878e76fd6ac148aa122f410751b32c
|
[
"BSD-2-Clause"
] | 8
|
2018-10-18T19:14:55.000Z
|
2020-07-29T08:10:46.000Z
|
edith/app/ingest/ingestProcesses.py
|
BAM-PFA/edith
|
a9bca397a7878e76fd6ac148aa122f410751b32c
|
[
"BSD-2-Clause"
] | 27
|
2018-10-06T22:50:06.000Z
|
2019-07-08T20:12:27.000Z
|
edith/app/ingest/ingestProcesses.py
|
BAM-PFA/resourcespace
|
40aeb1f40f9283d2e452e75cb98d41ea951d33a6
|
[
"BSD-2-Clause"
] | 1
|
2018-11-16T18:52:41.000Z
|
2018-11-16T18:52:41.000Z
|
#!/usr/bin/env python3
# standard library modules
import ast
import json
import os
import re
import subprocess
import sys
import uuid
# non-standard modules
from flask_login import current_user
# local modules
from . import dataSourceAccess
from . import metadataMaster
from ..pymm import ingestSip
from .. import resourcespaceFunctions
from .. import sshStuff
from .. import utils
class IngestProcess:
    """One user-initiated ingest run: records the acting user, a fresh
    UUID, and the list of Ingestible objects selected for processing."""
    def __init__(self):
        self.user = self.get_user()
        self._uuid = str(uuid.uuid4())
        self.status = None
        # this is a list of objects being ingested
        self.Ingestibles = []

    def get_user(self):
        """Return a display name for the logged-in user, falling back to
        their email when first/last name are missing."""
        userFirstName = current_user.first_name
        userLastName = current_user.last_name
        # Construct the user's full name, unless the user is missing
        # one of these values (they shouldn't be...)
        # NOTE(review): this also treats the literal string "None" as
        # missing -- presumably defends against stringified NULLs from the
        # user store; confirm.
        if not any(x in (userFirstName,userLastName) for x in ("None",None)):
            user = "{} {}".format(userFirstName,userLastName)
        else:
            # otherwise default to the user's email address
            user = current_user.email
        return user
class Ingestible:
    '''
    This is a single object selected by a user to be ingested.
    '''
    def __init__(self, inputPath):
        self.inputPath = inputPath
        # Metadata wrapper bound to this input path.
        self.metadata = metadataMaster.Metadata(self.inputPath)
        # Per-object options, filled in from the ingest form (True/None).
        self.doProres = None
        self.deliverMezzanine = None
        self.concatReels = None
        # Arguments handed to pymm via sys.argv (see set_pymm_sys_args).
        self.pymmArgv = []
        # Results populated during main(): pymm outcome + access copy path.
        self.pymmResult = None
        self.accessCopyPath = None
        # Human-readable outcome accumulators shown to the user.
        self.ingestWarnings = []
        self.ingestMessages = []
        self.status = None
def grab_remote_files(targetFilepath):
    """Rsync `targetFilepath` from the configured remote share into the
    local temp processing directory, unless it already exists locally.

    Best-effort: rsync failures are reported but never raised.
    """
    # prep credentials to grab stuff from remote shared dir
    hostName, \
    sourceDir, \
    remoteAddress, \
    remoteUser, \
    remotePassword, \
    sshKeyfile = utils.get_remote_credentials()
    processingDir = utils.get_temp_dir()
    # print(processingDir)
    # double check that it's not on the current filesystem
    if not os.path.isfile(targetFilepath):
        if not os.path.isdir(targetFilepath):
            try:
                subprocess.call([
                    'rsync','-rtvPihe','ssh',
                    '{0}@{1}:{2}'.format(remoteUser,remoteAddress,targetFilepath),
                    processingDir
                ])
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt and hid the actual error.
            except Exception as err:
                print("Couldn't rsync the file...")
                print(err)
    else:
        print(
            "Your files are already on the current filesystem, "
            "so don't need to rsync anything."
        )
def add_metadata(CurrentIngest):
    """For each Ingestible, fetch metadata from the selected BAMPFA data
    source (source id 0 means "none"), then normalize and persist it."""
    for _object in CurrentIngest.Ingestibles:
        metadataSourceID = int(_object.metadata.metadataSource)
        if not metadataSourceID == 0:
            dataSourceAccessDetails = dataSourceAccess.main(metadataSourceID)
        else:
            dataSourceAccessDetails = None
        # go get some metadata
        _object.metadata.fetch_metadata(dataSourceAccessDetails)
        if _object.metadata.retrievedExternalMetadata == True:
            print("HELLO THERE WE ADDED METADATA!")
        else:
            _object.ingestWarnings.append(
                "Note: we did not retrieve metadata from any BAMPFA "\
                "database."
            )
        _object.metadata.set_hasBAMPFAmetadata()
        _object.metadata.clear_empty_metadata_fields()
        # Only write a sidecar JSON when there is real BAMPFA metadata.
        if _object.metadata.innerMetadataDict['hasBAMPFAmetadata'] == True:
            _object.metadata.write_json_file()
def set_pymm_sys_args(CurrentIngest,_object):
    """Build the argv list handed to pymm's ingestSip via sys.argv.

    Always includes the input path (-i), the acting user (-u), and the
    'report to db + delete originals' flag (-dz); adds '-j <json>' when a
    sidecar metadata file exists on disk, and '-c' when reels should be
    concatenated.
    """
    argv = ['', '-i', _object.inputPath, '-u', CurrentIngest.user, '-dz']
    _object.pymmArgv = argv
    sidecar = _object.metadata.metadataJSONpath
    # NOTE(review): structure reconstructed from a flattened dump -- the
    # -j flag is only passed when the sidecar JSON actually exists.
    if os.path.isfile(sidecar):
        # pymm runs out-of-process, so the JSON must be world-readable.
        os.chmod(sidecar, 0o777)
        argv.extend(['-j', sidecar])
    if _object.concatReels:
        argv.extend(['-c'])
def parse_raw_ingest_form(formData,CurrentIngest):
    '''
    Logic to parse the raw form data from ingest.views.status

    Walks the flat form key/value pairs, bucketing selections (runIngest),
    paths (targetPath), per-object boolean options, metadata source picks,
    and free-text metadata entries; then builds one Ingestible per selected
    object and attaches it to CurrentIngest.
    '''
    # NOTE(review): indentation reconstructed from a flattened dump --
    # verify nesting against the canonical source.
    results = {}
    toIngest =[]
    targetPaths = []
    doProresYES = []
    proresToDaveYES = []
    doConcatYES =[]
    metadataSourceSelection = {}
    metadataEntries = {}
    for key, value in formData.items():
        # get names/paths of files we actually want to process
        if 'runIngest' in key:
            toIngest.append(key.replace('runIngest-',''))
        # targetPath is the path of the item coming from the form
        # I think targetPath includes *all the things* from the list,
        # not just selected ones
        elif 'targetPath' in key:
            targetPaths.append(value[0])
        elif 'doProres' in key:
            doProresYES.append(key.replace('doProres-',''))
        elif 'proresToDave' in key:
            proresToDaveYES.append(key.replace('proresToDave-',''))
        elif 'doConcat' in key:
            doConcatYES.append(key.replace('doConcat-',''))
        elif 'metadataSource' in key:
            pattern = r'(metadataSource-)(.*)'
            mySearch = re.search(pattern,key)
            theObject = mySearch.group(2)
            metadataSourceSelection[theObject] = value[0]
        # start trawling for metadata entries
        # skip entries that are blank
        # -> n.b. this should result in no userMetadata dict
        #    if there isn't any user md
        elif 'metadataForm' in key and not value == ['']:
            # print(key)
            # get the field label and object via regex
            pattern = r'(metadataForm-)([a-zA-Z0-9_]+)(-)(.*)'
            fieldSearch = re.search(pattern,key)
            # raw fields are formed as userMD_1_eventLocation
            field = re.sub(r"(userMD_)(\d)(_)", '', fieldSearch.group(2))
            theObject = fieldSearch.group(4)
            # print(field,theObject)
            if not theObject in metadataEntries:
                # see if its been added, if not make a new temp dict
                metadataEntries[theObject] = {}
                # `value` here is returned as a list
                # from the metadata FormField
                metadataEntries[theObject][field] = value[0]
            else:
                metadataEntries[theObject][field] = value[0]
    for _object in toIngest:
        # build a dict of files:options
        for _path in targetPaths:
            if _object == os.path.basename(_path):
                ingestMe = Ingestible(_path)
                if _object in metadataEntries:
                    # this line actually adds the user metadata to the
                    # object that's selected
                    ingestMe.metadata.add_more_metadata(
                        metadataEntries[_object]
                    )
                    # print(ingestMe.metadata.innerMetadataDict)
                if _object in metadataSourceSelection:
                    ingestMe.metadata.metadataSource = \
                        metadataSourceSelection[_object]
                CurrentIngest.Ingestibles.append(ingestMe)
    # add boolean options to dict
    for ingestible in CurrentIngest.Ingestibles:
        if ingestible.metadata.basename in doProresYES:
            ingestible.doProres = True
        if ingestible.metadata.basename in proresToDaveYES:
            ingestible.deliverMezzanine = True
        if ingestible.metadata.basename in doConcatYES:
            ingestible.concatReels = True
    return CurrentIngest
def main(CurrentIngest):
    """Run the full ingest pipeline for every Ingestible on CurrentIngest:
    fetch metadata, call pymm's ingestSip, write the sidecar JSON, then
    push access copies + metadata to ResourceSpace.

    NOTE(review): indentation reconstructed from a flattened dump --
    verify nesting (especially the try/except blocks) against the
    canonical source.
    """
    # TAKE IN AN `INGEST` OBJECT
    # IT SHOULD CONTAIN AT LEAST ONE `INGESTIBLE`
    # run `pymm` on Ingestibles
    # post access copies to resourcespace
    # GET THE PYMM PATH TO CALL IN A SEC
    pymmPath = utils.get_pymm_path()
    ingestSipPath = os.path.join(pymmPath,'ingestSip.py')
    print("INGEST LOOKS LIKE THIS NOW")
    for item in CurrentIngest.Ingestibles:
        print(item.inputPath)
        print(item.metadata.metadataDict)
        print("------")
    # get the hostname of the shared dir:
    _, hostName, _ = utils.get_shared_dir_stuff('shared')
    ####################
    ##### FETCH METADATA
    ####################
    add_metadata(CurrentIngest)
    ##############
    #### CALL PYMM
    ##############
    if not hostName == 'localhost':
        '''
        THIS SECTION IS DEAD... WOULD NEED TO BE REVISED
        IF IT EVER SEEMED LIKE WE WANT TO MESS WITH REMOTE SHARED DIR
        for objectPath in ingestDict.keys():
            try:
                grab_remote_files(objectPath)
            except:
                print("no dice.")
        '''
    else:
        for _object in CurrentIngest.Ingestibles:
            metadataJSONpath = _object.metadata.metadataJSONpath
            set_pymm_sys_args(CurrentIngest,_object)
            pymmIngest = None
            try:
                # pymm reads its options from sys.argv.
                sys.argv = _object.pymmArgv
                pymmIngest = ingestSip.main()
                _object.pymmIngest = pymmIngest
                _object.pymmResult = pymmIngest.ingestResults
                # print("PYMM OUTPUT\n",_object.pymmResult)
                # sys.exit()
                # now work on metadata
                if not _object.pymmResult['status'] == False:
                    _object.ingestMessages.append(
                        'Archival information package'\
                        ' creation succeeeded'
                    )
                    if _object.pymmResult['notes'] != '':
                        _object.ingestWarnings.append(
                            _object.pymmResult['notes']
                        )
                    # get the UUID,
                    # which we'll add to the metadata file in a sec
                    ingestUUID = _object.pymmResult['ingestUUID']
                    canonicalName = _object.pymmIngest.InputObject.canonicalName
                    inputType = _object.pymmIngest.InputObject.inputTypeDetail
                    try:
                        with open(metadataJSONpath,'r+') as mdread:
                            # print('opened the md file')
                            data = json.load(mdread)
                            key = list(data.keys())[0]
                            data[key]['metadata']['ingestUUID'] = ingestUUID
                            data[key]['metadata']['canonicalName'] = canonicalName
                            data[key]['metadata']['sourceInputType'] = inputType
                            theGoods = data[key]['metadata']
                            # also update the Ingestible attributes
                            _object.metadata.innerMetadataDict = theGoods
                        with open(metadataJSONpath,'w+') as mdwrite:
                            json.dump(theGoods,mdwrite)
                            # print('wrote to the md file')
                        _object.ingestMessages.append(
                            'Added metadata to sidecar JSON file: {}'.format(
                                metadataJSONpath
                            )
                        )
                    except:
                        _object.ingestWarnings.append(
                            'Warning: Problem writing to JSON metadata file:'\
                            ' {}.\nCheck file/folder permissions.'.format(
                                metadataJSONpath
                            )
                        )
                else:
                    _object.ingestWarnings.append(
                        "Warning: "+str(_object.pymmResult['abortReason'])
                    )
            except Exception as e:
                print(e)
                _object.ingestWarnings.append(
                    'Warning: Archival information package'\
                    ' creation failed'
                )
            # print(_object.ingestWarnings,_object.ingestMessages)
            ########################
            #### RESOURCESPACE STUFF
            ########################
            rsDir = utils.get_rs_dir()
            if _object.pymmResult != None and pymmIngest != None:
                if _object.pymmResult['status'] != False:
                    _object.accessCopyPath = pymmIngest.accessDelivery
                    basename = _object.metadata.basename
                    if os.path.exists(_object.accessCopyPath):
                        # print("WOOOT")
                        # rsStatus is True/False result
                        rsStatus = resourcespaceFunctions.do_resourcespace(
                            _object
                        )
                        if rsStatus:
                            _object.ingestMessages.append(
                                'Added proxy file(s) '\
                                'and metadata to resourcespace'
                            )
                        else:
                            _object.ingestWarnings.append(
                                'Warning: Problem sending file or metadata '\
                                'or both to ResourceSpace.'
                            )
                    else:
                        print("PROXY FILE PATH PROBLEMO")
                        _object.ingestWarnings.append(
                            "Warning: Problem accessing the resourcespace "\
                            "proxy file. Maybe it didn't get created? "\
                            "Maybe check folder permissions."
                        )
            else:
                pass
            _object.metadata.delete_temp_JSON_file()
    return(CurrentIngest)
| 29.051351
| 71
| 0.690948
|
4a08f8d6ce0c1ab7e3368c80009caf081abad6f6
| 4,225
|
py
|
Python
|
lib/spack/spack/schema/gitlab_ci.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
lib/spack/spack/schema/gitlab_ci.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
lib/spack/spack/schema/gitlab_ci.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Schema for gitlab-ci.yaml configuration file.
.. literalinclude:: ../spack/schema/gitlab_ci.py
:lines: 13-
"""
from llnl.util.lang import union_dicts
# A container image spec: either a plain image name, or an object with a
# name plus optional entrypoint argv.
image_schema = {
    'oneOf': [
        {
            'type': 'string'
        }, {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'entrypoint': {
                    'type': 'array',
                    'items': {
                        'type': 'string',
                    },
                },
            },
        },
    ],
}

# Per-runner job attributes shared by every schema variant below.
runner_attributes_schema_items = {
    'image': image_schema,
    'tags': {
        'type': 'array',
        'items': {'type': 'string'}
    },
    'variables': {
        'type': 'object',
        'patternProperties': {
            r'[\w\d\-_\.]+': {
                'type': 'string',
            },
        },
    },
    'before_script': {
        'type': 'array',
        'items': {'type': 'string'}
    },
    'script': {
        'type': 'array',
        'items': {'type': 'string'}
    },
    'after_script': {
        'type': 'array',
        'items': {'type': 'string'}
    },
}

# A runner selection: the attributes above, with 'tags' required.
runner_selector_schema = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['tags'],
    'properties': runner_attributes_schema_items,
}

# Properties common to both top-level gitlab-ci variants.
core_shared_properties = union_dicts(
    runner_attributes_schema_items,
    {
        'bootstrap': {
            'type': 'array',
            'items': {
                'anyOf': [
                    {
                        'type': 'string',
                    }, {
                        'type': 'object',
                        'additionalProperties': False,
                        'required': ['name'],
                        'properties': {
                            'name': {
                                'type': 'string',
                            },
                            'compiler-agnostic': {
                                'type': 'boolean',
                                'default': False,
                            },
                        },
                    },
                ],
            },
        },
        'mappings': {
            'type': 'array',
            'items': {
                'type': 'object',
                'additionalProperties': False,
                'required': ['match'],
                'properties': {
                    'match': {
                        'type': 'array',
                        'items': {
                            'type': 'string',
                        },
                    },
                    'runner-attributes': runner_selector_schema,
                },
            },
        },
        'service-job-attributes': runner_selector_schema,
        'rebuild-index': {'type': 'boolean'},
        'broken-specs-url': {'type': 'string'},
    },
)

# The two variants are mutually exclusive: artifacts buildcache vs.
# temporary storage URL prefix.
gitlab_ci_properties = {
    'anyOf': [
        {
            'type': 'object',
            'additionalProperties': False,
            'required': ['mappings'],
            'properties': union_dicts(
                core_shared_properties,
                {
                    'enable-artifacts-buildcache': {
                        'type': 'boolean',
                    },
                },
            ),
        },
        {
            'type': 'object',
            'additionalProperties': False,
            'required': ['mappings'],
            'properties': union_dicts(
                core_shared_properties,
                {
                    'temporary-storage-url-prefix': {
                        'type': 'string',
                    },
                },
            ),
        },
    ]
}

#: Properties for inclusion in other schemas
properties = {
    'gitlab-ci': gitlab_ci_properties,
}

#: Full schema with metadata
schema = {
    '$schema': 'http://json-schema.org/draft-07/schema#',
    'title': 'Spack gitlab-ci configuration file schema',
    'type': 'object',
    'additionalProperties': False,
    'properties': properties,
}
| 26.080247
| 73
| 0.394083
|
4a08f9f69fdb38c38e890667963a0947e47843c6
| 1,323
|
py
|
Python
|
setup.py
|
kapetan/MicroPython_VL53L0X
|
1cb1518e44f92c4b4b7942239018b1a8f304063c
|
[
"MIT"
] | null | null | null |
setup.py
|
kapetan/MicroPython_VL53L0X
|
1cb1518e44f92c4b4b7942239018b1a8f304063c
|
[
"MIT"
] | null | null | null |
setup.py
|
kapetan/MicroPython_VL53L0X
|
1cb1518e44f92c4b4b7942239018b1a8f304063c
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# To use a consistent encoding
import codecs
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with codecs.open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="micropython-vl53l0x",
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="CircuitPython library for VL53L0X time of flight distance sensor.",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/kapetan/MicroPython_VL53L0X",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Hardware",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
keywords="vl53l0x time flight distance sensor"
"breakout hardware micropython",
py_modules=find_packages()
)
| 31.5
| 84
| 0.690098
|
4a08faa94378f1301dfe343e996cb51e27dd1143
| 3,315
|
py
|
Python
|
tools/mo/openvino/tools/mo/subprocess_main.py
|
pfinashx/openvino
|
1d417e888b508415510fb0a92e4a9264cf8bdef7
|
[
"Apache-2.0"
] | 1
|
2022-02-26T17:33:44.000Z
|
2022-02-26T17:33:44.000Z
|
tools/mo/openvino/tools/mo/subprocess_main.py
|
pfinashx/openvino
|
1d417e888b508415510fb0a92e4a9264cf8bdef7
|
[
"Apache-2.0"
] | 18
|
2022-01-21T08:42:58.000Z
|
2022-03-28T13:21:31.000Z
|
tools/mo/openvino/tools/mo/subprocess_main.py
|
pfinashx/openvino
|
1d417e888b508415510fb0a92e4a9264cf8bdef7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
import os
import subprocess
from multiprocessing import Process, Queue
import sys
def check_python_version():
    """
    Checks python version to be greater or equal than 3.4
    :return: exit code (1 - error, None - successful)
    """
    minimum = (3, 4)
    if sys.version_info < minimum:
        print('Python version should be of version 3.4 or newer')
        return 1
def log_ie_not_found():
    """Log an error explaining that the Inference Engine / nGraph Python
    APIs could not be found, with a platform-appropriate script suffix."""
    # BUG FIX: `sys.platform` is "win32" on Windows -- it is never the
    # string "windows" -- so the old comparison always picked "sh".
    log.error("Could not find the Inference Engine or nGraph Python API.\n"
              "Consider building the Inference Engine and nGraph Python APIs"
              " from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
              .format("bat" if sys.platform == "win32" else "sh"))
def log_mo_root_dir_not_found():
    """Log an error explaining the Model Optimizer root module directory
    could not be located."""
    message = ("Could not find the ModelOptimizer root module directory.\n"
               "Consider setting PYTHONPATH to the openvino tools folder (usually openvino/tools/mo)")
    log.error(message)
def setup_env():
    """Validate the interpreter, make sure openvino.tools.mo is importable
    in child processes (adjusting PYTHONPATH if needed), and verify the
    Inference Engine Python API is available. Exits the process on failure;
    returns True on success."""
    ret_code = check_python_version()
    if ret_code:
        sys.exit(ret_code)
    mo_root_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)
    # Check that MO root directory already set to the PYTHONPATH
    status = subprocess.run([sys.executable, "-c", "try: import openvino.tools.mo \nexcept: exit(1)"], env=os.environ)
    if status.returncode != 0:
        # If no, we try to set it manually based on relative path
        python_path_key = 'PYTHONPATH'
        if python_path_key not in os.environ:
            os.environ[python_path_key] = mo_root_path
        else:
            os.environ[python_path_key] = os.pathsep.join([os.environ[python_path_key], mo_root_path])
        sys.path.append(mo_root_path)
        # Re-check with the amended environment.
        status = subprocess.run([sys.executable, "-c", "try: import openvino.tools.mo \nexcept: exit(1)"], env=os.environ)
        if status.returncode != 0:
            log_mo_root_dir_not_found()
            sys.exit(1)
    ie_found = True
    try:
        from openvino.tools.mo.utils.find_ie_version import find_ie_version  # pylint: disable=no-name-in-module
        ie_found = find_ie_version(silent=True)
    except Exception as e:
        log.error(e)
        ie_found = False
    if not ie_found:
        log_ie_not_found()
        sys.exit(1)
    return True
def subprocess_main(framework=None):
    """
    Please keep this file compatible with python2 in order to check user python version.

    This function checks that Inference Engine Python API available and working as expected
    and then in sub-process it executes main_<fw>.py files. Due to some OSs specifics we can't
    just add paths to Python modules and libraries into current env. So to make Inference Engine
    Python API to be available inside MO we need to use subprocess with new env.
    """
    setup_env()
    # Pick the framework-specific entry point, or the generic main.py.
    path_to_main = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                'main_{}.py'.format(framework) if framework else 'main.py')
    # python2 compatible code. Do not remove.
    args = [sys.executable, path_to_main]
    for arg in sys.argv[1:]:
        args.append(arg)
    # Run in a fresh subprocess so the amended env (PYTHONPATH) takes effect.
    status = subprocess.run(args, env=os.environ)
    sys.exit(status.returncode)
| 35.645161
| 122
| 0.669985
|
4a08fc555598c702718c5fc896a6a45d227b5f79
| 3,569
|
py
|
Python
|
galaxies/galaxies.py
|
philrosenfield/ResolvedStellarPops
|
ab24083ae5080545165ccf7589d5a22c7989ce75
|
[
"BSD-3-Clause"
] | null | null | null |
galaxies/galaxies.py
|
philrosenfield/ResolvedStellarPops
|
ab24083ae5080545165ccf7589d5a22c7989ce75
|
[
"BSD-3-Clause"
] | null | null | null |
galaxies/galaxies.py
|
philrosenfield/ResolvedStellarPops
|
ab24083ae5080545165ccf7589d5a22c7989ce75
|
[
"BSD-3-Clause"
] | null | null | null |
'''
wrapper for lists of galaxy objects, each method returns lists, unless they
are setting attributes.
'''
import numpy as np
import itertools
from .starpop import StarPop
__all__ = ['Galaxies']
class Galaxies(StarPop):
    '''
    wrapper for lists of galaxy objects, each method returns lists, unless they
    are setting attributes.
    '''
    def __init__(self, galaxy_objects):
        # Keep galaxies in an ndarray so fancy-indexing (see squish) works.
        self.galaxies = np.asarray(galaxy_objects)
        self.filter1s = np.unique([g.filter1 for g in galaxy_objects])
        self.filter2s = np.unique([g.filter2 for g in galaxy_objects])

    def sum_attr(self, *attrs):
        # For every (attr, galaxy) pair, store sum_<attr> on the galaxy.
        for attr, g in itertools.product(attrs, self.galaxies):
            g.__setattr__('sum_%s' % attr, np.sum(g.data.get_col(attr)))

    def all_stages(self, *stages):
        '''
        adds the indices of any stage as attributes to galaxy.
        If the stage isn't found, -1 is returned.
        '''
        [g.all_stages(*stages) for g in self.galaxies]
        return

    def squish(self, *attrs, **kwargs):
        '''
        concatenates an attribute or many attributes and adds them to galaxies
        instance -- with an 's' at the end to pluralize them... that might
        be stupid.
        ex
        for gal in gals:
            gal.ra = gal.data['ra']
            gal.dec = gal.data['dec']
        gs =  Galaxies.galaxies(gals)
        gs.squish('color', 'mag2', 'ra', 'dec')
        gs.ras ...
        '''
        inds = kwargs.get('inds', np.arange(len(self.galaxies)))
        new_attrs = kwargs.get('new_attrs', None)

        if new_attrs is not None:
            assert len(new_attrs) == len(attrs), \
                'new attribute titles must be list same length as given attributes.'

        for i, attr in enumerate(attrs):
            # do we have a name for the new attribute?
            if new_attrs is not None:
                new_attr = new_attrs[i]
            else:
                new_attr = '%ss' % attr

            new_list = [g.__getattribute__(attr) for g in self.galaxies[inds]]
            # is attr an array of arrays, or is it now an array?
            try:
                new_val = np.concatenate(new_list)
            except ValueError:
                new_val = np.array(new_list)

            self.__setattr__(new_attr, new_val)

    def finite_key(self, key):
        # Galaxies whose `key` attribute is a finite number.
        return [g for g in self.galaxies if np.isfinite(g.__dict__[key])]

    def select_on_key(self, key, val):
        ''' ex filter2 == F814W works great with strings or exact g.key==val.
        rounds z to four places, no error handling.
        '''
        key = key.lower()
        if key == 'z':
            gs = [g for g in self.galaxies if
                  np.round(g.__dict__[key], 4) == val]
        else:
            gs = [g for g in self.galaxies if g.__dict__[key] == val]

        return gs

    def group_by_z(self):
        # Returns None (implicitly) when `zs` was never squished on.
        if not hasattr(self, 'zs'):
            return
        zsf = self.zs[np.isfinite(self.zs)]
        d = {}
        for z in zsf:
            key = 'Z%.4f' % z
            d[key] = self.select_on_key('z', z)

        d['no z'] = [g for g in self.galaxies if np.isnan(g.z)]
        return d

    def intersection(self, **kwargs):
        '''
        ex kwargs = {'filter2':'F814W', 'filter1':'F555W'}
        will return a list of galaxy objects that match all kwarg values.
        '''
        gs_tmp = self.galaxies
        gs = [self.select_on_key(k, v) for k, v in kwargs.items()]
        for i in range(len(gs)):
            gs_tmp = list(set(gs_tmp) & set(gs[i]))
        return gs_tmp
| 33.046296
| 84
| 0.564864
|
4a08fe108edd6a35f797954c4bd83515300cae14
| 9,124
|
py
|
Python
|
tests/test_ec2/test_internet_gateways.py
|
argos83/moto
|
d3df810065c9c453d40fcc971f9be6b7b2846061
|
[
"Apache-2.0"
] | 1
|
2021-03-06T22:01:41.000Z
|
2021-03-06T22:01:41.000Z
|
tests/test_ec2/test_internet_gateways.py
|
marciogh/moto
|
d3df810065c9c453d40fcc971f9be6b7b2846061
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ec2/test_internet_gateways.py
|
marciogh/moto
|
d3df810065c9c453d40fcc971f9be6b7b2846061
|
[
"Apache-2.0"
] | 1
|
2017-10-19T00:53:28.000Z
|
2017-10-19T00:53:28.000Z
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import re
import boto
from boto.exception import EC2ResponseError, JSONResponseError
import sure # noqa
from moto import mock_ec2
VPC_CIDR="10.0.0.0/16"
BAD_VPC="vpc-deadbeef"
BAD_IGW="igw-deadbeef"
@mock_ec2
def test_igw_create():
    """ internet gateway create: dry-run raises, real call registers an
    IGW with no attachments """
    conn = boto.connect_vpc('the_key', 'the_secret')

    conn.get_all_internet_gateways().should.have.length_of(0)

    with assert_raises(JSONResponseError) as ex:
        igw = conn.create_internet_gateway(dry_run=True)
    ex.exception.reason.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set')

    igw = conn.create_internet_gateway()
    conn.get_all_internet_gateways().should.have.length_of(1)
    igw.id.should.match(r'igw-[0-9a-f]+')

    igw = conn.get_all_internet_gateways()[0]
    igw.attachments.should.have.length_of(0)
@mock_ec2
def test_igw_attach():
    """ internet gateway attach: dry-run raises; real attach records the
    VPC id on the gateway """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()
    vpc = conn.create_vpc(VPC_CIDR)

    with assert_raises(JSONResponseError) as ex:
        conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True)
    ex.exception.reason.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set')

    conn.attach_internet_gateway(igw.id, vpc.id)

    igw = conn.get_all_internet_gateways()[0]
    igw.attachments[0].vpc_id.should.be.equal(vpc.id)
@mock_ec2
def test_igw_attach_bad_vpc():
    """ internet gateway fail to attach w/ bad vpc id """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()

    with assert_raises(EC2ResponseError) as cm:
        conn.attach_internet_gateway(igw.id, BAD_VPC)
    cm.exception.code.should.equal('InvalidVpcID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_attach_twice():
    """ internet gateway fail to attach to a second VPC while attached """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()
    vpc1 = conn.create_vpc(VPC_CIDR)
    vpc2 = conn.create_vpc(VPC_CIDR)
    conn.attach_internet_gateway(igw.id, vpc1.id)

    with assert_raises(EC2ResponseError) as cm:
        conn.attach_internet_gateway(igw.id, vpc2.id)
    cm.exception.code.should.equal('Resource.AlreadyAssociated')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach():
    """ internet gateway detach: dry-run raises; real detach clears
    attachments """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()
    vpc = conn.create_vpc(VPC_CIDR)
    conn.attach_internet_gateway(igw.id, vpc.id)

    with assert_raises(JSONResponseError) as ex:
        conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True)
    ex.exception.reason.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set')

    conn.detach_internet_gateway(igw.id, vpc.id)
    igw = conn.get_all_internet_gateways()[0]
    igw.attachments.should.have.length_of(0)
@mock_ec2
def test_igw_detach_wrong_vpc():
    """ internet gateway fail to detach from a VPC it is not attached to """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()
    vpc1 = conn.create_vpc(VPC_CIDR)
    vpc2 = conn.create_vpc(VPC_CIDR)
    conn.attach_internet_gateway(igw.id, vpc1.id)

    with assert_raises(EC2ResponseError) as cm:
        conn.detach_internet_gateway(igw.id, vpc2.id)
    cm.exception.code.should.equal('Gateway.NotAttached')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach_invalid_vpc():
    """ internet gateway fail to detach w/ invalid vpc id """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()
    vpc = conn.create_vpc(VPC_CIDR)
    conn.attach_internet_gateway(igw.id, vpc.id)

    with assert_raises(EC2ResponseError) as cm:
        conn.detach_internet_gateway(igw.id, BAD_VPC)
    cm.exception.code.should.equal('Gateway.NotAttached')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach_unattached():
    """ internet gateway fail to detach when never attached """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw = conn.create_internet_gateway()
    vpc = conn.create_vpc(VPC_CIDR)

    with assert_raises(EC2ResponseError) as cm:
        conn.detach_internet_gateway(igw.id, vpc.id)
    cm.exception.code.should.equal('Gateway.NotAttached')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_delete():
    """ internet gateway delete: dry-run raises; real delete removes it """
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc(VPC_CIDR)
    conn.get_all_internet_gateways().should.have.length_of(0)
    igw = conn.create_internet_gateway()
    conn.get_all_internet_gateways().should.have.length_of(1)

    with assert_raises(JSONResponseError) as ex:
        conn.delete_internet_gateway(igw.id, dry_run=True)
    ex.exception.reason.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set')

    conn.delete_internet_gateway(igw.id)
    conn.get_all_internet_gateways().should.have.length_of(0)
@mock_ec2
def test_igw_delete_attached():
    """An attached internet gateway cannot be deleted (DependencyViolation)."""
    ec2 = boto.connect_vpc('the_key', 'the_secret')
    gateway = ec2.create_internet_gateway()
    vpc = ec2.create_vpc(VPC_CIDR)
    ec2.attach_internet_gateway(gateway.id, vpc.id)

    with assert_raises(EC2ResponseError) as cm:
        ec2.delete_internet_gateway(gateway.id)
    cm.exception.code.should.equal('DependencyViolation')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
# NOTE(review): function name has a typo ("desribe"); kept as-is since
# renaming a collected test changes the suite's reported test ids.
@mock_ec2
def test_igw_desribe():
    """An internet gateway can be fetched by its id."""
    ec2 = boto.connect_vpc('the_key', 'the_secret')
    gateway = ec2.create_internet_gateway()
    fetched = ec2.get_all_internet_gateways([gateway.id])[0]
    gateway.id.should.equal(fetched.id)
# NOTE(review): function name has a typo ("desribe"); kept for test-id stability.
@mock_ec2
def test_igw_desribe_bad_id():
    """Fetching by a nonexistent id raises InvalidInternetGatewayID.NotFound."""
    ec2 = boto.connect_vpc('the_key', 'the_secret')
    with assert_raises(EC2ResponseError) as cm:
        ec2.get_all_internet_gateways([BAD_IGW])
    cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_filter_by_vpc_id():
    """Filtering by attachment.vpc-id returns only the attached gateway."""
    ec2 = boto.connect_vpc('the_key', 'the_secret')
    attached_igw = ec2.create_internet_gateway()
    # a second, unattached gateway must be excluded by the filter
    unattached_igw = ec2.create_internet_gateway()
    vpc = ec2.create_vpc(VPC_CIDR)
    ec2.attach_internet_gateway(attached_igw.id, vpc.id)

    result = ec2.get_all_internet_gateways(filters={"attachment.vpc-id": vpc.id})
    result.should.have.length_of(1)
    result[0].id.should.equal(attached_igw.id)
@mock_ec2
def test_igw_filter_by_tags():
    """ internet gateway filter by tags """
    conn = boto.connect_vpc('the_key', 'the_secret')
    igw1 = conn.create_internet_gateway()
    # second gateway is untagged, so the filter must exclude it
    igw2 = conn.create_internet_gateway()
    igw1.add_tag("tests", "yes")
    result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"})
    result.should.have.length_of(1)
    result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_internet_gateway_id():
    """Filtering by internet-gateway-id returns exactly that gateway."""
    ec2 = boto.connect_vpc('the_key', 'the_secret')
    wanted = ec2.create_internet_gateway()
    # a second gateway must not match the id filter
    other = ec2.create_internet_gateway()

    result = ec2.get_all_internet_gateways(filters={"internet-gateway-id": wanted.id})
    result.should.have.length_of(1)
    result[0].id.should.equal(wanted.id)
@mock_ec2
def test_igw_filter_by_attachment_state():
    """Filtering by attachment.state=available returns only attached gateways."""
    ec2 = boto.connect_vpc('the_key', 'the_secret')
    attached_igw = ec2.create_internet_gateway()
    # this gateway stays detached and must be filtered out
    detached_igw = ec2.create_internet_gateway()
    vpc = ec2.create_vpc(VPC_CIDR)
    ec2.attach_internet_gateway(attached_igw.id, vpc.id)

    result = ec2.get_all_internet_gateways(filters={"attachment.state": "available"})
    result.should.have.length_of(1)
    result[0].id.should.equal(attached_igw.id)
| 36.350598
| 179
| 0.738053
|
4a08fea9ab9a9c47049abaa03a1e55fb84be7817
| 1,330
|
py
|
Python
|
app/api/events.py
|
benranderson/run
|
c2079a61b16311646e88494f9ac4742c970fec98
|
[
"MIT"
] | null | null | null |
app/api/events.py
|
benranderson/run
|
c2079a61b16311646e88494f9ac4742c970fec98
|
[
"MIT"
] | 22
|
2018-01-23T15:20:14.000Z
|
2018-03-01T23:44:12.000Z
|
app/api/events.py
|
benranderson/run
|
c2079a61b16311646e88494f9ac4742c970fec98
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request, url_for
from app import db
from app.api import api
from app.models import Event
@api.route('/events/<int:id>', methods=['GET'])
def get_event(id):
    """Return a single event as JSON, or 404 if the id is unknown."""
    event = Event.query.get_or_404(id)
    return jsonify(event.to_dict())
@api.route('/events', methods=['GET'])
def get_events():
    """Return a paginated collection of events as JSON.

    Query args: `page` (default 1) and `per_page` (default 10, capped at 100).
    """
    page = request.args.get('page', 1, type=int)
    requested = request.args.get('per_page', 10, type=int)
    per_page = min(requested, 100)  # cap page size to protect the server
    return jsonify(
        Event.to_collection_dict(Event.query, page, per_page, 'api.get_events')
    )
@api.route('/events', methods=['POST'])
def create_event():
    """Create a new event from the JSON request body.

    Returns 201 with the created event's JSON and a Location header.
    """
    event = Event()
    # Guard against a missing/empty JSON body (get_json() may return None),
    # matching the defensive style used by update_event.
    data = request.get_json() or {}
    event.from_dict(data)
    db.session.add(event)
    db.session.commit()
    response = jsonify(event.to_dict())
    response.status_code = 201
    response.headers['Location'] = url_for('api.get_event', id=event.id)
    return response
@api.route('/events/<int:id>', methods=['PUT'])
def update_event(id):
    """Update an existing event with fields from the JSON request body."""
    event = Event.query.get_or_404(id)
    # Parse the body once and reuse it; previously `data` was assigned and
    # then ignored while get_json() was called a second time.
    data = request.get_json() or {}
    event.from_dict(data)
    db.session.commit()
    return jsonify(event.to_dict())
@api.route('/events/<int:id>', methods=['DELETE'])
def delete_event(id):
    """Delete an event by id; 404s if it does not exist. Returns an empty JSON object."""
    doomed = Event.query.get_or_404(id)
    db.session.delete(doomed)
    db.session.commit()
    return jsonify({})
| 27.142857
| 72
| 0.670677
|
4a09011b5150ab35a720a3403dda3839e05b6d47
| 164
|
py
|
Python
|
utilities/functions.py
|
mlists/pyinfiniterecharge
|
3ac5697767e87c669dab5b089f0831909466f3b7
|
[
"BSD-3-Clause"
] | 3
|
2020-02-04T20:39:10.000Z
|
2022-01-08T23:24:10.000Z
|
utilities/functions.py
|
mlists/pyinfiniterecharge
|
3ac5697767e87c669dab5b089f0831909466f3b7
|
[
"BSD-3-Clause"
] | 48
|
2020-01-18T05:00:37.000Z
|
2021-05-01T02:05:56.000Z
|
utilities/functions.py
|
mlists/pyinfiniterecharge
|
3ac5697767e87c669dab5b089f0831909466f3b7
|
[
"BSD-3-Clause"
] | 16
|
2020-01-17T23:36:27.000Z
|
2021-03-03T06:31:33.000Z
|
import math
def constrain_angle(angle: float) -> float:
    """Wrap *angle* (radians) onto the interval [-pi, pi]."""
    # atan2 of the angle's sine/cosine reconstructs the equivalent
    # principal angle without any explicit modular arithmetic.
    wrapped = math.atan2(math.sin(angle), math.cos(angle))
    return wrapped
| 23.428571
| 55
| 0.670732
|
4a09016895fcbd53a06b2ad55065ee97726aa3dc
| 37,517
|
py
|
Python
|
src/prefect/core/task.py
|
karmijo/prefect
|
6e3f3e094e770f0b490a626a0fad7195ee3f79e2
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/core/task.py
|
karmijo/prefect
|
6e3f3e094e770f0b490a626a0fad7195ee3f79e2
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/core/task.py
|
karmijo/prefect
|
6e3f3e094e770f0b490a626a0fad7195ee3f79e2
|
[
"Apache-2.0"
] | null | null | null |
import collections
import copy
import inspect
import uuid
import warnings
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Set, Tuple, Union
import prefect
import prefect.engine.cache_validators
import prefect.engine.signals
import prefect.triggers
from prefect.utilities import logging
from prefect.utilities.notifications import callback_factory
if TYPE_CHECKING:
from prefect.core.flow import Flow # pylint: disable=W0611
from prefect.engine.result_handlers import ResultHandler
from prefect.engine.state import State
# Sentinel for detecting a `**kwargs`-style parameter in run() signatures.
VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
def _validate_run_signature(run: Callable) -> None:
func = getattr(run, "__wrapped__", run)
run_sig = inspect.getfullargspec(func)
if run_sig.varargs:
raise ValueError(
"Tasks with variable positional arguments (*args) are not "
"supported, because all Prefect arguments are stored as "
"keywords. As a workaround, consider modifying the run() "
"method to accept **kwargs and feeding the values "
"to *args."
)
reserved_kwargs = ["upstream_tasks", "mapped", "task_args", "flow"]
violations = [kw for kw in reserved_kwargs if kw in run_sig.args]
if violations:
msg = "Tasks cannot have the following argument names: {}.".format(
", ".join(violations)
)
msg += " These are reserved keyword arguments."
raise ValueError(msg)
class SignatureValidator(type):
    """Metaclass that rejects Task subclasses with unsupported `run` signatures."""

    def __new__(cls, name: str, parents: tuple, methods: dict) -> type:
        _validate_run_signature(methods.get("run", lambda: None))
        # use type.__new__ directly so classes inheriting from a subclass
        # are also routed through this validation
        return type.__new__(cls, name, parents, methods)
class Task(metaclass=SignatureValidator):
"""
The Task class which is used as the full representation of a unit of work.
This Task class can be used directly as a first class object where it must
be inherited from by a class which implements the `run` method. For a more
functional way of generating Tasks, see [the task decorator](../utilities/tasks.html).
Inheritance example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
```
*Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
the following keywords are reserved: `upstream_tasks`, `task_args` and `mapped`.
An instance of a `Task` can be used functionally to generate other task instances
with the same attributes but with different values bound to their `run` methods.
Example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
a = AddTask()
with Flow("My Flow") as f:
t1 = a(1, 2) # t1 != a
t2 = a(5, 7) # t2 != a
```
To bind values to a Task's run method imperatively (and without making a copy), see `Task.bind`.
Args:
- name (str, optional): The name of this task
- slug (str, optional): The slug for this task, it must be unique within a given Flow
- tags ([str], optional): A list of tags for this task
- max_retries (int, optional): The maximum amount of times this task can be retried
- retry_delay (timedelta, optional): The amount of time to wait until task is retried
- timeout (int, optional): The amount of time (in seconds) to wait while
running this task before a timeout occurs; note that sub-second resolution is not supported
- trigger (callable, optional): a function that determines whether the task should run, based
on the states of any upstream tasks.
- skip_on_upstream_skip (bool, optional): if `True`, if any immediately
upstream tasks are skipped, this task will automatically be skipped as well,
regardless of trigger. By default, this prevents tasks from attempting to use either state or data
from tasks that didn't run. If `False`, the task's trigger will be called as normal,
with skips considered successes. Defaults to `True`.
- cache_for (timedelta, optional): The amount of time to maintain a cache
of the outputs of this task. Useful for situations where the containing Flow
will be rerun multiple times, but this task doesn't need to be.
- cache_validator (Callable, optional): Validator which will determine
whether the cache for this task is still valid (only required if `cache_for`
is provided; defaults to `prefect.engine.cache_validators.duration_only`)
- checkpoint (bool, optional): if this Task is successful, whether to
store its result using the `result_handler` available during the run; defaults to the value of
`tasks.defaults.checkpoint` in your user config
- result_handler (ResultHandler, optional): the handler to use for
retrieving and storing state results during execution; if not provided, will default to the
one attached to the Flow
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the task changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the task instance, the old (prior) state, and the new
(current) state, with the following signature:
`state_handler(task: Task, old_state: State, new_state: State) -> Optional[State]`
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
- on_failure (Callable, optional): A function with signature `fn(task: Task, state: State) -> None`
with will be called anytime this Task enters a failure state
Raises:
- TypeError: if `tags` is of type `str`
- TypeError: if `timeout` is not of type `int`
"""
# Tasks are not iterable, though they do have a __getitem__ method
__iter__ = None
def __init__(
self,
name: str = None,
slug: str = None,
tags: Iterable[str] = None,
max_retries: int = None,
retry_delay: timedelta = None,
timeout: int = None,
trigger: Callable[[Set["State"]], bool] = None,
skip_on_upstream_skip: bool = True,
cache_for: timedelta = None,
cache_validator: Callable = None,
checkpoint: bool = None,
result_handler: "ResultHandler" = None,
state_handlers: List[Callable] = None,
on_failure: Callable = None,
):
self.name = name or type(self).__name__
self.slug = slug
self.id = str(uuid.uuid4())
self.logger = logging.get_logger("Task")
# avoid silently iterating over a string
if isinstance(tags, str):
raise TypeError("Tags should be a set of tags, not a string.")
current_tags = set(prefect.context.get("tags", set()))
self.tags = (set(tags) if tags is not None else set()) | current_tags
max_retries = (
max_retries
if max_retries is not None
else prefect.config.tasks.defaults.max_retries
)
retry_delay = (
retry_delay
if retry_delay is not None
else prefect.config.tasks.defaults.retry_delay
)
timeout = (
timeout if timeout is not None else prefect.config.tasks.defaults.timeout
)
if max_retries > 0 and retry_delay is None:
raise ValueError(
"A datetime.timedelta `retry_delay` must be provided if max_retries > 0"
)
if timeout is not None and not isinstance(timeout, int):
raise TypeError(
"Only integer timeouts (representing seconds) are supported."
)
self.max_retries = max_retries
self.retry_delay = retry_delay
self.timeout = timeout
self.trigger = trigger or prefect.triggers.all_successful
self.skip_on_upstream_skip = skip_on_upstream_skip
if cache_for is None and (
cache_validator is not None
and cache_validator is not prefect.engine.cache_validators.never_use
):
warnings.warn(
"cache_validator provided without specifying cache expiration (cache_for); this Task will not be cached."
)
self.cache_for = cache_for
default_validator = (
prefect.engine.cache_validators.never_use
if cache_for is None
else prefect.engine.cache_validators.duration_only
)
self.cache_validator = cache_validator or default_validator
self.checkpoint = (
checkpoint
if checkpoint is not None
else prefect.config.tasks.defaults.checkpoint
)
self.result_handler = result_handler
if state_handlers and not isinstance(state_handlers, collections.Sequence):
raise TypeError("state_handlers should be iterable.")
self.state_handlers = state_handlers or []
if on_failure is not None:
self.state_handlers.append(
callback_factory(on_failure, check=lambda s: s.is_failed())
)
def __repr__(self) -> str:
return "<Task: {self.name}>".format(self=self)
    # reimplement __hash__ because we override __eq__
    def __hash__(self) -> int:
        """Hash tasks by object identity, restoring the default behavior."""
        return id(self)
@property
def id(self) -> str:
return self._id
@id.setter
def id(self, value: str) -> None:
"""
Args:
- value (str): a UUID-formatted string
"""
try:
uuid.UUID(value)
except Exception:
raise ValueError("Badly formatted UUID string: {}".format(value))
self._id = value
    # Run --------------------------------------------------------------------

    def run(self) -> None:
        """
        The `run()` method is called (with arguments, if appropriate) to run a task.

        *Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
        the following keywords are reserved: `upstream_tasks`, `task_args` and `mapped`.

        If a task has arguments in its `run()` method, these can be bound either by using the functional
        API and _calling_ the task instance, or by using `self.bind` directly.

        In addition to running arbitrary functions, tasks can interact with Prefect in a few ways:
        <ul><li> Return an optional result. When this function runs successfully,
            the task is considered successful and the result (if any) can be
            made available to downstream tasks. </li>
        <li> Raise an error. Errors are interpreted as failure. </li>
        <li> Raise a [signal](../engine/signals.html). Signals can include `FAIL`, `SUCCESS`, `RETRY`, `SKIP`, etc.
            and indicate that the task should be put in the indicated state.
                <ul>
                <li> `FAIL` will lead to retries if appropriate </li>
                <li> `SUCCESS` will cause the task to be marked successful </li>
                <li> `RETRY` will cause the task to be marked for retry, even if `max_retries`
                    has been exceeded </li>
                <li> `SKIP` will skip the task and possibly propogate the skip state through the
                    flow, depending on whether downstream tasks have `skip_on_upstream_skip=True`. </li></ul>
        </li></ul>
        """
        # deliberately a no-op: subclasses (or the @task decorator) provide the body
        pass
# Dependencies -------------------------------------------------------------
def copy(self, **task_args: Any) -> "Task":
"""
Creates and returns a copy of the current Task.
Args:
- **task_args (dict, optional): a dictionary of task attribute keyword arguments, these attributes
will be set on the new copy
Raises:
- AttributeError: if any passed `task_args` are not attributes of the original
Returns:
- Task: a copy of the current Task, with any attributes updated from `task_args`
"""
flow = prefect.context.get("flow", None)
if (
flow
and self in flow.tasks
and (flow.edges_to(self) or flow.edges_from(self))
):
warnings.warn(
"You are making a copy of a task that has dependencies on or to other tasks "
"in the active flow context. The copy will not retain those dependencies."
)
new = copy.copy(self)
# check task_args
for attr, val in task_args.items():
if not hasattr(new, attr):
raise AttributeError(
"{0} does not have {1} as an attribute".format(self, attr)
)
else:
setattr(new, attr, val)
# assign new id
new.id = str(uuid.uuid4())
new.tags = copy.deepcopy(self.tags).union(set(new.tags))
tags = set(prefect.context.get("tags", set()))
new.tags.update(tags)
return new
def __call__(
self,
*args: Any,
mapped: bool = False,
task_args: dict = None,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Calling a Task instance will first create a _copy_ of the instance, and then
bind any passed `args` / `kwargs` to the run method of the copy. This new task
is then returned.
Args:
- *args: arguments to bind to the new Task's `run` method
- **kwargs: keyword arguments to bind to the new Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.tasks.unmapped`
container will _not_ be mapped over.
- task_args (dict, optional): a dictionary of task attribute keyword arguments, these attributes
will be set on the new copy
- upstream_tasks ([Task], optional): a list of upstream dependencies
for the new task. This kwarg can be used to functionally specify
dependencies without binding their result to `run()`
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
Returns:
- Task: a new Task instance
"""
new = self.copy(**(task_args or {}))
new.bind(
*args, mapped=mapped, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
return new
def bind(
self,
*args: Any,
mapped: bool = False,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Binding a task to (keyword) arguments creates a _keyed_ edge in the active Flow
which will pass data from the arguments (whether Tasks or constants) to the
Task's `run` method under the appropriate key. Once a Task is bound in this
manner, the same task instance cannot be bound a second time in the same Flow.
To bind arguments to a _copy_ of this Task instance, see `__call__`.
Additionally, non-keyed edges can be created by passing any upstream
dependencies through `upstream_tasks`.
Args:
- *args: arguments to bind to the current Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.tasks.unmapped`
container will _not_ be mapped over.
- upstream_tasks ([Task], optional): a list of upstream dependencies for the
current task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- **kwargs: keyword arguments to bind to the current Task's `run` method
Returns:
- Task: the current Task instance
"""
# this will raise an error if callargs weren't all provided
signature = inspect.signature(self.run)
callargs = dict(signature.bind(*args, **kwargs).arguments) # type: Dict
# bind() compresses all variable keyword arguments under the ** argument name,
# so we expand them explicitly
var_kw_arg = next(
(p for p in signature.parameters.values() if p.kind == VAR_KEYWORD), None
)
if var_kw_arg:
callargs.update(callargs.pop(var_kw_arg.name, {}))
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError("Could not infer an active Flow context.")
self.set_dependencies(
flow=flow,
upstream_tasks=upstream_tasks,
keyword_tasks=callargs,
mapped=mapped,
)
tags = set(prefect.context.get("tags", set()))
self.tags.update(tags)
return self
def map(
self,
*args: Any,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Map the Task elementwise across one or more Tasks. Arguments which should _not_ be mapped over
should be placed in the `prefect.utilities.tasks.unmapped` container.
For example:
```
task.map(x=X, y=unmapped(Y))
```
will map over the values of `X`, but not over the values of `Y`
Args:
- *args: arguments to map over, which will elementwise be bound to the Task's `run` method
- upstream_tasks ([Task], optional): a list of upstream dependencies
to map over
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- **kwargs: keyword arguments to map over, which will elementwise be bound to the Task's `run` method
Returns: - Task: a new Task instance
"""
new = self.copy()
return new.bind(
*args, mapped=True, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
def set_dependencies(
self,
flow: "Flow" = None,
upstream_tasks: Iterable[object] = None,
downstream_tasks: Iterable[object] = None,
keyword_tasks: Dict[str, object] = None,
mapped: bool = False,
validate: bool = True,
) -> None:
"""
Set dependencies for a flow either specified or in the current context using this task
Args:
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- upstream_tasks ([object], optional): A list of upstream tasks for this task
- downstream_tasks ([object], optional): A list of downtream tasks for this task
- keyword_tasks ({str, object}}, optional): The results of these tasks will be provided
to the task under the specified keyword arguments.
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments
- validate (bool, optional): Whether or not to check the validity of the flow
Returns:
- None
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError(
"No Flow was passed, and could not infer an active Flow context."
)
flow.set_dependencies(
task=self,
upstream_tasks=upstream_tasks,
downstream_tasks=downstream_tasks,
keyword_tasks=keyword_tasks,
validate=validate,
mapped=mapped,
)
def set_upstream(self, task: object, flow: "Flow" = None) -> None:
"""
Sets the provided task as an upstream dependency of this task.
Equivalent to: `self.set_dependencies(upstream_tasks=[task])`
Args:
- task (object): A task or object that will be converted to a task that will be set
as a upstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
self.set_dependencies(flow=flow, upstream_tasks=[task])
def set_downstream(self, task: object, flow: "Flow" = None) -> None:
"""
Sets the provided task as a downstream dependency of this task.
Equivalent to: `self.set_dependencies(downstream_tasks=[task])`
Args:
- task (object): A task or object that will be converted to a task that will be set
as a downstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
self.set_dependencies(flow=flow, downstream_tasks=[task])
def inputs(self) -> Dict[str, Dict]:
"""
Describe the inputs for this task. The result is a dictionary that maps each input to
a `type`, `required`, and `default`. All values are inferred from the `run()`
signature; this method can be overloaded for more precise control.
Returns:
- dict
"""
inputs = {}
for name, parameter in inspect.signature(self.run).parameters.items():
input_type = parameter.annotation
if input_type is inspect._empty: # type: ignore
input_type = Any
input_default = parameter.default
input_required = False
if input_default is inspect._empty: # type: ignore
input_required = True
input_default = None
inputs[name] = dict(
type=input_type, default=input_default, required=input_required
)
return inputs
def outputs(self) -> Any:
"""
Get the output types for this task.
Returns:
- Any
"""
return_annotation = inspect.signature(self.run).return_annotation
if return_annotation is inspect._empty: # type: ignore
return_annotation = Any
return return_annotation
# Serialization ------------------------------------------------------------
def serialize(self) -> Dict[str, Any]:
"""
Creates a serialized representation of this task
Returns:
- dict representing this task
"""
return prefect.serialization.task.TaskSchema().dump(self)
# Operators ----------------------------------------------------------------
def is_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self == other`
This can't be implemented as the __eq__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Equal().bind(self, other)
def is_not_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self != other`
This can't be implemented as the __neq__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.NotEqual().bind(self, other)
def not_(self) -> "Task":
"""
Produces a Task that evaluates `not self`
Returns:
- Task
"""
return prefect.tasks.core.operators.Not().bind(self)
# Magic Method Interactions ----------------------------------------------------
def __getitem__(self, key: Any) -> "Task":
"""
Produces a Task that evaluates `self[key]`
Args:
- key (object): the object to use an an index for this task. It will be converted
to a Task if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GetItem().bind(self, key)
def __or__(self, other: object) -> object:
"""
Creates a state dependency between `self` and `other`
`self | other --> self.set_dependencies(downstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task (if it isn't one already)
and set as a downstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(downstream_tasks=[other])
return other
def __mifflin__(self) -> None:
"Calls Dunder Mifflin"
import webbrowser
webbrowser.open("https://cicdw.github.io/welcome.html")
def __ror__(self, other: object) -> "Task":
"""
Creates a state dependency between `self` and `other`:
`other | self --> self.set_dependencies(upstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task and set as an
upstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(upstream_tasks=[other])
return self
# Maginc Method Operators -----------------------------------------------------
def __add__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self + other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(self, other)
def __sub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self - other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(self, other)
def __mul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self * other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(self, other)
def __truediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self / other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(self, other)
def __floordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self // other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(self, other)
def __mod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self % other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(self, other)
def __pow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self ** other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(self, other)
def __and__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self & other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(self, other)
def __radd__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other + self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(other, self)
def __rsub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other - self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(other, self)
def __rmul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other * self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(other, self)
def __rtruediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other / self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(other, self)
def __rfloordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other // self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(other, self)
def __rmod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other % self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(other, self)
def __rpow__(self, other: object) -> "Task":
    """Build a Task whose result is `other ** self`.

    Args:
        - other (object): the left-hand operand; wrapped in a Task
            if it is not one already.

    Returns:
        - Task
    """
    op = prefect.tasks.core.operators.Pow()
    return op.bind(other, self)
def __rand__(self, other: object) -> "Task":
    """Build a Task whose result is `other & self`.

    Args:
        - other (object): the left-hand operand; wrapped in a Task
            if it is not one already.

    Returns:
        - Task
    """
    op = prefect.tasks.core.operators.And()
    return op.bind(other, self)
def __gt__(self, other: object) -> "Task":
    """Build a Task whose result is `self > other`.

    Args:
        - other (object): the right-hand operand; wrapped in a Task
            if it is not one already.

    Returns:
        - Task
    """
    op = prefect.tasks.core.operators.GreaterThan()
    return op.bind(self, other)
def __ge__(self, other: object) -> "Task":
    """Build a Task whose result is `self >= other`.

    Args:
        - other (object): the right-hand operand; wrapped in a Task
            if it is not one already.

    Returns:
        - Task
    """
    op = prefect.tasks.core.operators.GreaterThanOrEqual()
    return op.bind(self, other)
def __lt__(self, other: object) -> "Task":
    """Build a Task whose result is `self < other`.

    Args:
        - other (object): the right-hand operand; wrapped in a Task
            if it is not one already.

    Returns:
        - Task
    """
    op = prefect.tasks.core.operators.LessThan()
    return op.bind(self, other)
def __le__(self, other: object) -> "Task":
    """Build a Task whose result is `self <= other`.

    Args:
        - other (object): the right-hand operand; wrapped in a Task
            if it is not one already.

    Returns:
        - Task
    """
    op = prefect.tasks.core.operators.LessThanOrEqual()
    return op.bind(self, other)
class Parameter(Task):
    """
    A Parameter is a special task that defines a required flow input.

    A parameter's "slug" is automatically -- and immutably -- set to the parameter name.
    Flows enforce slug uniqueness across all tasks, so this ensures that the flow has
    no other parameters by the same name.

    Args:
        - name (str): the Parameter name.
        - required (bool, optional): If True, the Parameter is required and the default
            value is ignored.
        - default (any, optional): A default value for the parameter. If the default
            is not None, the Parameter will not be required.
        - tags ([str], optional): A list of tags for this parameter
    """

    def __init__(
        self,
        name: str,
        default: Any = None,
        required: bool = True,
        tags: Iterable[str] = None,
    ):
        # Supplying any non-None default implies the parameter is optional.
        if default is not None:
            required = False
        self.required = required
        self.default = default

        # Imported lazily to avoid a circular import at module load time.
        from prefect.engine.result_handlers import JSONResultHandler

        super().__init__(
            name=name,
            slug=name,
            tags=tags,
            checkpoint=True,
            result_handler=JSONResultHandler(),
        )

    def __repr__(self) -> str:
        return f"<Parameter: {self.name}>"

    @property  # type: ignore
    def name(self) -> str:  # type: ignore
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        # Immutable after the first assignment (done by __init__).
        if hasattr(self, "_name"):
            raise AttributeError("Parameter name can not be changed")
        self._name = value  # pylint: disable=W0201

    @property  # type: ignore
    def slug(self) -> str:  # type: ignore
        """
        A Parameter slug is always the same as its name. This information is used by
        Flow objects to enforce parameter name uniqueness.
        """
        return self.name

    @slug.setter
    def slug(self, value: str) -> None:
        # The slug is derived from the name, so this setter only validates;
        # the superclass assigns slug during __init__ and that must not error.
        if value != self.name:
            raise AttributeError("Parameter slug must be the same as its name.")

    def run(self) -> Any:
        params = prefect.context.get("parameters") or {}
        if self.name in params or not self.required:
            return params.get(self.name, self.default)
        msg = 'Parameter "{}" was required but not provided.'.format(self.name)
        self.logger.debug(msg)
        raise prefect.engine.signals.FAIL(msg)

    # Serialization ------------------------------------------------------------

    def serialize(self) -> Dict[str, Any]:
        """
        Creates a serialized representation of this parameter

        Returns:
            - dict representing this parameter
        """
        return prefect.serialization.task.ParameterSchema().dump(self)
| 36.21332
| 121
| 0.587547
|
4a090244465fe8f2b7a05fc12b621d0b53ceca0b
| 21,038
|
py
|
Python
|
spacy/pipeline/entity_linker.py
|
TeMU-BSC/spaCy-develop
|
5facdb031c64b9b3383e2f041c34474cfb32bb21
|
[
"Apache-2.0",
"MIT"
] | 22,040
|
2016-10-03T11:58:15.000Z
|
2022-03-31T21:08:19.000Z
|
spacy/pipeline/entity_linker.py
|
TeMU-BSC/spaCy-develop
|
5facdb031c64b9b3383e2f041c34474cfb32bb21
|
[
"Apache-2.0",
"MIT"
] | 6,927
|
2016-10-03T13:11:11.000Z
|
2022-03-31T17:01:25.000Z
|
spacy/pipeline/entity_linker.py
|
TeMU-BSC/spaCy-develop
|
5facdb031c64b9b3383e2f041c34474cfb32bb21
|
[
"Apache-2.0",
"MIT"
] | 4,403
|
2016-10-04T03:36:33.000Z
|
2022-03-31T14:12:34.000Z
|
from typing import Optional, Iterable, Callable, Dict, Union, List, Any
from thinc.types import Floats2d
from pathlib import Path
from itertools import islice
import srsly
import random
from thinc.api import CosineDistance, Model, Optimizer, Config
from thinc.api import set_dropout_rate
import warnings
from ..kb import KnowledgeBase, Candidate
from ..ml import empty_kb
from ..tokens import Doc, Span
from .pipe import deserialize_config
from .trainable_pipe import TrainablePipe
from ..language import Language
from ..vocab import Vocab
from ..training import Example, validate_examples, validate_get_examples
from ..errors import Errors, Warnings
from ..util import SimpleFrozenList
from .. import util
from ..scorer import Scorer
default_model_config = """
[model]
@architectures = "spacy.EntityLinker.v1"
[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 2
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true
"""
DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
# Registers the "entity_linker" factory on Language; the default_config keys
# mirror make_entity_linker's keyword arguments one-to-one.
@Language.factory(
    "entity_linker",
    requires=["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
    assigns=["token.ent_kb_id"],
    default_config={
        "model": DEFAULT_NEL_MODEL,
        "labels_discard": [],
        "n_sents": 0,
        "incl_prior": True,
        "incl_context": True,
        "entity_vector_length": 64,
        "get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
    },
    default_score_weights={
        "nel_micro_f": 1.0,
        "nel_micro_r": None,
        "nel_micro_p": None,
    },
)
def make_entity_linker(
    nlp: Language,
    name: str,
    model: Model,
    *,
    labels_discard: Iterable[str],
    n_sents: int,
    incl_prior: bool,
    incl_context: bool,
    entity_vector_length: int,
    get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
):
    """Construct an EntityLinker component.

    model (Model[List[Doc], Floats2d]): A model that learns document vector
        representations. Given a batch of Doc objects, it should return a single
        array, with one row per item in the batch.
    labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction.
    n_sents (int): The number of neighbouring sentences to take into account.
    incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
    incl_context (bool): Whether or not to include the local context in the model.
    entity_vector_length (int): Size of encoding vectors in the KB.
    get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that
        produces a list of candidates, given a certain knowledge base and a textual mention.
    """
    # Thin pass-through: all configuration is forwarded unchanged to EntityLinker.
    return EntityLinker(
        nlp.vocab,
        model,
        name,
        labels_discard=labels_discard,
        n_sents=n_sents,
        incl_prior=incl_prior,
        incl_context=incl_context,
        entity_vector_length=entity_vector_length,
        get_candidates=get_candidates,
    )
class EntityLinker(TrainablePipe):
    """Pipeline component for named entity linking.

    DOCS: https://spacy.io/api/entitylinker
    """

    NIL = "NIL"  # string used to refer to a non-existing link

    def __init__(
        self,
        vocab: Vocab,
        model: Model,
        name: str = "entity_linker",
        *,
        labels_discard: Iterable[str],
        n_sents: int,
        incl_prior: bool,
        incl_context: bool,
        entity_vector_length: int,
        get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
    ) -> None:
        """Initialize an entity linker.

        vocab (Vocab): The shared vocabulary.
        model (thinc.api.Model): The Thinc Model powering the pipeline component.
        name (str): The component instance name, used to add entries to the
            losses during training.
        labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction.
        n_sents (int): The number of neighbouring sentences to take into account.
        incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
        incl_context (bool): Whether or not to include the local context in the model.
        entity_vector_length (int): Size of encoding vectors in the KB.
        get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
            produces a list of candidates, given a certain knowledge base and a textual mention.

        DOCS: https://spacy.io/api/entitylinker#init
        """
        self.vocab = vocab
        self.model = model
        self.name = name
        self.labels_discard = list(labels_discard)
        self.n_sents = n_sents
        self.incl_prior = incl_prior
        self.incl_context = incl_context
        self.get_candidates = get_candidates
        self.cfg: Dict[str, Any] = {}
        # Unnormalized cosine distance: used for both loss and gradient in get_loss.
        self.distance = CosineDistance(normalize=False)
        # how many neighbour sentences to take into account
        # create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'.
        self.kb = empty_kb(entity_vector_length)(self.vocab)

    def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
        """Define the KB of this pipe by providing a function that will
        create it using this object's vocab."""
        if not callable(kb_loader):
            raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))

        self.kb = kb_loader(self.vocab)

    def validate_kb(self) -> None:
        # Raise an error if the knowledge base is not initialized.
        if self.kb is None:
            raise ValueError(Errors.E1018.format(name=self.name))
        if len(self.kb) == 0:
            raise ValueError(Errors.E139.format(name=self.name))

    def initialize(
        self,
        get_examples: Callable[[], Iterable[Example]],
        *,
        nlp: Optional[Language] = None,
        kb_loader: Optional[Callable[[Vocab], KnowledgeBase]] = None,
    ):
        """Initialize the pipe for training, using a representative set
        of data examples.

        get_examples (Callable[[], Iterable[Example]]): Function that
            returns a representative sample of gold-standard Example objects.
        nlp (Language): The current nlp object the component is part of.
        kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance.
            Note that providing this argument, will overwrite all data accumulated in the current KB.
            Use this only when loading a KB as-such from file.

        DOCS: https://spacy.io/api/entitylinker#initialize
        """
        validate_get_examples(get_examples, "EntityLinker.initialize")
        if kb_loader is not None:
            self.set_kb(kb_loader)
        self.validate_kb()
        nO = self.kb.entity_vector_length
        doc_sample = []
        vector_sample = []
        # Use at most 10 examples to shape-infer the model's output layer.
        for example in islice(get_examples(), 10):
            doc_sample.append(example.x)
            vector_sample.append(self.model.ops.alloc1f(nO))
        assert len(doc_sample) > 0, Errors.E923.format(name=self.name)
        assert len(vector_sample) > 0, Errors.E923.format(name=self.name)
        self.model.initialize(
            X=doc_sample, Y=self.model.ops.asarray(vector_sample, dtype="float32")
        )

    def update(
        self,
        examples: Iterable[Example],
        *,
        drop: float = 0.0,
        sgd: Optional[Optimizer] = None,
        losses: Optional[Dict[str, float]] = None,
    ) -> Dict[str, float]:
        """Learn from a batch of documents and gold-standard information,
        updating the pipe's model. Delegates to predict and get_loss.

        examples (Iterable[Example]): A batch of Example objects.
        drop (float): The dropout rate.
        sgd (thinc.api.Optimizer): The optimizer.
        losses (Dict[str, float]): Optional record of the loss during training.
            Updated using the component name as the key.
        RETURNS (Dict[str, float]): The updated losses dictionary.

        DOCS: https://spacy.io/api/entitylinker#update
        """
        self.validate_kb()
        if losses is None:
            losses = {}
        losses.setdefault(self.name, 0.0)
        if not examples:
            return losses
        validate_examples(examples, "EntityLinker.update")
        sentence_docs = []
        # Build one context doc (entity sentence +/- n_sents neighbours) per
        # gold-linked entity; these are the model's training inputs.
        for eg in examples:
            sentences = [s for s in eg.reference.sents]
            kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True)
            for ent in eg.reference.ents:
                # KB ID of the first token is the same as the whole span
                kb_id = kb_ids[ent.start]
                if kb_id:
                    try:
                        # find the sentence in the list of sentences.
                        sent_index = sentences.index(ent.sent)
                    except AttributeError:
                        # Catch the exception when ent.sent is None and provide a user-friendly warning
                        raise RuntimeError(Errors.E030) from None
                    # get n previous sentences, if there are any
                    start_sentence = max(0, sent_index - self.n_sents)
                    # get n posterior sentences, or as many < n as there are
                    end_sentence = min(len(sentences) - 1, sent_index + self.n_sents)
                    # get token positions
                    start_token = sentences[start_sentence].start
                    end_token = sentences[end_sentence].end
                    # append that span as a doc to training
                    sent_doc = eg.predicted[start_token:end_token].as_doc()
                    sentence_docs.append(sent_doc)
        set_dropout_rate(self.model, drop)
        if not sentence_docs:
            warnings.warn(Warnings.W093.format(name="Entity Linker"))
            return losses
        sentence_encodings, bp_context = self.model.begin_update(sentence_docs)
        loss, d_scores = self.get_loss(
            sentence_encodings=sentence_encodings, examples=examples
        )
        bp_context(d_scores)
        if sgd is not None:
            self.finish_update(sgd)
        losses[self.name] += loss
        return losses

    def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d):
        # Compute cosine-distance loss between the predicted sentence encodings
        # and the KB vectors of the gold entities (one row per linked entity,
        # in the same order update() built sentence_docs).
        validate_examples(examples, "EntityLinker.get_loss")
        entity_encodings = []
        for eg in examples:
            kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True)
            for ent in eg.reference.ents:
                kb_id = kb_ids[ent.start]
                if kb_id:
                    entity_encoding = self.kb.get_vector(kb_id)
                    entity_encodings.append(entity_encoding)
        entity_encodings = self.model.ops.asarray(entity_encodings, dtype="float32")
        if sentence_encodings.shape != entity_encodings.shape:
            err = Errors.E147.format(
                method="get_loss", msg="gold entities do not match up"
            )
            raise RuntimeError(err)
        # TODO: fix typing issue here
        gradients = self.distance.get_grad(sentence_encodings, entity_encodings)  # type: ignore
        loss = self.distance.get_loss(sentence_encodings, entity_encodings)  # type: ignore
        loss = loss / len(entity_encodings)
        return float(loss), gradients

    def predict(self, docs: Iterable[Doc]) -> List[str]:
        """Apply the pipeline's model to a batch of docs, without modifying them.
        Returns the KB IDs for each entity in each doc, including NIL if there is
        no prediction.

        docs (Iterable[Doc]): The documents to predict.
        RETURNS (List[str]): The models prediction for each document.

        DOCS: https://spacy.io/api/entitylinker#predict
        """
        self.validate_kb()
        entity_count = 0
        final_kb_ids: List[str] = []
        if not docs:
            return final_kb_ids
        if isinstance(docs, Doc):
            docs = [docs]
        for i, doc in enumerate(docs):
            sentences = [s for s in doc.sents]
            if len(doc) > 0:
                # Looping through each entity (TODO: rewrite)
                for ent in doc.ents:
                    sent = ent.sent
                    sent_index = sentences.index(sent)
                    assert sent_index >= 0
                    # get n_neighbour sentences, clipped to the length of the document
                    start_sentence = max(0, sent_index - self.n_sents)
                    end_sentence = min(len(sentences) - 1, sent_index + self.n_sents)
                    start_token = sentences[start_sentence].start
                    end_token = sentences[end_sentence].end
                    sent_doc = doc[start_token:end_token].as_doc()
                    # currently, the context is the same for each entity in a sentence (should be refined)
                    xp = self.model.ops.xp
                    if self.incl_context:
                        sentence_encoding = self.model.predict([sent_doc])[0]
                        sentence_encoding_t = sentence_encoding.T
                        sentence_norm = xp.linalg.norm(sentence_encoding_t)
                    entity_count += 1
                    if ent.label_ in self.labels_discard:
                        # ignoring this entity - setting to NIL
                        final_kb_ids.append(self.NIL)
                    else:
                        candidates = list(self.get_candidates(self.kb, ent))
                        if not candidates:
                            # no prediction possible for this entity - setting to NIL
                            final_kb_ids.append(self.NIL)
                        elif len(candidates) == 1:
                            # shortcut for efficiency reasons: take the 1 candidate
                            # TODO: thresholding
                            final_kb_ids.append(candidates[0].entity_)
                        else:
                            # shuffle so argmax ties are not biased by KB order
                            random.shuffle(candidates)
                            # set all prior probabilities to 0 if incl_prior=False
                            prior_probs = xp.asarray([c.prior_prob for c in candidates])
                            if not self.incl_prior:
                                prior_probs = xp.asarray([0.0 for _ in candidates])
                            scores = prior_probs
                            # add in similarity from the context
                            if self.incl_context:
                                entity_encodings = xp.asarray(
                                    [c.entity_vector for c in candidates]
                                )
                                entity_norm = xp.linalg.norm(entity_encodings, axis=1)
                                if len(entity_encodings) != len(prior_probs):
                                    raise RuntimeError(
                                        Errors.E147.format(
                                            method="predict",
                                            msg="vectors not of equal length",
                                        )
                                    )
                                # cosine similarity
                                sims = xp.dot(entity_encodings, sentence_encoding_t) / (
                                    sentence_norm * entity_norm
                                )
                                if sims.shape != prior_probs.shape:
                                    raise ValueError(Errors.E161)
                                # probabilistic OR of prior and context evidence
                                scores = prior_probs + sims - (prior_probs * sims)
                            # TODO: thresholding
                            best_index = scores.argmax().item()
                            best_candidate = candidates[best_index]
                            final_kb_ids.append(best_candidate.entity_)
        if not (len(final_kb_ids) == entity_count):
            err = Errors.E147.format(
                method="predict", msg="result variables not of equal length"
            )
            raise RuntimeError(err)
        return final_kb_ids

    def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None:
        """Modify a batch of documents, using pre-computed scores.

        docs (Iterable[Doc]): The documents to modify.
        kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict.

        DOCS: https://spacy.io/api/entitylinker#set_annotations
        """
        count_ents = len([ent for doc in docs for ent in doc.ents])
        if count_ents != len(kb_ids):
            raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids)))
        i = 0
        for doc in docs:
            for ent in doc.ents:
                kb_id = kb_ids[i]
                i += 1
                # every token of the entity span gets the same KB ID
                for token in ent:
                    token.ent_kb_id_ = kb_id

    def score(self, examples, **kwargs):
        """Score a batch of examples.

        examples (Iterable[Example]): The examples to score.
        RETURNS (Dict[str, Any]): The scores.

        DOCS TODO: https://spacy.io/api/entity_linker#score
        """
        validate_examples(examples, "EntityLinker.score")
        return Scorer.score_links(examples, negative_labels=[self.NIL])

    def to_bytes(self, *, exclude=tuple()):
        """Serialize the pipe to a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (bytes): The serialized object.

        DOCS: https://spacy.io/api/entitylinker#to_bytes
        """
        self._validate_serialization_attrs()
        serialize = {}
        if hasattr(self, "cfg") and self.cfg is not None:
            serialize["cfg"] = lambda: srsly.json_dumps(self.cfg)
        serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
        serialize["kb"] = self.kb.to_bytes
        serialize["model"] = self.model.to_bytes
        return util.to_bytes(serialize, exclude)

    def from_bytes(self, bytes_data, *, exclude=tuple()):
        """Load the pipe from a bytestring.

        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (TrainablePipe): The loaded object.

        DOCS: https://spacy.io/api/entitylinker#from_bytes
        """
        self._validate_serialization_attrs()

        def load_model(b):
            try:
                self.model.from_bytes(b)
            except AttributeError:
                raise ValueError(Errors.E149) from None

        # NOTE: deserialization order matches util.from_bytes' dict iteration;
        # cfg and vocab are restored before the KB and model weights.
        deserialize = {}
        if hasattr(self, "cfg") and self.cfg is not None:
            deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b))
        deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude)
        deserialize["kb"] = lambda b: self.kb.from_bytes(b)
        deserialize["model"] = load_model
        util.from_bytes(bytes_data, deserialize, exclude)
        return self

    def to_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> None:
        """Serialize the pipe to disk.

        path (str / Path): Path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.

        DOCS: https://spacy.io/api/entitylinker#to_disk
        """
        serialize = {}
        serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
        serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg)
        serialize["kb"] = lambda p: self.kb.to_disk(p)
        serialize["model"] = lambda p: self.model.to_disk(p)
        util.to_disk(path, serialize, exclude)

    def from_disk(
        self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
    ) -> "EntityLinker":
        """Load the pipe from disk. Modifies the object in place and returns it.

        path (str / Path): Path to a directory.
        exclude (Iterable[str]): String names of serialization fields to exclude.
        RETURNS (EntityLinker): The modified EntityLinker object.

        DOCS: https://spacy.io/api/entitylinker#from_disk
        """

        def load_model(p):
            try:
                with p.open("rb") as infile:
                    self.model.from_bytes(infile.read())
            except AttributeError:
                raise ValueError(Errors.E149) from None

        deserialize: Dict[str, Callable[[Any], Any]] = {}
        deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p))
        deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude)
        deserialize["kb"] = lambda p: self.kb.from_disk(p)
        deserialize["model"] = load_model
        util.from_disk(path, deserialize, exclude)
        return self

    def rehearse(self, examples, *, sgd=None, losses=None, **config):
        # Rehearsal (catastrophic-forgetting mitigation) is not supported here.
        raise NotImplementedError

    def add_label(self, label):
        # The entity linker has no label scheme of its own; labels come from the KB.
        raise NotImplementedError
| 42.415323
| 116
| 0.596207
|
4a09036f23a8ae7fa78360b51ab5573b3160e321
| 1,646
|
py
|
Python
|
alembic/versions/cb0e90ac778c_.py
|
AlexandruScrob/fast_api_proj_2
|
9aca5d48ab3e42933747b23ff04c6d4f3487d93e
|
[
"MIT"
] | null | null | null |
alembic/versions/cb0e90ac778c_.py
|
AlexandruScrob/fast_api_proj_2
|
9aca5d48ab3e42933747b23ff04c6d4f3487d93e
|
[
"MIT"
] | null | null | null |
alembic/versions/cb0e90ac778c_.py
|
AlexandruScrob/fast_api_proj_2
|
9aca5d48ab3e42933747b23ff04c6d4f3487d93e
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: cb0e90ac778c
Revises: 6bfa16c7b895
Create Date: 2022-03-13 15:25:56.616337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cb0e90ac778c'
down_revision = '6bfa16c7b895'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``order`` and ``order_details`` tables.

    ``order_details`` holds foreign keys into ``order`` and ``products``,
    so the parent ``order`` table is created first. Both FKs cascade on
    delete.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('order',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('order_date', sa.DateTime(), nullable=True),
    sa.Column('order_amount', sa.Float(), nullable=True),
    sa.Column('order_status', sa.String(), nullable=True),
    sa.Column('shipping_address', sa.Text(), nullable=True),
    sa.Column('customer_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['customer_id'], ['users.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('order_details',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('order_id', sa.Integer(), nullable=True),
    sa.Column('product_id', sa.Integer(), nullable=True),
    sa.Column('quantity', sa.Integer(), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['order_id'], ['order.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by :func:`upgrade`.

    The child ``order_details`` table is dropped before its parent
    ``order`` to satisfy the foreign-key dependency.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('order_details')
    op.drop_table('order')
    # ### end Alembic commands ###
| 33.591837
| 81
| 0.681045
|
4a09046d04766846b6a302793afdfaaeded6d054
| 3,392
|
py
|
Python
|
src/apps/dive_log/migrations/0001_squashed_0004_auto_20150125_0811.py
|
GotlingSystem/apnea
|
6b2c0bdaa3733b5ec19456aae6177da4a13ab7d1
|
[
"MIT"
] | null | null | null |
src/apps/dive_log/migrations/0001_squashed_0004_auto_20150125_0811.py
|
GotlingSystem/apnea
|
6b2c0bdaa3733b5ec19456aae6177da4a13ab7d1
|
[
"MIT"
] | 3
|
2015-02-14T18:51:19.000Z
|
2015-02-24T07:44:05.000Z
|
src/apps/dive_log/migrations/0001_squashed_0004_auto_20150125_0811.py
|
GotlingSystem/apnea
|
6b2c0bdaa3733b5ec19456aae6177da4a13ab7d1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Squashed migration creating the Dive and Session models.

    Replaces the original 0001-0004 migrations of the ``dive_log`` app
    (see the commented-out ``replaces`` list below). Verbose names and
    help texts are in Swedish, matching the app's locale.
    """

    #replaces = [(b'dive_log', '0001_squashed_0003_auto_20150125_0429'), (b'dive_log', '0002_dive_duration'), (b'dive_log', '0003_auto_20150125_0804'), (b'dive_log', '0004_auto_20150125_0811')]

    dependencies = [
        ('discipline', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dive',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('start', models.TimeField(null=True, blank=True)),
                ('rest_duration', models.IntegerField(help_text='Sekunder', null=True, verbose_name='Vila innan')),
                ('dive_duration', models.IntegerField(help_text='Sekunder', null=True, verbose_name='Dyktid', blank=True)),
                ('distance', models.IntegerField(null=True, verbose_name='Distans')),
                ('temperature', models.IntegerField(null=True, verbose_name='Temperatur', blank=True)),
                ('comment', models.CharField(max_length=512, verbose_name='Kommentar', blank=True)),
            ],
            options={
                'verbose_name': 'Dyk',
                'verbose_name_plural': 'Dyk',
            },
        ),
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField(verbose_name='Datum')),
                ('time', models.TimeField(verbose_name='Tid')),
                ('comment', models.CharField(max_length=512, verbose_name='Kommentar', blank=True)),
            ],
            options={
                'ordering': ['date', 'time'],
                'verbose_name': 'Session',
                'verbose_name_plural': 'Sessioner',
            },
        ),
        migrations.AddField(
            model_name='dive',
            name='session',
            field=models.ForeignKey(to='dive_log.Session'),
        ),
        migrations.AddField(
            model_name='dive',
            name='discipline',
            field=models.ForeignKey(verbose_name='Disciplin', blank=True, to='discipline.Discipline', null=True),
        ),
        # The squashed history converts the duration fields from
        # IntegerField (seconds) to DurationField: remove, then re-add.
        migrations.RemoveField(
            model_name='dive',
            name='rest_duration',
        ),
        migrations.RemoveField(
            model_name='dive',
            name='dive_duration',
        ),
        migrations.AddField(
            model_name='dive',
            name='dive_duration',
            field=models.DurationField(null=True, verbose_name='Dyktid', blank=True),
        ),
        migrations.AddField(
            model_name='dive',
            name='rest_duration',
            field=models.DurationField(null=True, verbose_name='Vila', blank=True),
        ),
        migrations.AlterField(
            model_name='dive',
            name='distance',
            field=models.IntegerField(help_text='i meter', null=True, verbose_name='Distans'),
        ),
        migrations.AlterField(
            model_name='dive',
            name='temperature',
            field=models.IntegerField(help_text='i celsius', null=True, verbose_name='Temperatur', blank=True),
        ),
    ]
| 39.905882
| 193
| 0.567217
|
4a0904dd15487643f64160a0eba4a44d586bfbb2
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/setuptools/_vendor/packaging/markers.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/setuptools/_vendor/packaging/markers.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/setuptools/_vendor/packaging/markers.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/96/28/51/82aa598cb33e256fafc652cfa94de49957bbf60f6fca9cb593199346e4
| 96
| 96
| 0.895833
|
4a0905bc8c8c78a22c6c83dd1f6d0f0a5432abf1
| 7,056
|
py
|
Python
|
networkx/algorithms/tests/test_euler.py
|
ErikBrendel/networkx
|
8f36023a8c97c9ab659cb8a956a4c0cdb0089b8e
|
[
"BSD-3-Clause"
] | 1
|
2021-02-28T07:31:48.000Z
|
2021-02-28T07:31:48.000Z
|
networkx/algorithms/tests/test_euler.py
|
ErikBrendel/networkx
|
8f36023a8c97c9ab659cb8a956a4c0cdb0089b8e
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/tests/test_euler.py
|
ErikBrendel/networkx
|
8f36023a8c97c9ab659cb8a956a4c0cdb0089b8e
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T05:51:32.000Z
|
2020-12-03T05:51:32.000Z
|
import collections
import pytest
import networkx as nx
class TestIsEulerian:
    def test_is_eulerian(self):
        # Connected graphs in which every node has even degree are Eulerian.
        eulerian_graphs = [
            nx.complete_graph(5),
            nx.complete_graph(7),
            nx.hypercube_graph(4),
            nx.hypercube_graph(6),
        ]
        for G in eulerian_graphs:
            assert nx.is_eulerian(G)
        non_eulerian_graphs = [
            nx.complete_graph(4),
            nx.complete_graph(6),
            nx.hypercube_graph(3),
            nx.hypercube_graph(5),
            nx.petersen_graph(),
            nx.path_graph(4),
        ]
        for G in non_eulerian_graphs:
            assert not nx.is_eulerian(G)

    def test_is_eulerian2(self):
        # not connected
        G = nx.Graph()
        G.add_nodes_from([1, 2, 3])
        assert not nx.is_eulerian(G)
        # not strongly connected
        D = nx.DiGraph()
        D.add_nodes_from([1, 2, 3])
        assert not nx.is_eulerian(D)
        # duplicate 2->3 edge unbalances in/out degrees
        M = nx.MultiDiGraph()
        M.add_edge(1, 2)
        M.add_edge(2, 3)
        M.add_edge(2, 3)
        M.add_edge(3, 1)
        assert not nx.is_eulerian(M)
class TestEulerianCircuit:
    def test_eulerian_circuit_cycle(self):
        # Undirected 4-cycle: the circuit direction depends on the source.
        G = nx.cycle_graph(4)
        circuit = list(nx.eulerian_circuit(G, source=0))
        assert [u for u, v in circuit] == [0, 3, 2, 1]
        assert circuit == [(0, 3), (3, 2), (2, 1), (1, 0)]
        circuit = list(nx.eulerian_circuit(G, source=1))
        assert [u for u, v in circuit] == [1, 2, 3, 0]
        assert circuit == [(1, 2), (2, 3), (3, 0), (0, 1)]
        # Complete graph on 3 nodes (a triangle).
        G = nx.complete_graph(3)
        circuit = list(nx.eulerian_circuit(G, source=0))
        assert [u for u, v in circuit] == [0, 2, 1]
        assert circuit == [(0, 2), (2, 1), (1, 0)]
        circuit = list(nx.eulerian_circuit(G, source=1))
        assert [u for u, v in circuit] == [1, 2, 0]
        assert circuit == [(1, 2), (2, 0), (0, 1)]

    def test_eulerian_circuit_digraph(self):
        # Directed cycle: edges must be followed in their direction.
        G = nx.DiGraph()
        nx.add_cycle(G, [0, 1, 2, 3])
        circuit = list(nx.eulerian_circuit(G, source=0))
        assert [u for u, v in circuit] == [0, 1, 2, 3]
        assert circuit == [(0, 1), (1, 2), (2, 3), (3, 0)]
        circuit = list(nx.eulerian_circuit(G, source=1))
        assert [u for u, v in circuit] == [1, 2, 3, 0]
        assert circuit == [(1, 2), (2, 3), (3, 0), (0, 1)]

    def test_multigraph(self):
        # Cycle plus two parallel 1-2 edges: every edge appears once.
        G = nx.MultiGraph()
        nx.add_cycle(G, [0, 1, 2, 3])
        G.add_edge(1, 2)
        G.add_edge(1, 2)
        circuit = list(nx.eulerian_circuit(G, source=0))
        assert [u for u, v in circuit] == [0, 3, 2, 1, 2, 1]
        assert circuit == [(0, 3), (3, 2), (2, 1), (1, 2), (2, 1), (1, 0)]

    def test_multigraph_with_keys(self):
        G = nx.MultiGraph()
        nx.add_cycle(G, [0, 1, 2, 3])
        G.add_edge(1, 2)
        G.add_edge(1, 2)
        circuit = list(nx.eulerian_circuit(G, source=0, keys=True))
        assert [u for u, v, k in circuit] == [0, 3, 2, 1, 2, 1]
        assert circuit[:2] == [(0, 3, 0), (3, 2, 0)]
        # The three parallel 1-2 traversals may come back in any key order.
        assert collections.Counter(circuit[2:5]) == collections.Counter(
            [(2, 1, 0), (1, 2, 1), (2, 1, 2)]
        )
        assert circuit[5:] == [(1, 0, 0)]

    def test_not_eulerian(self):
        # K4 has odd-degree nodes, so no Eulerian circuit exists.
        with pytest.raises(nx.NetworkXError):
            f = list(nx.eulerian_circuit(nx.complete_graph(4)))
class TestIsSemiEulerian:
    def test_is_semieulerian(self):
        # Graphs with an Eulerian path but no Eulerian cycle are semi-Eulerian.
        assert nx.is_semieulerian(nx.path_graph(4))
        directed_path = nx.path_graph(6, create_using=nx.DiGraph)
        assert nx.is_semieulerian(directed_path)
        # Fully Eulerian graphs (cycle exists) are NOT semi-Eulerian.
        for G in (
            nx.complete_graph(5),
            nx.complete_graph(7),
            nx.hypercube_graph(4),
            nx.hypercube_graph(6),
        ):
            assert not nx.is_semieulerian(G)
class TestHasEulerianPath:
    def test_has_eulerian_path_cyclic(self):
        # An Eulerian cycle is in particular an Eulerian path.
        for G in (
            nx.complete_graph(5),
            nx.complete_graph(7),
            nx.hypercube_graph(4),
            nx.hypercube_graph(6),
        ):
            assert nx.has_eulerian_path(G)

    def test_has_eulerian_path_non_cyclic(self):
        # Graphs with an Eulerian path but no cycle also qualify.
        assert nx.has_eulerian_path(nx.path_graph(4))
        directed_path = nx.path_graph(6, create_using=nx.DiGraph)
        assert nx.has_eulerian_path(directed_path)

    def test_has_eulerian_path_directed_graph(self):
        # Unbalanced in/out degrees: no Eulerian path in this digraph.
        G = nx.DiGraph()
        G.add_edges_from([(0, 1), (1, 2), (0, 2)])
        assert not nx.has_eulerian_path(G)

    def test_has_eulerian_path_isolated_node(self):
        # A directed 3-cycle has an Eulerian path...
        G = nx.DiGraph()
        G.add_edges_from([(0, 1), (1, 2), (2, 0)])
        assert nx.has_eulerian_path(G)
        # ...and an isolated node must not change that.
        G.add_node(3)
        assert nx.has_eulerian_path(G)
class TestFindPathStart:
    def testfind_path_start(self):
        """_find_path_start returns a valid start node, or None when no Eulerian path exists."""
        find_path_start = nx.algorithms.euler._find_path_start
        # A directed path graph starts at node 0.
        assert find_path_start(nx.path_graph(6, create_using=nx.DiGraph)) == 0
        # The node whose out-degree exceeds its in-degree is the start.
        unbalanced = nx.DiGraph([(0, 1), (1, 2), (2, 0), (4, 0)])
        assert find_path_start(unbalanced) == 4
        # A digraph with no Eulerian path yields None.
        no_path = nx.DiGraph([(0, 1), (1, 2), (2, 3), (2, 4)])
        assert find_path_start(no_path) is None
class TestEulerianPath:
    def test_eulerian_path(self):
        """eulerian_path reproduces a known Eulerian trail edge by edge."""
        trail = [(4, 0), (0, 1), (1, 2), (2, 0)]
        produced = nx.eulerian_path(nx.DiGraph(trail))
        assert all(expected == actual for expected, actual in zip(trail, produced))
class TestEulerize:
    def test_disconnected(self):
        """eulerize rejects disconnected graphs."""
        G = nx.from_edgelist([(0, 1), (2, 3)])
        with pytest.raises(nx.NetworkXError):
            nx.eulerize(G)

    def test_null_graph(self):
        """A graph with no nodes is a pointless concept for eulerize."""
        with pytest.raises(nx.NetworkXPointlessConcept):
            nx.eulerize(nx.Graph())

    def test_null_multigraph(self):
        """Same for an empty multigraph."""
        with pytest.raises(nx.NetworkXPointlessConcept):
            nx.eulerize(nx.MultiGraph())

    def test_on_empty_graph(self):
        """Nodes without edges cannot be eulerized."""
        with pytest.raises(nx.NetworkXError):
            nx.eulerize(nx.empty_graph(3))

    def test_on_eulerian(self):
        """An already-Eulerian graph comes back unchanged up to isomorphism."""
        cycle = nx.cycle_graph(3)
        assert nx.is_isomorphic(cycle, nx.eulerize(cycle))

    def test_on_eulerian_multigraph(self):
        """A multigraph with one extra parallel edge becomes Eulerian."""
        multi = nx.MultiGraph(nx.cycle_graph(3))
        multi.add_edge(0, 1)
        assert nx.is_eulerian(nx.eulerize(multi))

    def test_on_complete_graph(self):
        """K4 (odd degrees) is Eulerian after edge duplication."""
        K4 = nx.complete_graph(4)
        assert nx.is_eulerian(nx.eulerize(K4))
        assert nx.is_eulerian(nx.eulerize(nx.MultiGraph(K4)))
| 33.923077
| 72
| 0.595238
|
4a0905fdb046706689b4d11beaefeb4a30be69cd
| 15,715
|
py
|
Python
|
globus_sdk/auth/client_types/base.py
|
jaswilli/globus-sdk-python
|
35579f5520150a28ee4e375802d7c863ed3c21ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
globus_sdk/auth/client_types/base.py
|
jaswilli/globus-sdk-python
|
35579f5520150a28ee4e375802d7c863ed3c21ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
globus_sdk/auth/client_types/base.py
|
jaswilli/globus-sdk-python
|
35579f5520150a28ee4e375802d7c863ed3c21ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, unicode_literals
import collections
import logging
import six
from globus_sdk import exc
from globus_sdk.auth.token_response import OAuthTokenResponse
from globus_sdk.authorizers import NullAuthorizer
from globus_sdk.base import BaseClient, safe_stringify
logger = logging.getLogger(__name__)
class AuthClient(BaseClient):
    """
    Client for the
    `Globus Auth API <https://docs.globus.org/api/auth/>`_

    This class provides helper methods for most common resources in the
    Auth API, and the common low-level interface from
    :class:`BaseClient <globus_sdk.base.BaseClient>` of ``get``, ``put``,
    ``post``, and ``delete`` methods, which can be used to access any API
    resource.

    There are generally two types of resources, distinguished by the type
    of authentication which they use. Resources available to end users of
    Globus are authenticated with a Globus Auth Token
    ("Authentication: Bearer ..."), while resources available to OAuth
    Clients are authenticated using Basic Auth with the Client's ID and
    Secret.
    Some resources may be available with either authentication type.

    **Examples**

    Initializing an ``AuthClient`` to authenticate a user making calls to the
    Globus Auth service with an access token takes the form

    >>> from globus_sdk import AuthClient, AccessTokenAuthorizer
    >>> ac = AuthClient(authorizer=AccessTokenAuthorizer('<token_string>'))

    You can, of course, use other kinds of Authorizers (notably the
    ``RefreshTokenAuthorizer``).

    .. automethodlist:: globus_sdk.AuthClient
    """

    error_class = exc.AuthAPIError

    def __init__(self, client_id=None, authorizer=None, **kwargs):
        self.client_id = client_id
        # an AuthClient may contain a GlobusOAuth2FlowManager in order to
        # encapsulate the functionality of various different types of flow
        # managers
        self.current_oauth2_flow_manager = None
        BaseClient.__init__(self, "auth", authorizer=authorizer, **kwargs)

    def get_identities(self, usernames=None, ids=None, provision=False, **params):
        r"""
        GET /v2/api/identities

        Given ``usernames=<U>`` or (exclusive) ``ids=<I>`` as keyword
        arguments, looks up identity information for the set of identities
        provided.
        ``<U>`` and ``<I>`` in this case are comma-delimited strings listing
        multiple Identity Usernames or Identity IDs, or iterables of strings,
        each of which is an Identity Username or Identity ID.

        If Globus Auth's identity auto-provisioning behavior is desired,
        ``provision=True`` may be specified.

        Available with any authentication/client type.

        **Examples**

        >>> ac = globus_sdk.AuthClient(...)
        >>> # by IDs
        >>> r = ac.get_identities(ids="46bd0f56-e24f-11e5-a510-131bef46955c")
        >>> r.data
        {u'identities': [{u'email': None,
          u'id': u'46bd0f56-e24f-11e5-a510-131bef46955c',
          u'identity_provider': u'7daddf46-70c5-45ee-9f0f-7244fe7c8707',
          u'name': None,
          u'organization': None,
          u'status': u'unused',
          u'username': u'globus@globus.org'}]}
        >>> ac.get_identities(
        >>>     ids=",".join(
        >>>         ("46bd0f56-e24f-11e5-a510-131bef46955c",
        >>>          "168edc3d-c6ba-478c-9cf8-541ff5ebdc1c"))
        ...
        >>> # or by usernames
        >>> ac.get_identities(usernames='globus@globus.org')
        ...
        >>> ac.get_identities(
        >>>     usernames='globus@globus.org,auth@globus.org')
        ...

        You could also use iterables:

        >>> ac.get_identities(
        >>>     usernames=['globus@globus.org', 'auth@globus.org'])
        ...
        >>> ac.get_identities(
        >>>     ids=["46bd0f56-e24f-11e5-a510-131bef46955c",
        >>>          "168edc3d-c6ba-478c-9cf8-541ff5ebdc1c"])
        ...

        **External Documentation**

        See
        `Identities Resources \
        <https://docs.globus.org/api/auth/reference/
        #v2_api_identities_resources>`_
        in the API documentation for details.
        """

        def _convert_listarg(val):
            # Accept either a comma-delimited string or an iterable of
            # values; a single non-string value (e.g. a UUID) falls through
            # to plain stringification.
            # NOTE: ``collections.Iterable`` was deprecated in Python 3.3
            # and removed in 3.10 -- use ``collections.abc.Iterable`` when
            # available, falling back for Python 2 compatibility.
            iterable_type = getattr(collections, "abc", collections).Iterable
            if isinstance(val, iterable_type) and not isinstance(
                val, six.string_types
            ):
                return ",".join(safe_stringify(x) for x in val)
            else:
                return safe_stringify(val)

        self.logger.info("Looking up Globus Auth Identities")

        # if either of these params has a truthy value, stringify it safely,
        # letting us consume args whose `__str__` methods produce "the right
        # thing"
        # most notably, lets `ids` take a single UUID object safely
        if usernames:
            params["usernames"] = _convert_listarg(usernames)
            params["provision"] = (
                "false" if str(provision).lower() == "false" else "true"
            )
        if ids:
            params["ids"] = _convert_listarg(ids)

        self.logger.debug("params={}".format(params))

        if "usernames" in params and "ids" in params:
            self.logger.warning(
                (
                    "get_identities call with both usernames and "
                    "identities set! Expected to result in errors"
                )
            )

        return self.get("/v2/api/identities", params=params)

    def oauth2_get_authorize_url(self, additional_params=None):
        """
        Get the authorization URL to which users should be sent.
        This method may only be called after ``oauth2_start_flow``
        has been called on this ``AuthClient``.

        :param additional_params: Additional query parameters to include in the
            authorize URL. Primarily for internal use
        :type additional_params: dict, optional

        :rtype: ``string``
        """
        if not self.current_oauth2_flow_manager:
            self.logger.error(
                ("OutOfOrderOperations(" "get_authorize_url before start_flow)")
            )
            raise exc.GlobusSDKUsageError(
                (
                    "Cannot get authorize URL until starting an OAuth2 flow. "
                    "Call the oauth2_start_flow() method on this "
                    "AuthClient to resolve"
                )
            )
        auth_url = self.current_oauth2_flow_manager.get_authorize_url(
            additional_params=additional_params
        )
        self.logger.info("Got authorization URL: {}".format(auth_url))
        return auth_url

    def oauth2_exchange_code_for_tokens(self, auth_code):
        """
        Exchange an authorization code for a token or tokens.

        :rtype: :class:`OAuthTokenResponse \
        <globus_sdk.auth.token_response.OAuthTokenResponse>`

        :param auth_code: An auth code typically obtained by sending the user to the
            authorize URL. The code is a very short-lived credential which this method
            is exchanging for tokens. Tokens are the credentials used to authenticate
            against Globus APIs.
        :type auth_code: str
        """
        self.logger.info(
            (
                "Final Step of 3-legged OAuth2 Flows: "
                "Exchanging authorization code for token(s)"
            )
        )
        if not self.current_oauth2_flow_manager:
            self.logger.error(
                ("OutOfOrderOperations(" "exchange_code before start_flow)")
            )
            raise exc.GlobusSDKUsageError(
                (
                    "Cannot exchange auth code until starting an OAuth2 flow. "
                    "Call the oauth2_start_flow() method on this "
                    "AuthClient to resolve"
                )
            )

        return self.current_oauth2_flow_manager.exchange_code_for_tokens(auth_code)

    def oauth2_refresh_token(self, refresh_token, additional_params=None):
        r"""
        Exchange a refresh token for a :class:`OAuthTokenResponse
        <globus_sdk.auth.token_response.OAuthTokenResponse>`, containing
        an access token.

        Does a token call of the form

        .. code-block:: none

            refresh_token=<refresh_token>
            grant_type=refresh_token

        plus any additional parameters you may specify.

        :param refresh_token: A Globus Refresh Token as a string
        :type refresh_token: str
        :param additional_params: A dict of extra params to encode in the refresh call.
        :type additional_params: dict, optional
        """
        self.logger.info(
            ("Executing token refresh; " "typically requires client credentials")
        )
        form_data = {"refresh_token": refresh_token, "grant_type": "refresh_token"}
        if additional_params:
            form_data.update(additional_params)
        return self.oauth2_token(form_data)

    def oauth2_validate_token(self, token, additional_params=None):
        """
        Validate a token. It can be an Access Token or a Refresh token.

        This call can be used to check tokens issued to your client,
        confirming that they are or are not still valid. The resulting response
        has the form ``{"active": True}`` when the token is valid, and
        ``{"active": False}`` when it is not.

        It is not necessary to validate tokens immediately after receiving them
        from the service -- any tokens which you are issued will be valid at
        that time. This is more for the purpose of doing checks like

        - confirm that ``oauth2_revoke_token`` succeeded
        - at application boot, confirm no need to do fresh login

        :param token: The token which should be validated. Can be a refresh token or an
            access token
        :type token: str
        :param additional_params: Additional parameters to include in the validation
            body. Primarily for internal use
        :type additional_params: dict, optional

        **Examples**

        Revoke a token and confirm that it is no longer active:

        >>> from globus_sdk import ConfidentialAppAuthClient
        >>> ac = ConfidentialAppAuthClient(CLIENT_ID, CLIENT_SECRET)
        >>> ac.oauth2_revoke_token('<token_string>')
        >>> data = ac.oauth2_validate_token('<token_string>')
        >>> assert not data['active']

        During application boot, check if the user needs to do a login, even
        if a token is present:

        >>> from globus_sdk import ConfidentialAppAuthClient
        >>> ac = ConfidentialAppAuthClient(CLIENT_ID, CLIENT_SECRET)
        >>> # this is not an SDK function, but a hypothetical function which
        >>> # you use to load a token out of configuration data
        >>> tok = load_token_from_config(...)
        >>>
        >>> if not tok or not ac.oauth2_validate_token(tok)['active']:
        >>>     # do_new_login() is another hypothetical helper
        >>>     tok = do_new_login()
        >>> # at this point, tok is expected to be a valid token
        """
        self.logger.info("Validating token")
        body = {"token": token}

        # if this client has no way of authenticating itself but
        # it does have a client_id, we'll send that in the request
        no_authentication = self.authorizer is None or isinstance(
            self.authorizer, NullAuthorizer
        )
        if no_authentication and self.client_id:
            self.logger.debug("Validating token with unauthenticated client")
            body.update({"client_id": self.client_id})

        if additional_params:
            body.update(additional_params)
        return self.post("/v2/oauth2/token/validate", text_body=body)

    def oauth2_revoke_token(self, token, additional_params=None):
        """
        Revoke a token. It can be an Access Token or a Refresh token.

        This call should be used to revoke tokens issued to your client,
        rendering them inert and not further usable. Typically, this is
        incorporated into "logout" functionality, but it should also be used if
        the client detects that its tokens are in an unsafe location (e.x.
        found in a world-readable logfile).

        You can check the "active" status of the token after revocation if you
        want to confirm that it was revoked.

        :param token: The token which should be revoked
        :type token: str
        :param additional_params: Additional parameters to include in the revocation
            body, which can help speed the revocation process. Primarily for internal
            use

        **Examples**

        >>> from globus_sdk import ConfidentialAppAuthClient
        >>> ac = ConfidentialAppAuthClient(CLIENT_ID, CLIENT_SECRET)
        >>> ac.oauth2_revoke_token('<token_string>')
        """
        self.logger.info("Revoking token")
        body = {"token": token}

        # if this client has no way of authenticating itself but
        # it does have a client_id, we'll send that in the request
        no_authentication = self.authorizer is None or isinstance(
            self.authorizer, NullAuthorizer
        )
        if no_authentication and self.client_id:
            self.logger.debug("Revoking token with unauthenticated client")
            body.update({"client_id": self.client_id})

        if additional_params:
            body.update(additional_params)
        return self.post("/v2/oauth2/token/revoke", text_body=body)

    def oauth2_token(self, form_data, response_class=OAuthTokenResponse):
        """
        This is the generic form of calling the OAuth2 Token endpoint.
        It takes ``form_data``, a dict which will be encoded in a form POST
        body on the request.

        Generally, users of the SDK should not call this method unless they are
        implementing OAuth2 flows.

        :param response_class: This is used by calls to the oauth2_token endpoint which
            need to specialize their responses. For example,
            :meth:`oauth2_get_dependent_tokens \
            <globus_sdk.ConfidentialAppAuthClient.oauth2_get_dependent_tokens>`
            requires a specialize response class to handle the dramatically different
            format of the Dependent Token Grant response
        :type response_class: class, optional

        :rtype: ``response_class``
        """
        self.logger.info("Fetching new token from Globus Auth")
        # use the fact that requests implicitly encodes the `data` parameter as
        # a form POST
        return self.post(
            "/v2/oauth2/token", response_class=response_class, text_body=form_data
        )

    def oauth2_userinfo(self):
        """
        Call the Userinfo endpoint of Globus Auth.
        Userinfo is specified as part of the OpenID Connect (OIDC) standard,
        and Globus Auth's Userinfo is OIDC-compliant.

        The exact data returned will depend upon the set of OIDC-related scopes
        which were used to acquire the token being used for this call. For
        details, see the **External Documentation** below.

        **Examples**

        >>> ac = AuthClient(...)
        >>> info = ac.oauth2_userinfo()
        >>> print('Effective Identity "{}" has Full Name "{}" and Email "{}"'
        >>>       .format(info["sub"], info["name"], info["email"]))

        **External Documentation**

        See
        `Userinfo \
        <https://docs.globus.org/api/auth/reference/
        #get_or_post_v2_oauth2_userinfo_resource>`_
        in the API documentation for details.
        """
        self.logger.info("Looking up OIDC-style Userinfo from Globus Auth")
        return self.get("/v2/oauth2/userinfo")
| 38.995037
| 87
| 0.632389
|
4a090613244b13254eca6211b913a154a63f4b62
| 20,008
|
py
|
Python
|
benchmark/memhred/model.py
|
BorisPolonsky/KdConv
|
124616c8a074ab32065757d47e315c5a54db259c
|
[
"Apache-2.0"
] | 294
|
2020-04-08T23:54:36.000Z
|
2022-03-30T09:29:32.000Z
|
benchmark/memhred/model.py
|
thu-coai/KNIVES
|
fcee15bf8d4f6b0a80ffe01a00739755a7f44e0a
|
[
"Apache-2.0"
] | 18
|
2020-04-16T03:37:07.000Z
|
2022-02-21T13:46:57.000Z
|
benchmark/memhred/model.py
|
thu-coai/KNIVES
|
fcee15bf8d4f6b0a80ffe01a00739755a7f44e0a
|
[
"Apache-2.0"
] | 51
|
2020-04-09T02:50:58.000Z
|
2022-03-29T08:04:52.000Z
|
#coding=utf8
import numpy as np
import tensorflow as tf
import time
from itertools import chain
from tensorflow.python.ops.nn import dynamic_rnn
from utils.output_projection import output_projection_layer, MyDense, MyInferenceHelper, MyAttention
from utils import SummaryHelper
import os
import jieba
import json
class HredModel(object):
def __init__(self, data, args, embed):
self.init_states = tf.placeholder(tf.float32, (None, args.ch_size), 'ctx_inps') # batch*ch_size
self.posts = tf.placeholder(tf.int32, (None, None), 'enc_inps') # batch*len
self.posts_length = tf.placeholder(tf.int32, (None,), 'enc_lens') # batch
self.prev_posts = tf.placeholder(tf.int32, (None, None), 'enc_prev_inps')
self.prev_posts_length = tf.placeholder(tf.int32, (None,), 'enc_prev_lens')
self.kgs = tf.placeholder(tf.int32, (None, None, None), 'kg_inps') # batch*len
self.kgs_h_length = tf.placeholder(tf.int32, (None, None), 'kg_h_lens') # batch
self.kgs_hr_length = tf.placeholder(tf.int32, (None, None), 'kg_hr_lens') # batch
self.kgs_hrt_length = tf.placeholder(tf.int32, (None, None), 'kg_hrt_lens') # batch
self.kgs_index = tf.placeholder(tf.float32, (None, None), 'kg_indices') # batch
self.origin_responses = tf.placeholder(tf.int32, (None, None), 'dec_inps') # batch*len
self.origin_responses_length = tf.placeholder(tf.int32, (None,), 'dec_lens') # batch
self.context_length = tf.placeholder(tf.int32, (None,), 'ctx_lens')
self.is_train = tf.placeholder(tf.bool)
num_past_turns = tf.shape(self.posts)[0] // tf.shape(self.origin_responses)[0]
# deal with original data to adapt encoder and decoder
batch_size, decoder_len = tf.shape(self.origin_responses)[0], tf.shape(self.origin_responses)[1]
self.responses = tf.split(self.origin_responses, [1, decoder_len-1], 1)[1] # no go_id
self.responses_length = self.origin_responses_length - 1
self.responses_input = tf.split(self.origin_responses, [decoder_len-1, 1], 1)[0] # no eos_id
self.responses_target = self.responses
decoder_len = decoder_len - 1
self.posts_input = self.posts # batch*len
self.decoder_mask = tf.reshape(tf.cumsum(tf.one_hot(self.responses_length-1,
decoder_len), reverse=True, axis=1), [-1, decoder_len])
kg_len = tf.shape(self.kgs)[2]
kg_h_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_h_length-1,
kg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])
kg_hr_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_hr_length-1,
kg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])
kg_hrt_mask = tf.reshape(tf.cumsum(tf.one_hot(self.kgs_hrt_length-1,
kg_len), reverse=True, axis=2), [batch_size, -1, kg_len, 1])
kg_key_mask = kg_hr_mask
kg_value_mask = kg_hrt_mask - kg_hr_mask
# initialize the training process
self.learning_rate = tf.Variable(float(args.lr), trainable=False, dtype=tf.float32)
self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * args.lr_decay)
self.global_step = tf.Variable(0, trainable=False)
# build the embedding table and embedding input
if embed is None:
# initialize the embedding randomly
self.embed = tf.get_variable('embed', [data.vocab_size, args.embedding_size], tf.float32)
else:
# initialize the embedding by pre-trained word vectors
self.embed = tf.get_variable('embed', dtype=tf.float32, initializer=embed)
self.encoder_input = tf.nn.embedding_lookup(self.embed, self.posts)
self.decoder_input = tf.nn.embedding_lookup(self.embed, self.responses_input)
self.kg_input = tf.nn.embedding_lookup(self.embed, self.kgs)
#self.knowledge_max = tf.reduce_max(tf.where(tf.cast(tf.tile(knowledge_mask, [1, 1, args.embedding_size]), tf.bool), self.knowledge_input, -mask_value), axis=1)
#self.knowledge_min = tf.reduce_max(tf.where(tf.cast(tf.tile(knowledge_mask, [1, 1, args.embedding_size]), tf.bool), self.knowledge_input, mask_value), axis=1)
self.kg_key_avg = tf.reduce_sum(self.kg_input * kg_key_mask, axis=2) / tf.maximum(tf.reduce_sum(kg_key_mask, axis=2), tf.ones_like(tf.expand_dims(self.kgs_hrt_length, -1), dtype=tf.float32))
self.kg_value_avg = tf.reduce_sum(self.kg_input * kg_value_mask, axis=2) / tf.maximum(tf.reduce_sum(kg_value_mask, axis=2), tf.ones_like(tf.expand_dims(self.kgs_hrt_length, -1), dtype=tf.float32))
#self.encoder_input = tf.cond(self.is_train,
# lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.embed, self.posts_input), 0.8),
# lambda: tf.nn.embedding_lookup(self.embed, self.posts_input)) # batch*len*unit
#self.decoder_input = tf.cond(self.is_train,
# lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.embed, self.responses_input), 0.8),
# lambda: tf.nn.embedding_lookup(self.embed, self.responses_input))
# build rnn_cell
cell_enc = tf.nn.rnn_cell.GRUCell(args.eh_size)
cell_ctx = tf.nn.rnn_cell.GRUCell(args.ch_size)
cell_dec = tf.nn.rnn_cell.GRUCell(args.dh_size)
# build encoder
with tf.variable_scope('encoder'):
encoder_output, encoder_state = dynamic_rnn(cell_enc, self.encoder_input,
self.posts_length, dtype=tf.float32, scope="encoder_rnn")
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
prev_output, _ = dynamic_rnn(cell_enc, tf.nn.embedding_lookup(self.embed, self.prev_posts), self.prev_posts_length,
dtype=tf.float32, scope="encoder_rnn")
with tf.variable_scope('context'):
encoder_state_reshape = tf.reshape(encoder_state, [-1, num_past_turns, args.eh_size])
_, self.context_state = dynamic_rnn(cell_ctx, encoder_state_reshape,
self.context_length, dtype=tf.float32, scope='context_rnn')
# get output projection function
output_fn = MyDense(data.vocab_size, use_bias = True)
sampled_sequence_loss = output_projection_layer(args.dh_size, data.vocab_size, args.softmax_samples)
# construct attention
'''
encoder_len = tf.shape(encoder_output)[1]
attention_memory = tf.reshape(encoder_output, [batch_size, -1, args.eh_size])
attention_mask = tf.reshape(tf.sequence_mask(self.posts_length, encoder_len), [batch_size, -1])
attention_mask = tf.concat([tf.ones([batch_size, 1], tf.bool), attention_mask[:,1:]], axis=1)
attn_mechanism = MyAttention(args.dh_size, attention_memory, attention_mask)
'''
attn_mechanism = tf.contrib.seq2seq.BahdanauAttention(args.dh_size, prev_output,
memory_sequence_length=tf.maximum(self.prev_posts_length, 1))
cell_dec_attn = tf.contrib.seq2seq.AttentionWrapper(cell_dec, attn_mechanism,
attention_layer_size=args.dh_size)
ctx_state_shaping = tf.layers.dense(self.context_state, args.dh_size, activation=None)
dec_start = cell_dec_attn.zero_state(batch_size, dtype=tf.float32).clone(cell_state=ctx_state_shaping)
# calculate kg embedding
with tf.variable_scope('knowledge'):
query = tf.reshape(tf.layers.dense(tf.concat(self.context_state, axis=-1), args.embedding_size, use_bias=False), [batch_size, 1, args.embedding_size])
kg_score = tf.reduce_sum(query * self.kg_key_avg, axis=2)
kg_score = tf.where(tf.greater(self.kgs_hrt_length, 0), kg_score, - tf.ones_like(kg_score) * np.inf)
kg_alignment = tf.nn.softmax(kg_score)
kg_max = tf.argmax(kg_alignment, axis=-1)
kg_max_onehot = tf.one_hot(kg_max, tf.shape(kg_alignment)[1], dtype=tf.float32)
self.kg_acc = tf.reduce_sum(kg_max_onehot * self.kgs_index) / tf.maximum(tf.reduce_sum(tf.reduce_max(self.kgs_index, axis=-1)), tf.constant(1.0))
self.kg_loss = tf.reduce_sum(- tf.log(tf.clip_by_value(kg_alignment, 1e-12, 1.0)) * self.kgs_index, axis=1) / tf.maximum(tf.reduce_sum(self.kgs_index, axis=1), tf.ones([batch_size], dtype=tf.float32))
self.kg_loss = tf.reduce_mean(self.kg_loss)
self.knowledge_embed = tf.reduce_sum(tf.expand_dims(kg_alignment, axis=-1) * self.kg_value_avg, axis=1)
#self.knowledge_embed = tf.Print(self.knowledge_embed, ['acc', self.kg_acc, 'loss', self.kg_loss])
knowledge_embed_extend = tf.tile(tf.expand_dims(self.knowledge_embed, axis=1), [1, decoder_len, 1])
self.decoder_input = tf.concat([self.decoder_input, knowledge_embed_extend], axis=2)
# construct helper
train_helper = tf.contrib.seq2seq.TrainingHelper(self.decoder_input, tf.maximum(self.responses_length, 1))
infer_helper = MyInferenceHelper(self.embed, tf.fill([batch_size], data.go_id), data.eos_id, self.knowledge_embed)
#infer_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(self.embed, tf.fill([batch_size], data.go_id), data.eos_id)
# build decoder (train)
with tf.variable_scope('decoder'):
decoder_train = tf.contrib.seq2seq.BasicDecoder(cell_dec_attn, train_helper, dec_start)
train_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder_train, impute_finished = True, scope = "decoder_rnn")
self.decoder_output = train_outputs.rnn_output
#self.decoder_output = tf.nn.dropout(self.decoder_output, 0.8)
self.decoder_distribution_teacher, self.decoder_loss, self.decoder_all_loss = \
sampled_sequence_loss(self.decoder_output, self.responses_target, self.decoder_mask)
# build decoder (test)
with tf.variable_scope('decoder', reuse=True):
decoder_infer = tf.contrib.seq2seq.BasicDecoder(cell_dec_attn, infer_helper, dec_start, output_layer = output_fn)
infer_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder_infer, impute_finished = True,
maximum_iterations=args.max_sent_length, scope = "decoder_rnn")
self.decoder_distribution = infer_outputs.rnn_output
self.generation_index = tf.argmax(tf.split(self.decoder_distribution,
[2, data.vocab_size-2], 2)[1], 2) + 2 # for removing UNK
# calculate the gradient of parameters and update
self.params = [k for k in tf.trainable_variables() if args.name in k.name]
opt = tf.train.AdamOptimizer(self.learning_rate)
self.loss = self.decoder_loss + self.kg_loss
gradients = tf.gradients(self.loss, self.params)
clipped_gradients, self.gradient_norm = tf.clip_by_global_norm(gradients,
args.grad_clip)
self.update = opt.apply_gradients(zip(clipped_gradients, self.params),
global_step=self.global_step)
# save checkpoint
self.latest_saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,
max_to_keep=args.checkpoint_max_to_keep, pad_step_number=True, keep_checkpoint_every_n_hours=1.0)
self.best_saver = tf.train.Saver(write_version=tf.train.SaverDef.V2,
max_to_keep=1, pad_step_number=True, keep_checkpoint_every_n_hours=1.0)
# create summary for tensorboard
self.create_summary(args)
def store_checkpoint(self, sess, path, key, name):
if key == "latest":
self.latest_saver.save(sess, path, global_step=self.global_step, latest_filename=name)
else:
self.best_saver.save(sess, path, global_step=self.global_step, latest_filename=name)
def create_summary(self, args):
self.summaryHelper = SummaryHelper("%s/%s_%s" % \
(args.log_dir, args.name, time.strftime("%H%M%S", time.localtime())), args)
self.trainSummary = self.summaryHelper.addGroup(scalar=["loss", "perplexity"], prefix="train")
scalarlist = ["loss", "perplexity"]
tensorlist = []
textlist = []
emblist = []
for i in args.show_sample:
textlist.append("show_str%d" % i)
self.devSummary = self.summaryHelper.addGroup(scalar=scalarlist, tensor=tensorlist, text=textlist,
embedding=emblist, prefix="dev")
self.testSummary = self.summaryHelper.addGroup(scalar=scalarlist, tensor=tensorlist, text=textlist,
embedding=emblist, prefix="test")
def print_parameters(self):
for item in self.params:
print('%s: %s' % (item.name, item.get_shape()))
def step_decoder(self, sess, data, forward_only=False, inference=False):
input_feed = {
#self.init_states: data['init_states'],
self.posts: data['posts'],
self.posts_length: data['posts_length'],
self.prev_posts: data['prev_posts'],
self.prev_posts_length: data['prev_posts_length'],
self.origin_responses: data['responses'],
self.origin_responses_length: data['responses_length'],
self.context_length: data['context_length'],
self.kgs: data['kg'],
self.kgs_h_length: data['kg_h_length'],
self.kgs_hr_length: data['kg_hr_length'],
self.kgs_hrt_length: data['kg_hrt_length'],
self.kgs_index: data['kg_index'],
}
if inference:
input_feed.update({self.is_train: False})
output_feed = [self.generation_index, self.decoder_distribution_teacher, self.decoder_all_loss,
self.kg_loss, self.kg_acc]
else:
input_feed.update({self.is_train: True})
if forward_only:
output_feed = [self.decoder_loss, self.decoder_distribution_teacher, self.kg_loss, self.kg_acc]
else:
output_feed = [self.decoder_loss, self.gradient_norm, self.update, self.kg_loss, self.kg_acc]
return sess.run(output_feed, input_feed)
def evaluate(self, sess, data, batch_size, key_name):
loss = np.zeros((3,))
total_length = np.zeros((3,))
data.restart(key_name, batch_size=batch_size, shuffle=False)
batched_data = data.get_next_batch(key_name)
while batched_data != None:
decoder_loss, _, kg_loss, kg_acc = self.step_decoder(sess, batched_data, forward_only=True)
length = np.sum(np.maximum(np.array(batched_data['responses_length']) - 1, 0))
kg_length = np.sum(np.max(batched_data['kg_index'], axis=-1))
total_length += [length, kg_length, kg_length]
loss += [decoder_loss * length, kg_loss * kg_length, kg_acc * kg_length]
batched_data = data.get_next_batch(key_name)
loss /= total_length
print(' perplexity on %s set: %.2f, kg_ppx: %.2f, kg_loss: %.4f, kg_acc: %.4f' % (key_name, np.exp(loss[0]), np.exp(loss[1]), loss[1], loss[2]))
return loss
def train_process(self, sess, data, args):
loss_step, time_step, epoch_step = np.zeros((3,)), .0, 0
previous_losses = [1e18] * 3
best_valid = 1e18
data.restart("train", batch_size=args.batch_size, shuffle=True)
batched_data = data.get_next_batch("train")
for epoch_step in range(args.epochs):
while batched_data != None:
if self.global_step.eval() % args.checkpoint_steps == 0 and self.global_step.eval() != 0:
print(
"Epoch %d global step %d learning rate %.4f step-time %.2f perplexity: %.2f, kg_ppx: %.2f, kg_loss: %.4f, kg_acc: %.4f" % (
epoch_step, self.global_step.eval(), self.learning_rate.eval(), time_step, np.exp(loss_step[0]),
np.exp(loss_step[1]), loss_step[1], loss_step[2]))
self.trainSummary(self.global_step.eval() // args.checkpoint_steps,
{'loss': loss_step[0], 'perplexity': np.exp(loss_step[0])})
self.store_checkpoint(sess, '%s/checkpoint_latest/%s' % (args.model_dir, args.name), "latest",
args.name)
dev_loss = self.evaluate(sess, data, args.batch_size, "dev")
self.devSummary(self.global_step.eval() // args.checkpoint_steps,
{'loss': dev_loss[0], 'perplexity': np.exp(dev_loss[0])})
if np.sum(loss_step) > max(previous_losses):
sess.run(self.learning_rate_decay_op)
if dev_loss[0] < best_valid:
best_valid = dev_loss[0]
self.store_checkpoint(sess, '%s/checkpoint_best/%s' % (args.model_dir, args.name), "best",
args.name)
previous_losses = previous_losses[1:] + [np.sum(loss_step[0])]
loss_step, time_step = np.zeros((3,)), .0
start_time = time.time()
step_out = self.step_decoder(sess, batched_data)
loss_step += np.array([step_out[0], step_out[3], step_out[4]]) / args.checkpoint_steps
time_step += (time.time() - start_time) / args.checkpoint_steps
batched_data = data.get_next_batch("train")
data.restart("train", batch_size=args.batch_size, shuffle=True)
batched_data = data.get_next_batch("train")
def test_process_hits(self, sess, data, args):
with open(os.path.join(args.datapath, 'test_distractors.json'), 'r', encoding='utf8') as f:
test_distractors = json.load(f)
data.restart("test", batch_size=1, shuffle=False)
batched_data = data.get_next_batch("test")
loss_record = []
cnt = 0
while batched_data != None:
for key in batched_data:
if isinstance(batched_data[key], np.ndarray):
batched_data[key] = batched_data[key].tolist()
batched_data['responses_length'] = [len(batched_data['responses'][0])]
for each_resp in test_distractors[cnt]:
batched_data['responses'].append([data.go_id] + data.convert_tokens_to_ids(jieba.lcut(each_resp)) +
[data.eos_id])
batched_data['responses_length'].append(len(batched_data['responses'][-1]))
max_length = max(batched_data['responses_length'])
resp = np.zeros((len(batched_data['responses']), max_length), dtype=int)
for i, each_resp in enumerate(batched_data['responses']):
resp[i, :len(each_resp)] = each_resp
batched_data['responses'] = resp
posts = []
posts_length = []
prev_posts = []
prev_posts_length = []
context_length = []
kg = []
kg_h_length = []
kg_hr_length = []
kg_hrt_length = []
kg_index = []
for _ in range(len(resp)):
posts += batched_data['posts']
posts_length += batched_data['posts_length']
prev_posts += batched_data['prev_posts']
prev_posts_length += batched_data['prev_posts_length']
context_length += batched_data['context_length']
kg += batched_data['kg']
kg_h_length += batched_data['kg_h_length']
kg_hr_length += batched_data['kg_hr_length']
kg_hrt_length += batched_data['kg_hrt_length']
kg_index += batched_data['kg_index']
batched_data['posts'] = posts
batched_data['posts_length'] = posts_length
batched_data['prev_posts'] = prev_posts
batched_data['prev_posts_length'] = prev_posts_length
batched_data['context_length'] = context_length
batched_data['kg'] = kg
batched_data['kg_h_length'] = kg_h_length
batched_data['kg_hr_length'] = kg_hr_length
batched_data['kg_hrt_length'] = kg_hrt_length
batched_data['kg_index'] = kg_index
_, _, loss, _, _ = self.step_decoder(sess, batched_data, inference=True)
loss_record.append(loss)
cnt += 1
batched_data = data.get_next_batch("test")
assert cnt == len(test_distractors)
loss = np.array(loss_record)
loss_rank = np.argsort(loss, axis=1)
hits1 = float(np.mean(loss_rank[:, 0] == 0))
hits3 = float(np.mean(np.min(loss_rank[:, :3], axis=1) == 0))
return {'hits@1' : hits1, 'hits@3': hits3}
def test_process(self, sess, data, args):
    """Run the full test pass and dump all results to a text file.

    Computes teacher-forcing metrics (metric1, e.g. perplexity), free-run
    generation metrics (metric2, e.g. BLEU) and ranking metrics
    (hits@1/hits@3 via ``test_process_hits``).

    :param sess: TensorFlow session used by ``step_decoder``.
    :param data: data loader providing batches and metric objects.
    :param args: namespace with at least ``batch_size``, ``out_dir`` and ``name``.
    :return: dict containing only the scalar (bytes/int/float) metric values.
    """
    metric1 = data.get_teacher_forcing_metric()
    metric2 = data.get_inference_metric()
    data.restart("test", batch_size=args.batch_size, shuffle=False)
    batched_data = data.get_next_batch("test")
    while batched_data != None:
        # One decoder step per batch: returns generated token ids and the
        # per-step log-probabilities of the gold responses.
        batched_responses_id, gen_log_prob, _, _, _ = self.step_decoder(sess, batched_data, False, True)
        metric1_data = {'resp_allvocabs': np.array(batched_data['responses_allvocabs']),
                        'resp_length': np.array(batched_data['responses_length']),
                        'gen_log_prob': np.array(gen_log_prob)}
        metric1.forward(metric1_data)
        batch_results = []
        for response_id in batched_responses_id:
            response_id_list = response_id.tolist()
            # Truncate each generated sequence at (and including) the first EOS.
            if data.eos_id in response_id_list:
                result_id = response_id_list[:response_id_list.index(data.eos_id) + 1]
            else:
                result_id = response_id_list
            batch_results.append(result_id)
        metric2_data = {'gen': np.array(batch_results),
                        'resp_allvocabs': np.array(batched_data['responses_allvocabs'])}
        metric2.forward(metric2_data)
        batched_data = data.get_next_batch("test")
    res = metric1.close()
    res.update(metric2.close())
    # Add ranking metrics computed against the distractor responses.
    res.update(self.test_process_hits(sess, data, args))
    test_file = args.out_dir + "/%s_%s.txt" % (args.name, "test")
    with open(test_file, 'w') as f:
        print("Test Result:")
        res_print = list(res.items())
        res_print.sort(key=lambda x: x[0])
        for key, value in res_print:
            if isinstance(value, float):
                print("\t%s:\t%f" % (key, value))
                f.write("%s:\t%f\n" % (key, value))
        f.write('\n')
        # Write reference/generated pairs for qualitative inspection.
        for i in range(len(res['resp'])):
            f.write("resp:\t%s\n" % " ".join(res['resp'][i]))
            f.write("gen:\t%s\n\n" % " ".join(res['gen'][i]))
    print("result output to %s" % test_file)
    return {key: val for key, val in res.items() if type(val) in [bytes, int, float]}
| 47.188679
| 202
| 0.72496
|
4a090703f80312cadcae7800b33c6cbb3f893964
| 2,931
|
py
|
Python
|
DL_Models/main-DenseGCN.py
|
kusumikakd/EEG_DL1
|
5a6e6fc7c643fbad35a6b427dddadab930f00f2a
|
[
"MIT"
] | 3
|
2020-12-10T00:50:57.000Z
|
2022-02-10T08:49:55.000Z
|
DL_Models/main-DenseGCN.py
|
kusumikakd/EEG_DL
|
5a6e6fc7c643fbad35a6b427dddadab930f00f2a
|
[
"MIT"
] | null | null | null |
DL_Models/main-DenseGCN.py
|
kusumikakd/EEG_DL
|
5a6e6fc7c643fbad35a6b427dddadab930f00f2a
|
[
"MIT"
] | 1
|
2021-05-21T09:18:05.000Z
|
2021-05-21T09:18:05.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import useful packages
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# Hide the Configuration and Warnings
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy import sparse
from Models.Network.lib_for_GCN import DenseGCN_Model, graph, coarsening
from Models.DatasetAPI.DataLoader import DatasetLoader
# Model Name
Model = 'Graph_Convolutional_Neural_Network'
# Clear all the stack and use GPU resources as much as possible
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Your Dataset Location, for example EEG-Motor-Movement-Imagery-Dataset
# The CSV file should be named as training_set.csv, training_label.csv, test_set.csv, and test_label.csv
DIR = 'DatasetAPI/EEG-Motor-Movement-Imagery-Dataset/'
SAVE = 'Saved_Files/' + Model + '/'
if not os.path.exists(SAVE): # If the SAVE folder doesn't exist, create one
os.mkdir(SAVE)
# Load the dataset, here it uses one-hot representation for labels
train_data, train_labels, test_data, test_labels = DatasetLoader(DIR=DIR)
# Read the Adjacency matrix
Adjacency_Matrix = pd.read_csv(DIR + 'Adjacency_Matrix.csv', header=None)
Adjacency_Matrix = np.array(Adjacency_Matrix).astype('float32')
Adjacency_Matrix = sparse.csr_matrix(Adjacency_Matrix)
graphs, perm = coarsening.coarsen(Adjacency_Matrix, levels=5, self_connections=False)
X_train = coarsening.perm_data(train_data, perm)
X_test = coarsening.perm_data(test_data, perm)
# Obtain the Graph Laplacian
L = [graph.laplacian(Adjacency_Matrix, normalized=True) for Adjacency_Matrix in graphs]
# Hyper-parameters
params = dict()
params['dir_name'] = Model
params['num_epochs'] = 100
params['batch_size'] = 1024
params['eval_frequency'] = 100
# Building blocks.
params['filter'] = 'chebyshev5'
params['brelu'] = 'b2relu'
params['pool'] = 'mpool1'
# Architecture.
params['F'] = [16, 32, 64, 128, 256, 512] # Number of graph convolutional filters.
params['K'] = [2, 2, 2, 2, 2, 2] # Polynomial orders.
params['p'] = [1, 1, 1, 1, 1, 1] # Pooling sizes.
params['M'] = [4] # Output dimensionality of fully connected layers.
# Optimization.
params['regularization'] = 0.000001 # L2 regularization
params['dropout'] = 0.50 # Dropout rate
params['learning_rate'] = 0.01 # Learning rate
params['decay_rate'] = 1 # Learning rate Decay == 1 means no Decay
params['momentum'] = 0 # momentum == 0 means Use Adam Optimizer
params['decay_steps'] = np.shape(train_data)[0] / params['batch_size']
# Train model
model = DenseGCN_Model.cgcnn(L, **params)
accuracy, loss, t_step = model.fit(X_train, train_labels, X_test, test_labels)
| 36.6375
| 104
| 0.719891
|
4a090704b6b4bd0ec5b62b6d04c54b6ec057c0ad
| 82,355
|
py
|
Python
|
rclpy/rclpy/node.py
|
ksuszka/rclpy
|
b3cd14d65d4aeb528b6bcd2db489c8000acfaf82
|
[
"Apache-2.0"
] | null | null | null |
rclpy/rclpy/node.py
|
ksuszka/rclpy
|
b3cd14d65d4aeb528b6bcd2db489c8000acfaf82
|
[
"Apache-2.0"
] | null | null | null |
rclpy/rclpy/node.py
|
ksuszka/rclpy
|
b3cd14d65d4aeb528b6bcd2db489c8000acfaf82
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TypeVar
from typing import Union
import warnings
import weakref
from rcl_interfaces.msg import FloatingPointRange
from rcl_interfaces.msg import IntegerRange
from rcl_interfaces.msg import Parameter as ParameterMsg
from rcl_interfaces.msg import ParameterDescriptor
from rcl_interfaces.msg import ParameterEvent
from rcl_interfaces.msg import ParameterType
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.msg import SetParametersResult
from rclpy.callback_groups import CallbackGroup
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.client import Client
from rclpy.clock import Clock
from rclpy.clock import ROSClock
from rclpy.constants import S_TO_NS
from rclpy.context import Context
from rclpy.exceptions import InvalidParameterTypeException
from rclpy.exceptions import InvalidParameterValueException
from rclpy.exceptions import InvalidTopicNameException
from rclpy.exceptions import NoParameterOverrideProvidedException
from rclpy.exceptions import NotInitializedException
from rclpy.exceptions import ParameterAlreadyDeclaredException
from rclpy.exceptions import ParameterImmutableException
from rclpy.exceptions import ParameterNotDeclaredException
from rclpy.executors import Executor
from rclpy.expand_topic_name import expand_topic_name
from rclpy.guard_condition import GuardCondition
from rclpy.handle import InvalidHandle
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
from rclpy.logging import get_logger
from rclpy.parameter import Parameter, PARAMETER_SEPARATOR_STRING
from rclpy.parameter_service import ParameterService
from rclpy.publisher import Publisher
from rclpy.qos import qos_profile_parameter_events
from rclpy.qos import qos_profile_services_default
from rclpy.qos import QoSProfile
from rclpy.qos_event import PublisherEventCallbacks
from rclpy.qos_event import SubscriptionEventCallbacks
from rclpy.qos_overriding_options import _declare_qos_parameters
from rclpy.qos_overriding_options import QoSOverridingOptions
from rclpy.service import Service
from rclpy.subscription import Subscription
from rclpy.time_source import TimeSource
from rclpy.timer import Rate
from rclpy.timer import Timer
from rclpy.topic_endpoint_info import TopicEndpointInfo
from rclpy.type_support import check_is_valid_msg_type
from rclpy.type_support import check_is_valid_srv_type
from rclpy.utilities import get_default_context
from rclpy.validate_full_topic_name import validate_full_topic_name
from rclpy.validate_namespace import validate_namespace
from rclpy.validate_node_name import validate_node_name
from rclpy.validate_parameter_name import validate_parameter_name
from rclpy.validate_topic_name import validate_topic_name
from rclpy.waitable import Waitable
# Prefix that marks a node name as hidden (by ROS graph convention).
HIDDEN_NODE_PREFIX = '_'

# Used for documentation purposes only
MsgType = TypeVar('MsgType')
SrvType = TypeVar('SrvType')
SrvTypeRequest = TypeVar('SrvTypeRequest')
SrvTypeResponse = TypeVar('SrvTypeResponse')

# Re-export exception defined in _rclpy C extension.
# `Node.get_*_names_and_types_by_node` methods may raise this error.
NodeNameNonExistentError = _rclpy.NodeNameNonExistentError
class Node:
"""
A Node in the ROS graph.
A Node is the primary entrypoint in a ROS system for communication.
It can be used to create ROS entities such as publishers, subscribers, services, etc.
"""
PARAM_REL_TOL = 1e-6
"""
Relative tolerance for floating point parameter values' comparison.
See `math.isclose` documentation.
"""
def __init__(
    self,
    node_name: str,
    *,
    context: Context = None,
    cli_args: List[str] = None,
    namespace: str = None,
    use_global_arguments: bool = True,
    enable_rosout: bool = True,
    start_parameter_services: bool = True,
    parameter_overrides: List[Parameter] = None,
    allow_undeclared_parameters: bool = False,
    automatically_declare_parameters_from_overrides: bool = False
) -> None:
    """
    Create a Node.

    :param node_name: A name to give to this node. Validated by :func:`validate_node_name`.
    :param context: The context to be associated with, or ``None`` for the default global
        context.
    :param cli_args: A list of strings of command line args to be used only by this node.
        These arguments are used to extract remappings used by the node and other ROS specific
        settings, as well as user defined non-ROS arguments.
    :param namespace: The namespace to which relative topic and service names will be prefixed.
        Validated by :func:`validate_namespace`.
    :param use_global_arguments: ``False`` if the node should ignore process-wide command line
        args.
    :param enable_rosout: ``False`` if the node should ignore rosout logging.
    :param start_parameter_services: ``False`` if the node should not create parameter
        services.
    :param parameter_overrides: A list of overrides for initial values for parameters declared
        on the node.
    :param allow_undeclared_parameters: True if undeclared parameters are allowed.
        This flag affects the behavior of parameter-related operations.
    :param automatically_declare_parameters_from_overrides: If True, the "parameter overrides"
        will be used to implicitly declare parameters on the node during creation.
    """
    self.__handle = None
    self._context = get_default_context() if context is None else context
    self._parameters: dict = {}
    self.__publishers: List[Publisher] = []
    self.__subscriptions: List[Subscription] = []
    self.__clients: List[Client] = []
    self.__services: List[Service] = []
    self.__timers: List[Timer] = []
    self.__guards: List[GuardCondition] = []
    self.__waitables: List[Waitable] = []
    self._default_callback_group = MutuallyExclusiveCallbackGroup()
    self._parameters_callbacks: List[Callable[[List[Parameter]], SetParametersResult]] = []
    self._rate_group = ReentrantCallbackGroup()
    self._allow_undeclared_parameters = allow_undeclared_parameters
    self._parameter_overrides = {}
    self._descriptors = {}

    namespace = namespace or ''
    if not self._context.ok():
        raise NotInitializedException('cannot create node')
    with self._context.handle:
        try:
            self.__node = _rclpy.Node(
                node_name,
                namespace,
                self._context.handle,
                cli_args,
                use_global_arguments,
                enable_rosout
            )
        except ValueError:
            # these will raise more specific errors if the name or namespace is bad
            validate_node_name(node_name)
            # emulate what rcl_node_init() does to accept '' and relative namespaces
            if not namespace:
                namespace = '/'
            if not namespace.startswith('/'):
                namespace = '/' + namespace
            validate_namespace(namespace)
            # Should not get to this point
            raise RuntimeError('rclpy_create_node failed for unknown reason')
    with self.handle:
        self._logger = get_logger(self.__node.logger_name())

    # Executor is tracked only via a weak reference; see the `executor` property.
    self.__executor_weakref = None

    self._parameter_event_publisher = self.create_publisher(
        ParameterEvent, '/parameter_events', qos_profile_parameter_events)

    with self.handle:
        self._parameter_overrides = self.__node.get_parameters(Parameter)
    # Combine parameters from params files with those from the node constructor and
    # use the set_parameters_atomically API so a parameter event is published.
    if parameter_overrides is not None:
        self._parameter_overrides.update({p.name: p for p in parameter_overrides})

    if automatically_declare_parameters_from_overrides:
        self._parameters.update(self._parameter_overrides)
        self._descriptors.update({p: ParameterDescriptor() for p in self._parameters})

    # Clock that has support for ROS time.
    # Note: parameter overrides and parameter event publisher need to be ready at this point
    # to be able to declare 'use_sim_time' if it was not declared yet.
    self._clock = ROSClock()
    self._time_source = TimeSource(node=self)
    self._time_source.attach_clock(self._clock)

    if start_parameter_services:
        self._parameter_service = ParameterService(self)
@property
def publishers(self) -> Iterator[Publisher]:
    """Yield every :class:`Publisher` created on this node."""
    for pub in self.__publishers:
        yield pub
@property
def subscriptions(self) -> Iterator[Subscription]:
    """Yield every :class:`Subscription` created on this node."""
    for sub in self.__subscriptions:
        yield sub
@property
def clients(self) -> Iterator[Client]:
    """Yield every service :class:`Client` created on this node."""
    for client in self.__clients:
        yield client
@property
def services(self) -> Iterator[Service]:
    """Yield every :class:`Service` server created on this node."""
    for srv in self.__services:
        yield srv
@property
def timers(self) -> Iterator[Timer]:
    """Yield every :class:`Timer` created on this node."""
    for timer in self.__timers:
        yield timer
@property
def guards(self) -> Iterator[GuardCondition]:
    """Yield every :class:`GuardCondition` created on this node."""
    for guard in self.__guards:
        yield guard
@property
def waitables(self) -> Iterator[Waitable]:
    """Yield every :class:`Waitable` created on this node."""
    for waitable in self.__waitables:
        yield waitable
@property
def executor(self) -> Optional[Executor]:
    """Return the executor this node was added to, or ``None`` if there is none."""
    # The executor is held through a weak reference; dereference it if set.
    ref = self.__executor_weakref
    return ref() if ref else None
@executor.setter
def executor(self, new_executor: Executor) -> None:
    """Set or change the executor the node belongs to."""
    current_executor = self.executor
    if current_executor == new_executor:
        # Already attached to this executor (or both are None): nothing to do.
        return
    # Detach from the previous executor before attaching to the new one.
    if current_executor is not None:
        current_executor.remove_node(self)
    if new_executor is None:
        self.__executor_weakref = None
    else:
        new_executor.add_node(self)
        # Keep only a weak reference so the node does not keep the executor alive.
        self.__executor_weakref = weakref.ref(new_executor)
def _wake_executor(self):
    # Wake the executor (if this node is attached to one) so it can notice
    # newly added or removed entities on this node.
    executor = self.executor
    if executor:
        executor.wake()
@property
def context(self) -> Context:
    """Get the context associated with the node."""
    # Set once in __init__ (either the provided context or the global default).
    return self._context
@property
def default_callback_group(self) -> CallbackGroup:
    """
    Get the default callback group.

    If no other callback group is provided when a ROS entity is created with the node,
    then it is added to the default callback group.
    """
    return self._default_callback_group
@property
def handle(self):
    """
    Get the handle to the underlying `rcl_node_t`.

    Cannot be modified after node creation.

    :raises: AttributeError if modified after creation.
    """
    return self.__node
@handle.setter
def handle(self, value):
    # The setter exists only to give a clear error; the handle is immutable.
    raise AttributeError('handle cannot be modified after node creation')
def get_name(self) -> str:
    """Get the name of the node."""
    # The handle is entered as a context manager while querying it —
    # presumably to keep it valid for the duration of the call (TODO confirm).
    with self.handle:
        return self.handle.get_node_name()
def get_namespace(self) -> str:
    """Get the namespace of the node."""
    # Enter the handle while querying it, mirroring get_name above.
    with self.handle:
        return self.handle.get_namespace()
def get_clock(self) -> Clock:
    """Get the clock used by the node."""
    # A ROSClock created in __init__ and attached to this node's TimeSource.
    return self._clock
def get_logger(self):
    """Get the node's logger."""
    return self._logger
def declare_parameter(
    self,
    name: str,
    value: Any = None,
    descriptor: Optional[ParameterDescriptor] = None,
    ignore_override: bool = False
) -> Parameter:
    """
    Declare and initialize a parameter.

    This method, if successful, will result in any callback registered with
    :func:`add_on_set_parameters_callback` to be called.

    :param name: Fully-qualified name of the parameter, including its namespace.
    :param value: Value of the parameter to declare.
    :param descriptor: Descriptor for the parameter to declare.
    :param ignore_override: True if overrides shall not be taken into account; False otherwise.
    :return: Parameter with the effectively assigned value.
    :raises: ParameterAlreadyDeclaredException if the parameter had already been declared.
    :raises: InvalidParameterException if the parameter name is invalid.
    :raises: InvalidParameterValueException if the registered callback rejects the parameter.
    """
    if value is None and descriptor is None:
        # Temporal patch so we get deprecation warning if only a name is provided.
        # declare_parameters treats a 1-tuple as the deprecated name-only form.
        args = (name, )
    else:
        descriptor = ParameterDescriptor() if descriptor is None else descriptor
        args = (name, value, descriptor)
    # Delegate to the batch API with an empty namespace; it returns a list of one.
    return self.declare_parameters('', [args], ignore_override)[0]
def declare_parameters(
    self,
    namespace: str,
    parameters: List[Union[
        Tuple[str],
        Tuple[str, Parameter.Type],
        Tuple[str, Any],
        Tuple[str, Any, ParameterDescriptor],
    ]],
    ignore_override: bool = False
) -> List[Parameter]:
    """
    Declare a list of parameters.

    The tuples in the given parameter list shall contain the name for each parameter,
    optionally providing a value and a descriptor.
    For each entry in the list, a parameter with a name of "namespace.name"
    will be declared.
    The resulting value for each declared parameter will be returned, considering
    parameter overrides set upon node creation as the first choice,
    or provided parameter values as the second one.

    The name expansion is naive, so if you set the namespace to be "foo.",
    then the resulting parameter names will be like "foo..name".
    However, if the namespace is an empty string, then no leading '.' will be
    placed before each name, which would have been the case when naively
    expanding "namespace.name".
    This allows you to declare several parameters at once without a namespace.

    This method, if successful, will result in any callback registered with
    :func:`add_on_set_parameters_callback` to be called once for each parameter.
    If one of those calls fail, an exception will be raised and the remaining parameters will
    not be declared.
    Parameters declared up to that point will not be undeclared.

    :param namespace: Namespace for parameters.
    :param parameters: List of tuples with parameters to declare.
    :param ignore_override: True if overrides shall not be taken into account; False otherwise.
    :return: Parameter list with the effectively assigned values for each of them.
    :raises: ParameterAlreadyDeclaredException if the parameter had already been declared.
    :raises: InvalidParameterException if the parameter name is invalid.
    :raises: InvalidParameterValueException if the registered callback rejects any parameter.
    :raises: TypeError if any tuple in **parameters** does not match the annotated type.
    """
    parameter_list = []
    descriptors = {}
    for index, parameter_tuple in enumerate(parameters):
        if len(parameter_tuple) < 1 or len(parameter_tuple) > 3:
            raise TypeError(
                'Invalid parameter tuple length at index {index} in parameters list: '
                '{parameter_tuple}; expecting length between 1 and 3'.format_map(locals())
            )
        value = None
        param_type = None

        # Get the values from the tuple, checking its types.
        # Use defaults if the tuple doesn't contain value and / or descriptor.
        name = parameter_tuple[0]
        second_arg = parameter_tuple[1] if 1 < len(parameter_tuple) else None
        descriptor = parameter_tuple[2] if 2 < len(parameter_tuple) else ParameterDescriptor()
        if not isinstance(name, str):
            raise TypeError(
                f'First element {name} at index {index} in parameters list '
                'is not a str.')
        if not isinstance(descriptor, ParameterDescriptor):
            raise TypeError(
                f'Third element {descriptor} at index {index} in parameters list '
                'is not a ParameterDescriptor.'
            )

        if len(parameter_tuple) == 1:
            # Deprecated name-only form: implicitly dynamically typed.
            # Fixed typo ("parmater") and missing closing backtick in the
            # user-facing deprecation message.
            warnings.warn(
                f"when declaring parameter named '{name}', "
                'declaring a parameter only providing its name is deprecated. '
                'You have to either:\n'
                '\t- Pass a name and a default value different to "PARAMETER NOT SET"'
                ' (and optionally a descriptor).\n'
                '\t- Pass a name and a parameter type.\n'
                '\t- Pass a name and a descriptor with `dynamic_typing=True`')
            descriptor.dynamic_typing = True

        if isinstance(second_arg, Parameter.Type):
            # Statically typed declaration without a default value.
            if second_arg.value == Parameter.Type.NOT_SET:
                raise ValueError(
                    f'Cannot declare parameter {{{name}}} as statically typed of type NOT_SET')
            if descriptor.dynamic_typing is True:
                raise ValueError(
                    f'When declaring parameter {{{name}}} passing a descriptor with'
                    '`dynamic_typing=True` is not allowed when the parameter type is provided')
            descriptor.type = second_arg.value
        else:
            value = second_arg
            if not descriptor.dynamic_typing and value is not None:
                # infer type from default value
                if not isinstance(value, ParameterValue):
                    descriptor.type = Parameter.Type.from_parameter_value(value).value
                else:
                    if value.type == ParameterType.PARAMETER_NOT_SET:
                        raise ValueError(
                            'Cannot declare a statically typed parameter with default value '
                            'of type PARAMETER_NOT_SET')
                    descriptor.type = value.type

        # Get value from parameter overrides, or from tuple if it doesn't exist.
        if not ignore_override and name in self._parameter_overrides:
            value = self._parameter_overrides[name].value
        if value is None and not descriptor.dynamic_typing:
            raise NoParameterOverrideProvidedException(name)

        if namespace:
            name = f'{namespace}.{name}'

        # Note(jubeira): declare_parameters verifies the name, but set_parameters doesn't.
        validate_parameter_name(name)

        parameter_list.append(Parameter(name, value=value))
        descriptors.update({name: descriptor})

    parameters_already_declared = [
        parameter.name for parameter in parameter_list if parameter.name in self._parameters
    ]
    if any(parameters_already_declared):
        raise ParameterAlreadyDeclaredException(parameters_already_declared)

    # Call the callback once for each of the parameters, using method that doesn't
    # check whether the parameter was declared beforehand or not.
    self._set_parameters(
        parameter_list,
        descriptors,
        raise_on_failure=True,
        allow_undeclared_parameters=True
    )
    return self.get_parameters([parameter.name for parameter in parameter_list])
def undeclare_parameter(self, name: str):
    """
    Undeclare a previously declared parameter.

    This method will not cause a callback registered with
    :func:`add_on_set_parameters_callback` to be called.

    :param name: Fully-qualified name of the parameter, including its namespace.
    :raises: ParameterNotDeclaredException if parameter had not been declared before.
    :raises: ParameterImmutableException if the parameter was created as read-only.
    """
    # Guard clauses: reject unknown and read-only parameters up front.
    if not self.has_parameter(name):
        raise ParameterNotDeclaredException(name)
    if self._descriptors[name].read_only:
        raise ParameterImmutableException(name)
    # Drop both the stored value and its descriptor.
    del self._parameters[name]
    del self._descriptors[name]
def has_parameter(self, name: str) -> bool:
    """Return True if parameter is declared; False otherwise."""
    # Membership in _parameters is the single source of truth for declaration.
    return name in self._parameters
def get_parameter_types(self, names: List[str]) -> List[Parameter.Type]:
    """
    Get the types of a list of parameters by name.

    :param names: Fully-qualified names of the parameters to get, including their namespaces.
    :return: The values for the given parameter types.
      A default Parameter.Type.NOT_SET will be returned for undeclared parameters
      if undeclared parameters are allowed.
    :raises: ParameterNotDeclaredException if undeclared parameters are not allowed,
        and at least one parameter hadn't been declared beforehand.
    """
    if any(not isinstance(name, str) for name in names):
        raise TypeError('All names must be instances of type str')
    return list(map(self.get_parameter_type, names))
def get_parameter_type(self, name: str) -> Parameter.Type:
    """
    Get the type of one parameter by name.

    :param name: Fully-qualified name of the parameter, including its namespace.
    :return: The type for the given parameter name.
      A default Parameter.Type.NOT_SET will be returned for an undeclared parameter
      if undeclared parameters are allowed.
    :raises: ParameterNotDeclaredException if undeclared parameters are not allowed,
        and the parameter hadn't been declared beforehand.
    """
    # Early returns instead of if/elif/else chain.
    if self.has_parameter(name):
        return self._parameters[name].type_.value
    if self._allow_undeclared_parameters:
        return Parameter.Type.NOT_SET
    raise ParameterNotDeclaredException(name)
def get_parameters(self, names: List[str]) -> List[Parameter]:
    """
    Get a list of parameters by name.

    :param names: Fully-qualified names of the parameters to get, including their namespaces.
    :return: The values for the given parameter names.
      A default Parameter will be returned for undeclared parameters if
      undeclared parameters are allowed.
    :raises: ParameterNotDeclaredException if undeclared parameters are not allowed,
        and at least one parameter hadn't been declared beforehand.
    """
    if any(not isinstance(name, str) for name in names):
        raise TypeError('All names must be instances of type str')
    return list(map(self.get_parameter, names))
def get_parameter(self, name: str) -> Parameter:
    """
    Get one parameter by name.

    :param name: Fully-qualified name of the parameter, including its namespace.
    :return: The value for the given parameter name.
      A default Parameter will be returned for an undeclared parameter if
      undeclared parameters are allowed.
    :raises: ParameterNotDeclaredException if undeclared parameters are not allowed,
        and the parameter hadn't been declared beforehand.
    """
    # Early returns instead of if/elif/else chain.
    if self.has_parameter(name):
        return self._parameters[name]
    if self._allow_undeclared_parameters:
        return Parameter(name, Parameter.Type.NOT_SET, None)
    raise ParameterNotDeclaredException(name)
def get_parameter_or(
        self, name: str, alternative_value: Optional[Parameter] = None) -> Parameter:
    """
    Get a parameter by name, falling back to an alternative value.

    If the alternative value is None, a default Parameter with the given name and NOT_SET
    type will be returned if the parameter was not declared.

    :param name: Fully-qualified name of the parameter, including its namespace.
    :param alternative_value: Alternative parameter to get if it had not been declared before.
    :return: Requested parameter, or alternative value if it hadn't been declared before.
    """
    if alternative_value is None:
        alternative_value = Parameter(name, Parameter.Type.NOT_SET)
    # EAFP lookup: missing key means the parameter was never declared.
    try:
        return self._parameters[name]
    except KeyError:
        return alternative_value
def get_parameters_by_prefix(self, prefix: str) -> Dict[str, Optional[Union[
    bool, int, float, str, bytes,
    Sequence[bool], Sequence[int], Sequence[float], Sequence[str]
]]]:
    """
    Get parameters that have a given prefix in their names as a dictionary.

    The names which are used as keys in the returned dictionary have the prefix removed.
    For example, if you use the prefix "foo" and the parameters "foo.ping", "foo.pong"
    and "bar.baz" exist, then the returned dictionary will have the keys "ping" and "pong".
    Note that the parameter separator is also removed from the parameter name to create the
    keys.

    An empty string for the prefix will match all parameters.

    If no parameters with the prefix are found, an empty dictionary will be returned.

    :param prefix: The prefix of the parameters to get.
    :return: Dict of parameters with the given prefix.
    """
    # A non-empty prefix must also swallow the separator that follows it.
    if prefix:
        prefix = prefix + PARAMETER_SEPARATOR_STRING
    cut = len(prefix)
    matched = {}
    for full_name, param_value in self._parameters.items():
        if full_name.startswith(prefix):
            matched[full_name[cut:]] = param_value
    return matched
def set_parameters(self, parameter_list: List[Parameter]) -> List[SetParametersResult]:
    """
    Set parameters for the node, and return the result for the set action.

    If any parameter in the list was not declared beforehand and undeclared parameters are not
    allowed for the node, this method will raise a ParameterNotDeclaredException exception.

    Parameters are set in the order they are declared in the list.
    If setting a parameter fails due to not being declared, then the
    parameters which have already been set will stay set, and no attempt will
    be made to set the parameters which come after.

    If undeclared parameters are allowed, then all the parameters will be implicitly
    declared before being set even if they were not declared beforehand.
    Parameter overrides are ignored by this method.

    If a callback was registered previously with :func:`add_on_set_parameters_callback`, it
    will be called prior to setting the parameters for the node, once for each parameter.
    If the callback prevents a parameter from being set, then it will be reflected in the
    returned result; no exceptions will be raised in this case.
    For each successfully set parameter, a :class:`ParameterEvent` message is
    published.

    If the value type of the parameter is NOT_SET, and the existing parameter type is
    something else, then the parameter will be implicitly undeclared.

    :param parameter_list: The list of parameters to set.
    :return: The result for each set action as a list.
    :raises: ParameterNotDeclaredException if undeclared parameters are not allowed,
        and at least one parameter in the list hadn't been declared beforehand.
    """
    # Public wrapper: the internal setter applies the declaration check by default.
    return self._set_parameters(parameter_list)
def _set_parameters(
    self,
    parameter_list: List[Parameter],
    descriptors: Optional[Dict[str, ParameterDescriptor]] = None,
    raise_on_failure: bool = False,
    allow_undeclared_parameters: bool = False
) -> List[SetParametersResult]:
    """
    Set parameters for the node, and return the result for the set action.

    Method for internal usage; applies a setter method for each parameters in the list.
    By default it checks if the parameters were declared, raising an exception if at least
    one of them was not.

    If a callback was registered previously with :func:`add_on_set_parameters_callback`, it
    will be called prior to setting the parameters for the node, once for each parameter.
    If the callback doesn't succeed for a given parameter, it won't be set and either an
    unsuccessful result will be returned for that parameter, or an exception will be raised
    according to `raise_on_failure` flag.

    :param parameter_list: List of parameters to set.
    :param descriptors: Descriptors to set to the given parameters.
        If descriptors are given, each parameter in the list must have a corresponding one.
    :param raise_on_failure: True if InvalidParameterValueException has to be raised when
        the user callback rejects a parameter, False otherwise.
    :param allow_undeclared_parameters: If False, this method will check for undeclared
        parameters for each of the elements in the parameter list.
    :param: The result for each set action as a list.
    :raises: InvalidParameterValueException if the user-defined callback rejects the
        parameter value and raise_on_failure flag is True.
    :raises: ParameterNotDeclaredException if undeclared parameters are not allowed in this
        method and at least one parameter in the list hadn't been declared beforehand.
    """
    if descriptors is not None:
        # Invariant: a descriptor must exist for every parameter being set.
        assert all(parameter.name in descriptors for parameter in parameter_list)

    results = []
    for param in parameter_list:
        if not allow_undeclared_parameters:
            self._check_undeclared_parameters([param])
        # If undeclared parameters are allowed, parameters with type NOT_SET shall be stored.
        # Each parameter is applied individually (one atomic set per parameter).
        result = self._set_parameters_atomically(
            [param],
            descriptors,
            allow_not_set_type=allow_undeclared_parameters
        )
        if raise_on_failure and not result.successful:
            # Distinguish a type mismatch from a value rejection by the callback.
            if result.reason.startswith('Wrong parameter type'):
                raise InvalidParameterTypeException(
                    param, Parameter.Type(descriptors[param._name].type).name)
            raise InvalidParameterValueException(param.name, param.value, result.reason)
        results.append(result)
    return results
    def set_parameters_atomically(self, parameter_list: List[Parameter]) -> SetParametersResult:
        """
        Set the given parameters, all at one time, and then aggregate result.

        If any parameter in the list was not declared beforehand and undeclared parameters are not
        allowed for the node, this method will raise a ParameterNotDeclaredException exception.

        Parameters are set all at once.
        If setting a parameter fails due to not being declared, then no parameter will be set.
        Either all of the parameters are set or none of them are set.

        If undeclared parameters are allowed for the node, then all the parameters will be
        implicitly declared before being set even if they were not declared beforehand.

        If a callback was registered previously with :func:`add_on_set_parameters_callback`, it
        will be called prior to setting the parameters for the node only once for all parameters.
        If the callback prevents the parameters from being set, then it will be reflected in the
        returned result; no exceptions will be raised in this case.

        For each successfully set parameter, a :class:`ParameterEvent` message is published.

        If the value type of the parameter is NOT_SET, and the existing parameter type is
        something else, then the parameter will be implicitly undeclared.

        :param parameter_list: The list of parameters to set.
        :return: Aggregate result of setting all the parameters atomically.
        :raises: ParameterNotDeclaredException if undeclared parameters are not allowed,
            and at least one parameter in the list hadn't been declared beforehand.
        """
        # Validate types and the declared/undeclared policy first, then delegate the
        # actual atomic set (callbacks, events, storage) to the internal helper.
        self._check_undeclared_parameters(parameter_list)
        return self._set_parameters_atomically(parameter_list)
def _check_undeclared_parameters(self, parameter_list: List[Parameter]):
"""
Check if parameter list has correct types and was declared beforehand.
:raises: ParameterNotDeclaredException if at least one parameter in the list was not
declared beforehand.
"""
if not all(isinstance(parameter, Parameter) for parameter in parameter_list):
raise TypeError("parameter must be instance of type '{}'".format(repr(Parameter)))
undeclared_parameters = (
param.name for param in parameter_list if param.name not in self._parameters
)
if (not self._allow_undeclared_parameters and any(undeclared_parameters)):
raise ParameterNotDeclaredException(list(undeclared_parameters))
    def _set_parameters_atomically(
        self,
        parameter_list: List[Parameter],
        descriptors: Optional[Dict[str, ParameterDescriptor]] = None,
        allow_not_set_type: bool = False
    ) -> SetParametersResult:
        """
        Set the given parameters, all at one time, and then aggregate result.

        This internal method does not reject undeclared parameters.
        If :param:`allow_not_set_type` is False, a parameter with type NOT_SET will be undeclared.

        If a callback was registered previously with :func:`add_on_set_parameters_callback`, it
        will be called prior to setting the parameters for the node only once for all parameters.
        If the callback prevents the parameters from being set, then it will be reflected in the
        returned result; no exceptions will be raised in this case.

        For each successfully set parameter, a :class:`ParameterEvent` message is
        published.

        :param parameter_list: The list of parameters to set.
        :param descriptors: New descriptors to apply to the parameters before setting them.
            If descriptors are given, each parameter in the list must have a corresponding one.
        :param allow_not_set_type: False if parameters with NOT_SET type shall be undeclared,
            True if they should be stored despite not having an actual value.
        :return: Aggregate result of setting all the parameters atomically.
        """
        if descriptors is not None:
            # If new descriptors are provided, ensure every parameter has an assigned descriptor
            # and do not check for read-only.
            assert all(parameter.name in descriptors for parameter in parameter_list)
            result = self._apply_descriptors(parameter_list, descriptors, False)
        else:
            # If new descriptors are not provided, use existing ones and check for read-only.
            result = self._apply_descriptors(parameter_list, self._descriptors, True)
        if not result.successful:
            return result
        elif self._parameters_callbacks:
            # User callbacks run in registration order; the first rejection aborts the
            # whole atomic set and is returned unchanged.
            for callback in self._parameters_callbacks:
                result = callback(parameter_list)
                if not result.successful:
                    return result
        result = SetParametersResult(successful=True)
        if result.successful:
            parameter_event = ParameterEvent()
            # Add fully qualified path of node to parameter event
            if self.get_namespace() == '/':
                parameter_event.node = self.get_namespace() + self.get_name()
            else:
                parameter_event.node = self.get_namespace() + '/' + self.get_name()
            for param in parameter_list:
                # If parameters without type and value are not allowed, they shall be undeclared.
                if not allow_not_set_type and Parameter.Type.NOT_SET == param.type_:
                    # Parameter deleted. (Parameter had value and new value is not set).
                    parameter_event.deleted_parameters.append(param.to_parameter_msg())
                    # Delete any unset parameters regardless of their previous value.
                    if param.name in self._parameters:
                        del self._parameters[param.name]
                    if param.name in self._descriptors:
                        del self._descriptors[param.name]
                else:
                    # Update descriptors; set a default if it doesn't exist.
                    # Don't update if it already exists for the current parameter and a new one
                    # was not specified in this method call.
                    if descriptors is not None:
                        self._descriptors[param.name] = descriptors[param.name]
                    elif param.name not in self._descriptors:
                        descriptor = ParameterDescriptor()
                        descriptor.dynamic_typing = True
                        self._descriptors[param.name] = descriptor
                    if Parameter.Type.NOT_SET == self.get_parameter_or(param.name).type_:
                        # Parameter is new. (Parameter had no value and new value is set)
                        parameter_event.new_parameters.append(param.to_parameter_msg())
                    else:
                        parameter_event.changed_parameters.append(
                            param.to_parameter_msg())
                    # Descriptors have already been applied by this point.
                    self._parameters[param.name] = param
            parameter_event.stamp = self._clock.now().to_msg()
            self._parameter_event_publisher.publish(parameter_event)
        return result
def add_on_set_parameters_callback(
self,
callback: Callable[[List[Parameter]], SetParametersResult]
) -> None:
"""
Add a callback in front to the list of callbacks.
Calling this function will add a callback in self._parameter_callbacks list.
It is considered bad practice to reject changes for "unknown" parameters as this prevents
other parts of the node (that may be aware of these parameters) from handling them.
:param callback: The function that is called whenever parameters are set for the node.
"""
self._parameters_callbacks.insert(0, callback)
def remove_on_set_parameters_callback(
self,
callback: Callable[[List[Parameter]], SetParametersResult]
) -> None:
"""
Remove a callback from list of callbacks.
Calling this function will remove the callback from self._parameter_callbacks list.
:param callback: The function that is called whenever parameters are set for the node.
:raises: ValueError if a callback is not present in the list of callbacks.
"""
self._parameters_callbacks.remove(callback)
def _apply_descriptors(
self,
parameter_list: List[Parameter],
descriptors: Dict[str, ParameterDescriptor],
check_read_only: bool = True
) -> SetParametersResult:
"""
Apply descriptors to parameters and return an aggregated result without saving parameters.
In case no descriptors are provided to the method, existing descriptors shall be used.
In any case, if a given parameter doesn't have a descriptor it shall be skipped.
:param parameter_list: Parameters to be checked.
:param descriptors: Descriptors to apply.
:param check_read_only: True if read-only check has to be applied.
:return: SetParametersResult; successful if checks passed, unsuccessful otherwise.
:raises: ParameterNotDeclaredException if a descriptor is not provided, the given parameter
name had not been declared and undeclared parameters are not allowed.
"""
for param in parameter_list:
if param.name in descriptors:
result = self._apply_descriptor(param, descriptors[param.name], check_read_only)
if not result.successful:
return result
return SetParametersResult(successful=True)
def _apply_descriptor(
self,
parameter: Parameter,
descriptor: Optional[ParameterDescriptor] = None,
check_read_only: bool = True
) -> SetParametersResult:
"""
Apply a descriptor to a parameter and return a result without saving the parameter.
This method sets the type in the descriptor to match the parameter type.
If a descriptor is provided, its name will be set to the name of the parameter.
:param parameter: Parameter to be checked.
:param descriptor: Descriptor to apply. If None, the stored descriptor for the given
parameter's name is used instead.
:param check_read_only: True if read-only check has to be applied.
:return: SetParametersResult; successful if checks passed, unsuccessful otherwise.
:raises: ParameterNotDeclaredException if a descriptor is not provided, the given parameter
name had not been declared and undeclared parameters are not allowed.
"""
if descriptor is None:
descriptor = self.describe_parameter(parameter.name)
else:
descriptor.name = parameter.name
if check_read_only and descriptor.read_only:
return SetParametersResult(
successful=False,
reason='Trying to set a read-only parameter: {}.'.format(parameter.name))
if descriptor.dynamic_typing:
descriptor.type = parameter.type_.value
elif descriptor.type != parameter.type_.value:
return SetParametersResult(
successful=False,
reason=(
'Wrong parameter type, expected '
f"'{Parameter.Type(descriptor.type)}'"
f" got '{parameter.type_}'")
)
if parameter.type_ == Parameter.Type.INTEGER and descriptor.integer_range:
return self._apply_integer_range(parameter, descriptor.integer_range[0])
if parameter.type_ == Parameter.Type.DOUBLE and descriptor.floating_point_range:
return self._apply_floating_point_range(parameter, descriptor.floating_point_range[0])
return SetParametersResult(successful=True)
def _apply_integer_range(
self,
parameter: Parameter,
integer_range: IntegerRange
) -> SetParametersResult:
min_value = min(integer_range.from_value, integer_range.to_value)
max_value = max(integer_range.from_value, integer_range.to_value)
# Values in the edge are always OK.
if parameter.value == min_value or parameter.value == max_value:
return SetParametersResult(successful=True)
if not min_value < parameter.value < max_value:
return SetParametersResult(
successful=False,
reason='Parameter {} out of range. '
'Min: {}, Max: {}, value: {}'.format(
parameter.name, min_value, max_value, parameter.value
)
)
if integer_range.step != 0 and (parameter.value - min_value) % integer_range.step != 0:
return SetParametersResult(
successful=False,
reason='The parameter value for {} is not a valid step. '
'Min: {}, max: {}, value: {}, step: {}'.format(
parameter.name,
min_value,
max_value,
parameter.value,
integer_range.step
)
)
return SetParametersResult(successful=True)
def _apply_floating_point_range(
self,
parameter: Parameter,
floating_point_range: FloatingPointRange
) -> SetParametersResult:
min_value = min(floating_point_range.from_value, floating_point_range.to_value)
max_value = max(floating_point_range.from_value, floating_point_range.to_value)
# Values in the edge are always OK.
if (
math.isclose(parameter.value, min_value, rel_tol=self.PARAM_REL_TOL) or
math.isclose(parameter.value, max_value, rel_tol=self.PARAM_REL_TOL)
):
return SetParametersResult(successful=True)
if not min_value < parameter.value < max_value:
return SetParametersResult(
successful=False,
reason='Parameter {} out of range '
'Min: {}, Max: {}, value: {}'.format(
parameter.name, min_value, max_value, parameter.value
)
)
if floating_point_range.step != 0.0:
distance_int_steps = round((parameter.value - min_value) / floating_point_range.step)
if not math.isclose(
min_value + distance_int_steps * floating_point_range.step,
parameter.value,
rel_tol=self.PARAM_REL_TOL
):
return SetParametersResult(
successful=False,
reason='The parameter value for {} is not close enough to a valid step. '
'Min: {}, max: {}, value: {}, step: {}'.format(
parameter.name,
min_value,
max_value,
parameter.value,
floating_point_range.step
)
)
return SetParametersResult(successful=True)
def _apply_descriptor_and_set(
self,
parameter: Parameter,
descriptor: Optional[ParameterDescriptor] = None,
check_read_only: bool = True
) -> SetParametersResult:
"""Apply parameter descriptor and set parameter if successful."""
result = self._apply_descriptor(parameter, descriptor, check_read_only)
if result.successful:
self._parameters[parameter.name] = parameter
return result
def describe_parameter(self, name: str) -> ParameterDescriptor:
"""
Get the parameter descriptor of a given parameter.
:param name: Fully-qualified name of the parameter, including its namespace.
:return: ParameterDescriptor corresponding to the parameter,
or default ParameterDescriptor if parameter had not been declared before
and undeclared parameters are allowed.
:raises: ParameterNotDeclaredException if parameter had not been declared before
and undeclared parameters are not allowed.
"""
try:
return self._descriptors[name]
except KeyError:
if self._allow_undeclared_parameters:
return ParameterDescriptor()
else:
raise ParameterNotDeclaredException(name)
def describe_parameters(self, names: List[str]) -> List[ParameterDescriptor]:
"""
Get the parameter descriptors of a given list of parameters.
:param name: List of fully-qualified names of the parameters to describe.
:return: List of ParameterDescriptors corresponding to the given parameters.
Default ParameterDescriptors shall be returned for parameters that
had not been declared before if undeclared parameters are allowed.
:raises: ParameterNotDeclaredException if at least one parameter
had not been declared before and undeclared parameters are not allowed.
"""
parameter_descriptors = []
for name in names:
parameter_descriptors.append(self.describe_parameter(name))
return parameter_descriptors
    def set_descriptor(
        self,
        name: str,
        descriptor: ParameterDescriptor,
        alternative_value: Optional[ParameterValue] = None
    ) -> ParameterValue:
        """
        Set a new descriptor for a given parameter.

        The name in the descriptor is ignored and set to **name**.

        :param name: Fully-qualified name of the parameter to set the descriptor to.
        :param descriptor: New descriptor to apply to the parameter.
        :param alternative_value: Value to set to the parameter if the existing value does not
            comply with the new descriptor.
        :return: ParameterValue for the given parameter name after applying the new descriptor.
        :raises: ParameterNotDeclaredException if parameter had not been declared before
            and undeclared parameters are not allowed.
        :raises: ParameterImmutableException if the parameter exists and is read-only.
        :raises: InvalidParameterValueException if neither the existing value nor the
            alternative value complies with the provided descriptor.
        """
        if not self.has_parameter(name):
            if not self._allow_undeclared_parameters:
                raise ParameterNotDeclaredException(name)
            else:
                # Undeclared parameters get no descriptor; return the (default) value as-is.
                return self.get_parameter(name).get_parameter_value()
        if self.describe_parameter(name).read_only:
            raise ParameterImmutableException(name)
        current_parameter = self.get_parameter(name)
        if alternative_value is None:
            alternative_parameter = current_parameter
        else:
            alternative_parameter = Parameter.from_parameter_msg(
                ParameterMsg(name=name, value=alternative_value))
        # First try keeping the parameter, then try the alternative one.
        # Don't check for read-only since we are applying a new descriptor now.
        if not self._apply_descriptor_and_set(current_parameter, descriptor, False).successful:
            alternative_set_result = (
                self._apply_descriptor_and_set(alternative_parameter, descriptor, False)
            )
            if not alternative_set_result.successful:
                raise InvalidParameterValueException(
                    name,
                    alternative_parameter.value,
                    alternative_set_result.reason
                )
        self._descriptors[name] = descriptor
        return self.get_parameter(name).get_parameter_value()
def _validate_topic_or_service_name(self, topic_or_service_name, *, is_service=False):
name = self.get_name()
namespace = self.get_namespace()
validate_node_name(name)
validate_namespace(namespace)
validate_topic_name(topic_or_service_name, is_service=is_service)
expanded_topic_or_service_name = expand_topic_name(topic_or_service_name, name, namespace)
validate_full_topic_name(expanded_topic_or_service_name, is_service=is_service)
def _validate_qos_or_depth_parameter(self, qos_or_depth) -> QoSProfile:
if isinstance(qos_or_depth, QoSProfile):
return qos_or_depth
elif isinstance(qos_or_depth, int):
if qos_or_depth < 0:
raise ValueError('history depth must be greater than or equal to zero')
return QoSProfile(depth=qos_or_depth)
else:
raise TypeError(
'Expected QoSProfile or int, but received {!r}'.format(type(qos_or_depth)))
    def add_waitable(self, waitable: Waitable) -> None:
        """
        Add a class that is capable of adding things to the wait set.

        :param waitable: An instance of a waitable that the node will add to the waitset.
        """
        self.__waitables.append(waitable)
        # Nudge the executor so the new waitable is picked up immediately.
        self._wake_executor()
    def remove_waitable(self, waitable: Waitable) -> None:
        """
        Remove a Waitable that was previously added to the node.

        :param waitable: The Waitable to remove.
        :raises: ValueError if the waitable was not previously added to the node.
        """
        self.__waitables.remove(waitable)
        # Wake the executor so it stops waiting on the removed waitable.
        self._wake_executor()
def resolve_topic_name(self, topic: str, *, only_expand: bool = False) -> str:
"""
Return a topic name expanded and remapped.
:param topic: topic name to be expanded and remapped.
:param only_expand: if `True`, remapping rules won't be applied.
:return: a fully qualified topic name,
result of applying expansion and remapping to the given `topic`.
"""
with self.handle:
return _rclpy.rclpy_resolve_name(self.handle, topic, only_expand, False)
def resolve_service_name(
self, service: str, *, only_expand: bool = False
) -> str:
"""
Return a service name expanded and remapped.
:param service: service name to be expanded and remapped.
:param only_expand: if `True`, remapping rules won't be applied.
:return: a fully qualified service name,
result of applying expansion and remapping to the given `service`.
"""
with self.handle:
return _rclpy.rclpy_resolve_name(self.handle, service, only_expand, True)
def create_publisher(
self,
msg_type,
topic: str,
qos_profile: Union[QoSProfile, int],
*,
callback_group: Optional[CallbackGroup] = None,
event_callbacks: Optional[PublisherEventCallbacks] = None,
qos_overriding_options: Optional[QoSOverridingOptions] = None,
) -> Publisher:
"""
Create a new publisher.
:param msg_type: The type of ROS messages the publisher will publish.
:param topic: The name of the topic the publisher will publish to.
:param qos_profile: A QoSProfile or a history depth to apply to the publisher.
In the case that a history depth is provided, the QoS history is set to
KEEP_LAST, the QoS history depth is set to the value
of the parameter, and all other QoS settings are set to their default values.
:param callback_group: The callback group for the publisher's event handlers.
If ``None``, then the node's default callback group is used.
:param event_callbacks: User-defined callbacks for middleware events.
:return: The new publisher.
"""
qos_profile = self._validate_qos_or_depth_parameter(qos_profile)
callback_group = callback_group or self.default_callback_group
failed = False
try:
final_topic = self.resolve_topic_name(topic)
except RuntimeError:
# if it's name validation error, raise a more appropriate exception.
try:
self._validate_topic_or_service_name(topic)
except InvalidTopicNameException as ex:
raise ex from None
# else reraise the previous exception
raise
if qos_overriding_options is None:
qos_overriding_options = QoSOverridingOptions([])
_declare_qos_parameters(
Publisher, self, final_topic, qos_profile, qos_overriding_options)
# this line imports the typesupport for the message module if not already done
failed = False
check_is_valid_msg_type(msg_type)
try:
with self.handle:
publisher_object = _rclpy.Publisher(
self.handle, msg_type, topic, qos_profile.get_c_qos_profile())
except ValueError:
failed = True
if failed:
self._validate_topic_or_service_name(topic)
try:
publisher = Publisher(
publisher_object, msg_type, topic, qos_profile,
event_callbacks=event_callbacks or PublisherEventCallbacks(),
callback_group=callback_group)
except Exception:
publisher_object.destroy_when_not_in_use()
raise
self.__publishers.append(publisher)
self._wake_executor()
for event_callback in publisher.event_handlers:
self.add_waitable(event_callback)
return publisher
    def create_subscription(
        self,
        msg_type,
        topic: str,
        callback: Callable[[MsgType], None],
        qos_profile: Union[QoSProfile, int],
        *,
        callback_group: Optional[CallbackGroup] = None,
        event_callbacks: Optional[SubscriptionEventCallbacks] = None,
        qos_overriding_options: Optional[QoSOverridingOptions] = None,
        raw: bool = False
    ) -> Subscription:
        """
        Create a new subscription.

        :param msg_type: The type of ROS messages the subscription will subscribe to.
        :param topic: The name of the topic the subscription will subscribe to.
        :param callback: A user-defined callback function that is called when a message is
            received by the subscription.
        :param qos_profile: A QoSProfile or a history depth to apply to the subscription.
            In the case that a history depth is provided, the QoS history is set to
            KEEP_LAST, the QoS history depth is set to the value
            of the parameter, and all other QoS settings are set to their default values.
        :param callback_group: The callback group for the subscription. If ``None``, then the
            node's default callback group is used.
        :param event_callbacks: User-defined callbacks for middleware events.
        :param qos_overriding_options: Options for declaring QoS-override parameters.
            If ``None``, an empty set of overriding options is used.
        :param raw: If ``True``, then received messages will be stored in raw binary
            representation.
        :return: The new subscription.
        """
        qos_profile = self._validate_qos_or_depth_parameter(qos_profile)
        callback_group = callback_group or self.default_callback_group
        try:
            final_topic = self.resolve_topic_name(topic)
        except RuntimeError:
            # if it's name validation error, raise a more appropriate exception.
            try:
                self._validate_topic_or_service_name(topic)
            except InvalidTopicNameException as ex:
                raise ex from None
            # else reraise the previous exception
            raise
        if qos_overriding_options is None:
            qos_overriding_options = QoSOverridingOptions([])
        _declare_qos_parameters(
            Subscription, self, final_topic, qos_profile, qos_overriding_options)
        # this line imports the typesupport for the message module if not already done
        failed = False
        check_is_valid_msg_type(msg_type)
        try:
            with self.handle:
                subscription_object = _rclpy.Subscription(
                    self.handle, msg_type, topic, qos_profile.get_c_qos_profile())
        except ValueError:
            failed = True
        if failed:
            # Re-validate the name to raise a more descriptive exception than ValueError.
            self._validate_topic_or_service_name(topic)
        try:
            subscription = Subscription(
                subscription_object, msg_type,
                topic, callback, callback_group, qos_profile, raw,
                event_callbacks=event_callbacks or SubscriptionEventCallbacks())
        except Exception:
            # Make sure the underlying C subscription is cleaned up on wrapper failure.
            subscription_object.destroy_when_not_in_use()
            raise
        self.__subscriptions.append(subscription)
        callback_group.add_entity(subscription)
        self._wake_executor()
        for event_handler in subscription.event_handlers:
            self.add_waitable(event_handler)
        return subscription
    def create_client(
        self,
        srv_type,
        srv_name: str,
        *,
        qos_profile: QoSProfile = qos_profile_services_default,
        callback_group: Optional[CallbackGroup] = None
    ) -> Client:
        """
        Create a new service client.

        :param srv_type: The service type.
        :param srv_name: The name of the service.
        :param qos_profile: The quality of service profile to apply the service client.
        :param callback_group: The callback group for the service client. If ``None``, then the
            node's default callback group is used.
        :return: The new service client.
        """
        if callback_group is None:
            callback_group = self.default_callback_group
        check_is_valid_srv_type(srv_type)
        failed = False
        try:
            with self.handle:
                client_impl = _rclpy.Client(
                    self.handle,
                    srv_type,
                    srv_name,
                    qos_profile.get_c_qos_profile())
        except ValueError:
            failed = True
        if failed:
            # Re-validate the name to raise a more descriptive exception than ValueError.
            self._validate_topic_or_service_name(srv_name, is_service=True)
        client = Client(
            self.context,
            client_impl, srv_type, srv_name, qos_profile,
            callback_group)
        self.__clients.append(client)
        callback_group.add_entity(client)
        self._wake_executor()
        return client
    def create_service(
        self,
        srv_type,
        srv_name: str,
        callback: Callable[[SrvTypeRequest, SrvTypeResponse], SrvTypeResponse],
        *,
        qos_profile: QoSProfile = qos_profile_services_default,
        callback_group: Optional[CallbackGroup] = None
    ) -> Service:
        """
        Create a new service server.

        :param srv_type: The service type.
        :param srv_name: The name of the service.
        :param callback: A user-defined callback function that is called when a service request
            is received by the server.
        :param qos_profile: The quality of service profile to apply the service server.
        :param callback_group: The callback group for the service server. If ``None``, then the
            node's default callback group is used.
        :return: The new service server.
        """
        if callback_group is None:
            callback_group = self.default_callback_group
        check_is_valid_srv_type(srv_type)
        failed = False
        try:
            with self.handle:
                service_impl = _rclpy.Service(
                    self.handle,
                    srv_type,
                    srv_name,
                    qos_profile.get_c_qos_profile())
        except ValueError:
            failed = True
        if failed:
            # Re-validate the name to raise a more descriptive exception than ValueError.
            self._validate_topic_or_service_name(srv_name, is_service=True)
        service = Service(
            service_impl,
            srv_type, srv_name, callback, callback_group, qos_profile)
        self.__services.append(service)
        callback_group.add_entity(service)
        self._wake_executor()
        return service
def create_timer(
self,
timer_period_sec: float,
callback: Callable,
callback_group: CallbackGroup = None,
clock: Clock = None,
) -> Timer:
"""
Create a new timer.
The timer will be started and every ``timer_period_sec`` number of seconds the provided
callback function will be called.
:param timer_period_sec: The period (s) of the timer.
:param callback: A user-defined callback function that is called when the timer expires.
:param callback_group: The callback group for the timer. If ``None``, then the nodes
default callback group is used.
:param clock: The clock which the timer gets time from.
"""
timer_period_nsec = int(float(timer_period_sec) * S_TO_NS)
if callback_group is None:
callback_group = self.default_callback_group
if clock is None:
clock = self._clock
timer = Timer(callback, callback_group, timer_period_nsec, clock, context=self.context)
self.__timers.append(timer)
callback_group.add_entity(timer)
self._wake_executor()
return timer
def create_guard_condition(
self,
callback: Callable,
callback_group: CallbackGroup = None
) -> GuardCondition:
"""Create a new guard condition."""
if callback_group is None:
callback_group = self.default_callback_group
guard = GuardCondition(callback, callback_group, context=self.context)
self.__guards.append(guard)
callback_group.add_entity(guard)
self._wake_executor()
return guard
def create_rate(
self,
frequency: float,
clock: Clock = None,
) -> Rate:
"""
Create a Rate object.
:param frequency: The frequency the Rate runs at (Hz).
:param clock: The clock the Rate gets time from.
"""
if frequency <= 0:
raise ValueError('frequency must be > 0')
# Create a timer and give it to the rate object
period = 1.0 / frequency
# Rate will set its own callback
callback = None
# Rates get their own group so timing is not messed up by other callbacks
group = self._rate_group
timer = self.create_timer(period, callback, group, clock)
return Rate(timer, context=self.context)
def destroy_publisher(self, publisher: Publisher) -> bool:
"""
Destroy a publisher created by the node.
:return: ``True`` if successful, ``False`` otherwise.
"""
if publisher in self.__publishers:
self.__publishers.remove(publisher)
for event_handler in publisher.event_handlers:
self.__waitables.remove(event_handler)
try:
publisher.destroy()
except InvalidHandle:
return False
self._wake_executor()
return True
return False
def destroy_subscription(self, subscription: Subscription) -> bool:
"""
Destroy a subscription created by the node.
:return: ``True`` if succesful, ``False`` otherwise.
"""
if subscription in self.__subscriptions:
self.__subscriptions.remove(subscription)
for event_handler in subscription.event_handlers:
self.__waitables.remove(event_handler)
try:
subscription.destroy()
except InvalidHandle:
return False
self._wake_executor()
return True
return False
def destroy_client(self, client: Client) -> bool:
"""
Destroy a service client created by the node.
:return: ``True`` if successful, ``False`` otherwise.
"""
if client in self.__clients:
self.__clients.remove(client)
try:
client.destroy()
except InvalidHandle:
return False
self._wake_executor()
return True
return False
def destroy_service(self, service: Service) -> bool:
"""
Destroy a service server created by the node.
:return: ``True`` if successful, ``False`` otherwise.
"""
if service in self.__services:
self.__services.remove(service)
try:
service.destroy()
except InvalidHandle:
return False
self._wake_executor()
return True
return False
def destroy_timer(self, timer: Timer) -> bool:
"""
Destroy a timer created by the node.
:return: ``True`` if successful, ``False`` otherwise.
"""
if timer in self.__timers:
self.__timers.remove(timer)
try:
timer.destroy()
except InvalidHandle:
return False
self._wake_executor()
return True
return False
def destroy_guard_condition(self, guard: GuardCondition) -> bool:
"""
Destroy a guard condition created by the node.
:return: ``True`` if successful, ``False`` otherwise.
"""
if guard in self.__guards:
self.__guards.remove(guard)
try:
guard.destroy()
except InvalidHandle:
return False
self._wake_executor()
return True
return False
def destroy_rate(self, rate: Rate):
"""
Destroy a Rate object created by the node.
:return: ``True`` if successful, ``False`` otherwise.
"""
self.destroy_timer(rate._timer)
rate.destroy()
    def destroy_node(self) -> None:
        """
        Destroy the node.

        Frees resources used by the node, including any entities created by the following methods:

        * :func:`create_publisher`
        * :func:`create_subscription`
        * :func:`create_client`
        * :func:`create_service`
        * :func:`create_timer`
        * :func:`create_guard_condition`

        Returns ``None`` (the previous ``-> bool`` annotation was inaccurate: no value
        is ever returned).
        """
        # Drop extra reference to parameter event publisher.
        # It will be destroyed with other publishers below.
        self._parameter_event_publisher = None
        # Destroy dependent items eagerly to work around a possible hang
        # https://github.com/ros2/build_cop/issues/248
        while self.__publishers:
            self.destroy_publisher(self.__publishers[0])
        while self.__subscriptions:
            self.destroy_subscription(self.__subscriptions[0])
        while self.__clients:
            self.destroy_client(self.__clients[0])
        while self.__services:
            self.destroy_service(self.__services[0])
        while self.__timers:
            self.destroy_timer(self.__timers[0])
        while self.__guards:
            self.destroy_guard_condition(self.__guards[0])
        self.__node.destroy_when_not_in_use()
        self._wake_executor()
    def get_publisher_names_and_types_by_node(
        self,
        node_name: str,
        node_namespace: str,
        no_demangle: bool = False
    ) -> List[Tuple[str, List[str]]]:
        """
        Get a list of discovered topics for publishers of a remote node.

        :param node_name: Name of a remote node to get publishers for.
        :param node_namespace: Namespace of the remote node.
        :param no_demangle: If ``True``, then topic names and types returned will not be demangled.
        :return: List of tuples.
          The first element of each tuple is the topic name and the second element is a list of
          topic types.
        :raise NodeNameNonExistentError: If the node wasn't found.
        :raise RuntimeError: Unexpected failure.
        """
        # Thin wrapper over the C extension; hold the node handle for the duration.
        with self.handle:
            return _rclpy.rclpy_get_publisher_names_and_types_by_node(
                self.handle, no_demangle, node_name, node_namespace)
    def get_subscriber_names_and_types_by_node(
        self,
        node_name: str,
        node_namespace: str,
        no_demangle: bool = False
    ) -> List[Tuple[str, List[str]]]:
        """
        Get a list of discovered topics for subscriptions of a remote node.

        :param node_name: Name of a remote node to get subscriptions for.
        :param node_namespace: Namespace of the remote node.
        :param no_demangle: If ``True``, then topic names and types returned will not be demangled.
        :return: List of tuples.
          The first element of each tuple is the topic name and the second element is a list of
          topic types.
        :raise NodeNameNonExistentError: If the node wasn't found.
        :raise RuntimeError: Unexpected failure.
        """
        # Thin wrapper over the C extension; hold the node handle for the duration.
        with self.handle:
            return _rclpy.rclpy_get_subscriber_names_and_types_by_node(
                self.handle, no_demangle, node_name, node_namespace)
def get_service_names_and_types_by_node(
self,
node_name: str,
node_namespace: str
) -> List[Tuple[str, List[str]]]:
"""
Get a list of discovered service server topics for a remote node.
:param node_name: Name of a remote node to get services for.
:param node_namespace: Namespace of the remote node.
:return: List of tuples.
The first element of each tuple is the service server name
and the second element is a list of service types.
:raise NodeNameNonExistentError: If the node wasn't found.
:raise RuntimeError: Unexpected failure.
"""
with self.handle:
return _rclpy.rclpy_get_service_names_and_types_by_node(
self.handle, node_name, node_namespace)
def get_client_names_and_types_by_node(
self,
node_name: str,
node_namespace: str
) -> List[Tuple[str, List[str]]]:
"""
Get a list of discovered service client topics for a remote node.
:param node_name: Name of a remote node to get service clients for.
:param node_namespace: Namespace of the remote node.
:return: List of tuples.
The fist element of each tuple is the service client name
and the second element is a list of service client types.
:raise NodeNameNonExistentError: If the node wasn't found.
:raise RuntimeError: Unexpected failure.
"""
with self.handle:
return _rclpy.rclpy_get_client_names_and_types_by_node(
self.handle, node_name, node_namespace)
def get_topic_names_and_types(self, no_demangle: bool = False) -> List[Tuple[str, List[str]]]:
"""
Get a list topic names and types for the node.
:param no_demangle: If ``True``, then topic names and types returned will not be demangled.
:return: List of tuples.
The first element of each tuple is the topic name and the second element is a list of
topic types.
"""
with self.handle:
return _rclpy.rclpy_get_topic_names_and_types(self.handle, no_demangle)
def get_service_names_and_types(self) -> List[Tuple[str, List[str]]]:
"""
Get a list of service topics for the node.
:return: List of tuples.
The first element of each tuple is the service name and the second element is a list of
service types.
"""
with self.handle:
return _rclpy.rclpy_get_service_names_and_types(self.handle)
def get_node_names(self) -> List[str]:
"""
Get a list of names for discovered nodes.
:return: List of node names.
"""
with self.handle:
names_ns = self.handle.get_node_names_and_namespaces()
return [n[0] for n in names_ns]
def get_node_names_and_namespaces(self) -> List[Tuple[str, str]]:
"""
Get a list of names and namespaces for discovered nodes.
:return: List of tuples containing two strings: the node name and node namespace.
"""
with self.handle:
return self.handle.get_node_names_and_namespaces()
def get_node_names_and_namespaces_with_enclaves(self) -> List[Tuple[str, str, str]]:
"""
Get a list of names, namespaces and enclaves for discovered nodes.
:return: List of tuples containing three strings: the node name, node namespace
and enclave.
"""
with self.handle:
return self.handle.get_node_names_and_namespaces_with_enclaves()
def get_fully_qualified_name(self) -> str:
"""
Get the node's fully qualified name.
:return: Fully qualified node name.
"""
with self.handle:
return self.handle.get_fully_qualified_name()
def _count_publishers_or_subscribers(self, topic_name, func):
fq_topic_name = expand_topic_name(topic_name, self.get_name(), self.get_namespace())
validate_full_topic_name(fq_topic_name)
with self.handle:
return func(fq_topic_name)
def count_publishers(self, topic_name: str) -> int:
"""
Return the number of publishers on a given topic.
`topic_name` may be a relative, private, or fully qualified topic name.
A relative or private topic is expanded using this node's namespace and name.
The queried topic name is not remapped.
:param topic_name: the topic_name on which to count the number of publishers.
:return: the number of publishers on the topic.
"""
with self.handle:
return self._count_publishers_or_subscribers(
topic_name, self.handle.get_count_publishers)
def count_subscribers(self, topic_name: str) -> int:
"""
Return the number of subscribers on a given topic.
`topic_name` may be a relative, private, or fully qualified topic name.
A relative or private topic is expanded using this node's namespace and name.
The queried topic name is not remapped.
:param topic_name: the topic_name on which to count the number of subscribers.
:return: the number of subscribers on the topic.
"""
with self.handle:
return self._count_publishers_or_subscribers(
topic_name, self.handle.get_count_subscribers)
    def _get_info_by_topic(
        self,
        topic_name: str,
        no_mangle: bool,
        func: Callable[[object, str, bool], List[Dict]]
    ) -> List[TopicEndpointInfo]:
        """Resolve *topic_name* and query endpoint info for it via *func*."""
        with self.handle:
            if no_mangle:
                # Caller supplied a raw middleware topic name; use it verbatim.
                fq_topic_name = topic_name
            else:
                # Expand a relative/private name against this node's name and namespace,
                # validate it, then apply any remap rules before querying the graph.
                fq_topic_name = expand_topic_name(
                    topic_name, self.get_name(), self.get_namespace())
                validate_full_topic_name(fq_topic_name)
                fq_topic_name = _rclpy.rclpy_remap_topic_name(self.handle, fq_topic_name)
            info_dicts = func(self.handle, fq_topic_name, no_mangle)
            infos = [TopicEndpointInfo(**x) for x in info_dicts]
            return infos
def get_publishers_info_by_topic(
self,
topic_name: str,
no_mangle: bool = False
) -> List[TopicEndpointInfo]:
"""
Return a list of publishers on a given topic.
The returned parameter is a list of TopicEndpointInfo objects, where each will contain
the node name, node namespace, topic type, topic endpoint's GID, and its QoS profile.
When the `no_mangle` parameter is `true`, the provided `topic_name` should be a valid topic
name for the middleware (useful when combining ROS with native middleware (e.g. DDS) apps).
When the `no_mangle` parameter is `false`, the provided `topic_name` should follow
ROS topic name conventions.
`topic_name` may be a relative, private, or fully qualified topic name.
A relative or private topic will be expanded using this node's namespace and name.
The queried `topic_name` is not remapped.
:param topic_name: the topic_name on which to find the publishers.
:param no_mangle: no_mangle if `true`, `topic_name` needs to be a valid middleware topic
name, otherwise it should be a valid ROS topic name. Defaults to `false`.
:return: a list of TopicEndpointInfo for all the publishers on this topic.
"""
return self._get_info_by_topic(
topic_name,
no_mangle,
_rclpy.rclpy_get_publishers_info_by_topic)
def get_subscriptions_info_by_topic(
self,
topic_name: str,
no_mangle: bool = False
) -> List[TopicEndpointInfo]:
"""
Return a list of subscriptions on a given topic.
The returned parameter is a list of TopicEndpointInfo objects, where each will contain
the node name, node namespace, topic type, topic endpoint's GID, and its QoS profile.
When the `no_mangle` parameter is `true`, the provided `topic_name` should be a valid topic
name for the middleware (useful when combining ROS with native middleware (e.g. DDS) apps).
When the `no_mangle` parameter is `false`, the provided `topic_name` should follow
ROS topic name conventions.
`topic_name` may be a relative, private, or fully qualified topic name.
A relative or private topic will be expanded using this node's namespace and name.
The queried `topic_name` is not remapped.
:param topic_name: the topic_name on which to find the subscriptions.
:param no_mangle: no_mangle if `true`, `topic_name` needs to be a valid middleware topic
name, otherwise it should be a valid ROS topic name. Defaults to `false`.
:return: a list of TopicEndpointInfo for all the subscriptions on this topic.
"""
return self._get_info_by_topic(
topic_name,
no_mangle,
_rclpy.rclpy_get_subscriptions_info_by_topic)
| 42.960355
| 99
| 0.649724
|
4a09070a7db366d6b58eb959db0e86af1896df29
| 3,908
|
py
|
Python
|
fastestimator/trace/io/model_saver.py
|
DwijayDS/fastestimator
|
9b288cb2bd870f971ec4cee09d0b3205e1316a94
|
[
"Apache-2.0"
] | null | null | null |
fastestimator/trace/io/model_saver.py
|
DwijayDS/fastestimator
|
9b288cb2bd870f971ec4cee09d0b3205e1316a94
|
[
"Apache-2.0"
] | null | null | null |
fastestimator/trace/io/model_saver.py
|
DwijayDS/fastestimator
|
9b288cb2bd870f971ec4cee09d0b3205e1316a94
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
from collections import deque
from typing import Optional, Union
import tensorflow as tf
import torch
from fastestimator.backend._save_model import save_model
from fastestimator.trace.trace import Trace
from fastestimator.util.data import Data
from fastestimator.util.traceability_util import traceable
@traceable()
class ModelSaver(Trace):
    """Save model weights based on epoch frequency during training.

    Args:
        model: A model instance compiled with fe.build.
        save_dir: Folder path into which to save the `model`.
        frequency: Model saving frequency in epoch(s).
        max_to_keep: Maximum number of latest saved files to keep. If 0 or None, all models are kept.
        save_architecture: Whether to also save the full model architecture alongside the weights.
            Only available for TensorFlow models at present; produces a folder with several files
            from which the model can be re-instantiated without the original code via
            tf.keras.models.load_model(<path to model folder>).

    Raises:
        ValueError: If `max_to_keep` is negative, or if save_architecture is used with a PyTorch model.
    """
    def __init__(self,
                 model: Union[tf.keras.Model, torch.nn.Module],
                 save_dir: str,
                 frequency: int = 1,
                 max_to_keep: Optional[int] = None,
                 save_architecture: bool = False) -> None:
        super().__init__(mode="train")
        self.model = model
        self.save_dir = save_dir
        self.frequency = frequency
        self.save_architecture = save_architecture
        if isinstance(model, torch.nn.Module) and save_architecture:
            raise ValueError("Sorry, architecture saving is not currently enabled for PyTorch")
        if max_to_keep is not None and max_to_keep < 0:
            raise ValueError(f"max_to_keep should be a non-negative integer, but got {max_to_keep}")
        # Pre-fill the queue with None placeholders so it is always 'full'; the oldest real
        # path falls off the end as new ones are pushed on the front.
        queue_len = max_to_keep or 0
        self.file_queue = deque([None] * queue_len, maxlen=queue_len)

    def on_epoch_end(self, data: Data) -> None:
        # No model will be saved when save_dir is None, which makes smoke test easier.
        if not self.save_dir or self.system.epoch_idx % self.frequency != 0:
            return
        model_name = f"{self.model.model_name}_epoch_{self.system.epoch_idx}"
        model_path = save_model(model=self.model,
                                save_dir=self.save_dir,
                                model_name=model_name,
                                save_architecture=self.save_architecture)
        print("FastEstimator-ModelSaver: Saved model to {}".format(model_path))
        # Evict the oldest saved model (and its architecture folder) once the cap is hit.
        rm_path = None
        if self.file_queue.maxlen:
            rm_path = self.file_queue[self.file_queue.maxlen - 1]
        if rm_path:
            os.remove(rm_path)
            if self.save_architecture:
                shutil.rmtree(os.path.splitext(rm_path)[0])
            print("FastEstimator-ModelSaver: Removed model {} due to file number exceeding max_to_keep".format(
                rm_path))
        self.file_queue.appendleft(model_path)
| 48.85
| 119
| 0.660184
|
4a09072891aaa7ea17e8180d34e8c7f8788c38d1
| 3,698
|
py
|
Python
|
setup.py
|
readingwritingcode/google-scholar-report
|
611c0416ec7ca65a81d34a2c984c7f2a5b6eb450
|
[
"BSD-2-Clause"
] | 1
|
2021-07-10T15:52:42.000Z
|
2021-07-10T15:52:42.000Z
|
setup.py
|
restrepo/GoogleScholarReport
|
611c0416ec7ca65a81d34a2c984c7f2a5b6eb450
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
restrepo/GoogleScholarReport
|
611c0416ec7ca65a81d34a2c984c7f2a5b6eb450
|
[
"BSD-2-Clause"
] | 2
|
2021-07-09T13:15:59.000Z
|
2021-07-09T13:40:59.000Z
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright (c) Colav.
# Distributed under the terms of the Modified BSD License.
# -----------------------------------------------------------------------------
# Minimal Python version sanity check (from IPython)
# -----------------------------------------------------------------------------
# See https://stackoverflow.com/a/26737258/2268280
# sudo pip3 install twine
# python3 setup.py sdist bdist_wheel
# twine upload dist/*
# For test purposes
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
from __future__ import print_function
from setuptools import setup, find_packages
import os
import sys
import codecs
v = sys.version_info


def read(rel_path):
    """Return the text content of *rel_path*, resolved relative to this file's directory."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base_dir, rel_path), 'r') as fp:
        return fp.read()


def get_version(rel_path):
    """Extract the ``__version__`` string from a Python source file.

    Raises RuntimeError if no ``__version__`` assignment is found.
    """
    for line in read(rel_path).splitlines():
        if not line.startswith('__version__'):
            continue
        quote = '"' if '"' in line else "'"
        return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Windows is best-effort only: flag it and warn on stderr.
shell = os.name in ('nt', 'dos')
if shell:
    print("WARNING: Windows is not officially supported", file=sys.stderr)
def main():
    """Configure and run setuptools.setup() for the GoogleScholarReport package."""
    setup(
        # Application name:
        name="GoogleScholarReport",
        # Version number (initial):
        version="0.1.0",
        # Application author details:
        author="Colav",
        author_email="grupocolav@udea.edu.co",
        # Packages
        packages=find_packages(exclude=['tests']),
        # Include additional files into the package
        include_package_data=True,
        # Details
        url="https://github.com/readingwritingcode/google-scholar-report",
        #
        license="BSD",
        description="Bibliographic capture system for non-scrapping data sources",
        long_description=open("README.md").read(),
        long_description_content_type="text/markdown",
        # Dependent packages (distributions)
        # NOTE(review): all dependencies are hard-pinned (==); confirm this is intended
        # for a library, since pins this strict can cause install conflicts downstream.
        install_requires=['beautifulsoup4==4.9.3',
                          'bibtexparser==1.2.0',
                          'bs4==0.0.1',
                          'et-xmlfile==1.1.0',
                          'future==0.18.2',
                          'fuzzywuzzy==0.18.0',
                          'helium==3.0.5',
                          'lxml==4.6.2',
                          'numpy==1.20.1',
                          'openpyxl==3.0.7',
                          'pandas==1.2.3',
                          'pyparsing==2.4.7',
                          'python-dateutil==2.8.1',
                          'python-Levenshtein==0.12.2',
                          'pytz==2021.1',
                          'selenium==3.141.0',
                          'six==1.15.0',
                          'soupsieve==2.2.1',
                          'Unidecode==1.2.0',
                          'urllib3==1.26.3'],
        classifiers=[
            # How mature is this project? Common values are
            #   3 - Alpha
            #   4 - Beta
            #   5 - Production/Stable
            'Development Status :: 3 - Alpha',
            # Indicate who your project is intended for
            'Intended Audience :: Developers',
            'Topic :: Software Development :: Build Tools',
            # Pick your license as you wish (should match "license" above)
            # NOTE(review): classifier says MIT while license="BSD" above — confirm which is right.
            'License :: OSI Approved :: MIT License',
            # Specify the Python versions you support here. In particular, ensure
            # that you indicate whether you support Python 2, Python 3 or both.
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ]
    )


if __name__ == "__main__":
    main()
| 28.890625
| 82
| 0.556247
|
4a09091f069fcdd7d5006dfe82de50816d2d57be
| 546
|
py
|
Python
|
modules/math-codes/modules/differential-calculus/limits/src/stars_function.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | 1
|
2020-09-06T22:17:19.000Z
|
2020-09-06T22:17:19.000Z
|
modules/math-codes/modules/differential-calculus/limits/src/stars_function.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
modules/math-codes/modules/differential-calculus/limits/src/stars_function.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
def f(x):
    """Reciprocal function: f(x) = 1 / x."""
    reciprocal = 1 / x
    return reciprocal
if __name__ =='__main__':
    # Plot f(x) = 1/x over the integer range [-10, 10] and save the figure.
    from matplotlib import pyplot as plt
    import pandas as pd
    # NOTE(review): the range includes x = 0, so f(0) divides by zero — pandas/numpy
    # will emit a warning and produce an inf value at that point; confirm intended.
    df = pd.DataFrame ({'x': range(-10, 10+1)}) # x Values.
    df['y'] = f(df['x']) # y Values.
    print(df)
    plt.figure(figsize=(10, 10))
    plt.plot(df.x, df.y, color="b", marker='o')
    plt.title('f(x) = 1 / x')
    plt.xlabel('x')
    plt.ylabel('y = f(x)')
    plt.grid()
    # Tick every integer on x, every unit on y around the asymptote.
    plt.xticks(range(-10, 10+1, 1))
    plt.yticks(range(-5, 5+1, 1))
    # Draw the axes through the origin.
    plt.axhline()
    plt.axvline()
    plt.savefig('../images/plot-03.png', format='png')
    plt.show()
| 21.84
| 57
| 0.578755
|
4a09099f33a8573c7d084bfcaae074df61946c6e
| 2,393
|
py
|
Python
|
DarunGrimAnalyzers.py
|
infamous41md/darun-grim-script
|
9f1a5fe55652a37c54ed52266aceef2a26a81550
|
[
"BSD-3-Clause"
] | 1
|
2015-07-17T14:26:15.000Z
|
2015-07-17T14:26:15.000Z
|
DarunGrimAnalyzers.py
|
infamous41md/darun-grim-script
|
9f1a5fe55652a37c54ed52266aceef2a26a81550
|
[
"BSD-3-Clause"
] | null | null | null |
DarunGrimAnalyzers.py
|
infamous41md/darun-grim-script
|
9f1a5fe55652a37c54ed52266aceef2a26a81550
|
[
"BSD-3-Clause"
] | null | null | null |
import DarunGrimDatabaseWrapper
class PatternAnalyzer:
    """Scores disassembly diffs for security relevance by matching token patterns.

    Each pattern is a (match-type, substring, weight) triple; a disassembly line's
    score is the sum of the weights of every pattern it contains.
    """
    SecurityImpactPatterns = (
        ( "match", "cmp", 1 ),
        ( "match", "test", 1 ),
        ( "match", "wcslen", 2 ),
        ( "match", "strlen", 2 ),
        ( "match", "0xFFFFFFF", 3 ),
        ( "match", "StringCchCopyW", 2 ),
        ( "match", "ULongLongToULong", 2 )
    )
    def __init__( self ):
        pass
    def GetDisasmLinesWithSecurityImplications( self, lines, unidentified ):
        """Return (score, html) for *lines*, wrapping matching lines in a highlight <div>.

        *unidentified* selects the CSS class: True for blocks present on only one side
        of the diff, False for modified blocks.
        """
        return_lines = ''
        security_implications_score = 0
        for line in lines:
            new_line = line
            whole_weight = 0
            # Sum the weights of every pattern that appears in this line.
            for ( type, pattern, weight ) in self.SecurityImpactPatterns:
                if type == 'match' and line.find( pattern ) >= 0:
                    whole_weight += weight
            if whole_weight > 0:
                security_implications_score += whole_weight
                if unidentified:
                    new_line = '<div class="SecurityImplicationInUnidentifiedBlock">'
                else:
                    new_line = '<div class="SecurityImplicationInModifiedBlock">'
                new_line += line + '</div>'
            return_lines += '<p>' + new_line
        return ( security_implications_score, return_lines )
    def GetSecurityImplicationsScore( self, databasename, source_address, target_address ):
        """Score the diff of the functions at *source_address*/*target_address*.

        NOTE(review): only the right-hand (target) total is returned; the left-hand
        total is computed but discarded — confirm that is intentional.
        """
        database = DarunGrimDatabaseWrapper.Database( databasename )
        source_address = int(source_address)
        target_address = int(target_address)
        comparison_table = database.GetDisasmComparisonTextByFunctionAddress( source_address, target_address )
        left_line_security_implications_score_total = 0
        right_line_security_implications_score_total = 0
        for ( left_address, left_lines, right_address, right_lines, match_rate ) in comparison_table:
            left_line_security_implications_score = 0
            right_line_security_implications_score = 0
            # Only score blocks that are unmatched on one side or not a 100% match.
            if (right_address == 0 and left_address !=0) or match_rate < 100 :
                ( left_line_security_implications_score, left_line_text ) = self.GetDisasmLinesWithSecurityImplications( left_lines, right_address == 0 )
            if (left_address == 0 and right_address !=0) or match_rate < 100 :
                ( right_line_security_implications_score, right_line_text ) = self.GetDisasmLinesWithSecurityImplications( right_lines, left_address == 0 )
            left_line_security_implications_score_total += left_line_security_implications_score
            right_line_security_implications_score_total += right_line_security_implications_score
        return right_line_security_implications_score_total
| 38.596774
| 144
| 0.731718
|
4a090a606e4e4e16f84d46041dff630bd8b26a27
| 120
|
py
|
Python
|
homework/stunum.py
|
hiyouga/PY-Learning
|
296f08e7964845c314874906039f244010d5422a
|
[
"MIT"
] | 2
|
2017-12-09T14:41:29.000Z
|
2017-12-27T11:12:16.000Z
|
homework/stunum.py
|
hiyouga/PY-Learning
|
296f08e7964845c314874906039f244010d5422a
|
[
"MIT"
] | null | null | null |
homework/stunum.py
|
hiyouga/PY-Learning
|
296f08e7964845c314874906039f244010d5422a
|
[
"MIT"
] | null | null | null |
import re
# Validate one line of input against three accepted formats: 5 digits, 8 digits,
# or a ZY/SY/BY prefix followed by 7 digits (presumably student-number formats — verify).
pattern = re.compile(r'(^\d{5}$)|(^((ZY)|(SY)|(BY))\d{7}$)|(^\d{8}$)')
candidate = input()
print("True" if pattern.match(candidate) else "False")
| 30
| 94
| 0.508333
|
4a090acbc5fec25c8a3c5e050b25d4705675d332
| 933
|
py
|
Python
|
main/migrations/0005_auto_20200606_1023.py
|
Sudani-Coder/ResistanceCommitteesSystem
|
58c79a906070f9c29fc668fc608542678b245535
|
[
"MIT"
] | null | null | null |
main/migrations/0005_auto_20200606_1023.py
|
Sudani-Coder/ResistanceCommitteesSystem
|
58c79a906070f9c29fc668fc608542678b245535
|
[
"MIT"
] | null | null | null |
main/migrations/0005_auto_20200606_1023.py
|
Sudani-Coder/ResistanceCommitteesSystem
|
58c79a906070f9c29fc668fc608542678b245535
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-06-06 08:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: orders Task by creation time and adds the Project model."""
    dependencies = [
        ('main', '0004_auto_20200602_0858'),
    ]
    operations = [
        # Default queryset ordering for Task: oldest first.
        migrations.AlterModelOptions(
            name='task',
            options={'ordering': ['Created']},
        ),
        # New Project model, optionally linked to an Area (link survives Area deletion).
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Title', models.CharField(max_length=100)),
                ('Created', models.DateTimeField(auto_now_add=True)),
                ('Area', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.Area')),
            ],
            options={
                'ordering': ['Created'],
            },
        ),
    ]
| 30.096774
| 117
| 0.560557
|
4a090b805237fed32d5298e0e04f61f119b56a08
| 354
|
py
|
Python
|
mbgdml/data/__init__.py
|
aalexmmaldonado/mbGDML
|
a7f89973730fd22d017eeda10de8d3c2cea25aee
|
[
"MIT"
] | null | null | null |
mbgdml/data/__init__.py
|
aalexmmaldonado/mbGDML
|
a7f89973730fd22d017eeda10de8d3c2cea25aee
|
[
"MIT"
] | null | null | null |
mbgdml/data/__init__.py
|
aalexmmaldonado/mbGDML
|
a7f89973730fd22d017eeda10de8d3c2cea25aee
|
[
"MIT"
] | null | null | null |
"""Data structures handled by mbgdml."""
from .basedata import mbGDMLData
from .calculation import PartitionOutput
from .structureset import structureSet
from .model import mbModel
from .predictset import predictSet
from .dataset import dataSet
# Public API re-exported by `from mbgdml.data import *`.
__all__ = [
    'mbGDMLData', 'PartitionOutput', 'structureSet', 'mbModel', 'predictSet',
    'dataSet'
]
| 25.285714
| 78
| 0.765537
|
4a090c073d1362a9782c3199afaffdb42be254b8
| 3,648
|
py
|
Python
|
integration/python/src/test_naughty_strings.py
|
roninx991/planetmint
|
fa2c8a5cc570535ad4740d87daa86dcbd5a123ea
|
[
"Apache-2.0"
] | 3
|
2022-01-19T13:39:52.000Z
|
2022-01-28T05:57:08.000Z
|
integration/python/src/test_naughty_strings.py
|
roninx991/planetmint
|
fa2c8a5cc570535ad4740d87daa86dcbd5a123ea
|
[
"Apache-2.0"
] | 67
|
2022-01-13T22:42:17.000Z
|
2022-03-31T14:18:26.000Z
|
integration/python/src/test_naughty_strings.py
|
roninx991/planetmint
|
fa2c8a5cc570535ad4740d87daa86dcbd5a123ea
|
[
"Apache-2.0"
] | 7
|
2022-01-13T16:20:54.000Z
|
2022-02-07T11:42:05.000Z
|
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# ## Testing potentially hazardous strings
# This test uses a library of `naughty` strings (code injections, weird unicode chars., etc.) as both keys and values.
# We look for either a successful tx, or in the case that we use a naughty string as a key, and it violates some key
# constraints, we expect to receive a well formatted error message.
# ## Imports
# Since the naughty strings get encoded and decoded in odd ways,
# we'll use a regex to sweep those details under the rug.
import re
# We'll use a nice library of naughty strings...
from blns import blns
# And parameterize our test so each one is treated as a separate test case
import pytest
# For this test case we import and use the Python Driver.
from planetmint_driver.crypto import generate_keypair
from planetmint_driver.exceptions import BadRequest
# import helper to manage multiple nodes
from .helper.hosts import Hosts
naughty_strings = blns.all()
# This is our base test case, but we'll reuse it to send naughty strings as both keys and values.
def send_naughty_tx(asset, metadata):
    """CREATE a transaction with the given asset/metadata and check the network's response.

    If the (single) metadata key violates key constraints ('.', leading '$', or NUL),
    expect a well-formatted 400 error; otherwise expect the tx to be retrievable.
    """
    # ## Set up a connection to Planetmint
    # Check [test_basic.py](./test_basic.html) to get some more details
    # about the endpoint.
    hosts = Hosts('/shared/hostnames')
    pm = hosts.get_connection()
    # Here's Alice.
    alice = generate_keypair()
    # Alice is in a naughty mood today, so she creates a tx with some naughty strings
    prepared_transaction = pm.transactions.prepare(
        operation='CREATE',
        signers=alice.public_key,
        asset=asset,
        metadata=metadata)
    # She fulfills the transaction
    fulfilled_transaction = pm.transactions.fulfill(
        prepared_transaction,
        private_keys=alice.private_key)
    # The fulfilled tx gets sent to the pm network
    try:
        sent_transaction = pm.transactions.send_commit(fulfilled_transaction)
    except BadRequest as e:
        sent_transaction = e
    # If her key contained a '.', began with a '$', or contained a NUL character
    regex = r'.*\..*|\$.*|.*\x00.*'
    key = next(iter(metadata))
    if re.match(regex, key):
        # Then she expects a nicely formatted error code
        status_code = sent_transaction.status_code
        error = sent_transaction.error
        regex = (
            r'\{\s*\n*'
            r'\s*"message":\s*"Invalid transaction \(ValidationError\):\s*'
            r'Invalid key name.*The key name cannot contain characters.*\n*'
            r'\s*"status":\s*400\n*'
            r'\s*\}\n*')
        assert status_code == 400
        assert re.fullmatch(regex, error), sent_transaction
    # Otherwise, she expects to see her transaction in the database
    elif 'id' in sent_transaction.keys():
        tx_id = sent_transaction['id']
        assert pm.transactions.retrieve(tx_id)
    # If neither condition was true, then something weird happened...
    else:
        raise TypeError(sent_transaction)
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_keys(naughty_string):
    """Use a naughty string as the key in both the asset data and the metadata."""
    payload = {naughty_string: 'nice_value'}
    send_naughty_tx({'data': dict(payload)}, payload)
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_values(naughty_string):
    """Use a naughty string as the value in both the asset data and the metadata."""
    payload = {'nice_key': naughty_string}
    send_naughty_tx({'data': dict(payload)}, payload)
| 36.118812
| 118
| 0.700658
|
4a090c7906f45725651ac4e6abdab0b4a3ff9cd7
| 15,710
|
py
|
Python
|
geolite2legacy.py
|
miyurusankalpa/geolite2legacy
|
19d7f2f3f102db03eb4adc9b6524172a18128c6a
|
[
"MIT"
] | 1
|
2021-08-19T00:41:38.000Z
|
2021-08-19T00:41:38.000Z
|
geolite2legacy.py
|
miyurusankalpa/geolite2legacy
|
19d7f2f3f102db03eb4adc9b6524172a18128c6a
|
[
"MIT"
] | null | null | null |
geolite2legacy.py
|
miyurusankalpa/geolite2legacy
|
19d7f2f3f102db03eb4adc9b6524172a18128c6a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Mark Teodoro
# Copyright (c) 2018-2019 Gianluigi Tiesi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import os
import re
import sys
import csv
import struct
import codecs
import ipaddr
import logging
import argparse
from time import time
from zipfile import ZipFile
from collections import defaultdict
from pygeoip_const import *
# Unicode-aware split on runs of non-word characters.
re_words = re.compile(r'\W+', re.U)

# Map each ISO country code (lowercased) to its index in pygeoip's COUNTRY_CODES table.
cc_idx = {code.lower(): idx for idx, code in enumerate(COUNTRY_CODES)}
# Aliases for codes absent from the legacy table:
cc_idx['cw'] = cc_idx['an'] # netherlands antilles / curacao
cc_idx['uk'] = cc_idx['gb'] # uk / great britain
cc_idx['sx'] = cc_idx['fx'] # st. martin?
cc_idx['xk'] = cc_idx['rs'] # kosovo -> serbia

# Remap of GeoLite2 continent codes to their legacy equivalents.
continent_codes = {'AS': 'AP'}
# geoname_id -> fips-10-4 region code; filled in at runtime elsewhere.
geoname2fips = {}
output_encoding = 'utf-8'
datfilecomment = ''
def serialize_text(text):
    """Encode *text* with the configured output encoding, replacing unencodable chars."""
    try:
        return text.encode(output_encoding)
    except UnicodeEncodeError:
        pass
    # Fall back to lossy encoding, but tell the user about it.
    print('Warning cannot encode {!r} using {}'.format(text, output_encoding))
    return text.encode(output_encoding, 'replace')
# Python 2/3 compatibility shims: a common `decode_text` and `TextIOWrapper`.
if sys.version_info[0] == 2:
    # noinspection PyShadowingBuiltins,PyUnresolvedReferences
    range = xrange
    def decode_text(text):
        # Py2 csv yields byte strings; decode them to unicode.
        return text.decode('utf-8')
    # noinspection PyPep8Naming,PyUnusedLocal
    def TextIOWrapper(f, encoding=None):
        # Py2 files are already byte streams; no wrapping needed.
        return f
else:
    from io import TextIOWrapper
    def decode_text(text):
        # Py3 csv already yields str.
        return text
class RadixTreeNode(object):
    """Internal trie node: `segment` is its index, `lhs`/`rhs` its 0/1 children."""
    __slots__ = ['segment', 'lhs', 'rhs']

    def __init__(self, segment):
        self.segment = segment
        self.lhs = self.rhs = None
class RadixTree(object):
    """Base binary radix trie for building MaxMind legacy-format databases.

    Subclasses set the class attributes below and implement `gen_nets`/`encode`.
    """
    # Bit position of the most significant address bit (31 for IPv4, 127 for IPv6).
    seek_depth = -1
    # Legacy database edition identifier (from pygeoip constants).
    edition = -1
    # Bytes per trie record and per final segment-count record.
    reclen = -1
    segreclen = -1
    def __init__(self, debug=False):
        self.debug = debug
        self.netcount = 0
        self.segments = [RadixTreeNode(0)]
        # Deduplicated data records: payload tuple -> offset into the data area.
        self.data_offsets = {}
        self.data_segments = []
        self.cur_offset = 1
    def __setitem__(self, net, data):
        """Insert network *net* with payload tuple *data*, walking bits MSB-first."""
        self.netcount += 1
        inet = int(net)
        node = self.segments[0]
        # Descend one trie level per prefix bit, creating nodes as needed.
        for depth in range(self.seek_depth, self.seek_depth - (net.prefixlen - 1), -1):
            if inet & (1 << depth):
                if not node.rhs:
                    node.rhs = RadixTreeNode(len(self.segments))
                    self.segments.append(node.rhs)
                node = node.rhs
            else:
                if not node.lhs:
                    node.lhs = RadixTreeNode(len(self.segments))
                    self.segments.append(node.lhs)
                node = node.lhs
        # Encode each distinct payload once and remember its offset.
        if data not in self.data_offsets:
            self.data_offsets[data] = self.cur_offset
            enc_data = self.encode(*data)
            self.data_segments.append(enc_data)
            self.cur_offset += (len(enc_data))
        if self.debug:
            # store net after data for easier debugging
            data = data, net
        # Attach the payload as a leaf on the final prefix bit.
        if inet & (1 << self.seek_depth - (net.prefixlen - 1)):
            node.rhs = data
        else:
            node.lhs = data
    def gen_nets(self, codes, outfile):
        """Yield (networks, payload tuple) pairs; implemented by subclasses."""
        raise NotImplementedError
    def load(self, locationsfile, outfile):
        """Read the optional locations CSV, then insert every network from `gen_nets`."""
        locations = {}
        if locationsfile:
            for row in csv.DictReader(locationsfile):
                geoname_id = row['geoname_id']
                # remap continent codes according to https://dev.maxmind.com/geoip/legacy/codes/iso3166/
                continent_code = row['continent_code']
                row['continent_code'] = continent_codes.get(continent_code, continent_code)
                locations[geoname_id] = row
        for nets, data in self.gen_nets(locations, outfile):
            for net in nets:
                self[net] = data
    def dump_node(self, node):
        """Render one child slot for `dump` (empty leaf, internal node, or data leaf)."""
        if not node:
            # empty leaf
            return '--'
        elif isinstance(node, RadixTreeNode):
            # internal node
            return node.segment
        else:
            # data leaf
            data = node[0] if self.debug else node
            return '%d %s' % (len(self.segments) + self.data_offsets[data], node)
    def dump(self):
        """Print a human-readable view of the trie (debugging aid)."""
        for node in self.segments:
            print(node.segment, [self.dump_node(node.lhs), self.dump_node(node.rhs)])
    def encode(self, *args):
        """Serialize one payload tuple to bytes; implemented by subclasses."""
        raise NotImplementedError
    def encode_rec(self, rec, reclen):
        """encode rec as 4-byte little-endian int, then truncate it to reclen"""
        assert (reclen <= 4)
        return struct.pack('<I', rec)[:reclen]
    def serialize_node(self, node):
        """Encode one child slot as a fixed-width record index."""
        if not node:
            # empty leaf
            rec = len(self.segments)
        elif isinstance(node, RadixTreeNode):
            # internal node
            rec = node.segment
        else:
            # data leaf
            data = node[0] if self.debug else node
            rec = len(self.segments) + self.data_offsets[data]
        return self.encode_rec(rec, self.reclen)
    def serialize(self, f):
        """Write the complete legacy database: trie, data area, comment and trailer."""
        if len(self.segments) >= 2 ** (8 * self.segreclen):
            logging.warning('too many segments for final segment record size!')
        for node in self.segments:
            f.write(self.serialize_node(node.lhs))
            f.write(self.serialize_node(node.rhs))
        f.write(struct.pack('B', 42))  # So long, and thanks for all the fish!
        f.write(b''.join(self.data_segments))
        f.write(datfilecomment.encode('ascii'))  # .dat file comment - can be anything
        # Trailer: 3 x 0xFF marker, edition byte, then the segment count.
        f.write(struct.pack('B', 0xff) * 3)
        f.write(struct.pack('B', self.edition))
        f.write(self.encode_rec(len(self.segments), self.segreclen))
class ASNRadixTree(RadixTree):
    """IPv4 ASN tree: maps networks to 'AS<number> <organization>' strings."""
    seek_depth = 31
    edition = ASNUM_EDITION
    reclen = STANDARD_RECORD_LENGTH
    segreclen = SEGMENT_RECORD_LENGTH
    def gen_nets(self, locations, infile):
        # `locations` is unused for ASN data; each CSV row maps a network directly.
        for row in csv.DictReader(infile):
            nets = [ipaddr.IPNetwork(row['network'])]
            org = decode_text(row['autonomous_system_organization'])
            asn = row['autonomous_system_number']
            entry = u'AS{} {}'.format(asn, org)
            yield nets, (serialize_text(entry),)
    def encode(self, data):
        # Record is the encoded string followed by three NUL padding/terminator bytes.
        return data + b'\0\0\0'
class ASNv6RadixTree(ASNRadixTree):
    """IPv6 ASN database builder; same CSV handling as the IPv4 variant."""
    # 128-bit address space: walk bits 127..0 instead of 31..0.
    seek_depth = 127
    edition = ASNUM_EDITION_V6
    reclen = STANDARD_RECORD_LENGTH
    segreclen = SEGMENT_RECORD_LENGTH
class CityRev1RadixTree(RadixTree):
    """IPv4 City database builder (rev1 format, GeoIPCity.dat)."""
    seek_depth = 31
    edition = CITY_EDITION_REV1
    reclen = STANDARD_RECORD_LENGTH
    segreclen = SEGMENT_RECORD_LENGTH
    def gen_nets(self, locations, infile):
        """Yield ([network], city-record-fields) for each Blocks CSV row
        whose geoname_id has a matching Locations entry."""
        for row in csv.DictReader(infile):
            location = locations.get(row['geoname_id'])
            if location is None:
                # block has no location data we can resolve; skip it
                continue
            nets = [ipaddr.IPNetwork(row['network'])]
            # fall back to the continent code when there is no country code
            country_iso_code = location['country_iso_code'] or location['continent_code']
            fips_code = geoname2fips.get(location['geoname_id'])
            if fips_code is None:
                logging.debug('Missing fips-10-4 for {}'.format(location['subdivision_1_name']))
                fips_code = '00'
            else:
                logging.debug('fips-10-4 for {} is {}'.format(location['subdivision_1_name'], fips_code))
            yield nets, (country_iso_code,
                         serialize_text(fips_code), # region
                         serialize_text(decode_text(location['city_name'])),
                         serialize_text(row['postal_code']),
                         row['latitude'],
                         row['longitude'],
                         location['metro_code'],
                         '') # area_code
    def encode(self, country, region, city, postal_code, lat, lon, metro_code, area_code):
        """Pack one city record into the legacy binary record layout."""
        def str2num(num, ntype):
            # empty CSV fields become the type's zero value
            return ntype(num) if num else ntype(0)
        country = country.lower()
        lat, lon = round(str2num(lat, float), 4), round(str2num(lon, float), 4)
        metro_code, area_code = str2num(metro_code, int), str2num(area_code, int)
        buf = []
        try:
            buf.append(struct.pack('B', cc_idx[country]))
        except KeyError:
            logging.warning("'%s': missing country. update const.COUNTRY_CODES?", country)
            buf.append(struct.pack('B', cc_idx['']))
        buf.append(b'\0'.join((region, city, postal_code)))
        buf.append(b'\0')
        # legacy format stores coordinates biased by +180 and scaled by 10000
        buf.append(self.encode_rec(int((lat + 180) * 10000), 3))
        buf.append(self.encode_rec(int((lon + 180) * 10000), 3))
        # metro/area codes are only stored for US entries
        if (metro_code or area_code) and country == 'us':
            buf.append(self.encode_rec(metro_code * 1000 + area_code, 3))
        else:
            buf.append(b'\0\0\0')
        return b''.join(buf)
class CityRev1v6RadixTree(CityRev1RadixTree):
    """IPv6 City database builder; same record encoding as the IPv4 variant."""
    # 128-bit address space: walk bits 127..0 instead of 31..0.
    seek_depth = 127
    edition = CITY_EDITION_REV1_V6
    reclen = STANDARD_RECORD_LENGTH
    segreclen = SEGMENT_RECORD_LENGTH
class CountryRadixTree(RadixTree):
    """IPv4 Country database builder (GeoIP.dat).

    Unlike the City/ASN trees there is no data blob: leaves encode the
    country-code index directly as an offset from COUNTRY_BEGIN.
    """
    seek_depth = 31
    edition = COUNTRY_EDITION
    reclen = STANDARD_RECORD_LENGTH
    segreclen = SEGMENT_RECORD_LENGTH
    def gen_nets(self, locations, infile):
        """Yield ([network], (country_iso_code,)) per Blocks CSV row."""
        for row in csv.DictReader(infile):
            location = locations.get(row['geoname_id'])
            if location is None:
                continue
            nets = [ipaddr.IPNetwork(row['network'])]
            # fall back to the continent code when there is no country code
            country_iso_code = location['country_iso_code'] or location['continent_code']
            yield nets, (country_iso_code,)
    def encode(self, cc):
        # unused
        return ''
    def serialize_node(self, node):
        """Encode one child pointer; data leaves become COUNTRY_BEGIN + cc index."""
        if not node:
            # empty leaf
            rec = COUNTRY_BEGIN
        elif isinstance(node, RadixTreeNode):
            # internal node
            rec = node.segment
        else:
            # data leaf
            data = node[0] if self.debug else node
            cc = data[0]
            try:
                offset = cc_idx[cc.lower()]
            except KeyError:
                logging.warning("'%s': missing country. update const.COUNTRY_CODES?", cc)
                offset = 0
            # data leaves directly encode cc index as an offset
            rec = COUNTRY_BEGIN + offset
        return self.encode_rec(rec, self.reclen)
    def serialize(self, f):
        """Write the tree to *f*; country edition has no data blob or 0x2A
        separator, just segments, padding, comment and trailer."""
        for node in self.segments:
            f.write(self.serialize_node(node.lhs))
            f.write(self.serialize_node(node.rhs))
        f.write(struct.pack('B', 0x00) * 3)
        f.write(datfilecomment.encode('ascii')) # .dat file comment - can be anything
        f.write(struct.pack('B', 0xff) * 3)
        f.write(struct.pack('B', self.edition))
        f.write(self.encode_rec(len(self.segments), self.segreclen))
class Countryv6RadixTree(CountryRadixTree):
    """IPv6 Country database builder; same leaf encoding as the IPv4 variant."""
    # 128-bit address space: walk bits 127..0 instead of 31..0.
    seek_depth = 127
    edition = COUNTRY_EDITION_V6
    reclen = STANDARD_RECORD_LENGTH
    segreclen = SEGMENT_RECORD_LENGTH
# Maps database type and IP version to the tree class that builds it.
RTree = {
    'Country': {'IPv4': CountryRadixTree, 'IPv6': Countryv6RadixTree},
    'City': {'IPv4': CityRev1RadixTree, 'IPv6': CityRev1v6RadixTree},
    'ASN': {'IPv4': ASNRadixTree, 'IPv6': ASNv6RadixTree}
}
# Default output filename per database type and IP version.
Filenames = {
    'Country': {'IPv4': "GeoIP.dat", 'IPv6': "GeoIPv6.dat"},
    'City': {'IPv4': "GeoIPCity.dat", 'IPv6': "GeoIPCityv6.dat"},
    'ASN': {'IPv4': "GeoIPASNum.dat", 'IPv6': "GeoIPASNumv6.dat"}
}
def parse_fips(fipsfile):
    """Load geoname_id -> fips-10-4 region-code mappings from *fipsfile*.

    Populates the module-level ``geoname2fips`` dict in place and returns it.
    """
    # Read explicitly as UTF-8, consistent with how the zip's CSVs are
    # decoded elsewhere in this script (previously used the platform default).
    with open(fipsfile, encoding='utf-8') as f:
        for row in csv.DictReader(f):
            geoname2fips[row['geoname_id']] = row['region']
    return geoname2fips
def main():
    """CLI entry point: convert a GeoLite2/GeoIP2 CSV zip to a legacy .dat file."""
    global output_encoding, datfilecomment
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input-file', required=True, help='input zip file containings csv databases')
    parser.add_argument('-o', '--output-file', help='output GeoIP dat file')
    parser.add_argument('-f', '--fips-file', help='geonameid to fips code mappings')
    parser.add_argument('-e', '--encoding', help='encoding to use for the output rather than utf-8')
    parser.add_argument('-d', '--debug', action='store_true', default=False, help='debug mode')
    parser.add_argument('-6', '--ipv6', action='store_const', default='IPv4', const='IPv6', help='use ipv6 database')
    opts = parser.parse_args()
    # validate the requested output encoding before doing any work
    if opts.encoding:
        try:
            codecs.lookup(opts.encoding)
        except LookupError as e:
            print(e)
            sys.exit(1)
        output_encoding = opts.encoding
    # index the zip's CSV members by database type / file kind / variant
    re_entry = re.compile(r'.*?/Geo(?:Lite|IP)2-(?P<database>.*?)-(?P<filetype>.*?)-(?P<arg>.*)\.csv')
    entries = defaultdict(lambda: defaultdict(dict))
    ziparchive = ZipFile(opts.input_file)
    for entry in ziparchive.filelist:
        match = re_entry.match(entry.filename)
        if match is None:
            continue
        db, filetype, arg = match.groups()
        entries[db][filetype][arg] = entry
    if len(entries) != 1:
        print('More than one kind of database found, please check the archive')
        sys.exit(1)
    # noinspection PyUnboundLocalVariable
    datfilecomment = '{} converted to legacy MaxMind DB with geolite2legacy'.format(os.path.dirname(entry.filename))
    dbtype, entries = entries.popitem()
    # ASN databases have no Locations file; the others need one (English locale)
    if dbtype == 'ASN':
        locs = None
    else:
        if not {'Locations', 'Blocks'} <= set(entries.keys()):
            print('Missing Locations or Block files, please check the archive')
            sys.exit(1)
        locs = entries['Locations'].get('en')
        if locs is None:
            print('Selected locale not found in archive')
            sys.exit(1)
        locs = TextIOWrapper(ziparchive.open(locs, 'r'), encoding='utf-8')
    if dbtype not in RTree:
        print('{} not supported'.format(dbtype))
        sys.exit(1)
    r = RTree[dbtype][opts.ipv6](debug=opts.debug)
    blocks = entries['Blocks'].get(opts.ipv6)
    if blocks is None:
        print('The selected block file not found in archive')
        sys.exit(1)
    # City/Country need the geonameid -> fips region mapping for region codes
    if dbtype != 'ASN':
        fips_file = opts.fips_file or os.path.join(os.path.dirname(os.path.realpath(__file__)), 'geoname2fips.csv')
        parse_fips(fips_file)
    tstart = time()
    print('Database type {} - Blocks {} - Encoding: {}'.format(dbtype, opts.ipv6, output_encoding))
    r.load(locs, TextIOWrapper(ziparchive.open(blocks, 'r'), encoding='utf-8'))
    if not opts.output_file:
        opts.output_file = Filenames[dbtype][opts.ipv6]
    print('Output file {}'.format(opts.output_file))
    with open(opts.output_file, 'wb') as output:
        r.serialize(output)
    tstop = time()
    print('wrote %d-node trie with %d networks (%d distinct labels) in %d seconds' % (
        len(r.segments), r.netcount, len(r.data_offsets), tstop - tstart))
# Script entry point.
if __name__ == '__main__':
    main()
| 33.784946
| 117
| 0.61515
|
4a090ddb75236a20cb81fb582f01c5b3f713d0f2
| 2,348
|
py
|
Python
|
visu_util.py
|
gillbam/pcn
|
b23687f28f080ed26654c3ebc89bfa00c8579f72
|
[
"MIT"
] | 199
|
2019-09-27T02:26:32.000Z
|
2022-03-31T06:24:18.000Z
|
visu_util.py
|
gillbam/pcn
|
b23687f28f080ed26654c3ebc89bfa00c8579f72
|
[
"MIT"
] | 38
|
2019-09-27T17:42:59.000Z
|
2022-03-16T22:46:39.000Z
|
visu_util.py
|
gillbam/pcn
|
b23687f28f080ed26654c3ebc89bfa00c8579f72
|
[
"MIT"
] | 50
|
2019-10-10T17:33:45.000Z
|
2022-03-15T00:03:17.000Z
|
'''
MIT License
Copyright (c) 2018 Wentao Yuan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import open3d as o3d
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_pcd_three_views(filename, pcds, titles, suptitle='', sizes=None, cmap='Reds', zdir='y',
                         xlim=(-0.3, 0.3), ylim=(-0.3, 0.3), zlim=(-0.3, 0.3)):
    """Render each point cloud in *pcds* from three viewpoints and save to *filename*.

    Args:
        filename: output image path passed to ``fig.savefig``.
        pcds: sequence of point-cloud arrays indexed as ``pcd[:, 0..2]``;
            column 0 also drives the colormap.
        titles: one subplot title per point cloud.
        suptitle: optional figure-level title.
        sizes: per-cloud scatter marker size; defaults to 0.5 for every cloud.
        cmap, zdir, xlim, ylim, zlim: forwarded to matplotlib.
    """
    if sizes is None:
        # idiomatic repetition instead of a copy-style list comprehension
        sizes = [0.5] * len(pcds)
    fig = plt.figure(figsize=(len(pcds) * 3, 9))
    for i in range(3):
        elev = 30
        azim = -45 + 90 * i  # three azimuths 90 degrees apart
        for j, (pcd, size) in enumerate(zip(pcds, sizes)):
            color = pcd[:, 0]
            ax = fig.add_subplot(3, len(pcds), i * len(pcds) + j + 1, projection='3d')
            ax.view_init(elev, azim)
            ax.scatter(pcd[:, 0], pcd[:, 1], pcd[:, 2], zdir=zdir, c=color, s=size, cmap=cmap, vmin=-1, vmax=0.5)
            ax.set_title(titles[j])
            ax.set_axis_off()
            ax.set_xlim(xlim)
            ax.set_ylim(ylim)
            ax.set_zlim(zlim)
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.9, wspace=0.1, hspace=0.1)
    plt.suptitle(suptitle)
    fig.savefig(filename)
    plt.close(fig)
def show_pcd(points):
    """Open an interactive Open3D window displaying *points* (Nx3 coordinates)."""
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    o3d.visualization.draw_geometries([pcd])
| 39.79661
| 113
| 0.680153
|
4a090dde9178bb8d0b1b5f68508950f216c4c5df
| 28,578
|
py
|
Python
|
languages/pt-br.py
|
rosanaw/chipincode
|
8c15c962676937dc8c1c2ac69e0780d88198094e
|
[
"MIT"
] | 1
|
2019-01-15T11:14:28.000Z
|
2019-01-15T11:14:28.000Z
|
languages/pt-br.py
|
rosanaw/chipincode
|
8c15c962676937dc8c1c2ac69e0780d88198094e
|
[
"MIT"
] | null | null | null |
languages/pt-br.py
|
rosanaw/chipincode
|
8c15c962676937dc8c1c2ac69e0780d88198094e
|
[
"MIT"
] | null | null | null |
# coding: utf8
{
'!=': '!=',
'!langcode!': 'pt-br',
'!langname!': 'Português (do Brasil)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN',
'%(nrows)s records found': '%(nrows)s registros encontrados',
'%s %%{row} deleted': '%s linhas apagadas',
'%s %%{row} updated': '%s linhas atualizadas',
'%s selected': '%s selecionado',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'(max 140 characters)': '(máximo 140 characteres)',
'(max 75 characters)': '(máximo 75 characteres)',
'(max 75 letters)': '(máximo 75 words)',
'+project.project.status_text+': '+project.project.status_text+',
'-------- Select --------': '-------- Selecione --------',
'----------------- Select --------': '----------------- Selecione --------',
'--------------------- Select --------': '--------------------- Selecione --------',
'--------------------- Select ---------------------': '--------------------- Selecione ---------------------',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'A cool name for your project': 'Um nome legal para seu o projeto',
'A cool name for your project (max 55 words)': 'Um nome legal para o seu projeto (máx. 55 caracteres)',
'A cool name for your project (max 75 characters)': 'Um nome legal para o seu projeto (máx. 75 caracteres)',
'A cool name here =) (max 55 characters)': 'Um nome legal aqui =) (máx. 55 caracteres)',
'A cool name here! (max 55 characters)': 'Um nome legal para o seu projeto (max 55 characters)',
'A new project was registered.': 'Um novo projeto foi registrado',
'A project by': 'Um projeto por',
'A project of': 'Um projeto de',
'A record change was detected. Consecutive update self-submissions are not allowed. Try re-submitting or refreshing the form page.': 'Por segurança, você não pode enviar atualizações consecutivas, volte para a listagem ou atualize a página.',
'A user tried to change a project that does not belong to him': 'Um usuário tentou alterar um projeto que não pertence a ele.',
'About': 'Sobre',
'About the project': 'Sobre o projeto',
'About user.': 'Sobre o usuário',
'About you': 'Sobre você',
'Accept the terms to proceed.': 'Aceite os termos para prosseguir.',
'Access Control': 'Controle de acesso',
'Achieved': 'Atingidos',
'Action': 'Ação',
'Active Projects': 'Projetos Ativos',
'Add': 'Adicionar',
'Address': 'Endereço',
'Address.': 'Endereço.',
'Admin Panel': 'Painel Administrativo',
'Administrative interface': 'Interface administrativa',
'Administrative Interface': 'Interface Administrativa',
'All Projects.': 'Todos os Projetos',
'And': 'E',
'Anonymous ': 'Anônimo',
'Anonymous Avatar': 'Avatar Anônimo',
'Anonymous avatar.': 'Avatar anônimo',
'App Settings': 'Configurações',
'appadmin is disabled because insecure channel': 'Administração desativada devido ao canal inseguro',
'Approve': 'Aprovar',
'are collected until': 'forem arrecadados até',
'Are you sure you want to delete this object?': 'Você tem certeza que quer excluir esse item?',
'Arrecadando': 'Arrecadando',
'As senhas não são iguais': 'As senhas não são iguais',
'Autentique-se': 'Entrar',
'Auth user ID.': 'Id de usuário.',
'Authorize': 'Autorizar',
'Authorize Funding': 'Autorizar Financiamento',
'Authorize Funding.': 'Autorizar Financiamento',
'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis',
'Back': 'Voltar',
'backers': 'apoiadores',
'Backers': 'Apoiadores',
'Become a fan': 'Torne-se um Fã',
'Bio': 'Sobre mim',
'Blog': 'Blog',
'by ': 'por',
'Cannot be empty': 'Não pode ser vazio',
'Categories': 'Categorias',
'Categories Configured': 'Categorias configuradas.',
'Category': 'Categoria',
'Category Name': 'Nome da Categoria',
'change password': 'modificar senha',
'Check to delete': 'Marque para apagar',
'Check to delete:': 'Marque para apagar:',
'Choose your reward': 'Escolha a sua recompensa',
'City.': 'Cidade',
'City/State': 'Cidade/Estado',
'City/state': 'Cidade/Estado',
'Clear': 'Limpar',
'Click on the link %(link)s to reset your password': 'Clique no link %(link)s para redefinir sua senha',
'Client IP': 'IP do Cliente',
'Close': 'Fechar',
'Collected': 'Arrecadados',
'Collecting': 'Arrecadando',
'Comments': 'Comentários',
'Complet you profile': 'Complete o Seu Perfil',
'Complete the form to contact us': 'Preencha o formulário para entrar em contato conosco',
'Complete the form to submit your project': 'Preencha o formulário para enviar o seu projeto',
'Confirm Payment': 'Confirme o Pagamento',
'Confirm the password': 'Confirme a senha',
'Confirm your password': 'Confirme sua senha',
'Contact phone.': 'Telefone de contato',
'Contact us': 'Contate-nos',
'contains': 'contém',
'Content': 'Conteúdo',
'Content Deleted': 'Conteúdo excluído',
'Content Updated': 'Conteúdo atualizado',
'Content.': 'Conteúdo',
'Controller': 'Controlador',
'Copyright': 'Copyright:',
'created by': 'criado por',
'Created By': 'Criado por',
'Created On': 'Criado em',
'credit': 'em créditos',
'Credit value.': 'Valor do crédito',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (colunas ocultas)',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'customize me!': 'Personalize-me!',
'Data Registered Successfully!': 'Informação cadastrada com sucesso!',
'Data Sender': 'Remetente dos Dados',
'Data updated successfully': 'Dados atualizados com sucesso',
'data uploaded': 'dados enviados',
'Database': 'banco de dados',
'Database %s select': 'Selecionar banco de dados %s',
'Date': 'Data',
'Date of the donations': 'Data da doação',
'days': 'dias',
'days left': 'dias restantes',
'db': 'bd',
'DB Model': 'Modelo BD',
'Default Avatar': 'Avatar padrão',
'Default Avatar.': 'Avatar padrão',
'Default Image': 'Imagem padrão',
'Default Image.': 'Imagem padrão',
'Delete': 'Apagar',
'delete': 'apagar',
'Delete:': 'Apagar:',
'Description': 'Descrição',
'Description of the project.': 'Descrição do projeto',
'Disable': 'Desabilitar',
'Disapprove': 'Reprovar',
'Do not want to receive reward': 'Não quero receber recompensa',
'Donated': 'Doados',
'Donation date.': 'Data da doação',
'Donation gateway.': 'Gateway',
'Donation Received': 'Doação Recebida',
'Donation registered successfully': 'Doação registrada com sucesso',
'Donation reversed!': 'Doação estornada!',
'Donation status text.': 'Texto de estatus da doação',
'Donation status.': 'Estatus da doação',
'Donation value.': 'Valor da Doação',
'Donation visibility.': 'Visibilidade da doação',
'done!': 'Concluído!',
'E-mail': 'E-mail',
'E-mail inválido': 'E-mail inválido',
'Edit': 'Editar',
'Edit current record': 'Editar o registro atual',
'Edit profile': 'Editar perfil',
'edit profile': 'editar perfil',
'Edit the Project': 'Editar o Projeto',
'Edit the Reward': 'Editar a Recompensa',
'Edit the reward': 'Editar a recompensa',
'Edit This App': 'Editar Essa Aplicação',
'Edit updates': 'Editar atualizações',
'eg. 33670166': 'ex.:. 33670166',
'eg. 4,000.00': 'ex.:. 4000.00',
'eg. 4000 (numbers only)': 'ex.: 4000 (somente números)',
'eg. @codeupstudio': 'ex.: @codeupstudio',
'eg. http://codeup.com.br': 'ex.: http://codeup.com.br',
'eg. http://facebook.com/codeupstudio': 'ex.: http://facebook.com/codeupstudio',
'EIN': 'CPF',
'Email': 'E-mail',
'Email data Configured!': 'Dados de email configurados.',
'Email inválido': 'E-mail inválido',
'Email Login': 'Login do e-mail',
'Email Password': 'Senha do e-mail',
'Email sent': 'E-mail enviado',
'Email sent successfully': 'E-mail enviado com sucesso',
'Email Server': 'Servidor de e-mail',
'Email Server Port': 'Porta do servidor de e-mail',
'Email Settings': 'Configurações de E-mail',
'Enable': 'Habilitar',
'Enable MoIP': 'Habilitar MoIP',
'Enable Paypal': 'Habilitar Paypal',
'End date of the project.': 'Data Final do Projeto',
'End Date:': 'Data Final:',
'Ending': 'Finalizando',
'enter a number between %(min)g and %(max)g': 'Digite um número entre %(min)g e %(max)g',
'enter an integer between %(min)g and %(max)g': 'digite um número entre %(min)g e %(max)g',
'enter date as %(format)s': 'insira uma data nesse formato %(format)s',
'Entrar': 'Entrar',
'Enviar': 'Enviar',
'Errors': 'Erros',
'Errors in form, please check it out.': 'Há erros no formulário, por favor, verifique-os.',
'Expired Projects.': 'Projetos Expirados',
'export as csv file': 'exportar como um arquivo csv',
'Export:': 'Exportar:',
'F.A.Q': 'F.A.Q',
'F.A.Q Question': 'Pergunta da F.A.Q',
'Facebook': 'Facebook',
'Facebook Page': 'Página do Facebook',
'Facebook page': 'Página do Facebook',
'Facebook page of the project.': 'Facebook',
'Facebook Profile.': 'Facebook',
'False': 'Falso',
'FAQ': 'F.A.Q',
'Featured image': 'Imagem destacada',
'Featured picture of the project.': 'Imagem destaque do projeto',
'file': 'Arquivo',
'Fill the project categories': 'Escolha a categoria',
'Fill the project categories.': 'Escolha a categoria',
'Fill the project description.': 'Insira a descrição.',
'Fill the project value.': 'Insira o valor',
'Finalized': 'Finalizados',
'Find us': 'Encontre-nos',
'Finish project that achieved the goal': 'Finalizar projeto que atingiu a meta',
'Finish project that did not obtain the goal': 'Finalizar projeto que não atingiu a meta\r\n',
'Finish Successfully': 'Finalizar com sucesso',
'Finish Unsuccessfully': 'Finalizar sem sucesso',
'First name': 'Nome',
'Forgot username?': 'Esqueceu seu nome de usuário?',
'Funding Time': 'Tempo de financiamento',
'G+': 'G+',
'Goal of the project.': 'Objetivo concluído',
'Google Analytics id': 'ID do Google Analytics',
'Google Groups': 'Grupo de Usuários',
'Group %(group_id)s created': 'O grupo %(group_id)s foi criado',
'Group ID': 'ID do Grupo',
'Group uniquely assigned to user %(id)s': ' Grupo atribuído exclusivamente ao usuário %(id)s',
'Groups': 'Grupos',
'Hello': 'Olá',
'Help Projects': 'Ajudar Projetos',
'Help the Project': 'Ajudar o Projeto',
'Here are listed all active projects': 'Aqui estão listados todos os projetos que estão ativos.',
'Here are listed all the projects registered on the system': 'Aqui estão listados todos os projetos registrados no sistema',
'Here are listed all the projects that are awaiting for approval': 'Aqui estão listados todos os projetos que estão aguardando aprovação',
'Here are listed all the projects that completed the period of funding': 'Aqui estão listados todos os projetos que já completaram o período de financiamento',
'His project was registered and is awaiting approval.': 'Seu projeto foi registrado e está aguardando aprovação.',
'His project was registered.': 'Seu projeto foi registrado.',
'Home': 'Início',
'Home Banner': 'Banner da página inicial',
'Home Banner.': 'Banner da página inicial',
'How did you get here?': 'Como você chegou aqui?',
'How much would you like donate?': 'Quanto você gostaria de doar?',
'HTML': 'HTML',
'I forgot my password': 'Esqueci a senha',
'I have read and agree to the': 'Eu li e concordo com os',
'I liked this project in': 'Eu gostei desse projeto em',
'I want that my help is anonymous': 'Quero que minha ajuda seja anônima',
'Id': 'ID',
'Illegal Operation': 'Operação Ilegal',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'in': 'em',
'in credits.': 'em créditos.',
'Index': 'Início',
'Insert': 'Inserir',
"Insert here the description of your project for us, what will you do with the money raised, etc.. Don't worry, you can improve it later": 'Insira a descrição do seu projeto, o que você vai fazer com o dinheiro, etc. Não se preocupe, você poderá editar essas informações depois.',
'Insert here the description of your project, what will you do with the money raised, etc..': 'Insira a descrição do seu projeto, o que você vai fazer com o dinheiro, etc. Não se preocupe, você poderá editar essas informações depois.',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'Insert reward': 'Inserir recompensa',
'Install': 'Instalar',
'Install the System': 'Instalar o Sistema',
'Insufficient privileges': 'Privilégios Insuficientes',
'Internal State': 'Estado Interno',
'Introduction': 'Introdução',
'Invalid email': 'E-mail inválido',
'invalid expression': 'Expressão inválida',
'Invalid login': 'Login inválido',
'Invalid Query': 'Consulta Inválida',
'invalid request': 'requisição inválida',
'Is Active': 'Está ativo',
'It should be a number! eg. 33670166': 'Insira apenas o id do vídeo! Ex.: 33670166',
'It should be a url! eg. http://codeup.com.br': 'Precisa ser uma url. Ex.: http://codeup.com.br',
'It should be a url! eg. http://facebook.com/codeupstudio': 'Precisa ser uma url. Ex.: http://facebook.com/codeupstudio',
'Join': 'Registrar-se',
'JSON': 'JSON',
'Key': 'Chave',
'Last name': 'Sobrenome',
'left': 'restantes',
'Left': 'Restantes',
'Link Description': 'Descrição do Link',
'Link URL': 'URL do Link',
'List Active': 'Listar Ativos',
'List All': 'Listar Todos',
'List All Projects.': 'Listar Todos os Projetos',
'List Expired': 'Listar Expirados',
'List Pending': 'Listar Pendentes',
'Logged in': 'Conectado',
'Logged in as': 'Conectado como',
'Logged out': 'Desconectado',
'Login': 'Entrar',
'login': 'Entrar',
'Login with Facebook': 'Fazer login com o Facebook',
'Logo Image.': 'Imagem do logo',
'logout': 'Sair',
'Logout': 'Desconectar',
'Lost my password': 'Perdi minha senha',
'Lost Password': 'Esqueceu sua senha?',
'Lost password?': 'Esqueceu sua senha?',
'lost password?': 'Esqueceu sua senha?',
'Main Menu': 'Menu Principal',
'Menu Model': 'Modelo de Menu',
'Message': 'Mensagem',
'Modified By': 'Modificado por',
'Modified On': 'Modificado em',
'MoIP id': 'ID do MoIP',
'MoIP URL': 'URL do MoIP',
'More info of the project': 'Mais informações sobre o projeto',
'My Sites': 'Meus sites',
'Name': 'Nome',
'Name of the project.': 'Nome do projeto',
'Network': 'Rede Social',
'New': 'Novo',
'New password': 'Nova senha',
'New Record': 'Novo Registro',
'new record inserted': 'novo registro inserido',
'New Reward': 'Nova recompensa',
'New update': 'Nova atualização',
'Newsletter': 'Newsletter',
'next 100 rows': 'próximas 100 linhas',
'No databases in this application': 'Sem bancos de dados nesta aplicação',
'No donation received yet! Be the first to donate.': 'Nenhuma doação recebida ainda. Seja o primeiro a doar!',
'No project found': 'Nenhum projeto encontrado',
'No records found': 'Nenhum registro encontrado',
'No updates yet!': 'Nenhuma atualização ainda.',
'No, I want to go back': 'Não, eu gostaria de retornar.',
'Not Autorized': 'Não Autorizado',
'not in': 'não em',
'Não pode ser vazio': 'Não pode ficar em branco',
'Object or table name': 'Nome do objeto ou tabela',
'OK - Finish Successfully': 'OK - Finalizar com sucesso',
'OK - Finish Unsuccessfully': 'OK - Finalizar sem sucesso',
'Online examples': 'Alguns exemplos',
'Or': 'Ou',
'or import from csv file': 'ou importar de um arquivo csv',
'or more.': 'ou mais',
'Origin': 'Origem',
'Other Data': 'Outros Dados',
'Owner': 'Dono',
'Password': 'Senha',
"Password fields don't match": 'As senhas não são iguais',
'Password reset': 'Redefinir senha',
'Pay': 'Pagar',
'Payment': 'Pagamento',
'Payment method Configured!': 'Métodos de pagamento configurados.',
'Payment Settings': 'Configurações de Pagamento',
'Paypal ID': 'ID do Paypal',
'Paypal URL': 'URL do Paypal',
'Pending Projects.': 'Projetos Pendentes',
'Phone': 'Telefone',
'please input your password again': 'insira sua senha novamente',
'Please, check the form errors!': 'Verifique os erros abaixo',
'Please, configure the categories!': 'Por favor, configure as categorias!',
'Please, configure the email data!': 'Por favor, configure os dados de envio de email!',
'Please, configure the payment method!': 'Por favor, configure os métodos de pagamento!',
'Please, configure the website website_images!': 'Por favor, configure as Imagens do site!',
'Please, create the website terms of use text!': 'Por favor, configure o texto dos termos de uso',
'Please, enter a value from 0 to 75 characters': 'Insira no máximo 75 caracteres.',
'Please, enter a value from 1 to 140 characters': 'Insira no mínimo 1 e no máximo 140 caracteres.',
'Please, enter a value from 1 to 55 characters': 'Insira no mínimo 1 e no máximo 55 caracteres.',
'Please, enter a value from 1 to 75 characters': 'Insira no mínimo 1 e no máximo 75 caracteres.',
'Please, enter a value from 1 to 80 characters': 'Insira no mínimo 1 e no máximo 80 caracteres.',
'Please, enter the website info!': 'Por favor, insira as informações do site',
'Pojects that you help': 'Projetos que você ajuda',
'previous 100 rows': '100 linhas anteriores',
'Profile updated': 'Perfil atualizado',
'Project': 'Projeto',
'Project by': 'Projeto por',
'Project category': 'Categoria do projeto',
'Project Category': 'Categoria',
'Project description': 'Descrição do projeto',
'Project Description': 'Descrição do Projeto',
'Project details': 'Detalhes do Projeto',
'Project Finalized?': 'Projeto finalizado',
'Project Name': 'Nome do Projeto',
'Project name': 'Nome do projeto',
'Project Registered Successfully!': 'Projeto cadastrado com sucesso!',
'Project reward Created': 'Recompensa criada',
'Project reward Updated': 'Recompensa atualizada',
'Project Slug': 'Slug do Projeto',
'Project Status': 'Status do Projeto',
'Project Updated': 'Projeto atualizado',
'Project Updates': 'Atualizações do Projeto',
'project.project.status_text': 'project.project.status_text',
'Projects': 'Projetos',
'Projects Categories': 'Categorias dos Projetos',
'Projects Supported': 'Projetos apoiados',
'Projeto Registered Successfully!': 'Projeto cadastrado com sucesso!',
'Published on ': 'Publicado em',
'Put here your payment settings': 'Coloque aqui suas informações de pagamento',
'Put here your settings to send emails': 'Coloque aqui suas configurações para o envio de e-mails',
'Put here your site info': 'Coloque aqui as informações de seu site',
'Query Not Supported: %s': '',
'Query:': 'Consulta:',
'R$': 'R$',
'Raising Funding': 'Captação de Recursos',
'reached of R$': 'atingidos de R$',
'Record': 'registro',
'Record %(id)s created': 'Registro %(id)s creaado',
'Record %(id)s updated': 'Registro %(id)s Atualizado',
'Record Created': 'Registro Criado',
'Record Deleted': 'Registro Apagado',
'record does not exist': 'registro não existe',
'Record id': 'id do registro',
'Record ID': 'ID do Registro',
'Record Updated': 'Registro Atualizado',
'Redirecting to Moip page': 'Redirecionando para a página do MoIP...',
'Redirecting to Paypal page': 'Redirecionando para a página do PayPal...',
'register': 'Registre-se',
'Register': 'Registre-se',
'Register date of the project.': 'Data de cadastro do projeto',
'Registration identifier': 'Identificador de cadastro',
'Registration key': 'Chave de cadastro',
'Registration successful': 'Cadastro efetuado com sucesso!',
'Registre-se': 'Cadastre-se',
'Remember me': 'Lembre-se de mim',
'Remember me (for 30 days)': 'Lembre-se de mim (por 30 dias)',
'Request reset password': 'Solicitar nova senha',
'Required Fields': 'Campos Obrigatórios',
'Reset Password': 'Redefinir a Senha',
'Reset Password key': 'Chave de solicitação de senha',
'Resources': 'Recursos',
'Return': 'Retornar',
'Reverse': 'Estornar',
'Reverse donation': 'Estornar doação',
'Reward': 'Recompensa',
'Reward description.': 'Descrição da recompensa.',
'Reward value.': 'Valor da recompensa',
'Rewards': 'Recompensas',
'Role': 'Regra',
'Rows in Table': 'Linhas na tabela',
'Rows selected': 'Linhas selecionadas',
'Salvar perfil': 'Salvar perfil',
'Save changes': 'Salvar alterações',
'Save profile': 'Salvar perfil',
'Search': 'Pesquisar',
'Send': 'Enviar',
'Send a Project': 'Enviar um Projeto',
'Send message': 'Enviar mensagem',
'Send Project': 'Enviar projeto',
'Send Projects': 'Enviar Projetos',
'Send your Project': 'Envie Seu Projeto',
'Set here the admin system data': 'Defina aqui os dados do sistema administrativo do site',
'Set here your images': 'Configure aqui as imagens do site',
'Short description': 'Descrição curta',
'Short Description': 'Descrição Curta',
'Short Description of the project.': 'Descrição curta do projeto',
'short_url.': 'URL curta',
'Show projects in the category': 'Projetos cadastrados na categoria',
'Sign in': 'Entrar',
'Site Logo': 'Logotipo do Site',
'Social Network': 'Redes Sociais',
'Social Network Links': 'Links das Redes Sociais',
'Solicitar nova senha': 'Solicitar nova senha',
'SSC (social security card)': 'CPF',
'Start date of the project.': 'Data Inicial',
'Start Date:': 'Data Inicial:',
'starts with': 'inicia com ',
'state': 'estado',
'State.': 'UF',
'Status': 'Status',
'Status of the project': 'Status do projeto',
'Status of the project.': 'Status do Projeto',
'Status text.': 'Texto de status do projeto',
'Subject': 'Assunto',
'Submit': 'Enviar',
'submit': 'enviar',
'Success!': 'Sucesso!',
'Support': 'Suporte',
'Support the Project': 'Apoiar o Projeto',
'Sure you want to delete this object?': 'Está certo(a) de que deseja apagar esse objeto?',
'System': 'Sistema',
'System Install': 'Instalação do Sistema',
'Table': 'tabela',
'Table name': 'Nome da Tabela',
'Terms of use': 'Termos de Uso',
'Terms of Use': 'Termos de Uso',
'Terms of use of the Chip In Code Plataform': 'Termos de Uso da Plataforma Chip In Code',
'Thank you for your donation, once it is released by MoIP will be credited to the project.': 'Obrigado por sua doação, assim que ela for liberada pelo MoIP, será creditada para o projeto.',
'Thank you, for your donation.': 'Obrigado por sua doação.',
'Thank you. I just want to help the project.': 'Obrigado. Eu quero somente ajudar o projeto.',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.',
'The project': 'O projeto',
'This project is closed.': 'O projeto foi finalizado.',
'This project will only be funded if at least R$': 'Este projeto só será financiado se no mínimo R$ ',
'Timestamp': 'Timestamp',
'Title': 'Título',
'Title.': 'Título',
'To R$': 'Até R$',
'To submit your project, we need you to enter or confirm some data.': 'Para enviar o seu projeto, é necessário que você informe ou confirme alguns dados.',
'too short': 'muito curto',
'Total Collected': 'Total Arrecadado',
'Total of donors.': 'Total de doadores',
'Traceback': 'Traceback',
'True': 'Verdadeiro',
'TSV (Excel compatible)': 'TSV (Compatível com o Excel)',
'TSV (Excel compatible, hidden cols)': 'TSV (Compatível com o Excel, colunas ocultas)',
'Tweet': 'Tweet',
'Twitter': 'Twitter',
'Twitter account': 'Conta do Twitter',
'Twitter account of the project.': 'Twitter',
'Twitter page.': 'Twitter',
'Type the subject': 'Informe o assunto',
'Type your email': 'Informe seu e-mail',
'Type your name': 'Informe seu nome',
'unable to parse csv file': 'não foi possível analisar o arquivo csv',
'Unable to send email': 'Não é possível enviar e-mails',
'Update': 'Atualizar',
'Update Profile': 'Atualizar Perfil',
'Update Project Created': 'Atualização cadastrada',
'Update the project': 'Atualizar projeto',
'Update Your info': 'Atualizar seus dados',
'Update:': 'Atualizar:',
'Updates': 'Atualizações',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT para construir consultas mais complexas.',
'Use the left side menu to manage the system and the registered projects.': 'Use o menu da esquerda para gerenciar o sistema e os projetos registrados.',
'Use the left side menu, or the shortcuts below, to manage the system and the registered projects.': 'Use o menu da esquerda, ou os atalhos abaixo, para gerenciar o sistema e os projetos registrados.',
'Use this space to update your backers on the progress of your project': 'Utilize este espaço para atualizar seus apoiadores sobre o progresso de seu projeto',
'User': 'Usuário',
'User %(id)s Logged-in': 'Usuário %(id)s conectou',
'User %(id)s Logged-out': 'Usuário %(id)s saiu',
'User %(id)s Password reset': 'Usuário %(id)s Password reset',
'User %(id)s Profile updated': 'Usuário %(id)s Profile updated',
'User %(id)s Registered': 'Usuário %(id)s Registered',
'User avatar.': 'Foto',
'User EIN.': 'CPF',
'User ID': 'ID do Usuário',
'User Message Contact': 'Mensagem de Contato do Usuário',
'User owner of the project.': 'Dono do projeto',
'User profile': 'Perfil do Usuário',
'User Profile': 'Perfil do Usuário',
'User SSC (social security card).': 'CPF',
'User.': 'Usuário',
'Username': 'Nome de usuário',
'Value': 'Valor',
'value already in database or empty': 'Já cadastrado no sistema',
'value not allowed': 'Valor não permitido',
'value not in database': 'Valor não incluso no sistema',
'Value of the donation': 'Valor da doação',
'Value of the project.': 'Valor do Projeto',
'Value Required': 'Valor',
'Value that you need': 'Valor que você precisa',
'Value total collected.': 'Total arrecadado',
'Verify Password': 'Verifique sua senha',
'Video ID on vimeo.com': 'ID do vídeo no vimeo.com',
'Video URL of the project.': 'Vídeo do projeto',
'Videos': 'Vídeos',
'View': 'Ver',
'View Site': 'Visualizar Site',
'Vimeo': 'Vimeo',
'Wait! Do not close this page!': 'Espere! Não feche esta página!',
'Waiting for approval': 'Aguardando aprovação',
'Warning, this operation is irreversible. Are you sure?': 'Aviso: esta operação é irreversível. Você tem certeza?',
'Warning, this operation is irreversible. Donations will be credited to the donors.': 'Aviso: esta operação é irreversível. As doações serão creditadas para os doadores. ',
'Warning, this operation is irreversible. Donations will be credited to the donors. Are you sure?': 'Aviso: esta operação é irreversível. As doações serão creditadas para os doadores. Você tem certeza?',
'Website': 'Site',
'Website Answer': 'Respostas',
'Website Author': 'Autor',
'Website Configurations': 'Configurações do site',
'Website description': 'Descrição do site',
'Website Description': 'Descrição do Site',
'Website email': 'E-mail do site',
'Website Email Settings': 'Configurações do E-mail do Site',
'Website F.A.Q': 'F.A.Q do Site',
'Website Generator': 'Gerador do Site',
'Website Images': 'Imagens do Site',
'Website images Configured!': 'Imagens do site configuradas.',
'Website Info': 'Informações do Site',
'Website info Configured!': 'Informações do site configuradas.',
'Website keywords': 'Palavras-chave do site',
'Website Meta Info': 'Meta Dados do Site',
'Website of the project.': 'Site do projeto',
'Website Payment Settings': 'Configurações de Pagamento do Site',
'Website terms of use Configured!': 'Termos de uso do site configurado.',
'Website Title': 'Título do site',
'Website.': 'Site',
'Welcome': 'Bem-vindo',
'Welcome %s': 'Bem-vindo %s',
'Welcome to web2py': 'Bem-vindo ao web2py',
'Welcome to web2py!': 'Bem-vindo ao web2py',
'XML': 'XML',
'You can track the status of your project, and register rewards, accessing your user profile': 'Você pode acompanhar o status do projeto e registrar recompensas acessando o seu perfil.',
'You have': 'Você possui',
'You have a': 'Você possui',
'You have R$': 'Você possui R$',
'You tried to do an irregular operation. An e-mail was sent to the website administrator.': 'Você tentou realizar uma operação irregular. Um e-mail de notificação foi enviado para o administrador do site!',
'You will be directed to MoIP page.': 'Você será redirecionado à página do MoIP.',
'You will be directed to Paypal page.': 'Você será redirecionado à página do PayPal.',
'Your avatar': 'Seu avatar',
'Your credits': 'Seus créditos',
'Your data will not be publicly visible. Only when you support a project is that the project owner will receive your email and your information to provide the rewards later. We also need some information if you have registered any project.': 'Seus dados não ficarão visíveis publicamente. Apenas quando você apoiar um projeto é que o dono do projeto receberá o seu e-mail e as suas informações para providenciar a recompensa mais tarde. Nós também precisamos de algumas informações, caso você tenha registrado algum projeto.',
'Your info': 'Seus dados',
'Your profile': 'Seu perfil',
'Your project is finished.': 'Seu projeto foi finalizado.',
'Your project was registered and is awaiting for approval.': 'Seu projeto foi registrado e está aguardando aprovação.',
'Your project was registered.': 'Seu projeto foi registrado.',
'Your Projects': 'Seus projetos',
'Youtube': 'Youtube',
'Zip code': 'CEP',
'Zip Code': 'CEP',
'Zip code.': 'CEP',
}
| 47.080725
| 526
| 0.702883
|
4a090eabcd338ad4e2730c0936e6e553ac8b64b3
| 8,350
|
py
|
Python
|
get_1M_RANSAC_FPFH_error.py
|
BiaoBiaoLi/TAG-Reg-Iterative-Accurate-Global-Registration-Algorithm
|
36bb217da8aec44d1cde5082b1126ccd00d47e7f
|
[
"MIT"
] | 1
|
2021-06-14T21:43:59.000Z
|
2021-06-14T21:43:59.000Z
|
get_1M_RANSAC_FPFH_error.py
|
BiaoBiaoLi/TAG-Reg-Iterative-Accurate-Global-Registration-Algorithm
|
36bb217da8aec44d1cde5082b1126ccd00d47e7f
|
[
"MIT"
] | null | null | null |
get_1M_RANSAC_FPFH_error.py
|
BiaoBiaoLi/TAG-Reg-Iterative-Accurate-Global-Registration-Algorithm
|
36bb217da8aec44d1cde5082b1126ccd00d47e7f
|
[
"MIT"
] | null | null | null |
import open3d as o3d
import numpy as np
import time
import copy
from sklearn.neighbors import KDTree
import matplotlib.pyplot as plt
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=0.15, max_nn=30))
target_temp.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=0.15, max_nn=30))
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
def preprocess_point_cloud(pcd, voxel_size):
print(":: Downsample with a voxel size %.3f." % voxel_size)
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
# print(":: Estimate normal with search radius %.3f." % radius_normal)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
radius_feature = voxel_size * 5
# print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
return pcd_down, pcd_fpfh
def prepare_dataset(source, target, voxel_size):
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
# print('source_fpfh', source_fpfh.num, target_fpfh.num)
# print('source_fpfh',source_fpfh,np.asarray(source_fpfh.data))
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_global_registration(source_down, target_down, source_fpfh,
target_fpfh, voxel_size):
distance_threshold = voxel_size * 1.5
# print(":: RANSAC registration on downsampled point clouds.")
# print(" Since the downsampling voxel size is %.3f," % voxel_size)
# print(" we use a liberal distance threshold %.3f." % distance_threshold)
result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
4, [
o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(
0.9),
o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
distance_threshold)
], o3d.pipelines.registration.RANSACConvergenceCriteria(1000000, 500))
return result
def file2matrix(filename):
fr = open(filename)
numberOfLines = len(fr.readlines()) #get the number of lines in the file
trans = np.eye(4) #prepare matrix to return
truth = [] #prepare labels return
fr = open(filename)
index = 0
for line in fr.readlines():
line = line.strip()
# listFromLine = line.split('\t')
listFromLine = line.split()
listFromLine = [float(x) for x in listFromLine]
if(index % 5 ==0):
index = 0
elif(index % 5 ==1):
trans[0, :] = np.array(listFromLine)
elif(index % 5 ==2):
trans[1,:] = np.array(listFromLine)
elif(index % 5 ==3):
trans[2,:] = np.array(listFromLine)
elif(index % 5 ==4):
trans[3,:] = np.array(listFromLine)
truth.append(trans.copy())#这里不用copy的话,,,每个元素都是一样的
index += 1
return truth
if __name__ == '__main__':
root_path = '/Bill/DataSet/RedWood/'
dataset_names = ['loft', 'lobby', 'apartment','bedroom','boardroom']
root_save_path = '/ransac_1M/src2ref'
dataset_numbers = [252,199,319,219,243]
for i in range(len(dataset_names)):
# for i in range(1):
file_path = root_path + dataset_names[i]
end = dataset_numbers[i]
save_path = dataset_names[i] + root_save_path
print(file_path)
groud_truth = file2matrix(file_path + '/reg_output.log')
voxel_size = 0.05 # means 5cm for this dataset
err_R = []
err_T = []
trans_all = []
fail_list = []
start = 0
# end = 251
for j in range(start, end):
print(
'j',j
)
# index_src = j + 1
# index_ref = j
index_src = j
index_ref = j + 1
source_show = o3d.io.read_point_cloud(file_path + "/mesh_%s.ply"%(index_src))
target_show = o3d.io.read_point_cloud(file_path + "/mesh_%s.ply"%(index_ref))
source, target, source_down, target_down, source_fpfh, target_fpfh = prepare_dataset(source_show, target_show, voxel_size)
result_ransac = execute_global_registration(source_down, target_down,
source_fpfh, target_fpfh,
voxel_size)
print(result_ransac.transformation)
total_trans = result_ransac.transformation
R = total_trans[:3,:3].reshape(3,3)
t = total_trans[:3,3].reshape(-1,1)
if index_src > index_ref:
err_R.append(np.arccos((np.trace(R.T @ groud_truth[j][:3,:3]) - 1) / 2) * 180 / np.pi )
err_T.append(np.linalg.norm(t - groud_truth[j][:3,3].reshape(-1,1), ord=2,axis=0))
trans_all.append((total_trans))
else:
err_R.append( np.arccos( (np.trace(R @ groud_truth[j][:3,:3] ) - 1) / 2) * 180 / np.pi )
err_T.append(np.linalg.norm(-R.T @ t - groud_truth[j][:3,3].reshape(-1,1), ord=2,axis=0))
trans_all.append((total_trans))
# print(total_trans[:3,:3] @ groud_truth[j][:3,:3], np.trace(total_trans[:3,:3] @ groud_truth[j][:3,:3] - np.eye(3)))
# print(total_trans, groud_truth[j])
print('err_R err_T', err_R[j - start], err_T[j - start],total_trans)
if index_src > index_ref:
#
# location = str(start) + '_' + str(end)
err_all = [err_R, err_T]
plt.figure("ERR_R ref2src") # 图像窗口名称
plt.plot(err_R)
plt.savefig(save_path + '/%s_%s_err_All_ref2src.jpg'%(start, end))
plt.close()
# plt.show()
plt.figure("ERR_T ref2src") # 图像窗口名称
plt.plot(err_T)
plt.savefig(save_path + '/%s_%s_trans_all_ref2src.jpg' % (start, end))
plt.close()
# plt.show()
np.savetxt(save_path + '/%s_%s_fail_list_ref2src.txt'%(start, end), fail_list)
np.save(save_path + '/%s_%s_err_All_ref2src.npy'%(start, end), err_all)
np.savetxt(save_path + '/%s_%s_err_All_ref2src.txt' % (start, end), err_all)
np.save(save_path + '/%s_%s_trans_all_ref2src.npy'%(start, end), trans_all)
np.savetxt(save_path + '/%s_%s_trans_all_ref2src.txt'%(start, end), np.array(trans_all).reshape(-1,4),fmt='%0.8f')
else:
err_all = [err_R, err_T]
plt.figure("ERR_R src2ref") # 图像窗口名称
plt.plot(err_R)
plt.savefig(save_path + '/%s_%serr_All_src2ref.jpg'%(start, end))
plt.close()
# plt.show()
plt.figure("ERR_T src2ref") # 图像窗口名称
plt.plot(err_T)
plt.savefig(save_path + '/%s_%strans_all_src2ref.jpg' % (start, end))
plt.close()
# plt.show()
np.savetxt(save_path + '/%s_%s_fail_list_src2ref.txt'%(start, end), fail_list)
np.savetxt(save_path + '/%s_%serr_All_src2ref.txt' % (start, end), err_all)
np.save(save_path + '/%s_%serr_All_src2ref.npy'%(start, end), err_all)
np.save(save_path + '/%s_%strans_all_src2ref.npy'%(start, end), trans_all)
np.savetxt(save_path + '/%s_%strans_all_src2ref.txt'%(start, end), np.array(trans_all).reshape(-1,4),fmt='%0.8f')
| 45.135135
| 135
| 0.596287
|
4a090fa554c397d92c52778d4f897d548579be7c
| 4,242
|
py
|
Python
|
setup.py
|
TakeLab/podium
|
11ef32d889e483d4d77a44b61e0b5da956ee3a54
|
[
"BSD-3-Clause"
] | 51
|
2021-03-19T14:14:31.000Z
|
2022-02-18T00:42:51.000Z
|
setup.py
|
TakeLab/podium
|
11ef32d889e483d4d77a44b61e0b5da956ee3a54
|
[
"BSD-3-Clause"
] | 9
|
2021-03-31T15:39:28.000Z
|
2021-04-16T13:28:15.000Z
|
setup.py
|
TakeLab/podium
|
11ef32d889e483d4d77a44b61e0b5da956ee3a54
|
[
"BSD-3-Clause"
] | 1
|
2021-07-26T04:54:18.000Z
|
2021-07-26T04:54:18.000Z
|
"""
TakeLab Podium is an open source library for natural language processing.
Podium accelerates data loading, preprocessing & batching to enable faster development of NLP models.
See http://takelab.fer.hr/podium/ for complete documentation.
"""
import re
from pathlib import Path
from setuptools import find_packages, setup
def _get_version():
with open(Path(__file__).parent / "podium" / "__init__.py", "r", encoding="utf-8") as f:
version = re.search(r'__version__ = \"(.*)\"', f.read()).group(1)
return version
DISTNAME = 'podium-nlp'
VERSION = _get_version()
DOCLINES = __doc__.split('\n')
INSTALL_REQUIRES = [
# for numericalization in batching
"numpy;python_version>='3.7'",
"numpy<=1.19;python_version<'3.7'",
# for improved dataset pickling
"dill",
# for tokenization and data encoded in tree structure
"nltk>=3.0,<3.6",
# for improved csv parsing
"pandas;python_version>='3.7'",
"pandas<1.2.0;python_version<'3.7'",
# for downloading datasets over HTTP
"paramiko",
"requests",
# for models and model selection
"scikit-learn",
# for sparse storage
"scipy;python_version>='3.7'",
"scipy<1.6.0;python_version<'3.7'",
# progress bar in download and model selection
"tqdm",
# for nodes in HierarhicalDataset
"dataclasses;python_version<'3.7'",
]
TESTS_REQUIRE = [
"pytest",
"pytest-cov",
"pytest-mock",
"urllib3",
# for preprocessing (tokenization, hooks, etc.)
"spacy",
"spacy-lookups-data"
]
QUALITY_REQUIRE = [
"black",
"flake8",
"isort",
"docformatter",
]
DATASETS_REQUIRE = [
# to transform CONLL-U datasets to our dataset type
"conllu",
# to support HF Datasets conversion
"datasets",
# to support saving/loading datasets from a disk
"pyarrow>=1.0.0",
# to tokenize the input in the IMDB dataset
"spacy",
]
PREPROC_REQUIRE = [
# for normalization and tokenization
"sacremoses",
# for text cleanup (url removal, currency removal, etc.)
"clean-text",
# for truecasing
"truecase",
# for keyword extraction
"rake-nltk",
]
DOCS_REQUIRE = [
'sphinx',
'sphinx_rtd_theme',
'sphinx-copybutton',
'recommonmark',
'nbformat',
'datasets',
]
EXTRAS_REQUIRE = {
# for training and evaluation of PyTorch models
"torch": ["torch"],
# dependencies for all dataset implementations (including the ones in dataload)
"datasets": DATASETS_REQUIRE,
"docs": DOCS_REQUIRE,
"quality": QUALITY_REQUIRE,
"tests": TESTS_REQUIRE + DATASETS_REQUIRE + PREPROC_REQUIRE,
}
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + QUALITY_REQUIRE
setup(
name=DISTNAME,
version=VERSION,
description=DOCLINES[0],
long_description='\n'.join(DOCLINES),
author="TakeLab",
author_email="takelab@fer.hr",
url="https://github.com/TakeLab/podium",
download_url="https://github.com/TakeLab/podium/tags",
license="BSD-3",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests",
"tests.*",
"examples",
"examples.*",
]
),
package_data={"podium": ["py.typed"]},
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.6",
classifiers=[
# maturity level
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Text Processing",
],
keywords="podium nlp natural-language-processing machine learning",
zip_safe=False,
)
| 25.865854
| 101
| 0.62777
|
4a0910705e434460ea2c25824a54b982db5ebfa2
| 403
|
py
|
Python
|
course/urls.py
|
JumjieYang/TutorPlatform
|
4aa6ffb2393b39ba888fab29f73db69d24f689f8
|
[
"MIT"
] | 3
|
2020-07-01T14:06:01.000Z
|
2022-01-23T23:51:10.000Z
|
course/urls.py
|
Stanleyjhx/TutorPlatform
|
f7aba1f22a7d9a9821351d797b8ec1b4a11f2dd0
|
[
"MIT"
] | 1
|
2020-03-15T02:34:09.000Z
|
2020-03-15T02:34:09.000Z
|
course/urls.py
|
Stanleyjhx/TutorPlatform
|
f7aba1f22a7d9a9821351d797b8ec1b4a11f2dd0
|
[
"MIT"
] | 1
|
2020-05-06T02:53:36.000Z
|
2020-05-06T02:53:36.000Z
|
from django.urls import path
from .views import *
urlpatterns = [
path('course/', CourseCreateView.as_view(),name='course_create'),
path('course/<int:pk>', CourseDetail.as_view(),name='course_detail'),
path('courses/', CourseList.as_view(),name='course_list'),
path('carts/', CartList.as_view(),name='cart_list'),
path('cart/<int:pk>', CartDetail.as_view(),name='cart_detail'),
]
| 31
| 73
| 0.684864
|
4a091186807ff12dd3b4f8d314bdab2ce97f56ac
| 6,889
|
py
|
Python
|
monty/dev.py
|
jmmshn/monty
|
e410311ae3785d14d1dfe8a1f26ebc34786bc35f
|
[
"MIT"
] | null | null | null |
monty/dev.py
|
jmmshn/monty
|
e410311ae3785d14d1dfe8a1f26ebc34786bc35f
|
[
"MIT"
] | 24
|
2021-11-01T13:21:44.000Z
|
2022-03-29T13:23:09.000Z
|
monty/dev.py
|
jmmshn/monty
|
e410311ae3785d14d1dfe8a1f26ebc34786bc35f
|
[
"MIT"
] | null | null | null |
"""
This module implements several useful functions and decorators that can be
particularly useful for developers. E.g., deprecating methods / classes, etc.
"""
import re
import sys
import logging
import warnings
import os
import subprocess
import multiprocessing
import functools
logger = logging.getLogger(__name__)
def deprecated(replacement=None, message=None, category=FutureWarning):
"""
Decorator to mark classes or functions as deprecated,
with a possible replacement.
Args:
replacement (callable): A replacement class or method.
message (str): A warning message to be displayed.
category (Warning): Choose the category of the warning to issue. Defaults
to FutureWarning. Another choice can be DeprecationWarning. NOte that
FutureWarning is meant for end users and is always shown unless silenced.
DeprecationWarning is meant for developers and is never shown unless
python is run in developmental mode or the filter is changed. Make
the choice accordingly.
Returns:
Original function, but with a warning to use the updated class.
"""
def wrap(old):
def wrapped(*args, **kwargs):
msg = "%s is deprecated" % old.__name__
if replacement is not None:
if isinstance(replacement, property):
r = replacement.fget
elif isinstance(replacement, (classmethod, staticmethod)):
r = replacement.__func__
else:
r = replacement
msg += "; use %s in %s instead." % (r.__name__, r.__module__)
if message is not None:
msg += "\n" + message
warnings.warn(msg, category=category, stacklevel=2)
return old(*args, **kwargs)
return wrapped
return wrap
class requires:
"""
Decorator to mark classes or functions as requiring a specified condition
to be true. This can be used to present useful error messages for
optional dependencies. For example, decorating the following code will
check if scipy is present and if not, a runtime error will be raised if
someone attempts to call the use_scipy function::
try:
import scipy
except ImportError:
scipy = None
@requires(scipy is not None, "scipy is not present.")
def use_scipy():
print(scipy.majver)
Args:
condition: Condition necessary to use the class or function.
message: A message to be displayed if the condition is not True.
"""
def __init__(self, condition, message):
"""
:param condition: A expression returning a bool.
:param message: Message to display if condition is False.
"""
self.condition = condition
self.message = message
def __call__(self, _callable):
"""
:param _callable: Callable function.
"""
@functools.wraps(_callable)
def decorated(*args, **kwargs):
if not self.condition:
raise RuntimeError(self.message)
return _callable(*args, **kwargs)
return decorated
def get_ncpus():
"""
.. note::
If you are using Python >= 2.7, multiprocessing.cpu_count() already
provides the number of CPUs. In fact, this is the first method tried.
The purpose of this function is to cater to old Python versions that
still exist on many Linux style clusters.
Number of virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling
userspace-only program. Return -1 if ncpus cannot be detected. Taken from:
http://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-
cpus-in-python
"""
# Python 2.6+
# May raise NonImplementedError
try:
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
pass
# POSIX
try:
res = int(os.sysconf("SC_NPROCESSORS_ONLN"))
if res > 0:
return res
except (AttributeError, ValueError):
pass
# Windows
try:
res = int(os.environ["NUMBER_OF_PROCESSORS"])
if res > 0:
return res
except (KeyError, ValueError):
pass
# jython
try:
from java.lang import Runtime # pylint: disable=import-outside-toplevel
runtime = Runtime.getRuntime()
res = runtime.availableProcessors()
if res > 0:
return res
except ImportError:
pass
# BSD
try:
with subprocess.Popen(["sysctl", "-n", "hw.ncpu"], stdout=subprocess.PIPE) as sysctl:
scstdout = sysctl.communicate()[0]
res = int(scstdout)
if res > 0:
return res
except (OSError, ValueError):
pass
# Linux
try:
res = open("/proc/cpuinfo").read().count("processor\t:") # pylint: disable=R1732
if res > 0:
return res
except IOError:
pass
# Solaris
try:
pseudo_devices = os.listdir("/devices/pseudo/")
expr = re.compile("^cpuid@[0-9]+$")
res = 0
for pd in pseudo_devices:
if expr.match(pd) is not None:
res += 1
if res > 0:
return res
except OSError:
pass
# Other UNIXes (heuristic)
try:
try:
with open("/var/run/dmesg.boot") as f:
dmesg = f.read()
except IOError:
with subprocess.Popen(["dmesg"], stdout=subprocess.PIPE) as dmesg_process:
dmesg = dmesg_process.communicate()[0]
res = 0
while "\ncpu" + str(res) + ":" in dmesg:
res += 1
if res > 0:
return res
except OSError:
pass
logger.warning("Cannot determine number of CPUs on this system!")
return -1
def install_excepthook(hook_type="color", **kwargs):
"""
This function replaces the original python traceback with an improved
version from Ipython. Use `color` for colourful traceback formatting,
`verbose` for Ka-Ping Yee's "cgitb.py" version kwargs are the keyword
arguments passed to the constructor. See IPython.core.ultratb.py for more
info.
Return:
0 if hook is installed successfully.
"""
try:
from IPython.core import ultratb # pylint: disable=import-outside-toplevel
except ImportError:
warnings.warn("Cannot install excepthook, IPyhon.core.ultratb not available")
return 1
# Select the hook.
hook = dict(
color=ultratb.ColorTB,
verbose=ultratb.VerboseTB,
).get(hook_type.lower(), None)
if hook is None:
return 2
sys.excepthook = hook(**kwargs)
return 0
| 29.693966
| 93
| 0.609813
|
4a0913827b4d090791af4277f2a1a69a5a513f29
| 2,218
|
py
|
Python
|
core/utils/form_utils.py
|
Archinowsk/kompassi
|
00f754498d0a5c0a3e4cfa1e4b19620aa99e7cd6
|
[
"CC-BY-3.0"
] | null | null | null |
core/utils/form_utils.py
|
Archinowsk/kompassi
|
00f754498d0a5c0a3e4cfa1e4b19620aa99e7cd6
|
[
"CC-BY-3.0"
] | null | null | null |
core/utils/form_utils.py
|
Archinowsk/kompassi
|
00f754498d0a5c0a3e4cfa1e4b19620aa99e7cd6
|
[
"CC-BY-3.0"
] | null | null | null |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div
def make_field_readonly(field):
if type(field.widget) in [
forms.widgets.CheckboxSelectMultiple,
forms.widgets.CheckboxInput,
]:
field.widget.attrs['disabled'] = True
else:
field.widget.attrs['readonly'] = True
def make_form_readonly(form):
for field in form.fields.values():
make_field_readonly(field)
def initialize_form(FormClass, request, readonly=False, **kwargs):
if not readonly and request.method == 'POST':
form = FormClass(request.POST, **kwargs)
else:
form = FormClass(**kwargs)
if readonly:
make_form_readonly(form)
return form
def initialize_form_set(FormSetClass, request, **kwargs):
if 'readonly' in kwargs:
readonly = kwargs.pop('readonly')
else:
readonly = False
if not readonly and request.method == 'POST':
form_set = FormSetClass(request.POST, **kwargs)
else:
form_set = FormSetClass(**kwargs)
if readonly:
for form in form_set:
for field in form.fields.values():
make_field_readonly(field)
return form_set
def indented_without_label(input, css_class='col-md-offset-3 col-md-9'):
# Checkboxen handled by pypugjs
if isinstance(input, str):
return input
# Submits we need to handle ourselves
else:
return Div(Div(input, css_class='controls {}'.format(css_class)), css_class='form-group')
def make_horizontal_form_helper(helper):
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-3'
helper.field_class = 'col-md-9'
return helper
def horizontal_form_helper():
return make_horizontal_form_helper(FormHelper())
class DateField(forms.DateField):
def __init__(self, *args, **kwargs):
defaults = dict(
widget=forms.DateInput(format='%d.%m.%Y'),
input_formats=(
'%d.%m.%Y',
'%Y-%m-%d'
),
help_text='Muoto: 24.2.1994',
)
my_kwargs = dict(defaults, **kwargs)
super(DateField, self).__init__(*args, **my_kwargs)
| 26.094118
| 97
| 0.638413
|
4a0913b9729954e1e91599266a8b6a87501c45c6
| 172
|
py
|
Python
|
JDjangoDemo/JDjangoDemo/asgi.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | 3
|
2020-12-28T05:09:02.000Z
|
2021-06-23T10:02:03.000Z
|
JDjangoDemo/JDjangoDemo/asgi.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | null | null | null |
JDjangoDemo/JDjangoDemo/asgi.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | null | null | null |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'JDjangoDemo.settings')
application = get_asgi_application()
| 21.5
| 71
| 0.831395
|
4a091516c7adfa1697ba0c224c48ef9804f319ac
| 4,092
|
py
|
Python
|
annotations/post/post_comp.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
annotations/post/post_comp.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
annotations/post/post_comp.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
import sys, json, codecs, gzip
from datetime import datetime
from argparse import ArgumentParser
from app.prepare.read_json import JsonLineReader
from .comp_hets import FamilyDataMiner, CompHetsBatch
#=====================================
class PostAttonationProcess:
def __init__(self, mine_lines = 1000, line_period = 1000):
self.mMineLines = mine_lines
self.mLinePeriod = line_period
self.mBufRecords = []
self.mDataMiner = FamilyDataMiner()
self.mBatch = None
def isOK(self):
return (self.mBatch is not None or
self.mDataMiner is not None)
def _shutDown(self):
self.mDataMiner = None
self.mBufRecords = None
print >> sys.stderr, \
"Family is not complete for compound het evaluation"
def process(self, rec_no, rec_data):
if self.mDataMiner is not None:
self.mDataMiner.feed(rec_data)
family_data = self.mDataMiner.getFamilyData()
if family_data is None:
self.mBufRecords.append((rec_no, rec_data))
if len(self.mBufRecords) >= self.mMineLines:
self._shutDown()
return
self.mBatch = CompHetsBatch(family_data, self.mLinePeriod)
for r_no, r_data in self.mBufRecords:
self.mBatch.process(r_no, r_data)
self.mBufRecords = None
self.mDataMiner = None
if self.mBatch is not None:
self.mBatch.process(rec_no, rec_data)
def finishUp(self):
if self.mBufRecords is not None:
self._shutDown();
return False
if self.mBatch is not None:
return self.mBatch.finishUp()
return False
def report(self, output):
if self.mBatch is not None:
self.mBatch.report(output)
def recIsActive(self, rec_no):
if self.mBatch is not None:
return self.mBatch.recIsActive(rec_no)
return False
def transform(self, rec_no, rec_data):
if self.mBatch is not None:
return self.mBatch.transform(rec_no, rec_data)
return False
#=====================================
if __name__ == '__main__':
sys.stdin = codecs.getreader('utf8')(sys.stdin)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
parser = ArgumentParser()
parser.add_argument("--minelines", type = int, default = 1000,
help="Max count of lines to mine family info")
parser.add_argument("--replines", type = int, default = 1000,
help="Report lines period")
parser.add_argument("-o", "--output",
help="Output name for modified annotated json, .gz expected")
parser.add_argument("source", nargs = 1, help = "Dataset name")
run_args = parser.parse_args()
proc = PostAttonationProcess(run_args.minelines, run_args.replines)
with JsonLineReader(run_args.source[0]) as inp:
for rec_no, rec_data in enumerate(inp):
proc.process(rec_no, rec_data)
proc.finishUp()
if not proc.isOK():
print >> sys.stderr, "Terminated"
sys.exit()
if not run_args.output:
proc.report(sys.stdout)
sys.exit()
time_start_save = datetime.now()
print >> sys.stderr, "Save result to", run_args.output, \
"at", time_start_save
with gzip.open(run_args.output, "wb") as outp:
with JsonLineReader(run_args.source[0], False) as inp:
for rec_no, rec_line in enumerate(inp):
if proc.recIsActive(rec_no):
rec_data = json.loads(rec_line)
proc.transform(rec_no, rec_data)
rec_line = json.dumps(rec_data, ensure_ascii = False)
print >> outp, rec_line.encode("utf-8")
if rec_no % run_args.replines == 0:
print >> sys.stderr, "\r", rec_no, "lines...",
time_done_save = datetime.now()
print >> sys.stderr, "Done at", time_done_save, \
"for", time_done_save - time_start_save
| 36.212389
| 73
| 0.605083
|
4a091551ac7a3ee3d5c59f561392133f4b64f366
| 6,363
|
py
|
Python
|
reparameterizers/beta.py
|
jramapuram/vae
|
6000c71ceae0ae7de3fae4a61691ef05ad13a4b5
|
[
"MIT"
] | 1
|
2019-10-15T16:17:06.000Z
|
2019-10-15T16:17:06.000Z
|
reparameterizers/beta.py
|
jramapuram/vae
|
6000c71ceae0ae7de3fae4a61691ef05ad13a4b5
|
[
"MIT"
] | null | null | null |
reparameterizers/beta.py
|
jramapuram/vae
|
6000c71ceae0ae7de3fae4a61691ef05ad13a4b5
|
[
"MIT"
] | 2
|
2019-05-29T15:22:52.000Z
|
2019-06-06T20:15:30.000Z
|
# coding: utf-8
from __future__ import print_function
import torch
import torch.nn as nn
import torch.distributions as D
from torch.autograd import Variable
from helpers.utils import zeros_like, same_type
# from helpers.utils import eps as eps_fn
class Beta(nn.Module):
    # Reparameterized Beta distribution module: the incoming logit vector is
    # split in half; sigmoid of the first half is concentration1, sigmoid of
    # the second half is concentration2.
    def __init__(self, config):
        """ Beta distribution.
        :param config: argparse
        :returns: Beta module
        :rtype: nn.Module
        """
        super(Beta, self).__init__()
        self.is_discrete = False
        self.config = config
        # Feature size of the incoming logits; must be even so it can be
        # split into the two concentration halves.
        self.input_size = self.config['continuous_size']
        assert self.config['continuous_size'] % 2 == 0
        self.output_size = self.config['continuous_size'] // 2
    def get_reparameterizer_scalars(self):
        """ Returns any scalars used in reparameterization.
        :returns: dict of scalars (none for Beta)
        :rtype: dict
        """
        return {}
    def prior(self, batch_size, **kwargs):
        """ Returns a Kerman beta prior.
        Kerman, J. (2011). Neutral noninformative and informative
        conjugate beta and gamma prior distributions. Electronic
        Journal of Statistics, 5, 1450-1470.
        :param batch_size: the number of prior samples
        :returns: prior
        :rtype: torch.Tensor
        """
        # Both concentrations fixed at 1/3 (Kerman's "neutral" prior).
        conc1 = Variable(
            same_type(self.config['half'], self.config['cuda'])(
                batch_size, self.output_size
            ).zero_() + 1/3
        )
        conc2 = Variable(
            same_type(self.config['half'], self.config['cuda'])(
                batch_size, self.output_size
            ).zero_() + 1/3
        )
        return D.Beta(conc1, conc2).sample()
    def _reparametrize_beta(self, conc1, conc2, force=False):
        """ Internal function to reparameterize beta distribution using concentrations.
        :param conc1: concentration 1
        :param conc2: concentration 2
        :param force: use rsample (pathwise gradient) even outside training
        :returns: reparameterized sample, distribution params
        :rtype: torch.Tensor, dict
        """
        if self.training or force:
            # rsample gives a differentiable (pathwise) sample.
            beta = D.Beta(conc1, conc2).rsample()
            return beta, {'conc1': conc1, 'conc2': conc2}
        # can't use mean like in gaussian because beta mean can be > 1.0
        return D.Beta(conc1, conc2).sample(), {'conc1': conc1, 'conc2': conc2}
    def reparmeterize(self, logits, force=False):
        # NOTE: method name misspelling ("reparmeterize") is the established
        # API — forward() and external callers use it; do not rename.
        """ Given logits reparameterize to a beta using
        first half of features for mean and second half for std.
        :param logits: unactivated logits
        :returns: reparameterized tensor (if training), param dict
        :rtype: torch.Tensor, dict
        """
        # eps = eps_fn(self.config['half'])
        # determine which dimension we slice over
        dim_map = {
            2: -1,  # [B, F]
            3: -1,  # [B, T, F] --> TODO: do we want to slice time or feature?
            4: 1    # [B, C, H, W]
        }
        assert logits.dim() in dim_map, "unknown number of dims for isotropic gauss reparam"
        dim2slice = dim_map[logits.dim()]
        # Compute feature size and do some sanity checks
        feature_size = logits.shape[dim2slice]
        assert feature_size % 2 == 0, "feature dimension not divisible by 2 for mu/sigma^2."
        assert feature_size // 2 == self.output_size, \
            "feature_size = {} but requested output_size = {}".format(feature_size, self.output_size)
        # Slice the first chunk for concentration1 and the second for concentration2
        # (sigmoid keeps each concentration in (0, 1)).
        conc1 = torch.sigmoid(torch.narrow(logits, dim2slice, 0, feature_size // 2))
        conc2 = torch.sigmoid(torch.narrow(logits, dim2slice, feature_size // 2, feature_size // 2))
        return self._reparametrize_beta(conc1, conc2, force=force)
    def _kld_beta_kerman_prior(self, conc1, conc2):
        """ Internal function to do a KL-div against the prior.
        NOTE: despite the name, the active prior here uses concentration 1.1;
        the Kerman 1/3 version is commented out below.
        :param conc1: concentration 1.
        :param conc2: concentration 2.
        :returns: batch_size tensor of kld against prior.
        :rtype: torch.Tensor
        """
        # prior = D.Beta(zeros_like(conc1) + 1/3,
        #                zeros_like(conc2) + 1/3)
        prior = D.Beta(zeros_like(conc1) + 1.1,
                       zeros_like(conc2) + 1.1)
        beta = D.Beta(conc1, conc2)
        # Sum KL over the feature dimension -> one value per batch element.
        return torch.sum(D.kl_divergence(beta, prior), -1)
    def kl(self, dist_a, prior=None):
        # KL(dist_a || prior); falls back to the fixed Kerman-style prior
        # when no explicit prior distribution is provided.
        if prior is None:  # use standard reparamterizer
            return self._kld_beta_kerman_prior(
                dist_a['beta']['conc1'], dist_a['beta']['conc2']
            )
        # we have two distributions provided (eg: VRNN)
        return torch.sum(D.kl_divergence(
            D.Beta(dist_a['beta']['conc1'], dist_a['beta']['conc2']),
            D.Beta(prior['beta']['conc1'], prior['beta']['conc2'])
        ), -1)
    def mutual_info(self, params, eps=1e-9):
        """ I(z_d; x) ~ H(z_prior, z_d) + H(z_prior)
        :param params: parameters of distribution
        :param eps: tolerance (currently unused in the body)
        :returns: batch_size mutual information (prop-to) tensor.
        :rtype: torch.Tensor
        """
        z_true = D.Beta(params['beta']['conc1'],
                        params['beta']['conc2'])
        z_match = D.Beta(params['q_z_given_xhat']['beta']['conc1'],
                         params['q_z_given_xhat']['beta']['conc2'])
        # KL used as a proxy for the cross-entropy term, scaled by config.
        kl_proxy_to_xent = torch.sum(D.kl_divergence(z_match, z_true), dim=-1)
        return self.config['continuous_mut_info'] * kl_proxy_to_xent
    def log_likelihood(self, z, params):
        """ Log-likelihood of z induced under params.
        :param z: inferred latent z
        :param params: the params of the distribution
        :returns: log-likelihood (element-wise, not reduced)
        :rtype: torch.Tensor
        """
        return D.Beta(params['beta']['conc1'],
                      params['beta']['conc2']).log_prob(z)
    def forward(self, logits, force=False):
        """ Returns a reparameterized beta sample and its params.
        :param logits: unactivated logits.
        :returns: reparam tensor and params.
        :rtype: torch.Tensor, dict
        """
        z, beta_params = self.reparmeterize(logits, force=force)
        # Scalar means are kept alongside the full tensors (e.g. for logging).
        beta_params['conc1_mean'] = torch.mean(beta_params['conc1'])
        beta_params['conc2_mean'] = torch.mean(beta_params['conc2'])
        return z, {'z': z, 'logits': logits, 'beta': beta_params}
| 35.747191
| 101
| 0.598146
|
4a09170d962c1737d9c054a991144eb34640ca69
| 73,646
|
py
|
Python
|
nwb/find_links.py
|
ajtritt/nwb-api-python
|
927ac74e1f84b694bd034774bb21aa1ff16b303f
|
[
"BSD-3-Clause"
] | null | null | null |
nwb/find_links.py
|
ajtritt/nwb-api-python
|
927ac74e1f84b694bd034774bb21aa1ff16b303f
|
[
"BSD-3-Clause"
] | null | null | null |
nwb/find_links.py
|
ajtritt/nwb-api-python
|
927ac74e1f84b694bd034774bb21aa1ff16b303f
|
[
"BSD-3-Clause"
] | null | null | null |
# program to find hdf5 links
import re
import sys
import h5py
import copy
import numpy as np
import operator
import pprint
pp = pprint.PrettyPrinter(indent=4)
# def get_group_info(f, h5_node):
# """ Return information about h5_node"""
# paths = h5py.h5i.h5_node.name
# gi = f.get(path, getlink=True)
# # import pdb; pdb.set_trace()
# # gi2 = h5py.h5g.get_objinfo(h5_group, follow_link=False)
# gi = str(gi)
# while True:
# match = re.match(r'^<h5py\._hl\.group\.HardLink object at (0x[0-9a-f]+)>$', gi)
# if match:
# loc = match.group(1)
# info = {'type':'hard', 'loc':loc}
# break
# match = re.match(r'^<SoftLink to "([^"]+)">$', gi)
# if match:
# loc = match.group(1)
# info = {'type':'soft', 'loc':loc}
# break;
# print "Unable to determine link type: gi='%s'" % gi
# sys.exit(1)
# return info
def show_links(links):
    """ Display different structures in links: per-type link-groups
    (with the locations that have multiple paths) followed by the
    soft-link source -> target table."""
    print "********* Link groups **************"
    lg = links['lg']
    for type in lg:
        print "****** %s links:" % type
        num_locations = len(lg[type].keys())
        print "%i locations with %s link" % (num_locations, type)
        if num_locations == 0:
            continue
        multi_locs = {}
        # find locations with more than one:
        for loc in lg[type]:
            paths = lg[type][loc]
            if len(paths) > 1 or type == "soft":  # soft links always have more than one since target is also
                multi_locs[loc] = paths
        if multi_locs:
            print "%i have multiple paths:" % len(multi_locs)
            pp.pprint(multi_locs)
        else:
            print "none have multiple paths"
    # soft links
    sl = links['sl_from_to']
    print "***** %i soft links. (from -> to) below:" % len(sl)
    pp.pprint(sl)
def add_item(dict, key, val):
    """ Append *val* to the list stored at dict[key], creating the list
    on first use.
    :param dict: dictionary mapping keys to lists (parameter name kept
        for backward compatibility even though it shadows the builtin)
    :param key: key to append under
    :param val: value appended to the list at dict[key]
    """
    # setdefault replaces the manual "if key not in dict" membership
    # check with a single lookup.
    dict.setdefault(key, []).append(val)
def save_info(objtype, objno, path, target, links):
    """ Save information about one hdf5 object in the links structure.
    :param objtype: 'group', 'dataset', 'link', 'type' or 'ext_link'
    :param objno: hdf5 object number (location key for hard links)
    :param path: full path of the node
    :param target: link target (soft/ext links only, else unused)
    :param links: links structure; modified in place
    """
    if objtype in ('group', 'dataset'):
        # Real objects count toward the totals and join a hard link-group
        # keyed by their object number.
        links['count'][objtype] += 1
        type = 'hard'
        loc = str(objno)
        add_item(links['lg'][type], loc, path)
    elif objtype == "link":
        # Soft link: group keyed by the target path.
        type = "soft"
        add_item(links['lg'][type], target, path)
        # save soft link from and to
        links['sl_from_to'][path] = target
    elif objtype == "type":
        # ignore hdf5 type type
        pass
    elif objtype == 'ext_link':
        # External link: target is "file\npath".
        type = 'ext'
        add_item(links['lg'][type], target, path)
    else:
        print "Unknown object type: '%s'" % objtype
        sys.exit(1)
# Map from the integer constants returned by h5g.get_objtype_by_idx to the
# type names used throughout this module.
h5_ntypes = { # hdf5 node types, from: http://api.h5py.org/h5g.html
    h5py.h5g.LINK: 'link',
    h5py.h5g.GROUP: 'group',
    h5py.h5g.DATASET: 'dataset',
    h5py.h5g.TYPE: 'type',
    4: 'ext_link' # a guess
    }
def find_links(fp, links):
    """ Find links in hdf5 file. fp is the pointer to a h5py file. links is structure to
    store results. Builds links['lg'], ("lg stands for "location group), which maps
    each location to all the paths associated with that location. Structure is:
    { 'type1' : {'loc_1': ['path1', 'path2', ...], 'loc_2': ['path3', 'path4', ...]
      'type2' : { ... }
    where type is type of link, "hard", "soft", "ext" (for external),
    loc is location, either hex address if hard, target path if soft,
    path are paths to nodes that have the same location.
    Also makes a dictionary in links, links['sl_from_to'] (sl stands for 'soft link').
    This stores the source and target for each soft link. It is used
    in function merge_soft_links to merge soft link groups that point
    to the same target through a chain of soft links."""
    global h5_ntypes
    # Use the h5py low-level API so we can inspect links without following them.
    fid = fp.id
    root = h5py.h5g.open(fid, '/')
    np = (root, '/') # np - node & path, h5 object and path
    # groups_to_visit = [ f["/"],]
    # Breadth-first traversal: pop from the front, push child groups at the end.
    groups_to_visit = [ np,]
    while groups_to_visit:
        np = groups_to_visit.pop(0)
        h5g, path = np
        for i in range(h5g.get_num_objs()):
            mname = h5g.get_objname_by_idx(i)
            mtype = h5_ntypes[h5g.get_objtype_by_idx(i)]
            try:
                minfo = h5py.h5g.get_objinfo(h5g, mname)
            except TypeError:
                # get_objinfo raises TypeError for a dangling external link.
                objno = "ext_link:dangling" # make a fake object number
            else:
                objno = minfo.objno
            if mtype == 'link':
                # target of symbolic link
                target = h5g.get_linkval(mname)
            elif mtype == 'ext_link':
                # target of external link
                target = "\n".join(h5g.links.get_val(mname))
            else:
                target = None
            if path == "/":
                full_path = "/" + mname
            else:
                full_path = path + "/" + mname
            save_info(mtype, objno, full_path, target, links)
            if mtype == 'group':
                # Queue subgroup for traversal.
                mh5g = h5py.h5g.open(h5g, mname)
                mnp = (mh5g, full_path)
                groups_to_visit.append(mnp)
# This was used in compute_autogen, but no longer needed since link_info can be used
# to determine if there is an external link.
# def get_h5_node_info(f, path):
# """Get type, and if applicable, target for node at path path.
# This requires finding the object with the name, then getting the type. The h5py
# low-level interface is used for this. parent is the h5py.Group parent
# """
# global h5_ntypes
# # get h5py low level object for parent group
# parent_path, name = f.get_name_from_full_path(path)
# parent_group = f.file_pointer[parent_path]
# h5g = parent_group.id
# for i in range(h5g.get_num_objs()):
# mname = h5g.get_objname_by_idx(i)
# if mname == name:
# mtype = h5_ntypes[h5g.get_objtype_by_idx(i)]
# # minfo = h5py.h5g.get_objinfo(h5g, mname)
# if mtype == 'link':
# # target of symbolic link
# target = h5g.get_linkval(mname)
# elif mtype == 'ext_link':
# # target of external link
# target = "\n".join(h5g.links.get_val(mname))
# else:
# target = None
# return (mtype, target)
# print "%s: get_h5_node_info, did not find '%s'" % (parent.name, name)
# sys.exit(1)
def merge_soft_links(links):
    """ Collapse soft link-groups whose key (target) is itself the source
    of a further soft link, so every remaining group keys on its final
    destination.  Modifies links in place."""
    soft_groups = links['lg']['soft']
    from_to = links['sl_from_to']
    merged_away = []
    for start in list(soft_groups):
        # Follow the chain of soft links from this group's target to its
        # final destination, guarding against cycles.
        final = start
        hops = 0
        while final in from_to:
            final = from_to[final]
            hops += 1
            if hops > 100:
                sys.exit("Apparent loop in symbolic links (target='%s'). Aborting" % final)
        if final != start:
            # Fold this group's paths into the group at the final target
            # and remember the now-redundant key.
            soft_groups[final].extend(soft_groups[start])
            merged_away.append(start)
    # Drop groups that were merged into another.
    for start in merged_away:
        del soft_groups[start]
def test_links():
    """ Build a small fixture links structure for exercising
    merge_soft_links."""
    soft_groups = {
        "e": ["a", "b", "c"],
        "b": ["d"],
        "d": ["f"],
    }
    chain = {"a": "e", "b": "e", "c": "e", "d": "b", "f": "d"}
    return {"lg": {"soft": soft_groups}, "sl_from_to": chain}
def make_path2loc(lgrps):
    """ Invert a location-groups mapping: return a dict sending every
    path to the location key of the group containing it.
    :param lgrps: {'loc1': ['path1', 'path2', ...], 'loc2': [...], ...}
    :returns: {'path1': 'loc1', 'path2': 'loc1', ...}
    """
    return dict(
        (path, loc)
        for loc in lgrps
        for path in lgrps[loc])
def prune_hard_links(links):
    """ Hard links that are made at a node above a leaf node in the tree
    create hard links for all nodes below to the leafs. These hard links
    are not useful for deducing where links were made when reading a file.
    This function removes them to reduce the number of hard link locations
    stored (reducing memory required) and to make the read process
    more efficient."""
    # NOTE(review): relies on get_common_basename / trim_common_basename,
    # which are defined elsewhere in this file.
    hl = links['lg']['hard']
    # Step 0. Remove any hard link locations that have only one path
    # (These are not part of an added hard link)
    for loc in hl.keys():
        if len(hl[loc]) == 1:
            del hl[loc]
    # Step 1. Make path2loc which maps paths to the location identifier
    # This is just the reverse of the "location group" dictionaries mapping
    # locations to paths at the location
    path2loc = make_path2loc(hl)
    # Step 2. Get list of parent location(s) for each location as dictionary:
    # loc_parents = { 'loc1': [parent1, parent2, ..], 'loc2': [] ... }. This is
    # done by stripping off suffix to path and finding any parent locations that
    # use the prefix. All paths in the same location should have the same
    # suffix if they descend from the same parent group. First check for that.
    loc_parents = {}
    # print "before pruning, hard link groups are:"
    # pp.pprint(shorten_dict(hl))
    for loc in hl:
        suffix = get_common_basename(hl[loc])
        if not suffix:
            # suffixes do not match, this group is not from the same parent group. Don't save
            continue
        # suffixes match, now build list of parent group locations
        parent_paths = trim_common_basename(hl[loc], suffix)
        pl_list = []
        for parent_path in parent_paths:
            # A parent path not in path2loc has no hard link-group; record None.
            parent_loc = path2loc[parent_path] if parent_path in path2loc else None
            if parent_loc not in pl_list:
                pl_list.append(parent_loc)
        loc_parents[loc] = pl_list
    # print "parent locations ="
    # pp.pprint(loc_parents)
    # Step 3. Prune any location groups that have only one parent group. These are
    # are the location groups that can be ignored.
    pruned={}
    for loc in loc_parents:
        if len(loc_parents[loc]) == 1:
            # this location has only one parent location, and all paths have the same suffix
            # prune this location group if it has not already been removed
            if loc in hl:
                pruned[loc] = hl[loc]
                del hl[loc]
    # print "pruned %i hard link location(s):" % len(pruned)
    # pp.pprint(shorten_dict(pruned))
    # print "After pruning, hard link groups are:"
    # pp.pprint(shorten_dict(hl))
def merge_soft_and_hard_links(links):
    """ Unify link-groups that really form one group: when the target of
    a soft link-group is also a member of a hard link-group, both groups
    name the same hdf5 node.  The combined group is kept as a 'soft'
    group under its original target; the hard group is deleted.
    Modifies links in place."""
    hard_groups = links['lg']['hard']
    soft_groups = links['lg']['soft']
    # Map every path in a hard link-group back to its location key.
    hard_path2loc = make_path2loc(hard_groups)
    for target, soft_paths in soft_groups.items():
        if target not in hard_path2loc:
            continue
        # The soft target sits inside a hard link-group: absorb that
        # group's paths into the soft group and remove the hard group.
        hard_loc = hard_path2loc[target]
        soft_paths.extend(hard_groups.pop(hard_loc))
def make_path2lg(links):
    """ Build links['path2lg']: for each link type ('hard', 'soft',
    'ext') a dict mapping every path in a link-group of that type to the
    location key of its group.  Used by h5gate when reading a file.
    Modifies links in place."""
    links['path2lg'] = dict(
        (link_type, make_path2loc(links['lg'][link_type]))
        for link_type in ('hard', 'soft', 'ext'))
def shorten_list(l, max_length=15):
    """ Return *l* itself when it is short enough; otherwise a copy of
    its first max_length elements with a trailing "...N total" marker."""
    if len(l) <= max_length:
        return l
    clipped = l[0:max_length]
    clipped.append("...%i total" % len(l))
    return clipped
def shorten_dict(d, max_length=20):
    """ Return an abridged copy of dictionary *d* for display: keep at
    most max_length keys, shorten list/tuple values via shorten_list,
    recurse into nested dicts, and add a "..." entry recording the true
    size when keys were dropped.
    :param d: dictionary to abridge
    :param max_length: maximum number of keys to keep
    :returns: new abridged dictionary
    """
    sd = {}
    # list(d) works on both Python 2 and 3; the previous d.keys()[0:n]
    # fails on Python 3 where keys() returns a non-subscriptable view.
    for key in list(d)[0:max_length]:
        value = d[key]
        if isinstance(value, (list, tuple)):
            new_value = shorten_list(value)
        elif isinstance(value, dict):
            new_value = shorten_dict(value, max_length)
        else:
            new_value = value
        sd[key] = new_value
    if len(d) > max_length:
        sd["..."] = "(%i total)" % len(d)
    return sd
def show_stats(links):
    """ Display length of various items in the links structure, then an
    abridged dump of the structure itself."""
    # Sizes of the main link structures (hard/soft groups and path maps).
    hlg = len(links['lg']['hard'])
    slg = len(links['lg']['soft'])
    hp = len(links['path2lg']['hard'])
    sp = len(links['path2lg']['soft'])
    ft = len(links['sl_from_to'])
    print "Num groups: %i hard, %i soft" % (hlg, slg)
    print "path2lg: %i hard, %i soft" % (hp, sp)
    print "Soft link from-to: %i" % ft
    # Abridged copy so large files stay readable when pretty-printed.
    slinks = shorten_dict(links)
    print "links is:"
    pp.pprint(slinks)
def initialize():
    """ Build and return an empty links structure; see function "find"
    for the meaning of each entry."""
    links = {}
    # Per-link-type tables: hard / soft / ext.
    for key in ('lg', 'path2lg'):
        links[key] = {'hard': {}, 'soft': {}, 'ext': {}}
    # Tables filled in while a file is being read.
    for key in ('targets_created', 'missing_links'):
        links[key] = {'hard': {}, 'soft': {}}
    links['sl_from_to'] = {}
    links['count'] = {'group': 0, 'dataset': 0}
    return links
def find(fp, links):
    """ Scan the hdf5 file open at *fp* for all links (hard, soft and
    external) and populate *links*, which must have been produced by
    initialize().  Entries filled in:
    'lg' - link-groups per type: {type: {loc: [path, ...]}} where loc is
        the object-number string for hard links, the target path for soft
        links, and the file name and path joined by a newline for
        external links; the paths share that location.
    'sl_from_to' - source path -> target path for every soft link; used
        to collapse chains of soft links into one group.
    'path2lg' - per type, a map from each path in a link-group to the
        location key of its group.
    'targets_created' - filled in later while reading: link-group
        location -> path of the node created as the group's target.
    'missing_links' - filled in later while reading: link-group
        location -> paths of nodes whose link target did not exist yet
        when they were read (resolved by fill_missing_links).
    'count' - number of groups and datasets encountered.
    """
    # Raw scan of the file, then derive the secondary structures.
    find_links(fp, links)
    build_links_dicts(links)
def build_links_dicts(links):
    """ Derive the secondary link structures from the raw link records
    collected by save_info.  Called in two situations: (1) by "find"
    after scanning an existing file, before its nodes are interpreted;
    (2) by h5gate.close when writing a file, so autogen specifications
    can be evaluated from the links recorded via save_link_info.
    Steps: collapse soft-link chains, drop redundant descendant hard
    links, merge soft/hard groups that name the same node, then build
    the path -> link-group maps.  Modifies links in place."""
    merge_soft_links(links)
    prune_hard_links(links)
    merge_soft_and_hard_links(links)
    make_path2lg(links)
def deduce_link_info(f, full_path, type, sdef):
    """ ** This function only called when reading a file **
    Determine if hdf5 node is associated with a link (as either a source
    or destination) and if so save information needed to create the link. (See
    routine "find" in file "find_links.py" for description of "f.links" structures
    used for this). Return "link_info" structure if this node is a link (source). Also
    saves information about link if it's a destination (and returns "None").
    Inputs: f - file object from h5gate, full_path - full path to node, type - type
    of node ('group' or 'dataset'), sdef - contains structure definition of node.
    Three cases:
    1. If node is not in a "link group" or is not the location of a soft link-group (i.e.
       the target) it is not part of a link (either source or
       destination). Save no information. Return link_info = None.
    2. Node is the source of a link. Two cases:
       2-a. The target of the link (node in node_tree) has already been created. (i.e.
            is in "f.links.targets_created". Return link_info = { Node: <target_node> }
       2-b. The target of the link has not been created. Save path to node and loc of
            link-group in "f.links.missing_links" so can be filled in later after
            the target is created. Return link_info = { Node: None }
    3. Node is the destination of a link. Save path to node and link-group location
    ---
    To determine if node is a source or destination of a link (needed for steps 2 & 3 above):
    (D1) If the node is in a soft-link group, the target path is already known. (It's the
         location key for the soft_link group). If the path to this node is the location
         for a soft link-group, then this node is the link destination; otherwise it is a link
         source.
    - If the node is in a hard-link group: (note: group here does not refer to h5gate group or
      hdf5 group. It referes to the list of nodes that share the same location, a link-group).
      -(D2) and if the target of the hard-link group has already been created; then this
            node should not match the path of that target (if it does something is wrong;
            system error). and thus this node will be a source.
      -(D3) if the target of the hard-link group has not already been created; then, given
            no other information, it's impossible to determine if this is the source or not.
            However, fortunately we do have additional information: the definition of the
            node in the specification. If the definition of the node includes a "link"
            specification, then:
            -(D4) If the node is a group, and the link specification includes "must_be_link"
                  set to True. This this node must be a link source. Otherwise, assume
                  that this link is the target.
            -(D5) If the node is a dataset, and the link specification is included, and the
                  "data_type" specification is not included, then this node must be a link
                  source (i.e. a link). Otherwise assume it is a target.
    """
    link_inf = get_link_inf(f, full_path)
    if not link_inf:
        # path is not in a hard or soft link-group, so is not part of a link (case 1 above)
        return None
    link_type = link_inf['link_type']
    if link_type == "ext":
        # external link: loc encodes "file\npath"; return it directly.
        file, path = link_inf['loc'].split('\n')
        link_info = {"extlink": (file, path)}
        return link_info
    loc = link_inf['loc']
    # Now determine if node is source or target. (Steps "D1" to "D5" above).
    if link_type == 'soft':
        is_source = link_inf['is_source'] # D1 above
    else:
        # link_type = 'hard' if full_path in f.links['path2lg']['hard'] else (
        #     'soft' if (full_path in f.links['path2lg']['soft']
        #     or full_path in f.links['lg']['soft']) else None)
        # if link_type is None:
        #     # node is not in a hard or soft link-group, so is not part of a link. (Case 1 above).
        #     return None
        # # Now determine if node is source or target. (Steps "D1" to "D5" above).
        # if link_type == 'soft':
        #     # soft link. This is the target if it is the location for a soft link group (D1 above)
        #     is_source = not full_path in f.links['lg']['soft']
        #     loc = f.links['path2lg']['soft'][full_path] if is_source else full_path
        # else:
        #     # link_type is hard
        #     loc = f.links['path2lg']['hard'][full_path] # location (key) of link-group
        # Hard link: decide source vs target (D2/D3 above).
        if loc in f.links['targets_created']['hard']:
            # target exists (D2 above)
            target = f.links['targets_created']['hard'][loc]
            assert full_path != target, "Unexpected match between full_path and target: '%s'" %target
            is_source = True
        else:
            # target does not exist (D3 above). Must look at definition to determine if source or target.
            link_spec = sdef['df']['link'] if 'link' in sdef['df'] else None
            # assume is source if there is a link spec
            is_source = True if link_spec is not None else False
            # if type == 'group':
            #     # node type is group (D4 above)
            #     is_source = (link_spec and 'must_be_link' in link_spec and link_spec['must_be_link'])
            # else:
            #     # node type is dataset (D5 above)
            #     is_source = link_spec and 'data_type' not in sdef['df']
    # now know if node is source or target of link. Also have loc of link group. proceed to case 2 above.
    if is_source:
        if loc in f.links['targets_created'][link_type]:
            # target has been created (2-a above)
            target = f.links['targets_created'][link_type][loc]
            target_node = f.path2node[target]
            link_info = { 'node': target_node }
        else:
            # target has not been created (2-b above)
            # save this node in missing links
            add_item(f.links['missing_links'][link_type], loc, full_path)
            link_info = {'node': None}
        return link_info
    else:
        # node is destination of a link. Save in targets_created (3 above)
        f.links['targets_created'][link_type][loc] = full_path
        # return None so this node will be created (not a link).
        return None
def get_link_inf(f, path):
    """ Classify *path* with respect to the links recorded in f.links.
    Returns None when the path is not part of any link; otherwise a dict:
      link_type - 'hard', 'soft' or 'ext'
      loc       - key of the owning link-group in f.links['lg'][link_type]
      is_source - soft links only: True unless *path* is the group target
    Note: named get_link_inf (not "get_link_info") because the returned
    dictionary differs from the Node class "link_info" dict.
    """
    links = f.links
    if path in links['path2lg']['hard']:
        # Hard link: loc is the object-number key of the link-group.
        return {'link_type': 'hard', 'loc': links['path2lg']['hard'][path]}
    if path in links['path2lg']['soft'] or path in links['lg']['soft']:
        # Soft link: a group's key IS its target path, so this node is
        # the target exactly when it keys a soft link-group.
        if path in links['lg']['soft']:
            return {'link_type': 'soft', 'loc': path, 'is_source': False}
        return {'link_type': 'soft',
            'loc': links['path2lg']['soft'][path],
            'is_source': True}
    if path in links['path2lg']['ext']:
        # External link: loc is the file name and path joined by a newline.
        return {'link_type': 'ext', 'loc': links['path2lg']['ext'][path]}
    return None
def get_common_links(f, path):
    """ Return the sorted list of node paths (including *path*) that
    share a link with *path*.  Returns None when path is not part of any
    link, or when at most one of the linked paths actually exists as a
    node (possible for an external link with no companions)."""
    info = get_link_inf(f, path)
    if not info:
        # Not part of any hard, soft or external link-group.
        return None
    group_paths = f.links['lg'][info['link_type']][info['loc']]
    if info['link_type'] == "hard":
        candidates = group_paths
    else:
        # Soft/ext: the group key is the target path; add it to the
        # members without mutating the stored list.
        candidates = group_paths + [info['loc']]
    # Hard link-groups can name paths never materialised in the node tree,
    # so keep only paths that exist.
    existing = [p for p in candidates if p in f.path2node]
    if len(existing) == 1:
        # A single link is not "common" with anything.
        return None
    return sorted(existing)
def get_nodes_from_path_template(starting_node, path_template):
    """ Return every node under *starting_node* whose relative path
    matches *path_template*.  The template may contain variable-named
    components (ids in angle brackets per the structure specification)."""
    parts = path_template.strip('/').split('/')
    return gnfpt(starting_node, parts)
def gnfpt(node, path_parts):
    """ Recursive worker for get_nodes_from_path_template: return the list of
    all nodes reachable from 'node' by following the member ids named in list
    path_parts.  The special id "<*>" matches every member.  Used for autogen.
    Exits the program if a non-wildcard path part is not a member of the
    current node."""
    if not path_parts:
        # no path parts, must be at leaf (end of recursion)
        return [node,]
    pp = path_parts[0] # path part
    if pp == "<*>":
        # wildcard: recurse into every created instance of every member id
        found_nodes = []
        for id in node.mstats:
            for mnode in node.mstats[id]['created']:
                found_nodes = found_nodes + gnfpt(mnode, path_parts[1:])
    else:
        # not a wild card
        # need to check for both id without a slash (dataset) and id with slash (group)
        ppg = pp + "/" # groups have trailing slash
        id = pp if pp in node.mstats else (ppg if ppg in node.mstats else None)
        if not id:
            # Python 2 print statement: the parenthesized string is formatted
            # by the trailing % before printing
            print ("Unable to find member id %s in autogen at: %s\n"
                   " available members are: %s") % (pp, node.full_path, node.mstats.keys())
            # import pdb; pdb.set_trace()
            sys.exit(1)
        # found member in mstats. Get nodes created for it and recurse
        found_nodes = []
        for mnode in node.mstats[id]['created']:
            found_nodes = found_nodes + gnfpt(mnode, path_parts[1:])
    return found_nodes
def filter_autogen_targets(nodes, tsig):
    """ Return the subset of nodes that match the autogen target signature
    tsig.  An empty / false tsig matches every node."""
    if not tsig:
        return nodes
    return [node for node in nodes if node_matches_tsig(node, tsig)]
def node_matches_tsig(node, tsig):
    """ Return True if node matches autogen signature 'tsig'.  tsig may
    contain key 'type' ("group" or "dataset"; when absent either matches)
    and key 'attrs', a dict of attribute ids mapped to the values they must
    have on the node."""
    if 'type' in tsig and node.sdef['type'] != tsig['type']:
        return False
    # type is acceptable (or unspecified); now each listed attribute that is
    # present on the node must have a non-empty, matching value
    na = node.attributes
    if 'attrs' in tsig:
        for aid in tsig['attrs']:
            if aid not in na:
                continue
            ainfo = na[aid]
            value = ainfo['nv'] if 'nv' in ainfo else (
                ainfo['value'] if 'value' in ainfo else None)
            if not value or not values_match(value, tsig['attrs'][aid]):
                return False
    # everything matches
    return True
def fill_missing_links(f):
    """ Fill in any links that were missing targets at time link source was read in.
    For every pending entry in f.links['missing_links'] whose target was
    eventually created, set each source node's link_info['node'] to the target
    node and validate the link.  Any targets still missing after the scan are
    reported and the program exits."""
    missing_targets = {'hard': [], 'soft': []}
    for link_type in ('hard', 'soft'):
        for loc in f.links['missing_links'][link_type]:
            from_paths = f.links['missing_links'][link_type][loc]
            if loc not in f.links['targets_created'][link_type]:
                # target never materialized; remember for the error report below
                missing_targets[link_type].append(loc)
            else:
                target_path = f.links['targets_created'][link_type][loc]
                target_node = f.path2node[target_path]
                for from_path in from_paths:
                    from_node = f.path2node[from_path]
                    assert from_node.link_info['node'] is None, "Link already set in %s" %target_path
                    from_node.link_info['node'] = target_node
                    validate_link(f, from_node, target_node)
                    # save information about link in links structure for later use in autogen
                    # save_info(objtype, objno, path, target, links):
                    ### Actually, don't save link info.  It's already saved when reading
                    # save_info("link", None, from_node.full_path, target_node.full_path, f.links)
    if missing_targets['hard'] or missing_targets['soft']:
        print "*** Link targets were missing when reading file:"
        for link_type in ('hard', 'soft'):
            for loc in missing_targets[link_type]:
                from_paths = f.links['missing_links'][link_type][loc]
                print "loc '%s', from paths:" % loc
                pp.pprint(from_paths)
        sys.exit(1)
def validate_link(f, source, target):
    """ Check that a link being made from node "source" to node "target"
    is valid (i.e. is consistent with the specification definition of
    both source and target).
    f - h5gate File object (supplies error/warning lists and make_qid)
    source - node containing the link; target - node being linked to.
    Appends to f.error when a declared target_type (or its allowed
    subclasses) does not match; appends to f.warning when the spec gives no
    target type, or when, with no link spec at all, the two node types differ."""
    error = f.error
    warning = f.warning
    link_spec = source.sdef['df']['link'] if 'link' in source.sdef['df'] else None
    target_type = f.make_qid(target.sdef['id'], target.sdef['ns'])  # add namespace for match
    if link_spec:
        expected_target_type = link_spec['target_type'] if 'target_type' in link_spec else None
        if expected_target_type:
            # add in namespace to expected target_type
            expected_target_type = f.make_qid(expected_target_type, source.sdef['ns'])
            if expected_target_type == target_type:
                # perfect match
                pass
            elif (source.sdef['type'] == 'group' and 'allow_subclasses' in link_spec and
                link_spec['allow_subclasses']):
                # see if match to a subclass (target.merged lists merged/subclass types)
                subclasses = target.merged if hasattr(target, 'merged') else []
                if expected_target_type not in subclasses:
                    error.append("%s - link target_type (%s) does not match target type (%s) or subclasses (%s) at: %s" %
                        (source.full_path, expected_target_type, target_type, subclasses, target.full_path))
                else:
                    # is OK, matches subclass
                    pass
            else:
                inc_path = source.get_include_path()
                error.append("%s - link target_type (%s)%s does not match target type (%s) at: %s" %
                    (source.full_path, expected_target_type, inc_path, target_type, target.full_path))
        else:
            warning.append("%s - target type of link not specified. Linking to type '%s' at: %s" %
                (source.full_path, target_type, target.full_path))
    else:
        # no link spec; see if the types of the two items match
        source_type = f.make_qid(source.sdef['id'], source.sdef['ns'])
        if source_type != target_type:
            warning.append("%s - type (%s) is linked to a different type (%s) at: %s" %
                (source.full_path, source_type, target_type, target.full_path))
            # import pdb; pdb.set_trace()
        else:
            # perfect match
            pass
# def initialize_autogen():
# """ Setup structure for storing autogen information. Looks like:
# { 'found': [ a1, a2, ... ],
# 'ds2mk': [ d1, d2, ... ] }
# each "a" is a dictionary containing information about an autogen
# found in an *existing* (i.e. already created) node. Each d1 is
# information about an autogen in a dataset that may not exist. Such
# datasets will be created automatically before processing the autogen
# directives they contain."""
# ag = { 'found': [], 'ds2mk': [] }
# return ag
def check_for_autogen(dict, aid, path, ctype, f):
    """ Check for autogen specification in either an attribute or dataset
    properties list. If in an attribute, aid is the attribute id, otherwise
    None. "path" is the path to the h5gate object which contains dict.
    ctype is the type of object, either "group" or "dataset".
    If autogen is found and is syntactically correct, save it. Returns True if
    autogen is found (whether valid or not). Otherwise returns False.
    f - h5gate file object. Needed to allow accessing f.autogen array.
    This routine just saves the information so it can later be processed by:
    routines: compute_autogen, validate_autogen, update_autogen.
    """
    # print "check for autogen, aid=%s" % aid
    if 'autogen' not in dict:
        return False
    agspec = dict['autogen']
    # ag_type / ag_format renamed from type / format to avoid shadowing builtins
    ag_type = get_param(agspec, 'type', None)
    target = get_param(agspec, 'target', None)
    trim = get_param(agspec, 'trim', False)
    sort = get_param(agspec, 'sort', True)
    qty = get_param(agspec, 'qty', "*")
    tsig = get_param(agspec, 'tsig', {})
    include_empty = get_param(agspec, 'include_empty', False)
    ag_format = get_param(agspec, 'format', "$t")
    # dimensions used to determine if result is an array or a single string
    # if dimensions present, then array, otherwise a single string
    dimensions = dict['dimensions'] if 'dimensions' in dict else None
    error = []
    ag_types = ('links', 'link_path', "names", "values", "length", "create", "missing", "extern")
    if ag_type not in ag_types:
        error.append("Invalid autogen specification. (%s). Type must be one of: %s" % (agspec, ag_types))
    if target is None and ag_type not in ('create', 'missing', "extern"):
        error.append("Invalid 'autogen' specification. 'target' must be specified: %s" % agspec)
    # bug fix: the following three used "error.apped", which would raise
    # AttributeError when any of these validations failed
    if trim not in (True, False):
        error.append("Invalid 'autogen' specification. 'trim' must be True or False: %s" % agspec)
    if sort not in (True, False):
        error.append("Invalid 'autogen' specification. 'sort' must be True or False: %s" % agspec)
    if qty not in ("!", "*"):
        error.append("Invalid 'autogen' specification. 'qty' must be '!' or '*': %s" % agspec)
    if error:
        # bug fix: was f.error.append(error) which nested the message list
        # inside f.error; extend stores each message as its own string like
        # every other f.error entry
        f.error.extend(error)
    else:
        a = {'node_path':path, 'aid': aid, 'agtarget':target, 'agtype':ag_type, 'format':ag_format,
            'trim':trim, 'sort':sort, 'qty':qty, 'tsig':tsig, 'include_empty':include_empty,
            'ctype':ctype, 'dimensions': dimensions}
        f.autogen.append(a)
    # bug fix: return True for any found autogen (valid or not), matching the
    # docstring; previously True was only returned on the valid path
    return True
# def save_autogen(f, node, aid, agtype, agtarget, trim, qty, tsig):
# # (f, node, aid, agtarget, agtype, params):
# """ Save information about an "autogen" specification. Parameters are:
#
# """
# a = {'node':node, 'aid': aid, 'agtarget':agtarget, 'agtype':agtype,
# 'trim': trim, 'qty': qty, 'tsig': tsig}
# f.autogen.append(a)
def show_autogen(f):
    """ Displays stored autogen, for testing.  Prints one summary line per
    entry saved in f.autogen (Python 2 print statements)."""
    print "found autogens:"
    for a in f.autogen:
        # the "attribute (...)" label is printed even for dataset autogens,
        # where a['aid'] is None
        attr = "attribute (%s)" % a['aid']
        print a['ctype'], a['agtype'], a['node_path'], attr, a['agtarget'], a['aid']
# def create_autogen_datasets(f):
# """ Create any data sets that were found in the mstats of a group, and have the
# autogen specification, but which were not created yet. This is so the autogen
# in these datasets can be processed by 'check_for_autogen' (to extract and save
# the autogen information). The information is then used by comompute_autogen
# to fill in the values for the dataset."""
# import pdb; pdb.set_trace()
# for ds2mk in f.autogen['ds2mk']:
# grp, id = ds2mk
# if grp.mstats[id]['created']:
# # dataset already created. Autogen would have been found already
# continue
# # pretend to be a client program creating the data set
# # that will save the autogen information as though a client program made it.
# # print "*** The magic starts, creating %s: %s" %(grp.full_path, id)
# grp.set_dataset(id, "autogen")
def process_autogen(f):
    """ Main routine for computing and updating autogens.  Called by h5gate
    after close() to update / create autogen fields before validation.
    Autogens of type "missing" (which report required-but-absent fields) must
    all run after every other autogen, so a field filled in by a later autogen
    is not flagged as missing.  Each group is processed in two steps:
    1. compute the values; 2. update them in the hdf5 file, but only when
    writing or updating (not in read-only mode).
    """
    # first pass: every type except "missing"; second pass: only "missing"
    for op in (operator.ne, operator.eq):
        compute_autogens(f, op, "missing")
        if f.options['mode'] in ('w', 'r+'):
            # the hdf5 file is only modified when opened for writing
            update_autogens(f, op, "missing")
def compute_autogens(f, op, ftype):
    """ Compute every autogen whose type compares true (via 'op', either
    operator.ne or operator.eq) against ftype."""
    # index-based while loop: compute_autogen may append new entries to
    # f.autogen while we iterate, and those must also be processed
    idx = 0
    while idx < len(f.autogen):
        entry = f.autogen[idx]
        idx += 1
        if op(entry['agtype'], ftype):
            compute_autogen(f, entry)  # singular, not plural
def update_autogens(f, op, ftype):
    """ Update every autogen whose type compares true (via 'op', either
    operator.ne or operator.eq) against ftype."""
    # index-based while loop: update_autogen may append new entries to
    # f.autogen while we iterate, and those must also be processed
    idx = 0
    while idx < len(f.autogen):
        entry = f.autogen[idx]
        idx += 1
        if op(entry['agtype'], ftype):
            update_autogen(f, entry)  # singular, not plural
# def compute_autogen(f):
# """ Computes values for each autogen saved in f.autogen. f is the hfgate File
# object. Stores the computed values in array autogen[a]['agvalue']."""
# # first, create any datasets that have autogen, but were not created
# # create_autogen_datasets(f)
# # now process all found autogens
# # first put all type "missing" at the end so these are
# # run after all the others. Otherwise, they may flag as missing values filled in
# # by other autogen's
# ag_all = []
# ag_last = []
# for ag in f.autogen:
# if ag['agtype'] == 'missing':
# ag_last.append(ag)
# else:
# ag_all.append(ag)
# # store back in f.autogen because more may be added when running the autogens
# f.autogen = ag_all + ag_last
# # use index to go though so new ones can be added within the loop
# # for a in f.autogen: # old method
# show_autogen(f)
# print "starting to compute autogen, num augogens = %i" % len(f.autogen)
# import pdb; pdb.set_trace()
# i = 0
# while i < len(f.autogen):
# a = f.autogen[i]
# compute_autogen_1(f, a)
# i = i + 1
def remove_prefix(text, prefix):
    """ Return text with prefix stripped from the front if present;
    otherwise return text unchanged."""
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
def get_constant_suffix(path):
    """ Return the last component of path (including its leading slash) when
    that component is a constant (not enclosed in <>) and a variable
    component (<...>) appears earlier in the path; otherwise return None."""
    match = re.match(r'[^>]+>(/[^<>]+)$', path)
    return match.group(1) if match else None
def remove_suffix(text, suffix):
    """ Return text with suffix stripped from the end when suffix is
    non-empty (and not None) and actually present; otherwise return text
    unchanged."""
    if not suffix or not text.endswith(suffix):
        return text
    return text[:-len(suffix)]
def check_if_all_str(vals):
    """ Return True when vals is non-empty and every element is a string
    consisting entirely of digits; otherwise return False.  (Despite the
    name, non-digit strings do not qualify.)"""
    if len(vals) == 0:
        return False
    return all(isinstance(v, str) and re.match(r'^[0-9]+$', v) for v in vals)
def natural_sort(vals):
    """ Return a new list of vals sorted "naturally": when every element is a
    string (str or unicode -- this is Python 2 code), sort using
    sortkey_natural so embedded digit runs compare numerically (e.g. "t2"
    before "t10"); otherwise fall back to plain sorted()."""
    # unicode is the Python 2 text type; this line is not Python 3 compatible
    all_str = all(isinstance(x, (str, unicode)) for x in vals)
    sv = sorted(vals, key=sortkey_natural) if all_str else sorted(vals)
    return sv
# function used for natural sort ke
# from: http://stackoverflow.com/questions/2545532/python-analog-of-natsort-function-sort-a-list-using-a-natural-order-algorithm
def sortkey_natural(s):
    """ Natural-order sort key for string s: split s on runs of digits and
    convert each digit run to an int, so numeric parts compare numerically
    rather than lexically."""
    pieces = re.split(r'([0-9]+)', s)
    return tuple(int(piece) if re.match(r'[0-9]+$', piece) else piece
        for piece in pieces)
def get_parent_path(full_path):
    """ Return the parent path of full_path ("/" for a top-level name).
    A trailing slash (used on group paths) is ignored.  full_path must
    contain at least one slash after stripping."""
    # rstrip in case this is a group path (ending with '/')
    parent, _name = full_path.rstrip('/').rsplit('/', 1)
    if parent == "":
        parent = "/"
    return parent
def compute_autogen(f, a):
    """ computes values for one autogen. Stores in updated autogen entry "a".
    f - h5gate File object
    a - one row of f.autogen (dict with keys 'agtype', 'agtarget',
        'node_path', 'aid', 'ctype', 'qty', 'tsig', 'trim', 'sort', 'format',
        'dimensions', 'include_empty').
    The computed value is stored in a['agvalue']; it is left as None when no
    value could (or should) be computed."""
    # compute agtarget path
    # figure out enclosing path
    # if attribute of a group, then enclosing path is just the node_path, otherwise the parent
    if a['aid'] and a['ctype'] == 'group':
        enclosing_path = a['node_path']
    else:
        enclosing_path = get_parent_path(a['node_path'])
    assert enclosing_path in f.path2node, "Enclosing group for autogen does not exist: %s" % enclosing_path
    enclosing_node = f.path2node[enclosing_path]
    # store value in case there is an error, plus to ignore type "create" in ag_validate , ag_update
    a['agvalue'] = None
    if a['agtype'] == "create":
        # type create is special, compute it separately
        process_ag_create(f, a, enclosing_node)
        return
    if a['agtype'] == "missing":
        # type missing is also special, compute it separately
        process_ag_missing(f, a, enclosing_node)
        return
    if a['agtype'] == "extern":
        # type extern is also special, compute it separately
        process_ag_extern(f, a, enclosing_node)
        return
    # remaining types all operate on nodes matched by the target template
    agtarget_nodes = get_nodes_from_path_template(enclosing_node, a['agtarget'])
    agtarget_nodes = filter_autogen_targets(agtarget_nodes, a['tsig'])
    if not agtarget_nodes:
        if a['qty'] == "!":
            msg = ("Unable to find target for required autogen, enclosing group='%s', type='%s'\n"
                "node_path='%s', attribute [%s], target='%s'") % ( enclosing_path,
                enclosing_node.sdef['id'], a['node_path'], a['aid'], a['agtarget'])
            f.error.append(msg)
        elif a['include_empty']:
            # return empty list
            # return numpy type so h5py will create empty list of strings
            # rather than empty float array, if option 'include_empty' is True
            a['agvalue'] = np.empty([0,], dtype=np.string_)
            # a['agvalue'] = []
        else:
            # since it's not required (qty != '!') and not including empty, just ignore
            # for nwb format, ignoring removes warning messages for unable to find data or timestamps
            # target when making data_links or timestamps_links.
            pass
            # msg = ("Unable to find autogen target, enclosing group='%s', type='%s'\n"
            #     "node_path='%s', attribute [%s], target='%s'") % ( enclosing_path,
            #     enclosing_node.sdef['id'], a['node_path'], a['aid'], a['agtarget'])
            # f.warning.append(msg)
        return
    if a['qty'] == '!' and len(agtarget_nodes) > 1:
        f.error.append(("%s: one and only one (!) specified, but more than one autogen"
            " target found for path: %s") % (a['node_path'], a['agtarget']))
        return
    # at least one target node found
    if a['agtype'] == 'links' and len(agtarget_nodes) > 1:
        f.error.append("%s: multiple target nodes for autogen 'links' not supported, %i matches found"
            % (a['node_path'], len(agtarget_nodes)))
        return
    # now, process each of the different autogen types
    if a['agtype'] == 'links':
        # Get all links sharing agtarget
        agtarget_path = agtarget_nodes[0].full_path
        a['agvalue'] = get_common_links(f, agtarget_path)
        if a['agvalue'] and a['trim']:
            # trim the shared trailing path component from each link path
            basename = get_common_basename(a['agvalue'])
            if basename:
                a['agvalue'] = trim_common_basename(a['agvalue'], basename)
        # if a['exclude'] and a['agvalue']:
        #     # import pdb; pdb.set_trace()
        #     # remove nodes with path starting with string in 'exclude'
        #     a['agvalue'] = [x for x in a['agvalue'] if not x.startswith(a['exclude'])]
        # NOTE(review): get_common_links may return None; sorted(None) would
        # raise TypeError -- presumably never None here, confirm
        a['agvalue'] = sorted(a['agvalue'])
    elif a['agtype'] == 'link_path':
        # map each matched link node's (possibly trimmed) path to its target path
        lpaths = {}
        # make prefix in case using trim
        # can use encode to remove unicode prefix, but maybe it's better to leave it in
        # prefix = enclosing_node.full_path.encode('ascii', 'ignore') + '/'
        prefix = enclosing_node.full_path + '/'
        # get suffix if constant for trimming right side of source path
        suffix = get_constant_suffix(a['agtarget'])
        for node in agtarget_nodes:
            # if not hasattr(node, 'link_info'):
            if not node.link_info or node.link_info['node'] is None:
                f.error.append("%s: referenced in 'link_path' autogen, but is not a link" % node.full_path)
                return
            else:
                fpath = node.full_path # .encode('ascii', 'ignore')
                tpath = node.link_info['node'].full_path # .encode('ascii', 'ignore')
                if a['trim']:
                    # trim prefix and any suffix on fpath
                    fpath = remove_prefix(fpath, prefix)
                    fpath = remove_suffix(fpath, suffix)
                lpaths[fpath] = tpath
        if False and a['qty'] == "!": # skip this, try using just formatting
            agv = lpaths[lpaths.keys()[0]]
        else:
            # format each source/target pair using the '$s' / '$t' template
            agv = []
            for fpath in sorted(lpaths.keys()):
                tpath = lpaths[fpath]
                strt = a['format']
                strt = strt.replace("$s", fpath)
                strt = strt.replace("$t", tpath)
                agv.append(str(strt)) # "\"'%s' is '%s'\"" % (fpath, tpath))
            if a['dimensions'] is None:
                # no dimensions specified, save as a single string
                # TODO, add in a "join_str" option rather than assume newline
                agv = "\n".join(agv)
        a['agvalue'] = agv
    elif a['agtype'] == 'names':
        # Get all names matching agtarget
        names = []
        for node in agtarget_nodes:
            name = node.name
            # encoding unicode now done in h5gate create_dataset
            # if isinstance(name, unicode):
            #     name = name.encode('utf8')
            names.append(name)
        names = natural_sort(names)
        a['agvalue'] = names
    elif a['agtype'] == 'values':
        # Get set of values that are in names matching agtarget
        values = set()
        for node in agtarget_nodes:
            path = node.full_path
            # .value is the legacy h5py read API (removed in h5py 3); kept as-is
            nv = f.file_pointer[path].value
            if not isinstance(nv, (list, tuple, np.ndarray)):
                f.error.append("%s: autogen values must be list. Found type: "
                    "%s at target:\n%s\nvalue found:%s" % (a['node_path'], type(nv),
                    path, nv))
                return
            snv = set(nv)
            values = values.union(snv)
        lvalues = list(values)
        lvalues = natural_sort(lvalues)
        a['agvalue'] = lvalues
    elif a['agtype'] == 'length':
        # get length of target
        path = agtarget_nodes[0].full_path
        try:
            val = f.file_pointer[path].value
        except KeyError:
            # unable to get value. See if this is an external link
            # if hasattr(agtarget_nodes[0], 'link_info') and 'extlink' in agtarget_nodes[0].link_info:
            if agtarget_nodes[0].link_info and 'extlink' in agtarget_nodes[0].link_info:
                # ntype, target = get_h5_node_info(f, path)
                # if ntype == "ext_link":
                tfile, tpath = agtarget_nodes[0].link_info['extlink']
                msg = ("%s: autogen unable to determine length of '%s' because hdf5 external "
                    "link missing: file='%s', path='%s'") % (
                    a['node_path'], path, tfile, tpath)
                f.warning.append(msg)
                # set length to 0 to indicate do not have valid value
                # length = 0
                # leave value undetermined. Require user set it.
                return
            else:
                # NOTE(review): ntype and target are undefined in this scope
                # (leftovers from the commented-out code above); reaching this
                # branch would raise NameError before printing -- confirm intent
                print "Unexpected node type in autogen length, type=%s, target=%s" % (ntype, target)
                # import pdb; pdb.set_trace()
                sys.exit(1)
        else:
            try:
                length = len(val)
            except TypeError, e:
                msg = "%s: autogen unable to determine length of '%s' error is: '%s'" % (
                    a['node_path'], path, e)
                f.warning.append(msg)
                # leave value unspecified
                return
        a['agvalue'] = length
    else:
        sys.exit("invalid autogen specification type: %s" % a['agtype'])
    # if the computed value is an empty list, replace it by numpy type so h5py will create
    # empty list of strings rather than empty float array, if option 'include_empty' is True
    if isinstance(a['agvalue'], list) and len(a['agvalue']) == 0:
        a['agvalue'] = np.empty([0,], dtype=np.string_)
def process_ag_create(f, a, enclosing_node):
    """ Process autogen "create" type: make required group members that are
    specified to be created and do not exist yet.  Everything for the
    "create" type happens here; there is nothing for autogen validate or
    update to do afterwards.
    """
    if f.options['mode'] not in ['w', 'r+']:
        # file not opened for writing; cannot create anything
        return
    member_id = remove_prefix(a['node_path'], enclosing_node.full_path).lstrip('/')
    mstats = enclosing_node.mstats
    assert member_id in mstats, "%s: autogen create unable to find member id (%s) in mstats" % (
        enclosing_node.full_path, member_id)
    minfo = mstats[member_id]
    if minfo['created'] or minfo['qty'] not in ("!", "+"):
        # member already made, or not required -- nothing to create
        return
    assert minfo['type'] == 'group', "%s: autogen create referencing dataset (%s). This is not allowed." %(
        enclosing_node.full_path, member_id)
    # create the required group (member ids for groups carry a trailing slash)
    enclosing_node.make_group(member_id.rstrip('/'))
def process_ag_missing(f, a, enclosing_node):
    """ Process autogen "missing" type: store in a['agvalue'] the sorted list
    of member ids of enclosing_node that are required ('!', '^' or '+') but
    were never created.  a['agvalue'] is left untouched when nothing is
    missing."""
    mstats = enclosing_node.mstats
    absent = [mid for mid in mstats
        if mstats[mid]['qty'] in ('!', '^', '+') and not mstats[mid]['created']]
    if absent:
        a['agvalue'] = sorted(absent)
def process_ag_extern(f, a, enclosing_node):
    """ Process autogen "extern" type: store in a['agvalue'] the sorted list
    of member ids (groups or datasets) of enclosing_node that are hdf5
    external links.  a['agvalue'] is left untouched when there are none."""
    extern = []
    for mid in enclosing_node.mstats:
        for node in enclosing_node.mstats[mid]['created']:
            if node.link_info and 'extlink' in node.link_info:
                # member id stored without the trailing slash groups carry
                extern.append(node.sdef['id'].rstrip('/'))
    if extern:
        a['agvalue'] = sorted(extern)
def get_common_basename(paths):
    """ Return the common "basename" (last path component) shared by every
    path in list "paths", or None when the list is empty or the last
    components differ.  An assertion fails for any path ending in a slash."""
    if len(paths) == 0:
        return None
    # the first path's basename is the candidate all others must equal
    candidate = paths[0].rsplit('/', 1)[-1]
    assert candidate != '', "Invalid path, has trailing slash: %s" % paths[0]
    for path in paths:
        prefix, suffix = path.rsplit('/', 1)
        assert suffix != '', "Invalid path, has trailing slash: %s" % path
        if suffix != candidate:
            # basenames differ, no common suffix
            return None
    return candidate
def trim_common_basename(paths, basename):
    """ Return a new list made by removing the trailing component (which must
    equal basename) from every path in list "paths"."""
    trimmed = []
    for path in paths:
        prefix, suffix = path.rsplit('/', 1)
        # every path is required to share the given basename
        assert suffix == basename, "Path '%s' does not have common suffix '%s'" % (path, basename)
        trimmed.append(prefix)
    return trimmed
def values_match(x, y):
    """ Return True if x and y compare equal.  Needed because the operand
    types are unpredictable: == may yield an element-wise result (numpy
    arrays) or raise ValueError on a shape mismatch, both of which are
    handled here."""
    if x is y:
        return True
    # explicit None tests avoid element-wise comparison warnings from numpy;
    # both-None was already handled by the identity test above
    if x is None or y is None:
        return False
    try:
        eq = x == y
    except ValueError:
        # shape mismatch: objects cannot be broadcast to a single shape
        return False
    return eq if isinstance(eq, bool) else eq.all()
# if x is y:
# return True
# return x==y
# if (isinstance(x, (list, tuple, np.ndarray))
# and isinstance(y, (list, tuple, np.ndarray))):
# eq = x==y
# if isinstance(eq, bool):
# return eq
# return eq.all()
# if isinstance(x, basestring) and isinstance(y, basestring):
# return x == y
# # don't have same shape or type
# return False
# def validate_autogen_old(f):
# """ Validates that autogen fields have correct values. i.e. match what was
# generated by "ag_compute". f is the h5gate file object"""
# for a in f.autogen:
# if a['agvalue'] is None:
# # skip those that have no value
# continue
# if a['aid']:
# # value is stored in attribute
# aid = a['aid']
# node = f.path2node[a['node_path']]
# ats = node.attributes[aid]
# value = ats['nv'] if 'nv' in ats else (
# ats['value'] if 'value' in ats else None)
# if not values_match(value, a['agvalue']):
# f.error.append(("'%s' autogen attribute [%s] values incorrect.\n"
# "expected:%s (type %s)\nfound:%s (type %s)") % (a['node_path'], aid,
# a['agvalue'], type(a['agvalue']), value, type(value)))
# elif value is None and a['qty'] == '!':
# f.error.append(("%s autogen attribute [%s] value required "
# "but is empty") % (a['node_path'], aid))
# else:
# # value is stored in value of a dataset
# # TODO: change to buffer value in node so can work with MATLAB
# if a['node_path'] in f.file_pointer:
# # data set exists
# ds = f.file_pointer[a['node_path']]
# value = ds.value
# if not values_match(value, a['agvalue']):
# f.warning.append(("'%s' autogen dataset values possibly wrong.\n"
# "expected:%s (type %s)\nfound:%s (type %s)") % (a['node_path'],
# a['agvalue'], type(a['agvalue']), value, type(value)))
# elif value is None and a['qty'] == '!':
# f.error.append(("%s autogen dataset '%s' value required "
# "but is empty") % (a['node_path']))
# else:
# # data set does not exist
# # if validating this is an error (since dataset should exist)
# # Otherwise it's ok because autogen will make it.
# if f.reading_file:
# f.error.append("%s: autogen dataset not found." % a['node_path'])
def validate_autogen(f):
    """ Validates that autogen fields have correct values, i.e. match what was
    generated by compute_autogen.  f is the h5gate file object.
    For each stored autogen with a computed value, fetch the value actually
    in (or destined for) the hdf5 file -- from the node attribute, or from
    the dataset -- and compare via compare_autogen_values, which records any
    error / warning messages."""
    for a in f.autogen:
        if a['agvalue'] is None:
            # skip those that have no value
            continue
        if a['aid']:
            # value is stored in attribute
            aid = a['aid']
            node = f.path2node[a['node_path']]
            ats = node.attributes[aid]
            # 'nv' (new value) takes precedence over the original 'value'
            value = ats['nv'] if 'nv' in ats else (
                ats['value'] if 'value' in ats else None)
            compare_autogen_values(f, a, value)
            # if not values_match(value, a['agvalue']):
            #     f.error.append(("'%s' autogen attribute [%s] values incorrect.\n"
            #         "expected:%s (type %s)\nfound:%s (type %s)") % (a['node_path'], aid,
            #         a['agvalue'], type(a['agvalue']), value, type(value)))
            # elif value is None and a['qty'] == '!':
            #     f.error.append(("%s autogen attribute [%s] value required "
            #         "but is empty") % (a['node_path'], aid))
        else:
            # value is stored in value of a dataset
            # TODO: change to buffer value in node so can work with MATLAB
            if a['node_path'] in f.file_pointer:
                # data set exists; .value is the legacy h5py read API
                ds = f.file_pointer[a['node_path']]
                value = ds.value
                compare_autogen_values(f, a, value)
                # if not values_match(value, a['agvalue']):
                #     f.warning.append(("'%s' autogen dataset values possibly wrong.\n"
                #         "expected:%s (type %s)\nfound:%s (type %s)") % (a['node_path'],
                #         a['agvalue'], type(a['agvalue']), value, type(value)))
                # elif value is None and a['qty'] == '!':
                #     f.error.append(("%s autogen dataset '%s' value required "
                #         "but is empty") % (a['node_path']))
            else:
                # data set does not exist
                # if validating this is an error (since dataset should exist)
                # Otherwise it's ok because autogen will make it.
                if f.reading_file:
                    f.error.append("%s: autogen dataset not found." % a['node_path'])
def compare_autogen_values(f, a, value):
    """ Compare value in hdf5 file to expected value computed by autogen.
    Save an error or warning message (via report_autogen_problem) if the
    expected value does not match the value found.
    f - h5gate file object
    a - row of f.autogen
    value - value in (or being stored in) hdf5 file for autogen field
    """
    expected = a['agvalue']
    if values_match(value, expected):
        # values agree; the only remaining problem is a required-but-empty value
        if value is None and a['qty'] == '!':
            report_autogen_problem(f, a, "value required but is empty.")
        return
    # no direct match; a list-like value may still match once sorted
    if isinstance(value, (list, np.ndarray)) and values_match(natural_sort(value), expected):
        if a['sort']:
            report_autogen_problem(f, a, "values are correct, but not sorted.")
        return
    # neither original nor sorted values match
    msg = ("values incorrect.\nexpected:%s (type %s)\n"
        "found:%s (type %s)") % (expected, type(expected), value, type(value))
    report_autogen_problem(f, a, msg)
def report_autogen_problem(f, a, msg):
    """ Record a problem found in an autogen field.  Attribute problems and
    most dataset problems are errors; dataset "length" mismatches are only
    warnings (NWB format uses length in just one place, timeseries
    num_samples -- the specification language could later allow choosing
    warning vs error per autogen)."""
    if a['aid']:
        # value stored in attribute
        f.error.append("'%s': autogen attribute [%s] %s" % (a['node_path'], a['aid'], msg))
        return
    # value stored in dataset
    output_msg = "'%s': autogen dataset %s" % (a['node_path'], msg)
    dest = f.warning if a['agtype'] == 'length' else f.error
    dest.append(output_msg)
def update_autogen(f, a):
    """Update values that are stored in autogen fields. This processes one
    autogen (a row of f.autogen).  Attribute-stored values are overwritten
    when they differ from the computed value; dataset-stored values cannot
    be overwritten in place, so a mismatch is recorded as an error, while a
    missing dataset is created with the computed value."""
    if a['agvalue'] is None:
        # skip those that have no value
        return
    if a['aid']:
        # value is stored in attribute
        aid = a['aid']
        node = f.path2node[a['node_path']]
        ats = node.attributes[aid]
        # 'nv' (new value) takes precedence over the original 'value'
        value = ats['nv'] if 'nv' in ats else (
            ats['value'] if 'value' in ats else None)
        if not values_match(value, a['agvalue']):
            # values do not match, update them
            ats['nv'] = a['agvalue']
            f.set_attribute(a['node_path'], aid, a['agvalue'])
    else:
        if a['node_path'] in f.path2node:
            # dataset exists; .value is the legacy h5py read API
            ds = f.file_pointer[a['node_path']]
            value = ds.value
            if not values_match(value, a['agvalue']):
                f.error.append(("%s autogen dataset values do not match. Unable to update.\n"
                    " expected:%s\nfound:%s") % (a['node_path'],
                    str(a['agvalue']), str(value)))
        else:
            # data set does not exist. Create it using autogen value
            enclosing_path, name = a['node_path'].rsplit('/',1)
            grp = f.path2node[enclosing_path]
            # pretend to be a client program creating the data set with the desired value
            grp.set_dataset(name, a['agvalue'])
# def update_autogen_old(f):
# """Update values that are stored in autogen fields. This should be
# called before closing a file that was opened in write (or read/write)
# mode to update the autogen values before validating and saving the file."""
# for a in f.autogen:
# if a['agvalue'] is None:
# # skip those that have no value
# continue
# if a['aid']:
# # value is stored in attribute
# aid = a['aid']
# node = f.path2node[a['node_path']]
# ats = node.attributes[aid]
# value = ats['nv'] if 'nv' in ats else (
# ats['value'] if 'value' in ats else None)
# if not values_match(value, a['agvalue']):
# # values do not match, update them
# ats['nv'] = a['agvalue']
# f.set_attribute(a['node_path'], aid, a['agvalue'])
# else:
# if a['node_path'] in f.path2node:
# # dataset exists
# ds = f.file_pointer[a['node_path']]
# value = ds.value
# if not values_match(value, a['agvalue']):
# f.error.append(("%s autogen dataset values do not match. Unable to update.\n"
# " expected:%s\nfound:%s") % (a['node_path'],
# str(a['agvalue']), str(value)))
# else:
# # data set does not exist. Create it using autogen value
# enclosing_path, name = a['node_path'].rsplit('/',1)
# grp = f.path2node[enclosing_path]
# # pretend to be a client program creating the data set with the desired value
# grp.set_dataset(name, a['agvalue'])
def get_param(dict, key, default):
    """Return ``dict[key]`` when *key* is present, otherwise *default*.

    Note: the first parameter shadows the builtin ``dict``; the name is
    kept unchanged for backward compatibility with existing callers.
    """
    return dict[key] if key in dict else default
# error = None
# params = None
# if not isinstance(agspec, (list, tuple)):
# error = "Invalid 'autogen' specification. Must be array: %s" % agspec
# else:
# if agspec[0] == 'links':
# if len(agspec) == 3:
# if agspec[2] != 'trim':
# error = ("Invalid 'links' autogen specification. If present, "
# "third parameter must be keyword 'trim': %s") % agspec
# else:
# params = {'trim':True}
# elif len(agspec) == 2:
# params = {'trim':False} # default is False
# else:
# error = ("Invalid 'links' autogen specification. Must have either"
# " two or three elments: %s") % agspec
# elif agspec[0] == 'link_path':
# if len(agspec) != 2:
# error = ("Invalid 'link_path' autogen specification. Must have"
# " two elments: %s") % agspec
# else:
# error = "Invalid autogen specification. Must be 'links' or 'link_path': %s" % agspec
# # done checking for errors
# if error:
# f.error.append(error)
# return None
# # Seems ok. Save it.
# # save_autogen(f, node, aid, agtarget, agtype, params)
# a = save_autogen(f, node, aid, agspec[1], agspec[0], params)
# return a
# for type in ('hard', 'soft'):
# path2lg = self.links['path2lg'][type]
# if full_path in path2lg:
# # this node is part of a link group
# loc = path2lg[full_path]
# # see if target already created and identified
# if loc in self.links['targets_created']:
# target_node = self.links['targets_created'][loc]
# link_info = {'node': target_node}
# return
# # Target not already created and identified
# # See if soft-link. If so, the target is the link group location, see
# # if it's been created.
# if type == 'soft':
# target_path = loc
# if target_path in self.path2node
# target_node = self.path2node[target_path]
# # save target_node in links 'targets_created'
# self.links['targets_created'] = target_node
# link_info = = {'node': target_node}
# return link_info
# # target node not yet created.
# link_info = {'node': None}
# return link_info
# hp = self.links['path2lg']['hard']
if __name__ == '__main__':
if len(sys.argv) != 2:
print "format is:"
print "pyhton %s <hdf5_file_name>" % sys.argv[0]
sys.exit(0)
fname = sys.argv[1]
try:
fp = h5py.File(fname, "r")
# f = h5py.h5f.open(fname, h5py.h5f.ACC_RDONLY)
except IOError:
print "Unable to open file '%s'" % fname
sys.exit(1)
links = initialize()
find(fp, links)
fp.close()
# print "found links are:"
# pp.pprint(links)
# pp.ppshow_links(links)
show_stats(links)
# main
#
# # test_ginfo(f)
# links = find_links(f)
# # links = test_links()
# print "Before anything:"
# show_links(links)
# merge_soft_links(links);
# prune_hard_links(links)
# print "After merge, and prune, before merge_soft_and_hard_lgs"
# show_links(links)
# merge_soft_and_hard_lgs(links)
# print "After merge_soft_and_hard_lgs:"
# show_links(links)
# f.close()
| 44.525998
| 128
| 0.587961
|
4a091719671560d8e564038a19c2545f0fb0683b
| 2,117
|
py
|
Python
|
gen-plasma/test/test_erc20_plasma_contract.py
|
plasma-group/research
|
4d454351cd2417ccaa18ac214e2f155ea7345704
|
[
"MIT"
] | 16
|
2019-03-01T11:16:25.000Z
|
2019-07-04T21:12:10.000Z
|
gen-plasma/test/test_erc20_plasma_contract.py
|
plasma-group/research
|
4d454351cd2417ccaa18ac214e2f155ea7345704
|
[
"MIT"
] | 16
|
2019-03-20T17:20:59.000Z
|
2021-08-01T20:36:40.000Z
|
gen-plasma/test/test_erc20_plasma_contract.py
|
plasma-group/research
|
4d454351cd2417ccaa18ac214e2f155ea7345704
|
[
"MIT"
] | 1
|
2019-04-16T07:50:57.000Z
|
2019-04-16T07:50:57.000Z
|
from utils import State, StateUpdate
def test_deposit(alice, erc20_ct, erc20_plasma_ct, ownership_predicate):
    """Deposit 100 tokens for alice; verify token balances and plasma bookkeeping."""
    state_params = {'recipient': alice.address}
    erc20_plasma_ct.deposit(alice.address, 100, ownership_predicate, state_params)
    # 100 tokens moved from alice's wallet into the plasma contract.
    assert erc20_ct.balanceOf(alice.address) == 900
    assert erc20_ct.balanceOf(erc20_plasma_ct.address) == 100
    # Exactly one exitable range and one deposit record, and the total grew.
    exitable = erc20_plasma_ct.exitable_ranges
    assert len(exitable) == 1 and isinstance(next(iter(exitable.values())), int)
    deposits = erc20_plasma_ct.deposits
    assert len(deposits) == 1 and isinstance(next(iter(deposits.values())), StateUpdate)
    assert erc20_plasma_ct.total_deposited == 100
def test_state_updates(alice, bob, operator, erc20_plasma_ct, ownership_predicate):
    """Commit a block swapping ownership of two deposits, then verify inclusion."""
    # NOTE(review): both deposits are funded from alice's address (the second
    # is merely *owned* by bob) — confirm that is intended.
    deposit_alice = erc20_plasma_ct.deposit(
        alice.address, 100, ownership_predicate, {'recipient': alice.address})
    deposit_bob = erc20_plasma_ct.deposit(
        alice.address, 100, ownership_predicate, {'recipient': bob.address})
    # States transferring each deposit to the other party.
    to_bob = State(ownership_predicate, {'recipient': bob.address})
    to_alice = State(ownership_predicate, {'recipient': alice.address})
    # StateUpdates over the deposited ranges, for plasma block 0.
    update_alice_to_bob = StateUpdate(to_bob, deposit_alice.start, deposit_alice.end, 0)
    update_bob_to_alice = StateUpdate(to_alice, deposit_bob.start, deposit_bob.end, 0)
    # Operator commits both updates in a single block.
    erc20_plasma_ct.state_update_chain.commit_block(
        operator.address,
        {erc20_plasma_ct.address: [update_alice_to_bob, update_bob_to_alice]})
    # Both updates must be provably included.
    for update in (update_alice_to_bob, update_bob_to_alice):
        assert erc20_plasma_ct.state_update_chain.verify_inclusion(
            update, erc20_plasma_ct.address, None)
| 73
| 142
| 0.800189
|
4a09174ee1e271d8bf8f70496d8dfe4d1d5d52c1
| 2,601
|
py
|
Python
|
mindmappings/costModel/example/grad_descent.py
|
kartik-hegde/mindmappings
|
e96f2a287da2a93c4af0794a3bab1211bc95ba0a
|
[
"MIT"
] | 17
|
2021-03-30T07:11:44.000Z
|
2022-03-08T11:06:39.000Z
|
mindmappings/costModel/example/grad_descent.py
|
kartik-hegde/mindmappings
|
e96f2a287da2a93c4af0794a3bab1211bc95ba0a
|
[
"MIT"
] | 1
|
2022-01-12T03:20:31.000Z
|
2022-01-30T17:24:30.000Z
|
mindmappings/costModel/example/grad_descent.py
|
kartik-hegde/mindmappings
|
e96f2a287da2a93c4af0794a3bab1211bc95ba0a
|
[
"MIT"
] | 4
|
2021-05-25T00:30:41.000Z
|
2022-02-09T13:57:21.000Z
|
import numpy as np
from mindmappings.utils.parallelProcess import parallelProcess
from copy import deepcopy
class GradientDescent:
    """Plain gradient descent with multiplicative learning-rate decay.

    A step is accepted when it does not exceed the reference cost and
    satisfies ``constraints``; otherwise the point is kept and the step
    size ``alpha`` is multiplied by ``decay_factor``.

    Parameters
    ----------
    f : callable
        Objective function to minimize.
    df : callable
        Gradient of ``f``.
    constraints : callable
        Returns True when a candidate point is feasible.
    learning_factor : float
        Initial step size (alpha).
    decay_factor : float
        Multiplier applied to alpha on a rejected step.
    error : float
        Stored as ``self.epsilon``; not used by ``gradient_descent`` itself.
    """

    def __init__(self, f, df, constraints, learning_factor=0.5, decay_factor=0.5, error=1e-10):
        self.alpha = learning_factor
        self.decay_factor = decay_factor
        self.epsilon = error
        self.f = f
        self.df = df
        self.constraints = constraints

    def gradient_descent(self, x0, steps=100):
        """Run ``steps`` iterations of gradient descent from ``x0``.

        Returns
        -------
        (result, best_x)
            ``result`` is the cost history (``result[0]`` is the cost at
            ``x0``; rejected steps append the running minimum), and
            ``best_x`` is the last accepted point.
        """
        alpha = self.alpha
        iters = 0
        x = x0
        best_x = x0
        cost = self.f(x0)
        result = [cost]
        while iters < steps:
            x_next = x - (alpha * self.df(x))
            next_cost = self.f(x_next)
            # NOTE(review): ``cost`` is never updated, so every candidate is
            # compared against the *initial* cost rather than the current
            # one — confirm this is intended before changing it.
            if (cost < next_cost) or (not self.constraints(x_next)):
                # Rejected step: stay put and decay the learning rate.
                x_next = x
                alpha = alpha * self.decay_factor
                result.append(min(result))
            else:
                result.append(next_cost)
                # NOTE(review): this records the point *before* the accepted
                # step, so best_x lags one step behind ``x`` — confirm intended.
                best_x = x
            x = x_next
            iters += 1
        return result, best_x

    def gradDescent_unpack(self, args):
        """Worker adapter: unpack ``(x0, steps)`` and return only the cost history."""
        return self.gradient_descent(*args)[0]

    def runGradDesc(self, x0=None, steps=100, average=100):
        """Run ``average`` parallel descents and return the mean cost trajectory.

        BUG FIX: the original tested ``x0 == None``, which raises
        ``ValueError`` for numpy-array inputs (element-wise comparison is
        ambiguous in a boolean context); ``is None`` is the correct test.
        """
        # Avg iters
        n = average
        # Fixed default starting point when none is supplied.
        x0 = np.array([10.0, 10.0, 10.0, 10.0, 10.0]) if x0 is None else x0
        init_points = [x0 for _ in range(n)]
        # Each worker receives its own (start point, steps) copy.
        work = [deepcopy((init_points[i], steps)) for i in range(n)]
        costArr = parallelProcess(self.gradDescent_unpack, work, num_cores=None)
        # Average the n trajectories element-wise.
        # (The original also computed a per-slice std-dev but never used it;
        # that dead computation has been removed.)
        allMins = [np.mean(costArr[i:i + n], axis=0) for i in range(0, len(costArr), n)]
        print("Done!")
        return allMins
if __name__ == '__main__':
    # Demo: minimize f(x) = x^2 + x with no feasibility restriction.
    quad = lambda x: x * x + x
    quad_grad = lambda x: 2 * x + 1
    always_feasible = lambda x: True
    optimizer = GradientDescent(quad, quad_grad, always_feasible,
                                learning_factor=0.1, decay_factor=0.5)
    print(optimizer.runGradDesc(10.0))
| 30.6
| 111
| 0.553633
|
4a091765f55ffdbed0ba18e34b6f61e91e994b90
| 39
|
py
|
Python
|
example/mnist/src/training/__init__.py
|
dsanno/chainer-training-template
|
b9edb257c37a7ef32b57c509a0e94d163391b307
|
[
"MIT"
] | 1
|
2017-11-13T01:39:45.000Z
|
2017-11-13T01:39:45.000Z
|
template/src/training/__init__.py
|
dsanno/chainer-training-template
|
b9edb257c37a7ef32b57c509a0e94d163391b307
|
[
"MIT"
] | null | null | null |
template/src/training/__init__.py
|
dsanno/chainer-training-template
|
b9edb257c37a7ef32b57c509a0e94d163391b307
|
[
"MIT"
] | null | null | null |
from training_step import TrainingStep
| 19.5
| 38
| 0.897436
|
4a09178e6692854a6f309b5ccb5a0c743d5a5a65
| 6,915
|
py
|
Python
|
backend/foodz4_32695/settings.py
|
crowdbotics-apps/foodz4-32695
|
cf29b53e70cda9b647fe4e7bb0680879de468cad
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/foodz4_32695/settings.py
|
crowdbotics-apps/foodz4-32695
|
cf29b53e70cda9b647fe4e7bb0680879de468cad
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/foodz4_32695/settings.py
|
crowdbotics-apps/foodz4-32695
|
cf29b53e70cda9b647fe4e7bb0680879de468cad
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for foodz4_32695 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'foodz4_32695.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'foodz4_32695.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# S3-backed media storage is enabled only when all four AWS settings are set.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}  # cache objects for 24h
    # NOTE(review): "public-read" default makes uploads world-readable — confirm.
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
# Fall back to the console email backend when debugging or when SendGrid
# credentials are missing, so emails are printed instead of sent.
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.551282
| 112
| 0.730441
|
4a0917e96c00e330f5d9b64ec01675c19b0164f0
| 4,806
|
py
|
Python
|
build/lua-reader/source/protobuf-2.6.1/python/google/protobuf/text_encoding.py
|
LazyPlanet/MX_LIB
|
8f4032b53153ee630f621afbda22454e7aae9a4e
|
[
"BSD-3-Clause"
] | null | null | null |
build/lua-reader/source/protobuf-2.6.1/python/google/protobuf/text_encoding.py
|
LazyPlanet/MX_LIB
|
8f4032b53153ee630f621afbda22454e7aae9a4e
|
[
"BSD-3-Clause"
] | null | null | null |
build/lua-reader/source/protobuf-2.6.1/python/google/protobuf/text_encoding.py
|
LazyPlanet/MX_LIB
|
8f4032b53153ee630f621afbda22454e7aae9a4e
|
[
"BSD-3-Clause"
] | null | null | null |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
"""Encoding related utilities."""
import re
import sys ##PY25
# Lookup table for utf8
# Maps every byte value 0-255 to its escaped text form when the output is
# treated as UTF-8: bytes pass through unchanged except the few below.
# (Python 2 module: uses xrange.)
_cescape_utf8_to_str = [chr(i) for i in xrange(0, 256)]
_cescape_utf8_to_str[9] = r'\t'  # optional escape
_cescape_utf8_to_str[10] = r'\n'  # optional escape
_cescape_utf8_to_str[13] = r'\r'  # optional escape
_cescape_utf8_to_str[39] = r"\'"  # optional escape

_cescape_utf8_to_str[34] = r'\"'  # necessary escape
_cescape_utf8_to_str[92] = r'\\'  # necessary escape

# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
# Control bytes and high-bit bytes become 3-digit octal escapes (\ooo).
_cescape_byte_to_str = ([r'\%03o' % i for i in xrange(0, 32)] +
                        [chr(i) for i in xrange(32, 127)] +
                        [r'\%03o' % i for i in xrange(127, 256)])
_cescape_byte_to_str[9] = r'\t'  # optional escape
_cescape_byte_to_str[10] = r'\n'  # optional escape
_cescape_byte_to_str[13] = r'\r'  # optional escape
_cescape_byte_to_str[39] = r"\'"  # optional escape

_cescape_byte_to_str[34] = r'\"'  # necessary escape
_cescape_byte_to_str[92] = r'\\'  # necessary escape
def CEscape(text, as_utf8):
  """Escape a bytes string for use in an ascii protocol buffer.

  text.encode('string_escape') does not seem to satisfy our needs as it
  encodes unprintable characters using two-digit hex escapes whereas our
  C++ unescaping function allows hex escapes to be any length.  So,
  "\0011".encode('string_escape') ends up being "\\x011", which will be
  decoded in C++ as a single-character string with char code 0x11.

  Args:
    text: A byte string to be escaped
    as_utf8: Specifies if result should be returned in UTF-8 encoding

  Returns:
    Escaped string
  """
  # PY3 hack: make Ord work for str and bytes:
  # //platforms/networking/data uses unicode here, hence basestring.
  # On PY2 (or PY3 str) elements are 1-char strings needing ord(); iterating
  # PY3 bytes already yields ints, so the identity lambda is used instead.
  Ord = ord if isinstance(text, basestring) else lambda x: x
  if as_utf8:
    # UTF-8 mode: pass high-bit bytes through, escape only quotes/backslash etc.
    return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
  # Byte mode: additionally octal-escape control and high-bit bytes.
  return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
# Matches a hex escape with exactly one hex digit (e.g. r"\xf"), capturing the
# run of preceding backslashes so escaped backslashes can be told apart.
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
# PY3 path only: re-escape bytes >= 127 as octal before unicode_escape decoding.
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
                           [r'\%03o' % i for i in range(127, 256)])


def CUnescape(text):
  """Unescape a text string with C-style escape sequences to UTF-8 bytes."""

  def ReplaceHex(m):
    # Only replace the match if the number of leading back slashes is odd. i.e.
    # the slash itself is not escaped.
    if len(m.group(1)) & 1:
      # Pad the single hex digit to two digits so 'string_escape' accepts it.
      return m.group(1) + 'x0' + m.group(2)
    return m.group(0)

  # This is required because the 'string_escape' encoding doesn't
  # allow single-digit hex escapes (like '\xf').
  result = _CUNESCAPE_HEX.sub(ReplaceHex, text)

  if sys.version_info[0] < 3:  ##PY25
##!PY25  if str is bytes:  # PY2
    return result.decode('string_escape')
  # PY3: round-trip through latin-1-ish codecs to apply the escapes and
  # return a bytes object.
  result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
  return (result.encode('ascii')  # Make it bytes to allow decode.
          .decode('unicode_escape')
          # Make it bytes again to return the proper type.
          .encode('raw_unicode_escape'))
| 43.297297
| 80
| 0.694757
|
4a091a2c9d83075d64f5396802741e79a9dda82e
| 1,014
|
py
|
Python
|
setup.py
|
ritiek/smartbytes-monitor
|
9f55d9455be211b7c9a59b4525af7d5f51c64d86
|
[
"MIT"
] | 3
|
2017-08-16T19:06:29.000Z
|
2020-04-12T01:03:31.000Z
|
setup.py
|
Gilles00/smartbytes-monitor
|
9f55d9455be211b7c9a59b4525af7d5f51c64d86
|
[
"MIT"
] | 2
|
2017-08-15T14:14:15.000Z
|
2017-08-17T13:23:41.000Z
|
setup.py
|
Gilles00/smartbytes-monitor
|
9f55d9455be211b7c9a59b4525af7d5f51c64d86
|
[
"MIT"
] | 2
|
2017-08-15T14:06:28.000Z
|
2020-04-12T01:04:16.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import smartbytes
# The long description shown on PyPI is taken verbatim from the README.
with open("README.rst", "r") as f:
    long_description = f.read()

setup(name='smartbytes-monitor',
      version=smartbytes.__version__,  # single-sourced from the package
      description=' A Python package to monitor your Airtel Broadband network stats, remaining data, days left, etc',
      long_description=long_description,
      author='Ritiek Malhotra',
      author_email='ritiekmalhotra123@gmail.com',
      packages = find_packages(),
      # Installs a `smartbytes` console command pointing at _command_line.
      entry_points={
          'console_scripts': [
              'smartbytes = smartbytes.smartbytes:_command_line',
          ]
      },
      url='https://www.github.com/ritiek/smartbytes-monitor',
      keywords=['airtel', 'stats', 'monitor', 'data-usage', 'smartbytes-tracker'],
      license='MIT',
      # Source tarball for the current version on GitHub.
      download_url='https://github.com/ritiek/smartbytes-monitor/archive/v' + smartbytes.__version__ + '.tar.gz',
      classifiers=[],
      install_requires=[
          'BeautifulSoup4',
      ]
)
| 33.8
| 117
| 0.653846
|
4a091af3409c20a9530ad9d701a3f8bb4f13daa1
| 4,452
|
py
|
Python
|
test/multimaster_tests.py
|
privalof/azure-cosmos-python
|
fa438e477cea85cf3f57e11f2ac9298b0c411e93
|
[
"MIT"
] | 88
|
2018-09-25T05:51:11.000Z
|
2022-03-30T06:56:26.000Z
|
test/multimaster_tests.py
|
privalof/azure-cosmos-python
|
fa438e477cea85cf3f57e11f2ac9298b0c411e93
|
[
"MIT"
] | 81
|
2018-09-25T21:42:01.000Z
|
2021-03-24T03:21:24.000Z
|
test/multimaster_tests.py
|
privalof/azure-cosmos-python
|
fa438e477cea85cf3f57e11f2ac9298b0c411e93
|
[
"MIT"
] | 85
|
2018-10-02T12:01:24.000Z
|
2022-01-07T19:27:08.000Z
|
import json
import os.path
import unittest
import uuid
import pytest
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.documents as documents
import azure.cosmos.errors as errors
import azure.cosmos.base as base
import azure.cosmos.constants as constants
import azure.cosmos.retry_options as retry_options
from azure.cosmos.http_constants import HttpHeaders, StatusCodes, SubStatusCodes
import azure.cosmos.retry_utility as retry_utility
import test.test_config as test_config
@pytest.mark.usefixtures("teardown")
class MultiMasterTests(unittest.TestCase):
    """Verify the tentative-writes request header is sent if and only if
    multiple writable locations are enabled on the account."""

    host = test_config._test_config.host
    masterKey = test_config._test_config.masterKey
    connectionPolicy = test_config._test_config.connectionPolicy
    # Mock bookkeeping: request count and per-request header flags.
    counter = 0
    last_headers = []

    def test_tentative_writes_header_present(self):
        """Header must be present when multi-write is enabled."""
        self.last_headers = []
        self.EnableMultipleWritableLocations = True
        self._validate_tentative_write_headers()

    def test_tentative_writes_header_not_present(self):
        """Header must be absent when multi-write is disabled."""
        self.last_headers = []
        self.EnableMultipleWritableLocations = False
        self._validate_tentative_write_headers()

    def _validate_tentative_write_headers(self):
        """Run a CRUD + stored-procedure sequence against a mocked transport
        and assert every recorded request carried (or lacked) the header."""
        # Patch the transport so _MockExecuteFunction records each request.
        self.OriginalExecuteFunction = retry_utility._ExecuteFunction
        retry_utility._ExecuteFunction = self._MockExecuteFunction

        connectionPolicy = MultiMasterTests.connectionPolicy
        connectionPolicy.UseMultipleWriteLocations = True
        client = cosmos_client.CosmosClient(MultiMasterTests.host, {'masterKey': MultiMasterTests.masterKey}, connectionPolicy)

        created_collection = test_config._test_config.create_multi_partition_collection_with_custom_pk_if_not_exist(client)

        document_definition = {'id': 'doc' + str(uuid.uuid4()),
                               'pk': 'pk',
                               'name': 'sample document',
                               'operation': 'insertion'}
        created_document = client.CreateItem(created_collection['_self'], document_definition)

        sproc_definition = {
            'id': 'sample sproc' + str(uuid.uuid4()),
            'serverScript': 'function() {var x = 10;}'
        }
        sproc = client.CreateStoredProcedure(created_collection['_self'], sproc_definition)

        client.ExecuteStoredProcedure(sproc['_self'], None, {'partitionKey': 'pk'})

        client.ReadItem(created_document['_self'], {'partitionKey': 'pk'})

        created_document['operation'] = 'replace'
        replaced_document = client.ReplaceItem(created_document['_self'], created_document)

        replaced_document['operation'] = 'upsert'
        upserted_document = client.UpsertItem(created_collection['_self'], replaced_document)

        client.DeleteItem(upserted_document['_self'], {'partitionKey': 'pk'})

        is_allow_tentative_writes_set = self.EnableMultipleWritableLocations == True

        # The indices below follow the exact request order of the sequence
        # above, as recorded by the mock.
        # Create Document - Makes one initial call to fetch collection
        self.assertEqual(self.last_headers[0], is_allow_tentative_writes_set)
        self.assertEqual(self.last_headers[1], is_allow_tentative_writes_set)
        # Create Stored procedure
        self.assertEqual(self.last_headers[2], is_allow_tentative_writes_set)
        # Execute Stored procedure
        self.assertEqual(self.last_headers[3], is_allow_tentative_writes_set)
        # Read Document
        self.assertEqual(self.last_headers[4], is_allow_tentative_writes_set)
        # Replace Document
        self.assertEqual(self.last_headers[5], is_allow_tentative_writes_set)
        # Upsert Document
        self.assertEqual(self.last_headers[6], is_allow_tentative_writes_set)
        # Delete Document
        self.assertEqual(self.last_headers[7], is_allow_tentative_writes_set)

        # Restore the real transport.
        retry_utility._ExecuteFunction = self.OriginalExecuteFunction

    def _MockExecuteFunction(self, function, *args, **kwargs):
        """Transport stub: the first call returns a fake database account
        (with the multi-write flag under test); later calls record whether
        the AllowTentativeWrites header was set, then delegate for real."""
        self.counter += 1
        if self.counter == 1:
            return {constants._Constants.EnableMultipleWritableLocations: self.EnableMultipleWritableLocations}, {}
        else:
            if len(args) > 0:
                # NOTE(review): assumes the request options dict (with its
                # 'headers') is always args[5] — verify against the
                # _ExecuteFunction signature of the pinned SDK version.
                self.last_headers.append(HttpHeaders.AllowTentativeWrites in args[5]['headers']
                                         and args[5]['headers'][HttpHeaders.AllowTentativeWrites] == 'true')
            return self.OriginalExecuteFunction(function, *args, **kwargs)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 42.4
| 127
| 0.708895
|
4a091afd386f8d588dcb3c43028a2f2810382894
| 1,032
|
py
|
Python
|
tracker/models/recipient/call_me_bot.py
|
jneuendorf/price_tracker
|
9cb6878613e7af52f049ddd80a7a5ae2ae21028b
|
[
"MIT"
] | null | null | null |
tracker/models/recipient/call_me_bot.py
|
jneuendorf/price_tracker
|
9cb6878613e7af52f049ddd80a7a5ae2ae21028b
|
[
"MIT"
] | null | null | null |
tracker/models/recipient/call_me_bot.py
|
jneuendorf/price_tracker
|
9cb6878613e7af52f049ddd80a7a5ae2ae21028b
|
[
"MIT"
] | null | null | null |
import logging
import urllib
from django.db import models
import requests
from tracker.models import NotificationRecipient
logger = logging.getLogger(__name__)
class CallMeBotRecipient(NotificationRecipient):
    """Notification recipient that sends WhatsApp messages via CallMeBot.

    Stores the recipient's phone number and CallMeBot API key; `notify`
    issues a GET request to the CallMeBot endpoint.
    """

    # Endpoint template; the message text is URL-encoded and appended
    # separately in `notify` (see the commented-out `text` placeholder).
    API_URL = (
        'https://api.callmebot.com/whatsapp.php'
        '?phone={phone}'
        '&apikey={api_key}'
        # '&text={message}'
    )

    phone = models.CharField(max_length=30, blank=False)
    api_key = models.CharField(max_length=7)

    def notify(self, page, previous_price, current_price):
        """Send a price-drop message for `page` and return the HTTP response.

        Args:
            page: tracked page object providing `name` and `url`.
            previous_price / current_price: numeric prices; the message
                reports the difference rounded to 2 decimals.

        Returns:
            The `requests.Response` from the CallMeBot API call.
        """
        message = (
            f'Page \'{page.name}\' got '
            f'{str(round(previous_price - current_price, 2))} cheaper. '
            f'Check {page.url}'
        )
        # NOTE: The '+' in the phone number must not be URL encoded.
        text = urllib.parse.urlencode({'text': message})
        url = (
            f'{self.API_URL.format(phone=self.phone, api_key=self.api_key)}'
            f'&{text}'
        )
        # IDIOM FIX: use lazy %-style logging args instead of an eager
        # f-string, so formatting is skipped when INFO is disabled.
        # NOTE(review): this logs the full URL including the API key —
        # consider redacting before shipping.
        logger.info('\t>> url = %s', url)
        return requests.get(url)
| 27.157895
| 76
| 0.60562
|
4a091b323578c0ba9ac8f37dabee147cd7fa1b00
| 2,302
|
py
|
Python
|
pygccxml/declarations/algorithm.py
|
RoyVorster/pygccxml
|
f487b1e26e88d521d623e6a587510b322f7d3dc7
|
[
"BSL-1.0"
] | 945
|
2015-01-09T00:43:52.000Z
|
2022-03-30T08:23:02.000Z
|
pygccxml/declarations/algorithm.py
|
RoyVorster/pygccxml
|
f487b1e26e88d521d623e6a587510b322f7d3dc7
|
[
"BSL-1.0"
] | 2,354
|
2015-02-04T21:54:21.000Z
|
2022-03-31T20:58:21.000Z
|
pygccxml/declarations/algorithm.py
|
RoyVorster/pygccxml
|
f487b1e26e88d521d623e6a587510b322f7d3dc7
|
[
"BSL-1.0"
] | 566
|
2015-01-04T14:26:57.000Z
|
2022-03-18T20:33:18.000Z
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
Define few unrelated algorithms that work on declarations.
"""
from . import declaration_utils
from . import runtime_errors
class match_declaration_t(object):
    """Callable matcher used by the declaration search algorithms.

    A declaration matches when it satisfies every criterion that was
    supplied at construction time:

    - `decl_type`: the declaration's class (e.g. :class:`class_t`)
    - `name`: the declaration's short name
    - `fullname`: the declaration's fully qualified name
    - `parent`: identity of the parent declaration
    """

    def __init__(
            self, decl_type=None,
            name=None, fullname=None, parent=None):
        self._decl_type = decl_type
        self.name = name
        self.fullname = fullname
        self.parent = parent

    def does_match_exist(self, inst):
        """Return True if `inst` satisfies all configured criteria.

        :param inst: declaration instance
        :type inst: :class:`declaration_t`

        :rtype: bool
        """
        if self._decl_type is not None and not isinstance(inst, self._decl_type):
            return False
        if self.name is not None and inst.name != self.name:
            return False
        if self.parent is not None and self.parent is not inst.parent:
            return False
        if self.fullname is not None:
            # An anonymous declaration can never match a full name.
            if not inst.name:
                return False
            if self.fullname != declaration_utils.full_name(inst):
                return False
        return True

    def __call__(self, inst):
        """Alias for :meth:`does_match_exist`, so the matcher is callable."""
        return self.does_match_exist(inst)
def apply_visitor(visitor, decl_inst):
    """Dispatch `visitor` to the visit_* method matching `decl_inst`'s class.

    Declaration classes follow the ``<kind>_t`` naming convention, so a
    declaration of class ``class_t`` is routed to ``visitor.visit_class()``.

    :param visitor: instance
    :type visitor: :class:`type_visitor_t` or :class:`decl_visitor_t`
    """
    handler_name = 'visit_' + type(decl_inst).__name__[:-2]  # strip '_t'
    try:
        handler = getattr(visitor, handler_name)
    except AttributeError:
        raise runtime_errors.visit_function_has_not_been_found_t(
            visitor, decl_inst)
    return handler()
| 26.159091
| 76
| 0.622068
|
4a091b5b3afefcda16740f52c97a3c237dc59b3d
| 370
|
py
|
Python
|
estimoji/model.py
|
kenkov/estimoji
|
2bdce4f8cd54a913cbcabc9600ff888d9b301902
|
[
"MIT"
] | 5
|
2018-05-16T18:40:31.000Z
|
2019-03-01T05:41:41.000Z
|
estimoji/model.py
|
kenkov/estimoji
|
2bdce4f8cd54a913cbcabc9600ff888d9b301902
|
[
"MIT"
] | null | null | null |
estimoji/model.py
|
kenkov/estimoji
|
2bdce4f8cd54a913cbcabc9600ff888d9b301902
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding: utf-8
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from estimoji.util import load_emoji_id
from estimoji.util import Tokenizer
import pickle
def load_model(modelname):
    """Load and return a pickled model (e.g. a sklearn Pipeline) from disk.

    BUG FIX: the original called ``pickle.load(open(...))`` without closing
    the file, leaking the handle; a ``with`` block guarantees closure.

    SECURITY NOTE: ``pickle.load`` can execute arbitrary code — only load
    model files from trusted sources.

    Args:
        modelname: path to the pickle file.

    Returns:
        The unpickled object.
    """
    with open(modelname, "rb") as model_file:
        return pickle.load(model_file)
| 26.428571
| 59
| 0.805405
|
4a091d9431dde532fc08a391b4b48a4bdd6bff88
| 8,501
|
py
|
Python
|
old/main_experiments.py
|
jsuagee/dcss-ai-wrapper
|
da446a88d4f2ca290fe8477773373ce7c9bb88a7
|
[
"MIT"
] | 29
|
2020-03-06T05:55:15.000Z
|
2022-01-24T19:10:23.000Z
|
old/main_experiments.py
|
jsuagee/dcss-ai-wrapper
|
da446a88d4f2ca290fe8477773373ce7c9bb88a7
|
[
"MIT"
] | 22
|
2020-04-06T19:17:32.000Z
|
2022-03-08T02:42:38.000Z
|
old/main_experiments.py
|
jsuagee/dcss-ai-wrapper
|
da446a88d4f2ca290fe8477773373ce7c9bb88a7
|
[
"MIT"
] | 10
|
2020-03-06T03:04:35.000Z
|
2022-03-24T18:08:17.000Z
|
import asyncio
import websockets
import zlib
import json
import time
import logging
import sys
import threading
import signal
import datetime
# custom
import actions
import state
import simple_planning_agent
import relational_learning_agent
from exploratory_planning_agent import ExploratoryPlanningAgent
logging.basicConfig(level=logging.INFO)
# Address of the local DCSS WebTiles server.
server_uri = 'ws://localhost:8080/socket'
### CHANGE USERNAME AND PASSWORD HERE ###
login_msg = {'msg':'login',
             'username':'midca',
             'password':'meta'}
# The server sends raw (headerless) deflate data: negative wbits tells zlib
# to decode a stream without the zlib header/checksum.
decomp = zlib.decompressobj(-zlib.MAX_WBITS)
# Messages for permanently quitting a session
initiate_quit_msg_1 = {'msg':'key', 'keycode':17} # Equivalent to Ctrl-q
# NOTE(review): keycodes 21/11 appear to be the confirmation keystrokes for
# the quit prompt -- confirm against the WebTiles protocol.
confirm_quit_msg_2 = {'msg':'key', 'keycode':21}
confirm_quit_msg_3 = {'msg':'key', 'keycode':11}
confirm_quit_with_yes_4 = {'msg':'input', 'text':'yes\r'}
confirm_quit_clear_menu_via_enter_5 = {'msg':'input', 'text':'\r'}
confirm_quit_clear_menu_via_enter_6 = {'msg':'input', 'text':'\r'}
# Sent in order by WebTilesConnection.end_session_and_quit_game().
quit_messages_sequence = [initiate_quit_msg_1,
                          confirm_quit_msg_2,
                          confirm_quit_msg_3,
                          confirm_quit_with_yes_4,
                          confirm_quit_clear_menu_via_enter_5,
                          confirm_quit_clear_menu_via_enter_6]
# WebTiles refers to Dungeon Crawl Stone Soup (tiles version) played
# via a webserver
# WebTiles refers to Dungeon Crawl Stone Soup (tiles version) played
# via a webserver
class WebTilesConnection():
    """Async client for one DCSS WebTiles session.

    Logs in, selects/creates a sprint game, then loops: ask the injected
    ``ai`` object for an action, send it, and forward server responses
    back to the agent.
    """
    # Class-level (shared) holder for the most recent raw server message.
    last_msg_from_server = {}

    def __init__(self, ai=None):
        # ai: agent exposing add_server_message(), next_action(),
        # ready_to_delete_game() and save_data().
        self.websocket = None
        self.logged_in = False
        self.ai = ai
        self.recv_counter = 0
        self.CREATE_NEW_GAME = False

    def pretty_print_server_msg(self, msg_from_server):
        """Debug helper: print every sub-message of a server payload."""
        try:
            for msg in msg_from_server['msgs']:
                print("--> New Message from Server <--")
                #ignore_list = ['cell','html','content','skip']
                ignore_list = []
                skip_bc_ignore = False
                for key in msg.keys():
                    for ignore in ignore_list:
                        if ignore in key:
                            skip_bc_ignore = True
                    if skip_bc_ignore:
                        skip_bc_ignore = False
                    else:
                        if key == 'cell':
                            # 'cell' values are lists; print one entry per line.
                            for i in msg[key]:
                                print(" " + str(i))
                        else:
                            print(str(key)+":"+str(msg[key]))
        except Exception as e:
            # NOTE(review): print() does not do %-interpolation, so this
            # emits the format string and args separately; was likely meant
            # to be logging.warning(...).
            print("Ignoring unparseable JSON (error: %s): %s.", e.args[0], msg_from_server)

    async def get_all_server_messages(self):
        """Drain pending server messages until a 0.5 s receive timeout.

        Each decoded JSON payload is forwarded to the agent via
        ai.add_server_message(); the timeout is interpreted as "server is
        now ready for input".
        """
        i = 0
        SERVER_READY_FOR_INPUT = False
        while not SERVER_READY_FOR_INPUT:
            try:
                future = self.websocket.recv()
                #print("** AWAITING ON WEBSOCKET RECV in loop, i=" + str(i))
                data_recv = await asyncio.wait_for(future, timeout=0.5)
                #print("** POST-AWAITING ON WEBSOCKET RECV in loop, i=" + str(i))
                # Append the deflate flush tail so the raw stream decodes.
                data_recv += bytes([0, 0, 255, 255])
                json_message = decomp.decompress(data_recv)
                json_message = json_message.decode("utf-8")
                msg_from_server = json.loads(json_message)
                logging.debug("i=" + str(i) + "Received Message:\n" + str(msg_from_server))
                if self.ai:
                    self.ai.add_server_message(msg_from_server)
                # {'msgs': [{'mode': 1, 'msg': 'input_mode'}]}
                # if 'msgs' in msg_from_server.keys():
                #     for msg in msg_from_server['msgs']:
                #         if 'msg' in msg.keys() and 'mode' in msg.keys():
                #             if msg['msg'] == 'input_mode' and msg['mode'] == 1:
                #                 SERVER_READY_FOR_INPUT = True
                #                 print("Server is now ready for input!")
            except ValueError as e:
                logging.warning("i="+str(i)+"Ignoring unparseable JSON (error: %s): %s.", e.args[0], json_message)
            except asyncio.TimeoutError:
                # server is now ready for input
                SERVER_READY_FOR_INPUT = True
            i+=1

    async def send_and_receive(self, message):
        """Send *message* as JSON, then drain all server responses.

        NOTE(review): returns None (get_all_server_messages has no return
        value) -- callers that bind its result always get None.
        """
        # send data to server
        await self.websocket.send(json.dumps(message))
        # wait for server to get back
        await self.get_all_server_messages()

    async def end_session_and_quit_game(self):
        '''
        Sends the ctrl-q signal to the webserver to permamently end the game.
        :return:
        '''
        for quit_msg in quit_messages_sequence:
            await self.send_and_receive(quit_msg)

    async def run(self):
        """Full session: connect, log in, create a sprint game, play, quit.

        NOTE(review): the time.sleep() calls below block the event loop;
        asyncio.sleep() would be the non-blocking equivalent.
        """
        # connect
        logging.debug("Connecting to URI "+str(server_uri)+ " ...")
        self.websocket = await websockets.connect(server_uri)
        logging.info("Connected to webserver:"+str(self.websocket and self.websocket.open))
        # login
        logging.debug("Sending login message...")
        await self.send_and_receive(login_msg)
        # break apart msg from server
        #msgs = []
        #if 'msgs' in msg_from_server.keys():
        #    msgs = msg_from_server['msgs']
        # send pong
        #for msg_i in msgs:
        #    if msg_i['msg'] == 'ping':
        #        logging.debug("Received message ping from server, about to send pong")
        await self.websocket.send(json.dumps({'msg' : 'pong'}))
        time.sleep(0.5)
        # choose the game mode
        game_id = 'sprint-web-trunk'
        play_game_msg = {'msg':'play', 'game_id':game_id}
        await self.send_and_receive(play_game_msg)
        time.sleep(0.5)
        # create a new game if needed (choose character, background, etc)
        sprint_select = {'text':'l','msg':'input'} # this chooses the sprint level
        #species_select = {'text':'b','msg':'input'} # older version of crawl
        species_select = {'text': 'b', 'msg': 'input'} # use this for most recent version of crawl
        background_select = {'text':'h','msg':'input'}
        weapon_select = {'text':'a','msg':'input'}
        game_creation_command_list = [sprint_select,species_select,background_select,weapon_select]
        for gm_create_cmd_msg in game_creation_command_list:
            time.sleep(1) # give some delay
            logging.debug("Sending game creation selection command: " + str(gm_create_cmd_msg))
            await self.send_and_receive(gm_create_cmd_msg)
        time.sleep(1)
        # game loop
        while True:
            next_agent_action = self.ai.next_action()
            # NOTE(review): send_and_receive returns None, so this print
            # always shows "And received: None".
            msg_from_server = await self.send_and_receive(next_agent_action)
            print("Sent Action "+str(next_agent_action)+ " And received:\n\t"+str(msg_from_server))
            if self.ai.ready_to_delete_game():
                self.ai.save_data()
                await self.end_session_and_quit_game()
                break
        await self.end_session_and_quit_game()
def single_run(action_selection_type_str=None):
    """Run one complete game session with a RelationalLearningAgent.

    action_selection_type_str selects the agent's action-selection policy
    (e.g. 'explore', 'random', 'human').
    """
    agent = relational_learning_agent.RelationalLearningAgent()
    # Data file name: "<agent>-YYYY-mm-dd--HH-MM-SS".
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
    agent.set_data_filename(str(agent) + '-' + timestamp)
    agent.set_action_selection(type_str=action_selection_type_str)
    connection = WebTilesConnection(ai=agent)

    def _graceful_exit(signum, frame):
        # Ctrl-C handler: persist agent data and quit the game before exiting.
        try:
            agent.save_data()
            connection.end_session_and_quit_game()
            sys.exit()
        except Exception as e:
            print("Encountered error {} - data may not have been saved, exiting now".format(e))
            sys.exit()

    # gracefully save data when exiting via ctrl-c
    signal.signal(signal.SIGINT, _graceful_exit)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(connection.run())
    finally:
        loop.close()
if __name__ == "__main__":
    # Run one full session per action-selection strategy, back to back,
    # pausing between rounds to let the server settle.
    agent_action_selection_types = ['explore','random','human']
    for action_type_str in agent_action_selection_types:
        single_run(action_type_str)
        print("Sleeping for 10 seconds before next round starts....")
        time.sleep(10)
| 36.021186
| 114
| 0.590166
|
4a091ddaffee13001c0e702f1a90059be29f0ec1
| 3,068
|
py
|
Python
|
pincho/settings.py
|
rafen/pincho
|
fbc697f94838017e73832c046b940a416ae794bc
|
[
"MIT"
] | null | null | null |
pincho/settings.py
|
rafen/pincho
|
fbc697f94838017e73832c046b940a416ae794bc
|
[
"MIT"
] | null | null | null |
pincho/settings.py
|
rafen/pincho
|
fbc697f94838017e73832c046b940a416ae794bc
|
[
"MIT"
] | null | null | null |
"""
Django settings for pincho project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from datetime import timedelta
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$sjad!_runrh0v@5n-oa4c(_b=1v@@^7h)q2x5382f2nk2ma!f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djcelery',
'button',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pincho.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pincho.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Buenos_Aires'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# NOTE: STATIC_URL was previously assigned twice with the same value;
# the redundant duplicate assignment has been removed.
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/static/'

# User-uploaded media files.
MEDIA_URL = '/media/'
MEDIA_ROOT = '/var/www/media/'
BROKER_URL = 'amqp://pi:pi@localhost:5672/myvhost'
CELERYBEAT_SCHEDULE = {
'temp-every-5-minutes': {
'task': 'button.tasks.temp',
'schedule': timedelta(minutes=5),
'args': None
},
}
CELERY_TIMEZONE = 'UTC'
| 24.943089
| 71
| 0.69296
|
4a091f9a99b10cf84c8f97395b4ef0c0b6e38b8a
| 637
|
py
|
Python
|
meanMedianMinMax.py
|
mcsimenc/GenomicsScripts
|
a9c169ef30ddbdedc8537ad20627ba0df322db1f
|
[
"MIT"
] | 1
|
2021-11-19T10:30:16.000Z
|
2021-11-19T10:30:16.000Z
|
meanMedianMinMax.py
|
mcsimenc/GenomicsScripts
|
a9c169ef30ddbdedc8537ad20627ba0df322db1f
|
[
"MIT"
] | 1
|
2021-11-01T19:06:36.000Z
|
2021-11-01T19:06:36.000Z
|
meanMedianMinMax.py
|
mcsimenc/GenomicsScripts
|
a9c169ef30ddbdedc8537ad20627ba0df322db1f
|
[
"MIT"
] | 1
|
2021-07-11T18:32:19.000Z
|
2021-07-11T18:32:19.000Z
|
#!/usr/bin/env python3
import sys
import statistics
def help():
    # Print the usage text to stdout and exit with status 0.
    # NOTE(review): this shadows the builtin help(); kept as-is since the
    # script relies on it for its -h flag.
    print('''
Usage:
------------
meanMedianMinMax < <path>
Description:
------------
Calculates min, max, mean, and median from a list of numbers
given on stdin and outputs to stdout.
''')
    sys.exit(0)
# Show usage and exit if -h appears anywhere on the command line.
if '-h' in sys.argv:
    help()
# Read one number per line from stdin.
# NOTE(review): a blank line raises ValueError in float(); input is assumed
# to be strictly one numeric value per line.
values = []
for line in sys.stdin:
    values.append(float(line.strip()))
# Emit one tab-separated "statistic<TAB>value" line per metric.
print('min\t{0}'.format(min(values)))
print('max\t{0}'.format(max(values)))
print('mean\t{0}'.format(statistics.mean(values)))
print('median\t{0}'.format(statistics.median(values)))
print('total\t{0}'.format(sum(values)))
| 20.548387
| 65
| 0.618524
|
4a09206589721a826743fd69903a3a5f3dd453ee
| 4,172
|
py
|
Python
|
Forest-Type-Cover-Prediction/code.py
|
Shamika14/ga-learner-dsmp-repo
|
51f01820e68813e8e074c8ca926e984e08368ac3
|
[
"MIT"
] | null | null | null |
Forest-Type-Cover-Prediction/code.py
|
Shamika14/ga-learner-dsmp-repo
|
51f01820e68813e8e074c8ca926e984e08368ac3
|
[
"MIT"
] | null | null | null |
Forest-Type-Cover-Prediction/code.py
|
Shamika14/ga-learner-dsmp-repo
|
51f01820e68813e8e074c8ca926e984e08368ac3
|
[
"MIT"
] | null | null | null |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# read the dataset
dataset= pd.read_csv(path)
# look at the first five columns
print(dataset.head())
# Check if there's any column which is not useful and remove it like the column id
dataset.drop('Id',axis=1, inplace=True)
# check the statistical description
print(dataset.describe())
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns
#number of attributes (exclude target)
features = dataset.drop('Cover_Type',axis=1)
size = len(features.columns)
#x-axis has target attribute to distinguish between classes
x = dataset['Cover_Type']
#y-axis shows values of an attribute
y = dataset[features.columns]
#Plot violin for all attributes
for i in features.columns:
sns.violinplot(x,y[i])
plt.show()
# --------------
# Correlation analysis over the continuous attributes.
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
# The first ten columns are the non-binary (continuous) features.
subset_train = dataset.iloc[:,0:10]
data_corr = subset_train.corr()
# sns.heatmap(data_corr)
# Flatten the correlation matrix to (col_a, col_b) -> r pairs, sorted by r.
correlation = data_corr.unstack().sort_values(kind='quicksort')
# Keep only strong correlations; drop self-correlations (r == 1.0).
corr_var_list = correlation[(correlation > upper_threshold) | (correlation < lower_threshold)]
corr_var_list = corr_var_list[corr_var_list != 1.0]
print(corr_var_list)
# Code ends here
# --------------
#Import libraries
from sklearn import cross_validation
from sklearn.preprocessing import StandardScaler
import numpy as np
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X = dataset.drop('Cover_Type',axis=1)
Y = dataset['Cover_Type']
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.2,random_state=0)
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
X_train_non_cat = X_train.iloc[:,0:10]
X_test_non_cat = X_test.iloc[:,0:10]
scaler = StandardScaler()
#Standardized
#Apply transform only for non-categorical data
X_train_temp = scaler.fit_transform(X_train_non_cat)
X_test_temp = scaler.fit_transform(X_test_non_cat)
#Concatenate non-categorical data and categorical
X_train1 = numpy.concatenate((X_train_temp,X_train.iloc[:,10:].astype('object')),axis=1)
X_test1 = numpy.concatenate((X_test_temp,X_test.iloc[:,10:].astype('object')),axis=1)
scaled_features_train_df = pd.DataFrame(data = X_train1, index=X_train.index, columns=X_train.columns)
scaled_features_test_df = pd.DataFrame(data = X_test1, index=X_test.index, columns=X_test.columns)
print(scaled_features_test_df.head())
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
# Write your solution here:
skb = SelectPercentile(score_func=f_classif, percentile=20)
predictors = skb.fit_transform(X_train1,Y_train)
scores = list(skb.scores_)
Features = scaled_features_train_df.columns
dataframe = pd.DataFrame({'Features' : Features,
'scores' : scores})
dataframe = dataframe.sort_values(by = ['scores'], ascending=False)
top_k_predictors = list(dataframe['Features'][:predictors.shape[1]])
print(top_k_predictors)
# --------------
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
clf = OneVsRestClassifier(estimator=LogisticRegression())
clf1 = OneVsRestClassifier(estimator=LogisticRegression())
model_fit_all_features = clf1.fit(X_train,Y_train)
predictions_all_features = model_fit_all_features.predict(X_test)
score_all_features = accuracy_score(Y_test,predictions_all_features)
model_fit_top_features = clf.fit(scaled_features_train_df[top_k_predictors],Y_train)
predictions_top_features = model_fit_top_features.predict(scaled_features_test_df[top_k_predictors])
score_top_features = accuracy_score(Y_test,predictions_top_features)
print(score_all_features)
print(score_top_features)
| 28.380952
| 138
| 0.774449
|
4a0920c7e2f952f7246c3a5209b2da3fdfd149aa
| 4,475
|
py
|
Python
|
agent_stable_baselines/stable_baselines/acktr/policies.py
|
Jannkar/doom_actionspace
|
37663341f60a05943202b77394a4203d070fad95
|
[
"MIT"
] | 1
|
2020-04-24T13:54:01.000Z
|
2020-04-24T13:54:01.000Z
|
agent_stable_baselines/stable_baselines/acktr/policies.py
|
Jannkar/doom_actionspace
|
37663341f60a05943202b77394a4203d070fad95
|
[
"MIT"
] | null | null | null |
agent_stable_baselines/stable_baselines/acktr/policies.py
|
Jannkar/doom_actionspace
|
37663341f60a05943202b77394a4203d070fad95
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from stable_baselines.acktr.utils import dense, kl_div
import stable_baselines.common.tf_util as tf_util
class GaussianMlpPolicy(object):
    """Diagonal-Gaussian policy: a 2x64 tanh MLP outputs the mean, with a
    state-independent learned log-std. Built as a TF1 static graph."""

    def __init__(self, ob_dim, ac_dim):
        """
        Create a gaussian MLP policy

        :param ob_dim: (int) observation dimension (the placeholder is
            2 * ob_dim wide -- presumably obs concatenated with something;
            TODO confirm against the caller)
        :param ac_dim: (int) action dimension
        """
        # Here we'll construct a bunch of expressions, which will be used in two places:
        # (1) When sampling actions
        # (2) When computing loss functions, for the policy update
        # Variables specific to (1) have the word "sampled" in them,
        # whereas variables specific to (2) have the word "old" in them
        ob_no = tf.placeholder(tf.float32, shape=[None, ob_dim * 2], name="ob")  # batch of observations
        oldac_na = tf.placeholder(tf.float32, shape=[None, ac_dim], name="ac")  # batch of actions previous actions
        # batch of actions previous action distributions
        oldac_dist = tf.placeholder(tf.float32, shape=[None, ac_dim * 2], name="oldac_dist")
        adv_n = tf.placeholder(tf.float32, shape=[None], name="adv")  # advantage function estimate
        wd_dict = {}
        # Two 64-unit tanh hidden layers feeding a linear mean head.
        layer_1 = tf.nn.tanh(dense(ob_no, 64, "h1",
                                   weight_init=tf_util.normc_initializer(1.0),
                                   bias_init=0.0, weight_loss_dict=wd_dict))
        layer_2 = tf.nn.tanh(dense(layer_1, 64, "h2",
                                   weight_init=tf_util.normc_initializer(1.0),
                                   bias_init=0.0, weight_loss_dict=wd_dict))
        mean_na = dense(layer_2, ac_dim, "mean", weight_init=tf_util.normc_initializer(0.1),
                        bias_init=0.0, weight_loss_dict=wd_dict)  # Mean control output
        self.wd_dict = wd_dict
        # Variance on outputs
        self.logstd_1a = logstd_1a = tf.get_variable("logstd", [ac_dim], tf.float32, tf.zeros_initializer())
        logstd_1a = tf.expand_dims(logstd_1a, 0)
        std_1a = tf.exp(logstd_1a)
        # Broadcast the single std row across the batch.
        std_na = tf.tile(std_1a, [tf.shape(mean_na)[0], 1])
        # ac_dist layout: first ac_dim columns = mean, last ac_dim = std.
        ac_dist = tf.concat([tf.reshape(mean_na, [-1, ac_dim]), tf.reshape(std_na, [-1, ac_dim])], 1)
        # This is the sampled action we'll perform.
        sampled_ac_na = tf.random_normal(tf.shape(ac_dist[:, ac_dim:])) * ac_dist[:, ac_dim:] + ac_dist[:, :ac_dim]
        # Diagonal-Gaussian log-density of the sampled action.
        logprobsampled_n = - tf.reduce_sum(tf.log(ac_dist[:, ac_dim:]), axis=1) - 0.5 * tf.log(
            2.0 * np.pi) * ac_dim - 0.5 * tf.reduce_sum(
            tf.square(ac_dist[:, :ac_dim] - sampled_ac_na) / (tf.square(ac_dist[:, ac_dim:])),
            axis=1)  # Logprob of sampled action
        logprob_n = - tf.reduce_sum(tf.log(ac_dist[:, ac_dim:]), axis=1) - 0.5 * tf.log(
            2.0 * np.pi) * ac_dim - 0.5 * tf.reduce_sum(
            tf.square(ac_dist[:, :ac_dim] - oldac_na) / (tf.square(ac_dist[:, ac_dim:])),
            axis=1)  # Logprob of previous actions under CURRENT policy (whereas oldlogprob_n is under OLD policy)
        kl_loss = tf.reduce_mean(kl_div(oldac_dist, ac_dist, ac_dim))
        # kl = .5 * tf.reduce_mean(tf.square(logprob_n - oldlogprob_n))
        # Approximation of KL divergence between old policy used to generate actions,
        # and new policy used to compute logprob_n
        surr = - tf.reduce_mean(adv_n * logprob_n)  # Loss function that we'll differentiate to get the policy gradient
        surr_sampled = - tf.reduce_mean(logprob_n)  # Sampled loss of the policy
        # Generate a new action and its logprob
        self._act = tf_util.function([ob_no], [sampled_ac_na, ac_dist, logprobsampled_n])
        # self.compute_kl = U.function([ob_no, oldac_na, oldlogprob_n], kl)
        # Compute (approximate) KL divergence between old policy and new policy
        self.compute_kl = tf_util.function([ob_no, oldac_dist], kl_loss)
        # Input and output variables needed for computing loss
        self.update_info = ((ob_no, oldac_na, adv_n), surr, surr_sampled)
        tf_util.initialize()  # Initialize uninitialized TF variables

    def act(self, obs):
        """
        get the action from an observation

        :param obs: ([float]) observation
        :return: ([float], [float], [float]) action, action_proba, logp
        """
        # obs[None] adds the batch dimension expected by the placeholders.
        action, ac_dist, logp = self._act(obs[None])
        return action[0], ac_dist[0], logp[0]
| 58.881579
| 120
| 0.619218
|
4a0920da2e09d1448e2ceaf1e7a6e60797a1bc83
| 292
|
py
|
Python
|
server_py/flatgov/common/management/commands/process_bill_meta.py
|
aih/BillMap
|
d130b1396cb25b415cd7d9ea7389ad558a34eec1
|
[
"CC0-1.0"
] | 2
|
2022-01-18T14:55:52.000Z
|
2022-01-31T03:38:39.000Z
|
server_py/flatgov/common/management/commands/process_bill_meta.py
|
aih/FlatGov
|
8201ef1813bbc062841421017f492e877f75a5f8
|
[
"CC0-1.0"
] | 321
|
2020-09-01T16:20:35.000Z
|
2021-07-03T06:42:34.000Z
|
server_py/flatgov/common/management/commands/process_bill_meta.py
|
aih/FlatGov
|
8201ef1813bbc062841421017f492e877f75a5f8
|
[
"CC0-1.0"
] | 1
|
2022-03-31T15:02:49.000Z
|
2022-03-31T15:02:49.000Z
|
from django.core.management.base import BaseCommand
from common.process_bill_meta import makeAndSaveTitlesIndex
class Command(BaseCommand):
    """Django management command that builds and saves the bill-titles index."""
    help = 'combines data from the results of the bill data, to get titles'

    def handle(self, *args, **options):
        # All the work is delegated to common.process_bill_meta.
        makeAndSaveTitlesIndex()
| 29.2
| 75
| 0.763699
|
4a09223260817161a73b404d2ac214e64a889b03
| 11,678
|
py
|
Python
|
bp_includes/external/httpagentparser/__init__.py
|
chuycepeda/mboilerplate
|
1fad3f10d491e5f6e051ff615370073e38dba1fe
|
[
"MIT"
] | 5
|
2016-02-23T17:18:16.000Z
|
2016-08-05T22:26:29.000Z
|
bp_includes/external/httpagentparser/__init__.py
|
CodeandoMonterrey/mboilerplate
|
2ddbb408229cb9164895a5de571a39cc58042d85
|
[
"MIT"
] | 1
|
2016-03-19T02:02:32.000Z
|
2016-05-09T05:43:36.000Z
|
bp_includes/external/httpagentparser/__init__.py
|
CodeandoMonterrey/mboilerplate
|
2ddbb408229cb9164895a5de571a39cc58042d85
|
[
"MIT"
] | 9
|
2016-02-19T18:56:18.000Z
|
2019-01-13T16:50:05.000Z
|
"""
Extract client information from http user agent
The module does not try to detect all capabilities of browser in current form (it can easily be extended though).
Tries to
* be fast
* very easy to extend
* reliable enough for practical purposes
* assist python web apps to detect clients.
"""
class DetectorsHub(dict):
    """Registry mapping info types ('os', 'dist', 'flavor', 'browser') to
    lists of detector instances, auto-populated from this module's globals."""
    # NOTE(review): class-level list -- register() mutates it in place, so
    # the ordering is shared across all DetectorsHub instances.
    _known_types = ['os', 'dist', 'flavor', 'browser']

    def __init__(self, *args, **kw):
        dict.__init__(self, *args, **kw)
        for typ in self._known_types:
            self.setdefault(typ, [])
        self.registerDetectors()

    def register(self, detector):
        # New info types are inserted into the iteration order at
        # position detector.order (0 = highest priority).
        if detector.info_type not in self._known_types:
            self[detector.info_type] = [detector]
            self._known_types.insert(detector.order, detector.info_type)
        else:
            self[detector.info_type].append(detector)

    def __iter__(self):
        # Iterate info types in priority order, not dict order.
        return iter(self._known_types)

    def registerDetectors(self):
        # Instantiate every DetectorBase subclass defined at module level
        # and register those that declare can_register.
        detectors = [v() for v in globals().values() if DetectorBase in getattr(v, '__mro__', [])]
        for d in detectors:
            if d.can_register:
                self.register(d)
class DetectorBase(object):
name = "" # "to perform match in DetectorsHub object"
info_type = "override me"
result_key = "override me"
order = 10 # 0 is highest
look_for = "string to look for"
skip_if_found = [] # strings if present stop processin
can_register = False
version_markers = [("/", " ")]
allow_space_in_version = False
_suggested_detectors = None
platform = None
def __init__(self):
if not self.name:
self.name = self.__class__.__name__
self.can_register = (self.__class__.__dict__.get('can_register', True))
def detect(self, agent, result):
# -> True/None
if self.checkWords(agent):
result[self.info_type] = dict(name=self.name)
version = self.getVersion(agent)
if version:
result[self.info_type]['version'] = version
if self.platform:
result['platform'] = {'name': self.platform, 'version': version}
return True
def checkWords(self, agent):
# -> True/None
for w in self.skip_if_found:
if w in agent:
return False
if isinstance(self.look_for, (tuple, list)):
for word in self.look_for:
if word in agent:
return True
elif self.look_for in agent:
return True
def getVersion(self, agent):
"""
=> version string /None
"""
version_markers = self.version_markers if \
isinstance(self.version_markers[0], (list, tuple)) else [self.version_markers]
version_part = agent.split(self.look_for, 1)[-1]
for start, end in version_markers:
if version_part.startswith(start) and end in version_part:
version = version_part[1:]
if end: # end could be empty string
version = version.split(end)[0]
if not self.allow_space_in_version:
version = version.split()[0]
return version
class OS(DetectorBase):
info_type = "os"
can_register = False
version_markers = [";", " "]
allow_space_in_version = True
platform = None
class Dist(DetectorBase):
info_type = "dist"
can_register = False
platform = None
class Flavor(DetectorBase):
info_type = "flavor"
can_register = False
platform = None
class Browser(DetectorBase):
info_type = "browser"
can_register = False
class Firefox(Browser):
look_for = "Firefox"
version_markers = [('/', '')]
skip_if_found = ["SeaMonkey"]
class SeaMonkey(Browser):
look_for = "SeaMonkey"
version_markers = [('/', '')]
class Konqueror(Browser):
look_for = "Konqueror"
version_markers = ["/", ";"]
class OperaMobile(Browser):
look_for = "Opera Mobi"
name = "Opera Mobile"
def getVersion(self, agent):
try:
look_for = "Version"
return agent.split(look_for)[1][1:].split(' ')[0]
except:
look_for = "Opera"
return agent.split(look_for)[1][1:].split(' ')[0]
class Opera(Browser):
look_for = "Opera"
def getVersion(self, agent):
try:
look_for = "Version"
return agent.split(look_for)[1][1:].split(' ')[0]
except:
look_for = "Opera"
return agent.split(look_for)[1][1:].split(' ')[0]
class OperaNew(Browser):
"""
Opera after version 15
"""
name = "Opera"
look_for = "OPR"
version_markers = [('/', '')]
class Netscape(Browser):
look_for = "Netscape"
version_markers = [("/", '')]
class Trident(Browser):
look_for = "Trident"
skip_if_found = ["MSIE", "Opera"]
name = "Microsoft Internet Explorer"
version_markers = ["/", ";"]
trident_to_ie_versions = {
'4.0': '8.0',
'5.0': '9.0',
'6.0': '10.0',
'7.0': '11.0',
}
def getVersion(self, agent):
return self.trident_to_ie_versions.get(super(Trident, self).getVersion(agent))
class MSIE(Browser):
look_for = "MSIE"
skip_if_found = ["Opera"]
name = "Microsoft Internet Explorer"
version_markers = [" ", ";"]
class Galeon(Browser):
look_for = "Galeon"
class WOSBrowser(Browser):
look_for = "wOSBrowser"
def getVersion(self, agent):
pass
class Safari(Browser):
look_for = "Safari"
def checkWords(self, agent):
unless_list = ["Chrome", "OmniWeb", "wOSBrowser"]
if self.look_for in agent:
for word in unless_list:
if word in agent:
return False
return True
def getVersion(self, agent):
if "Version/" in agent:
return agent.split('Version/')[-1].split(' ')[0].strip()
if "Safari/" in agent:
return agent.split('Safari/')[-1].split(' ')[0].strip()
else:
return agent.split('Safari ')[-1].split(' ')[0].strip() # Mobile Safari
class Linux(OS):
look_for = 'Linux'
platform = 'Linux'
def getVersion(self, agent):
pass
class Blackberry(OS):
look_for = 'BlackBerry'
platform = 'BlackBerry'
def getVersion(self, agent):
pass
class BlackberryPlaybook(Dist):
look_for = 'PlayBook'
platform = 'BlackBerry'
def getVersion(self, agent):
pass
class iOS(OS):
look_for = ('iPhone', 'iPad')
class iPhone(Dist):
look_for = 'iPhone'
platform = 'iOS'
class IPad(Dist):
look_for = 'iPad; CPU OS'
version_markers = [(' ', ' ')]
allow_space_in_version = False
platform = 'iOS'
class Macintosh(OS):
look_for = 'Macintosh'
def getVersion(self, agent):
pass
class MacOS(Flavor):
look_for = 'Mac OS'
platform = 'Mac OS'
skip_if_found = ['iPhone', 'iPad']
def getVersion(self, agent):
version_end_chars = [';', ')']
part = agent.split('Mac OS')[-1].strip()
for c in version_end_chars:
if c in part:
version = part.split(c)[0]
return version.replace('_', '.')
return ''
class Windows(OS):
look_for = 'Windows'
platform = 'Windows'
win_versions = {
"NT 6.3": "8.1",
"NT 6.2": "8",
"NT 6.1": "7",
"NT 6.0": "Vista",
"NT 5.2": "Server 2003 / XP x64",
"NT 5.1": "XP",
"NT 5.01": "2000 SP1",
"NT 5.0": "2000",
"98; Win 9x 4.90": "Me"
}
def getVersion(self, agent):
v = agent.split('Windows')[-1].split(';')[0].strip()
if ')' in v:
v = v.split(')')[0]
v = self.win_versions.get(v, v)
return v
class Ubuntu(Dist):
look_for = 'Ubuntu'
version_markers = ["/", " "]
class Debian(Dist):
look_for = 'Debian'
version_markers = ["/", " "]
class Chrome(Browser):
look_for = "Chrome"
version_markers = ["/", " "]
skip_if_found = ["OPR"]
def getVersion(self, agent):
part = agent.split(self.look_for + self.version_markers[0])[-1]
version = part.split(self.version_markers[1])[0]
if '+' in version:
version = part.split('+')[0]
return version.strip()
class ChromeiOS(Browser):
look_for = "CriOS"
version_markers = ["/", " "]
class ChromeOS(OS):
look_for = "CrOS"
platform = ' ChromeOS'
version_markers = [" ", " "]
def getVersion(self, agent):
version_markers = self.version_markers
if self.look_for + '+' in agent:
version_markers = ['+', '+']
return agent.split(self.look_for + version_markers[0])[-1].split(version_markers[1])[1].strip()[:-1]
class Android(Dist):
look_for = 'Android'
platform = 'Android'
def getVersion(self, agent):
return agent.split(self.look_for)[-1].split(';')[0].strip()
class WebOS(Dist):
look_for = 'hpwOS'
def getVersion(self, agent):
return agent.split('hpwOS/')[-1].split(';')[0].strip()
class prefs: # experimental
os = dict(
Linux=dict(dict(browser=[Firefox, Chrome], dist=[Ubuntu, Android])),
BlackBerry=dict(dist=[BlackberryPlaybook]),
Macintosh=dict(flavor=[MacOS]),
Windows=dict(browser=[MSIE, Firefox]),
ChromeOS=dict(browser=[Chrome]),
Debian=dict(browser=[Firefox])
)
dist = dict(
Ubuntu=dict(browser=[Firefox]),
Android=dict(browser=[Safari]),
IPhone=dict(browser=[Safari]),
IPad=dict(browser=[Safari]),
)
flavor = dict(
MacOS=dict(browser=[Opera, Chrome, Firefox, MSIE])
)
detectorshub = DetectorsHub()
def detect(agent, fill_none=False):
    """Parse a user-agent string into a dict of detected client info.

    fill_none: if name/version is not detected respective key is still added
    to the result with value None
    """
    result = dict(platform=dict(name=None, version=None))
    _suggested_detectors = []
    for info_type in detectorshub:
        detectors = _suggested_detectors or detectorshub[info_type]
        for detector in detectors:
            try:
                detector.detect(agent, result)
            except Exception as _err:
                # A single broken detector must not abort the whole parse.
                pass
    if fill_none:
        for key in ('os', 'browser'):
            if key not in result:
                # Build a fresh dict per key: the original shared one dict
                # between 'os' and 'browser', so a later mutation of one
                # aliased into the other.
                result[key] = {'name': None, 'version': None}
            else:
                # Fill in only the *missing* attributes of the existing
                # entry. The original wrote `result[k] = v`, dumping the
                # defaults onto the top level of `result` and leaving
                # result[key] unfilled.
                for k, v in {'name': None, 'version': None}.items():
                    result[key].setdefault(k, v)
    return result
def simple_detect(agent):
    """
    -> (os, browser)  # tuple of strings
    """
    result = detect(agent)
    # Assemble the OS display name from whichever of flavor/dist/os matched,
    # in that order of specificity.
    os_list = []
    if 'flavor' in result:
        os_list.append(result['flavor']['name'])
    if 'dist' in result:
        os_list.append(result['dist']['name'])
    if 'os' in result:
        os_list.append(result['os']['name'])
    os = os_list and " ".join(os_list) or "Unknown OS"
    # First non-empty version among flavor, dist, os (in that order).
    os_version = os_list and (result.get('flavor') and result['flavor'].get('version')) or \
        (result.get('dist') and result['dist'].get('version')) or (result.get('os') and result['os'].get('version')) or ""
    browser = 'browser' in result and result['browser'].get('name') or 'Unknown Browser'
    browser_version = 'browser' in result and result['browser'].get('version') or ""
    if browser_version:
        browser = " ".join((browser, browser_version))
    if os_version:
        os = " ".join((os, os_version))
    return os, browser
| 26.361174
| 122
| 0.569276
|
4a092268d604cefa6ece5142884f9f315e091344
| 344
|
py
|
Python
|
pyeccodes/defs/grib1/local_98_8_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib1/local_98_8_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib1/local_98_8_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
import pyeccodes.accessors as _
def load(h):
h.add(_.Constant('GRIBEXSection1Problem', (62 - _.Get('section1Length'))))
_.Template('grib1/mars_labeling.def').load(h)
h.add(_.Unsigned('intervalBetweenTimes', 1))
h.add(_.Constant('numberOfIntegers', 12))
h.add(_.Unsigned('unsignedIntegers', 1, _.Get('numberOfIntegers')))
| 31.272727
| 78
| 0.694767
|
4a09228e338c3f71f3622d057912c73c9999c4ee
| 2,387
|
py
|
Python
|
scrape/spatula/cli.py
|
csnardi/people
|
9279bc9b755237b07513ca707f7265d1d1dfd5bf
|
[
"CC0-1.0"
] | 1
|
2021-04-19T20:42:59.000Z
|
2021-04-19T20:42:59.000Z
|
scrape/spatula/cli.py
|
csnardi/people
|
9279bc9b755237b07513ca707f7265d1d1dfd5bf
|
[
"CC0-1.0"
] | null | null | null |
scrape/spatula/cli.py
|
csnardi/people
|
9279bc9b755237b07513ca707f7265d1d1dfd5bf
|
[
"CC0-1.0"
] | null | null | null |
import attr
import click
import importlib
import pprint
from typing import List
from scrapelib import Scraper
from .pages import ListPage
from .core import URL
def get_class(dotted_name: str):
mod_name, cls_name = dotted_name.rsplit(".", 1)
mod = importlib.import_module(mod_name)
return getattr(mod, cls_name)
def _display(obj) -> str:
if isinstance(obj, dict):
return pprint.pformat(obj)
elif hasattr(obj, "to_dict"):
return pprint.pformat(obj.to_dict())
else:
return repr(obj)
@click.group()
def cli() -> None:
pass
@cli.command()
@click.argument("class_name")
@click.option("-i", "--interactive")
@click.option("-d", "--data", multiple=True)
@click.option("-s", "--source")
def test(class_name: str, interactive: bool, data: List[str], source: str) -> None:
Cls = get_class(class_name)
s = Scraper()
# special case for passing a single URL source
if source:
source = URL(source)
# build fake input from command line data if present
fake_input = {}
for item in data:
k, v = item.split("=", 1)
fake_input[k] = v
input_type = getattr(Cls, "input_type", None)
if input_type:
print(f"{Cls.__name__} expects input ({input_type.__name__}): ")
for field in attr.fields(input_type):
if field.name in fake_input:
print(f" {field.name}: {fake_input[field.name]}")
elif interactive:
fake_input[field.name] = click.prompt(" " + field.name)
else:
dummy_val = f"~{field.name}"
fake_input[field.name] = dummy_val
print(f" {field.name}: {dummy_val}")
page = Cls(input_type(**fake_input), source=source)
else:
page = Cls(fake_input, source=source)
# fetch data after input is handled, since we might need to build the source
page._fetch_data(s)
if issubclass(Cls, ListPage):
for i, item in enumerate(page.process_page()):
print(f"{i}:", _display(item))
else:
print(_display(page.process_page()))
@cli.command()
@click.argument("workflow_name")
@click.option("-o", "--output-dir", default=None)
def scrape(workflow_name: str, output_dir: str) -> None:
workflow = get_class(workflow_name)
workflow.execute(output_dir=output_dir)
if __name__ == "__main__":
cli()
| 27.436782
| 83
| 0.629661
|
4a0923c7ef49230c3fc381a15e61498c01500d79
| 2,274
|
py
|
Python
|
Python/win32/win32_msg_handler.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | null | null | null |
Python/win32/win32_msg_handler.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | 3
|
2020-03-24T16:26:35.000Z
|
2020-04-15T19:40:41.000Z
|
Python/win32/win32_msg_handler.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | null | null | null |
import win32con
import win32api
import win32gui
import time
mainloop_delay = 0.1 # [s]
className = "CLICK PLC manager WM handler className"
windowName = "CLICK PLC manager WM handler"
WM_START_ACCUMUL_DATA = win32con.WM_USER + 0
WM_STOP_ACCUMUL_DATA = win32con.WM_USER + 1
class WMHandler(object):
def __init__(self, gui_instance):
self.gui_instance = gui_instance
self._on_destroy_called = False
def mainloop(self):
self._build_window()
while True and not self._on_destroy_called:
_, msg = win32gui.PeekMessage(None, 0, 0, win32con.PM_REMOVE)
win32gui.TranslateMessage(msg)
win32gui.DispatchMessage(msg)
time.sleep(mainloop_delay)
win32gui.PostQuitMessage(0)
def _build_window(self):
msgMap = {
win32con.WM_DESTROY: self.on_destroy,
WM_START_ACCUMUL_DATA: self.on_start_accumul_data,
WM_STOP_ACCUMUL_DATA: self.on_stop_accumul_data
}
hinst = win32api.GetModuleHandle(None)
wndClass = win32gui.WNDCLASS()
wndClass.hInstance = hinst
wndClass.lpfnWndProc = msgMap
wndClass.lpszClassName = className
registeredWndClass = win32gui.RegisterClass(wndClass)
hwnd = win32gui.CreateWindowEx(
0, # Extended window style.
registeredWndClass, # Class atom created by RegisterClass.
windowName, # The window name.
0, # Style of the window being created.
0, # Initial horizontal position.
0, # Initial vertical position.
0, # Width of the window.
0, # Height of the window.
win32con.HWND_MESSAGE, # Message-only window.
0, # A handle to a menu.
hinst, # A handle to the instance of the main module.
None # A pointer to a value for CREATESTRUCT structure.
)
def on_destroy(self, hwnd, message, wparam, lparam):
self._on_destroy_called = True
def on_start_accumul_data(self, hwnd, message, wparam, lparam):
self.gui_instance._start_pause_event()
def on_stop_accumul_data(self, hwnd, message, wparam, lparam):
self.gui_instance._end_pause_event()
| 34.454545
| 73
| 0.643799
|
4a0923d6d084a6c7b32369fb6ecebab46717fd93
| 243
|
py
|
Python
|
main.py
|
fujihiraryo/rubiks-cube
|
f833544e07638772c81b5913111e9df632637a44
|
[
"MIT"
] | null | null | null |
main.py
|
fujihiraryo/rubiks-cube
|
f833544e07638772c81b5913111e9df632637a44
|
[
"MIT"
] | null | null | null |
main.py
|
fujihiraryo/rubiks-cube
|
f833544e07638772c81b5913111e9df632637a44
|
[
"MIT"
] | null | null | null |
import rubiks22
import solver22
*u, = input().split()
*d, = input().split()
*f, = input().split()
*b, = input().split()
*l, = input().split()
*r, = input().split()
cube = rubiks22.Cube(u, d, f, b, l, r)
cube.show()
print(*solver22.solve(cube))
| 22.090909
| 38
| 0.600823
|
4a09252f068881f8d80f9a02fbd0459de1ddf3da
| 7,436
|
py
|
Python
|
homeassistant/components/here_travel_time/sensor.py
|
devnull-nz/home-assistant-core
|
b6b72f50ec4a21e0c9cc5681bd09193e1a4ec256
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/here_travel_time/sensor.py
|
devnull-nz/home-assistant-core
|
b6b72f50ec4a21e0c9cc5681bd09193e1a4ec256
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/here_travel_time/sensor.py
|
devnull-nz/home-assistant-core
|
b6b72f50ec4a21e0c9cc5681bd09193e1a4ec256
|
[
"Apache-2.0"
] | null | null | null |
"""Support for HERE travel time sensors."""
from __future__ import annotations
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_MODE,
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
CONF_UNIT_SYSTEM,
TIME_MINUTES,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.start import async_at_start
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import HereTravelTimeDataUpdateCoordinator
from .const import (
ATTR_DURATION,
ATTR_DURATION_IN_TRAFFIC,
ATTR_TRAFFIC_MODE,
ATTR_UNIT_SYSTEM,
CONF_ARRIVAL,
CONF_DEPARTURE,
CONF_DESTINATION_ENTITY_ID,
CONF_DESTINATION_LATITUDE,
CONF_DESTINATION_LONGITUDE,
CONF_ORIGIN_ENTITY_ID,
CONF_ORIGIN_LATITUDE,
CONF_ORIGIN_LONGITUDE,
CONF_ROUTE_MODE,
CONF_TRAFFIC_MODE,
DEFAULT_NAME,
DOMAIN,
ICON_BICYCLE,
ICON_CAR,
ICON_PEDESTRIAN,
ICON_PUBLIC,
ICON_TRUCK,
ROUTE_MODE_FASTEST,
ROUTE_MODES,
TRAFFIC_MODE_ENABLED,
TRAVEL_MODE_BICYCLE,
TRAVEL_MODE_CAR,
TRAVEL_MODE_PEDESTRIAN,
TRAVEL_MODE_PUBLIC,
TRAVEL_MODE_PUBLIC_TIME_TABLE,
TRAVEL_MODE_TRUCK,
TRAVEL_MODES,
TRAVEL_MODES_PUBLIC,
UNITS,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Inclusive(
CONF_DESTINATION_LATITUDE, "destination_coordinates"
): cv.latitude,
vol.Inclusive(
CONF_DESTINATION_LONGITUDE, "destination_coordinates"
): cv.longitude,
vol.Exclusive(CONF_DESTINATION_LATITUDE, "destination"): cv.latitude,
vol.Exclusive(CONF_DESTINATION_ENTITY_ID, "destination"): cv.entity_id,
vol.Inclusive(CONF_ORIGIN_LATITUDE, "origin_coordinates"): cv.latitude,
vol.Inclusive(CONF_ORIGIN_LONGITUDE, "origin_coordinates"): cv.longitude,
vol.Exclusive(CONF_ORIGIN_LATITUDE, "origin"): cv.latitude,
vol.Exclusive(CONF_ORIGIN_ENTITY_ID, "origin"): cv.entity_id,
vol.Optional(CONF_DEPARTURE): cv.time,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MODE, default=TRAVEL_MODE_CAR): vol.In(TRAVEL_MODES),
vol.Optional(CONF_ROUTE_MODE, default=ROUTE_MODE_FASTEST): vol.In(ROUTE_MODES),
vol.Optional(CONF_TRAFFIC_MODE, default=False): cv.boolean,
vol.Optional(CONF_UNIT_SYSTEM): vol.In(UNITS),
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_DESTINATION_LATITUDE, CONF_DESTINATION_ENTITY_ID),
cv.has_at_least_one_key(CONF_ORIGIN_LATITUDE, CONF_ORIGIN_ENTITY_ID),
cv.key_value_schemas(
CONF_MODE,
{
None: PLATFORM_SCHEMA,
TRAVEL_MODE_BICYCLE: PLATFORM_SCHEMA,
TRAVEL_MODE_CAR: PLATFORM_SCHEMA,
TRAVEL_MODE_PEDESTRIAN: PLATFORM_SCHEMA,
TRAVEL_MODE_PUBLIC: PLATFORM_SCHEMA,
TRAVEL_MODE_TRUCK: PLATFORM_SCHEMA,
TRAVEL_MODE_PUBLIC_TIME_TABLE: PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_ARRIVAL, "arrival_departure"): cv.time,
vol.Exclusive(CONF_DEPARTURE, "arrival_departure"): cv.time,
}
),
},
),
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the HERE travel time platform."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
_LOGGER.warning(
"Your HERE travel time configuration has been imported into the UI; "
"please remove it from configuration.yaml as support for it will be "
"removed in a future release"
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add HERE travel time entities from a config_entry."""
async_add_entities(
[
HERETravelTimeSensor(
config_entry.data[CONF_NAME],
config_entry.options[CONF_TRAFFIC_MODE],
hass.data[DOMAIN][config_entry.entry_id],
)
],
)
class HERETravelTimeSensor(SensorEntity, CoordinatorEntity):
"""Representation of a HERE travel time sensor."""
def __init__(
self,
name: str,
traffic_mode: str,
coordinator: HereTravelTimeDataUpdateCoordinator,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self._traffic_mode = traffic_mode == TRAFFIC_MODE_ENABLED
self._attr_native_unit_of_measurement = TIME_MINUTES
self._attr_name = name
async def async_added_to_hass(self) -> None:
"""Wait for start so origin and destination entities can be resolved."""
await super().async_added_to_hass()
async def _update_at_start(_):
await self.async_update()
self.async_on_remove(async_at_start(self.hass, _update_at_start))
@property
def native_value(self) -> str | None:
"""Return the state of the sensor."""
if self.coordinator.data is not None:
return str(
round(
self.coordinator.data.get(
ATTR_DURATION_IN_TRAFFIC
if self._traffic_mode
else ATTR_DURATION
)
)
)
return None
@property
def extra_state_attributes(
self,
) -> dict[str, None | float | str | bool] | None:
"""Return the state attributes."""
if self.coordinator.data is not None:
res = {
ATTR_UNIT_SYSTEM: self.coordinator.config.units,
ATTR_MODE: self.coordinator.config.travel_mode,
ATTR_TRAFFIC_MODE: self._traffic_mode,
**self.coordinator.data,
}
res.pop(ATTR_ATTRIBUTION)
return res
return None
@property
def attribution(self) -> str | None:
"""Return the attribution."""
if self.coordinator.data is not None:
return self.coordinator.data.get(ATTR_ATTRIBUTION)
return None
@property
def icon(self) -> str:
"""Icon to use in the frontend depending on travel_mode."""
if self.coordinator.config.travel_mode == TRAVEL_MODE_BICYCLE:
return ICON_BICYCLE
if self.coordinator.config.travel_mode == TRAVEL_MODE_PEDESTRIAN:
return ICON_PEDESTRIAN
if self.coordinator.config.travel_mode in TRAVEL_MODES_PUBLIC:
return ICON_PUBLIC
if self.coordinator.config.travel_mode == TRAVEL_MODE_TRUCK:
return ICON_TRUCK
return ICON_CAR
| 32.471616
| 87
| 0.666756
|
4a092617c6071bf1da0087afb6dda2533cfa16d8
| 4,938
|
py
|
Python
|
resources/lib/kodi/context_menu.py
|
mediabrasiltv/plugin.video.netflix
|
48c7ccc0492d877bd21076140fcd5b1f8a1b31b2
|
[
"MIT"
] | null | null | null |
resources/lib/kodi/context_menu.py
|
mediabrasiltv/plugin.video.netflix
|
48c7ccc0492d877bd21076140fcd5b1f8a1b31b2
|
[
"MIT"
] | null | null | null |
resources/lib/kodi/context_menu.py
|
mediabrasiltv/plugin.video.netflix
|
48c7ccc0492d877bd21076140fcd5b1f8a1b31b2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Helper functions to generating context menu items
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.globals import g
from resources.lib.kodi.library import is_in_library
from resources.lib.kodi.library_autoupdate import show_excluded_from_auto_update
def generate_context_menu_mainmenu(menu_id):
"""Generate context menu items for a listitem of the main menu"""
items = []
if menu_id in ['myList', 'continueWatching']:
items.append(_ctx_item('force_update_list', None, {'menu_id': menu_id}))
return items
def generate_context_menu_searchitem(row_id, search_type):
"""Generate context menu items for a listitem of the search menu"""
items = []
if search_type == 'text':
items.append(_ctx_item('search_edit', None, {'row_id': row_id}))
items.append(_ctx_item('search_remove', None, {'row_id': row_id}))
return items
def generate_context_menu_items(videoid, is_in_mylist, perpetual_range_start=None, add_remove_watched_status=False):
"""Generate context menu items for a listitem"""
items = []
if videoid.mediatype not in [common.VideoId.SUPPLEMENTAL, common.VideoId.EPISODE]:
# Library operations for supplemental (trailers etc) and single episodes are not allowed
lib_auto_upd_mode = g.ADDON.getSettingInt('lib_auto_upd_mode')
if lib_auto_upd_mode != 0:
items = _generate_library_ctx_items(videoid, lib_auto_upd_mode)
# Old rating system
# if videoid.mediatype != common.VideoId.SEASON and \
# videoid.mediatype != common.VideoId.SUPPLEMENTAL:
# items.insert(0, _ctx_item('rate', videoid))
if videoid.mediatype in [common.VideoId.MOVIE, common.VideoId.SHOW]:
items.insert(0, _ctx_item('rate_thumb', videoid))
if add_remove_watched_status:
items.insert(0, _ctx_item('remove_watched_status', videoid))
if (videoid.mediatype != common.VideoId.SUPPLEMENTAL and
videoid.mediatype in [common.VideoId.MOVIE, common.VideoId.SHOW]):
items.insert(0, _ctx_item('trailer', videoid))
if videoid.mediatype in [common.VideoId.MOVIE, common.VideoId.SHOW]:
list_action = 'remove_from_list' if is_in_mylist else 'add_to_list'
items.insert(0, _ctx_item(list_action, videoid, {'perpetual_range_start': perpetual_range_start}))
if videoid.mediatype in [common.VideoId.MOVIE, common.VideoId.EPISODE]:
# Add menu to allow change manually the watched status when progress manager is enabled
if g.ADDON.getSettingBool('ProgressManager_enabled'):
items.insert(0, _ctx_item('change_watched_status', videoid))
return items
def _generate_library_ctx_items(videoid, lib_auto_upd_mode):
library_actions = []
allow_lib_operations = True
lib_is_sync_with_mylist = (g.ADDON.getSettingBool('lib_sync_mylist') and
lib_auto_upd_mode == 2)
if lib_is_sync_with_mylist:
# If the synchronization of Netflix "My List" with the Kodi library is enabled
# only in the chosen profile allow to do operations in the Kodi library otherwise
# it creates inconsistency to the exported elements and increases the work for sync
sync_mylist_profile_guid = g.SHARED_DB.get_value('sync_mylist_profile_guid',
g.LOCAL_DB.get_guid_owner_profile())
allow_lib_operations = sync_mylist_profile_guid == g.LOCAL_DB.get_active_profile_guid()
if allow_lib_operations:
_is_in_library = is_in_library(videoid)
if lib_is_sync_with_mylist:
if _is_in_library:
library_actions = ['update']
else:
library_actions = ['remove', 'update'] if _is_in_library else ['export']
if videoid.mediatype == common.VideoId.SHOW and _is_in_library:
library_actions.append('export_new_episodes')
if show_excluded_from_auto_update(videoid):
library_actions.append('include_in_auto_update')
else:
library_actions.append('exclude_from_auto_update')
return [_ctx_item(action, videoid) for action in library_actions]
def _ctx_item(template, videoid, params=None):
"""Create a context menu item based on the given template and videoid"""
# Do not move the import to the top of the module header, see context_menu_utils.py
from resources.lib.kodi.context_menu_utils import CONTEXT_MENU_ACTIONS
return (CONTEXT_MENU_ACTIONS[template]['label'],
common.run_plugin_action(
CONTEXT_MENU_ACTIONS[template]['url'](videoid, params)))
| 44.890909
| 116
| 0.707371
|
4a092696ed13f903f9436d528ecd35b29c2272a3
| 283
|
py
|
Python
|
app/main/forms.py
|
josylad/News-App
|
111e55365541cdbfc5841c8bc0a9af0ea38e60d4
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
josylad/News-App
|
111e55365541cdbfc5841c8bc0a9af0ea38e60d4
|
[
"MIT"
] | 1
|
2021-06-02T00:24:52.000Z
|
2021-06-02T00:24:52.000Z
|
app/main/forms.py
|
josylad/News-App
|
111e55365541cdbfc5841c8bc0a9af0ea38e60d4
|
[
"MIT"
] | 2
|
2019-09-10T03:02:26.000Z
|
2019-09-10T18:12:14.000Z
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
# from wtforms.validators import Required
class ReviewForm(FlaskForm):
title = StringField('Review Title')
review = TextAreaField('Movie Review')
submit = SubmitField('Submit')
| 28.3
| 59
| 0.773852
|
4a0926be4a186cc72f9f46cc0132f4ced3de347b
| 2,315
|
py
|
Python
|
ass3-airplane_det/mmdet/datasets/dataset_wrappers.py
|
Rooooyy/BUAA_PR
|
5b4d12dc786c3fdc469ae59e0b099e8095aee550
|
[
"BSD-2-Clause"
] | 2
|
2021-06-09T16:21:53.000Z
|
2021-08-30T02:31:56.000Z
|
mmdet/datasets/dataset_wrappers.py
|
jedibobo/S2ANet-custom-dataset
|
869b196d4c33713a5c61bd80064d10a453fb76ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/dataset_wrappers.py
|
jedibobo/S2ANet-custom-dataset
|
869b196d4c33713a5c61bd80064d10a453fb76ef
|
[
"Apache-2.0"
] | null | null | null |
import bisect
import math
from collections import defaultdict
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .registry import DATASETS
@DATASETS.register_module
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
concat the group flag for image aspect ratio.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
"""
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
if hasattr(datasets[0], 'flag'):
flags = []
for i in range(0, len(datasets)):
flags.append(datasets[i].flag)
self.flag = np.concatenate(flags)
def get_cat_ids(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
@DATASETS.register_module
class RepeatDataset(object):
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
if hasattr(self.dataset, 'flag'):
self.flag = np.tile(self.dataset.flag, times)
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def get_cat_ids(self, idx):
return self.dataset.get_cat_ids(idx % self._ori_len)
def __len__(self):
return self.times * self._ori_len
| 30.866667
| 79
| 0.642333
|
4a0926e8faa272a11bb6ae789968807503611894
| 4,175
|
py
|
Python
|
src/third_party/wiredtiger/test/suite/test_compact01.py
|
danx0r/mongo
|
70d4944c235bcdf7fbbc63971099563d2af72956
|
[
"Apache-2.0"
] | 72
|
2020-06-12T06:33:41.000Z
|
2021-03-22T03:15:56.000Z
|
src/third_party/wiredtiger/test/suite/test_compact01.py
|
danx0r/mongo
|
70d4944c235bcdf7fbbc63971099563d2af72956
|
[
"Apache-2.0"
] | 9
|
2020-07-02T09:36:49.000Z
|
2021-03-25T23:54:00.000Z
|
src/third_party/wiredtiger/test/suite/test_compact01.py
|
danx0r/mongo
|
70d4944c235bcdf7fbbc63971099563d2af72956
|
[
"Apache-2.0"
] | 14
|
2020-06-12T03:08:03.000Z
|
2021-02-03T11:43:09.000Z
|
#!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
from suite_subprocess import suite_subprocess
from wtdataset import SimpleDataSet, ComplexDataSet
from wiredtiger import stat
from wtscenario import make_scenarios
# test_compact.py
# session level compact operation
class test_compact(wttest.WiredTigerTestCase, suite_subprocess):
name = 'test_compact'
# Use a small page size because we want to create lots of pages.
config = 'allocation_size=512,' +\
'leaf_page_max=512,key_format=S'
nentries = 50000
# The table is a complex object, give it roughly 5 pages per underlying
# file.
types = [
('file', dict(type='file:', dataset=SimpleDataSet, maxpages=5)),
('table', dict(type='table:', dataset=ComplexDataSet, maxpages=50))
]
compact = [
('method', dict(utility=0,reopen=0)),
('method_reopen', dict(utility=0,reopen=1)),
('utility', dict(utility=1,reopen=0)),
]
scenarios = make_scenarios(types, compact)
# Configure the connection so that eviction doesn't happen (which could
# skew our compaction results).
conn_config = 'cache_size=1GB,eviction_checkpoint_target=80,' +\
'eviction_dirty_target=80,eviction_dirty_trigger=95,statistics=(all)'
# Test compaction.
def test_compact(self):
# Populate an object
uri = self.type + self.name
ds = self.dataset(self, uri, self.nentries - 1, config=self.config)
ds.populate()
# Reopen the connection to force the object to disk.
self.reopen_conn()
# Confirm the tree starts big
stat_cursor = self.session.open_cursor('statistics:' + uri, None, None)
self.assertGreater(stat_cursor[stat.dsrc.btree_row_leaf][2], self.maxpages)
stat_cursor.close()
# Remove most of the object.
c1 = self.session.open_cursor(uri, None)
c1.set_key(ds.key(5))
c2 = self.session.open_cursor(uri, None)
c2.set_key(ds.key(self.nentries - 5))
self.session.truncate(None, c1, c2, None)
c1.close()
c2.close()
# Compact it, using either the session method or the utility.
if self.utility == 1:
self.session.checkpoint(None)
self.close_conn()
self.runWt(["compact", uri])
else:
# Optionally reopen the connection so we do more on-disk tests.
if self.reopen == 1:
self.session.checkpoint(None)
self.reopen_conn()
self.session.compact(uri, None)
# Confirm compaction worked: check the number of on-disk pages
self.reopen_conn()
stat_cursor = self.session.open_cursor('statistics:' + uri, None, None)
self.assertLess(stat_cursor[stat.dsrc.btree_row_leaf][2], self.maxpages)
stat_cursor.close()
if __name__ == '__main__':
wttest.run()
| 38.657407
| 83
| 0.68503
|
4a0926fafe4f606a43b7994dce7f88beb25c1c65
| 656
|
py
|
Python
|
audit/models.py
|
thulasi-ram/logistika
|
a9a7b649f0e15bf8cdad43fdab2a8bd61326f83d
|
[
"MIT"
] | null | null | null |
audit/models.py
|
thulasi-ram/logistika
|
a9a7b649f0e15bf8cdad43fdab2a8bd61326f83d
|
[
"MIT"
] | null | null | null |
audit/models.py
|
thulasi-ram/logistika
|
a9a7b649f0e15bf8cdad43fdab2a8bd61326f83d
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from djutil.models import TimeStampedModel
from logistika.views.model_crud_permissions import CRUDPermissions
from quotes.models import Quotes
from tenders.models import Tenders
from users.models import User
class TendersAudit(TimeStampedModel, CRUDPermissions):
tender = models.ForeignKey(Tenders)
message = models.CharField(max_length=100)
user = models.ForeignKey(User, null=True)
class QuotesAudit(TimeStampedModel, CRUDPermissions):
quote = models.ForeignKey(Quotes)
message = models.CharField(max_length=100)
user = models.ForeignKey(User, null=True)
| 29.818182
| 66
| 0.803354
|
4a0927200eb2c270a1dc30a89afd0b8805ecaa7e
| 798
|
py
|
Python
|
safe_transaction_service/tokens/migrations/0004_ethereum_address_field_v2_20211201_1512.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 5
|
2018-07-02T17:18:18.000Z
|
2018-09-10T20:58:34.000Z
|
safe_transaction_service/tokens/migrations/0004_ethereum_address_field_v2_20211201_1512.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 5
|
2018-08-08T11:05:56.000Z
|
2018-10-03T08:51:37.000Z
|
safe_transaction_service/tokens/migrations/0004_ethereum_address_field_v2_20211201_1512.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 1
|
2022-02-07T09:04:23.000Z
|
2022-02-07T09:04:23.000Z
|
# Generated by Django 3.2.9 on 2021-12-01 15:12
from django.db import migrations
import gnosis.eth.django.models
class Migration(migrations.Migration):
dependencies = [
("tokens", "0003_auto_20201222_1053"),
]
operations = [
migrations.RunSQL(
"""
DROP INDEX IF EXISTS tokens_token_address_18ef94ca_like;
ALTER TABLE "tokens_token" ALTER COLUMN "address" TYPE bytea USING DECODE(SUBSTRING("address", 3), 'hex');
""",
reverse_sql=migrations.RunSQL.noop,
),
migrations.AlterField(
model_name="token",
name="address",
field=gnosis.eth.django.models.EthereumAddressV2Field(
primary_key=True, serialize=False
),
),
]
| 26.6
| 118
| 0.593985
|
4a09293d8a1ce139914c5cd60705d1e61165e7b5
| 25,210
|
py
|
Python
|
src/olympia/versions/tests/test_views.py
|
soniasingla/addons-server
|
1bbaa965faf180e866b6e908e5f7e27edb8fe506
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/versions/tests/test_views.py
|
soniasingla/addons-server
|
1bbaa965faf180e866b6e908e5f7e27edb8fe506
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/versions/tests/test_views.py
|
soniasingla/addons-server
|
1bbaa965faf180e866b6e908e5f7e27edb8fe506
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.test.utils import override_settings
from django.utils.http import urlquote
import mock
import pytest
from pyquery import PyQuery
from olympia import amo
from olympia.access import acl
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon
from olympia.amo.templatetags.jinja_helpers import user_media_url
from olympia.amo.tests import TestCase, addon_factory, version_factory
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlencode, urlparams
from olympia.files.models import File
from olympia.users.models import UserProfile
from olympia.versions import views
class TestViews(TestCase):
def setUp(self):
super(TestViews, self).setUp()
self.addon = addon_factory(
slug=u'my-addôn', file_kw={'size': 1024},
version_kw={'version': '1.0'})
self.version = self.addon.current_version
self.addon.current_version.update(created=self.days_ago(3))
self.url_list = reverse('addons.versions', args=[self.addon.slug])
self.url_detail = reverse(
'addons.versions',
args=[self.addon.slug, self.addon.current_version.version])
@mock.patch.object(views, 'PER_PAGE', 1)
def test_version_detail(self):
version = version_factory(addon=self.addon, version='2.0')
version.update(created=self.days_ago(2))
version = version_factory(addon=self.addon, version='2.1')
version.update(created=self.days_ago(1))
urls = [(v.version, reverse('addons.versions',
args=[self.addon.slug, v.version]))
for v in self.addon.versions.all()]
version, url = urls[0]
assert version == '2.1'
response = self.client.get(url, follow=True)
self.assert3xx(
response, self.url_list + '?page=1#version-%s' % version)
version, url = urls[1]
assert version == '2.0'
response = self.client.get(url, follow=True)
self.assert3xx(
response, self.url_list + '?page=2#version-%s' % version)
version, url = urls[2]
assert version == '1.0'
response = self.client.get(url, follow=True)
self.assert3xx(
response, self.url_list + '?page=3#version-%s' % version)
def test_version_detail_cache_key_normalized(self):
"""Test regression with memcached cache-key.
https://github.com/mozilla/addons-server/issues/8622
"""
url = reverse(
'addons.versions', args=[self.addon.slug, u'Âûáèðàåì âåðñèþ 23.0'])
response = self.client.get(url, follow=True)
assert response.status_code == 404
def test_version_detail_404(self):
bad_pk = self.addon.current_version.pk + 42
response = self.client.get(reverse('addons.versions',
args=[self.addon.slug, bad_pk]))
assert response.status_code == 404
bad_pk = u'lolé'
response = self.client.get(reverse('addons.versions',
args=[self.addon.slug, bad_pk]))
assert response.status_code == 404
def get_content(self):
response = self.client.get(self.url_list)
assert response.status_code == 200
return PyQuery(response.content)
@pytest.mark.xfail(reason='Temporarily hidden, #5431')
def test_version_source(self):
self.addon.update(view_source=True)
assert len(self.get_content()('a.source-code')) == 1
def test_version_no_source_one(self):
self.addon.update(view_source=False)
assert len(self.get_content()('a.source-code')) == 0
def test_version_addon_not_public(self):
self.addon.update(view_source=True, status=amo.STATUS_NULL)
response = self.client.get(self.url_list)
assert response.status_code == 404
def test_version_link(self):
version = self.addon.current_version.version
doc = self.get_content()
link = doc('.version h3 > a').attr('href')
assert link == self.url_detail
assert doc('.version').attr('id') == 'version-%s' % version
def test_version_list_button_shows_download_anyway(self):
first_version = self.addon.current_version
first_version.update(created=self.days_ago(1))
first_file = first_version.files.all()[0]
second_version = version_factory(addon=self.addon, version='2.0')
second_file = second_version.files.all()[0]
doc = self.get_content()
links = doc('.download-anyway a')
assert links
assert links[0].attrib['href'] == second_file.get_url_path(
'version-history', attachment=True)
assert links[1].attrib['href'] == first_file.get_url_path(
'version-history', attachment=True)
def test_version_list_doesnt_show_unreviewed_versions_public_addon(self):
version = self.addon.current_version.version
version_factory(
addon=self.addon, file_kw={'status': amo.STATUS_AWAITING_REVIEW},
version='2.1')
doc = self.get_content()
assert len(doc('.version')) == 1
assert doc('.version').attr('id') == 'version-%s' % version
def test_version_list_does_show_unreviewed_versions_unreviewed_addon(self):
version = self.addon.current_version.version
file_ = self.addon.current_version.files.all()[0]
file_.update(status=amo.STATUS_AWAITING_REVIEW)
doc = self.get_content()
assert len(doc('.version')) == 1
assert doc('.version').attr('id') == 'version-%s' % version
def test_version_list_for_unlisted_addon_returns_404(self):
"""Unlisted addons are not listed and have no version list."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url_list).status_code == 404
def test_version_detail_does_not_return_unlisted_versions(self):
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url_detail)
assert response.status_code == 404
def test_version_list_file_size_uses_binary_prefix(self):
response = self.client.get(self.url_list)
assert '1.0 KiB' in response.content
def test_version_list_no_compat_displayed_if_not_necessary(self):
doc = self.get_content()
compat_info = doc('.compat').text()
assert compat_info
assert 'Firefox 4.0.99 and later' in compat_info
self.addon.update(type=amo.ADDON_DICT)
doc = self.get_content()
compat_info = doc('.compat').text()
assert not compat_info
def test_version_update_info(self):
self.version.release_notes = {
'en-US': u'Fix for an important bug',
'fr': u'Quelque chose en français.\n\nQuelque chose d\'autre.'
}
self.version.save()
file_ = self.version.files.all()[0]
file_.update(platform=amo.PLATFORM_WIN.id)
# Copy the file to create a new one attached to the same version.
# This tests https://github.com/mozilla/addons-server/issues/8950
file_.pk = None
file_.platform = amo.PLATFORM_MAC.id
file_.save()
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
# pyquery is annoying to use with XML and namespaces. Use the HTML
# parser, but do check that xmlns attribute is present (required by
# Firefox for the notes to be shown properly).
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == 'Fix for an important bug'
# Test update info in another language.
with self.activate(locale='fr'):
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
assert '<br/>' in response.content, (
'Should be using XHTML self-closing tags!')
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == (
u"Quelque chose en français.<br/><br/>Quelque chose d'autre.")
def test_version_update_info_legacy_redirect(self):
response = self.client.get('/versions/updateInfo/%s' % self.version.id,
follow=True)
url = reverse('addons.versions.update_info',
args=(self.version.addon.slug, self.version.version))
self.assert3xx(response, url, 301)
def test_version_update_info_legacy_redirect_deleted(self):
self.version.delete()
response = self.client.get(
'/en-US/firefox/versions/updateInfo/%s' % self.version.id)
assert response.status_code == 404
def test_version_update_info_no_unlisted(self):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 404
class TestDownloadsBase(TestCase):
    """Base class for file-download view tests.

    Loads a known add-on/file fixture pair and provides assertion helpers
    describing the three ways a download can be served: redirected to a
    host/CDN, redirected to local user-media, or served internally via
    X-Sendfile.
    """

    fixtures = ['base/addon_5299_gcal', 'base/users']

    def setUp(self):
        super(TestDownloadsBase, self).setUp()
        self.addon = Addon.objects.get(id=5299)
        self.file = File.objects.get(id=33046)
        self.file_url = reverse('downloads.file', args=[self.file.id])
        self.latest_url = reverse('downloads.latest', args=[self.addon.slug])

    def assert_served_by_host(self, response, host, file_=None):
        """The response is a 302 to `host`, with the file hash both in the
        ``filehash`` query param and in the X-Target-Digest header."""
        if not file_:
            file_ = self.file
        assert response.status_code == 302
        assert response.url == (
            urlparams('%s%s/%s' % (
                host, self.addon.id, urlquote(file_.filename)
            ), filehash=file_.hash))
        assert response['X-Target-Digest'] == file_.hash

    def assert_served_internally(self, response, guarded=True):
        """The response is a 200 whose X-Sendfile header points at the
        guarded (or plain, when ``guarded=False``) on-disk file path."""
        assert response.status_code == 200
        file_path = (self.file.guarded_file_path if guarded else
                     self.file.file_path)
        assert response[settings.XSENDFILE_HEADER] == file_path

    def assert_served_locally(self, response, file_=None, attachment=False):
        """The response redirects to the local user-media path (with the
        ``_attachments/`` suffix for attachment downloads)."""
        path = user_media_url('addons')
        if attachment:
            path += '_attachments/'
        self.assert_served_by_host(response, path, file_)

    def assert_served_by_cdn(self, response, file_=None):
        """The redirect target is an absolute URL under MEDIA_URL."""
        assert response.url.startswith(settings.MEDIA_URL)
        assert response.url.startswith('http')
        self.assert_served_by_host(response, user_media_url('addons'), file_)
class TestDownloadsUnlistedVersions(TestDownloadsBase):
    """Download permissions for unlisted add-ons.

    Each test patches the three acl checks (listed reviewer, unlisted
    reviewer, ownership) to isolate exactly one role, then verifies
    whether file/latest downloads are allowed for that role.
    """

    def setUp(self):
        super(TestDownloadsUnlistedVersions, self).setUp()
        self.make_addon_unlisted(self.addon)

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: False)
    def test_download_for_unlisted_addon_returns_404(self):
        """File downloading isn't allowed for unlisted addons."""
        assert self.client.get(self.file_url).status_code == 404
        assert self.client.get(self.latest_url).status_code == 404

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: True)
    def test_download_for_unlisted_addon_owner(self):
        """File downloading is allowed for addon owners."""
        self.assert_served_internally(self.client.get(self.file_url), False)
        assert self.client.get(self.latest_url).status_code == 404

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: False)
    def test_download_for_unlisted_addon_reviewer(self):
        """File downloading isn't allowed for reviewers."""
        assert self.client.get(self.file_url).status_code == 404
        assert self.client.get(self.latest_url).status_code == 404

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: False)
    def test_download_for_unlisted_addon_unlisted_reviewer(self):
        """File downloading is allowed for unlisted reviewers."""
        self.assert_served_internally(self.client.get(self.file_url), False)
        assert self.client.get(self.latest_url).status_code == 404
class TestDownloads(TestDownloadsBase):
    """How files of public/unreviewed add-ons are served: public files go
    through the CDN, not-yet-reviewed ones are served locally."""

    def test_file_404(self):
        r = self.client.get(reverse('downloads.file', args=[234]))
        assert r.status_code == 404

    def test_public(self):
        assert self.addon.status == amo.STATUS_PUBLIC
        assert self.file.status == amo.STATUS_PUBLIC
        self.assert_served_by_cdn(self.client.get(self.file_url))

    def test_public_addon_unreviewed_file(self):
        self.file.status = amo.STATUS_AWAITING_REVIEW
        self.file.save()
        self.assert_served_locally(self.client.get(self.file_url))

    def test_unreviewed_addon(self):
        self.addon.status = amo.STATUS_PENDING
        self.addon.save()
        self.assert_served_locally(self.client.get(self.file_url))

    def test_type_attachment(self):
        self.assert_served_by_cdn(self.client.get(self.file_url))
        # The 'attachment' type forces a local (forced-download) serve
        url = reverse('downloads.file', args=[self.file.id, 'attachment'])
        self.assert_served_locally(self.client.get(url), attachment=True)

    def test_trailing_filename(self):
        # A trailing filename after the id is accepted and ignored
        url = self.file_url + self.file.filename
        self.assert_served_by_cdn(self.client.get(url))

    def test_null_datestatuschanged(self):
        self.file.update(datestatuschanged=None)
        self.assert_served_locally(self.client.get(self.file_url))

    def test_unicode_url(self):
        # Non-ASCII filenames must survive URL quoting
        self.file.update(filename=u'图像浏览器-0.5-fx.xpi')
        self.assert_served_by_cdn(self.client.get(self.file_url))
class TestDisabledFileDownloads(TestDownloadsBase):
    """Disabled add-ons/files 404 for anonymous and unprivileged users,
    but are still served internally to authors, reviewers and admins."""

    def test_admin_disabled_404(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert self.client.get(self.file_url).status_code == 404

    def test_user_disabled_404(self):
        self.addon.update(disabled_by_user=True)
        assert self.client.get(self.file_url).status_code == 404

    def test_file_disabled_anon_404(self):
        self.file.update(status=amo.STATUS_DISABLED)
        assert self.client.get(self.file_url).status_code == 404

    def test_file_disabled_unprivileged_404(self):
        assert self.client.login(email='regular@mozilla.com')
        self.file.update(status=amo.STATUS_DISABLED)
        assert self.client.get(self.file_url).status_code == 404

    def test_file_disabled_ok_for_author(self):
        self.file.update(status=amo.STATUS_DISABLED)
        assert self.client.login(email='g@gmail.com')
        self.assert_served_internally(self.client.get(self.file_url))

    def test_file_disabled_ok_for_reviewer(self):
        self.file.update(status=amo.STATUS_DISABLED)
        self.client.login(email='reviewer@mozilla.com')
        self.assert_served_internally(self.client.get(self.file_url))

    def test_file_disabled_ok_for_admin(self):
        self.file.update(status=amo.STATUS_DISABLED)
        self.client.login(email='admin@mozilla.com')
        self.assert_served_internally(self.client.get(self.file_url))

    def test_admin_disabled_ok_for_author(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert self.client.login(email='g@gmail.com')
        self.assert_served_internally(self.client.get(self.file_url))

    def test_admin_disabled_ok_for_admin(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        self.client.login(email='admin@mozilla.com')
        self.assert_served_internally(self.client.get(self.file_url))

    def test_user_disabled_ok_for_author(self):
        self.addon.update(disabled_by_user=True)
        assert self.client.login(email='g@gmail.com')
        self.assert_served_internally(self.client.get(self.file_url))

    def test_user_disabled_ok_for_admin(self):
        self.addon.update(disabled_by_user=True)
        self.client.login(email='admin@mozilla.com')
        self.assert_served_internally(self.client.get(self.file_url))
class TestUnlistedDisabledFileDownloads(TestDisabledFileDownloads):
    """Re-runs every TestDisabledFileDownloads test against an unlisted
    add-on, with the reviewer account granted Addons:ReviewUnlisted."""

    def setUp(self):
        # NOTE(review): super() is deliberately(?) passed the *parent*
        # class (TestDisabledFileDownloads), which skips that class in the
        # MRO. Harmless as written, since the parent defines no setUp of
        # its own (TestDownloadsBase.setUp still runs) — but confirm this
        # was intentional before changing the hierarchy.
        super(TestDisabledFileDownloads, self).setUp()
        self.make_addon_unlisted(self.addon)
        self.grant_permission(
            UserProfile.objects.get(email='reviewer@mozilla.com'),
            'Addons:ReviewUnlisted')
class TestDownloadsLatest(TestDownloadsBase):
    """Tests for the ``downloads.latest`` URL — serving the latest file of
    an add-on, optionally filtered by platform and/or download type."""

    def setUp(self):
        super(TestDownloadsLatest, self).setUp()
        # 5 is used below as the PLATFORM_ALL id in URL kwargs
        self.platform = 5

    def test_404(self):
        url = reverse('downloads.latest', args=[123])
        assert self.client.get(url).status_code == 404

    def test_type_none(self):
        # Without a type, a redirect carrying filename + filehash is issued
        r = self.client.get(self.latest_url)
        assert r.status_code == 302
        url = '%s?%s' % (self.file.filename,
                         urlencode({'filehash': self.file.hash}))
        assert r['Location'].endswith(url), r['Location']

    def test_success(self):
        assert self.addon.current_version
        self.assert_served_by_cdn(self.client.get(self.latest_url))

    def test_platform(self):
        # We still match PLATFORM_ALL.
        url = reverse('downloads.latest',
                      kwargs={'addon_id': self.addon.slug, 'platform': 5})
        self.assert_served_by_cdn(self.client.get(url))

        # And now we match the platform in the url.
        self.file.platform = self.platform
        self.file.save()
        self.assert_served_by_cdn(self.client.get(url))

        # But we can't match platform=3.
        url = reverse('downloads.latest',
                      kwargs={'addon_id': self.addon.slug, 'platform': 3})
        assert self.client.get(url).status_code == 404

    def test_type(self):
        url = reverse('downloads.latest', kwargs={'addon_id': self.addon.slug,
                                                  'type': 'attachment'})
        self.assert_served_locally(self.client.get(url), attachment=True)

    def test_platform_and_type(self):
        url = reverse('downloads.latest',
                      kwargs={'addon_id': self.addon.slug, 'platform': 5,
                              'type': 'attachment'})
        self.assert_served_locally(self.client.get(url), attachment=True)

    def test_trailing_filename(self):
        # A trailing filename after the URL is accepted and ignored
        url = reverse('downloads.latest',
                      kwargs={'addon_id': self.addon.slug, 'platform': 5,
                              'type': 'attachment'})
        url += self.file.filename
        self.assert_served_locally(self.client.get(url), attachment=True)

    def test_platform_multiple_objects(self):
        # With two files on the same version, the platform filter picks
        # the matching one
        f = File.objects.create(platform=3, version=self.file.version,
                                filename='unst.xpi', status=self.file.status)
        url = reverse('downloads.latest',
                      kwargs={'addon_id': self.addon.slug, 'platform': 3})
        self.assert_served_locally(self.client.get(url), file_=f)
@override_settings(XSENDFILE=True)
class TestDownloadSource(TestCase):
    """Tests for the version source-download view (``downloads.source``).

    A 2 MiB zip is attached as the version's source in setUp; individual
    tests then check which roles may download it (owner, holders of the
    Editors:BinarySource group rule, unlisted reviewers) and which get 404.
    """

    fixtures = ['base/addon_3615', 'base/admin']

    def setUp(self):
        super(TestDownloadSource, self).setUp()
        self.addon = Addon.objects.get(pk=3615)
        # Make sure non-ascii is ok.
        self.addon.update(slug=u'crosswarpex-확장')
        self.version = self.addon.current_version
        tdir = temp.gettempdir()
        self.source_file = temp.NamedTemporaryFile(suffix=".zip", dir=tdir)
        self.source_file.write('a' * (2 ** 21))
        self.source_file.seek(0)
        self.version.source = DjangoFile(self.source_file)
        self.version.save()
        self.filename = os.path.basename(self.version.source.path)
        self.user = UserProfile.objects.get(email="del@icio.us")
        self.group = Group.objects.create(
            name='Editors BinarySource',
            rules='Editors:BinarySource'
        )
        self.url = reverse('downloads.source', args=(self.version.pk, ))

    def _assert_source_served(self, response):
        """Assert the source file is served via X-Sendfile with the right
        path and Content-Disposition filename.

        Extracted because test_owner_should_be_allowed and
        test_group_binarysource_should_be_allowed previously duplicated
        this whole assertion block verbatim.
        """
        assert response.status_code == 200
        assert response[settings.XSENDFILE_HEADER]
        assert 'Content-Disposition' in response
        filename = self.filename
        if not isinstance(filename, unicode):
            filename = filename.decode('utf8')
        assert filename in response['Content-Disposition'].decode('utf8')
        path = self.version.source.path
        if not isinstance(path, unicode):
            path = path.decode('utf8')
        assert response[settings.XSENDFILE_HEADER].decode('utf8') == path

    def test_owner_should_be_allowed(self):
        self.client.login(email=self.user.email)
        self._assert_source_served(self.client.get(self.url))

    def test_anonymous_should_not_be_allowed(self):
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_deleted_version(self):
        self.version.delete()
        GroupUser.objects.create(user=self.user, group=self.group)
        self.client.login(email=self.user.email)
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_group_binarysource_should_be_allowed(self):
        GroupUser.objects.create(user=self.user, group=self.group)
        self.client.login(email=self.user.email)
        self._assert_source_served(self.client.get(self.url))

    def test_no_source_should_go_in_404(self):
        self.version.source = None
        self.version.save()
        response = self.client.get(self.url)
        assert response.status_code == 404

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: False)
    def test_download_for_unlisted_addon_returns_404(self):
        """File downloading isn't allowed for unlisted addons."""
        self.make_addon_unlisted(self.addon)
        assert self.client.get(self.url).status_code == 404

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: True)
    def test_download_for_unlisted_addon_owner(self):
        """File downloading is allowed for addon owners."""
        self.make_addon_unlisted(self.addon)
        assert self.client.get(self.url).status_code == 200

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: False)
    def test_download_for_unlisted_addon_reviewer(self):
        """File downloading isn't allowed for reviewers."""
        self.make_addon_unlisted(self.addon)
        assert self.client.get(self.url).status_code == 404

    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    @mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
    @mock.patch.object(acl, 'check_addon_ownership',
                       lambda *args, **kwargs: False)
    def test_download_for_unlisted_addon_unlisted_reviewer(self):
        """File downloading is allowed for unlisted reviewers."""
        self.make_addon_unlisted(self.addon)
        assert self.client.get(self.url).status_code == 200
| 42.584459
| 79
| 0.661325
|
4a092d536c946a36da4ad76b90fa929be8ac2377
| 3,372
|
py
|
Python
|
apps/account/migrations/0001_initial.py
|
kagxin/django-template
|
3cdddf8ff3e1d95298ffe359f0a40e27220d795b
|
[
"MIT"
] | 16
|
2019-07-23T04:14:27.000Z
|
2022-02-15T10:46:06.000Z
|
apps/account/migrations/0001_initial.py
|
kagxin/django-template
|
3cdddf8ff3e1d95298ffe359f0a40e27220d795b
|
[
"MIT"
] | 1
|
2021-04-08T19:34:31.000Z
|
2021-04-08T19:34:31.000Z
|
apps/account/migrations/0001_initial.py
|
kagxin/django-template
|
3cdddf8ff3e1d95298ffe359f0a40e27220d795b
|
[
"MIT"
] | 5
|
2019-07-23T13:18:42.000Z
|
2021-01-28T06:37:47.000Z
|
# Generated by Django 2.2.2 on 2019-07-18 01:37
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the ``UserProfile`` custom user model.

    Mirrors Django's standard user fields (username, password, email,
    permissions, etc.) and adds profile fields: name, birthday, gender,
    mobile, image.
    """

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Custom profile fields below (verbose_names are Chinese:
                # name, date of birth, gender, phone)
                ('name', models.CharField(blank=True, max_length=30, null=True, verbose_name='姓名')),
                ('birthday', models.DateField(blank=True, null=True, verbose_name='出生年月')),
                ('gender', models.CharField(blank=True, choices=[('male', '男'), ('female', '女')], default='female', max_length=6, verbose_name='性别')),
                ('mobile', models.CharField(blank=True, max_length=11, null=True, verbose_name='电话')),
                ('image', models.ImageField(blank=True, null=True, upload_to='image/%Y/%m/%d')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 68.816327
| 329
| 0.654804
|
4a092e9ee0c021556629a5ef8d8d43b4cd8a86d6
| 9,117
|
py
|
Python
|
fiftyone/server/metadata.py
|
Bukkster/fiftyone
|
c061216de5094131c8ce8718d8a6ac58056b003e
|
[
"Apache-2.0"
] | 3
|
2022-01-18T06:13:33.000Z
|
2022-02-14T13:28:23.000Z
|
fiftyone/server/metadata.py
|
Bukkster/fiftyone
|
c061216de5094131c8ce8718d8a6ac58056b003e
|
[
"Apache-2.0"
] | null | null | null |
fiftyone/server/metadata.py
|
Bukkster/fiftyone
|
c061216de5094131c8ce8718d8a6ac58056b003e
|
[
"Apache-2.0"
] | null | null | null |
"""
FiftyOne Server JIT metadata utilities.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import logging
import shutil
import struct
import asyncio
import aiofiles
import eta.core.serial as etas
import eta.core.utils as etau
import eta.core.video as etav
import fiftyone.core.media as fom
logger = logging.getLogger(__name__)
_FFPROBE_BINARY_PATH = shutil.which("ffprobe")
async def get_metadata(filepath, media_type, metadata=None):
    """Gets the metadata for the given media file.

    Args:
        filepath: the path to the file
        media_type: the media type of the collection
        metadata (None): a pre-existing metadata dict to use if possible

    Returns:
        metadata dict (``width``/``height``, plus ``frame_rate`` for video)
    """
    is_video = media_type == fom.VIDEO

    # Reuse pre-computed metadata when it already carries everything we need
    if metadata:
        if is_video:
            width = metadata.get("frame_width", None)
            height = metadata.get("frame_height", None)
            frame_rate = metadata.get("frame_rate", None)
            if width and height and frame_rate:
                return {
                    "width": width,
                    "height": height,
                    "frame_rate": frame_rate,
                }
        else:
            width = metadata.get("width", None)
            height = metadata.get("height", None)
            if width and height:
                return {"width": width, "height": height}

    try:
        return await read_metadata(filepath, is_video)
    except Exception:
        # Fix: the original used a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt and hid all failures silently.
        # Metadata is best-effort here, so log and fall through to a
        # default size that lets the App render something.
        logger.debug(
            "Failed to compute metadata for '%s'", filepath, exc_info=True
        )

    if is_video:
        return {"width": 512, "height": 512, "frame_rate": 30}

    return {"width": 512, "height": 512}
async def read_metadata(filepath, is_video):
    """Computes media metadata for the given file.

    Args:
        filepath: a filepath
        is_video: whether the file is a video

    Returns:
        dict with ``width``/``height`` (and ``frame_rate`` for video)
    """
    if is_video:
        # Probe the video container via ffprobe
        stream_info = await get_stream_info(filepath)
        frame_width, frame_height = stream_info.frame_size
        return {
            "width": frame_width,
            "height": frame_height,
            "frame_rate": stream_info.frame_rate,
        }

    # Images: parse dimensions straight from the header bytes
    async with aiofiles.open(filepath, "rb") as fobj:
        dims = await get_image_dimensions(fobj)

    return {"width": dims[0], "height": dims[1]}
async def get_stream_info(path):
    """Returns a :class:`eta.core.video.VideoStreamInfo` instance for the
    provided video path.

    Args:
        path: a video filepath

    Returns:
        a :class:`eta.core.video.VideoStreamInfo`

    Raises:
        RuntimeError: if no ``ffprobe`` binary was found on the PATH
        ValueError: if ``ffprobe`` wrote anything to stderr
    """
    if _FFPROBE_BINARY_PATH is None:
        raise RuntimeError(
            "You must have ffmpeg installed on your machine in order to view "
            "video datasets in the App, but we failed to find it"
        )

    # Ask ffprobe for the container format and all streams, as JSON
    proc = await asyncio.create_subprocess_exec(
        _FFPROBE_BINARY_PATH,
        "-loglevel",
        "error",
        "-show_format",
        "-show_streams",
        "-print_format",
        "json",
        "-i",
        path,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()

    # NOTE(review): any stderr output is treated as fatal; `-loglevel
    # error` should restrict stderr to real errors, but confirm
    if stderr:
        raise ValueError(stderr)

    info = etas.load_json(stdout.decode("utf8"))

    # Prefer a video stream; fall back to the first stream of any type
    # when none is present, and to the first video stream when several are
    video_streams = [s for s in info["streams"] if s["codec_type"] == "video"]
    num_video_streams = len(video_streams)
    if num_video_streams == 1:
        stream_info = video_streams[0]
    elif num_video_streams == 0:
        logger.debug("No video stream found; defaulting to first stream")
        stream_info = info["streams"][0]
    else:
        logger.debug("Found multiple video streams; using first stream")
        stream_info = video_streams[0]

    format_info = info["format"]
    mime_type = etau.guess_mime_type(path)

    return etav.VideoStreamInfo(stream_info, format_info, mime_type=mime_type)
async def get_image_dimensions(input):
    """Gets the dimensions of an image from its asynchronous byte stream.

    Detects the format from magic bytes and parses only the header fields
    needed for the dimensions. Branches below handle GIF, PNG (current and
    old layout), JPEG, BMP, TIFF, and finally an ICO-style header as the
    fallback. Unknown inputs fall through with ``(-1, -1)``.

    Args:
        input: file-like object with async read and seek methods

    Returns:
        the ``(width, height)``

    Raises:
        MetadataException: on malformed BMP/TIFF/fallback headers
    """
    height = -1
    width = -1
    data = await input.read(26)
    size = len(data)

    if (size >= 10) and data[:6] in (b"GIF87a", b"GIF89a"):
        # GIFs: little-endian 16-bit dimensions at offset 6
        w, h = struct.unpack("<HH", data[6:10])
        width = int(w)
        height = int(h)
    elif (
        (size >= 24)
        and data.startswith(b"\211PNG\r\n\032\n")
        and (data[12:16] == b"IHDR")
    ):
        # PNGs: big-endian 32-bit dimensions in the IHDR chunk
        w, h = struct.unpack(">LL", data[16:24])
        width = int(w)
        height = int(h)
    elif (size >= 16) and data.startswith(b"\211PNG\r\n\032\n"):
        # older PNGs: dimensions read directly at offset 8
        w, h = struct.unpack(">LL", data[8:16])
        width = int(w)
        height = int(h)
    elif (size >= 2) and data.startswith(b"\377\330"):
        # JPEGs: walk marker segments until a 0xC0-0xC3 frame header,
        # which carries 16-bit height then width; 0xDA (start of scan)
        # ends the search
        await input.seek(2)
        b = await input.read(1)
        while b and ord(b) != 0xDA:
            while ord(b) != 0xFF:
                b = await input.read(1)
            while ord(b) == 0xFF:
                b = await input.read(1)
            if ord(b) >= 0xC0 and ord(b) <= 0xC3:
                await input.read(3)
                tmp = await input.read(4)
                h, w = struct.unpack(">HH", tmp)
                break
            else:
                # Skip a non-frame segment using its 16-bit length field
                tmp = await input.read(2)
                await input.read(int(struct.unpack(">H", tmp)[0]) - 2)
            b = await input.read(1)
        # NOTE(review): if the stream ends before a frame header is found,
        # `w`/`h` are unbound here and this raises NameError rather than
        # MetadataException — confirm callers tolerate that
        width = int(w)
        height = int(h)
    elif (size >= 26) and data.startswith(b"BM"):
        # BMP
        headersize = struct.unpack("<I", data[14:18])[0]
        if headersize == 12:
            # 12-byte header: 16-bit dimensions
            w, h = struct.unpack("<HH", data[18:22])
            width = int(w)
            height = int(h)
        elif headersize >= 40:
            # >= 40-byte header: signed 32-bit dimensions
            w, h = struct.unpack("<ii", data[18:26])
            width = int(w)
            # as h is negative when stored upside down
            height = abs(int(h))
        else:
            raise MetadataException(
                "Unkown DIB header size: %s" % str(headersize)
            )
    elif (size >= 8) and data[:4] in (b"II\052\000", b"MM\000\052"):
        # Standard TIFF, big- or little-endian
        # BigTIFF and other different but TIFF-like formats are not
        # supported currently
        byteOrder = data[:2]
        boChar = ">" if byteOrder == "MM" else "<"
        # maps TIFF type id to size (in bytes)
        # and python format char for struct
        tiffTypes = {
            1: (1, boChar + "B"),  # BYTE
            2: (1, boChar + "c"),  # ASCII
            3: (2, boChar + "H"),  # SHORT
            4: (4, boChar + "L"),  # LONG
            5: (8, boChar + "LL"),  # RATIONAL
            6: (1, boChar + "b"),  # SBYTE
            7: (1, boChar + "c"),  # UNDEFINED
            8: (2, boChar + "h"),  # SSHORT
            9: (4, boChar + "l"),  # SLONG
            10: (8, boChar + "ll"),  # SRATIONAL
            11: (4, boChar + "f"),  # FLOAT
            12: (8, boChar + "d"),  # DOUBLE
        }
        ifdOffset = struct.unpack(boChar + "L", data[4:8])[0]
        countSize = 2
        await input.seek(ifdOffset)
        ec = await input.read(countSize)
        ifdEntryCount = struct.unpack(boChar + "H", ec)[0]
        # 2 bytes: TagId + 2 bytes: type + 4 bytes: count of values + 4
        # bytes: value offset
        ifdEntrySize = 12
        for i in range(ifdEntryCount):
            entryOffset = ifdOffset + countSize + i * ifdEntrySize
            await input.seek(entryOffset)
            tag = await input.read(2)
            tag = struct.unpack(boChar + "H", tag)[0]
            # Tags 256/257 hold the width/height
            if tag == 256 or tag == 257:
                # if type indicates that value fits into 4 bytes, value
                # offset is not an offset but value itself
                type = await input.read(2)
                type = struct.unpack(boChar + "H", type)[0]
                if type not in tiffTypes:
                    raise MetadataException("Unable to read metadata")
                typeSize = tiffTypes[type][0]
                typeChar = tiffTypes[type][1]
                await input.seek(entryOffset + 8)
                value = await input.read(typeSize)
                value = int(struct.unpack(typeChar, value)[0])
                if tag == 256:
                    width = value
                else:
                    height = value
            if width > -1 and height > -1:
                break
    elif size >= 2:
        # Fallback: treat the stream as an ICO-style header
        # http://msdn.microsoft.com/en-us/library/ms997538.aspx
        await input.seek(0)
        reserved = await input.read(2)
        if 0 != struct.unpack("<H", reserved)[0]:
            raise MetadataException("Unable to read metadata")
        format = await input.read(2)
        if 1 != struct.unpack("<H", format)[0]:
            raise MetadataException("Unable to read metadata")
        # The image count is read to advance the stream; its value is unused
        num = await input.read(2)
        num = struct.unpack("<H", num)[0]
        # Single-byte dimensions of (presumably) the first directory entry
        w = await input.read(1)
        h = await input.read(1)
        width = ord(w)
        height = ord(h)

    return width, height
class MetadataException(Exception):
    """Exception raised when metadata for a media file cannot be computed."""

    # Fix: the original docstring opened with four quotes (""""Exception…),
    # leaving a stray leading double-quote inside the docstring text

    pass
| 31.116041
| 78
| 0.54371
|
4a092f58b1c7590cc84f277a51e3ea8d34f58a80
| 12,275
|
py
|
Python
|
pymatgen/io/cifio.py
|
qimin/pymatgen
|
4823c777a8af4a3ca7cd29297563ba8174ec402c
|
[
"MIT"
] | 1
|
2022-03-29T20:03:58.000Z
|
2022-03-29T20:03:58.000Z
|
pymatgen/io/cifio.py
|
qimin/pymatgen
|
4823c777a8af4a3ca7cd29297563ba8174ec402c
|
[
"MIT"
] | null | null | null |
pymatgen/io/cifio.py
|
qimin/pymatgen
|
4823c777a8af4a3ca7cd29297563ba8174ec402c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Wrapper classes for Cif input and output from Structures.
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import re
import cStringIO
import math
import warnings
from collections import OrderedDict
import CifFile
import numpy as np
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.util.io_utils import zopen
from pymatgen.util.coord_utils import in_coord_list_pbc
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
class CifParser(object):
"""
A wrapper class around PyCifRW to read Cif and convert into a pymatgen
Structure object.
"""
def __init__(self, filename, occupancy_tolerance=1.):
    """
    Args:
        filename:
            Cif filename. bzipped or gzipped cifs are fine too.
        occupancy_tolerance:
            If total occupancy of a site is between 1 and
            occupancy_tolerance, the occupancies will be scaled down to 1.
    """
    self._occupancy_tolerance = occupancy_tolerance
    # Accept either a path string (possibly compressed — zopen handles
    # that) or an already-open file-like object
    if isinstance(filename, basestring):
        with zopen(filename, "r") as f:
            self._cif = CifFile.ReadCif(f)
    else:
        self._cif = CifFile.ReadCif(filename)
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
    """
    Creates a CifParser from a string.

    Args:
        cif_string:
            String representation of a CIF.
        occupancy_tolerance:
            If total occupancy of a site is between 1 and
            occupancy_tolerance, the occupancies will be scaled down to 1.

    Returns:
        CifParser
    """
    # Wrap the string in a file-like object; __init__ passes non-string
    # inputs straight to CifFile.ReadCif
    output = cStringIO.StringIO(cif_string)
    return CifParser(output, occupancy_tolerance)
def _unique_coords(self, coord_in):
    """
    Generate unique coordinates using coord and symmetry positions.

    Applies every operation in ``self.symmetry_operations`` (set by
    ``_get_structure``) to ``coord_in`` and collects the distinct images.
    """
    coords = []
    for op in self.symmetry_operations:
        coord = op.operate(coord_in)
        # Wrap each fractional coordinate into [0, 1)
        coord = np.array([i - math.floor(i) for i in coord])
        # Keep only images not already present (tolerance 1e-3, with
        # periodic boundary conditions)
        if not in_coord_list_pbc(coords, coord, atol=1e-3):
            coords.append(coord)
    return coords
def _get_structure(self, data, primitive):
    """
    Generate structure from part of the cif.

    Args:
        data: one CIF data block (dict-like access to CIF tags)
        primitive: whether to reduce to the primitive cell

    Returns:
        a sorted pymatgen Structure
    """
    # Lattice from cell lengths/angles (float_from_str is a module-level
    # helper defined elsewhere in this file)
    lengths = [float_from_str(data["_cell_length_" + i])
               for i in ["a", "b", "c"]]
    angles = [float_from_str(data["_cell_angle_" + i])
              for i in ["alpha", "beta", "gamma"]]
    lattice = Lattice.from_lengths_and_angles(lengths, angles)

    # Symmetry ops: try both tag spellings, else assume P1 (identity only)
    try:
        sympos = data["_symmetry_equiv_pos_as_xyz"]
    except KeyError:
        try:
            sympos = data["_symmetry_equiv_pos_as_xyz_"]
        except KeyError:
            warnings.warn("No _symmetry_equiv_pos_as_xyz type key found. "
                          "Defaulting to P1.")
            sympos = ['x, y, z']
    self.symmetry_operations = parse_symmetry_operations(sympos)

    def parse_symbol(sym):
        # Extract the element symbol, dropping any trailing charge/label
        # (e.g. "Fe2+" -> "Fe")
        m = re.search("([A-Z][a-z]*)", sym)
        if m:
            return m.group(1)
        return ""

    # Oxidation states per atom-type symbol, if the CIF declares them
    try:
        oxi_states = {data["_atom_type_symbol"][i]:
                      float_from_str(data["_atom_type_oxidation_number"][i])
                      for i in xrange(len(data["_atom_type_symbol"]))}
    except (ValueError, KeyError):
        oxi_states = None

    # Collect species and occupancies per asymmetric-unit site; multiple
    # species can share one coordinate (partial occupancy)
    coord_to_species = OrderedDict()
    for i in xrange(len(data["_atom_site_type_symbol"])):
        symbol = parse_symbol(data["_atom_site_type_symbol"][i])
        if oxi_states is not None:
            el = Specie(symbol,
                        oxi_states[data["_atom_site_type_symbol"][i]])
        else:
            el = Element(symbol)
        x = float_from_str(data["_atom_site_fract_x"][i])
        y = float_from_str(data["_atom_site_fract_y"][i])
        z = float_from_str(data["_atom_site_fract_z"][i])
        try:
            occu = float_from_str(data["_atom_site_occupancy"][i])
        except (KeyError, ValueError):
            occu = 1
        if occu > 0:
            coord = (x, y, z)
            if coord not in coord_to_species:
                coord_to_species[coord] = {el: occu}
            else:
                coord_to_species[coord][el] = occu

    # Expand each unique site by the symmetry operations
    allspecies = []
    allcoords = []
    for coord, species in coord_to_species.items():
        coords = self._unique_coords(coord)
        allcoords.extend(coords)
        allspecies.extend(len(coords) * [species])

    #rescale occupancies if necessary
    for species in allspecies:
        totaloccu = sum(species.values())
        if 1 < totaloccu <= self._occupancy_tolerance:
            for key, value in species.iteritems():
                species[key] = value / totaloccu

    struct = Structure(lattice, allspecies, allcoords)
    if primitive:
        struct = struct.get_primitive_structure()
    return struct.get_sorted_structure()
def get_structures(self, primitive=True):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive:
Set to False to return conventional unit cells. Defaults to
True.
Returns:
List of Structures.
"""
structures = []
for k, v in self._cif.items():
try:
structures.append(self._get_structure(v, primitive))
except KeyError:
pass
return structures
@property
def to_dict(self):
d = OrderedDict()
for k, v in self._cif.items():
d[k] = {}
for k2, v2 in v.items():
d[k][k2] = v2
return d
class CifWriter:
    """
    A wrapper around PyCifRW to write CIF files from pymatgen structures.
    """
    def __init__(self, struct):
        """
        Args:
            struct:
                A pymatgen.core.structure.Structure object.
        """
        block = CifFile.CifBlock()
        latt = struct.lattice
        comp = struct.composition
        # Composition stripped of oxidation states, used for formula fields.
        no_oxi_comp = Composition(comp.formula)
        # Structures are written without symmetry reduction, i.e. in P1.
        block["_symmetry_space_group_name_H-M"] = "P 1"
        for cell_attr in ['a', 'b', 'c']:
            block["_cell_length_" + cell_attr] = str(getattr(latt, cell_attr))
        for cell_attr in ['alpha', 'beta', 'gamma']:
            block["_cell_angle_" + cell_attr] = str(getattr(latt, cell_attr))
        block["_chemical_name_systematic"] = "Generated by pymatgen"
        block["_symmetry_Int_Tables_number"] = 1
        block["_chemical_formula_structural"] = str(no_oxi_comp
                                                    .reduced_formula)
        block["_chemical_formula_sum"] = str(no_oxi_comp.formula)
        block["_cell_volume"] = str(latt.volume)
        reduced_comp = Composition.from_formula(no_oxi_comp.reduced_formula)
        # Z (formula units per cell) = atoms of one element in the cell
        # divided by atoms of that element in the reduced formula.
        el = no_oxi_comp.elements[0]
        amt = comp[el]
        fu = int(amt / reduced_comp[Element(el.symbol)])
        block["_cell_formula_units_Z"] = str(fu)
        block.AddCifItem(([["_symmetry_equiv_pos_site_id",
                            "_symmetry_equiv_pos_as_xyz"]],
                          [[["1"], ["x, y, z"]]]))
        # Record oxidation states only if every species carries one;
        # otherwise fall back to neutral atoms and omit that loop entirely.
        contains_oxidation = True
        try:
            symbol_to_oxinum = {str(el): float(el.oxi_state)
                                for el in comp.elements}
        except AttributeError:
            symbol_to_oxinum = {el.symbol: 0 for el in comp.elements}
            contains_oxidation = False
        if contains_oxidation:
            block.AddCifItem(([["_atom_type_symbol",
                                "_atom_type_oxidation_number"]],
                              [[symbol_to_oxinum.keys(),
                                symbol_to_oxinum.values()]]))
        # Per-site columns of the _atom_site loop, built in parallel.
        atom_site_type_symbol = []
        atom_site_symmetry_multiplicity = []
        atom_site_fract_x = []
        atom_site_fract_y = []
        atom_site_fract_z = []
        atom_site_attached_hydrogens = []
        atom_site_B_iso_or_equiv = []
        atom_site_label = []
        atom_site_occupancy = []
        count = 1
        for site in struct:
            # Partially occupied sites emit one row per species.
            for sp, occu in site.species_and_occu.items():
                atom_site_type_symbol.append(str(sp))
                atom_site_symmetry_multiplicity.append("1")
                atom_site_fract_x.append("{0:f}".format(site.a))
                atom_site_fract_y.append("{0:f}".format(site.b))
                atom_site_fract_z.append("{0:f}".format(site.c))
                atom_site_attached_hydrogens.append("0")
                atom_site_B_iso_or_equiv.append(".")
                atom_site_label.append("{}{}".format(sp.symbol, count))
                atom_site_occupancy.append(str(occu))
                count += 1
        block["_atom_site_type_symbol"] = atom_site_type_symbol
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_label": atom_site_label})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_symmetry_multiplicity":
                         atom_site_symmetry_multiplicity})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_fract_x": atom_site_fract_x})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_fract_y": atom_site_fract_y})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_fract_z": atom_site_fract_z})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_attached_hydrogens":
                         atom_site_attached_hydrogens})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_B_iso_or_equiv":
                         atom_site_B_iso_or_equiv})
        block.AddToLoop("_atom_site_type_symbol",
                        {"_atom_site_occupancy": atom_site_occupancy})
        self._cf = CifFile.CifFile()
        # AJ says: CIF Block names cannot be more than 75 characters or you
        # get an Exception
        self._cf[comp.reduced_formula[0:74]] = block
    def __str__(self):
        """
        Returns the cif as a string.
        """
        return str(self._cf)
    def write_file(self, filename):
        """
        Write the cif file.
        """
        with open(filename, "w") as f:
            f.write(self.__str__())
def float_from_str(text):
    """
    Remove uncertainty brackets from strings and return the float.

    CIF numeric fields often carry an uncertainty in parentheses,
    e.g. "1.234(5)"; the parenthesised part is stripped before parsing.

    Args:
        text:
            String representation of a CIF numeric value.

    Returns:
        The parsed float with any "(...)" uncertainty removed.
    """
    # Raw string: the original "\(.+\)" is an invalid escape sequence that
    # triggers a DeprecationWarning (and eventually an error) on modern Python.
    return float(re.sub(r"\(.+\)", "", text))
def parse_symmetry_operations(symmops_str_list):
    """
    Help method to parse the symmetry operations.
    Args:
        symmops_str_list:
            List of symmops strings of the form
            ['x, y, z', '-x, -y, z', '-y+1/2, x+1/2, z+1/2', ...]
    Returns:
        List of SymmOps
    """
    ops = []
    for op_str in symmops_str_list:
        rot_matrix = np.zeros((3, 3))
        trans = np.zeros(3)
        # Each comma-separated token defines one output coordinate as a
        # signed sum of axis symbols (x/y/z) and fractions (e.g. 1/2).
        toks = op_str.strip().split(",")
        for i, tok in enumerate(toks):
            # Raw string: the original non-raw pattern contained invalid
            # escape sequences ("\+", "\-", "\s", "\d") that warn on
            # modern Python; the regex itself is unchanged.
            for m in re.finditer(r"([\+\-]*)\s*([x-z\d]+)/*(\d*)", tok):
                factor = -1 if m.group(1) == "-" else 1
                if m.group(2) in ("x", "y", "z"):
                    # ord("x") == 120, so x/y/z map to columns 0/1/2.
                    j = ord(m.group(2)) - 120
                    rot_matrix[i, j] = factor
                else:
                    # Numeric term: a translation, possibly a fraction a/b.
                    num = float(m.group(2))
                    if m.group(3) != "":
                        num /= float(m.group(3))
                    trans[i] = factor * num
        op = SymmOp.from_rotation_and_translation(rot_matrix, trans)
        ops.append(op)
    return ops
| 35.476879
| 80
| 0.563747
|
4a092f6116fba1e90eb002755bcc47b7169d2b6d
| 431
|
py
|
Python
|
labext/modules/jquery.py
|
binh-vu/ipywidgets_extra
|
3ddf46445306b2aa158bf3f696ec33f8ddd499e7
|
[
"MIT"
] | 3
|
2020-06-21T22:57:55.000Z
|
2021-06-03T23:36:39.000Z
|
labext/modules/jquery.py
|
binh-vu/ipywidgets_extra
|
3ddf46445306b2aa158bf3f696ec33f8ddd499e7
|
[
"MIT"
] | null | null | null |
labext/modules/jquery.py
|
binh-vu/ipywidgets_extra
|
3ddf46445306b2aa158bf3f696ec33f8ddd499e7
|
[
"MIT"
] | 1
|
2020-06-20T19:50:37.000Z
|
2020-06-20T19:50:37.000Z
|
from typing import List, Type, Dict
from labext.module import Module
class JQuery(Module):
    """Lab extension module descriptor for the jQuery library."""
    @classmethod
    def id(cls) -> str:
        """Unique identifier of this module."""
        return 'jquery'
    @classmethod
    def css(cls) -> List[str]:
        # jQuery ships no stylesheets.
        return []
    @classmethod
    def js(cls) -> Dict[str, str]:
        # Mapping of module id -> CDN URL (note: no ".js" suffix).
        return {cls.id(): '//code.jquery.com/jquery-3.3.1.min'}
    @classmethod
    def dependencies(cls) -> List[Type['Module']]:
        # jQuery requires no other modules.
        return []
| 18.73913
| 63
| 0.589327
|
4a092f9136fe8f965d910552ab5c32363a4b2c3b
| 1,966
|
py
|
Python
|
nova/tests/unit/scheduler/filters/test_exact_core_filter.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5
|
2016-04-28T16:20:38.000Z
|
2021-04-25T11:19:03.000Z
|
nova/tests/unit/scheduler/filters/test_exact_core_filter.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 11
|
2017-06-19T01:28:55.000Z
|
2017-06-23T02:01:47.000Z
|
nova/tests/unit/scheduler/filters/test_exact_core_filter.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 7
|
2015-01-20T10:30:08.000Z
|
2020-02-05T10:29:05.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova.scheduler.filters import exact_core_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestExactCoreFilter(test.NoDBTestCase):
    """Unit tests for the ExactCoreFilter scheduler filter."""
    def setUp(self):
        super(TestExactCoreFilter, self).setUp()
        self.filt_cls = exact_core_filter.ExactCoreFilter()
    def test_exact_core_filter_passes(self):
        """A host with exactly enough free vCPUs passes and gets a limit."""
        request = objects.RequestSpec(
            flavor=objects.Flavor(vcpus=1))
        total_vcpus = 3
        host = self._make_host_state({'vcpus_total': total_vcpus,
                                      'vcpus_used': total_vcpus - 1})
        self.assertTrue(self.filt_cls.host_passes(host, request))
        self.assertEqual(host.limits.get('vcpu'), total_vcpus)
    def test_exact_core_filter_fails(self):
        """A host whose free vCPUs do not exactly match is rejected."""
        request = objects.RequestSpec(
            flavor=objects.Flavor(vcpus=2))
        host = self._make_host_state({'vcpus_total': 3, 'vcpus_used': 2})
        self.assertFalse(self.filt_cls.host_passes(host, request))
        self.assertNotIn('vcpu', host.limits)
    def test_exact_core_filter_fails_host_vcpus_not_set(self):
        """A host that reports no vCPU data is rejected."""
        request = objects.RequestSpec(
            flavor=objects.Flavor(vcpus=1))
        host = self._make_host_state({})
        self.assertFalse(self.filt_cls.host_passes(host, request))
        self.assertNotIn('vcpu', host.limits)
    def _make_host_state(self, host_attributes):
        """Build a fake host state carrying the given attributes."""
        return fakes.FakeHostState('host1', 'node1', host_attributes)
| 40.122449
| 78
| 0.70295
|
4a092ff78bedc5e9d9713924b42e70f90948cd4f
| 7,196
|
py
|
Python
|
human_data/subject_data_to_csv.py
|
NYUMaLab/confidence
|
50b6e906753315707bd01d682df2cf3d8f8e5d6b
|
[
"MIT"
] | 6
|
2016-01-10T06:56:53.000Z
|
2020-10-23T12:11:54.000Z
|
human_data/subject_data_to_csv.py
|
NYUMaLab/confidence
|
50b6e906753315707bd01d682df2cf3d8f8e5d6b
|
[
"MIT"
] | null | null | null |
human_data/subject_data_to_csv.py
|
NYUMaLab/confidence
|
50b6e906753315707bd01d682df2cf3d8f8e5d6b
|
[
"MIT"
] | 5
|
2018-05-01T18:09:52.000Z
|
2021-07-16T06:50:02.000Z
|
import numpy as np
import os
import datetime
import re
import pandas as pd
import scipy.io as spio
### https://stackoverflow.com/questions/7008608/scipy-io-loadmat-nested-structures-i-e-dictionaries
def loadmat(filename):
    '''
    this function should be called instead of direct spio.loadmat
    as it cures the problem of not properly recovering python dictionaries
    from mat files. It calls the function check keys to cure all entries
    which are still mat-objects
    '''
    # NOTE(review): spio.matlab.mio5_params is a private scipy module path
    # that was deprecated/removed in newer scipy releases -- confirm the
    # pinned scipy version supports it.
    def _check_keys(d):
        '''
        checks if entries in dictionary are mat-objects. If yes
        todict is called to change them to nested dictionaries
        '''
        for key in d:
            if isinstance(d[key], spio.matlab.mio5_params.mat_struct):
                d[key] = _todict(d[key])
        return d
    def _todict(matobj):
        '''
        A recursive function which constructs from matobjects nested dictionaries
        '''
        d = {}
        for strg in matobj._fieldnames:
            elem = matobj.__dict__[strg]
            if isinstance(elem, spio.matlab.mio5_params.mat_struct):
                d[strg] = _todict(elem)
            elif isinstance(elem, np.ndarray):
                # Cell arrays load as ndarrays; convert them to lists.
                d[strg] = _tolist(elem)
            else:
                d[strg] = elem
        return d
    def _tolist(ndarray):
        '''
        A recursive function which constructs lists from cellarrays
        (which are loaded as numpy ndarrays), recursing into the elements
        if they contain matobjects.
        '''
        elem_list = []
        for sub_elem in ndarray:
            if isinstance(sub_elem, spio.matlab.mio5_params.mat_struct):
                elem_list.append(_todict(sub_elem))
            elif isinstance(sub_elem, np.ndarray):
                elem_list.append(_tolist(sub_elem))
            else:
                elem_list.append(sub_elem)
        return elem_list
    # struct_as_record=False yields mat_struct objects, which the helpers
    # above convert to plain nested dicts/lists.
    data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
    return _check_keys(data)
###
class Vividict(dict):
    """Dictionary that auto-creates nested Vividicts on missing keys."""
    def __missing__(self, key):
        """Insert and return a fresh empty Vividict for *key*."""
        child = type(self)()
        self[key] = child
        return child
# Names of the two block categories recorded in each session's .mat file.
block_types = ['Test','Training']
def oldcat2newcat(x):
    """Recode category labels 1 and 2 as -1 and 1 (int8 arithmetic)."""
    categories = np.array(x, np.int8)
    return categories * 2 - 3
#%%
# Map each experiment name to its {task letter: data directory} layout.
experiments = Vividict({
    'reliability_exp1': {
        'A':'reliability_exp1/taskA',
        'B':'reliability_exp1/taskB'}})
# experiments = Vividict({'attention': {
#                             'B':'attention'},
#                         'reliability_exp1': {
#                             'A':'reliability_exp1/taskA',
#                             'B':'reliability_exp1/taskB'},
#                         'reliability_exp2': {
#                             'A':'reliability_exp2/taskA',
#                             'B':'reliability_exp2/taskB'},
#                         'reliability_exp3': {
#                             'B':'reliability_exp3'}
#                         })
# Build one long-format DataFrame per subject from the raw .mat sessions.
for experiment in experiments:
    experiments[experiment]['subjects'] = Vividict()
    # Iterate the task entries only (skip the 'subjects' key added above).
    for task, taskdir in [i for i in experiments[experiment].items() if i[0]!='subjects']:
        for file in os.listdir(taskdir):
            if file.endswith('.mat'):
                # File names look like <name>_<YYYYMMDD>_<HHMMSS>_... .mat
                name, date, time = re.split('_',file)[0:3]
                if name not in experiments[experiment]['subjects']:
                    experiments[experiment]['subjects'][name]['df'] = pd.DataFrame()
                # min(59, ...) guards against malformed seconds fields.
                session_start_time = datetime.datetime(int(date[0:4]),int(date[4:6]),int(date[6:8]),int(time[0:2]),int(time[2:4]),min(59,int(time[4:6])))
                mat = loadmat(taskdir+'/'+file)
                stim_type = mat['P']['stim_type']
                for block_type in block_types:
                    tmpdata = mat[block_type]
                    stim_dict = tmpdata['R']
                    resp_dict = tmpdata['responses']
                    for block_no in range(0,len(tmpdata['R']['draws'])):
                        # for blocks with only one section, wrap in a list to make the indexing work better
                        if np.array(stim_dict['draws'][block_no]).ndim==1:
                            for d in ['draws', 'sigma', 'phase', 'trial_order']:
                                stim_dict[d][block_no]=[stim_dict[d][block_no]]
                            for d in ['tf', 'c', 'conf', 'rt']:
                                resp_dict[block_no][d]=[resp_dict[block_no][d]]
                        for section_no in range(0,len(tmpdata['R']['draws'][block_no])):
                            # Recode 1/2 category labels as -1/+1.
                            stim_category = oldcat2newcat(stim_dict['trial_order'][block_no][section_no])
                            resp_category = oldcat2newcat(resp_dict[block_no]['c'][section_no])
                            tmpdf = pd.DataFrame({'stim_orientation': stim_dict['draws'][block_no][section_no],
                                                  'stim_reliability': stim_dict['sigma'][block_no][section_no],
                                                  'stim_phase': stim_dict['phase'][block_no][section_no],
                                                  'stim_category': stim_category,
                                                  'stim_type': stim_type,
                                                  'resp_confidence': resp_dict[block_no]['conf'][section_no],
                                                  'resp_category': resp_category,
                                                  'resp_buttonid': resp_category*resp_dict[block_no]['conf'][section_no],
                                                  'resp_correct': resp_dict[block_no]['tf'][section_no],
                                                  'resp_rt': resp_dict[block_no]['rt'][section_no],
                                                  'block_no': block_no,
                                                  'section_no': section_no,
                                                  'task': task,
                                                  'block_type': block_type,
                                                  'session_start_time': session_start_time,
                                                  })
                            experiments[experiment]['subjects'][name]['df'] = pd.concat([experiments[experiment]['subjects'][name]['df'], tmpdf], ignore_index = True)
                experiments[experiment]['subjects'][name]['df']['subject_name'] = name
# Combine all subjects, sort, and write one CSV per experiment.
for experiment in experiments:
    dfs = [i[1]['df'] for i in experiments[experiment]['subjects'].items()]
    sort_cols = ['subject_name', 'session_start_time', 'task', 'block_type', 'block_no', 'section_no']
    # block_type sorts descending so 'Test' rows precede 'Training' rows.
    ascending = [False if i=='block_type' else True for i in sort_cols]
    cols = ['subject_name', 'session_start_time', 'task', 'block_type', 'block_no', 'section_no', 'stim_type', 'stim_category', 'stim_reliability', 'stim_orientation', 'stim_phase', 'resp_buttonid', 'resp_category', 'resp_confidence', 'resp_rt', 'resp_correct']
    all = pd.concat(dfs, ignore_index=True)
    all = all.sort_values(sort_cols, ascending=ascending)[cols]
    all.to_csv(f'{experiment}.csv', index=False)
| 47.03268
| 261
| 0.522513
|
4a0933131a47df9dac45b781e06b794413aff850
| 4,128
|
py
|
Python
|
app/recipe/test/test_tags_api.py
|
jtartsi/recipe-app-api
|
56d35b2eadc426d2214aa3e312a8cc003a9cbf08
|
[
"MIT"
] | null | null | null |
app/recipe/test/test_tags_api.py
|
jtartsi/recipe-app-api
|
56d35b2eadc426d2214aa3e312a8cc003a9cbf08
|
[
"MIT"
] | null | null | null |
app/recipe/test/test_tags_api.py
|
jtartsi/recipe-app-api
|
56d35b2eadc426d2214aa3e312a8cc003a9cbf08
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
def create_user(**params):
    """Create and return a user via the project's active user model."""
    return get_user_model().objects.create_user(**params)
class PublicTagsApiTests(TestCase):
    """Tests for the tags API without authentication."""
    def setUp(self):
        self.client = APIClient()
    def test_login_required(self):
        """An unauthenticated request for tags is rejected with 401."""
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the authorized user tags API"""
    def setUp(self):
        # Authenticate every request in this class as a fresh user.
        self.user = create_user(
            email='test@jmitesolutions.com',
            password='testpass',
            name='John Doe',
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
    def test_retrieve_tags(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        res = self.client.get(TAGS_URL)
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        user2 = create_user(
            email='other@jmitesolutions.com',
            password='testpass',
            name='Other User'
        )
        Tag.objects.create(user=user2, name='Fruity')
        tag = Tag.objects.create(user=self.user, name='Comfort Food')
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)
    def test_create_tag_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_tags_assigned_to_recipes(self):
        """Test filtering tags by those assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
    def test_retrieve_tags_assigner_unique(self):
        """Test filtering tags by assigned returns unique items"""
        tag = Tag.objects.create(user=self.user, name='Breakfast')
        Tag.objects.create(user=self.user, name='Lunch')
        recipe1 = Recipe.objects.create(
            title='Pancakes',
            # Fix: time_minutes was passed as the string '5'; every other
            # Recipe created in this module passes an int for this field.
            time_minutes=5,
            price=3.00,
            user=self.user
        )
        recipe1.tags.add(tag)
        recipe2 = Recipe.objects.create(
            title='Porridge',
            time_minutes=3,
            price=2.00,
            user=self.user
        )
        recipe2.tags.add(tag)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| 31.51145
| 71
| 0.630329
|
4a093318c2520961c7354d6d8c16a928c5e0dba4
| 191,935
|
py
|
Python
|
jax/_src/lax/lax.py
|
VolodyaCO/jax
|
4d148999404a572ebac6dd1b58f2d93f73245360
|
[
"Apache-2.0"
] | 1
|
2022-03-10T16:30:31.000Z
|
2022-03-10T16:30:31.000Z
|
jax/_src/lax/lax.py
|
VolodyaCO/jax
|
4d148999404a572ebac6dd1b58f2d93f73245360
|
[
"Apache-2.0"
] | null | null | null |
jax/_src/lax/lax.py
|
VolodyaCO/jax
|
4d148999404a572ebac6dd1b58f2d93f73245360
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import enum
import functools
from functools import partial
import itertools
import operator
from typing import (Any, Callable, Optional, Sequence, Tuple, List, TypeVar,
Union, cast as type_cast)
import warnings
import numpy as np
import jax
from jax import core
from jax._src import ad_util
from jax._src import api
from jax._src import api_util
from jax._src import device_array
from jax._src import dispatch
from jax import linear_util as lu
from jax._src import dtypes
from jax import tree_util
from jax._src import source_info_util
from jax._src.config import config
from jax.core import (Primitive, UnshapedArray, ShapedArray, ConcreteArray,
raise_to_shaped, abstract_token, canonicalize_shape)
from jax._src.abstract_arrays import array_types
from jax.interpreters import partial_eval as pe
from jax.interpreters import mlir
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters import ad
from jax.interpreters import invertible_ad as iad
from jax.interpreters import batching
from jax.interpreters import masking
import jax._src.pretty_printer as pp
from jax._src import util
from jax._src.util import (cache, safe_zip, prod, safe_map, canonicalize_axis,
split_list, new_name_stack)
from jax.tree_util import tree_map
import jax._src.lib
from jax._src.lib import pytree
from jax._src.lib import xla_bridge
from jax._src.lib import xla_client
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import chlo
from jax._src.lib.mlir.dialects import mhlo
from jax._src.lax.utils import (
_input_dtype,
standard_abstract_eval,
standard_multi_result_abstract_eval,
standard_named_shape_rule,
standard_primitive,
)
from jax._src.lax import slicing
# Short aliases for the XLA client machinery used throughout this module.
xb = xla_bridge
xc = xla_client
xops = xla_client.ops
xe = xla_client._xla
# Keep handles to the Python builtins: `max`, `min` and reduction names are
# shadowed by the lax wrappers defined later in this file.
_max = builtins.max
_min = builtins.min
_reduce = functools.reduce
# Loose type aliases used in signatures below.
Array = Any
DType = Any
Shape = core.Shape
T = TypeVar("T")
def _validate_shapes(shapes: Sequence[Shape]):
  """Raise TypeError if any static shape has a negative dimension."""
  def _check_static_shape(shape: Shape):
    checked = canonicalize_shape(shape)
    if not all(idx >= 0 for idx in checked):
      msg = f"Only non-negative indices are allowed when broadcasting" \
            f" static shapes, but got shape {shape!r}."
      raise TypeError(msg)
  assert shapes
  if config.jax_dynamic_shapes:
    # pass dynamic shapes through unchecked
    return
  else:
    # tuple(map(...)) forces evaluation of the check for every shape.
    _ = tuple(map(_check_static_shape, shapes))
def _try_broadcast_shapes(
    shapes: Sequence[Tuple[int, ...]]) -> Optional[Tuple[int, ...]]:
  """Broadcast equal-rank shapes; returns None if they are incompatible."""
  if len(shapes) == 1: return shapes[0]
  rank, *others = {len(shape) for shape in shapes}
  if others: return None  # must have consistent rank
  if not rank: return ()  # scalar case
  result_shape = [-1] * rank
  for i, sizes in enumerate(zip(*shapes)):
    # Dimensions equal to 1 broadcast against anything.
    non_1s = {d for d in sizes if not core.symbolic_equal_dim(d, 1)}
    if len(non_1s) > 1:
      return None  # must have equal sizes other than 1-sized axes
    result_shape[i] = next(iter(non_1s), 1)
  return tuple(result_shape)
def broadcast_shapes(*shapes: Tuple[Union[int, core.Tracer], ...]
                     ) -> Tuple[Union[int, core.Tracer], ...]:
  """Returns the shape that results from NumPy broadcasting of `shapes`."""
  # NOTE: We have both cached and uncached versions to handle Tracers in shapes.
  try:
    return _broadcast_shapes_cached(*shapes)
  except Exception:
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit. The cached path fails with ordinary
    # exceptions only (e.g. TypeError when a Tracer is unhashable).
    return _broadcast_shapes_uncached(*shapes)
@cache()
def _broadcast_shapes_cached(*shapes: Tuple[int, ...]) -> Tuple[int, ...]:
  """Cached shape broadcasting; requires all shapes to be hashable."""
  return _broadcast_shapes_uncached(*shapes)
def _broadcast_shapes_uncached(*shapes):
  """Compute the NumPy-broadcast result shape, raising ValueError on failure."""
  _validate_shapes(shapes)
  fst, *rst = shapes
  if not rst: return fst
  # First check if we need only rank promotion (and not singleton-broadcasting).
  try: return _reduce(_broadcast_ranks, rst, fst)
  except ValueError: pass
  # Next try singleton-broadcasting, padding out ranks using singletons.
  ndim = _max(len(shape) for shape in shapes)
  shape_list = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
  result_shape = _try_broadcast_shapes(shape_list)
  if result_shape is None:
    raise ValueError("Incompatible shapes for broadcasting: {}"
                     .format(tuple(shape_list)))
  return result_shape
def _broadcast_ranks(s1, s2):
  """Return the higher-rank shape if the lower-rank one is its suffix."""
  if len(s2) < len(s1):
    s1, s2 = s2, s1
  assert len(s1) <= len(s2)
  suffix = s2[len(s2) - len(s1):]
  if core.symbolic_equal_shape(suffix, s1):
    return s2
  raise ValueError
def _identity(x):
  """Return the argument unchanged."""
  return x
### traceables
# Traceable elementwise wrappers: each function simply binds the
# corresponding `*_p` primitive (defined later in this module).
def neg(x: Array) -> Array:
  r"""Elementwise negation: :math:`-x`."""
  return neg_p.bind(x)
def sign(x: Array) -> Array:
  r"""Elementwise sign.
  For floating-point inputs, returns
  :math:`\mathrm{sign}(x) = \begin{cases}
  -1 & x < 0\\
  -0 & x = -0\\
  \mathit{NaN} & x = \mathit{NaN}\\
  +0 & x = +0\\
  1 & x > 0
  \end{cases}`
  For signed integer inputs, returns
  :math:`\mathrm{sign}(x) = \begin{cases}
  -1 & x < 0\\
  0 & x = 0\\
  1 & x > 0
  \end{cases}`
  For complex inputs, returns the complex phase, i.e.
  :math:`\mathrm{sign}(x) = \frac{x}{|x|}`.
  """
  return sign_p.bind(x)
def nextafter(x1: Array, x2: Array) -> Array:
  r"""Returns the next representable value after `x1` in the direction of `x2`.
  Note that in some environments flush-denormal-to-zero semantics is used.
  This means that, around zero, this function returns strictly non-zero
  values which appear as zero in any operations. Consider this example::
    >>> jnp.nextafter(0, 1)  # denormal numbers are representable
    DeviceArray(1.e-45, dtype=float32)
    >>> jnp.nextafter(0, 1) * 1  # but are flushed to zero
    DeviceArray(0., dtype=float32)
  For the smallest usable (i.e. normal) float, use ``tiny`` of ``jnp.finfo``.
  """
  return nextafter_p.bind(x1, x2)
def floor(x: Array) -> Array:
  r"""Elementwise floor: :math:`\left\lfloor x \right\rfloor`."""
  return floor_p.bind(x)
def ceil(x: Array) -> Array:
  r"""Elementwise ceiling: :math:`\left\lceil x \right\rceil`."""
  return ceil_p.bind(x)
class RoundingMethod(enum.IntEnum):
  """How ``lax.round`` resolves exact halfway values (e.g. 0.5)."""
  AWAY_FROM_ZERO = 0  # 0.5 -> 1, -0.5 -> -1
  TO_NEAREST_EVEN = 1  # banker's rounding: 0.5 -> 0, 1.5 -> 2
def round(x: Array,
          rounding_method: RoundingMethod = RoundingMethod.AWAY_FROM_ZERO
          ) -> Array:
  r"""Elementwise round.
  Rounds values to the nearest integer.
  Args:
    x: an array or scalar value to round.
    rounding_method: the method to use when rounding halfway values
      (e.g., `0.5`). See ``lax.RoundingMethod`` for the list of possible
      values.
  Returns:
    An array containing the elementwise rounding of x.
  """
  # Coerce plain ints to the enum so invalid values fail loudly here,
  # before the primitive is bound.
  rounding_method = RoundingMethod(rounding_method)
  return round_p.bind(x, rounding_method=rounding_method)
# Elementwise wrappers, continued: each binds the corresponding primitive.
def is_finite(x: Array) -> Array:
  r"""Elementwise :math:`\mathrm{isfinite}`.
  For each element x returns `True` if and only if x is not :math:`\pm\infty` or
  :math:`\mathit{NaN}`.
  """
  return is_finite_p.bind(x)
def exp(x: Array) -> Array:
  r"""Elementwise exponential: :math:`e^x`."""
  return exp_p.bind(x)
def expm1(x: Array) -> Array:
  r"""Elementwise :math:`e^{x} - 1`."""
  return expm1_p.bind(x)
def log(x: Array) -> Array:
  r"""Elementwise natural logarithm: :math:`\mathrm{log}(x)`."""
  return log_p.bind(x)
def log1p(x: Array) -> Array:
  r"""Elementwise :math:`\mathrm{log}(1 + x)`."""
  return log1p_p.bind(x)
def tanh(x: Array) -> Array:
  r"""Elementwise hyperbolic tangent: :math:`\mathrm{tanh}(x)`."""
  return tanh_p.bind(x)
def sin(x: Array) -> Array:
  r"""Elementwise sine: :math:`\mathrm{sin}(x)`."""
  return sin_p.bind(x)
def cos(x: Array) -> Array:
  r"""Elementwise cosine: :math:`\mathrm{cos}(x)`."""
  return cos_p.bind(x)
def atan2(x: Array, y: Array) -> Array:
  r"""Elementwise arc tangent of two variables:
    :math:`\mathrm{atan}({x \over y})`."""
  return atan2_p.bind(x, y)
def betainc(a: Array, b: Array, x: Array) -> Array:
  r"""Elementwise regularized incomplete beta integral."""
  return regularized_incomplete_beta_p.bind(a, b, x)
def lgamma(x: Array) -> Array:
  r"""Elementwise log gamma: :math:`\mathrm{log}(\Gamma(x))`."""
  return lgamma_p.bind(x)
def digamma(x: Array) -> Array:
  r"""Elementwise digamma: :math:`\psi(x)`."""
  return digamma_p.bind(x)
def igamma(a: Array, x: Array) -> Array:
  r"""Elementwise regularized incomplete gamma function."""
  return igamma_p.bind(a, x)
def igammac(a: Array, x: Array) -> Array:
  r"""Elementwise complementary regularized incomplete gamma function."""
  return igammac_p.bind(a, x)
def igamma_grad_a(a: Array, x: Array) -> Array:
  r"""Elementwise derivative of the regularized incomplete gamma function."""
  return igamma_grad_a_p.bind(a, x)
def random_gamma_grad(a: Array, x: Array) -> Array:
  r"""Elementwise derivative of samples from `Gamma(a, 1)`."""
  return random_gamma_grad_p.bind(a, x)
def bessel_i0e(x: Array) -> Array:
  r"""Exponentially scaled modified Bessel function of order 0:
  :math:`\mathrm{i0e}(x) = e^{-|x|} \mathrm{i0}(x)`
  """
  return bessel_i0e_p.bind(x)
def bessel_i1e(x: Array) -> Array:
  r"""Exponentially scaled modified Bessel function of order 1:
  :math:`\mathrm{i1e}(x) = e^{-|x|} \mathrm{i1}(x)`
  """
  return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
  r"""Elementwise error function: :math:`\mathrm{erf}(x)`."""
  return erf_p.bind(x)
def erfc(x: Array) -> Array:
  r"""Elementwise complementary error function:
    :math:`\mathrm{erfc}(x) = 1 - \mathrm{erf}(x)`."""
  return erfc_p.bind(x)
def erf_inv(x: Array) -> Array:
  r"""Elementwise inverse error function: :math:`\mathrm{erf}^{-1}(x)`."""
  return erf_inv_p.bind(x)
def real(x: Array) -> Array:
  r"""Elementwise extract real part: :math:`\mathrm{Re}(x)`.
  Returns the real part of a complex number.
  """
  return real_p.bind(x)
def imag(x: Array) -> Array:
  r"""Elementwise extract imaginary part: :math:`\mathrm{Im}(x)`.
  Returns the imaginary part of a complex number.
  """
  return imag_p.bind(x)
def complex(x: Array, y: Array) -> Array:
  r"""Elementwise make complex number: :math:`x + jy`.
  Builds a complex number from real and imaginary parts.
  """
  return complex_p.bind(x, y)
def conj(x: Array) -> Array:
  r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
  # The primitive also receives the operand dtype as a parameter.
  return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: Array) -> Array:
  r"""Elementwise absolute value: :math:`|x|`."""
  return abs_p.bind(x)
def pow(x: Array, y: Array) -> Array:
  r"""Elementwise power: :math:`x^y`."""
  return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
  r"""Elementwise power: :math:`x^y`, where :math:`y` is a fixed integer."""
  return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
  r"""Elementwise square root: :math:`\sqrt{x}`."""
  return sqrt_p.bind(x)
def rsqrt(x: Array) -> Array:
  r"""Elementwise reciprocal square root: :math:`1 \over \sqrt{x}`."""
  return rsqrt_p.bind(x)
def cbrt(x: Array) -> Array:
  r"""Elementwise cube root: :math:`\sqrt[3]{x}`."""
  return cbrt_p.bind(x)
def bitwise_not(x: Array) -> Array:
  r"""Elementwise NOT: :math:`\neg x`."""
  return not_p.bind(x)
def bitwise_and(x: Array, y: Array) -> Array:
  r"""Elementwise AND: :math:`x \wedge y`."""
  return and_p.bind(x, y)
def bitwise_or(x: Array, y: Array) -> Array:
  r"""Elementwise OR: :math:`x \vee y`."""
  return or_p.bind(x, y)
def bitwise_xor(x: Array, y: Array) -> Array:
  r"""Elementwise exclusive OR: :math:`x \oplus y`."""
  return xor_p.bind(x, y)
def population_count(x: Array) -> Array:
  r"""Elementwise popcount, count the number of set bits in each element."""
  return population_count_p.bind(x)
def clz(x: Array) -> Array:
  r"""Elementwise count-leading-zeros."""
  return clz_p.bind(x)
def add(x: Array, y: Array) -> Array:
  r"""Elementwise addition: :math:`x + y`."""
  return add_p.bind(x, y)
def sub(x: Array, y: Array) -> Array:
  r"""Elementwise subtraction: :math:`x - y`."""
  return sub_p.bind(x, y)
def mul(x: Array, y: Array) -> Array:
  r"""Elementwise multiplication: :math:`x \times y`."""
  return mul_p.bind(x, y)
def div(x: Array, y: Array) -> Array:
  r"""Elementwise division: :math:`x \over y`."""
  return div_p.bind(x, y)
def rem(x: Array, y: Array) -> Array:
  r"""Elementwise remainder: :math:`x \bmod y`."""
  return rem_p.bind(x, y)
def max(x: Array, y: Array) -> Array:
  r"""Elementwise maximum: :math:`\mathrm{max}(x, y)`
  For complex numbers, uses a lexicographic comparison on the
  `(real, imaginary)` pairs."""
  return max_p.bind(x, y)
def min(x: Array, y: Array) -> Array:
  r"""Elementwise minimum: :math:`\mathrm{min}(x, y)`
  For complex numbers, uses a lexicographic comparison on the
  `(real, imaginary)` pairs."""
  return min_p.bind(x, y)
def shift_left(x: Array, y: Array) -> Array:
  r"""Elementwise left shift: :math:`x \ll y`."""
  return shift_left_p.bind(x, y)
def shift_right_arithmetic(x: Array, y: Array) -> Array:
  r"""Elementwise arithmetic right shift: :math:`x \gg y`."""
  return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x: Array, y: Array) -> Array:
  r"""Elementwise logical right shift: :math:`x \gg y`."""
  return shift_right_logical_p.bind(x, y)
def eq(x: Array, y: Array) -> Array:
  r"""Elementwise equals: :math:`x = y`."""
  return eq_p.bind(x, y)
def ne(x: Array, y: Array) -> Array:
  r"""Elementwise not-equals: :math:`x \neq y`."""
  return ne_p.bind(x, y)
def ge(x: Array, y: Array) -> Array:
  r"""Elementwise greater-than-or-equals: :math:`x \geq y`."""
  return ge_p.bind(x, y)
def gt(x: Array, y: Array) -> Array:
  r"""Elementwise greater-than: :math:`x > y`."""
  return gt_p.bind(x, y)
def le(x: Array, y: Array) -> Array:
  r"""Elementwise less-than-or-equals: :math:`x \leq y`."""
  return le_p.bind(x, y)
def lt(x: Array, y: Array) -> Array:
  r"""Elementwise less-than: :math:`x < y`."""
  return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType) -> Array:
  """Elementwise cast.
  Wraps XLA's `ConvertElementType
  <https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_
  operator, which performs an elementwise conversion from one type to another.
  Similar to a C++ `static_cast`.
  Args:
    operand: an array or scalar value to be cast
    new_dtype: a NumPy dtype representing the target type.
  Returns:
    An array with the same shape as `operand`, cast elementwise to `new_dtype`.
  """
  # Objects implementing the __jax_array__ protocol are unwrapped to the
  # array they represent before conversion.
  if hasattr(operand, '__jax_array__'):
    operand = operand.__jax_array__()
  return _convert_element_type(operand, new_dtype, weak_type=False)
def _convert_element_type(operand: Array, new_dtype: Optional[DType] = None,
                          weak_type: bool = False):
  # Internal variant of convert_element_type that also controls the result's
  # weak-type flag.
  # Don't canonicalize old_dtype because x64 context might cause
  # un-canonicalized operands to be passed in.
  old_dtype = dtypes.dtype(operand, canonicalize=False)
  old_weak_type = dtypes.is_weakly_typed(operand)
  if new_dtype is None:
    new_dtype = old_dtype
  else:
    new_dtype = np.dtype(new_dtype)
  new_dtype = dtypes.dtype(new_dtype, canonicalize=True)
  new_weak_type = bool(weak_type)
  # Dropping the imaginary part is usually a bug in user code; warn rather
  # than fail, mirroring NumPy's complex-to-real cast behavior.
  if (dtypes.issubdtype(old_dtype, np.complexfloating) and
      not dtypes.issubdtype(new_dtype, np.complexfloating)):
    msg = "Casting complex values to real discards the imaginary part"
    warnings.warn(msg, np.ComplexWarning, stacklevel=2)
  # Python has big integers, but convert_element_type(2 ** 100, np.float32) need
  # not be an error since the target dtype fits the value. Handle this case by
  # converting to a NumPy array before calling bind. Without this step, we'd
  # first canonicalize the input to a value of dtype int32 or int64, leading to
  # an overflow error.
  if type(operand) is int:
    operand = np.asarray(operand, new_dtype)
    old_weak_type = False
  # No-op casts on known array types are returned unchanged to avoid staging
  # out a redundant primitive.
  if ((old_dtype, old_weak_type) == (new_dtype, new_weak_type)
      and isinstance(operand, (core.Tracer, device_array.DeviceArray))):
    return operand
  else:
    return convert_element_type_p.bind(operand, new_dtype=new_dtype,
                                       weak_type=new_weak_type)
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
  """Elementwise bitcast.
  Wraps XLA's `BitcastConvertType
  <https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
  operator, which performs a bit cast from one type to another. The bitwidth
  of the source and destination types must match.
  Args:
    operand: an array or scalar value to be cast
    new_dtype: the new type. Should be a NumPy type.
  Returns:
    An array with the same shape as `operand`, bitcast elementwise to
    `new_dtype`.
  """
  new_dtype = dtypes.canonicalize_dtype(new_dtype)
  return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
def clamp(min: Array, x: Array, max: Array) -> Array:
  r"""Elementwise clamp.
  Returns :math:`\mathrm{clamp}(x) = \begin{cases}
  \mathit{min} & \text{if } x < \mathit{min},\\
  \mathit{max} & \text{if } x > \mathit{max},\\
  x & \text{otherwise}
  \end{cases}`.
  """
  # The parameters intentionally shadow the module-level ``min``/``max``
  # wrappers; only the primitive bind below is used here.
  return clamp_p.bind(min, x, max)
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
  """Concatenates a sequence of arrays along `dimension`.
  Wraps XLA's `Concatenate
  <https://www.tensorflow.org/xla/operation_semantics#concatenate>`_
  operator.
  Args:
    operands: a sequence of arrays to concatenate. The arrays must have equal
      shapes, except in the `dimension` axis.
    dimension: the dimension along which to concatenate the arrays.
  Returns:
    An array containing the concatenation.
  Raises:
    ValueError: if `operands` is empty.
  """
  # Fix: error message previously read "a non-empty sequences of arrays".
  if len(operands) == 0:
    raise ValueError("concatenate requires a non-empty sequence of arrays")
  return concatenate_p.bind(*operands, dimension=dimension)
class _enum_descriptor(object):
  """Descriptor that re-wraps a raw enum value in the owning class on access."""
  def __init__(self, val):
    self.val = val
  def __get__(self, _, owner):
    # ``owner`` is the class through which the attribute was looked up, so
    # ``Precision.DEFAULT`` evaluates to ``Precision('default')``.
    return owner(self.val)
class Precision(xla_client.PrecisionConfig.Precision): # type: ignore
  """Precision enum for lax functions
  The `precision` argument to JAX functions generally controls the tradeoff
  between speed and accuracy for array computations on accelerator backends,
  (i.e. TPU and GPU). Members are:
  DEFAULT:
    Fastest mode, but least accurate. Performs computations in bfloat16.
    Aliases: ``'default'``, ``'fastest'``, ``'bfloat16'``.
  HIGH:
    Slower but more accurate. Performs float32 computations in 3 bfloat16
    passes, or using tensorfloat32 where available. Aliases: ``'high'``,
    ``'bfloat16_3x'``, ``'tensorfloat32'``.
  HIGHEST:
    Slowest but most accurate. Performs computations in float32 or float64
    as applicable. Aliases: ``'highest'``, ``'float32'``.
  """
  # Wrap enum values with this class.
  DEFAULT = _enum_descriptor('default')
  HIGH = _enum_descriptor('high')
  HIGHEST = _enum_descriptor('highest')
  # Mapping from string aliases (and None) to the underlying XLA precision.
  _strings = {
    'highest': xla_client.PrecisionConfig.Precision.HIGHEST,
    'float32': xla_client.PrecisionConfig.Precision.HIGHEST,
    'high': xla_client.PrecisionConfig.Precision.HIGH,
    'bfloat16_3x': xla_client.PrecisionConfig.Precision.HIGH,
    'tensorfloat32': xla_client.PrecisionConfig.Precision.HIGH,
    'default': xla_client.PrecisionConfig.Precision.DEFAULT,
    'bfloat16': xla_client.PrecisionConfig.Precision.DEFAULT,
    'fastest': xla_client.PrecisionConfig.Precision.DEFAULT,
    None: xla_client.PrecisionConfig.Precision.DEFAULT,
  }
  def __init__(self, arg0):
    # Accept string aliases (or None) in addition to raw enum values.
    arg0 = self._strings.get(arg0, arg0)
    super().__init__(arg0)
  def __str__(self) -> str:
    return self.name
PrecisionType = Precision
# A user-supplied precision: a single value applied to both operands, or a
# pair giving per-operand precisions.
PrecisionLike = Union[None, str, PrecisionType, Tuple[str, str],
                      Tuple[PrecisionType, PrecisionType]]
def dot(lhs: Array, rhs: Array, precision: PrecisionLike = None,
        preferred_element_type: Optional[DType] = None) -> Array:
  """Vector/vector, matrix/vector, and matrix/matrix multiplication.
  Wraps XLA's `Dot
  <https://www.tensorflow.org/xla/operation_semantics#dot>`_
  operator.
  For more general contraction, see the `dot_general` operator.
  Args:
    lhs: an array of rank 1 or 2.
    rhs: an array of rank 1 or 2.
    precision: Optional. Either ``None``, which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      :class:`~jax.lax.Precision` enums indicating precision of ``lhs`` and ``rhs``.
    preferred_element_type: Optional. Either ``None``, which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.
  Returns:
    An array containing the product.
  """
  # symbolic_equal_dim (rather than ==) keeps this check compatible with
  # symbolic/polymorphic dimension sizes.
  if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and core.symbolic_equal_dim(lhs.shape[-1], rhs.shape[0]):
    return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())),
                       precision=precision,
                       preferred_element_type=preferred_element_type)
  else:
    raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
        lhs.shape, rhs.shape))
# ((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
                            Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
                precision: PrecisionLike = None,
                preferred_element_type: Optional[DType] = None) -> Array:
  """More general contraction operator.
  Wraps XLA's `DotGeneral
  <https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
  operator.
  Args:
    lhs: an array
    rhs: an array
    dimension_numbers: a tuple of tuples of the form
      `((lhs_contracting_dims, rhs_contracting_dims),
      (lhs_batch_dims, rhs_batch_dims))`
    precision: Optional. Either ``None``, which means the default precision for
      the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
      ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      :class:`~jax.lax.Precision` enums indicating precision of ``lhs`` and ``rhs``.
    preferred_element_type: Optional. Either ``None``, which means the default
      accumulation type for the input types, or a datatype, indicating to
      accumulate results to and return a result with that datatype.
  Returns:
    An array containing the result.
  """
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  # Normalize all dimension indices to tuples of Python ints.
  cdims = (api_util._ensure_index_tuple(lhs_contract),
           api_util._ensure_index_tuple(rhs_contract))
  bdims = (api_util._ensure_index_tuple(lhs_batch),
           api_util._ensure_index_tuple(rhs_batch))
  preferred_element_type = (
      None if preferred_element_type is None else
      dtypes.canonicalize_dtype(np.dtype(preferred_element_type)))
  return dot_general_p.bind(lhs, rhs,
                            dimension_numbers=(cdims, bdims),
                            precision=canonicalize_precision(precision),
                            preferred_element_type=preferred_element_type)
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
  """Prepends new leading dimensions of the given sizes to an array.

  Args:
    operand: an array
    sizes: a sequence of integers, giving the sizes of new leading dimensions
      to add to the front of the array.
  Returns:
    An array containing the result.
  See Also:
    jax.lax.broadcast_in_dim : add new dimensions at any location in the array shape.
  """
  n_new = len(sizes)
  # Every existing axis shifts right by the number of prepended dimensions.
  mapped_dims = tuple(n_new + d for d in range(np.ndim(operand)))
  target_shape = tuple(sizes) + np.shape(operand)
  return broadcast_in_dim(operand, target_shape, mapped_dims)
def broadcast_in_dim(operand: Array, shape: Shape,
                     broadcast_dimensions: Sequence[int]) -> Array:
  """Wraps XLA's `BroadcastInDim
  <https://www.tensorflow.org/xla/operation_semantics#broadcastindim>`_
  operator.
  Args:
    operand: an array
    shape: the shape of the target array
    broadcast_dimensions: to which dimension in the target shape each dimension
      of the operand shape corresponds to
  Returns:
    An array containing the result.
  See Also:
    jax.lax.broadcast : simpler interface to add new leading dimensions.
  """
  # The shape rule both validates the request and normalizes ``shape``.
  shape = _broadcast_in_dim_shape_rule(
    operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
  # No-op broadcasts of known array types are returned unchanged.
  if (np.ndim(operand) == len(shape) and not len(broadcast_dimensions)
      and isinstance(operand, (device_array.DeviceArray, core.Tracer))):
    return operand
  if config.jax_dynamic_shapes:
    # We must gate this behavior under a flag because otherwise the errors
    # raised are different (and have worse source provenance information).
    # Traced (dynamic) dimension sizes are passed as explicit operands and
    # replaced by None placeholders in the static shape parameter.
    dyn_shape = [d for d in shape if isinstance(d, core.Tracer)]
    shape_ = [d if not isinstance(d, core.Tracer) else None for d in shape]
  else:
    dyn_shape = []
    shape_ = shape # type: ignore
  return broadcast_in_dim_p.bind(
      operand, *dyn_shape, shape=tuple(shape_),
      broadcast_dimensions=tuple(broadcast_dimensions))
def broadcast_to_rank(x: Array, rank: int) -> Array:
  """Adds leading dimensions of ``1`` to give ``x`` rank ``rank``."""
  return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
            dimensions: Optional[Sequence[int]] = None) -> Array:
  """Wraps XLA's `Reshape
  <https://www.tensorflow.org/xla/operation_semantics#reshape>`_
  operator.
  For inserting/removing dimensions of size 1, prefer using ``lax.squeeze`` /
  ``lax.expand_dims``. These preserve information about axis identity that may
  be useful for advanced transformation rules.
  Args:
    operand: array to be reshaped.
    new_sizes: sequence of integers specifying the resulting shape. The size
      of the final array must match the size of the input.
    dimensions: optional sequence of integers specifying the permutation order of
      the input shape. If specified, the length must match ``operand.shape``.
  Returns:
    out: reshaped array.
  Examples:
    Simple reshaping from one to two dimensions:
    >>> x = jnp.arange(6)
    >>> y = reshape(x, (2, 3))
    >>> y
    DeviceArray([[0, 1, 2],
                 [3, 4, 5]], dtype=int32)
    Reshaping back to one dimension:
    >>> reshape(y, (6,))
    DeviceArray([0, 1, 2, 3, 4, 5], dtype=int32)
    Reshaping to one dimension with permutation of dimensions:
    >>> reshape(y, (6,), (1, 0))
    DeviceArray([0, 3, 1, 4, 2, 5], dtype=int32)
  """
  new_sizes = canonicalize_shape(new_sizes) # TODO
  new_sizes = tuple(new_sizes)
  same_shape = core.symbolic_equal_shape(np.shape(operand), new_sizes)
  if dimensions is None:
    same_dims = True
    dims = None
  else:
    dims = api_util._ensure_index_tuple(dimensions)
    same_dims = tuple(dims) == tuple(range(np.ndim(operand)))
  # Identity reshapes of known array types are returned unchanged.
  if (np.shape(operand) and same_shape and same_dims
      and isinstance(operand, (core.Tracer, device_array.DeviceArray))):
    return operand
  else:
    # Pass dimensions=None when the permutation is the identity, so the
    # primitive's parameters stay canonical.
    return reshape_p.bind(
      operand, new_sizes=new_sizes,
      dimensions=None if dims is None or same_dims else dims)
def pad(operand: Array, padding_value: Array,
        padding_config: Sequence[Tuple[int, int, int]]) -> Array:
  """Applies low, high, and/or interior padding to an array.
  Wraps XLA's `Pad
  <https://www.tensorflow.org/xla/operation_semantics#pad>`_
  operator.
  Args:
    operand: an array to be padded.
    padding_value: the value to be inserted as padding. Must have the same dtype
      as ``operand``.
    padding_config: a sequence of ``(low, high, interior)`` tuples of integers,
      giving the amount of low, high, and interior (dilation) padding to insert
      in each dimension.
  Returns:
    The ``operand`` array with padding value ``padding_value`` inserted in each
    dimension according to the ``padding_config``.
  """
  return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
  """Wraps XLA's `Rev
  <https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_
  operator.

  Reverses ``operand`` along each axis listed in ``dimensions``.
  """
  return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
  """Wraps XLA's `Select
  <https://www.tensorflow.org/xla/operation_semantics#select>`_
  operator.
  """
  # Caution! The select_n_p primitive has the *opposite* order of arguments to
  # select(). This is because it implements `select_n`.
  return select_n_p.bind(pred, on_false, on_true)
def select_n(which: Array, *cases: Array) -> Array:
  """Chooses elementwise among several array cases.

  Generalizes XLA's `Select
  <https://www.tensorflow.org/xla/operation_semantics#select>`_
  operator: unlike XLA's version it is variadic, selecting among many cases
  via an integer predicate.

  Args:
    which: determines which case should be returned. Must be an array containing
      either a boolean or integer values. May either be a scalar or have
      shape matching ``cases``. For each array element, the value of ``which``
      determines which of ``cases`` is taken. ``which`` must be in the range
      ``[0 .. len(cases))``; for values outside that range the behavior is
      implementation-defined.
    *cases: a non-empty list of array cases. All must have equal dtypes and
      equal shapes.
  Returns:
    An array with shape and dtype equal to the cases, whose values are chosen
    according to ``which``.
  """
  # At least one case is required for the primitive to be well-defined.
  if not cases:
    raise ValueError("select_n() must have at least one case")
  return select_n_p.bind(which, *cases)
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
  """Wraps XLA's `Transpose
  <https://www.tensorflow.org/xla/operation_semantics#transpose>`_
  operator.
  """
  permutation = tuple(operator.index(d) for d in permutation)
  # Identity permutations of known array types are returned unchanged.
  if (permutation == tuple(range(np.ndim(operand)))
      and isinstance(operand, (core.Tracer, device_array.DeviceArray))):
    return operand
  else:
    return transpose_p.bind(operand, permutation=permutation)
def argmin(operand: Array, axis: int,
           index_dtype: DType) -> Tuple[Array, Array]:
  """Computes the index of the minimum element along ``axis``."""
  return argmin_p.bind(operand, axes=(axis,),
                       index_dtype=dtypes.canonicalize_dtype(index_dtype))
def argmax(operand: Array, axis: int,
           index_dtype: DType) -> Tuple[Array, Array]:
  """Computes the index of the maximum element along ``axis``."""
  return argmax_p.bind(operand, axes=(axis,),
                       index_dtype=dtypes.canonicalize_dtype(index_dtype))
def reduce(operands: Any,
           init_values: Any,
           computation: Callable[[Any, Any], Any],
           dimensions: Sequence[int]) -> Any:
  """Wraps XLA's `Reduce
  <https://www.tensorflow.org/xla/operation_semantics#reduce>`_
  operator.
  ``init_values`` and ``computation`` together must form a `monoid
  <https://en.wikipedia.org/wiki/Monoid>`_
  for correctness. That is ``init_values`` must be an identity of
  ``computation``, and ``computation`` must be associative. XLA may exploit both
  of these properties during code generation; if either is violated the result
  is undefined.
  """
  flat_operands, operand_tree = tree_util.tree_flatten(operands)
  flat_init_values, init_value_tree = tree_util.tree_flatten(init_values)
  if operand_tree != init_value_tree:
    raise ValueError('Operands must have the same tree structure as init_values:'
                     f' {operand_tree} vs. {init_value_tree}')
  if len(flat_operands) != len(flat_init_values):
    raise ValueError('Must have same total number of operands as init_values: '
                     f' {len(flat_operands)} vs. {len(flat_init_values)}')
  # Known monoids (add/mul/max/min/and/or with the matching identity as init)
  # dispatch to specialized reduction primitives.
  monoid_reducer = _get_monoid_reducer(computation, flat_init_values)
  if monoid_reducer:
    # monoid reducers bypass the weak_type_rule, so we set it explicitly.
    weak_type = dtypes.is_weakly_typed(*flat_operands) and dtypes.is_weakly_typed(*flat_init_values)
    return _convert_element_type(monoid_reducer(*flat_operands, dimensions),
                                 weak_type=weak_type)
  else:
    # General case: trace the user computation to a jaxpr and bind reduce_p.
    flat_init_avals = safe_map(_abstractify, flat_init_values)
    jaxpr, consts, out_tree = _variadic_reduction_jaxpr(
        computation, tuple(flat_init_avals), init_value_tree)
    out = reduce_p.bind(*(flat_operands + flat_init_values), computation=computation,
                        jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
    return tree_util.tree_unflatten(out_tree, out)
@cache()
def _reduction_jaxpr(computation, aval):
  # Trace ``computation`` on two abstract operands of type ``aval`` to obtain
  # a jaxpr usable as a reduction body. Cached since the same (computation,
  # aval) pair recurs across calls.
  pval = pe.PartialVal.unknown(aval)
  @lu.wrap_init
  def comp(x, y):
    result = computation(x, y)
    # Reduction bodies must return a single array-like value.
    if not (isinstance(result, core.Tracer) or core.valid_jaxtype(result)):
      raise ValueError(
          f"Invalid return type from reduction function: {type(result)}\n"
          f"Reduction functions should only return an array.\n"
          f"Full return value: {result}")
    return (result,)
  jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
  return jaxpr, consts
@cache()
def _variadic_reduction_jaxpr(computation, flat_avals, aval_tree):
  # Variant of _reduction_jaxpr for reductions over pytrees of operands.
  avals = tree_util.tree_unflatten(aval_tree, flat_avals)
  flat_in_avals, in_tree = tree_util.tree_flatten((avals, avals))
  comp = lu.wrap_init(computation)
  flat_comp, out_tree = api_util.flatten_fun_nokwargs(comp, in_tree)
  jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_comp, tuple(flat_in_avals))
  return jaxpr, tuple(consts), out_tree()
def _get_monoid_reducer(monoid_op: Callable,
                        xs: Sequence[Array]) -> Optional[Callable]:
  # Returns a specialized reducer when ``monoid_op`` is a known monoid whose
  # concrete scalar init value equals that monoid's identity. The
  # ``np.equal(...) and fn`` idiom yields ``fn`` on a match and a falsy
  # ``np.bool_(False)`` otherwise, which callers treat the same as None.
  if len(xs) != 1:
    return None
  x, = xs
  aval = core.get_aval(x)
  dtype = _dtype(x)
  # Only concrete scalar init values can be checked against the identity.
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return np.equal(aval.val, 0) and partial(_reduce_sum)
    elif monoid_op is mul:
      return np.equal(aval.val, 1) and _reduce_prod
    elif monoid_op is bitwise_or and dtype == np.bool_:
      return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_or
    elif monoid_op is bitwise_and and dtype == np.bool_:
      return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_and
    elif monoid_op is max:
      return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_max
    elif monoid_op is min:
      return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_min
  return None
def _get_max_identity(dtype: DType) -> Array:
  """Identity element for a max reduction over ``dtype``."""
  if dtypes.issubdtype(dtype, np.inexact):
    return np.array(-np.inf, dtype)
  elif dtypes.issubdtype(dtype, np.integer):
    return np.array(dtypes.iinfo(dtype).min, dtype)
  elif dtypes.issubdtype(dtype, np.bool_):
    return np.array(False, np.bool_)
  # NOTE(review): implicitly returns None for any other dtype.
def _get_min_identity(dtype: DType) -> Array:
  """Identity element for a min reduction over ``dtype``."""
  if dtypes.issubdtype(dtype, np.inexact):
    return np.array(np.inf, dtype)
  elif dtypes.issubdtype(dtype, np.integer):
    return np.array(dtypes.iinfo(dtype).max, dtype)
  elif dtypes.issubdtype(dtype, np.bool_):
    return np.array(True, np.bool_)
# Thin wrappers over the specialized monoid reduction primitives; ``axes``
# is normalized to a tuple for hashable primitive parameters.
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
  return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
  return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
  return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
  return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
  return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
  return reduce_and_p.bind(operand, axes=tuple(axes))
def sort(operand: Union[Array, Sequence[Array]], dimension: int = -1,
         is_stable: bool = True, num_keys: int = 1) -> Union[Array, Tuple[Array, ...]]:
  """Wraps XLA's `Sort
  <https://www.tensorflow.org/xla/operation_semantics#sort>`_ operator.
  For floating point inputs, -0.0 and 0.0 are treated as equivalent, and NaN values
  are sorted to the end of the array. For complex inputs, the sort order is
  lexicographic over the real and imaginary parts, with the real part primary.
  Args:
    operand : Array or sequence of arrays
    dimension : integer dimension along which to sort. Default: -1.
    is_stable : boolean specifying whether to use a stable sort. Default: True.
    num_keys : number of operands to treat as sort keys. Default: 1.
      For num_keys > 1, the sort order will be determined lexicographically using
      the first `num_keys` arrays, with the first key being primary.
      The remaining operands will be returned with the same permutation.
  Returns:
    operand : sorted version of the input or inputs.
  """
  if isinstance(operand, Sequence):
    if len(operand) == 0:
      raise TypeError("Sort requires at least one operand")
    if not (1 <= num_keys <= len(operand)):
      raise ValueError(f"num_keys={num_keys} must be between 1 and len(operand)={len(operand)}")
    dimension = canonicalize_axis(dimension, len(operand[0].shape))
    return tuple(sort_p.bind(*operand, dimension=dimension,
                             is_stable=is_stable,
                             num_keys=num_keys))
  else:
    # Single-array case: num_keys must be 1 and a single array is returned.
    if num_keys != 1:
      raise ValueError(f"num_keys={num_keys} must equal 1 for a single operand.")
    dimension = canonicalize_axis(dimension, len(operand.shape))
    return sort_p.bind(operand, dimension=dimension, is_stable=is_stable, num_keys=1)[0]
def sort_key_val(keys: Array, values: Array, dimension: int = -1,
                 is_stable: bool = True) -> Tuple[Array, Array]:
  """Sorts ``keys`` along ``dimension`` and applies the same permutation to ``values``."""
  dimension = canonicalize_axis(dimension, len(keys.shape))
  # sort_p returns all operands permuted consistently with the key order.
  k, v = sort_p.bind(keys, values, dimension=dimension, is_stable=is_stable, num_keys=1)
  return k, v
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
  """Finds the ``k`` largest entries (and their indices) along the last axis.

  Args:
    operand: the array to search.
    k: how many entries to return; must be a nonnegative integer.
  Returns:
    A pair ``(values, indices)`` for the top ``k`` entries of ``operand``.
  Raises:
    ValueError: if ``k`` is negative.
  """
  count = int(k)
  if count < 0:
    raise ValueError("k argument to top_k must be nonnegative, got {}".format(count))
  return top_k_p.bind(operand, k=count)
def tie_in(x: Array, y: Array) -> Array:
  """Deprecated no-op: returns ``y`` unchanged, discarding ``x``."""
  del x  # retained only for the backward-compatible two-argument signature
  return y
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
  """Returns an array of `shape` filled with `fill_value`.
  Args:
    shape: sequence of integers, describing the shape of the output array.
    fill_value: the value to fill the new array with.
    dtype: the type of the output array, or `None`. If not `None`, `fill_value`
      will be cast to `dtype`.
  """
  shape = canonicalize_shape(shape)
  if np.shape(fill_value):
    msg = "full must be called with scalar fill_value, got fill_value.shape {}."
    raise TypeError(msg.format(np.shape(fill_value)))
  # The result is weakly typed only when no explicit dtype was requested and
  # the fill value itself is weakly typed.
  weak_type = dtype is None and dtypes.is_weakly_typed(fill_value)
  dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
  fill_value = _convert_element_type(fill_value, dtype, weak_type)
  return broadcast(fill_value, shape)
def _device_put_raw(x, weak_type=None):
  # Transfers ``x`` to a device unless it is already a DeviceArray.
  if isinstance(x, device_array.DeviceArray):
    return x
  else:
    aval = raise_to_shaped(core.get_aval(x), weak_type=weak_type)
    return dispatch.array_result_handler(None, aval)(*dispatch.device_put(x))
def zeros_like_shaped_array(aval: Array) -> Array:
  """Builds a zero-filled array matching the given ShapedArray aval."""
  assert isinstance(aval, ShapedArray)
  # float0 has no JAX zero-scalar representation; use a NumPy scalar instead.
  if aval.dtype == dtypes.float0:
    scalar_zero = np.zeros((), dtype=aval.dtype)
  else:
    scalar_zero = _convert_element_type(0, aval.dtype, aval.weak_type)
  return broadcast(scalar_zero, aval.shape)
# Register as the zeros-like rule for ShapedArray avals.
ad_util.aval_zeros_likers[ShapedArray] = zeros_like_shaped_array
def iota(dtype: DType, size: int) -> Array:
  """Wraps XLA's `Iota
  <https://www.tensorflow.org/xla/operation_semantics#iota>`_
  operator.
  """
  dtype = dtypes.canonicalize_dtype(dtype)
  size, = canonicalize_shape((size,))
  return iota_p.bind(dtype=dtype, shape=(size,), dimension=0)
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
  """Convenience wrapper around ``iota``."""
  dtype = dtypes.canonicalize_dtype(dtype)
  shape = canonicalize_shape(shape)
  # ``dimension`` must be a concrete Python int at trace time.
  dimension = core.concrete_or_error(
      int, dimension, "dimension argument of lax.broadcasted_iota")
  return iota_p.bind(dtype=dtype, shape=shape, dimension=dimension)
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
  """Like numpy.eye, create a 2D array with ones on a diagonal."""
  offset = int(offset)
  dtype = dtypes.canonicalize_dtype(dtype)
  # A cell (i, j) lies on the requested diagonal exactly when i + offset == j.
  bool_eye = eq(add(broadcasted_iota(np.int32, shape, 0), np.int32(offset)),
                broadcasted_iota(np.int32, shape, 1))
  return convert_element_type_p.bind(bool_eye, new_dtype=dtype, weak_type=False)
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
  """This utility function exists for creating Kronecker delta arrays."""
  axes = tuple(map(int, axes))
  dtype = dtypes.canonicalize_dtype(dtype)
  base_shape = tuple(np.take(shape, axes)) # type: ignore[arg-type]
  # The delta is 1 exactly where the iotas along all ``axes`` agree pairwise.
  iotas = [broadcasted_iota(np.uint32, base_shape, i)
           for i in range(len(base_shape))]
  eyes = [eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]
  result = convert_element_type_p.bind(_reduce(operator.and_, eyes),
                                       new_dtype=dtype, weak_type=False)
  return broadcast_in_dim(result, shape, axes)
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
  """Like numpy.tri, create a 2D array with ones below a diagonal."""
  offset = int(offset)
  dtype = dtypes.canonicalize_dtype(dtype)
  # A cell (i, j) is on or below the offset diagonal when i + offset >= j.
  bool_tri = ge(add(broadcasted_iota(np.int32, shape, 0), np.int32(offset)),
                broadcasted_iota(np.int32, shape, 1))
  return convert_element_type_p.bind(bool_tri, new_dtype=dtype, weak_type=False)
def stop_gradient(x: T) -> T:
  """Stops gradient computation.
  Operationally ``stop_gradient`` is the identity function, that is, it returns
  argument `x` unchanged. However, ``stop_gradient`` prevents the flow of
  gradients during forward or reverse-mode automatic differentiation. If there
  are multiple nested gradient computations, ``stop_gradient`` stops gradients
  for all of them.
  For example:
  >>> jax.grad(lambda x: x**2)(3.)
  DeviceArray(6., dtype=float32, weak_type=True)
  >>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
  DeviceArray(0., dtype=float32, weak_type=True)
  >>> jax.grad(jax.grad(lambda x: x**2))(3.)
  DeviceArray(2., dtype=float32, weak_type=True)
  >>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
  DeviceArray(0., dtype=float32, weak_type=True)
  """
  # Applied leafwise so pytrees of arrays are handled transparently.
  def stop(x):
    if (dtypes.issubdtype(_dtype(x), np.floating) or
        dtypes.issubdtype(_dtype(x), np.complexfloating)):
      return ad_util.stop_gradient_p.bind(x)
    else:
      return x # only bind primitive on inexact dtypes, to avoid some staging
  return tree_map(stop, x)
def reduce_precision(operand: Union[float, Array],
                     exponent_bits: int,
                     mantissa_bits: int) -> Array:
  """Wraps XLA's `ReducePrecision
  <https://www.tensorflow.org/xla/operation_semantics#reduceprecision>`_
  operator.
  """
  # Both bit counts must be concrete Python ints at trace time.
  exponent_bits = core.concrete_or_error(
      operator.index, exponent_bits, "exponent_bits argument of lax.reduce_precision")
  mantissa_bits = core.concrete_or_error(
      operator.index, mantissa_bits, "mantissa_bits argument of lax.reduce_precision")
  return reduce_precision_p.bind(operand, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits)
def squeeze(array: Array, dimensions: Sequence[int]) -> Array:
  """Squeeze any number of size 1 dimensions from an array."""
  ndim = np.ndim(array)
  # Negative axes are normalized, then sorted for a canonical parameter.
  dimensions = tuple(sorted(canonicalize_axis(i, ndim) for i in dimensions))
  # No axes requested: return the input unchanged rather than binding a no-op.
  if not dimensions:
    return array
  return squeeze_p.bind(array, dimensions=dimensions)
def expand_dims(array: Array, dimensions: Sequence[int]) -> Array:
  """Returns ``array`` with a size-1 axis inserted at each listed position."""
  if len(dimensions) != len(set(dimensions)):
    raise ValueError(f'repeated axis in lax.expand_dims: {dimensions}')
  ndim_out = np.ndim(array) + len(dimensions)
  dims = [canonicalize_axis(i, ndim_out) for i in dimensions]
  # Canonicalization can map distinct inputs (e.g. -1 and ndim_out - 1) onto
  # the same output axis, so the duplicate check must run a second time.
  if len(dims) != len(set(dims)):
    raise ValueError(f'repeated axis in lax.expand_dims: {dims}')
  dims_set = frozenset(dims)
  result_shape = list(np.shape(array))
  for axis in sorted(dims_set):
    result_shape.insert(axis, 1)
  # The surviving input axes map to every output position not being inserted.
  broadcast_dims = [d for d in range(ndim_out) if d not in dims_set]
  return broadcast_in_dim(array, result_shape, broadcast_dims)
### convenience wrappers around traceables
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
              shape: Optional[Shape] = None) -> Array:
  """Create a full array like np.full based on the example array `x`.
  Args:
    x: example array-like, used for shape and dtype information.
    fill_value: a scalar value to fill the entries of the output array.
    dtype: optional, a dtype parameter for the output ndarray.
    shape: optional, a shape parameter for the output ndarray.
  Returns:
    An ndarray with the same shape as `x` with its entries set equal to
    `fill_value`, similar to the output of np.full.
  """
  fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape)
  # Weak typing propagates from ``x`` only when no explicit dtype is given.
  weak_type = dtype is None and dtypes.is_weakly_typed(x)
  dtype = dtype or _dtype(x)
  return full(fill_shape, _convert_element_type(fill_value, dtype, weak_type))
def collapse(operand: Array, start_dimension: int,
             stop_dimension: int) -> Array:
  """Flattens a contiguous run of dimensions into one dimension.

  For example, if ``operand`` is an array with shape ``[2, 3, 4]``,
  ``collapse(operand, 0, 2).shape == [6, 4]``. The elements of the collapsed
  dimension are laid out major-to-minor, i.e., with the lowest-numbered
  dimension as the slowest varying dimension.

  Args:
    operand: an input array.
    start_dimension: the start of the dimensions to collapse (inclusive).
    stop_dimension: the end of the dimensions to collapse (exclusive).
  Returns:
    An array where dimensions ``[start_dimension, stop_dimension)`` have been
    collapsed (raveled) into a single dimension.
  """
  shape = operand.shape
  collapsed_size = prod(shape[start_dimension:stop_dimension])
  flattened_shape = (shape[:start_dimension] + (collapsed_size,)
                     + shape[stop_dimension:])
  return reshape(operand, flattened_shape)
def batch_matmul(lhs: Array, rhs: Array,
                 precision: PrecisionLike = None) -> Array:
  """Matrix-multiplies the trailing two axes of each input, treating all
  leading axes as matching batch dimensions."""
  if lhs.ndim < 2 or rhs.ndim < 2:
    raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  if lhs.ndim != rhs.ndim:
    raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  # Contract lhs's last axis against rhs's second-to-last axis; everything
  # before the trailing two axes is a shared batch dimension.
  batch_axes = tuple(range(lhs.ndim - 2))
  dim_numbers = (((lhs.ndim - 1,), (rhs.ndim - 2,)), (batch_axes, batch_axes))
  return dot_general(lhs, rhs, dim_numbers, precision=precision)
# These functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
def square(x: Array) -> Array:
  r"""Elementwise square: :math:`x^2` (implemented as ``integer_pow(x, 2)``)."""
  return integer_pow(x, 2)
def reciprocal(x: Array) -> Array:
  r"""Elementwise reciprocal: :math:`1 \over x` (implemented as ``integer_pow(x, -1)``)."""
  return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
  """Wraps ``f`` so 16-bit float inputs are computed in float32.

  float16/bfloat16 inputs are converted to float32, passed through ``f``, and
  the result is cast back to the original narrow dtype. All other dtypes are
  passed through unchanged.
  """
  @functools.wraps(f)
  def f_wrapped(x):
    input_dtype = _dtype(x)
    if input_dtype not in (np.float16, dtypes.bfloat16):
      return f(x)
    # Compute in f32 for accuracy, then restore the narrow dtype.
    return convert_element_type(
        f(convert_element_type(x, np.float32)), input_dtype)
  return f_wrapped
# Thin public wrappers: each trig / hyperbolic function just binds its
# primitive; differentiation and lowering rules are registered below.
def tan(x: Array) -> Array:
  r"""Elementwise tangent: :math:`\mathrm{tan}(x)`."""
  return tan_p.bind(x)
def asin(x: Array) -> Array:
  r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`."""
  return asin_p.bind(x)
def acos(x: Array) -> Array:
  r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`."""
  return acos_p.bind(x)
def atan(x: Array) -> Array:
  r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`."""
  return atan_p.bind(x)
def sinh(x: Array) -> Array:
  r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`."""
  return sinh_p.bind(x)
def cosh(x: Array) -> Array:
  r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`."""
  return cosh_p.bind(x)
def asinh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic sine: :math:`\mathrm{asinh}(x)`."""
  return asinh_p.bind(x)
def acosh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic cosine: :math:`\mathrm{acosh}(x)`."""
  return acosh_p.bind(x)
def atanh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic tangent: :math:`\mathrm{atanh}(x)`."""
  return atanh_p.bind(x)
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose)  # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape)      # clobbered by lax_numpy
def _iter(tracer):
  """Iterates over the leading axis of a traced array, one slice at a time."""
  if tracer.ndim == 0:
    raise TypeError("iteration over a 0-d array") # same as numpy error
  else:
    n = int(tracer.shape[0])
    # Eagerly build the list of slices rather than returning a lazy
    # generator expression:
    # return (index_in_dim(tracer, i, keepdims=False) for i in range(n))
    return iter([slicing.index_in_dim(tracer, i, keepdims=False)
                 for i in range(n)])
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x: Array) -> Array:
  """Returns an all-zeros array with the same shape/dtype as ``x``."""
  return full_like(x, 0)
# Register `add` and zero construction for every concrete array-like type
# autodiff may encounter (python scalars, numpy arrays, device arrays, ...).
for t in itertools.chain(
    dtypes.python_scalar_dtypes.keys(), array_types,
    device_array.device_array_types,
    [pxla.ShardedDeviceArray, pxla.pmap_lib.ShardedDeviceArray]):
  ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[device_array._DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[device_array.Buffer] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.pmap_lib.ShardedDeviceArray] = zeros_like_array
### primitives
# Helpers for building dtype rules:
#   _fixed_dtype: rule that ignores inputs and always yields `dtype`.
#   _complex_basetype: maps complex64/128 to their real element dtype.
#   _strip_weak_type: weak-type rule that always yields a strong type.
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: np.abs(np.zeros((), dtype)).dtype
_strip_weak_type = lambda *args, **_: False
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
  """dtype rule for unary ops: validates the input dtype, then maps it.

  Raises a TypeError naming the op if ``aval.dtype`` is not a subtype of any
  entry in ``accepted_dtypes``; otherwise returns ``result_dtype(aval.dtype)``.
  """
  for accepted in accepted_dtypes:
    if dtypes.issubdtype(aval.dtype, accepted):
      return result_dtype(aval.dtype)
  msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
  typename = str(np.dtype(aval.dtype).name)
  accepted_typenames = (t.__name__ for t in accepted_dtypes)
  raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
  """Defines a unary elementwise primitive with standard shape/dtype rules.

  Also registers vectorized batching and masking rules for it.
  """
  dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
  weak_type_rule = partial(_naryop_weak_type_rule, name)
  prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
                            translation_rule=translation_rule,
                            weak_type_rule=weak_type_rule)
  batching.defvectorized(prim)
  masking.defvectorized(prim)
  return prim
# Most unops preserve the input dtype (`_identity`).
standard_unop = partial(unop, _identity)
# Pulls a named attribute off the (single) abstract value, ignoring params.
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
  """dtype rule for n-ary ops: checks each operand dtype, then maps them.

  ``accepted_dtypes`` lists, per positional operand, the dtype families that
  operand may belong to. float0 operands get a dedicated error message since
  they arise from gradients w.r.t. integer arguments.
  """
  aval_dtypes = [aval.dtype for aval in avals]
  for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
    if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
      if aval_dtype is dtypes.float0:
        raise TypeError(
            f"Called {name} with a float0 at position {i}. "
            "float0s do not support any operations by design, because they "
            "are not compatible with non-trivial vector spaces. No implicit dtype "
            "conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
            "to cast a float0 array to a regular zeros array. \n"
            "If you didn't expect to get a float0 you might have accidentally "
            "taken a gradient with respect to an integer argument.")
      else:
        msg = ('{} does not accept dtype {} at position {}. '
               'Accepted dtypes at position {} are subtypes of {}.')
        typename = str(np.dtype(aval_dtype).name)
        typenames = ', '.join(t.__name__ for t in types)
        raise TypeError(msg.format(name, typename, i, i, typenames))
  # All operands must also agree with each other (ignoring weak types).
  _check_same_dtypes(name, False, *aval_dtypes)
  return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
  """Computes the result shape of rank-matched broadcasting.

  Unlike NumPy broadcasting, all non-scalar operands must already have the
  same rank; per-dimension sizes must match or be 1.
  """
  shapes = [aval.shape for aval in avals if aval.shape]
  if not shapes:
    return ()
  if len({len(shape) for shape in shapes}) != 1:
    msg = '{}: arrays must have same number of dimensions, got {}.'
    raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
  result_shape = []
  for ds in zip(*shapes):
    if all(d is ds[0] for d in ds):
      # if all axes are identical objects, the resulting size is the object
      # (identity, not equality, so symbolic dimension objects are preserved)
      result_shape.append(ds[0])
    else:
      # if all dims are equal (or 1), the result is the non-1 size
      non_1s = {d for d in ds if not core.symbolic_equal_dim(d, 1)}
      if len(non_1s) > 1:
        raise TypeError(f'{name} got incompatible shapes for broadcasting: '
                        f'{", ".join(map(str, map(tuple, shapes)))}.')
      result_shape.append(non_1s.pop() if non_1s else 1)
  return tuple(result_shape)
def _naryop_weak_type_rule(name, *avals, **kwargs):
  """Weak-type rule for n-ary ops: result is weak iff every operand is weak.

  Rejects float0 operands with the same guidance message as the dtype rule.
  """
  if any(aval.dtype is dtypes.float0 for aval in avals):
    pos = next(i for i, aval in enumerate(avals) if aval.dtype is dtypes.float0)
    raise TypeError(
        f"Called {name} with a float0 at position {pos}. "
        "float0s do not support any operations by design, because they "
        "are not compatible with non-trivial vector spaces. No implicit dtype "
        "conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
        "to cast a float0 array to a regular zeros array. \n"
        "If you didn't expect to get a float0 you might have accidentally "
        "taken a gradient with respect to an integer argument.")
  return all(aval.weak_type for aval in avals)
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
  """Defines an n-ary elementwise primitive with broadcasting shape rules.

  Also registers broadcasting batching and n-ary masking rules for it.
  """
  dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
  shape_rule = partial(_broadcasting_shape_rule, name)
  weak_type_rule = partial(_naryop_weak_type_rule, name)
  prim = standard_primitive(shape_rule, dtype_rule, name,
                            translation_rule=translation_rule,
                            weak_type_rule=weak_type_rule)
  batching.defbroadcasting(prim)
  masking.defnaryop(prim)
  return prim
# Most naryops preserve the (shared) input dtype (`_input_dtype`).
standard_naryop = partial(naryop, _input_dtype)
def _broadcast_translate(op, ctx, avals_in, avals_out, *args):
  """Variant of _standard_translate that performs explicit broadcasting.

  Not all XLA library functions perform their own broadcasting, so each
  operand is broadcast-in-dim'd to the (single) output shape before calling
  ``op``.
  """
  aval_out, = avals_out
  broadcasted_args = []
  for aval_in, arg in zip(avals_in, args):
    if aval_out.shape != aval_in.shape:
      # Align the operand's dims with the trailing dims of the output.
      bcast_dims = tuple(range(len(aval_out.shape) - len(aval_in.shape),
                               len(aval_out.shape)))
      arg = xops.BroadcastInDim(arg, aval_out.shape, bcast_dims)
    broadcasted_args.append(arg)
  return [op(*broadcasted_args)]
# Like autograd.numpy.numpy_vjps.unbroadcast, this utility handles transposition
# involving linear primitives with implicit broadcasting.
def _unbroadcast(aval, x):
  """Sums ``x`` down to ``aval.shape``, undoing an implicit broadcast.

  Used in transpose rules: the cotangent of a broadcast is a reduce-sum over
  the broadcast dimensions.
  """
  if not isinstance(aval, ShapedArray):
    raise TypeError("transpose with implicit broadcasting of unshaped values")
  x_shape = np.shape(x)
  if core.symbolic_equal_shape(aval.shape, x_shape):
    return x
  assert not aval.shape or len(x_shape) == len(aval.shape)
  if not aval.shape:
    # Broadcast was from a scalar: sum everything.
    return _reduce_sum(x, list(range(len(x_shape))))
  else:
    # Sum only over the dims that were expanded (size-1 in aval).
    dims = [i for i, (a, b) in enumerate(zip(x_shape, aval.shape)) if not core.symbolic_equal_dim(a, b)]
    if config.jax_enable_checks: assert all(aval.shape[i] == 1 for i in dims)
    return reshape(_reduce_sum(x, dims), aval.shape)
def _maybe_broadcast(target_shape, x):
  """Broadcasts ``x`` to ``target_shape`` if it isn't already that shape.

  Size-1 dims of ``x`` are squeezed away and the remaining dims mapped to
  their matching positions in the target via broadcast_in_dim.
  """
  x_shape = np.shape(x)
  if core.symbolic_equal_shape(x_shape, target_shape):
    return x
  else:
    # Keep only the dims of x that already match the target.
    dims = [i for i, (a, b) in enumerate(zip(x_shape, target_shape)) if core.symbolic_equal_dim(a, b)]
    squeeze_shape = [x_shape[i] for i in dims]
    return broadcast_in_dim(reshape(x, squeeze_shape), target_shape, dims)
def broadcast_mhlo(
    aval_out: core.ShapedArray, avals: Sequence[core.ShapedArray],
    args: Sequence[ir.Value]) -> Sequence[ir.Value]:
  """Broadcasts MHLO values with broadcast-compatible shapes to the same shape.

  Each arg whose shape differs from ``aval_out.shape`` is broadcast-in-dim'd
  so its dims align with the trailing dims of the output.
  """
  out = []
  for aval, arg in zip(avals, args):
    if aval.shape != aval_out.shape:
      assert len(aval.shape) <= len(aval_out.shape), (aval, aval_out)
      dims = mlir.dense_int_elements(
          range(len(aval_out.shape) - len(aval.shape), len(aval_out.shape)))
      arg = mhlo.BroadcastInDimOp(
          mlir.aval_to_ir_type(aval.update(shape=aval_out.shape)), arg,
          dims).result
    out.append(arg)
  return out
def _nary_lower_mhlo(op: Callable, ctx,
                     *args: Union[ir.Value, Sequence[ir.Value]],
                     explicit_type=False, **params):
  """Lowers an elementwise operator to its MHLO/CHLO equivalent.

  Operands are first broadcast to the output shape via ``broadcast_mhlo``.

  Args:
    explicit_type: does the MHLO/CHLO operator require its output type to be
      provided?
  """
  del params  # elementwise ops take no extra parameters
  aval_out, = ctx.avals_out
  broadcasted_args = broadcast_mhlo(aval_out, ctx.avals_in, args)
  if explicit_type:
    return op(mlir.aval_to_ir_type(aval_out), *broadcasted_args).results
  else:
    return op(*broadcasted_args).results
_float = {np.floating}
_complex = {np.complexfloating}
_complex_elem_types = {np.float32, np.float64}
_int = {np.integer}
_bool = {np.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
neg_p = standard_unop(_num, 'neg')
ad.deflinear2(neg_p, lambda t, operand: [neg(t)])
mlir.register_lowering(neg_p, partial(_nary_lower_mhlo, mhlo.NegOp))
def _sign_translation_rule(ctx, avals_in, avals_out, x):
  """XLA translation for sign.

  Unsigned integers need special handling: XLA's Sign is not defined for
  them, so emit select(x == 0, 0, 1) instead.
  """
  c = ctx.builder
  x_aval, = avals_in
  dtype = x_aval.dtype
  if dtypes.issubdtype(dtype, np.unsignedinteger):
    zero = xops.Constant(c, np.array(0, dtype=dtype))
    return [xops.Select(
        xops.Eq(x, zero),
        xops.Broadcast(zero, x_aval.shape),
        xops.Broadcast(xops.Constant(c, np.array(1, dtype=dtype)),
                       x_aval.shape))]
  return [xops.Sign(x)]
sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)
def _sign_lower_mhlo(ctx, x):
  """MHLO lowering for sign; mirrors the unsigned-int special case of the
  XLA translation rule using an explicit compare/select."""
  x_aval, = ctx.avals_in
  if dtypes.issubdtype(x_aval.dtype, np.unsignedinteger):
    return mhlo.SelectOp(
        mhlo.CompareOp(
            mlir.aval_to_ir_type(x_aval.update(dtype=np.dtype(np.bool_))),
            x, mlir.full_like_aval(0, x_aval), ir.StringAttr.get("EQ"),
            ir.StringAttr.get("UNSIGNED")).result,
        mlir.full_like_aval(0, x_aval),
        mlir.full_like_aval(1, x_aval)).results
  return mhlo.SignOp(x).results
mlir.register_lowering(sign_p, _sign_lower_mhlo)
_nextafter_translation_rule = partial(_broadcast_translate, xops.NextAfter)
nextafter_p = standard_naryop([_float, _float], 'nextafter',
translation_rule=_nextafter_translation_rule)
mlir.register_lowering(nextafter_p, partial(_nary_lower_mhlo, chlo.NextAfterOp))
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
mlir.register_lowering(floor_p, partial(_nary_lower_mhlo, mhlo.FloorOp))
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
mlir.register_lowering(ceil_p, partial(_nary_lower_mhlo, mhlo.CeilOp))
def _round_to_nearest_even(x):
  """Rounds to the nearest integer, breaking .5 ties toward even integers."""
  half = _const(x, 0.5)
  one = _const(x, 1)
  round_val = floor(x)
  fraction = x - round_val
  # round_val - 2*floor(x/2): 1 when floor(x) is odd, 0 when even.
  nearest_even_int = sub(
    round_val, mul(_const(x, 2), floor(mul(half, x))))
  is_odd = eq(nearest_even_int, one)
  # Round up when fraction > .5, or when fraction == .5 and floor(x) is odd.
  return select(
    bitwise_or(gt(fraction, half),
               bitwise_and(eq(fraction, half), is_odd)),
    add(round_val, one), round_val)
def _round_translation_rule(ctx, avals_in, avals_out, x, *, rounding_method):
  """XLA translation for round: native Round for away-from-zero, otherwise
  lower the ties-to-even helper as a fallback fun."""
  if rounding_method is RoundingMethod.AWAY_FROM_ZERO:
    return [xops.Round(x)]
  else: # rounding_method is RoundingMethod.TO_NEAREST_EVEN
    rounding_fun = xla.lower_fun(_round_to_nearest_even, multiple_results=False,
                                 new_style=True)
    return rounding_fun(ctx, avals_in, avals_out, x)
round_p = standard_unop(_float, 'round')
xla.register_translation(round_p, _round_translation_rule)
ad.defjvp_zero(round_p)
def _round_lower(ctx, x, *, rounding_method):
  """MHLO lowering for round; mirrors the XLA translation rule."""
  if rounding_method is RoundingMethod.AWAY_FROM_ZERO:
    return mhlo.RoundOp(x).results
  else:
    assert rounding_method is RoundingMethod.TO_NEAREST_EVEN
    # Cache the expansion: it is reused across call sites.
    round_nearest = mlir.cache_lowering(mlir.lower_fun(_round_to_nearest_even,
                                                       multiple_results=False))
    return round_nearest(ctx, x)
mlir.register_lowering(round_p, _round_lower)
is_finite_p = unop(_fixed_dtype(np.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
mlir.register_lowering(is_finite_p, partial(_nary_lower_mhlo, mhlo.IsFiniteOp))
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))
iad.definverse(exp_p, lambda r, x: log(r))
# For exp_p it is more efficient to use the reconstructed output for the vjp
# rule instead of computing it again from the input.
iad.primitive_ivjps[exp_p] = lambda x, y, ct: [[log(y[0])], [ct[0] * y[0]]]
mlir.register_lowering(exp_p, partial(_nary_lower_mhlo, mhlo.ExpOp))
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
iad.definverse(log_p, lambda r, x: exp(r))
mlir.register_lowering(log_p, partial(_nary_lower_mhlo, mhlo.LogOp))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
mlir.register_lowering(expm1_p, partial(_nary_lower_mhlo, mhlo.Expm1Op))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
mlir.register_lowering(log1p_p, partial(_nary_lower_mhlo, mhlo.Log1pOp))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(add(g, mul(g, ans)),
sub(_one(x), ans)))
mlir.register_lowering(tanh_p, partial(_nary_lower_mhlo, mhlo.TanhOp))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
mlir.register_lowering(sin_p, partial(_nary_lower_mhlo, mhlo.SinOp))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
mlir.register_lowering(cos_p, partial(_nary_lower_mhlo, mhlo.CosOp))
@partial(xla.lower_fun, multiple_results=False, new_style=True)
@_upcast_fp16_for_computation
def tan_translation_rule(x):
  """Lowers tan as sin(x)/cos(x); 16-bit floats are computed in f32."""
  return div(sin(x), cos(x))
tan_p = standard_unop(_float | _complex, 'tan',
translation_rule=tan_translation_rule)
ad.defjvp2(tan_p, lambda g, ans, x: mul(g, _const(x, 1) + square(ans)))
def asin_translation_rule(x):
  """Lowers asin.

  Complex: asin(z) = -i * asinh(i*z). Real: the half-angle form
  2*atan2(x, 1 + sqrt(1 - x^2)).
  """
  if dtypes.issubdtype(_dtype(x), np.complexfloating):
    return mul(_const(x, -1j), asinh(mul(_const(x, 1j), x)))
  else:
    return mul(_const(x, 2),
               atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
asin_p = standard_unop(_float | _complex, 'asin',
translation_rule=xla.lower_fun(asin_translation_rule,
multiple_results=False,
new_style=True))
ad.defjvp(asin_p, lambda g, x: mul(g, rsqrt(_const(x, 1) - square(x))))
def acos_translation_rule(x):
  """Lowers acos.

  Complex: acos(z) = i * acosh(z), sign-corrected to the branch with positive
  real part. Real: 2*atan2(sqrt(1 - x^2), 1 + x), with acos(-1) = pi handled
  explicitly to avoid atan2(0, 0).
  """
  if dtypes.issubdtype(_dtype(x), np.complexfloating):
    result = mul(_const(x, 1j), acosh(x))
    # By convention, numpy chooses the branch with positive real part.
    rpart = real(result)
    return select(
      gt(rpart, _const(rpart, 0)),
      result,
      neg(result)
    )
  else:
    return select(
        ne(x, _const(x, -1.0)),
        mul(_const(x, 2),
            atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
        full_like(x, np.pi))
acos_p = standard_unop(_float | _complex, 'acos',
translation_rule=xla.lower_fun(acos_translation_rule,
multiple_results=False,
new_style=True))
ad.defjvp(acos_p, lambda g, x: mul(g, -rsqrt(_const(x, 1) - square(x))))
def atan_translation_rule(x):
  """Lowers atan as the two-argument atan2(x, 1)."""
  return atan2(x, _const(x, 1))
atan_p = standard_unop(_float | _complex, 'atan',
translation_rule=xla.lower_fun(atan_translation_rule,
multiple_results=False,
new_style=True))
ad.defjvp(atan_p, lambda g, x: div(g, _const(x, 1) + square(x)))
atan2_p = standard_naryop([_float | _complex, _float | _complex], 'atan2')
ad.defjvp(atan2_p,
lambda g, x, y: g * (y / (square(x) + square(y))),
lambda g, x, y: g * -x / (square(x) + square(y)))
mlir.register_lowering(atan2_p, partial(_nary_lower_mhlo, mhlo.Atan2Op))
sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))
# TODO(b/209505237): the CHLO lowering of chlo.sinh is less accurate than that
# in the XLA client library. Use the fallback path for now.
# mlir.register_lowering(sinh_p, partial(_nary_lower_mhlo, chlo.SinhOp))
cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))
asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))
acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))
atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
lambda g, x: mul(reciprocal(_one(x) + x), div(g, (_one(x) - x))))
regularized_incomplete_beta_p = standard_naryop(
[_float, _float, _float], 'regularized_incomplete_beta',
translation_rule=partial(_broadcast_translate,
xops.RegularizedIncompleteBeta))
def betainc_gradx(g, a, b, x):
  """JVP of the regularized incomplete beta w.r.t. x: the beta pdf at x."""
  lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
  # d/dx I_x(a, b) = x^(a-1) (1-x)^(b-1) / B(a, b), computed in log space.
  partial_x = exp((b - 1) * log1p(-x) +
                  (a - 1) * log(x) - lbeta)
  return partial_x * g
def betainc_grad_not_implemented(g, a, b, x):
  """Placeholder JVP: derivatives w.r.t. the a/b parameters are unsupported."""
  raise ValueError("Betainc gradient with respect to a and b not supported.")
ad.defjvp(regularized_incomplete_beta_p,
betainc_grad_not_implemented,
betainc_grad_not_implemented,
betainc_gradx)
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
mlir.register_lowering(lgamma_p, partial(_nary_lower_mhlo, chlo.LgammaOp))
digamma_p = standard_unop(_float, 'digamma')
mlir.register_lowering(digamma_p, partial(_nary_lower_mhlo, chlo.DigammaOp))
igamma_p = standard_naryop(
[_float, _float], 'igamma',
translation_rule=partial(_broadcast_translate, xops.Igamma))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
translation_rule=partial(_broadcast_translate, xops.IgammaGradA))
def igamma_gradx(g, a, x):
  """JVP of igamma w.r.t. x: x^(a-1) e^(-x) / Gamma(a), in log space."""
  return g * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))
def igamma_grada(g, a, x):
  """JVP of igamma w.r.t. a, via the dedicated igamma_grad_a primitive."""
  return g * igamma_grad_a(a, x)
ad.defjvp(igamma_p, igamma_grada, igamma_gradx)
igammac_p = standard_naryop(
[_float, _float], 'igammac',
translation_rule=partial(_broadcast_translate, xops.Igammac))
# igammac = 1 - igamma, so its partials are the negated igamma partials.
def igammac_gradx(g, a, x):
  """JVP of igammac w.r.t. x."""
  return -igamma_gradx(g, a, x)
def igammac_grada(g, a, x):
  """JVP of igammac w.r.t. a."""
  return -igamma_grada(g, a, x)
ad.defjvp(igammac_p, igammac_grada, igammac_gradx)
random_gamma_grad_p = standard_naryop([_float, _float], 'random_gamma_grad',
translation_rule=partial(_broadcast_translate, xops.RandomGammaGrad))
bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))
bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
  """JVP for bessel_i1e, guarding the 1/x term near x == 0.

  For |x| <= eps the derivative is replaced by its limit value 0.5.
  """
  eps = dtypes.finfo(_dtype(x)).eps
  x_is_not_tiny = abs(x) > eps
  # Substitute a safe value so the division below never sees a tiny x.
  safe_x = select(x_is_not_tiny, x, full_like(x, eps))
  dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
  dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
  return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, -2. / np.sqrt(np.pi)),
mul(g, exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, np.sqrt(np.pi) / 2.),
mul(g, exp(square(ans)))))
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear2(real_p, lambda t, _: [complex(t, np.zeros((), _dtype(t)))])
mlir.register_lowering(real_p, partial(_nary_lower_mhlo, mhlo.RealOp))
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.deflinear2(imag_p, lambda t, _: [complex(np.zeros((), _dtype(t)), neg(t))])
mlir.register_lowering(imag_p, partial(_nary_lower_mhlo, mhlo.ImagOp))
def _complex_transpose_rule(t, x, y):
  """Transpose for complex(x, y): cotangents are (real(t), -imag(t)).

  Only the undefined (linear) operands receive a cotangent; defined operands
  get None. Zero cotangents are propagated symbolically.
  """
  assert ad.is_undefined_primal(x) or ad.is_undefined_primal(y)
  if ad.is_undefined_primal(x) and ad.is_undefined_primal(y):
    if type(t) is ad_util.Zero:
      return [ad_util.Zero(x.aval), ad_util.Zero(y.aval)]
    else:
      return [_unbroadcast(x.aval, real(t)), _unbroadcast(y.aval, imag(neg(t)))]
  elif ad.is_undefined_primal(x):
    if type(t) is ad_util.Zero:
      return [ad_util.Zero(x.aval), None]
    else:
      return [_unbroadcast(x.aval, real(t)), None]
  else:
    if type(t) is ad_util.Zero:
      return [None, ad_util.Zero(y.aval)]
    else:
      return [None, _unbroadcast(y.aval, imag(neg(t)))]
_complex_dtype = lambda dtype, *args: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
'complex')
ad.deflinear2(complex_p, _complex_transpose_rule)
mlir.register_lowering(complex_p, partial(_nary_lower_mhlo, mhlo.ComplexOp))
conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')
def _conj_impl(x, **kw):
  """Complex conjugate; real inputs are promoted to complex with zero imag."""
  if dtypes.issubdtype(x.dtype, np.complexfloating):
    return complex(real(x), -imag(x))
  else:
    return complex(x, _zeros(x))
mlir.register_lowering(conj_p,
mlir.lower_fun(_conj_impl, multiple_results=False))
def _conj_transpose_rule(t, x, *, input_dtype):
  """Transpose for conj: conj(t) for complex inputs, real(t) for real ones."""
  assert ad.is_undefined_primal(x)
  if dtypes.issubdtype(input_dtype, np.complexfloating):
    return [conj(t)]
  else:
    return [real(t)]
xla.register_translation(conj_p,
lambda ctx, avals_in, avals_out, x, **kwargs: [xops.Conj(x)])
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
mlir.register_lowering(abs_p, partial(_nary_lower_mhlo, mhlo.AbsOp))
def _abs_jvp_rule(g, ans, x):
  """JVP for abs: real(g * conj(x) / |x|) for complex x, sign-select for real.

  ``_replace_zero`` guards against division by zero at x == 0.
  """
  if _iscomplex(x):
    return _maybe_real(mul(g, div(_maybe_conj(x),
           _replace_zero(convert_element_type(ans, _dtype(x))))))
  else:
    return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))
mlir.register_lowering(sqrt_p, partial(_nary_lower_mhlo, mhlo.SqrtOp))
rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
lambda g, ans, x:
mul(g, mul(_const(x, -0.5), div(ans, x))))
mlir.register_lowering(rsqrt_p, partial(_nary_lower_mhlo, mhlo.RsqrtOp))
cbrt_p = standard_unop(_float, 'cbrt')
ad.defjvp2(cbrt_p,
lambda g, ans, x: mul(g, mul(_const(x, 1/3), integer_pow(ans, -2))))
mlir.register_lowering(cbrt_p, partial(_nary_lower_mhlo, mhlo.CbrtOp))
pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, ans, x, y):
  """JVP of pow w.r.t. x: y * x**(y-1), with y == 0 guarded to avoid x**-1."""
  jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
  return mul(g, jac)
def _pow_jvp_rhs(g, ans, x, y):
  """JVP of pow w.r.t. y: log(x) * x**y, with x == 0 replaced to avoid log(0)."""
  return mul(g, mul(log(_replace_zero(x)), ans))
ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
mlir.register_lowering(pow_p, partial(_nary_lower_mhlo, mhlo.PowOp))
def _integer_pow_dtype_rule(x, *, y):
  """dtype rule for integer_pow; forbids negative powers of integer inputs."""
  dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
  if y < 0 and dtypes.issubdtype(dtype, np.integer):
    raise TypeError("Integers cannot be raised to negative powers, got "
                    f"integer_pow({x}, {y})")
  return dtype
def _integer_pow_translation_rule(ctx, avals_in, avals_out, x, *, y):
  """XLA translation for integer_pow via binary (square-and-multiply)
  exponentiation; negative y takes the reciprocal of x**|y|."""
  # This should be kept in sync with the jax2tf translation rule.
  x_aval, = avals_in
  if y == 0:
    one = xla.pyval_to_ir_constant(ctx.builder, np.array(1, dtype=x_aval.dtype))
    return [xops.Broadcast(one, x_aval.shape)]
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  acc = None
  while y > 0:
    if y & 1:
      acc = x if acc is None else xops.Mul(acc, x)
    y >>= 1
    if y > 0:
      x = xops.Mul(x, x)
  return [xops.Reciprocal(acc) if is_reciprocal else acc]
def _integer_pow_jvp(g, x, *, y):
  """JVP for integer_pow: g * y * x**(y-1); zero tangent when y == 0."""
  return _zeros(g) if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))
integer_pow_p = standard_primitive(
_attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
def _integer_pow(x, *, y):
  """Computes x**y by binary (square-and-multiply) exponentiation."""
  # This should be kept in sync with the jax2tf translation rule.
  if y == 0:
    return full_like(x, 1)
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  acc = None
  while y > 0:
    if y & 1:
      acc = x if acc is None else mul(acc, x)
    y >>= 1
    if y > 0:
      # We don't call square because it calls integer_pow.
      x = mul(x, x)
  # Negative exponents: compute x**|y| and take the reciprocal.
  return div(full_like(acc, 1), acc) if is_reciprocal else acc
def _integer_pow_lowering(ctx, x, *, y):
  """MHLO lowering for integer_pow; caches the expansion for larger powers."""
  lowering = mlir.lower_fun(_integer_pow, multiple_results=False)
  # TODO(b/217551391): emitting an out-of-line call leads to a large
  # expansion when the MHLO is lowered to HLO, because the HLO lowering
  # clones the callee. Consider unconditionally caching when the MHLO->HLO
  # lowering doesn't expand the program.
  if y >= 4:
    lowering = mlir.cache_lowering(lowering)
  return lowering(ctx, x, y=y)
mlir.register_lowering(integer_pow_p, _integer_pow_lowering)
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
not_p = standard_unop(_bool_or_int, 'not')
ad.defjvp_zero(not_p)
mlir.register_lowering(not_p, partial(_nary_lower_mhlo, mhlo.NotOp))
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
mlir.register_lowering(and_p, partial(_nary_lower_mhlo, mhlo.AndOp))
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
mlir.register_lowering(or_p, partial(_nary_lower_mhlo, mhlo.OrOp))
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
mlir.register_lowering(xor_p, partial(_nary_lower_mhlo, mhlo.XorOp))
population_count_p = standard_unop(_int, 'population_count')
mlir.register_lowering(population_count_p,
partial(_nary_lower_mhlo, mhlo.PopulationCountOp))
clz_p = standard_unop(_int, 'clz')
mlir.register_lowering(clz_p, partial(_nary_lower_mhlo, mhlo.ClzOp))
def _add_jvp(primals, tangents):
  """JVP for add: tangent is xdot + ydot, short-circuiting symbolic zeros.

  When one tangent is Zero, the other is broadcast to the primal output
  shape rather than materializing the zero.
  """
  x, y = primals
  xdot, ydot = tangents
  primal_out = add(x, y)
  if type(xdot) is type(ydot) is ad_util.Zero:
    return primal_out, ad_util.Zero.from_value(primal_out)
  if type(xdot) is ad_util.Zero:
    return primal_out, _maybe_broadcast(primal_out.shape, ydot)
  elif type(ydot) is ad_util.Zero:
    return primal_out, _maybe_broadcast(primal_out.shape, xdot)
  else:
    return primal_out, add(xdot, ydot)
def _add_transpose(t, x, y):
  """Transpose for add: both operands receive the (unbroadcast) cotangent."""
  # Morally the following assertion is true, but because we instantiate zeros in
  # some places (e.g. in custom_jvp) it may not always hold. For example, see
  # api_test.py's CustomJVPTest.test_jaxpr_zeros.
  # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
  x_aval = x.aval if ad.is_undefined_primal(x) else _abstractify(x)
  y_aval = y.aval if ad.is_undefined_primal(y) else _abstractify(y)
  if type(t) is ad_util.Zero:
    return [ad_util.Zero(x_aval), ad_util.Zero(y_aval)]
  else:
    return [_unbroadcast(x_aval, t), _unbroadcast(y_aval, t)]
def _add_inverse(r, x, y):
  """Inverse for add: recovers each operand from the result and the other."""
  return r - y, r - x
# TODO(slebedev): Why does mypy fail to infer the type here?
add_p: Primitive = standard_naryop([_num, _num], 'add')
ad.primitive_jvps[add_p] = _add_jvp
ad.primitive_transposes[add_p] = _add_transpose
iad.definverse(add_p, _add_inverse)
mlir.register_lowering(add_p, partial(_nary_lower_mhlo, mhlo.AddOp))
def _sub_jvp(primals, tangents):
  """JVP for sub: tangent is xdot - ydot, short-circuiting symbolic zeros."""
  x, y = primals
  xdot, ydot = tangents
  primal_out = sub(x, y)
  if type(xdot) is type(ydot) is ad_util.Zero:
    return primal_out, ad_util.Zero.from_value(primal_out)
  if type(xdot) is ad_util.Zero:
    # d(x - y) = -ydot when xdot is zero.
    return primal_out, _maybe_broadcast(primal_out.shape, neg(ydot))
  elif type(ydot) is ad_util.Zero:
    return primal_out, _maybe_broadcast(primal_out.shape, xdot)
  else:
    return primal_out, sub(xdot, ydot)
def _sub_transpose(t, x, y):
  """Transpose for sub: x receives t, y receives -t (each unbroadcast)."""
  # Morally the following assertion is true, but see the comment in add_p's
  # transpose rule.
  # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
  x_aval = x.aval if ad.is_undefined_primal(x) else _abstractify(x)
  y_aval = y.aval if ad.is_undefined_primal(y) else _abstractify(y)
  if type(t) is ad_util.Zero:
    return [ad_util.Zero(x_aval), ad_util.Zero(y_aval)]
  else:
    return [_unbroadcast(x_aval, t), _unbroadcast(y_aval, neg(t))]
sub_p = standard_naryop([_num, _num], 'sub')
ad.primitive_jvps[sub_p] = _sub_jvp
ad.primitive_transposes[sub_p] = _sub_transpose
mlir.register_lowering(sub_p, partial(_nary_lower_mhlo, mhlo.SubOp))
def _mul_transpose(ct, x, y):
  """Transpose for mul: exactly one operand is linear; it receives ct times
  the other (defined) operand, unbroadcast to its shape."""
  assert ad.is_undefined_primal(x) ^ ad.is_undefined_primal(y)
  if ad.is_undefined_primal(x):
    if type(ct) is ad_util.Zero:
      return [ad_util.Zero(x.aval), None]
    else:
      return [_unbroadcast(x.aval, mul(ct, y)), None]
  else:
    if type(ct) is ad_util.Zero:
      return [None, ad_util.Zero(y.aval)]
    else:
      return [None, _unbroadcast(y.aval, mul(x, ct))]
def _mul_inverse(r, x, y):
  """Inverse for mul: recovers each operand from the result and the other."""
  return r / y, r / x
mul_p = standard_naryop([_num, _num], 'mul')
ad.defjvp(mul_p,
lambda xdot, x, y: mul(xdot, y),
lambda ydot, x, y: mul(x, ydot))
ad.primitive_transposes[mul_p] = _mul_transpose
iad.definverse(mul_p, _mul_inverse)
mlir.register_lowering(mul_p, partial(_nary_lower_mhlo, mhlo.MulOp))
def _div_transpose_rule(cotangent, x, y):
  """Transpose for div: only linear in the numerator, which receives ct / y."""
  assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
  if type(cotangent) is ad_util.Zero:
    return [ad_util.Zero(x.aval), None]
  else:
    return [_unbroadcast(x.aval, div(cotangent, y)), None]
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
lambda g, x, y: div(g, y),
lambda g, x, y: mul(mul(neg(g), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
mlir.register_lowering(div_p, partial(_nary_lower_mhlo, mhlo.DivOp))
rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(
rem_p,
lambda g, x, y: _maybe_broadcast(broadcast_shapes(np.shape(x), np.shape(y)), g),
lambda g, x, y: mul(neg(g), floor(div(x, y))))
mlir.register_lowering(rem_p, partial(_nary_lower_mhlo, mhlo.RemOp))
def _broadcasting_select(c, which, x, y):
  """Wrapper around XLA `Select` that broadcasts its arguments."""
  which_shape, x_shape, y_shape = (
    c.get_shape(t).dimensions() for t in (which, x, y))
  out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
  # Align each operand's dims with the trailing dims of the output shape.
  bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
                                         len(out_shape)))
  which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
  x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
  y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
  return xops.Select(which, x, y)
def _minmax_complex_lowering(x, y, *, lax_cmp_pick_x):
  """Lowers min/max on complex values via lexicographic (real, imag) order.

  ``lax_cmp_pick_x`` is the comparison (gt for max, lt for min) deciding
  whether ``x`` is chosen.
  """
  result_shape = broadcast_shapes(np.shape(x), np.shape(y))
  x = _maybe_broadcast(result_shape, x)
  y = _maybe_broadcast(result_shape, y)
  rx = real(x)
  ry = real(y)
  # Compare real parts first; fall back to imaginary parts on ties.
  pick_x = select(eq(rx, ry), lax_cmp_pick_x(imag(x), imag(y)),
                  lax_cmp_pick_x(rx, ry))
  return select(pick_x, x, y)
def _minmax_translation_rule(ctx, avals_in, avals_out, x, y, *, op_minmax=None,
                             lax_cmp_pick_x=None):
  """XLA translation for min/max: native op for real dtypes, lexicographic
  lowering for complex dtypes."""
  x_aval, y_aval = avals_in
  if dtypes.issubdtype(x_aval.dtype, np.complexfloating):
    return xla.lower_fun(partial(_minmax_complex_lowering,
                                 lax_cmp_pick_x=lax_cmp_pick_x),
                         multiple_results=False,
                         new_style=True)(ctx, avals_in, avals_out, x, y)
  else:
    return [op_minmax(x, y)]
max_p: core.Primitive = standard_naryop(
[_any, _any], 'max', translation_rule=partial(
_minmax_translation_rule, op_minmax=xops.Max, lax_cmp_pick_x=gt))
ad.defjvp2(max_p,
lambda g, ans, x, y: mul(g, _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(g, _balanced_eq(y, ans, x)))
mlir.register_lowering(max_p, partial(_nary_lower_mhlo, mlir.max_mhlo))
min_p: core.Primitive = standard_naryop(
[_any, _any], 'min', translation_rule=partial(
_minmax_translation_rule, op_minmax=xops.Min, lax_cmp_pick_x=lt))
ad.defjvp2(min_p,
lambda g, ans, x, y: mul(g, _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(g, _balanced_eq(y, ans, x)))
mlir.register_lowering(min_p, partial(_nary_lower_mhlo, mlir.min_mhlo))
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
mlir.register_lowering(shift_left_p, partial(_nary_lower_mhlo, mhlo.ShiftLeftOp))
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
mlir.register_lowering(shift_right_arithmetic_p,
partial(_nary_lower_mhlo, mhlo.ShiftRightArithmeticOp))
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
mlir.register_lowering(shift_right_logical_p,
partial(_nary_lower_mhlo, mhlo.ShiftRightLogicalOp))
def _compare_lower_mhlo(direction: str, ctx, x, y):
  """Lower a comparison primitive to mhlo.CompareOp.

  The comparison type is chosen from the operand dtype: FLOAT for inexact,
  SIGNED for signed integers, UNSIGNED for everything else (unsigned ints and
  bools fall into this last bucket).
  """
  x_aval, y_aval = ctx.avals_in
  aval_out, = ctx.avals_out
  # Broadcast both operands to the output shape (keeping the input dtype).
  x, y = broadcast_mhlo(aval_out.update(dtype=x_aval.dtype), ctx.avals_in,
                        (x, y))
  if dtypes.issubdtype(x_aval.dtype, np.inexact):
    compare_type = "FLOAT"
  elif dtypes.issubdtype(x_aval.dtype, np.signedinteger):
    compare_type = "SIGNED"
  else:
    compare_type = "UNSIGNED"
  return mhlo.CompareOp(mlir.aval_to_ir_type(aval_out), x, y,
                        ir.StringAttr.get(direction),
                        ir.StringAttr.get(compare_type)).results
# Comparison primitives: each returns bool and has a zero derivative.
eq_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
mlir.register_lowering(eq_p, partial(_compare_lower_mhlo, "EQ"))

ne_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
mlir.register_lowering(ne_p, partial(_compare_lower_mhlo, "NE"))

ge_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
mlir.register_lowering(ge_p, partial(_compare_lower_mhlo, "GE"))

gt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
mlir.register_lowering(gt_p, partial(_compare_lower_mhlo, "GT"))

le_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
mlir.register_lowering(le_p, partial(_compare_lower_mhlo, "LE"))

lt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
mlir.register_lowering(lt_p, partial(_compare_lower_mhlo, "LT"))
def _convert_element_type_shape_rule(operand, *, new_dtype, weak_type):
  """convert_element_type is elementwise: the shape is unchanged."""
  return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, weak_type):
  """The output dtype is exactly the requested target dtype."""
  return new_dtype
def _convert_element_type_weak_type_rule(operand, *, new_dtype, weak_type):
  """The output weak-type flag comes from the primitive's parameter."""
  return weak_type
def _convert_element_type_translation_rule(ctx, avals_in, avals_out, operand, *,
                                           new_dtype, weak_type):
  """XLA translation: a complex -> real conversion drops the imaginary part
  explicitly (xops.Real) before converting the element type."""
  aval_in, = avals_in
  old_dtype = aval_in.dtype
  if (dtypes.issubdtype(old_dtype, np.complexfloating) and
      not dtypes.issubdtype(new_dtype, np.complexfloating)):
    operand = xops.Real(operand)
  new_etype = xla.dtype_to_primitive_type(new_dtype)
  return [xops.ConvertElementType(operand, new_element_type=new_etype)]
def _convert_element_type_transpose_rule(ct, operand, *, new_dtype, weak_type):
  """Transpose: convert the cotangent back to the operand's original dtype.

  Primal dtypes whose tangent dtype is float0 (non-differentiable dtypes)
  transpose to a symbolic Zero with dtype float0.
  """
  assert ad.is_undefined_primal(operand)
  old_dtype = operand.aval.dtype
  old_weak_type = dtypes.is_weakly_typed(operand)
  if type(ct) is ad_util.Zero:
    return [ad_util.Zero(operand.aval)]
  elif core.primal_dtype_to_tangent_dtype(old_dtype) is dtypes.float0:
    return [ad_util.Zero(operand.aval.update(dtype=dtypes.float0, weak_type=False))]
  else:
    return [convert_element_type_p.bind(ct, new_dtype=old_dtype,
                                        weak_type=old_weak_type)]
def _convert_element_type_jvp_rule(tangent, operand, *, new_dtype, weak_type):
  """JVP: tangents convert like primals, except when the target dtype has no
  tangents (tangent dtype float0), which yields a symbolic Zero."""
  if core.primal_dtype_to_tangent_dtype(new_dtype) is dtypes.float0:
    return ad_util.Zero(tangent.aval.update(dtype=dtypes.float0, weak_type=False))
  else:
    return convert_element_type_p.bind(tangent, new_dtype=new_dtype,
                                       weak_type=weak_type)
def _convert_elt_type_folding_rule(consts, eqn):
  """Partial-eval constant folding: fold a scalar literal through the dtype
  conversion at trace time instead of staging out the equation."""
  c, = consts
  if type(c) in core.literalable_types and not np.shape(c):
    return [np.array(c, eqn.params['new_dtype'])], None
  else:
    return [None], eqn
def _convert_elt_type_fwd_rule(eqn):
  """Partial-eval forwarding: a conversion that changes neither dtype nor
  weak type is the identity, so forward the input var and drop the eqn."""
  v, = eqn.invars
  if (v.aval.dtype == eqn.params['new_dtype'] and
      v.aval.weak_type == eqn.params['weak_type']):
    return [v], None
  else:
    return [None], eqn
def _convert_elt_type_pp_rule(eqn, context, settings):
  """Jaxpr pretty-printing rule for convert_element_type equations."""
  # don't print new_dtype because the output binder shows it
  printed_params = {}
  if eqn.params['weak_type']:
    printed_params['weak_type'] = True
  return [pp.text(eqn.primitive.name),
          core.pp_kv_pairs(sorted(printed_params.items()), context, settings),
          pp.text(" ") + core.pp_vars(eqn.invars, context)]
# convert_element_type primitive: impl, abstract eval, XLA translation, AD,
# batching/masking vectorization, and partial-eval simplification rules.
convert_element_type_p = Primitive('convert_element_type')
convert_element_type_p.def_impl(partial(xla.apply_primitive, convert_element_type_p))
convert_element_type_p.def_abstract_eval(
    partial(standard_abstract_eval, convert_element_type_p,
            _convert_element_type_shape_rule, _convert_element_type_dtype_rule,
            _convert_element_type_weak_type_rule, standard_named_shape_rule))
xla.register_translation(convert_element_type_p,
                         _convert_element_type_translation_rule)
ad.defjvp(convert_element_type_p, _convert_element_type_jvp_rule)
ad.primitive_transposes[convert_element_type_p] = _convert_element_type_transpose_rule
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
pe.const_fold_rules[convert_element_type_p] = _convert_elt_type_folding_rule
pe.forwarding_rules[convert_element_type_p] = _convert_elt_type_fwd_rule
# TODO(mattjj): un-comment the next line (see #9456)
# core.pp_eqn_rules[convert_element_type_p] = _convert_elt_type_pp_rule
def _real_dtype(dtype): return np.finfo(dtype).dtype
def _convert_element_type_lower(ctx, operand, *, new_dtype, weak_type):
  """MLIR lowering: like the XLA rule, a complex -> real conversion takes the
  real part first, then converts the element type."""
  aval_in, = ctx.avals_in
  aval_out, = ctx.avals_out
  if (dtypes.issubdtype(aval_in.dtype, np.complexfloating) and
      not dtypes.issubdtype(new_dtype, np.complexfloating)):
    operand = mhlo.RealOp(operand).result
    aval_in = aval_in.update(dtype=_real_dtype(aval_in.dtype))
  return [mlir.convert_mhlo(operand, aval_in, aval_out)]
mlir.register_lowering(convert_element_type_p, _convert_element_type_lower)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
  """bitcast_convert_type is elementwise: the shape is unchanged."""
  return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
  """Dtype rule: source and destination must have equal bit widths, and
  bool/complex operands may only 'bitcast' to their own dtype.

  Raises:
    TypeError: if either constraint is violated.
  """
  old_dtype = dtypes.canonicalize_dtype(operand.dtype)
  if dtypes.issubdtype(old_dtype, np.bool_) or dtypes.issubdtype(old_dtype, np.complexfloating):
    if old_dtype != new_dtype:
      raise TypeError(f"`bitcast_convert_type` for operand type ({old_dtype}) cannot have different destination type ({new_dtype})")
  if np.dtype(old_dtype).itemsize != np.dtype(new_dtype).itemsize:
    raise TypeError(f"`bitcast_convert_type` for operand type ({old_dtype}) must have destination type ({new_dtype}) of same size.")
  return new_dtype
def _bitcast_convert_type_translation_rule(ctx, avals_in, avals_out, operand, *,
                                           new_dtype):
  """XLA translation for bitcast_convert_type."""
  new_etype = xla.dtype_to_primitive_type(new_dtype)
  return [xops.BitcastConvertType(operand, new_element_type=new_etype)]
# bitcast_convert_type primitive: zero derivative, vectorizes trivially, and
# always produces a strongly-typed result (_strip_weak_type).
bitcast_convert_type_p = standard_primitive(
    _bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
    'bitcast_convert_type', _bitcast_convert_type_translation_rule,
    weak_type_rule=_strip_weak_type)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _bitcast_convert_type_lower(ctx, operand, *, new_dtype):
  """MLIR lowering for bitcast_convert_type."""
  aval_out, = ctx.avals_out
  return mhlo.BitcastConvertOp(mlir.aval_to_ir_type(aval_out), operand).results
mlir.register_lowering(bitcast_convert_type_p, _bitcast_convert_type_lower)
def _validate_preferred_element_type(input_dtype, preferred_element_type):
  """Check that `preferred_element_type` is a valid accumulator dtype.

  It must be in the same numeric class (integer/floating/complex) as the
  input dtype, have the same signedness, and be at least as wide.

  Raises:
    TypeError: if any constraint is violated.
  """
  allowed_types = (np.integer, np.floating, np.complexfloating)
  if any(dtypes.issubdtype(input_dtype, t) and not dtypes.issubdtype(preferred_element_type, t) for t in allowed_types):
    raise TypeError("`preferred_element_type` and the original type must both be integral, both be floating point, or both complex.")
  if dtypes.issubdtype(input_dtype, np.signedinteger) and not dtypes.issubdtype(preferred_element_type, np.signedinteger):
    raise TypeError("`preferred_element_type` must have the same signedness as the original type.")
  input_bitwidth = np.dtype(input_dtype).itemsize
  preferred_bitwidth = np.dtype(preferred_element_type).itemsize
  if preferred_bitwidth < input_bitwidth:
    raise TypeError("`preferred_element_type` must not be narrower than the original type.")
def _precision_config(precision):
  """Build an XLA PrecisionConfig from a lax precision spec.

  `precision` may be None (returns None), a single value (applied to both
  operands), or a pair of per-operand values.
  """
  if precision is None:
    return None
  config = xla_client.PrecisionConfig()
  if isinstance(precision, tuple):
    pair = precision
  else:
    pair = (precision, precision)
  config.operand_precision.extend(pair)
  return config
def _masked(padded_value, logical_shape, dimensions, value=0):
  """
  Sets all padding to the given value (default is 0) in the given dimensions.

  All values outside the logical shape are considered padding: an iota-based
  bound check is built per dimension, the per-dimension masks are intersected,
  and out-of-bounds positions are replaced with `value`.
  """
  if len(dimensions) == 0:
    return padded_value
  masks = [broadcasted_iota(np.int32, padded_value.shape, d) < logical_shape[d]
           for d in dimensions]
  mask_intersection = masks[0]
  for mask in masks[1:]:
    mask_intersection &= mask
  return select(mask_intersection, padded_value, full_like(padded_value, value))
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision,
                            preferred_element_type: Optional[DType]):
  """Shape rule for dot_general: validate dimension_numbers, then compute the
  output shape (batch dims, then lhs free dims, then rhs free dims).

  Raises:
    TypeError: if any dimension number is out of range or duplicated, if
      batch and contracting dims overlap, or if the paired batch/contracting
      dims disagree in shape between lhs and rhs.
  """
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  # All dimension indices must be within each operand's rank.
  if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, lhs.ndim))
             for d in (lhs_contracting, lhs_batch)):
    msg = ("dot_general requires lhs dimension numbers to be nonnegative and "
           "less than the number of axes of the lhs value, got "
           f"lhs_batch of {lhs_batch} and lhs_contracting of {lhs_contracting} "
           f"for lhs of rank {lhs.ndim}")
    raise TypeError(msg)
  if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, rhs.ndim))
             for d in (rhs_contracting, rhs_batch)):
    msg = ("dot_general requires rhs dimension numbers to be nonnegative and "
           "less than the number of axes of the rhs value, got "
           f"rhs_batch of {rhs_batch} and rhs_contracting of {rhs_contracting} "
           f"for rhs of rank {rhs.ndim}")
    raise TypeError(msg)
  if len(lhs_batch) != len(rhs_batch):
    msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
           "dimensions, got lhs_batch {} and rhs_batch {}.")
    raise TypeError(msg.format(lhs_batch, rhs_batch))
  # Batch and contracting dims must be duplicate-free and mutually disjoint.
  lhs_contracting_set, lhs_batch_set = set(lhs_contracting), set(lhs_batch)
  rhs_contracting_set, rhs_batch_set = set(rhs_contracting), set(rhs_batch)
  if len(lhs_batch_set) != len(lhs_batch):
    msg = ("dot_general requires lhs batch dimensions to be distinct, got "
           f"lhs_batch {lhs_batch}.")
    raise TypeError(msg)
  if len(rhs_batch_set) != len(rhs_batch):
    msg = ("dot_general requires rhs batch dimensions to be distinct, got "
           f"rhs_batch {rhs_batch}.")
    raise TypeError(msg)
  if len(lhs_contracting_set) != len(lhs_contracting):
    msg = ("dot_general requires lhs contracting dimensions to be distinct, "
           f"got lhs_contracting {lhs_contracting}.")
    raise TypeError(msg)
  if len(rhs_contracting_set) != len(rhs_contracting):
    msg = ("dot_general requires rhs contracting dimensions to be distinct, "
           f"got rhs_contracting {rhs_contracting}.")
    raise TypeError(msg)
  if lhs_contracting_set & lhs_batch_set:
    msg = ("dot_general requires lhs batch dimensions to be disjoint from "
           "contracting dimensions, got lhs_batch {} and lhs_contracting {}.")
    raise TypeError(msg.format(lhs_batch, lhs_contracting))
  if rhs_contracting_set & rhs_batch_set:
    msg = ("dot_general requires rhs batch dimensions to be disjoint from "
           "contracting dimensions, got rhs_batch {} and rhs_contracting {}.")
    raise TypeError(msg.format(rhs_batch, rhs_contracting))
  # Paired batch dims and contracting dims must agree in (symbolic) shape.
  lhs_batch_shape = tuple(lhs.shape[i] for i in lhs_batch)
  rhs_batch_shape = tuple(rhs.shape[i] for i in rhs_batch)
  if not core.symbolic_equal_shape(lhs_batch_shape, rhs_batch_shape):
    msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
           "to have the same shape, got {} and {}.")
    raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
  lhs_contracting_shape = tuple(lhs.shape[i] for i in lhs_contracting)
  rhs_contracting_shape = tuple(rhs.shape[i] for i in rhs_contracting)
  if not core.symbolic_equal_shape(lhs_contracting_shape, rhs_contracting_shape):
    msg = ("dot_general requires contracting dimensions to have the same "
           "shape, got {} and {}.")
    raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
  return _dot_general_shape_computation(lhs.shape, rhs.shape, dimension_numbers)
def _dot_general_shape_computation(lhs_shape, rhs_shape, dimension_numbers):
  """Compute dot_general's output shape: the shared batch dims, followed by
  lhs's free (non-batch, non-contracting) dims, then rhs's free dims."""
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers

  def free_dims(shape, contracting, batch):
    # Keep only the axes of `shape` that are neither contracted nor batched.
    removed = set(contracting) | set(batch)
    return tuple(size for axis, size in enumerate(shape) if axis not in removed)

  batch_shape = tuple(lhs_shape[axis] for axis in lhs_batch)
  return (batch_shape
          + free_dims(lhs_shape, lhs_contracting, lhs_batch)
          + free_dims(rhs_shape, rhs_contracting, rhs_batch))
def tuple_delete(tup, idx):
  """Return `tup` with the positions listed in `idx` removed."""
  excluded = set(idx)
  return tuple(elt for pos, elt in enumerate(tup) if pos not in excluded)
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision,
                            preferred_element_type: Optional[DType]):
  """Result dtype: the operands' common dtype, unless a (validated)
  preferred_element_type overrides it."""
  input_dtype = naryop_dtype_rule(_input_dtype, [_any, _any], 'dot_general', lhs, rhs)
  if preferred_element_type is None:
    return input_dtype
  _validate_preferred_element_type(input_dtype, preferred_element_type)
  return preferred_element_type
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
                               preferred_element_type: Optional[DType],
                               swap_ans=False):
  """Transpose of dot_general w.r.t. the lhs: contract the cotangent `g`
  against `y` over y's free dims, then permute axes back into lhs order.

  `swap_ans` indicates `g` has its lhs/rhs free-dim blocks swapped (used by
  _dot_general_transpose_rhs below).
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  # Recover x's rank from the ranks of g and y and the dimension numbers.
  x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
  x_kept = remaining(range(x_ndim), x_contract, x_batch)
  y_kept = remaining(range(y.ndim), y_contract, y_batch)
  if swap_ans:
    ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
  else:
    ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
  dims = ((ans_y, y_kept), (ans_batch, y_batch))
  # x's contracting axes come out ordered by y's contracting axes; sort back.
  x_contract_sorted_by_y = list(np.take(x_contract, np.argsort(y_contract)))  # type: ignore[arg-type]
  out_axes = np.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
  return transpose(dot_general(g, y, dims, precision=precision, preferred_element_type=preferred_element_type),
                   tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision,
                               preferred_element_type: Optional[DType]):
  """Transpose of dot_general w.r.t. the rhs operand.

  Implemented by reusing the lhs transpose rule with the lhs/rhs roles of the
  dimension numbers swapped and swap_ans set.
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  swapped_dnums = ((y_contract, x_contract), (y_batch, x_batch))
  return _dot_general_transpose_lhs(
      g, x,
      dimension_numbers=swapped_dnums,
      precision=precision,
      preferred_element_type=preferred_element_type,
      swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
                            precision,
                            preferred_element_type: Optional[DType]):
  """Batching rule: fold the mapped axes into the dimension numbers and issue
  a single dot_general on the batched operands."""
  lhs, rhs = batched_args
  new_dimension_numbers, result_batch_dim = _dot_general_batch_dim_nums(
      (lhs.ndim, rhs.ndim), batch_dims, dimension_numbers)
  batched_out = dot_general(lhs, rhs, new_dimension_numbers,
                            precision=precision,
                            preferred_element_type=preferred_element_type)
  return batched_out, result_batch_dim
def _dot_general_batch_dim_nums(ndims, batch_dims, dimension_numbers):
# there are three kinds of dimensions in a dot_general:
# - contraction dimensions appear in lhs and rhs but not the result
# - batch dimensions appear in lhs, rhs, and result
# - tensor product dimensions appear in the result and one of lhs or rhs
lhs_ndim, rhs_ndim = ndims
lbd, rbd = batch_dims
assert lbd is not None or rbd is not None
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
def bump_dims(dims, b):
return tuple(np.add(dims, np.greater_equal(dims, b)))
if lbd is not None and rbd is not None:
# adding a batch dimension
lhs_batch = (lbd,) + bump_dims(lhs_batch, lbd)
rhs_batch = (rbd,) + bump_dims(rhs_batch, rbd)
lhs_contract = bump_dims(lhs_contract, lbd)
rhs_contract = bump_dims(rhs_contract, rbd)
result_batch_dim = 0
else:
# adding a tensor product dimension
if lbd is not None:
other = tuple(d for d in range(lhs_ndim)
if d not in lhs_batch and d not in lhs_contract)
result_batch_dim = (len(lhs_batch) + sum(np.less(other, lbd)))
lhs_batch = bump_dims(lhs_batch, lbd)
lhs_contract = bump_dims(lhs_contract, lbd)
else:
other = tuple(d for d in range(rhs_ndim)
if d not in rhs_batch and d not in rhs_contract)
result_batch_dim = (lhs_ndim - len(lhs_contract) +
sum(np.less(other, rbd)))
rhs_batch = bump_dims(rhs_batch, rbd)
rhs_contract = bump_dims(rhs_contract, rbd)
new_dimension_numbers = ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))
return new_dimension_numbers, int(result_batch_dim)
def _dot_general_translation_rule(ctx, avals_in, avals_out, lhs, rhs, *,
                                  dimension_numbers, precision,
                                  preferred_element_type: Optional[DType]):
  """Default XLA translation for dot_general."""
  if preferred_element_type is not None:
    preferred_element_type = xla.dtype_to_primitive_type(preferred_element_type)
  return [xops.DotGeneral(lhs, rhs,
                          xc.make_dot_dimension_numbers(dimension_numbers),
                          precision_config=_precision_config(precision),
                          preferred_element_type=preferred_element_type)]
def _dot_general_cpu_translation_rule(ctx, avals_in, avals_out, lhs, rhs, *,
                                      dimension_numbers, precision,
                                      preferred_element_type: Optional[DType]):
  """CPU-specific XLA translation for dot_general.

  Identical to the default rule except that float16 inputs are upcast to
  float32 for the matmul, with the result converted back to float16 via
  preferred_element_type when none was requested.
  """
  if preferred_element_type is not None:
    preferred_element_type = xla.dtype_to_primitive_type(preferred_element_type)
  # TODO(b/195364460): Work around slow XLA/CPU implementation of float16 matmul
  if avals_in[0].dtype == np.float16:
    lhs = xops.ConvertElementType(
        lhs, xla.dtype_to_primitive_type(np.dtype(np.float32)))
    rhs = xops.ConvertElementType(
        rhs, xla.dtype_to_primitive_type(np.dtype(np.float32)))
    preferred_element_type = (
        preferred_element_type or
        xla.dtype_to_primitive_type(np.dtype(np.float16)))
  return [xops.DotGeneral(lhs, rhs,
                          xc.make_dot_dimension_numbers(dimension_numbers),
                          precision_config=_precision_config(precision),
                          preferred_element_type=preferred_element_type)]
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
                              precision,
                              preferred_element_type: Optional[DType]):
  """Masking rule: zero the lhs padding along contracted dims so that padded
  entries contribute nothing to the contraction."""
  lhs, rhs = padded_vals
  # Only need to mask off contraction dims of one side - we mask the lhs here
  # but this is arbitrary. Could check the sizes of lhs and rhs and mask
  # whichever is smallest.
  lhs_shape, _ = logical_shapes
  (lhs_contract, _), _ = dimension_numbers
  return dot_general(_masked(lhs, lhs_shape, lhs_contract),
                     rhs, dimension_numbers, precision=precision,
                     preferred_element_type=preferred_element_type)
# dot_general primitive registrations. CPU gets a dedicated translation rule
# (float16 workaround); other platforms use the default one.
dot_general_p = standard_primitive(_dot_general_shape_rule,
                                   _dot_general_dtype_rule, 'dot_general',
                                   _dot_general_translation_rule)
ad.defbilinear(dot_general_p,
               _dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
xla.register_translation(dot_general_p, _dot_general_cpu_translation_rule,
                         platform="cpu")
def precision_attr(precision: PrecisionType) -> ir.ArrayAttr:
  """Encode a lax precision spec as an MLIR array of string attributes.

  None means (DEFAULT, DEFAULT); a single value applies to both operands.
  """
  if precision is None:
    pair = (Precision.DEFAULT, Precision.DEFAULT)
  elif isinstance(precision, tuple):
    pair = precision
  else:
    pair = (precision, precision)
  attrs = [ir.StringAttr.get(str(p)) for p in pair]
  return ir.ArrayAttr.get(attrs)
def _dot_general_lower(ctx, lhs, rhs, *, dimension_numbers,
                       precision, preferred_element_type: Optional[np.dtype]):
  """MLIR lowering for dot_general, mirroring the CPU float16 workaround."""
  del preferred_element_type  # Implied by the output aval
  lhs_aval, rhs_aval = ctx.avals_in
  aval_out, = ctx.avals_out
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  # TODO(b/195364460): Work around slow XLA/CPU implementation of float16 matmul
  if ctx.module_context.platform == "cpu":
    if lhs_aval.dtype == np.float16:
      f32 = mlir.dtype_to_ir_type(np.dtype(np.float32))
      lhs = mhlo.ConvertOp(ir.RankedTensorType.get(lhs_aval.shape, f32),
                           lhs).result
    if rhs_aval.dtype == np.float16:
      f32 = mlir.dtype_to_ir_type(np.dtype(np.float32))
      rhs = mhlo.ConvertOp(ir.RankedTensorType.get(rhs_aval.shape, f32),
                           rhs).result
  dot_dnums = mhlo.DotDimensionNumbers.get(
      lhs_batching_dimensions=list(lhs_batch),
      rhs_batching_dimensions=list(rhs_batch),
      lhs_contracting_dimensions=list(lhs_contracting),
      rhs_contracting_dimensions=list(rhs_contracting))
  return [mhlo.DotGeneralOp(mlir.aval_to_ir_type(aval_out), lhs, rhs,
                            dot_dnums, precision_attr(precision)).result]
mlir.register_lowering(dot_general_p, _dot_general_lower)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
  """Shape rule for broadcast_in_dim: validate the target shape and the
  mapping of operand axes into it; the result shape is `shape` itself.

  Raises:
    TypeError: if broadcast_dimensions has the wrong length, is out of range,
      is not strictly increasing, or maps an operand dim whose size is
      neither 1 nor the corresponding target size.
  """
  _check_shapelike('broadcast_in_dim', 'shape', shape)
  _check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
                   broadcast_dimensions)
  operand_ndim = np.ndim(operand)
  if operand_ndim != len(broadcast_dimensions):
    msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
           'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
  if len(shape) < operand_ndim:
    msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '
           'to the operand shape; got operand ndim {} and target broadcast ndim {}.')
    raise TypeError(msg.format(operand_ndim, len(shape)))
  if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
    msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
           'dimensions, got {} for operand ndim {} and shape {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
  # Each operand dim must be size 1 or match its mapped target dim.
  if not all(core.symbolic_equal_one_of_dim(operand.shape[i],
                                            [1, shape[broadcast_dimensions[i]]])
             for i in range(operand_ndim)):
    msg = (
        "broadcast_in_dim operand dimension sizes must either be 1, or be "
        "equal to their corresponding dimensions in the target broadcast "
        "shape; got operand of shape {}, target broadcast shape {}, "
        "broadcast_dimensions {} ")
    raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
  if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
      tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
    msg = ("broadcast_in_dim broadcast_dimensions must be strictly increasing; "
           "got broadcast_dimensions {}")
    raise TypeError(msg.format(broadcast_dimensions))
  return shape
def _broadcast_in_dim_transpose_rule(ct, operand, *, shape, broadcast_dimensions):
  """Transpose: sum the cotangent over the broadcasted axes, then restore
  the operand's size-1 dimensions with expand_dims."""
  shape_in = operand.aval.shape
  unit_dimensions = tuple(i for i, s in enumerate(shape_in) if core.symbolic_equal_dim(s, 1))
  bdims = tuple(np.delete(broadcast_dimensions, unit_dimensions))
  # Axes of the output that do not correspond to kept operand dims get summed.
  axes = tuple(np.delete(range(len(shape)), bdims))
  return [expand_dims(_reduce_sum(ct, axes), unit_dimensions)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
                                 broadcast_dimensions):
  """Batching rule: move the mapped axis to the front, prepend its size to the
  target shape, and shift every broadcast dimension right by one."""
  operand, = batched_args
  bdim, = batch_dims
  new_operand = batching.moveaxis(operand, bdim, 0)
  new_shape = (operand.shape[bdim],) + shape
  new_broadcast_dimensions = (0,) + tuple(np.add(1, broadcast_dimensions))
  return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0
def _broadcast_in_dim_fwd_rule(eqn):
  """Partial-eval forwarding: a broadcast with no dynamic shape args whose
  target equals the operand's shape is the identity."""
  v, *dyn = eqn.invars
  if not dyn and core.symbolic_equal_shape(eqn.params['shape'], v.aval.shape):
    return [v], None
  else:
    return [None], eqn
def _broadcast_in_dim_staging_rule(
    trace, x, *dyn_shape, shape, broadcast_dimensions):
  """Custom staging rule supporting dynamic shapes: when `shape` contains
  None placeholders, the corresponding tracer args in `dyn_shape` fill them,
  and the output aval becomes a DShapedArray over those tracers."""
  params = dict(shape=shape, broadcast_dimensions=broadcast_dimensions)
  if not dyn_shape:
    # Fully static: fall back to the standard staging path.
    return trace.default_process_primitive(broadcast_in_dim_p, (x,), params)
  assert len(dyn_shape) == sum(d is None for d in shape)
  source_info = source_info_util.current()
  ds = iter(dyn_shape)
  out_shape_for_tracer: List[Union[int, core.Tracer]] = [
      next(ds) if d is None else d for d in shape]
  aval = core.DShapedArray(tuple(out_shape_for_tracer), x.dtype, x.weak_type)
  out_tracer = pe.DynamicJaxprTracer(trace, aval, source_info)
  invars = [trace.getvar(x), *(trace.getvar(d) for d in dyn_shape)]
  eqn = pe.new_jaxpr_eqn(invars, [trace.makevar(out_tracer)],
                         broadcast_in_dim_p, params, source_info)
  trace.frame.eqns.append(eqn)
  return out_tracer
# broadcast_in_dim primitive: linear, so AD is expressed as a transpose rule.
broadcast_in_dim_p = standard_primitive(
    _broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
ad.deflinear2(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
pe.forwarding_rules[broadcast_in_dim_p] = _broadcast_in_dim_fwd_rule
pe.custom_staging_rules[broadcast_in_dim_p] = _broadcast_in_dim_staging_rule
def _broadcast_in_dim_lower(ctx, x, *, shape, broadcast_dimensions):
  """MLIR lowering: the target shape is implied by the output aval."""
  del shape
  aval_out, = ctx.avals_out
  return mhlo.BroadcastInDimOp(
      mlir.aval_to_ir_type(aval_out), x,
      mlir.dense_int_elements(broadcast_dimensions)
  ).results
mlir.register_lowering(broadcast_in_dim_p, _broadcast_in_dim_lower)
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
raise TypeError("clamp requires min.shape == operand.shape or min.shape == "
f"(), got min.shape={min.shape}, "
f"operand.shape={operand.shape}.")
if max.shape and max.shape != operand.shape:
raise TypeError("clamp requires max.shape == operand.shape or max.shape == "
f"(), got max.shape={max.shape}, "
f"operand.shape={operand.shape}.")
return operand.shape
# clamp dtype rule: min, operand, and max must all agree on dtype.
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
                            'clamp')
def _clamp_batch_rule(batched_args, batch_dims, **params):
  """Batching rule for clamp: broadcast min/x/max to compatible batched
  shapes, with fast paths that avoid transposes and broadcasts."""
  min, x, max = batched_args
  min_bdim, x_bdim, max_bdim = batch_dims
  size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
              if i is not None)

  # avoid transposes and some broadcasts in special cases
  if min_bdim == x_bdim == max_bdim:
    if np.shape(min) == np.shape(x) == np.shape(max):
      return clamp_p.bind(min, x, max), x_bdim
    elif np.ndim(min) == np.ndim(max) == 0:
      return clamp_p.bind(min, x, max), x_bdim
    elif np.ndim(min) == np.ndim(max) == 1:
      min = broadcast_in_dim(min, x.shape, [min_bdim])
      max = broadcast_in_dim(max, x.shape, [max_bdim])
      return clamp_p.bind(min, x, max), x_bdim
  elif np.ndim(min) == 0 and np.ndim(max) == 0 and x_bdim is not None:
    return clamp_p.bind(min, x, max), x_bdim

  # General path: move every mapped axis to the front, then broadcast the
  # smaller-ranked arguments up to x's shape.
  min = batching.bdim_at_front(min, min_bdim, size) if np.shape(min) else min
  max = batching.bdim_at_front(max, max_bdim, size) if np.shape(max) else max
  x = batching.bdim_at_front(x, x_bdim, size) if np.shape(x) else x
  if np.ndim(min) == 0 and np.ndim(x) > 0:
    min = broadcast(min, x.shape)
  if np.ndim(max) == 0 and np.ndim(x) > 0:
    max = broadcast(max, x.shape)
  if 0 < np.ndim(min) < np.ndim(x):
    assert np.ndim(min) == 1, np.ndim(min)
    min = broadcast_in_dim(min, x.shape, [0])
  if 0 < np.ndim(max) < np.ndim(x):
    assert np.ndim(max) == 1, np.ndim(max)
    max = broadcast_in_dim(max, x.shape, [0])
  if np.ndim(min) > np.ndim(x):
    assert np.ndim(x) == 0, np.ndim(x)
    x = broadcast(x, min.shape)
  return clamp_p.bind(min, x, max), 0
# clamp primitive. The three JVP rules pass the tangent of whichever argument
# is "active" at each position: min's tangent where min clamps (min > operand
# and min < max), operand's tangent inside the open (min, max) band, and max's
# tangent where max clamps (max < operand).
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
ad.defjvp(clamp_p,
          lambda g, min, operand, max:
          select(bitwise_and(gt(min, operand), lt(min, max)),
                 g, _zeros(operand)),
          lambda g, min, operand, max:
          select(bitwise_and(gt(operand, min), lt(operand, max)),
                 g, _zeros(operand)),
          lambda g, min, operand, max:
          select(lt(max, operand), g, _zeros(operand)))
batching.primitive_batchers[clamp_p] = _clamp_batch_rule
mlir.register_lowering(
    clamp_p, partial(_nary_lower_mhlo, mhlo.ClampOp, explicit_type=True))
def _concatenate_shape_rule(*operands, **kwargs):
  """Shape rule for concatenate: all operands must agree on every dimension
  except the concatenated one, which is summed.

  Raises:
    TypeError: if no operands are given, any operand isn't an array, ranks
      differ, the dimension is out of bounds, or non-concatenated dims differ.
  """
  dimension = kwargs.pop('dimension')
  if not operands:
    msg = "concatenate expects at least one operand, got 0."
    raise TypeError(msg)
  if not all(isinstance(operand, UnshapedArray) for operand in operands):
    msg = "All objects to concatenate must be arrays, got {}."
    op = next(op for op in operands if not isinstance(op, UnshapedArray))
    raise TypeError(msg.format(type(op)))
  if len({operand.ndim for operand in operands}) != 1:
    msg = "Cannot concatenate arrays with different numbers of dimensions: got {}."
    raise TypeError(msg.format(", ".join(str(o.shape) for o in operands)))
  if not 0 <= dimension < operands[0].ndim:
    msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
    raise TypeError(msg.format(dimension, ", ".join([str(o.shape) for o in operands])))
  # Compare shapes with the concatenated dimension removed.
  shapes = [operand.shape[:dimension] + operand.shape[dimension+1:]
            for operand in operands]
  if not shapes[:-1] == shapes[1:]:
    msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
           "other than the one being concatenated: concatenating along "
           "dimension {} for shapes {}.")
    shapes = [operand.shape for operand in operands]
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))

  concat_size = sum(o.shape[dimension] for o in operands)
  ex_shape = operands[0].shape
  return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def _concatenate_dtype_rule(*operands, **kwargs):
  """All concatenated operands must share a dtype; the result keeps it."""
  _check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
  return operands[0].dtype
def _concatenate_translation_rule(ctx, avals_in, avals_out, *operands,
                                  dimension, **kw):
  """XLA translation for concatenate."""
  return [xops.ConcatInDim(ctx.builder, operands, dimension)]
def _concatenate_transpose_rule(t, *operands, dimension):
  """Transpose: slice the cotangent back into per-operand pieces along the
  concatenated dimension (only for undefined primals; others get None)."""
  operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
                    for o in operands]
  if type(t) is ad_util.Zero:
    return [ad_util.Zero(o.aval) if ad.is_undefined_primal(o) else None
            for o in operands]
  else:
    # Start/limit indices of each operand's slab along the concat dimension.
    limit_points = np.cumsum([shape[dimension] for shape in operand_shapes])
    starts = np.zeros((len(operands), t.ndim), dtype=int)
    starts[1:, dimension] = limit_points[:-1]
    limits = np.tile(t.shape, (len(operands), 1))
    limits[:, dimension] = limit_points
    return [slicing.slice(t, start, limit) if ad.is_undefined_primal(o)
            else None for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
  """Batching rule: move every mapped axis to the front (broadcasting any
  unmapped operands) and concatenate one dimension further right."""
  size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
              if bdim is not None)
  operands = [batching.moveaxis(op, bdim, 0) if bdim is not None
              else broadcast(op, (size,))
              for op, bdim in zip(batched_args, batch_dims)]
  return concatenate(operands, dimension + 1), 0
# The concatenate_p masking rule requires use of a while-loop construct and so
# is defined in lax_control_flow.py
concatenate_p = standard_primitive(
    _concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
    _concatenate_translation_rule)
ad.deflinear2(concatenate_p, _concatenate_transpose_rule)
# NOTE(review): this assignment replaces whatever transpose handler
# deflinear2 installed above with the raw rule; the raw rule handles Zero
# cotangents itself, but the duplication looks redundant -- confirm intent
# before removing either line.
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _concatenate_lower(ctx, *xs, dimension):
  """MLIR lowering for concatenate."""
  return mhlo.ConcatenateOp(xs, mlir.i64_attr(dimension)).results
mlir.register_lowering(concatenate_p, _concatenate_lower)
def _pad_dtype_rule(operand, padding_value, *, padding_config):
  """Dtype rule for pad: operand and padding value must share a dtype.

  Raises:
    TypeError: if the two dtypes differ.
  """
  if operand.dtype == padding_value.dtype:
    return _input_dtype(operand, padding_value)
  raise TypeError(
      "pad operand and padding_value must be same dtype: got {} and {}."
      .format(operand.dtype, padding_value.dtype))
def _pad_shape_rule(operand, padding_value, *, padding_config):
  """Shape rule for pad: each output dim is lo + hi + dilated operand dim,
  where interior padding of i inserts i elements between neighbors.

  Raises:
    ValueError: if padding_config has the wrong length, has negative interior
      padding, or produces a negative dimension (possible with negative
      lo/hi padding).
  """
  del padding_value
  op_shape = np.shape(operand)
  if not len(padding_config) == np.ndim(operand):
    raise ValueError("length of padding_config must equal the number of axes "
                     f"of operand, got padding_config {padding_config} "
                     f"for operand shape {op_shape}")
  if not all(i >= 0 for _, _, i in padding_config):
    raise ValueError("interior padding in padding_config must be nonnegative, "
                     f"got padding_config {padding_config}")
  result = tuple(core.sum_dim(l, h, core.dilate_dim(d, i + 1))
                 for (l, h, i), d in zip(padding_config, op_shape))
  if not all(core.greater_equal_dim(d, 0) for d in result):
    msg = (f"Dimension size after padding is not at least 0, "
           f"got result shape {result}, for padding_config {padding_config}"
           f" and operand shape {op_shape}")
    raise ValueError(msg)
  return result
def _pad_transpose(t, operand, padding_value, *, padding_config):
  """Transpose of pad: un-pad the cotangent for the operand (negative padding
  plus strided slicing across interior padding), and for the padding value
  take the total cotangent minus what flowed to the operand."""
  if type(t) is ad_util.Zero:
    t_operand = ad_util.Zero(operand.aval) if ad.is_undefined_primal(operand) else None
    t_padv = ad_util.Zero(padding_value.aval) if ad.is_undefined_primal(padding_value) else None
  else:
    lo, hi, interior = zip(*padding_config)
    total = lambda x: _reduce_sum(x, list(range(t.ndim)))

    def t_op():
      # Negative lo/hi padding trims the edge padding off the cotangent...
      unpad_config = safe_zip(np.negative(lo), np.negative(hi),
                              np.zeros_like(interior))
      unpadded = pad(t, np.array(0., t.dtype), unpad_config)
      # ...and a strided slice skips over the interior padding positions.
      return slicing.slice(unpadded, np.zeros_like(lo), unpadded.shape,
                           np.add(interior, 1))

    t_operand = t_op() if ad.is_undefined_primal(operand) else None
    t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
  return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
  """Batching rule for pad: insert a (0, 0, 0) padding entry for the mapped
  axis; a batched padding value requires padding with zeros and selecting the
  per-batch padding value through a mask."""
  operand, padding_value = batched_args
  operand_bdim, padding_value_bdim = batch_dims
  if operand_bdim is None:
    # Only the padding value is mapped: broadcast the operand over the batch.
    operand_bdim = 0
    operand = broadcast(operand, (padding_value.shape[padding_value_bdim],))

  padding_config = list(padding_config)
  padding_config.insert(operand_bdim, (0, 0, 0))
  if padding_value_bdim is None:
    return pad(operand, padding_value, padding_config), operand_bdim

  assert padding_value_bdim == 0, padding_value_bdim
  # Pad with zeros, then substitute the batched padding value where the mask
  # marks padding positions.
  x = pad(operand, _zero(operand), padding_config)
  mask = pad(full_like(operand, True, np.bool_), False, padding_config)
  broadcasted_padding = broadcast_in_dim(padding_value, x.shape,
                                         (operand_bdim,))
  return select(mask, x, broadcasted_padding), operand_bdim
def _pad_translation_rule(ctx, avals_in, avals_out, operand, padding_value, *,
                          padding_config):
  """XLA translation for pad: delegates directly to xops.Pad."""
  return [xops.Pad(operand, padding_value,
                   xc.make_padding_config(padding_config))]

def _pad_masking_rule(padded_vals, logical_shapes, padding_config):
  """Masking rule for pad: pad the padded value and track the logical shape."""
  operand, padding_value = padded_vals
  shape, _ = logical_shapes

  out = pad(operand, padding_value, padding_config)
  # Logical size per axis is computed here as lo + logical_size*(interior+1).
  out_shape = [lo + shape[i] * (interior + 1)
               for i, (lo, hi, interior) in enumerate(padding_config)]
  padded_dims = [i for i, config in enumerate(padding_config)
                 if config != (0, 0, 0)]
  return _masked(out, out_shape, padded_dims, padding_value)

# Primitive registration for pad: shape/dtype rules, XLA translation,
# linear autodiff (transpose), batching, and masking.
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
                           translation_rule=_pad_translation_rule)
ad.deflinear2(pad_p, _pad_transpose)
batching.primitive_batchers[pad_p] = _pad_batch_rule
masking.masking_rules[pad_p] = _pad_masking_rule
def _pad_lower(ctx, x, padding_value, *, padding_config):
  """MHLO lowering for pad: unzip the (lo, hi, interior) config into attributes."""
  aval_out, = ctx.avals_out
  low, high, interior = util.unzip3(padding_config)
  return mhlo.PadOp(mlir.aval_to_ir_type(aval_out), x, padding_value,
                    mlir.dense_int_elements(low),
                    mlir.dense_int_elements(high),
                    mlir.dense_int_elements(interior)).results

mlir.register_lowering(pad_p, _pad_lower)
# The squeeze primitive exists for the benefit of masking and other
# transformations that need to keep track of axis identity.
# For example, consider reshaping a 2D array with shape (1, N) into a 1D array
# with shape (N,). This results in the following JAXpr:
# reshape[ dimension=None new_sizes=(N,) ]
# For N > 1, we can match up the output array axis with the second axis of the
# input. But for N = 1, it is not clear how axes match up: all we know from the
# JAXpr is that we are reshaping from (1, 1) to (1,).
# In contrast, squeeze[ dimensions=(0,) ] is unambiguous.
def _squeeze_dtype_rule(operand, *, dimensions):
  """dtype rule for squeeze: dtype is unchanged."""
  return operand.dtype

def _squeeze_shape_rule(operand, *, dimensions):
  """Shape rule for squeeze: drop the listed unit-sized dimensions."""
  return _compute_squeeze_shape(np.shape(operand), dimensions)
def _compute_squeeze_shape(shape, dimensions):
  """Return `shape` with the axes in `dimensions` (each of size 1) removed."""
  axes = set(dimensions)
  # Duplicate axes are rejected up front.
  if len(axes) != len(dimensions):
    raise ValueError(f"dimensions are not unique: {dimensions}")
  ndim = len(shape)
  # Each axis must be a valid non-negative index into `shape`.
  if any(d < 0 or d >= ndim for d in axes):
    raise ValueError(f"dimensions outside range [0, ndim): {dimensions}")
  # Only size-1 axes may be squeezed (symbolic sizes compared via core).
  if not all(core.symbolic_equal_dim(shape[d], 1) for d in dimensions):
    raise ValueError(
        "cannot select an axis to squeeze out which has size not equal to "
        f"one, got shape={shape} and dimensions={dimensions}")
  kept = [dim for axis, dim in enumerate(shape) if axis not in axes]
  return tuple(kept)
def _squeeze_translation_rule(ctx, avals_in, avals_out, arg, *, dimensions):
  """XLA translation for squeeze: implemented as a reshape to the output shape."""
  return [xops.Reshape(arg, avals_out[0].shape)]

def _squeeze_transpose_rule(t, operand, *, dimensions):
  # The transpose of removing unit axes is re-inserting them.
  assert ad.is_undefined_primal(operand)
  return [expand_dims(t, dimensions)]

def _squeeze_batch_rule(batched_args, batch_dims, *, dimensions):
  """Batching rule for squeeze: batch axis to front, shift squeezed axes by one."""
  operand, = batched_args
  bdim, = batch_dims
  operand = batching.moveaxis(operand, bdim, 0)
  dimensions = tuple(np.add(1, dimensions))
  return squeeze(operand, dimensions=dimensions), 0

# Primitive registration for squeeze.
squeeze_p = standard_primitive(_squeeze_shape_rule, _squeeze_dtype_rule,
                               'squeeze', _squeeze_translation_rule)
ad.deflinear2(squeeze_p, _squeeze_transpose_rule)
batching.primitive_batchers[squeeze_p] = _squeeze_batch_rule
def _squeeze_lower(ctx, operand, *, dimensions):
  """MHLO lowering for squeeze: a plain reshape to the output aval's shape."""
  del dimensions  # Implied by the output aval.
  aval_out, = ctx.avals_out
  return mhlo.ReshapeOp(mlir.aval_to_ir_type(aval_out), operand).results

mlir.register_lowering(squeeze_p, _squeeze_lower)
def _shape_as_value(shape: core.Shape):
  """Converts a shape that may contain Poly values into a JAX value."""
  if len(shape) == 0:
    # Rank-0 shape: return an empty 1-D int64 array.
    return full((0,), np.array(0, np.int64))
  # Turn each dimension into a length-1 int64 array and concatenate them.
  dims = [
      expand_dims(convert_element_type(core.dimension_as_value(d), np.int64),
                  (0,))
      for d in shape
  ]
  return concatenate(dims, dimension=0)
def _is_singleton_reshape(old, new):
# A singleton reshape is one where only singleton dimensions are added. We
# want to detect them because they can be expressed as (lazy) broadcasts.
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
  """Shape rule for reshape.

  Validates that every entry of new_sizes is nonnegative, that the total
  element count is preserved, and that `dimensions` (if given) is a
  permutation of the operand's axes. Returns the new shape as a tuple.
  """
  # Fix: the check below permits size-0 dimensions (>= 0), so the error
  # message says "nonnegative" rather than the previous, misleading
  # "positive".
  if not all(core.greater_equal_dim(d, 0) for d in new_sizes):
    msg = 'reshape new_sizes must all be nonnegative, got {}.'
    raise TypeError(msg.format(new_sizes))
  if not core.same_shape_sizes(np.shape(operand), new_sizes):
    msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
    raise TypeError(msg.format(new_sizes, np.shape(operand)))
  if dimensions is not None:
    if set(dimensions) != set(range(np.ndim(operand))):
      msg = ('reshape dimensions must be a permutation of operand dimensions, '
             'got dimensions {} for shape {}.')
      raise TypeError(msg.format(dimensions, np.shape(operand)))
  return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
  """dtype rule for reshape: dtype is unchanged."""
  return operand.dtype

def _reshape_translation_rule(ctx, avals_in, avals_out, operand, *, new_sizes,
                              dimensions):
  """XLA translation for reshape, with an optional axis permutation first."""
  if dimensions is None:
    return [xops.Reshape(operand, new_sizes)]
  else:
    # The three-argument form applies `dimensions` as a permutation before
    # reshaping.
    return [xops.Reshape(operand, dimensions, new_sizes)]
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
  """Transpose rule for reshape: reshape the cotangent back, undoing any permutation."""
  assert ad.is_undefined_primal(operand)
  if dimensions is None:
    return [reshape(t, operand.aval.shape)]
  else:
    # Reshape to the permuted input shape, then invert the permutation with
    # argsort.
    return [transpose(reshape(t, np.take(operand.aval.shape, dimensions)),
                      np.argsort(dimensions))]

def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
  """Batching rule for reshape: batch axis to front, reshape the remainder."""
  operand, = batched_args
  bdim, = batch_dims
  operand = batching.moveaxis(operand, bdim, 0)
  if dimensions is not None:
    # Keep the batch axis first and shift the permuted axes by one.
    dimensions = (0,) + tuple(np.add(1, dimensions))
  return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
def _reshape_masking_rule(padded_args, logical_shapes, polymorphic_shapes,
                          new_sizes, dimensions):
  """Masking rule for reshape: only reshapes that keep polymorphic dims intact."""
  operand, = padded_args
  old_shape, = polymorphic_shapes
  def is_poly(size): return type(size) is masking.Poly and not size.is_constant
  def merge_const_sizes(shape):
    """Merges all nonpolymorphic sizes into the previous polymorphic size."""
    poly_dims = [i for i, size in enumerate(shape) if is_poly(size)]
    return [prod(shape[start:stop])
            for start, stop in zip([0] + poly_dims, poly_dims + [len(shape)])]
  # The merged (poly, const-run) structure must agree, otherwise the reshape
  # would fragment a padded dimension.
  if merge_const_sizes(old_shape) != merge_const_sizes(new_sizes):
    raise NotImplementedError(
        "Reshape on padded dimensions causing fragmentation is not supported.")
  return reshape(operand,
                 new_sizes=masking.padded_shape_as_value(new_sizes),
                 dimensions=dimensions)

# Primitive registration for reshape.
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
                               'reshape', _reshape_translation_rule)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
masking.masking_rules[reshape_p] = _reshape_masking_rule
def _reshape_lower(ctx, x, *, new_sizes, dimensions):
  """MHLO lowering for reshape: optional transpose followed by a reshape."""
  aval_out, = ctx.avals_out
  if dimensions is not None:
    x = mhlo.TransposeOp(x, mlir.dense_int_elements(dimensions)).result
  return mhlo.ReshapeOp(mlir.aval_to_ir_type(aval_out), x).results

mlir.register_lowering(reshape_p, _reshape_lower)
def _rev_shape_rule(operand, *, dimensions):
  """Shape rule for rev: validates dimensions; the shape is unchanged."""
  _check_shapelike('rev', 'dimensions', dimensions)
  if len(set(dimensions)) != len(dimensions):
    msg = 'rev dimensions must be unique, got {}.'
    raise TypeError(msg.format(dimensions))
  if dimensions and not _max(dimensions) < operand.ndim:
    msg = ('rev dimensions must all be less than operand ndim, got dimensions '
           '{} for operand ndim {}.')
    raise TypeError(msg.format(dimensions, operand.ndim))
  return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
  """Batching rule for rev: shift reversed axes that lie past the batch axis."""
  (x,) = batched_args
  (bdim,) = batch_dims
  # Axes at or after the batch dimension move one slot to the right.
  shifted = []
  for axis in dimensions:
    shifted.append(axis + 1 if axis >= bdim else axis)
  return rev(x, shifted), bdim
# Primitive registration for rev; rev is linear and is its own transpose.
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear2(rev_p, lambda t, _, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule

def _rev_lower(ctx, x, *, dimensions):
  """MHLO lowering for rev."""
  return mhlo.ReverseOp(x, mlir.dense_int_elements(dimensions)).results

mlir.register_lowering(rev_p, _rev_lower)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, np.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(np.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
  """Batching rule for transpose: keep the batch axis first in the permutation."""
  operand, = batched_args
  bdim, = batch_dims
  perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)
  return transpose(operand, perm), 0

def _transpose_masking_rule(padded_vals, logical_shapes, permutation):
  """Masking rule for transpose: just transpose the padded value."""
  return transpose(*padded_vals, permutation=permutation)

# Primitive registration for transpose; as a linear op its transpose is the
# inverse permutation, computed via argsort.
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
                                 'transpose')
ad.deflinear2(transpose_p,
              lambda t, _, permutation: [transpose(t, np.argsort(permutation))])  # type: ignore[arg-type]
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
masking.masking_rules[transpose_p] = _transpose_masking_rule

def _transpose_lower(ctx, x, *, permutation):
  """MHLO lowering for transpose."""
  aval_out, = ctx.avals_out
  return mhlo.TransposeOp(x, mlir.dense_int_elements(permutation)).results

mlir.register_lowering(transpose_p, _transpose_lower)
def _select_shape_rule(which, *cases):
if len(cases) == 0:
raise TypeError("select must have at least one case")
if any(case.shape != cases[0].shape for case in cases[1:]):
msg = "select cases must have the same shapes, got [{}]."
raise TypeError(msg.format(", ".join([str(c.shape) for c in cases])))
if which.shape and which.shape != cases[0].shape:
msg = ("select `which` must be scalar or have the same shape as cases, "
"got `which` shape {} but case shape {}.")
raise TypeError(msg.format(which.shape, cases[0].shape))
return cases[0].shape
def _select_dtype_rule(which, *cases):
  """dtype rule for select_n: cases agree; `which` is bool (<= 2 cases) or integer."""
  _check_same_dtypes("select", False, *(c.dtype for c in cases))
  if (not dtypes.issubdtype(which.dtype, np.bool_) and
      not dtypes.issubdtype(which.dtype, np.integer)):
    raise TypeError("select `which` must be boolean or integer type, got "
                    f"{which.dtype}.")
  if dtypes.issubdtype(which.dtype, np.bool_) and len(cases) > 2:
    raise TypeError("select with boolean `which` cannot have > 2 cases.")
  return cases[0].dtype

def _select_weak_type_rule(which, *cases):
  # The result is weakly typed only if every case is weakly typed.
  return all(c.weak_type for c in cases)
def _select_transpose_rule(t, which, *cases):
  """Transpose rule for select_n: route the cotangent to whichever case was selected."""
  assert not ad.is_undefined_primal(which)
  if type(t) is ad_util.Zero:
    return [None] + [ad_util.Zero(c.aval) if ad.is_undefined_primal(c) else None
                     for c in cases]
  else:
    zeros = full_like(t, 0)
    # Case i receives t where `which == i`, and zero elsewhere; `which` itself
    # gets no cotangent (leading None).
    return [None] + [
        select(eq(which, _const(which, i)), t, zeros)
        if ad.is_undefined_primal(case) else None
        for i, case in enumerate(cases)
    ]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
  """Batching rule for select_n, with fast paths when batch axes already align."""
  which, *cases = batched_args
  which_bdim, *case_bdims = batch_dims
  size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
              if i is not None)
  # avoid transposes and some broadcasts in special cases
  if all(which_bdim == bdim for bdim in case_bdims):
    if np.shape(which) == np.shape(cases[0]):
      return select_n(which, *cases), which_bdim
    else:
      # vmapped function had a scalar which with nonscalar args
      assert np.ndim(which) == 1
      which = broadcast_in_dim(which, cases[0].shape, [which_bdim])
      return select_n(which, *cases), which_bdim
  elif np.ndim(which) == 0 and all(bdim is not None for bdim in case_bdims):
    if all(case_bdims[0] == bdim for bdim in case_bdims[1:]):
      return select_n(which, *cases), case_bdims[0]
    elif all(np.shape(cases[0]) == np.shape(c) for c in cases):
      # Same shapes but mismatched batch axes: align them all to the first.
      bdim = case_bdims[0]
      other_cases = [batching.moveaxis(c, c_bdim, bdim)
                     for c, c_bdim in zip(cases[1:], case_bdims[1:])]
      return select_n(which, cases[0], *other_cases), bdim
  # General case: move every batch axis to the front and broadcast as needed.
  which = (batching.bdim_at_front(which, which_bdim, size) if np.shape(which)
           else which)
  if not all(() == np.shape(c) for c in cases):
    cases = [batching.bdim_at_front(c, bdim, size)
             for c, bdim in zip(cases, case_bdims)]
  assert all(np.shape(cases[0]) == np.shape(c) for c in cases[1:])
  if 0 < np.ndim(which) < np.ndim(cases[0]):
    # vmapped function had a scalar which with nonscalar args
    assert np.ndim(which) == 1
    which = broadcast_in_dim(which, cases[0].shape, [0])
  if np.ndim(which) > np.ndim(cases[0]):
    assert np.ndim(cases[0]) == 0
    cases = [broadcast(c, which.shape) for c in cases]
  return select_n(which, *cases), 0
def _select_masking_rule(padded_vals, logical_shapes):
  """Masking rule for select_n; handles exactly the (which, true, false) form."""
  which_shape, true_shape, false_shape = [
      masking.padded_shape_as_value(val.shape) for val in padded_vals]
  assert np.array_equal(which_shape, true_shape)
  assert np.array_equal(which_shape, false_shape)
  return select_n(*padded_vals)

def _select_jvp(primals, tangents):
  """JVP rule for select_n: select among the case tangents with the same `which`."""
  which, *case_primals = primals
  case_tangents = tangents[1:]
  out = select_n(which, *case_primals)
  if all(type(t) is ad_util.Zero for t in case_tangents):
    out_dot = ad_util.Zero(case_tangents[0].aval)
  else:
    # Materialize symbolic zeros so select_n sees same-shaped tangent cases.
    z = _zeros(next(t for t in case_tangents if type(t) is not ad_util.Zero))
    case_tangents = [z if type(t) is ad_util.Zero else t for t in case_tangents]
    out_dot = select_n(which, *case_tangents)
  return out, out_dot
def _select_xla_translation(ctx, avals_in, avals_out, which, *cases):
  """XLA translation for select_n: a balanced binary tree of two-way selects."""
  which_aval = avals_in[0]
  if which_aval.dtype == np.dtype(np.bool_):
    # Boolean which: at most two cases; XLA's Select picks its second operand
    # when the predicate is true, hence the (cases[1], cases[0]) order.
    assert len(cases) <= 2
    return cases if len(cases) == 1 else [xops.Select(which, cases[1], cases[0])]
  def _select(offset, cases):
    # Recursively split the cases, comparing `which` against the midpoint.
    assert len(cases) > 0
    if len(cases) == 1:
      return cases[0]
    mid = len(cases) // 2
    cutoff = xla.pyval_to_ir_constant(
        ctx.builder, np.array(offset + mid, dtype=which_aval.dtype))
    return xops.Select(xops.Lt(which, cutoff),
                       _select(offset, cases[:mid]),
                       _select(offset + mid, cases[mid:]))
  return [_select(0, cases)]
def _select_mhlo_lowering(ctx, which, *cases):
  """MHLO lowering for select_n; mirrors the XLA translation rule above."""
  which_aval = ctx.avals_in[0]
  if which_aval.dtype == np.dtype(np.bool_):
    assert len(cases) <= 2
    if len(cases) == 1: return cases
    return mhlo.SelectOp(which, cases[1], cases[0]).results
  bool_shape = ir.RankedTensorType.get(which_aval.shape,
                                       ir.IntegerType.get_signless(1))
  # Choose the comparison signedness from the dtype of `which`.
  if dtypes.issubdtype(which_aval.dtype, np.signedinteger):
    compare_type = ir.StringAttr.get("SIGNED")
  else:
    compare_type = ir.StringAttr.get("UNSIGNED")
  lt = ir.StringAttr.get("LT")
  def _select(offset, cases):
    # Binary search over the case list using `which < offset + mid`.
    assert len(cases) > 0
    if len(cases) == 1:
      return cases[0]
    mid = len(cases) // 2
    pred = mhlo.CompareOp(
        bool_shape, which, mlir.full_like_aval(offset + mid, which_aval),
        lt, compare_type)
    return mhlo.SelectOp(pred, _select(offset, cases[:mid]),
                         _select(offset + mid, cases[mid:])).result
  return [_select(0, cases)]
# Primitive registration for select_n.
select_n_p = standard_primitive(
    _select_shape_rule, _select_dtype_rule, 'select_n',
    weak_type_rule=_select_weak_type_rule,
    translation_rule=_select_xla_translation)
ad.primitive_jvps[select_n_p] = _select_jvp
ad.primitive_transposes[select_n_p] = _select_transpose_rule
batching.primitive_batchers[select_n_p] = _select_batch_rule
masking.masking_rules[select_n_p] = _select_masking_rule
mlir.register_lowering(select_n_p, _select_mhlo_lowering)
def _reduce_shape_rule(*avals, computation, jaxpr, consts, dimensions):
  """Shape rule for reduce: scalar init values; reduced dims removed per operand."""
  # avals is (operands..., init_values...): split it at the midpoint.
  operand_avals, init_val_avals = split_list(avals, [len(avals) // 2])
  if any(arg.shape != () for arg in init_val_avals):
    init_val_shapes = [a.shape for a in init_val_avals]
    raise ValueError(f'reduce found non-scalar initial value: {init_val_shapes}')
  return [tuple(np.delete(op.shape, dimensions)) for op in operand_avals]

def _reduce_dtype_rule(*avals, computation, jaxpr, consts, dimensions):
  """dtype rule for reduce: each operand dtype must match its init value's dtype."""
  operand_avals, init_val_avals = split_list(avals, [len(avals) // 2])
  operand_dtypes = [dtypes.canonicalize_dtype(op.dtype) for op in operand_avals]
  init_val_dtypes = [dtypes.canonicalize_dtype(init.dtype) for init in init_val_avals]
  if operand_dtypes != init_val_dtypes:
    raise TypeError(
        "reduce operand dtypes should match corresponding initial value dtypes, "
        f"got operands={operand_avals} and initial_values={init_val_avals}")
  return operand_dtypes

def _reduce_weak_type_rule(*avals, computation, jaxpr, consts, dimensions):
  """weak_type rule for reduce: weak only if both operand and init value are weak."""
  operand_avals, init_val_avals = split_list(avals, [len(avals) // 2])
  return [op.weak_type and init_val.weak_type
          for op, init_val in safe_zip(operand_avals, init_val_avals)]
def _reduce_translation_rule(ctx, avals_in, avals_out, *values, computation,
                             jaxpr, consts, dimensions):
  """XLA translation for reduce, special-casing the single-operand form."""
  c = ctx.builder
  operands, init_values = split_list(values, [len(values) // 2])
  if len(operands) == 1:
    init_value = init_values[0]
    xla_computation = _reduction_computation(ctx, jaxpr, consts, init_value)
    return [xops.Reduce(c, operands, init_values, xla_computation, dimensions)]
  xla_computation = _reduction_computation(ctx, jaxpr, consts, init_values,
                                           singleton=False)
  # The variadic reduce returns a tuple; destructure it into separate values.
  return xla.xla_destructure(
      c, xops.Reduce(c, operands, init_values, xla_computation, dimensions))
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr,
                       consts, dimensions):
  """Batching rule for reduce; only implemented for unbatched init values."""
  # TODO(mattjj,frostig): use batch_jaxpr, delete computation (assumes poly??)
  num_operands = len(batched_args) // 2
  operands, init_values = split_list(batched_args, [num_operands])
  operand_bdims, init_value_bdims = split_list(batch_dims, [num_operands])
  if all(init_value_bdim is batching.not_mapped
         for init_value_bdim in init_value_bdims):
    size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
                if ax is not None)
    operands = [batching.bdim_at_front(arg, bdim, size)
                for arg, bdim in zip(operands, operand_bdims)]
    # The batch axis is now axis 0, so each reduced axis shifts right by one.
    new_dimensions = [d + 1 for d in dimensions]
    new_operand_bdims = [0] * num_operands
    return reduce_p.bind(*(operands + init_values),
                         computation=computation,
                         dimensions=tuple(new_dimensions),
                         consts=consts,
                         jaxpr=jaxpr), new_operand_bdims
  else:
    raise NotImplementedError  # loop and stack
def _reduction_computation(ctx, jaxpr, consts, init_values, singleton=True):
  """Builds the XLA subcomputation that combines two groups of reduction values."""
  c = ctx.builder
  platform = ctx.platform
  if singleton:
    init_values = [init_values]
  # The combiner takes (accumulators..., elements...); both halves share the
  # init-value shapes, hence the duplicated list.
  shapes = safe_map(c.get_shape, init_values + init_values)
  axis_env = xla.AxisEnv(1, (), ())  # no parallel primitives inside reductions
  subc = xc.XlaBuilder("reduction_computation")
  assert len(consts) == 0, "Reduction computations cannot have constants"
  args = [xla.parameter(subc, i, shape) for i, shape in enumerate(shapes)]
  ctx = xla.TranslationContext(subc, platform, axis_env, new_name_stack())
  out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, consts, *args)
  if singleton:
    return subc.build(out_nodes[0])
  out_nodes = xops.Tuple(subc, out_nodes)
  return subc.build(out_nodes)
def _reduce_jvp(reducer, init_values, primals, tangents, axes):
  """JVP of a general reduction via a vmapped pairwise (tree) reduction.

  Moves the reduced axes to the front and flattens them into a single axis,
  then differentiates a halving tree reduction with api.jvp.
  """
  input_shape = np.array(primals[0].shape, dtype=np.int_)
  n = np.prod(input_shape[list(axes)])
  non_axes = np.delete(np.arange(len(input_shape)), axes)
  # Move the reduced axes to the front, and flatten them to 1D.
  permutation = axes + tuple(non_axes)
  new_shape = (n,) + tuple(input_shape[non_axes])
  primals = tuple(reshape(x, new_shape, permutation) for x in primals)
  tangents = tuple(reshape(t, new_shape, permutation) for t in tangents)
  # vmap the scalar reducer over every non-reduced axis plus the tree axis.
  for d in range(len(non_axes) + 1):
    reducer = api.vmap(reducer)
  def _reduce_tree(*xs, axis=0):
    """Reduce by repeatedly splitting the array and multiplying."""
    while xs[0].shape[axis] > 1:
      n = xs[0].shape[axis]
      n1 = (n + 1) // 2
      n2 = n - n1
      xs1 = [slicing.slice_in_dim(x, 0, n1) for x in xs]
      xs2 = [slicing.slice_in_dim(x, n1, None) for x in xs]
      if n2 != n1:
        # Odd length: pad the short half with the identity (init) values.
        paddings = [(0, 0, 0)] * len(xs[0].shape)
        paddings[axis] = (0, 1, 0)
        xs2 = [pad(x2, i, paddings) for x2, i in zip(xs2, init_values)]
      xs = reducer(*(xs1 + xs2))
    if xs[0].shape[axis] == 0:
      # Empty reduction: the result is the init values, broadcast out.
      return [full(input_shape[non_axes], i) for i in init_values]
    return tuple(squeeze(x, (axis,)) for x in xs)
  return api.jvp(_reduce_tree, primals, tangents)
def _reduce_jvp_rule(primals, tangents, *, computation, jaxpr,
                     consts, dimensions):
  """JVP rule for reduce_p; requires symbolically-zero tangents for init values."""
  primal_xs, init_values = split_list(primals, [len(primals) // 2])
  tangent_xs, tangent_init = split_list(tangents, [len(tangents) // 2])
  # This test may be too strict, if a value is actually zero but we cannot prove
  # it is symbolically zero.
  if any(type(t) is not ad_util.Zero for t in tangent_init):
    raise NotImplementedError(
        "Gradient of general lax.reduce with non-zero tangents for "
        "initial values to reduction not implemented")
  reducer = core.jaxpr_as_fun(core.ClosedJaxpr(jaxpr, consts))
  return _reduce_jvp(reducer, init_values, primal_xs, tangent_xs, dimensions)
def _masking_defreducer(prim, identity):
  """Registers the generic reducer masking rule for `prim` with its identity."""
  masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)

def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
                          axes, input_shape=None, **reduce_kwargs):
  """Masking rule for reductions: overwrite padding with the identity, then reduce."""
  (padded_val,), (logical_shape,) = padded_vals, logical_shapes
  padded_shape = masking.padded_shape_as_value(padded_val.shape)
  # For each reduced axis, True inside the logical extent and False in padding.
  masks = [broadcasted_iota(np.int32, padded_shape, i) < d
           for i, d in enumerate(logical_shape) if i in axes]
  mask = _reduce(operator.and_, masks)
  masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
  prim_bind = partial(prim.bind, **reduce_kwargs)
  # Some reduction primitives also take an input_shape parameter.
  bind = prim_bind if input_shape is None else partial(prim_bind, input_shape=padded_shape)
  return bind(masked_val, axes=axes)
def _reduce_named_shape_rule(*avals, computation, jaxpr, consts, dimensions):
  """named_shape rule for reduce: join operand named shapes; init values have none."""
  # TODO(mattjj,frostig): see the TODOs noting limitations/assumptions in
  # _reduce_batching_rule. We're making the same assumptions here for now.
  num_operands = len(avals) // 2
  operand_avals, init_avals = split_list(avals, [num_operands])
  if any(a.named_shape for a in init_avals):
    raise NotImplementedError
  named_shapes = [a.named_shape for a in operand_avals]
  join = core.join_named_shapes(*(a.named_shape for a in operand_avals))
  return [join] * len(named_shapes)

# Primitive registration for the general (variadic) reduce.
reduce_p = core.Primitive('reduce')
reduce_p.multiple_results = True
reduce_p.def_impl(partial(xla.apply_primitive, reduce_p))
reduce_p.def_abstract_eval(
    partial(standard_multi_result_abstract_eval, reduce_p, _reduce_shape_rule,
            _reduce_dtype_rule, _reduce_weak_type_rule,
            _reduce_named_shape_rule))
xla.register_translation(reduce_p, _reduce_translation_rule)
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
ad.primitive_jvps[reduce_p] = _reduce_jvp_rule
def _reduce_lower(ctx, *values, computation, jaxpr, consts, dimensions):
  """MHLO lowering for reduce: emit a ReduceOp with the jaxpr as its region."""
  assert all(isinstance(x, core.ShapedArray) for x in ctx.avals_in), ctx.avals_in
  operands, init_values = util.split_list(values, [len(values) // 2])
  init_value_avals = ctx.avals_in[len(values) // 2:]
  op = mhlo.ReduceOp([mlir.aval_to_ir_type(aval) for aval in ctx.avals_out],
                     operands, init_values, mlir.dense_int_elements(dimensions))
  ir_types = [mlir.aval_to_ir_type(aval) for aval in init_value_avals]
  # The reducer region takes (accumulators..., elements...), hence two copies
  # of the init-value IR types.
  reducer = op.regions[0].blocks.append(*(ir_types + ir_types))
  with ir.InsertionPoint(reducer):
    reducer_ctx = ctx.module_context.replace(name_stack='')
    out_nodes = mlir.jaxpr_subcomp(reducer_ctx, jaxpr, consts,
                                   *([a] for a in reducer.arguments))
    mhlo.ReturnOp(util.flatten(out_nodes))
  return op.results

mlir.register_lowering(reduce_p, _reduce_lower)
def _reduce_number_dtype_rule(name, operand, *args, **kw):
  """dtype rule for numeric reductions: requires a numeric operand dtype."""
  if not dtypes.issubdtype(operand.dtype, np.number):
    raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
                    "of number.".format(name, np.dtype(operand.dtype).name))
  return dtypes.canonicalize_dtype(operand.dtype)
def _reduce_sum_shape_rule(operand, *, axes):
  """Shape rule for reduce_sum: same as the generic unary-reduction rule."""
  return _reduce_op_shape_rule(operand, axes=axes)

def _reduce_sum_translation_rule(ctx, avals_in, avals_out, operand, *, axes):
  """XLA translation for reduce_sum: reduce with `add` from a zero of operand dtype."""
  operand_aval, = avals_in
  scalar = ShapedArray((), operand_aval.dtype)
  return [xops.Reduce(
      ctx.builder, [operand],
      [xla.pyval_to_ir_constant(ctx.builder, np.array(0, operand_aval.dtype))],
      xla.primitive_subcomputation(ctx.platform, ctx.axis_env, add_p, scalar,
                                   scalar), axes)]

def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
  """Transpose rule for reduce_sum: broadcast the cotangent over the reduced axes."""
  assert ad.is_undefined_primal(operand)
  input_shape = operand.aval.shape
  broadcast_dimensions = tuple(np.delete(np.arange(len(input_shape)), axes))
  result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
  assert result.shape == input_shape
  return [result]

# Primitive registration for reduce_sum; its masking identity is 0.
reduce_sum_p = standard_primitive(
    _reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
    'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
_masking_defreducer(reduce_sum_p,
                    lambda shape, dtype: np.broadcast_to(np.array(0, dtype), shape))
def _reduce_op_shape_rule(operand, *, axes, input_shape=None):
del input_shape # Unused.
if len(axes) != len(set(axes)):
raise ValueError(f"duplicate value in 'axes' of reduction: {axes}")
if not all(0 <= a < operand.ndim for a in axes):
raise ValueError(f"reduction axes {axes} contains out-of-bounds indices for {operand}.")
axes = frozenset(axes)
return tuple(d for i, d in enumerate(operand.shape) if i not in axes)
def _reduce_prod_translation_rule(ctx, avals_in, avals_out, operand, *, axes):
  """XLA translation for reduce_prod: reduce with `mul` from a one of operand dtype."""
  operand_aval, = avals_in
  scalar = ShapedArray((), operand_aval.dtype)
  return [xops.Reduce(
      ctx.builder, [operand],
      [xla.pyval_to_ir_constant(ctx.builder, np.array(1, operand_aval.dtype))],
      xla.primitive_subcomputation(ctx.platform, ctx.axis_env, mul_p, scalar,
                                   scalar), axes)]

def _reduce_prod_jvp_rule(primals, tangents, *, axes):
  """JVP rule for reduce_prod, via the generic tree-reduction JVP with identity 1."""
  reducer = lambda x, y: [mul(x, y)]
  primals_out, tangents_out = _reduce_jvp(reducer, [_const(primals[0], 1)],
                                          primals, tangents, axes)
  return primals_out[0], tangents_out[0]

# Primitive registration for reduce_prod; its masking identity is 1.
reduce_prod_p = standard_primitive(
    _reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
    'reduce_prod', _reduce_prod_translation_rule)
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
_masking_defreducer(reduce_prod_p,
                    lambda shape, dtype: np.broadcast_to(np.array(1, dtype), shape))
def _reduce_chooser_shape_rule(operand, *, axes):
  """Shape rule for min/max-style (chooser) reductions: drop the reduced axes."""
  return tuple(np.delete(operand.shape, axes))

def _reduce_chooser_translation_rule(prim, identity, ctx, avals_in, avals_out,
                                     operand, *, axes):
  """XLA translation shared by reduce_min/reduce_max, parameterized by identity."""
  operand_aval, = avals_in
  scalar = ShapedArray((), operand_aval.dtype)
  return [xops.Reduce(
      ctx.builder, [operand],
      [xla.pyval_to_ir_constant(ctx.builder, identity(operand_aval.dtype))],
      xla.primitive_subcomputation(ctx.platform, ctx.axis_env, prim, scalar,
                                   scalar), axes)]

def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
  """JVP for min/max reductions: spread the tangent evenly over tied winners."""
  # TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
  # locations in a single pass (rather than comparing equality) and use a
  # gather, and/or even push along the chosen elements of g (b/112040122)
  shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
  location_indicators = convert_element_type(
      _eq_meet(operand, reshape(ans, shape)), g.dtype)
  # Dividing by the number of ties shares the tangent equally among them.
  counts = _reduce_sum(location_indicators, axes)
  return div(_reduce_sum(mul(g, location_indicators), axes), counts)
# Primitive registration for reduce_max / reduce_min; their masking identities
# are -inf and +inf respectively.
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
                                       _get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
                                  'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_masking_defreducer(reduce_max_p,
                    lambda shape, dtype: np.broadcast_to(np.array(-np.inf, dtype), shape))

_reduce_min_translation_rule = partial(
    _reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
                                  'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
_masking_defreducer(reduce_min_p,
                    lambda shape, dtype: np.broadcast_to(np.array(np.inf, dtype), shape))
def _argminmax_shape_rule(operand, *, axes, index_dtype):
  """Shape rule for argmin/argmax: exactly one non-empty axis, which is removed."""
  axis, = axes
  if not (0 <= axis < len(operand.shape)):
    raise ValueError(f"Invalid axis {axis} for operand shape {operand.shape}")
  if not core.greater_equal_dim(operand.shape[axis], 1):
    raise ValueError("argmin and argmax require non-empty reduced dimension. "
                     f"operand.shape={operand.shape} axis={axis}")
  return tuple(np.delete(operand.shape, axis))

def _argminmax_dtype_rule(operand, *, axes, index_dtype):
  """dtype rule for argmin/argmax: result has the requested integer index_dtype."""
  if not dtypes.issubdtype(index_dtype, np.integer):
    raise TypeError("index_dtype must be an integer type, but got {}"
                    .format(np.dtype(index_dtype).name))
  return index_dtype
def _compute_argminmax(value_comparator, get_identity,
                       operand, *, index_dtype, axes):
  """Implements argmin/argmax as a variadic reduce over (value, index) pairs."""
  # value_comparator is either lax.lt (for argmin) or lax.gt
  # get_identity(operand.dtype) is inf for argmin or -inf for argmax
  axis, = axes
  indices = broadcasted_iota(index_dtype, np.shape(operand), axis)
  def reducer_fn(op_val_index, acc_val_index):
    op_val, op_index = op_val_index
    acc_val, acc_index = acc_val_index
    # Pick op_val if Lt (for argmin) or if NaN
    pick_op_val = bitwise_or(value_comparator(op_val, acc_val),
                             ne(op_val, op_val))
    # If x and y are not NaN and x = y, then pick the first
    pick_op_index = bitwise_or(pick_op_val,
                               bitwise_and(eq(op_val, acc_val),
                                           lt(op_index, acc_index)))
    return (select(pick_op_val, op_val, acc_val),
            select(pick_op_index, op_index, acc_index))
  res = reduce([operand, indices],
               [get_identity(operand.dtype), np.array(0, index_dtype)],
               reducer_fn,
               axes)
  # Only the index half of the (value, index) reduction result is returned.
  return res[1]
# Primitive registration for argmin/argmax. Both the XLA translations and the
# MHLO lowerings are derived from _compute_argminmax; the gradient is zero.
_argmin_translation_rule = xla.lower_fun(
    partial(_compute_argminmax, lt, _get_min_identity),
    multiple_results=False, new_style=True)
_argmax_translation_rule = xla.lower_fun(
    partial(_compute_argminmax, gt, _get_max_identity),
    multiple_results=False, new_style=True)

argmin_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
                              'argmin', _argmin_translation_rule,
                              weak_type_rule=_strip_weak_type)
batching.defreducer(argmin_p)
ad.defjvp_zero(argmin_p)

argmax_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
                              'argmax', _argmax_translation_rule,
                              weak_type_rule=_strip_weak_type)
batching.defreducer(argmax_p)
ad.defjvp_zero(argmax_p)

mlir.register_lowering(argmin_p, mlir.cache_lowering(mlir.lower_fun(
    partial(_compute_argminmax, lt, _get_min_identity),
    multiple_results=False)))
mlir.register_lowering(argmax_p, mlir.cache_lowering(mlir.lower_fun(
    partial(_compute_argminmax, gt, _get_max_identity),
    multiple_results=False)))
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != np.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(np.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, ctx, avals_in, avals_out,
                                     operand, *, axes):
  """XLA translation shared by reduce_and/reduce_or, parameterized by identity."""
  scalar = ShapedArray((), np.bool_)
  return [xops.Reduce(
      ctx.builder, [operand],
      [xla.pyval_to_ir_constant(ctx.builder, identity(np.bool_))],
      xla.primitive_subcomputation(ctx.platform, ctx.axis_env, prim, scalar,
                                   scalar), axes)]

# reduce_or pairs with the max identity, reduce_and with the min identity.
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
                                      or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
                                 'reduce_or', _reduce_or_translation_rule,
                                 weak_type_rule=_strip_weak_type)
batching.defreducer(reduce_or_p)

_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
                                       and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
                                  'reduce_and', _reduce_and_translation_rule,
                                  weak_type_rule=_strip_weak_type)
batching.defreducer(reduce_and_p)
def _unary_reduce_lower(reducer, unit_factory, ctx, x, *, axes):
  """MHLO lowering shared by the unary reductions (sum/prod/or/and/min/max)."""
  aval_out, = ctx.avals_out
  dtype = aval_out.dtype
  op = mhlo.ReduceOp([mlir.aval_to_ir_type(aval_out)], [x],
                     mlir.ir_constants(unit_factory(aval_out.dtype)),
                     mlir.dense_int_elements(axes))
  scalar_type = mlir.aval_to_ir_type(core.ShapedArray((), dtype))
  # The combiner region takes two scalars and returns their combination.
  reducer_region = op.regions[0].blocks.append(scalar_type, scalar_type)
  with ir.InsertionPoint(reducer_region):
    add = reducer(*reducer_region.arguments)
    mhlo.ReturnOp(add.results)
  return op.results

# Each unary reduction is lowered with its combining op and identity value.
mlir.register_lowering(reduce_sum_p, partial(_unary_reduce_lower, mhlo.AddOp,
                                             lambda dtype: np.array(0, dtype)))
mlir.register_lowering(reduce_prod_p, partial(_unary_reduce_lower, mhlo.MulOp,
                                              lambda dtype: np.array(1, dtype)))
mlir.register_lowering(reduce_or_p, partial(_unary_reduce_lower, mhlo.OrOp,
                                            lambda dtype: np.array(False, dtype)))
mlir.register_lowering(reduce_and_p, partial(_unary_reduce_lower, mhlo.AndOp,
                                             lambda dtype: np.array(True, dtype)))
mlir.register_lowering(reduce_min_p, partial(_unary_reduce_lower, mlir.min_mhlo,
                                             _get_min_identity))
mlir.register_lowering(reduce_max_p, partial(_unary_reduce_lower, mlir.max_mhlo,
                                             _get_max_identity))
def _reduce_precision_shape_rule(operand, *, exponent_bits, mantissa_bits):
exponent_bits = operator.index(exponent_bits)
mantissa_bits = operator.index(mantissa_bits)
if exponent_bits < 1:
raise ValueError(f"reduce_precision: exponent_bits must be positive; got {exponent_bits}")
if mantissa_bits < 0:
raise ValueError(f"reduce_precision: mantissa_bits must be non-negative; got {mantissa_bits}")
return operand.shape
# reduce_precision: elementwise truncation of a float's exponent/mantissa.
reduce_precision_p = standard_primitive(
    _reduce_precision_shape_rule,
    partial(unop_dtype_rule, _identity, _float, 'reduce_precision'),
    name='reduce_precision')
batching.defvectorized(reduce_precision_p)
masking.defvectorized(reduce_precision_p)
def _reduce_precision_lower(ctx, operand, *, exponent_bits, mantissa_bits):
  # MHLO lowering: bit counts become i32 attributes on ReducePrecisionOp.
  aval_out, = ctx.avals_out
  return mhlo.ReducePrecisionOp(mlir.aval_to_ir_type(aval_out), operand,
                                mlir.i32_attr(exponent_bits),
                                mlir.i32_attr(mantissa_bits)).results
mlir.register_lowering(reduce_precision_p, _reduce_precision_lower)
# Unsigned/signed integer dtypes keyed by bit width; used when bitcasting
# floats into integers for total-order comparison (see _float_to_int_for_sort).
_UINT_DTYPES = {
  16: np.dtype(np.uint16),
  32: np.dtype(np.uint32),
  64: np.dtype(np.uint64),
}
_INT_DTYPES = {
  16: np.dtype(np.int16),
  32: np.dtype(np.int32),
  64: np.dtype(np.int64),
}
def _sort_abstract_eval(*args, **kwargs):
args = tuple(raise_to_shaped(arg) for arg in args)
if any(arg.shape != args[0].shape for arg in args[1:]):
shapes = " ".join(str(a.shape) for a in args)
raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
return args
def _float_to_int_for_sort(x):
  """Bitcast float values to integers that compare in the same total order.

  The resulting order is -NaN < -inf < ... < -0 == 0 < ... < inf < NaN, with
  all zeros (and all NaNs) mapped to a single canonical representative so
  stable sorts behave deterministically.
  """
  # Switch from a floating point value to a integer value in such a way that
  # when using the integer value to compare, we get the same result for normal
  # values, and -nan is treated as the smallest value, and nan is treated as
  # the largest value.
  # If f is a float, and
  # x = bit_cast<int32>(f);
  # y = x < 0 ? int32_max - x : x;
  # then y is ordered as an int32 such that finite values have the obvious
  # order. In this scheme, -0 would be before 0, and -NaN and NaN appear at
  # the beginning and end of the ordering. This causes issues for stable
  # sorts, so we avoid this by standardizing the representation of zeros
  # and NaNs in the output.
  # Note that in order to avoid -x to overflow, we calculate
  # int32_max - x as unsigned, and then convert back to signed.
  if x.dtype == dtypes.bfloat16:
    # bfloat16 has no integer of matching width; widen to float32 first.
    x = convert_element_type(x, np.float32)
  nbits = np.finfo(x).bits
  signed_dtype = _INT_DTYPES[nbits]
  unsigned_dtype = _UINT_DTYPES[nbits]
  signed = bitcast_convert_type(x, signed_dtype)
  unsigned = bitcast_convert_type(x, unsigned_dtype)
  # We cannot standardize zeros in x because XLA elides this is some cases.
  # We cannot standardize NaNs in x because it triggers jax.debug_nans
  # So instead we do these replacements in the signed integer representation.
  # Standardize zeros:
  signed = select(eq(x, _zero(x)), _zeros(signed), signed)
  # Standardize nans:
  signed_nan = x.dtype.type(np.nan).view(signed_dtype)
  signed = select(_isnan(x), full_like(signed, signed_nan), signed)
  flipped = bitcast_convert_type(
    sub(unsigned_dtype.type(np.iinfo(signed_dtype).max), unsigned), signed_dtype)
  return select(lt(signed, _zero(signed)), flipped, signed)
# Default lexicographic less-than comparator over the first `num_keys`
# operand pairs.  For floating point keys a total order is used:
# -NaN < -infinity < ... < 0 (== -0) < ... < infinity < NaN, with all NaN
# representations treated as equivalent.  Complex keys compare as
# (real, imag) pairs, following NumPy's semantics.  The construction follows:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands, num_keys=1):
  x_keys, y_keys = _operands_to_keys(*operands, num_keys=num_keys)
  # Fold from the least-significant key to the most-significant one:
  # result = lt(k) OR (eq(k) AND result-for-less-significant-keys).
  acc = None
  for xk, yk in zip(reversed(x_keys), reversed(y_keys)):
    if acc is None:
      acc = lt(xk, yk)
    else:
      acc = bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), acc))
  return acc
# Lexicographic less-than-or-equal comparator; used by the searchsorted()
# implementation.  Same fold as _sort_lt_comparator except the
# least-significant key comparison uses `le` instead of `lt`.
def _sort_le_comparator(*operands, num_keys=1):
  x_keys, y_keys = _operands_to_keys(*operands, num_keys=num_keys)
  acc = None
  for xk, yk in zip(reversed(x_keys), reversed(y_keys)):
    if acc is None:
      acc = le(xk, yk)
    else:
      acc = bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), acc))
  return acc
def _operands_to_keys(*operands, num_keys=1):
  """Build comparator key lists for the first `num_keys` operand pairs.

  `operands` interleaves lhs/rhs scalars (x0, y0, x1, y1, ...).  Float keys
  are remapped through _float_to_int_for_sort so integer comparison yields a
  total order; complex keys contribute a (real, imag) key pair each.
  """
  assert len(operands) >= 2 and len(operands) % 2 == 0, operands
  assert len(operands) // 2 >= num_keys, (operands, num_keys)
  x_keys, y_keys = [], []
  for x, y in zip(operands[:2*num_keys:2], operands[1:2*num_keys:2]):
    assert x.dtype == y.dtype, (x.dtype, y.dtype)
    if dtypes.issubdtype(x.dtype, np.complexfloating):
      x_keys.extend([_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))])
      y_keys.extend([_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))])
    elif dtypes.issubdtype(x.dtype, np.floating):
      x_keys.append(_float_to_int_for_sort(x))
      y_keys.append(_float_to_int_for_sort(y))
    else:
      x_keys.append(x)
      y_keys.append(y)
  return x_keys, y_keys
def _sort_translation_rule(ctx, avals_in, avals_out, *operands, dimension,
                           is_stable, num_keys):
  """XLA translation for sort: builds the comparator subcomputation and
  emits xops.Sort over all operands."""
  c = ctx.builder
  types = [c.get_shape(x).xla_element_type() for x in operands]
  subc = xc.XlaBuilder("sort_lt_comparator")
  # The comparator subcomputation takes a (lhs, rhs) scalar pair per operand.
  params = [xla.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
            for i, typ in enumerate(types) for j in range(2)]
  result = xla.lower_fun(partial(_sort_lt_comparator, num_keys=num_keys),
                         backend=ctx.platform,
                         multiple_results=False)(subc, *params)
  comparator = subc.build(result)
  out = xops.Sort(c, operands, dimension=dimension, is_stable=is_stable,
                  comparator=comparator)
  # A single operand sorts to a single array; multiple operands to a tuple.
  return xla.xla_destructure(c, out) if len(operands) != 1 else [out]
def _sort_jvp(primals, tangents, *, dimension, is_stable, num_keys):
  """JVP for sort: co-sort an extra iota operand to recover the permutation,
  then index each tangent through that permutation."""
  shape = primals[0].shape
  iotas = []
  for dim, size in enumerate(shape):
    # Use int64 indices only when int32 could overflow.
    dtype = np.int32 if size < np.iinfo(np.int32).max else np.int64
    iotas.append(broadcasted_iota(dtype, shape, dim))
  primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension,
                        is_stable=is_stable, num_keys=num_keys)
  idx = tuple(primals[-1] if i == dimension else iotas[i]
              for i in range(len(shape)))
  tangents_out = tuple(t if type(t) is ad_util.Zero else t[idx] for t in tangents)
  return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension, is_stable, num_keys):
  """Batching rule for sort: broadcast unbatched operands, align every batch
  axis to one position, and shift `dimension` past the batch axis."""
  prototype_arg, new_bdim = next(
    (a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
  new_args = []
  for arg, bdim in zip(batched_args, batch_dims):
    if bdim is None:
      # Broadcast the unbatched operand along the new batch axis.
      dims = np.delete(np.arange(prototype_arg.ndim), new_bdim)
      new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
    else:
      new_args.append(batching.moveaxis(arg, bdim, new_bdim))
  new_dimension = dimension + (new_bdim <= dimension)
  bdims = (new_bdim,) * len(new_args)
  return (sort_p.bind(*new_args, dimension=new_dimension, is_stable=is_stable, num_keys=num_keys),
          bdims)
# sort primitive registration.
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.register_translation(sort_p, _sort_translation_rule)
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _sort_lower(ctx, *operands, dimension, is_stable, num_keys):
  """MLIR lowering for sort: emit mhlo.SortOp and lower the lt-comparator
  into its comparator region."""
  assert all(isinstance(x, core.ShapedArray) for x in ctx.avals_in), ctx.avals_in
  sort = mhlo.SortOp([mlir.aval_to_ir_type(aval) for aval in ctx.avals_out],
                     mlir.flatten_lowering_ir_args(operands),
                     mlir.i64_attr(dimension), ir.BoolAttr.get(is_stable))
  # The comparator region takes a (lhs, rhs) scalar pair per operand.
  scalar_avals = [aval.update(shape=()) for aval in ctx.avals_in]
  scalar_types = safe_map(mlir.aval_to_ir_type, scalar_avals)
  comparator = sort.comparator.blocks.append(
      *util.flatten(zip(scalar_types, scalar_types)))
  with ir.InsertionPoint(comparator):
    lower_comparator = mlir.lower_fun(partial(_sort_lt_comparator),
                                      multiple_results=False)
    sub_ctx = mlir.LoweringRuleContext(
        module_context = ctx.module_context,
        primitive=None,
        avals_in=util.flatten(zip(scalar_avals, scalar_avals)),
        avals_out=[core.ShapedArray((), np.bool_)])
    out = lower_comparator(sub_ctx, *[[a] for a in comparator.arguments],
                           num_keys=num_keys)
    mhlo.ReturnOp(util.flatten(out))
  return sort.results
mlir.register_lowering(sort_p, _sort_lower)
def _top_k_abstract_eval(operand, *, k):
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
if len(operand.shape) == 0:
raise TypeError("top_k operand must have >= 1 dimension, got {}"
.format(operand.shape))
shape = list(operand.shape)
if shape[-1] < k:
msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
raise ValueError(msg.format(k, shape))
shape[-1] = k
return (operand.update(shape=shape, dtype=operand.dtype,
weak_type=operand.weak_type),
operand.update(shape=shape, dtype=np.dtype(np.int32)))
def _top_k_jvp(primals, tangents, *, k):
  """JVP for top_k: gather the tangent at the indices the primal top_k
  selected; the indices output carries a symbolic-zero tangent."""
  operand, = primals
  tangent, = tangents
  primals_out = top_k(operand, k)
  if type(tangent) is ad_util.Zero:
    tangent_out = ad_util.Zero.from_value(primals_out[0])
  else:
    _, k_idxs = primals_out
    idx_shape = k_idxs.shape
    rank = len(idx_shape)
    gather_index_shape = idx_shape + (1,)
    gather_indices = []
    # Build full gather coordinates: an iota per leading axis, plus the
    # top-k indices for the minor axis.
    for i in range(rank-1):
      _iota = iota(k_idxs.dtype, idx_shape[i])
      _iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
      gather_indices.append(_iota)
    gather_indices.append(reshape(k_idxs, gather_index_shape))
    gather_indices = concatenate(gather_indices, dimension=rank)
    slice_sizes = (1,) * rank
    dnums = slicing.GatherDimensionNumbers(
      offset_dims=(),
      collapsed_slice_dims=tuple(range(rank)),
      start_index_map=tuple(range(rank)))
    tangent_out = slicing.gather(tangent, gather_indices, dnums, slice_sizes)
  return primals_out, (tangent_out, ad_util.Zero.from_value(primals_out[1]))
def _top_k_batch_rule(batched_args, batch_dims, *, k):
  """Batching rule for top_k.

  top_k always operates on the minor axis, so if the batch axis is the minor
  axis we swap it with its neighbor, run top_k, and swap back; otherwise the
  batch axis passes through unchanged.
  """
  operand, = batched_args
  bdim, = batch_dims
  if bdim == operand.ndim-1:
    perm = np.arange(operand.ndim)
    perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]
    top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)
    return (transpose(top_k_v, perm),
            transpose(top_k_i, perm)), (bdim, bdim)
  else:
    return top_k(operand, k=k), (bdim, bdim)
def _top_k_translation_rule(ctx, avals_in, avals_out, x, *, k):
  # xops.TopK returns a tuple (values, indices); destructure into two results.
  return xla.xla_destructure(ctx.builder, xops.TopK(x, k))
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.register_translation(top_k_p, _top_k_translation_rule)
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _stop_gradient_jvp_rule(primals, tangents):
  # if we don't call stop_gradient here, we'd only peel off one autodiff tracer
  x, = primals
  return stop_gradient(x), ad_util.Zero.from_value(x)
def _stop_gradient_batch_rule(batched_args, batch_dims):
  # stop_gradient is elementwise, so the batch axis passes through untouched.
  x, = batched_args
  dim, = batch_dims
  return stop_gradient(x), dim
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(_=None):
  """Creates an XLA token value with no preconditions for sequencing effects.
  Experimental.
  The argument is ignored. It exists for backward compatibility.
  """
  return create_token_p.bind()
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda *_: abstract_token)
xla.register_translation(create_token_p,
                         lambda ctx, *_: [xops.CreateToken(ctx.builder)])
def _create_token_lowering(ctx, *operands):
  # MLIR lowering: emit a CreateTokenOp with the token type and no operands.
  aval_out, = ctx.avals_out
  return mhlo.CreateTokenOp(mlir.aval_to_ir_type(aval_out)).results
mlir.register_lowering(create_token_p, _create_token_lowering)
def after_all(*operands):
  """Merges one or more XLA token values. Experimental.
  Wraps the XLA AfterAll operator."""
  return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
  # All inputs must be tokens; the merged result is again a token.
  if any(x is not abstract_token for x in operands):
    raise TypeError("Arguments to after_all must be tokens")
  return abstract_token
def _after_all_translation_rule(ctx, avals_in, avals_out, *operands):
  return [xops.AfterAll(ctx.builder, operands)]
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.register_translation(after_all_p, _after_all_translation_rule)
def _after_all_lowering(ctx, *operands):
  aval_out, = ctx.avals_out
  return mhlo.AfterAllOp(mlir.aval_to_ir_type(aval_out), operands).results
mlir.register_lowering(after_all_p, _after_all_lowering)
def infeed(token, shape=None, partitions=None):
  """Consumes an infeed value of `shape` from the host. Experimental.
  `token` is used to sequence infeed and outfeed effects.
  `partitions` may be specified inside a `sharded_jit` function.
  """
  flat_shapes, treedef = pytree.flatten(shape)
  for shape in flat_shapes:
    if not isinstance(shape, ShapedArray):
      raise TypeError("shape argument to infeed must be a pytree of "
                      "ShapedArray values, got {}".format(shape))
  if partitions is not None:
    # Always replicate token.
    # We specifically use type() to raise an error for PartitionSpecs.
    if type(partitions) != tuple: # pylint: disable=unidiomatic-typecheck
      raise ValueError(f"'partitions' argument to infeed should be a tuple, "
                       f"got {partitions}")
    partitions = partitions + (None,)
  xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes),
                               partitions=partitions)
  # Rebuild the user's pytree structure and append the new sequencing token.
  return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])
def _infeed_abstract_eval(token, *, shapes, partitions):
  """Abstract eval for infeed: the outputs are the requested shapes followed
  by a fresh sequencing token."""
  if token is not abstract_token:
    raise TypeError("First argument to infeed must be a token")
  return (*shapes, abstract_token)
def _infeed_translation_rule(ctx, avals_in, avals_out, token, *, shapes,
                             partitions):
  """XLA translation for infeed: request a tuple of `shapes` plus a token,
  optionally sharded per `partitions`."""
  c = ctx.builder
  shape = tuple(shape.with_major_to_minor_layout_if_absent()
                for x in shapes for shape in xla.aval_to_xla_shapes(x))
  build_infeed = partial(xops.InfeedWithToken, token,
                         xla_client.Shape.tuple_shape(shape))
  if partitions:
    xs_and_token = xla.with_sharding(c, partitions, build_infeed)
  else:
    # Note that infeed will default to replication if inside a sharded
    # computation and no sharding is specified.
    xs_and_token = build_infeed()
  xs = xops.GetTupleElement(xs_and_token, 0)
  token = xops.GetTupleElement(xs_and_token, 1)
  return [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.register_translation(infeed_p, _infeed_translation_rule)
def _infeed_lowering(ctx, token, *, shapes, partitions):
  """MLIR lowering for infeed: emit mhlo.InfeedOp with explicit
  major-to-minor layouts for each requested shape."""
  output_types = safe_map(mlir.aval_to_ir_types, ctx.avals_out[:-1])
  flat_output_types = util.flatten(output_types)
  # TODO(phawkins): verify `shapes` have a major-to-minor layout.
  layouts = ir.ArrayAttr.get([
      ir.ArrayAttr.get(
          [mlir.i64_attr(i)
           for i in range(len(aval.shape) - 1, -1, -1)])
      for aval in shapes
  ])
  infeed = mhlo.InfeedOp(flat_output_types + [mhlo.TokenType.get()], token,
                         ir.StringAttr.get(''), layouts)
  if partitions is not None:
    mlir.set_sharding(infeed, xla.sharding_to_proto(partitions))
  token = infeed.results[-1]
  outs = infeed.results[:-1]
  return util.unflatten(outs, safe_map(len, output_types)) + [[
      token,
  ]]
mlir.register_lowering(infeed_p, _infeed_lowering)
def outfeed(token, xs, partitions=None):
  """Outfeeds value `xs` to the host. Experimental.

  `token` sequences this outfeed relative to other infeed/outfeed effects.
  `partitions` may be specified inside a `sharded_jit` or `pjit` function.
  """
  # type() rather than isinstance() so that PartitionSpec (a tuple subclass)
  # is rejected with a clear error.
  if partitions is not None and type(partitions) != tuple:  # pylint: disable=unidiomatic-typecheck
    raise ValueError(f"'partitions' argument to outfeed should be a tuple, "
                     f"got {partitions}")
  flat_xs, _ = pytree.flatten(xs)
  return outfeed_p.bind(token, *flat_xs, partitions=partitions)
def _outfeed_abstract_eval(token, *xs, partitions):
  """Abstract eval for outfeed: validates the input token and yields a
  fresh token; the payload avals are not inspected."""
  del xs, partitions  # only the token is checked here
  if token is not abstract_token:
    raise TypeError("First argument to outfeed must be a token")
  return abstract_token
def _outfeed_translation_rule(ctx, avals_in, avals_out, token, *xs, partitions):
  """XLA translation for outfeed: tuple the payloads and emit
  OutfeedWithToken, optionally sharded per `partitions`."""
  c = ctx.builder
  t = xops.Tuple(c, xs)
  if partitions is not None:
    return [xla.with_sharding(c, partitions, xops.OutfeedWithToken,
                              t, token, c.get_shape(t))]
  else:
    return [xops.OutfeedWithToken(t, token, c.get_shape(t))]
outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.register_translation(outfeed_p, _outfeed_translation_rule)
def _outfeed_lowering(ctx, token, *xs, partitions):
  # MLIR lowering for outfeed; the result type is the token type.
  token_aval = ctx.avals_in[0]
  outfeed = mhlo.OutfeedOp(
      mlir.aval_to_ir_type(token_aval), mlir.flatten_lowering_ir_args(xs),
      token, ir.StringAttr.get(''))
  if partitions is not None:
    mlir.set_sharding(outfeed, xla.sharding_to_proto(partitions))
  return outfeed.results
mlir.register_lowering(outfeed_p, _outfeed_lowering)
def rng_uniform(a, b, shape):
  """Stateful PRNG generator. Experimental and its use is discouraged.
  Returns uniformly distributed random numbers in the range [a, b)
  You should use jax.random for most purposes; this function exists only for
  niche use cases with special performance requirements.
  This API may be removed at any time.
  """
  # a and b must be scalars of the same dtype (checked in the abstract eval).
  return rng_uniform_p.bind(a, b, shape=tuple(shape))
def _rng_uniform_abstract_eval(a, b, *, shape):
if a.dtype != b.dtype:
raise ValueError(
"Arguments to rng_uniform must have identical dtypes, got {} "
"and {}.".format(a.dtype, b.dtype))
if a.shape != () or b.shape != ():
raise ValueError(
"Arguments to rng_uniform must be scalars; got shapes {} and {}."
.format(a.shape, b.shape))
return a.update(shape=shape, dtype=a.dtype,
weak_type=(a.weak_type and b.weak_type))
def _rng_uniform_translation_rule(ctx, avals_in, avals_out, a, b, *, shape):
  c = ctx.builder
  xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
  return [xops.RngUniform(a, b, xla_shape)]
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.register_translation(rng_uniform_p, _rng_uniform_translation_rule)
def _rng_uniform_lowering(ctx, a, b, *, shape):
  aval_out, = ctx.avals_out
  # The output shape is passed as an i64 constant operand; keep it as exact
  # int64 (no dtype canonicalization), as RngUniformOp requires.
  shape, = mlir.ir_constants(np.array(aval_out.shape, np.int64),
                             canonicalize_types=False)
  return mhlo.RngUniformOp(a, b, shape).results
mlir.register_lowering(rng_uniform_p, _rng_uniform_lowering)
def _rng_bit_generator_shape_rule(key, *, shape, dtype, algorithm):
del dtype, algorithm
return (key.shape, tuple(shape))
def _rng_bit_generator_dtype_rule(key, *, shape, dtype, algorithm):
del shape, algorithm
return (key.dtype, dtype)
def _rng_bit_generator_weak_type_rule(key, *, shape, dtype, algorithm):
del shape, dtype, algorithm
return (key.weak_type, False)
def _rng_bit_generator_translation_rule(
    ctx, avals_in, avals_out, key, *, shape, dtype, algorithm):
  """XLA translation for rng_bit_generator.

  Handles the u32[4] <-> u64[2] key-layout conversion and widens u8/u16
  requests to u32 before truncating; see inline comments.
  """
  c = ctx.builder
  key_shape, key_dtype = c.get_shape(key).dimensions(), c.get_shape(key).numpy_dtype()
  # While the RngBitGenerator HLO accepts a u64[2] key on all backends, we
  # typically represent the key argument to this primitive as a u32[4] so as to
  # sidestep issues with the jax_enable_x64=False configuration. As a result, we
  # need to convert u32[4] -> u64[2] here in the translation rule. However, we
  # also polymorphically allow a u64[2] for backward compatibility.
  #
  # Separately, xops.RngBitGenerator doesn't support generating u8 or
  # u16, so we request u32 and truncate in that case.
  assert ((key_shape == (4,) and key_dtype == np.dtype('uint32')) or
          (key_shape == (2,) and key_dtype == np.dtype('uint64'))), (key_shape, key_dtype)
  dtype = np.dtype(dtype)
  if dtype == np.dtype('uint32') or dtype == np.dtype('uint64'):
    rbg_dtype = dtype
  else:
    rbg_dtype = np.dtype('uint32')
  xla_shape = xc.Shape.array_shape(rbg_dtype, shape)
  if key_dtype == np.dtype('uint32'):
    u64_etype = xla.dtype_to_primitive_type(np.dtype('uint64'))
    key = xops.BitcastConvertType(xops.Reshape(key, (2, 2)), u64_etype)
  out_key, out_vals = xla.xla_destructure(
    c, xops.RngBitGenerator(algorithm, key, xla_shape))
  if key_dtype == np.dtype('uint32'):
    # Convert the new key back to the caller's u32[4] representation.
    u32_etype = xla.dtype_to_primitive_type(np.dtype('uint32'))
    out_key = xops.Reshape(xops.BitcastConvertType(out_key, u32_etype), (4,))
  if rbg_dtype != dtype:
    out_vals = xops.ConvertElementType(
      out_vals, xla.dtype_to_primitive_type(dtype))
  return [out_key, out_vals]
def _rng_bit_generator_named_shape_rule(key, *, shape, dtype, algorithm):
  # Both outputs inherit the key's named shape.
  return [key.named_shape, key.named_shape]
rng_bit_generator_p = Primitive("rng_bit_generator")
rng_bit_generator_p.multiple_results = True
rng_bit_generator_p.def_impl(
    partial(xla.apply_primitive, rng_bit_generator_p))
rng_bit_generator_p.def_abstract_eval(
    partial(standard_multi_result_abstract_eval, rng_bit_generator_p,
            _rng_bit_generator_shape_rule, _rng_bit_generator_dtype_rule,
            _rng_bit_generator_weak_type_rule,
            _rng_bit_generator_named_shape_rule))
xla.register_translation(rng_bit_generator_p,
                         _rng_bit_generator_translation_rule)
RandomAlgorithm = xops.RandomAlgorithm
# Make str(RandomAlgorithm.X) print the enum member's name.
RandomAlgorithm.__str__ = lambda algorithm: algorithm.name  # type: ignore[assignment]
def _array_copy(arr):
  # Bind the copy primitive; see the comment below for why copies are kept.
  return copy_p.bind(arr)
# The copy_p primitive exists for expressing making copies of runtime arrays.
# For that reason we don't simplify it out of jaxprs (e.g. for jit invariance).
# It's used in jnp.array(x, copy=True), which is the user-facing API.
copy_p = core.Primitive('copy')
copy_p.def_impl(partial(xla.apply_primitive, copy_p))
copy_p.def_abstract_eval(lambda x: x)
xla.register_translation(copy_p, lambda ctx, avals_in, avals_out, x: [x])
mlir.register_lowering(copy_p, lambda ctx, x: [x])
# copy is linear: its transpose is itself.
ad.deflinear(copy_p, lambda t: [copy_p.bind(t)])
batching.defvectorized(copy_p)
masking.defvectorized(copy_p)
def rng_bit_generator(key, shape, dtype=np.uint32,
                      algorithm=RandomAlgorithm.RNG_DEFAULT):
  """Stateless PRNG bit generator. Experimental and its use is discouraged.
  Returns uniformly distributed random bits with the specified shape and dtype
  (what is required to be an integer type) using the platform specific
  default algorithm or the one specified.
  It provides direct access to the RngBitGenerator primitive exposed by XLA
  (https://www.tensorflow.org/xla/operation_semantics#rngbitgenerator) for low
  level API access.
  Most users should use `jax.random` instead for a stable and more user
  friendly API.
  """
  shape = jax.core.canonicalize_shape(shape)
  dtype = dtypes.canonicalize_dtype(dtype)
  # Only unsigned-integer outputs are supported by the primitive.
  if np.dtype(dtype) not in {np.dtype('uint8'), np.dtype('uint16'),
                             np.dtype('uint32'), np.dtype('uint64')}:
    raise TypeError(f'rng_bit_generator: unsupported dtype {dtype}')
  return tuple(
      rng_bit_generator_p.bind(
          key, shape=shape, dtype=dtype, algorithm=algorithm))
def _iota_abstract_eval(*, dtype, shape, dimension):
  """Abstract eval for iota: validates the shape, numeric dtype, and that
  `dimension` indexes into `shape`."""
  _check_shapelike("iota", "shape", shape)
  if not any(dtypes.issubdtype(dtype, t) for t in _num):
    msg = 'iota does not accept dtype {}. Accepted dtypes are subtypes of {}.'
    typename = str(np.dtype(dtype).name)
    accepted_typenames = (t.__name__ for t in _num)
    raise TypeError(msg.format(typename, ', '.join(accepted_typenames)))
  if not 0 <= dimension < len(shape):
    raise ValueError("iota dimension must be between 0 and len(shape), got "
                     f"dimension={dimension} for shape {shape}")
  return ShapedArray(shape, dtype)
def _iota_translation_rule(ctx, avals_in, avals_out, *, dtype, shape,
                           dimension):
  etype = xla.dtype_to_primitive_type(dtype)
  xla_shape = xc.Shape.array_shape(etype, shape)
  return [xops.Iota(ctx.builder, xla_shape, dimension)]
iota_p = Primitive('iota')
iota_p.def_impl(partial(xla.apply_primitive, iota_p))
iota_p.def_abstract_eval(_iota_abstract_eval)
xla.register_translation(iota_p, _iota_translation_rule)
def _iota_lower(ctx, *, dtype, shape, dimension):
  # dtype/shape are already encoded in the output aval's IR type.
  del dtype, shape
  aval_out, = ctx.avals_out
  return mhlo.IotaOp(mlir.aval_to_ir_type(aval_out),
                     mlir.i64_attr(dimension)).results
mlir.register_lowering(iota_p, _iota_lower)
### util
# Local alias; np.ndim also accepts plain Python scalars and sequences.
_ndim = np.ndim
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not np.all(np.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return core.dilate_shape(shape, dilation)
def _ceil_divide(x1, x2):
return -np.floor_divide(np.negative(x1), x2)
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
  """Convert padding string to list of pairs of pad values."""
  PaddingType = xla_client.PaddingType
  if isinstance(padding, str):
    try:
      padding = {'VALID': PaddingType.VALID,
                 'SAME': PaddingType.SAME}[padding.upper()]
    except KeyError as err:
      msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
      raise RuntimeError(msg.format(padding)) from err
  if padding == PaddingType.VALID:
    return [(0, 0)] * len(in_shape)
  elif padding == PaddingType.SAME:
    out_shape = _ceil_divide(in_shape, window_strides)
    totals = np.maximum(0, (out_shape - 1) * window_strides +
                           window_shape - in_shape)
    # Split each total pad between low and high, giving high the extra unit.
    return [(amt // 2, amt - amt // 2) for amt in totals]
  else:
    msg = "Unknown padding type: {}."
    raise TypeError(msg.format(padding))
# Map of lax function to equivalent jax.numpy function for use in error string below.
_JNP_FUNCTION_EQUIVALENTS = {
'abs': 'fabs',
'acos': 'arccos',
'acosh': 'arccosh',
'add': 'add',
'asin': 'arcsin',
'asinh': 'arcsinh',
'atan': 'arctan',
'atan2': 'arctan2',
'atanh': 'arctanh',
'bitwise_and': 'bitwise_and',
'bitwise_not': 'bitwise_not',
'bitwise_or': 'bitwise_or',
'bitwise_xor': 'bitwise_xor',
'cbrt': 'cbrt',
'ceil': 'ceil',
'concatenate': 'concatenate',
'cos': 'cos',
'cosh': 'cosh',
'div': 'divide',
'eq': 'equal',
'exp': 'exp',
'expm1': 'expm1',
'floor': 'floor',
'greater': 'greater',
'greater_equal': 'greater_equal',
'less': 'less',
'less_equal': 'less_equal',
'log': 'log',
'logical_and': 'logical_and',
'logical_not': 'logical_not',
'logical_or': 'logical_or',
'logical_xor': 'logical_xor',
'log1p': 'log1p',
'max': 'maximum',
'min': 'minimum',
'mul': 'multiply',
'ne': 'not_equal',
'neg': 'negative',
'nextafter': 'nextafter',
'pow': 'float_power',
'rount': 'rount',
'select': 'where',
'shift_left': 'left_shift',
'shift_right_logical': 'right_shift',
'shift_right_arithmetic': 'right_shift',
'sign': 'sign',
'sin': 'sin',
'sinh': 'sinh',
'sqrt': 'sqrt',
'sub': 'subtract',
'tan': 'tan',
'tanh': 'tanh'
}
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
  """Check that dtypes agree, possibly ignoring float precision."""
  # the `ignore_fp_precision` flag exists because the XLA shape inference logic
  # allows mixed floating point precision, but the HLO verifier often rejects it
  types = list(map(np.dtype, ttypes)) # canonicalize
  if ignore_fp_precision:
    # Collapse every float (resp. complex) dtype onto a common abstract type
    # so that e.g. float32 and float16 compare equal.
    types = [
      np.floating if dtypes.issubdtype(dtype, np.floating)
      else np.complexfloating if dtypes.issubdtype(dtype, np.complexfloating)
      else dtype for dtype in types]
  if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:
    if ignore_fp_precision:
      msg = ("lax.{} requires arguments to have same dtypes up to floating point "
             "precision, got {}.")
    else:
      msg = "lax.{} requires arguments to have the same dtypes, got {}."
    if name in _JNP_FUNCTION_EQUIVALENTS:
      # Point the user at the type-promoting jax.numpy equivalent if one exists.
      equiv = _JNP_FUNCTION_EQUIVALENTS[name]
      msg += f" (Tip: jnp.{equiv} is a similar function that does automatic type promotion on inputs)."
    raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_shapelike(fun_name, arg_name, obj, non_zero_shape=False):
  """Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints).

  Raises TypeError (with a message naming `fun_name` and `arg_name`) when
  `obj` is not a rank-1 sequence of integers satisfying the lower bound
  (0, or 1 when `non_zero_shape` is set).
  """
  if not isinstance(obj, (tuple, list, np.ndarray)):
    msg = "{} {} must be of type tuple/list/ndarray, got {}."
    raise TypeError(msg.format(fun_name, arg_name, type(obj)))
  # bool(obj) for an ndarray raises an error, so we check len
  if not len(obj): # pylint: disable=g-explicit-length-test
    return
  if (config.jax_dynamic_shapes and isinstance(obj, (tuple, list)) and
      any(isinstance(d, core.Tracer) for d in obj)):
    return # TODO(mattjj): handle more checks in the dynamic shape case
  obj_arr = np.array(obj)
  if obj_arr.ndim != 1:
    msg = "{} {} must be rank 1, got {}."
    # Bug fix: this format string has three placeholders, but previously only
    # the rank was passed, so format() raised IndexError instead of the
    # intended TypeError.
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
  try:
    canonicalize_shape(obj_arr)
  except TypeError as err:
    msg = "{} {} must have every element be an integer type, got {}."
    raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj)))) from err
  lower_bound, bound_error = (
      (1, "strictly positive") if non_zero_shape else (0, "nonnegative"))
  if not all(core.greater_equal_dim(d, lower_bound) for d in obj_arr):
    msg = "{} {} must have every element be {}, got {}."
    raise TypeError(msg.format(fun_name, arg_name, bound_error, obj))
def _const(example, val):
  """Return `val` as a constant with the same dtype as `example`.

  When `example` is a Python scalar, the value is kept as a Python scalar if
  that preserves the dtype (so weak typing survives); otherwise a NumPy
  array of the target dtype is produced.
  """
  target_dtype = _dtype(example)
  if not dtypes.is_python_scalar(example):
    return np.array(val, target_dtype)
  scalar = dtypes.scalar_type_of(example)(val)
  if _dtype(scalar) == target_dtype:
    return scalar
  return np.array(scalar, target_dtype)
# Constant factories: full arrays (plural names) or scalars (singular names)
# of 0, 1, 2 with the same dtype as a reference value.
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)
# dtype extraction with canonicalization (e.g. int64 -> int32 when x64 is off).
dtype: Callable = partial(dtypes.dtype, canonicalize=True)
_dtype: Callable = partial(dtypes.dtype, canonicalize=True)
def _isnan(x):
  # NaN is the only value that compares unequal to itself.  Note this is an
  # elementwise (array-valued) predicate, not a Python bool, so the previous
  # `-> bool` annotation was dropped as misleading.
  return ne(x, x)
def _iscomplex(x) -> bool:
  # True when x's (canonicalized) dtype is complex.
  return dtypes.issubdtype(_dtype(x), np.complexfloating)
def ranges_like(*xs):
  """Yield consecutive `range`s, one per sequence in `xs`, whose lengths
  match the sequences and which tile [0, sum of lengths) without gaps."""
  offset = 0
  for seq in xs:
    end = offset + len(seq)
    yield range(offset, end)
    offset = end
def remaining(original, *removed_lists):
  """Return the elements of `original` not present in any removed list,
  preserving their original order."""
  removed = set()
  for lst in removed_lists:
    removed.update(lst)
  return [item for item in original if item not in removed]
def canonicalize_precision(precision: PrecisionLike) -> Optional[Tuple[PrecisionType, PrecisionType]]:
  """Turns an API precision specification, into a pair of enumeration values.
  The API can take the precision as a string, or int, and either as a single
  value to apply to both operands, or as a sequence of two values.
  """
  if precision is None:
    if config.jax_default_matmul_precision is None:
      return None
    try:
      # Fall back to the globally-configured default matmul precision.
      return type_cast(
          Tuple[PrecisionType, PrecisionType],
          (Precision(config.jax_default_matmul_precision),
           Precision(config.jax_default_matmul_precision)))
    except TypeError:
      raise ValueError(
          "jax_default_matmul_precision flag must be set to None or a value in "
          f"{list(Precision._strings)}, but got {config.jax_default_matmul_precision}"
      ) from None
  elif isinstance(precision, str) and precision in Precision._strings:
    return type_cast(Tuple[PrecisionType, PrecisionType],
                     (Precision(precision), Precision(precision)))
  elif isinstance(precision, xla_client.PrecisionConfig.Precision):
    return type_cast(Tuple[PrecisionType, PrecisionType], (precision, precision))
  elif (isinstance(precision, (list, tuple)) and len(precision) == 2 and
        all(isinstance(p, xla_client.PrecisionConfig.Precision) for p in precision)):
    return type_cast(Tuple[PrecisionType, PrecisionType], precision)
  elif (isinstance(precision, (list, tuple)) and len(precision) == 2 and
        all(isinstance(s, str) for s in precision)):
    # A pair of strings: canonicalize each element independently.
    s1, s2 = precision
    p1 = type_cast(Tuple[PrecisionType, PrecisionType], canonicalize_precision(s1))[0]
    p2 = type_cast(Tuple[PrecisionType, PrecisionType], canonicalize_precision(s2))[0]
    return (p1, p2)
  else:
    raise ValueError(
        f"Precision argument must be None, a string in {list(Precision._strings)}, "
        "a lax.Precision value or a tuple of two lax.Precision values or "
        f"strings; got {precision}.")
def _balanced_eq(x, z, y):
  # Gradient-splitting helper used by min/max-like ops: contributes 1 where
  # only x ties with z, and 1/2 where both x and y tie with z.
  return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
             select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
  # Equality comparison after casting the higher-dtype operand DOWN to the
  # lower dtype (a lattice "meet", hence the name).
  a_dtype, b_dtype = _dtype(a), _dtype(b)
  if a_dtype != b_dtype:
    higher_dtype = dtypes.promote_types(a_dtype, b_dtype)
    if higher_dtype == a_dtype:
      a = convert_element_type(a, b_dtype)
    else:
      b = convert_element_type(b, a_dtype)
  return eq(a, b)
def _abstractify(x):
  """Return the shaped abstract value (aval) corresponding to ``x``."""
  aval = core.get_aval(x)
  return raise_to_shaped(aval)
def _check_user_dtype_supported(dtype, fun_name=None):
# Avoid using `dtype in [...]` because of numpy dtype equality overloading.
if isinstance(dtype, type) and dtype in {bool, int, float, complex}:
return
np_dtype = np.dtype(dtype)
if np_dtype.kind not in "biufc" and np_dtype.type != dtypes.bfloat16:
msg = f"JAX only supports number and bool dtypes, got dtype {dtype}"
msg += f" in {fun_name}" if fun_name else ""
raise TypeError(msg)
if dtype is not None and np_dtype != dtypes.canonicalize_dtype(dtype):
msg = ("Explicitly requested dtype {} {} is not available, "
"and will be truncated to dtype {}. To enable more dtypes, set the "
"jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
"environment variable. "
"See https://github.com/google/jax#current-gotchas for more.")
fun_name = f"requested in {fun_name}" if fun_name else ""
truncated_dtype = dtypes.canonicalize_dtype(dtype).name
warnings.warn(msg.format(dtype, fun_name , truncated_dtype), stacklevel=2)
| 41.038059
| 141
| 0.702347
|
4a09356b3183f6ada7b08928c3001fd5219f3f05
| 98,707
|
py
|
Python
|
sdk/python/pulumi_aws_native/lex/_inputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws_native/lex/_inputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws_native/lex/_inputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'BotAliasAudioLogDestinationArgs',
'BotAliasAudioLogSettingArgs',
'BotAliasCloudWatchLogGroupLogDestinationArgs',
'BotAliasCodeHookSpecificationArgs',
'BotAliasConversationLogSettingsArgs',
'BotAliasLambdaCodeHookArgs',
'BotAliasLocaleSettingsItemArgs',
'BotAliasLocaleSettingsArgs',
'BotAliasS3BucketLogDestinationArgs',
'BotAliasTagArgs',
'BotAliasTextLogDestinationArgs',
'BotAliasTextLogSettingArgs',
'BotButtonArgs',
'BotCustomPayloadArgs',
'BotDialogCodeHookSettingArgs',
'BotExternalSourceSettingArgs',
'BotFulfillmentCodeHookSettingArgs',
'BotFulfillmentStartResponseSpecificationArgs',
'BotFulfillmentUpdateResponseSpecificationArgs',
'BotFulfillmentUpdatesSpecificationArgs',
'BotGrammarSlotTypeSettingArgs',
'BotGrammarSlotTypeSourceArgs',
'BotImageResponseCardArgs',
'BotInputContextArgs',
'BotIntentClosingSettingArgs',
'BotIntentConfirmationSettingArgs',
'BotIntentArgs',
'BotKendraConfigurationArgs',
'BotLocaleArgs',
'BotMessageGroupArgs',
'BotMessageArgs',
'BotMultipleValuesSettingArgs',
'BotObfuscationSettingArgs',
'BotOutputContextArgs',
'BotPlainTextMessageArgs',
'BotPostFulfillmentStatusSpecificationArgs',
'BotPromptSpecificationArgs',
'BotResponseSpecificationArgs',
'BotS3LocationArgs',
'BotSSMLMessageArgs',
'BotSampleUtteranceArgs',
'BotSampleValueArgs',
'BotSlotDefaultValueSpecificationArgs',
'BotSlotDefaultValueArgs',
'BotSlotPriorityArgs',
'BotSlotTypeValueArgs',
'BotSlotTypeArgs',
'BotSlotValueElicitationSettingArgs',
'BotSlotValueRegexFilterArgs',
'BotSlotValueSelectionSettingArgs',
'BotSlotArgs',
'BotStillWaitingResponseSpecificationArgs',
'BotTagArgs',
'BotVersionLocaleDetailsArgs',
'BotVersionLocaleSpecificationArgs',
'BotVoiceSettingsArgs',
'BotWaitAndContinueSpecificationArgs',
'DataPrivacyPropertiesArgs',
'ResourcePolicyPolicyArgs',
'SentimentAnalysisSettingsPropertiesArgs',
]
@pulumi.input_type
class BotAliasAudioLogDestinationArgs:
    def __init__(__self__, *,
                 s3_bucket: Optional[pulumi.Input['BotAliasS3BucketLogDestinationArgs']] = None):
        """
        The location of audio log files collected when conversation logging is enabled for a bot.
        :param pulumi.Input['BotAliasS3BucketLogDestinationArgs'] s3_bucket: The Amazon S3 bucket where the audio log files are stored.
        """
        if s3_bucket is not None:
            pulumi.set(__self__, "s3_bucket", s3_bucket)
    @property
    @pulumi.getter(name="s3Bucket")
    def s3_bucket(self) -> Optional[pulumi.Input['BotAliasS3BucketLogDestinationArgs']]:
        """
        The Amazon S3 bucket where the audio log files are stored.
        """
        return pulumi.get(self, "s3_bucket")
    @s3_bucket.setter
    def s3_bucket(self, value: Optional[pulumi.Input['BotAliasS3BucketLogDestinationArgs']]):
        pulumi.set(self, "s3_bucket", value)
@pulumi.input_type
class BotAliasAudioLogSettingArgs:
    def __init__(__self__, *,
                 destination: pulumi.Input['BotAliasAudioLogDestinationArgs'],
                 enabled: pulumi.Input[bool]):
        """
        Settings for logging audio of conversations between Amazon Lex and a user. You specify whether to log audio and the Amazon S3 bucket where the audio file is stored.
        :param pulumi.Input['BotAliasAudioLogDestinationArgs'] destination: The location where audio log files are delivered.
        :param pulumi.Input[bool] enabled: Whether audio logging is enabled for this setting.
        """
        pulumi.set(__self__, "destination", destination)
        pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def destination(self) -> pulumi.Input['BotAliasAudioLogDestinationArgs']:
        """
        The location where audio log files are delivered.
        """
        return pulumi.get(self, "destination")
    @destination.setter
    def destination(self, value: pulumi.Input['BotAliasAudioLogDestinationArgs']):
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Whether audio logging is enabled for this setting.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class BotAliasCloudWatchLogGroupLogDestinationArgs:
    def __init__(__self__, *,
                 cloud_watch_log_group_arn: pulumi.Input[str],
                 log_prefix: pulumi.Input[str]):
        """
        The Amazon CloudWatch Logs log group where text and metadata logs are delivered.
        :param pulumi.Input[str] cloud_watch_log_group_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group where text and metadata logs are delivered.
        :param pulumi.Input[str] log_prefix: The prefix of the log stream name within the log group that you specified.
        """
        pulumi.set(__self__, "cloud_watch_log_group_arn", cloud_watch_log_group_arn)
        pulumi.set(__self__, "log_prefix", log_prefix)
    @property
    @pulumi.getter(name="cloudWatchLogGroupArn")
    def cloud_watch_log_group_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the CloudWatch Logs log group where text and metadata logs are delivered.
        """
        return pulumi.get(self, "cloud_watch_log_group_arn")
    @cloud_watch_log_group_arn.setter
    def cloud_watch_log_group_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "cloud_watch_log_group_arn", value)
    @property
    @pulumi.getter(name="logPrefix")
    def log_prefix(self) -> pulumi.Input[str]:
        """
        The prefix of the log stream name within the log group that you specified.
        """
        return pulumi.get(self, "log_prefix")
    @log_prefix.setter
    def log_prefix(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_prefix", value)
@pulumi.input_type
class BotAliasCodeHookSpecificationArgs:
    def __init__(__self__, *,
                 lambda_code_hook: pulumi.Input['BotAliasLambdaCodeHookArgs']):
        """
        Contains information about code hooks that Amazon Lex calls during a conversation.
        :param pulumi.Input['BotAliasLambdaCodeHookArgs'] lambda_code_hook: The Lambda function that Amazon Lex calls during the conversation.
        """
        pulumi.set(__self__, "lambda_code_hook", lambda_code_hook)
    @property
    @pulumi.getter(name="lambdaCodeHook")
    def lambda_code_hook(self) -> pulumi.Input['BotAliasLambdaCodeHookArgs']:
        """
        The Lambda function that Amazon Lex calls during the conversation.
        """
        return pulumi.get(self, "lambda_code_hook")
    @lambda_code_hook.setter
    def lambda_code_hook(self, value: pulumi.Input['BotAliasLambdaCodeHookArgs']):
        pulumi.set(self, "lambda_code_hook", value)
@pulumi.input_type
class BotAliasConversationLogSettingsArgs:
    def __init__(__self__, *,
                 audio_log_settings: Optional[pulumi.Input[Sequence[pulumi.Input['BotAliasAudioLogSettingArgs']]]] = None,
                 text_log_settings: Optional[pulumi.Input[Sequence[pulumi.Input['BotAliasTextLogSettingArgs']]]] = None):
        """
        Configures conversation logging that saves audio, text, and metadata for the conversations with your users.
        :param pulumi.Input[Sequence[pulumi.Input['BotAliasAudioLogSettingArgs']]] audio_log_settings: Settings for logging audio of conversations to Amazon S3.
        :param pulumi.Input[Sequence[pulumi.Input['BotAliasTextLogSettingArgs']]] text_log_settings: Settings for logging text of conversations to CloudWatch Logs.
        """
        if audio_log_settings is not None:
            pulumi.set(__self__, "audio_log_settings", audio_log_settings)
        if text_log_settings is not None:
            pulumi.set(__self__, "text_log_settings", text_log_settings)
    @property
    @pulumi.getter(name="audioLogSettings")
    def audio_log_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotAliasAudioLogSettingArgs']]]]:
        """
        Settings for logging audio of conversations to Amazon S3.
        """
        return pulumi.get(self, "audio_log_settings")
    @audio_log_settings.setter
    def audio_log_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotAliasAudioLogSettingArgs']]]]):
        pulumi.set(self, "audio_log_settings", value)
    @property
    @pulumi.getter(name="textLogSettings")
    def text_log_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotAliasTextLogSettingArgs']]]]:
        """
        Settings for logging text of conversations to CloudWatch Logs.
        """
        return pulumi.get(self, "text_log_settings")
    @text_log_settings.setter
    def text_log_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotAliasTextLogSettingArgs']]]]):
        pulumi.set(self, "text_log_settings", value)
@pulumi.input_type
class BotAliasLambdaCodeHookArgs:
    def __init__(__self__, *,
                 code_hook_interface_version: pulumi.Input[str],
                 lambda_arn: pulumi.Input[str]):
        """
        Specifies a Lambda function that verifies requests to a bot or fulfills the user's request to a bot.
        :param pulumi.Input[str] code_hook_interface_version: The version of the request-response that you want Amazon Lex to use to invoke your Lambda function.
        :param pulumi.Input[str] lambda_arn: The Amazon Resource Name (ARN) of the Lambda function.
        """
        pulumi.set(__self__, "code_hook_interface_version", code_hook_interface_version)
        pulumi.set(__self__, "lambda_arn", lambda_arn)
    @property
    @pulumi.getter(name="codeHookInterfaceVersion")
    def code_hook_interface_version(self) -> pulumi.Input[str]:
        """
        The version of the request-response that you want Amazon Lex to use to invoke your Lambda function.
        """
        return pulumi.get(self, "code_hook_interface_version")
    @code_hook_interface_version.setter
    def code_hook_interface_version(self, value: pulumi.Input[str]):
        pulumi.set(self, "code_hook_interface_version", value)
    @property
    @pulumi.getter(name="lambdaArn")
    def lambda_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the Lambda function.
        """
        return pulumi.get(self, "lambda_arn")
    @lambda_arn.setter
    def lambda_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "lambda_arn", value)
@pulumi.input_type
class BotAliasLocaleSettingsItemArgs:
    def __init__(__self__, *,
                 bot_alias_locale_setting: pulumi.Input['BotAliasLocaleSettingsArgs'],
                 locale_id: pulumi.Input[str]):
        """
        A locale setting in alias
        :param pulumi.Input['BotAliasLocaleSettingsArgs'] bot_alias_locale_setting: Settings that apply to the specified locale of the bot alias.
        :param pulumi.Input[str] locale_id: A string used to identify the locale
        """
        pulumi.set(__self__, "bot_alias_locale_setting", bot_alias_locale_setting)
        pulumi.set(__self__, "locale_id", locale_id)
    @property
    @pulumi.getter(name="botAliasLocaleSetting")
    def bot_alias_locale_setting(self) -> pulumi.Input['BotAliasLocaleSettingsArgs']:
        """
        Settings that apply to the specified locale of the bot alias.
        """
        return pulumi.get(self, "bot_alias_locale_setting")
    @bot_alias_locale_setting.setter
    def bot_alias_locale_setting(self, value: pulumi.Input['BotAliasLocaleSettingsArgs']):
        pulumi.set(self, "bot_alias_locale_setting", value)
    @property
    @pulumi.getter(name="localeId")
    def locale_id(self) -> pulumi.Input[str]:
        """
        A string used to identify the locale
        """
        return pulumi.get(self, "locale_id")
    @locale_id.setter
    def locale_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "locale_id", value)
@pulumi.input_type
class BotAliasLocaleSettingsArgs:
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool],
                 code_hook_specification: Optional[pulumi.Input['BotAliasCodeHookSpecificationArgs']] = None):
        """
        You can use this parameter to specify a specific Lambda function to run different functions in different locales.
        :param pulumi.Input[bool] enabled: Whether the Lambda code hook is enabled
        :param pulumi.Input['BotAliasCodeHookSpecificationArgs'] code_hook_specification: Specifies the Lambda function to use in this locale.
        """
        pulumi.set(__self__, "enabled", enabled)
        if code_hook_specification is not None:
            pulumi.set(__self__, "code_hook_specification", code_hook_specification)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Whether the Lambda code hook is enabled
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="codeHookSpecification")
    def code_hook_specification(self) -> Optional[pulumi.Input['BotAliasCodeHookSpecificationArgs']]:
        """
        Specifies the Lambda function to use in this locale.
        """
        return pulumi.get(self, "code_hook_specification")
    @code_hook_specification.setter
    def code_hook_specification(self, value: Optional[pulumi.Input['BotAliasCodeHookSpecificationArgs']]):
        pulumi.set(self, "code_hook_specification", value)
@pulumi.input_type
class BotAliasS3BucketLogDestinationArgs:
    def __init__(__self__, *,
                 log_prefix: pulumi.Input[str],
                 s3_bucket_arn: pulumi.Input[str],
                 kms_key_arn: Optional[pulumi.Input[str]] = None):
        """
        Specifies an Amazon S3 bucket for logging audio conversations
        :param pulumi.Input[str] log_prefix: The S3 prefix to assign to audio log files.
        :param pulumi.Input[str] s3_bucket_arn: The Amazon Resource Name (ARN) of an Amazon S3 bucket where audio log files are stored.
        :param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key for encrypting audio log files stored in an S3 bucket.
        """
        pulumi.set(__self__, "log_prefix", log_prefix)
        pulumi.set(__self__, "s3_bucket_arn", s3_bucket_arn)
        if kms_key_arn is not None:
            pulumi.set(__self__, "kms_key_arn", kms_key_arn)
    @property
    @pulumi.getter(name="logPrefix")
    def log_prefix(self) -> pulumi.Input[str]:
        """
        The S3 prefix to assign to audio log files.
        """
        return pulumi.get(self, "log_prefix")
    @log_prefix.setter
    def log_prefix(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_prefix", value)
    @property
    @pulumi.getter(name="s3BucketArn")
    def s3_bucket_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of an Amazon S3 bucket where audio log files are stored.
        """
        return pulumi.get(self, "s3_bucket_arn")
    @s3_bucket_arn.setter
    def s3_bucket_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_bucket_arn", value)
    @property
    @pulumi.getter(name="kmsKeyArn")
    def kms_key_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key for encrypting audio log files stored in an S3 bucket.
        """
        return pulumi.get(self, "kms_key_arn")
    @kms_key_arn.setter
    def kms_key_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_arn", value)
@pulumi.input_type
class BotAliasTagArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A label for tagging Lex resources. Both fields are required.
        :param pulumi.Input[str] key: A string used to identify this tag
        :param pulumi.Input[str] value: A string containing the value for the tag
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        A string used to identify this tag
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        A string containing the value for the tag
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotAliasTextLogDestinationArgs:
    def __init__(__self__, *,
                 cloud_watch: Optional[pulumi.Input['BotAliasCloudWatchLogGroupLogDestinationArgs']] = None):
        """
        Defines the Amazon CloudWatch Logs destination log group for conversation text logs.
        :param pulumi.Input['BotAliasCloudWatchLogGroupLogDestinationArgs'] cloud_watch: The CloudWatch Logs log group where the text logs are delivered.
        """
        if cloud_watch is not None:
            pulumi.set(__self__, "cloud_watch", cloud_watch)
    @property
    @pulumi.getter(name="cloudWatch")
    def cloud_watch(self) -> Optional[pulumi.Input['BotAliasCloudWatchLogGroupLogDestinationArgs']]:
        """
        The CloudWatch Logs log group where the text logs are delivered.
        """
        return pulumi.get(self, "cloud_watch")
    @cloud_watch.setter
    def cloud_watch(self, value: Optional[pulumi.Input['BotAliasCloudWatchLogGroupLogDestinationArgs']]):
        pulumi.set(self, "cloud_watch", value)
@pulumi.input_type
class BotAliasTextLogSettingArgs:
    def __init__(__self__, *,
                 destination: Optional[pulumi.Input['BotAliasTextLogDestinationArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Settings for conversation text logs: whether to log text and where the logs are delivered.
        :param pulumi.Input['BotAliasTextLogDestinationArgs'] destination: The CloudWatch Logs destination for the text logs.
        :param pulumi.Input[bool] enabled: Whether conversation text logging is enabled.
        """
        if destination is not None:
            pulumi.set(__self__, "destination", destination)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def destination(self) -> Optional[pulumi.Input['BotAliasTextLogDestinationArgs']]:
        """
        The CloudWatch Logs destination for the text logs.
        """
        return pulumi.get(self, "destination")
    @destination.setter
    def destination(self, value: Optional[pulumi.Input['BotAliasTextLogDestinationArgs']]):
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether conversation text logging is enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class BotButtonArgs:
    def __init__(__self__, *,
                 text: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A button to use on a response card used to gather slot values from a user. Both fields are required.
        :param pulumi.Input[str] text: The text that appears on the button.
        :param pulumi.Input[str] value: The value returned to Amazon Lex when the user chooses this button.
        """
        pulumi.set(__self__, "text", text)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def text(self) -> pulumi.Input[str]:
        """
        The text that appears on the button.
        """
        return pulumi.get(self, "text")
    @text.setter
    def text(self, value: pulumi.Input[str]):
        pulumi.set(self, "text", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value returned to Amazon Lex when the user chooses this button.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotCustomPayloadArgs:
    def __init__(__self__, *,
                 value: pulumi.Input[str]):
        """
        A message in a custom format defined by the client application. The value is required.
        :param pulumi.Input[str] value: The string that is sent to your application.
        """
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The string that is sent to your application.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotDialogCodeHookSettingArgs:
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool]):
        """
        Settings that determine the Lambda function that Amazon Lex uses for processing user responses.
        :param pulumi.Input[bool] enabled: Indicates whether the dialog code hook is used to process user requests.
        """
        pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Indicates whether the dialog code hook is used to process user requests.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class BotExternalSourceSettingArgs:
    def __init__(__self__, *,
                 grammar_slot_type_setting: Optional[pulumi.Input['BotGrammarSlotTypeSettingArgs']] = None):
        """
        Provides information about the external source of the slot type's definition.
        :param pulumi.Input['BotGrammarSlotTypeSettingArgs'] grammar_slot_type_setting: Settings required for a slot type based on a grammar that you provide.
        """
        if grammar_slot_type_setting is not None:
            pulumi.set(__self__, "grammar_slot_type_setting", grammar_slot_type_setting)
    @property
    @pulumi.getter(name="grammarSlotTypeSetting")
    def grammar_slot_type_setting(self) -> Optional[pulumi.Input['BotGrammarSlotTypeSettingArgs']]:
        """
        Settings required for a slot type based on a grammar that you provide.
        """
        return pulumi.get(self, "grammar_slot_type_setting")
    @grammar_slot_type_setting.setter
    def grammar_slot_type_setting(self, value: Optional[pulumi.Input['BotGrammarSlotTypeSettingArgs']]):
        pulumi.set(self, "grammar_slot_type_setting", value)
@pulumi.input_type
class BotFulfillmentCodeHookSettingArgs:
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool],
                 fulfillment_updates_specification: Optional[pulumi.Input['BotFulfillmentUpdatesSpecificationArgs']] = None,
                 post_fulfillment_status_specification: Optional[pulumi.Input['BotPostFulfillmentStatusSpecificationArgs']] = None):
        """
        Settings that determine if a Lambda function should be invoked to fulfill a specific intent.
        :param pulumi.Input[bool] enabled: Indicates whether a Lambda function should be invoked to fulfill the intent.
        :param pulumi.Input['BotFulfillmentUpdatesSpecificationArgs'] fulfillment_updates_specification: Settings for update messages sent to the user while the fulfillment function runs.
        :param pulumi.Input['BotPostFulfillmentStatusSpecificationArgs'] post_fulfillment_status_specification: Settings for messages sent after the fulfillment function completes.
        """
        pulumi.set(__self__, "enabled", enabled)
        if fulfillment_updates_specification is not None:
            pulumi.set(__self__, "fulfillment_updates_specification", fulfillment_updates_specification)
        if post_fulfillment_status_specification is not None:
            pulumi.set(__self__, "post_fulfillment_status_specification", post_fulfillment_status_specification)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Indicates whether a Lambda function should be invoked to fulfill the intent.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="fulfillmentUpdatesSpecification")
    def fulfillment_updates_specification(self) -> Optional[pulumi.Input['BotFulfillmentUpdatesSpecificationArgs']]:
        """
        Settings for update messages sent to the user while the fulfillment function runs.
        """
        return pulumi.get(self, "fulfillment_updates_specification")
    @fulfillment_updates_specification.setter
    def fulfillment_updates_specification(self, value: Optional[pulumi.Input['BotFulfillmentUpdatesSpecificationArgs']]):
        pulumi.set(self, "fulfillment_updates_specification", value)
    @property
    @pulumi.getter(name="postFulfillmentStatusSpecification")
    def post_fulfillment_status_specification(self) -> Optional[pulumi.Input['BotPostFulfillmentStatusSpecificationArgs']]:
        """
        Settings for messages sent after the fulfillment function completes.
        """
        return pulumi.get(self, "post_fulfillment_status_specification")
    @post_fulfillment_status_specification.setter
    def post_fulfillment_status_specification(self, value: Optional[pulumi.Input['BotPostFulfillmentStatusSpecificationArgs']]):
        pulumi.set(self, "post_fulfillment_status_specification", value)
@pulumi.input_type
class BotFulfillmentStartResponseSpecificationArgs:
    def __init__(__self__, *,
                 delay_in_seconds: pulumi.Input[int],
                 message_groups: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]],
                 allow_interrupt: Optional[pulumi.Input[bool]] = None):
        """
        Provides settings for a message that is sent to the user when a fulfillment Lambda function starts running.
        :param pulumi.Input[int] delay_in_seconds: The delay between when the Lambda fulfillment function starts running and the start message is played. If the Lambda function returns before the delay is over, the start message isn't played.
        :param pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]] message_groups: Message groups that contain the start messages. Amazon Lex chooses one of the messages to play to the user.
        :param pulumi.Input[bool] allow_interrupt: Determines whether the user can interrupt the start message while it is playing.
        """
        pulumi.set(__self__, "delay_in_seconds", delay_in_seconds)
        pulumi.set(__self__, "message_groups", message_groups)
        if allow_interrupt is not None:
            pulumi.set(__self__, "allow_interrupt", allow_interrupt)
    @property
    @pulumi.getter(name="delayInSeconds")
    def delay_in_seconds(self) -> pulumi.Input[int]:
        """
        The delay between when the Lambda fulfillment function starts running and the start message is played. If the Lambda function returns before the delay is over, the start message isn't played.
        """
        return pulumi.get(self, "delay_in_seconds")
    @delay_in_seconds.setter
    def delay_in_seconds(self, value: pulumi.Input[int]):
        pulumi.set(self, "delay_in_seconds", value)
    @property
    @pulumi.getter(name="messageGroups")
    def message_groups(self) -> pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]:
        """
        Message groups that contain the start messages. Amazon Lex chooses one of the messages to play to the user.
        """
        return pulumi.get(self, "message_groups")
    @message_groups.setter
    def message_groups(self, value: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]):
        pulumi.set(self, "message_groups", value)
    @property
    @pulumi.getter(name="allowInterrupt")
    def allow_interrupt(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether the user can interrupt the start message while it is playing.
        """
        return pulumi.get(self, "allow_interrupt")
    @allow_interrupt.setter
    def allow_interrupt(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_interrupt", value)
@pulumi.input_type
class BotFulfillmentUpdateResponseSpecificationArgs:
    def __init__(__self__, *,
                 frequency_in_seconds: pulumi.Input[int],
                 message_groups: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]],
                 allow_interrupt: Optional[pulumi.Input[bool]] = None):
        """
        Provides settings for a message that is sent periodically to the user while a fulfillment Lambda function is running.
        :param pulumi.Input[int] frequency_in_seconds: The frequency that a message is sent to the user. When the period ends, Amazon Lex chooses a message from the message groups and plays it to the user. If the fulfillment Lambda returns before the first period ends, an update message is not played to the user.
        :param pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]] message_groups: Message groups that contain the update messages. Amazon Lex chooses one of the messages to play to the user.
        :param pulumi.Input[bool] allow_interrupt: Determines whether the user can interrupt an update message while it is playing.
        """
        pulumi.set(__self__, "frequency_in_seconds", frequency_in_seconds)
        pulumi.set(__self__, "message_groups", message_groups)
        if allow_interrupt is not None:
            pulumi.set(__self__, "allow_interrupt", allow_interrupt)
    @property
    @pulumi.getter(name="frequencyInSeconds")
    def frequency_in_seconds(self) -> pulumi.Input[int]:
        """
        The frequency that a message is sent to the user. When the period ends, Amazon Lex chooses a message from the message groups and plays it to the user. If the fulfillment Lambda returns before the first period ends, an update message is not played to the user.
        """
        return pulumi.get(self, "frequency_in_seconds")
    @frequency_in_seconds.setter
    def frequency_in_seconds(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_in_seconds", value)
    @property
    @pulumi.getter(name="messageGroups")
    def message_groups(self) -> pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]:
        """
        Message groups that contain the update messages. Amazon Lex chooses one of the messages to play to the user.
        """
        return pulumi.get(self, "message_groups")
    @message_groups.setter
    def message_groups(self, value: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]):
        pulumi.set(self, "message_groups", value)
    @property
    @pulumi.getter(name="allowInterrupt")
    def allow_interrupt(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether the user can interrupt an update message while it is playing.
        """
        return pulumi.get(self, "allow_interrupt")
    @allow_interrupt.setter
    def allow_interrupt(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_interrupt", value)
@pulumi.input_type
class BotFulfillmentUpdatesSpecificationArgs:
    def __init__(__self__, *,
                 active: pulumi.Input[bool],
                 start_response: Optional[pulumi.Input['BotFulfillmentStartResponseSpecificationArgs']] = None,
                 timeout_in_seconds: Optional[pulumi.Input[int]] = None,
                 update_response: Optional[pulumi.Input['BotFulfillmentUpdateResponseSpecificationArgs']] = None):
        """
        Provides information for updating the user on the progress of fulfilling an intent.
        :param pulumi.Input[bool] active: Determines whether fulfillment updates are sent to the user. When this field is true, updates are sent.
        :param pulumi.Input['BotFulfillmentStartResponseSpecificationArgs'] start_response: The message played when the fulfillment Lambda function starts running.
        :param pulumi.Input[int] timeout_in_seconds: The length of time that the fulfillment Lambda function should run before it times out.
        :param pulumi.Input['BotFulfillmentUpdateResponseSpecificationArgs'] update_response: The messages played periodically while the fulfillment Lambda function runs.
        """
        pulumi.set(__self__, "active", active)
        if start_response is not None:
            pulumi.set(__self__, "start_response", start_response)
        if timeout_in_seconds is not None:
            pulumi.set(__self__, "timeout_in_seconds", timeout_in_seconds)
        if update_response is not None:
            pulumi.set(__self__, "update_response", update_response)
    @property
    @pulumi.getter
    def active(self) -> pulumi.Input[bool]:
        """
        Determines whether fulfillment updates are sent to the user. When this field is true, updates are sent.
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: pulumi.Input[bool]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter(name="startResponse")
    def start_response(self) -> Optional[pulumi.Input['BotFulfillmentStartResponseSpecificationArgs']]:
        """
        The message played when the fulfillment Lambda function starts running.
        """
        return pulumi.get(self, "start_response")
    @start_response.setter
    def start_response(self, value: Optional[pulumi.Input['BotFulfillmentStartResponseSpecificationArgs']]):
        pulumi.set(self, "start_response", value)
    @property
    @pulumi.getter(name="timeoutInSeconds")
    def timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The length of time that the fulfillment Lambda function should run before it times out.
        """
        return pulumi.get(self, "timeout_in_seconds")
    @timeout_in_seconds.setter
    def timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_in_seconds", value)
    @property
    @pulumi.getter(name="updateResponse")
    def update_response(self) -> Optional[pulumi.Input['BotFulfillmentUpdateResponseSpecificationArgs']]:
        """
        The messages played periodically while the fulfillment Lambda function runs.
        """
        return pulumi.get(self, "update_response")
    @update_response.setter
    def update_response(self, value: Optional[pulumi.Input['BotFulfillmentUpdateResponseSpecificationArgs']]):
        pulumi.set(self, "update_response", value)
@pulumi.input_type
class BotGrammarSlotTypeSettingArgs:
    def __init__(__self__, *,
                 source: Optional[pulumi.Input['BotGrammarSlotTypeSourceArgs']] = None):
        """
        Settings required for a slot type based on a grammar that you provide.
        :param pulumi.Input['BotGrammarSlotTypeSourceArgs'] source: The source of the grammar used to create the slot type.
        """
        if source is not None:
            pulumi.set(__self__, "source", source)
    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input['BotGrammarSlotTypeSourceArgs']]:
        """
        The source of the grammar used to create the slot type.
        """
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: Optional[pulumi.Input['BotGrammarSlotTypeSourceArgs']]):
        pulumi.set(self, "source", value)
@pulumi.input_type
class BotGrammarSlotTypeSourceArgs:
    def __init__(__self__, *,
                 s3_bucket_name: pulumi.Input[str],
                 s3_object_key: pulumi.Input[str],
                 kms_key_arn: Optional[pulumi.Input[str]] = None):
        """
        Describes the Amazon S3 bucket name and location for the grammar that is the source for the slot type.
        The bucket name and object key are required; the KMS key is optional.
        :param pulumi.Input[str] s3_bucket_name: The name of the S3 bucket that contains the grammar source.
        :param pulumi.Input[str] s3_object_key: The path to the grammar in the S3 bucket.
        :param pulumi.Input[str] kms_key_arn: The Amazon KMS key required to decrypt the contents of the grammar, if any.
        """
        pulumi.set(__self__, "s3_bucket_name", s3_bucket_name)
        pulumi.set(__self__, "s3_object_key", s3_object_key)
        if kms_key_arn is not None:
            pulumi.set(__self__, "kms_key_arn", kms_key_arn)
    @property
    @pulumi.getter(name="s3BucketName")
    def s3_bucket_name(self) -> pulumi.Input[str]:
        """
        The name of the S3 bucket that contains the grammar source.
        """
        return pulumi.get(self, "s3_bucket_name")
    @s3_bucket_name.setter
    def s3_bucket_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_bucket_name", value)
    @property
    @pulumi.getter(name="s3ObjectKey")
    def s3_object_key(self) -> pulumi.Input[str]:
        """
        The path to the grammar in the S3 bucket.
        """
        return pulumi.get(self, "s3_object_key")
    @s3_object_key.setter
    def s3_object_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "s3_object_key", value)
    @property
    @pulumi.getter(name="kmsKeyArn")
    def kms_key_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon KMS key required to decrypt the contents of the grammar, if any.
        """
        return pulumi.get(self, "kms_key_arn")
    @kms_key_arn.setter
    def kms_key_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_arn", value)
@pulumi.input_type
class BotImageResponseCardArgs:
    def __init__(__self__, *,
                 title: pulumi.Input[str],
                 buttons: Optional[pulumi.Input[Sequence[pulumi.Input['BotButtonArgs']]]] = None,
                 image_url: Optional[pulumi.Input[str]] = None,
                 subtitle: Optional[pulumi.Input[str]] = None):
        """
        A message that defines a response card that the client application can show to the user.
        Only the title is required; buttons, image, and subtitle are optional.
        :param pulumi.Input[str] title: The title to display on the response card.
        :param pulumi.Input[Sequence[pulumi.Input['BotButtonArgs']]] buttons: A list of buttons that should be displayed on the response card.
        :param pulumi.Input[str] image_url: The URL of an image to display on the response card.
        :param pulumi.Input[str] subtitle: The subtitle to display on the response card.
        """
        pulumi.set(__self__, "title", title)
        if buttons is not None:
            pulumi.set(__self__, "buttons", buttons)
        if image_url is not None:
            pulumi.set(__self__, "image_url", image_url)
        if subtitle is not None:
            pulumi.set(__self__, "subtitle", subtitle)
    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        """
        The title to display on the response card.
        """
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def buttons(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotButtonArgs']]]]:
        """
        A list of buttons that should be displayed on the response card.
        """
        return pulumi.get(self, "buttons")
    @buttons.setter
    def buttons(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotButtonArgs']]]]):
        pulumi.set(self, "buttons", value)
    @property
    @pulumi.getter(name="imageUrl")
    def image_url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of an image to display on the response card.
        """
        return pulumi.get(self, "image_url")
    @image_url.setter
    def image_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_url", value)
    @property
    @pulumi.getter
    def subtitle(self) -> Optional[pulumi.Input[str]]:
        """
        The subtitle to display on the response card.
        """
        return pulumi.get(self, "subtitle")
    @subtitle.setter
    def subtitle(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subtitle", value)
@pulumi.input_type
class BotInputContextArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str]):
        """
        InputContext specified for the intent.
        :param pulumi.Input[str] name: The name of the context.
        """
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the context.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        """Set the name of the context."""
        pulumi.set(self, "name", value)
@pulumi.input_type
class BotIntentClosingSettingArgs:
    def __init__(__self__, *,
                 closing_response: pulumi.Input['BotResponseSpecificationArgs'],
                 is_active: Optional[pulumi.Input[bool]] = None):
        """
        Response that Amazon Lex sends to the user when the intent is closed.
        :param pulumi.Input['BotResponseSpecificationArgs'] closing_response: The response that Amazon Lex sends to the user when the intent is complete.
        :param pulumi.Input[bool] is_active: Whether the closing setting is active (inferred from the field name; not documented in source).
        """
        pulumi.set(__self__, "closing_response", closing_response)
        if is_active is not None:
            pulumi.set(__self__, "is_active", is_active)
    @property
    @pulumi.getter(name="closingResponse")
    def closing_response(self) -> pulumi.Input['BotResponseSpecificationArgs']:
        """
        The response that Amazon Lex sends to the user when the intent is closed.
        """
        return pulumi.get(self, "closing_response")
    @closing_response.setter
    def closing_response(self, value: pulumi.Input['BotResponseSpecificationArgs']):
        pulumi.set(self, "closing_response", value)
    @property
    @pulumi.getter(name="isActive")
    def is_active(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the closing setting is active (inferred from the field name; not documented in source — verify against the Lex V2 API).
        """
        return pulumi.get(self, "is_active")
    @is_active.setter
    def is_active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_active", value)
@pulumi.input_type
class BotIntentConfirmationSettingArgs:
    def __init__(__self__, *,
                 declination_response: pulumi.Input['BotResponseSpecificationArgs'],
                 prompt_specification: pulumi.Input['BotPromptSpecificationArgs'],
                 is_active: Optional[pulumi.Input[bool]] = None):
        """
        Prompts that Amazon Lex sends to the user to confirm the completion of an intent.
        """
        pulumi.set(__self__, "declination_response", declination_response)
        pulumi.set(__self__, "prompt_specification", prompt_specification)
        if is_active is not None:
            pulumi.set(__self__, "is_active", is_active)
    @property
    @pulumi.getter(name="declinationResponse")
    def declination_response(self) -> pulumi.Input['BotResponseSpecificationArgs']:
        """
        Response sent to the user when the intent confirmation is declined (inferred from the field name — verify against the Lex V2 API).
        """
        return pulumi.get(self, "declination_response")
    @declination_response.setter
    def declination_response(self, value: pulumi.Input['BotResponseSpecificationArgs']):
        pulumi.set(self, "declination_response", value)
    @property
    @pulumi.getter(name="promptSpecification")
    def prompt_specification(self) -> pulumi.Input['BotPromptSpecificationArgs']:
        """
        The prompt that Amazon Lex sends to the user to confirm the completion of the intent.
        """
        return pulumi.get(self, "prompt_specification")
    @prompt_specification.setter
    def prompt_specification(self, value: pulumi.Input['BotPromptSpecificationArgs']):
        pulumi.set(self, "prompt_specification", value)
    @property
    @pulumi.getter(name="isActive")
    def is_active(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the confirmation setting is active (inferred from the field name; not documented in source).
        """
        return pulumi.get(self, "is_active")
    @is_active.setter
    def is_active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_active", value)
@pulumi.input_type
class BotIntentArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 dialog_code_hook: Optional[pulumi.Input['BotDialogCodeHookSettingArgs']] = None,
                 fulfillment_code_hook: Optional[pulumi.Input['BotFulfillmentCodeHookSettingArgs']] = None,
                 input_contexts: Optional[pulumi.Input[Sequence[pulumi.Input['BotInputContextArgs']]]] = None,
                 intent_closing_setting: Optional[pulumi.Input['BotIntentClosingSettingArgs']] = None,
                 intent_confirmation_setting: Optional[pulumi.Input['BotIntentConfirmationSettingArgs']] = None,
                 kendra_configuration: Optional[pulumi.Input['BotKendraConfigurationArgs']] = None,
                 output_contexts: Optional[pulumi.Input[Sequence[pulumi.Input['BotOutputContextArgs']]]] = None,
                 parent_intent_signature: Optional[pulumi.Input[str]] = None,
                 sample_utterances: Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]]] = None,
                 slot_priorities: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotPriorityArgs']]]] = None,
                 slots: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotArgs']]]] = None):
        """
        An intent represents an action that the user wants to perform. You create a bot to support one or more related intents.
        :param pulumi.Input[str] name: The name of the intent.
        :param pulumi.Input[Sequence[pulumi.Input['BotSlotArgs']]] slots: List of slots
        """
        pulumi.set(__self__, "name", name)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if dialog_code_hook is not None:
            pulumi.set(__self__, "dialog_code_hook", dialog_code_hook)
        if fulfillment_code_hook is not None:
            pulumi.set(__self__, "fulfillment_code_hook", fulfillment_code_hook)
        if input_contexts is not None:
            pulumi.set(__self__, "input_contexts", input_contexts)
        if intent_closing_setting is not None:
            pulumi.set(__self__, "intent_closing_setting", intent_closing_setting)
        if intent_confirmation_setting is not None:
            pulumi.set(__self__, "intent_confirmation_setting", intent_confirmation_setting)
        if kendra_configuration is not None:
            pulumi.set(__self__, "kendra_configuration", kendra_configuration)
        if output_contexts is not None:
            pulumi.set(__self__, "output_contexts", output_contexts)
        if parent_intent_signature is not None:
            pulumi.set(__self__, "parent_intent_signature", parent_intent_signature)
        if sample_utterances is not None:
            pulumi.set(__self__, "sample_utterances", sample_utterances)
        if slot_priorities is not None:
            pulumi.set(__self__, "slot_priorities", slot_priorities)
        if slots is not None:
            pulumi.set(__self__, "slots", slots)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the intent.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the intent.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="dialogCodeHook")
    def dialog_code_hook(self) -> Optional[pulumi.Input['BotDialogCodeHookSettingArgs']]:
        """
        Dialog code hook settings for the intent (see ``BotDialogCodeHookSettingArgs``).
        """
        return pulumi.get(self, "dialog_code_hook")
    @dialog_code_hook.setter
    def dialog_code_hook(self, value: Optional[pulumi.Input['BotDialogCodeHookSettingArgs']]):
        pulumi.set(self, "dialog_code_hook", value)
    @property
    @pulumi.getter(name="fulfillmentCodeHook")
    def fulfillment_code_hook(self) -> Optional[pulumi.Input['BotFulfillmentCodeHookSettingArgs']]:
        """
        Fulfillment code hook settings for the intent (see ``BotFulfillmentCodeHookSettingArgs``).
        """
        return pulumi.get(self, "fulfillment_code_hook")
    @fulfillment_code_hook.setter
    def fulfillment_code_hook(self, value: Optional[pulumi.Input['BotFulfillmentCodeHookSettingArgs']]):
        pulumi.set(self, "fulfillment_code_hook", value)
    @property
    @pulumi.getter(name="inputContexts")
    def input_contexts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotInputContextArgs']]]]:
        """
        Input contexts specified for the intent.
        """
        return pulumi.get(self, "input_contexts")
    @input_contexts.setter
    def input_contexts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotInputContextArgs']]]]):
        pulumi.set(self, "input_contexts", value)
    @property
    @pulumi.getter(name="intentClosingSetting")
    def intent_closing_setting(self) -> Optional[pulumi.Input['BotIntentClosingSettingArgs']]:
        """
        Response that Amazon Lex sends to the user when the intent is closed.
        """
        return pulumi.get(self, "intent_closing_setting")
    @intent_closing_setting.setter
    def intent_closing_setting(self, value: Optional[pulumi.Input['BotIntentClosingSettingArgs']]):
        pulumi.set(self, "intent_closing_setting", value)
    @property
    @pulumi.getter(name="intentConfirmationSetting")
    def intent_confirmation_setting(self) -> Optional[pulumi.Input['BotIntentConfirmationSettingArgs']]:
        """
        Prompts that Amazon Lex sends to the user to confirm the completion of the intent.
        """
        return pulumi.get(self, "intent_confirmation_setting")
    @intent_confirmation_setting.setter
    def intent_confirmation_setting(self, value: Optional[pulumi.Input['BotIntentConfirmationSettingArgs']]):
        pulumi.set(self, "intent_confirmation_setting", value)
    @property
    @pulumi.getter(name="kendraConfiguration")
    def kendra_configuration(self) -> Optional[pulumi.Input['BotKendraConfigurationArgs']]:
        """
        Configuration for searching an Amazon Kendra index specified for the intent.
        """
        return pulumi.get(self, "kendra_configuration")
    @kendra_configuration.setter
    def kendra_configuration(self, value: Optional[pulumi.Input['BotKendraConfigurationArgs']]):
        pulumi.set(self, "kendra_configuration", value)
    @property
    @pulumi.getter(name="outputContexts")
    def output_contexts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotOutputContextArgs']]]]:
        """
        Session contexts that are activated when the intent is fulfilled.
        """
        return pulumi.get(self, "output_contexts")
    @output_contexts.setter
    def output_contexts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotOutputContextArgs']]]]):
        pulumi.set(self, "output_contexts", value)
    @property
    @pulumi.getter(name="parentIntentSignature")
    def parent_intent_signature(self) -> Optional[pulumi.Input[str]]:
        """
        The parent intent signature (semantics not documented in source — presumably identifies a built-in intent; verify against the Lex V2 API).
        """
        return pulumi.get(self, "parent_intent_signature")
    @parent_intent_signature.setter
    def parent_intent_signature(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_intent_signature", value)
    @property
    @pulumi.getter(name="sampleUtterances")
    def sample_utterances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]]]:
        """
        Sample utterances that invoke the intent.
        """
        return pulumi.get(self, "sample_utterances")
    @sample_utterances.setter
    def sample_utterances(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]]]):
        pulumi.set(self, "sample_utterances", value)
    @property
    @pulumi.getter(name="slotPriorities")
    def slot_priorities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotPriorityArgs']]]]:
        """
        Priorities that Amazon Lex should use when eliciting slot values from the user.
        """
        return pulumi.get(self, "slot_priorities")
    @slot_priorities.setter
    def slot_priorities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotPriorityArgs']]]]):
        pulumi.set(self, "slot_priorities", value)
    @property
    @pulumi.getter
    def slots(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotArgs']]]]:
        """
        List of slots
        """
        return pulumi.get(self, "slots")
    @slots.setter
    def slots(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotArgs']]]]):
        pulumi.set(self, "slots", value)
@pulumi.input_type
class BotKendraConfigurationArgs:
    def __init__(__self__, *,
                 kendra_index: pulumi.Input[str],
                 query_filter_string: Optional[pulumi.Input[str]] = None,
                 query_filter_string_enabled: Optional[pulumi.Input[bool]] = None):
        """
        Configuration for searching an Amazon Kendra index specified for the intent.
        :param pulumi.Input[str] kendra_index: The Amazon Kendra index specified for the intent.
        :param pulumi.Input[str] query_filter_string: The custom query string used to query the Amazon Kendra index.
        :param pulumi.Input[bool] query_filter_string_enabled: Determines whether the AMAZON.KendraSearchIntent intent uses a custom query string to query the Amazon Kendra index.
        """
        pulumi.set(__self__, "kendra_index", kendra_index)
        if query_filter_string is not None:
            pulumi.set(__self__, "query_filter_string", query_filter_string)
        if query_filter_string_enabled is not None:
            pulumi.set(__self__, "query_filter_string_enabled", query_filter_string_enabled)
    @property
    @pulumi.getter(name="kendraIndex")
    def kendra_index(self) -> pulumi.Input[str]:
        """
        The Amazon Kendra index specified for the intent.
        """
        return pulumi.get(self, "kendra_index")
    @kendra_index.setter
    def kendra_index(self, value: pulumi.Input[str]):
        pulumi.set(self, "kendra_index", value)
    @property
    @pulumi.getter(name="queryFilterString")
    def query_filter_string(self) -> Optional[pulumi.Input[str]]:
        """
        The custom query string used to query the Amazon Kendra index (used when ``query_filter_string_enabled`` is set).
        """
        return pulumi.get(self, "query_filter_string")
    @query_filter_string.setter
    def query_filter_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query_filter_string", value)
    @property
    @pulumi.getter(name="queryFilterStringEnabled")
    def query_filter_string_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether the AMAZON.KendraSearchIntent intent uses a custom query string to query the Amazon Kendra index.
        """
        return pulumi.get(self, "query_filter_string_enabled")
    @query_filter_string_enabled.setter
    def query_filter_string_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "query_filter_string_enabled", value)
@pulumi.input_type
class BotLocaleArgs:
    def __init__(__self__, *,
                 locale_id: pulumi.Input[str],
                 nlu_confidence_threshold: pulumi.Input[float],
                 description: Optional[pulumi.Input[str]] = None,
                 intents: Optional[pulumi.Input[Sequence[pulumi.Input['BotIntentArgs']]]] = None,
                 slot_types: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotTypeArgs']]]] = None,
                 voice_settings: Optional[pulumi.Input['BotVoiceSettingsArgs']] = None):
        """
        A locale in the bot, which contains the intents and slot types that the bot uses in conversations with users in the specified language and locale.
        :param pulumi.Input[str] locale_id: The identifier of the language and locale.
        :param pulumi.Input[Sequence[pulumi.Input['BotIntentArgs']]] intents: List of intents
        :param pulumi.Input[Sequence[pulumi.Input['BotSlotTypeArgs']]] slot_types: List of SlotTypes
        """
        pulumi.set(__self__, "locale_id", locale_id)
        pulumi.set(__self__, "nlu_confidence_threshold", nlu_confidence_threshold)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if intents is not None:
            pulumi.set(__self__, "intents", intents)
        if slot_types is not None:
            pulumi.set(__self__, "slot_types", slot_types)
        if voice_settings is not None:
            pulumi.set(__self__, "voice_settings", voice_settings)
    @property
    @pulumi.getter(name="localeId")
    def locale_id(self) -> pulumi.Input[str]:
        """
        The identifier of the language and locale.
        """
        return pulumi.get(self, "locale_id")
    @locale_id.setter
    def locale_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "locale_id", value)
    @property
    @pulumi.getter(name="nluConfidenceThreshold")
    def nlu_confidence_threshold(self) -> pulumi.Input[float]:
        """
        Confidence threshold for NLU intent classification (semantics not documented in source — verify against the Lex V2 API).
        """
        return pulumi.get(self, "nlu_confidence_threshold")
    @nlu_confidence_threshold.setter
    def nlu_confidence_threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "nlu_confidence_threshold", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the bot locale.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def intents(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotIntentArgs']]]]:
        """
        List of intents
        """
        return pulumi.get(self, "intents")
    @intents.setter
    def intents(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotIntentArgs']]]]):
        pulumi.set(self, "intents", value)
    @property
    @pulumi.getter(name="slotTypes")
    def slot_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotTypeArgs']]]]:
        """
        List of SlotTypes
        """
        return pulumi.get(self, "slot_types")
    @slot_types.setter
    def slot_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotTypeArgs']]]]):
        pulumi.set(self, "slot_types", value)
    @property
    @pulumi.getter(name="voiceSettings")
    def voice_settings(self) -> Optional[pulumi.Input['BotVoiceSettingsArgs']]:
        """
        Voice settings for the locale (see ``BotVoiceSettingsArgs``).
        """
        return pulumi.get(self, "voice_settings")
    @voice_settings.setter
    def voice_settings(self, value: Optional[pulumi.Input['BotVoiceSettingsArgs']]):
        pulumi.set(self, "voice_settings", value)
@pulumi.input_type
class BotMessageGroupArgs:
    def __init__(__self__, *,
                 message: pulumi.Input['BotMessageArgs'],
                 variations: Optional[pulumi.Input[Sequence[pulumi.Input['BotMessageArgs']]]] = None):
        """
        One or more messages that Amazon Lex can send to the user.
        :param pulumi.Input['BotMessageArgs'] message: The primary message that Amazon Lex should send to the user.
        :param pulumi.Input[Sequence[pulumi.Input['BotMessageArgs']]] variations: Message variations to send to the user.
        """
        pulumi.set(__self__, "message", message)
        if variations is not None:
            pulumi.set(__self__, "variations", variations)
    @property
    @pulumi.getter
    def message(self) -> pulumi.Input['BotMessageArgs']:
        """
        The primary message that Amazon Lex should send to the user.
        """
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: pulumi.Input['BotMessageArgs']):
        pulumi.set(self, "message", value)
    @property
    @pulumi.getter
    def variations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotMessageArgs']]]]:
        """
        Message variations to send to the user.
        """
        return pulumi.get(self, "variations")
    @variations.setter
    def variations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotMessageArgs']]]]):
        pulumi.set(self, "variations", value)
@pulumi.input_type
class BotMessageArgs:
    def __init__(__self__, *,
                 custom_payload: Optional[pulumi.Input['BotCustomPayloadArgs']] = None,
                 image_response_card: Optional[pulumi.Input['BotImageResponseCardArgs']] = None,
                 plain_text_message: Optional[pulumi.Input['BotPlainTextMessageArgs']] = None,
                 s_sml_message: Optional[pulumi.Input['BotSSMLMessageArgs']] = None):
        """
        The primary message that Amazon Lex should send to the user.
        Exactly one of the message formats below is expected to be provided
        (inferred from the mutually-optional fields — verify against the Lex V2 API).
        """
        if custom_payload is not None:
            pulumi.set(__self__, "custom_payload", custom_payload)
        if image_response_card is not None:
            pulumi.set(__self__, "image_response_card", image_response_card)
        if plain_text_message is not None:
            pulumi.set(__self__, "plain_text_message", plain_text_message)
        if s_sml_message is not None:
            pulumi.set(__self__, "s_sml_message", s_sml_message)
    @property
    @pulumi.getter(name="customPayload")
    def custom_payload(self) -> Optional[pulumi.Input['BotCustomPayloadArgs']]:
        """
        A message in a custom payload format (see ``BotCustomPayloadArgs``).
        """
        return pulumi.get(self, "custom_payload")
    @custom_payload.setter
    def custom_payload(self, value: Optional[pulumi.Input['BotCustomPayloadArgs']]):
        pulumi.set(self, "custom_payload", value)
    @property
    @pulumi.getter(name="imageResponseCard")
    def image_response_card(self) -> Optional[pulumi.Input['BotImageResponseCardArgs']]:
        """
        A message that defines a response card that the client application can show to the user.
        """
        return pulumi.get(self, "image_response_card")
    @image_response_card.setter
    def image_response_card(self, value: Optional[pulumi.Input['BotImageResponseCardArgs']]):
        pulumi.set(self, "image_response_card", value)
    @property
    @pulumi.getter(name="plainTextMessage")
    def plain_text_message(self) -> Optional[pulumi.Input['BotPlainTextMessageArgs']]:
        """
        A message in plain text format.
        """
        return pulumi.get(self, "plain_text_message")
    @plain_text_message.setter
    def plain_text_message(self, value: Optional[pulumi.Input['BotPlainTextMessageArgs']]):
        pulumi.set(self, "plain_text_message", value)
    @property
    @pulumi.getter(name="sSMLMessage")
    def s_sml_message(self) -> Optional[pulumi.Input['BotSSMLMessageArgs']]:
        """
        A message in Speech Synthesis Markup Language (SSML).
        """
        return pulumi.get(self, "s_sml_message")
    @s_sml_message.setter
    def s_sml_message(self, value: Optional[pulumi.Input['BotSSMLMessageArgs']]):
        pulumi.set(self, "s_sml_message", value)
@pulumi.input_type
class BotMultipleValuesSettingArgs:
    def __init__(__self__, *,
                 allow_multiple_values: Optional[pulumi.Input[bool]] = None):
        """
        Indicates whether a slot can return multiple values.
        :param pulumi.Input[bool] allow_multiple_values: Indicates whether the slot can return multiple values.
        """
        if allow_multiple_values is not None:
            pulumi.set(__self__, "allow_multiple_values", allow_multiple_values)
    @property
    @pulumi.getter(name="allowMultipleValues")
    def allow_multiple_values(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the slot can return multiple values.
        """
        return pulumi.get(self, "allow_multiple_values")
    @allow_multiple_values.setter
    def allow_multiple_values(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_multiple_values", value)
@pulumi.input_type
class BotObfuscationSettingArgs:
    def __init__(__self__, *,
                 obfuscation_setting_type: pulumi.Input['BotObfuscationSettingObfuscationSettingType']):
        """
        Determines whether Amazon Lex obscures slot values in conversation logs.
        :param pulumi.Input['BotObfuscationSettingObfuscationSettingType'] obfuscation_setting_type: Value that determines whether Amazon Lex obscures slot values in conversation logs. The default is to obscure the values.
        """
        pulumi.set(__self__, "obfuscation_setting_type", obfuscation_setting_type)
    @property
    @pulumi.getter(name="obfuscationSettingType")
    def obfuscation_setting_type(self) -> pulumi.Input['BotObfuscationSettingObfuscationSettingType']:
        """
        Value that determines whether Amazon Lex obscures slot values in conversation logs. The default is to obscure the values.
        """
        return pulumi.get(self, "obfuscation_setting_type")
    @obfuscation_setting_type.setter
    def obfuscation_setting_type(self, value: pulumi.Input['BotObfuscationSettingObfuscationSettingType']):
        """Set the value that determines whether Amazon Lex obscures slot values in conversation logs."""
        pulumi.set(self, "obfuscation_setting_type", value)
@pulumi.input_type
class BotOutputContextArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 time_to_live_in_seconds: pulumi.Input[int],
                 turns_to_live: pulumi.Input[int]):
        """
        A session context that is activated when an intent is fulfilled.
        :param pulumi.Input[str] name: The name of the output context.
        :param pulumi.Input[int] time_to_live_in_seconds: Time, in seconds, that the context remains active (inferred from the field name — verify against the Lex V2 API).
        :param pulumi.Input[int] turns_to_live: Number of conversation turns that the context remains active (inferred from the field name — verify against the Lex V2 API).
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "time_to_live_in_seconds", time_to_live_in_seconds)
        pulumi.set(__self__, "turns_to_live", turns_to_live)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the output context.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="timeToLiveInSeconds")
    def time_to_live_in_seconds(self) -> pulumi.Input[int]:
        """
        Time, in seconds, that the context remains active (inferred from the field name).
        """
        return pulumi.get(self, "time_to_live_in_seconds")
    @time_to_live_in_seconds.setter
    def time_to_live_in_seconds(self, value: pulumi.Input[int]):
        pulumi.set(self, "time_to_live_in_seconds", value)
    @property
    @pulumi.getter(name="turnsToLive")
    def turns_to_live(self) -> pulumi.Input[int]:
        """
        Number of conversation turns that the context remains active (inferred from the field name).
        """
        return pulumi.get(self, "turns_to_live")
    @turns_to_live.setter
    def turns_to_live(self, value: pulumi.Input[int]):
        pulumi.set(self, "turns_to_live", value)
@pulumi.input_type
class BotPlainTextMessageArgs:
    def __init__(__self__, *,
                 value: pulumi.Input[str]):
        """
        A message in plain text format.
        :param pulumi.Input[str] value: The message to send to the user.
        """
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The message to send to the user.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        """Set the message to send to the user."""
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotPostFulfillmentStatusSpecificationArgs:
    def __init__(__self__, *,
                 failure_response: Optional[pulumi.Input['BotResponseSpecificationArgs']] = None,
                 success_response: Optional[pulumi.Input['BotResponseSpecificationArgs']] = None,
                 timeout_response: Optional[pulumi.Input['BotResponseSpecificationArgs']] = None):
        """
        Provides information for updating the user on the progress of fulfilling an intent.
        :param pulumi.Input['BotResponseSpecificationArgs'] failure_response: Response to send when fulfillment fails (inferred from the field name).
        :param pulumi.Input['BotResponseSpecificationArgs'] success_response: Response to send when fulfillment succeeds (inferred from the field name).
        :param pulumi.Input['BotResponseSpecificationArgs'] timeout_response: Response to send when fulfillment times out (inferred from the field name).
        """
        if failure_response is not None:
            pulumi.set(__self__, "failure_response", failure_response)
        if success_response is not None:
            pulumi.set(__self__, "success_response", success_response)
        if timeout_response is not None:
            pulumi.set(__self__, "timeout_response", timeout_response)
    @property
    @pulumi.getter(name="failureResponse")
    def failure_response(self) -> Optional[pulumi.Input['BotResponseSpecificationArgs']]:
        """
        Response to send when fulfillment fails (inferred from the field name).
        """
        return pulumi.get(self, "failure_response")
    @failure_response.setter
    def failure_response(self, value: Optional[pulumi.Input['BotResponseSpecificationArgs']]):
        pulumi.set(self, "failure_response", value)
    @property
    @pulumi.getter(name="successResponse")
    def success_response(self) -> Optional[pulumi.Input['BotResponseSpecificationArgs']]:
        """
        Response to send when fulfillment succeeds (inferred from the field name).
        """
        return pulumi.get(self, "success_response")
    @success_response.setter
    def success_response(self, value: Optional[pulumi.Input['BotResponseSpecificationArgs']]):
        pulumi.set(self, "success_response", value)
    @property
    @pulumi.getter(name="timeoutResponse")
    def timeout_response(self) -> Optional[pulumi.Input['BotResponseSpecificationArgs']]:
        """
        Response to send when fulfillment times out (inferred from the field name).
        """
        return pulumi.get(self, "timeout_response")
    @timeout_response.setter
    def timeout_response(self, value: Optional[pulumi.Input['BotResponseSpecificationArgs']]):
        pulumi.set(self, "timeout_response", value)
@pulumi.input_type
class BotPromptSpecificationArgs:
    def __init__(__self__, *,
                 max_retries: pulumi.Input[int],
                 message_groups_list: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]],
                 allow_interrupt: Optional[pulumi.Input[bool]] = None):
        """
        Prompts the user to confirm the intent.
        :param pulumi.Input[int] max_retries: The maximum number of times to retry the prompt (inferred from the field name).
        :param pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]] message_groups_list: Message groups that Amazon Lex sends to the user as the prompt.
        :param pulumi.Input[bool] allow_interrupt: Indicates whether the user can interrupt a speech prompt from the bot.
        """
        pulumi.set(__self__, "max_retries", max_retries)
        pulumi.set(__self__, "message_groups_list", message_groups_list)
        if allow_interrupt is not None:
            pulumi.set(__self__, "allow_interrupt", allow_interrupt)
    @property
    @pulumi.getter(name="maxRetries")
    def max_retries(self) -> pulumi.Input[int]:
        """
        The maximum number of times to retry the prompt (inferred from the field name).
        """
        return pulumi.get(self, "max_retries")
    @max_retries.setter
    def max_retries(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_retries", value)
    @property
    @pulumi.getter(name="messageGroupsList")
    def message_groups_list(self) -> pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]:
        """
        Message groups that Amazon Lex sends to the user as the prompt.
        """
        return pulumi.get(self, "message_groups_list")
    @message_groups_list.setter
    def message_groups_list(self, value: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]):
        pulumi.set(self, "message_groups_list", value)
    @property
    @pulumi.getter(name="allowInterrupt")
    def allow_interrupt(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the user can interrupt a speech prompt from the bot.
        """
        return pulumi.get(self, "allow_interrupt")
    @allow_interrupt.setter
    def allow_interrupt(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_interrupt", value)
@pulumi.input_type
class BotResponseSpecificationArgs:
    def __init__(__self__, *,
                 message_groups_list: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]],
                 allow_interrupt: Optional[pulumi.Input[bool]] = None):
        """
        A list of message groups that Amazon Lex uses to respond to the user input.
        :param pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]] message_groups_list: Message groups that Amazon Lex uses to respond to the user input.
        :param pulumi.Input[bool] allow_interrupt: Indicates whether the user can interrupt a speech prompt from the bot.
        """
        pulumi.set(__self__, "message_groups_list", message_groups_list)
        if allow_interrupt is not None:
            pulumi.set(__self__, "allow_interrupt", allow_interrupt)
    @property
    @pulumi.getter(name="messageGroupsList")
    def message_groups_list(self) -> pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]:
        """
        Message groups that Amazon Lex uses to respond to the user input.
        """
        return pulumi.get(self, "message_groups_list")
    @message_groups_list.setter
    def message_groups_list(self, value: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]):
        pulumi.set(self, "message_groups_list", value)
    @property
    @pulumi.getter(name="allowInterrupt")
    def allow_interrupt(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the user can interrupt a speech prompt from the bot.
        """
        return pulumi.get(self, "allow_interrupt")
    @allow_interrupt.setter
    def allow_interrupt(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_interrupt", value)
@pulumi.input_type
class BotS3LocationArgs:
    def __init__(__self__, *,
                 s3_bucket: pulumi.Input[str],
                 s3_object_key: pulumi.Input[str],
                 s3_object_version: Optional[pulumi.Input[str]] = None):
        """
        S3 location of bot definitions zip file, if it's not defined inline in CloudFormation.
        :param pulumi.Input[str] s3_bucket: An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.
        :param pulumi.Input[str] s3_object_key: The Amazon S3 key of the deployment package.
        :param pulumi.Input[str] s3_object_version: For versioned objects, the version of the deployment package object to use. If not specified, the current object version will be used.
        """
        pulumi.set(__self__, "s3_bucket", s3_bucket)
        pulumi.set(__self__, "s3_object_key", s3_object_key)
        if s3_object_version is not None:
            pulumi.set(__self__, "s3_object_version", s3_object_version)
    @property
    @pulumi.getter(name="s3Bucket")
    def s3_bucket(self) -> pulumi.Input[str]:
        """
        An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.
        """
        return pulumi.get(self, "s3_bucket")
    @s3_bucket.setter
    def s3_bucket(self, value: pulumi.Input[str]):
        """Set the Amazon S3 bucket that holds the deployment package."""
        pulumi.set(self, "s3_bucket", value)
    @property
    @pulumi.getter(name="s3ObjectKey")
    def s3_object_key(self) -> pulumi.Input[str]:
        """
        The Amazon S3 key of the deployment package.
        """
        return pulumi.get(self, "s3_object_key")
    @s3_object_key.setter
    def s3_object_key(self, value: pulumi.Input[str]):
        """Set the Amazon S3 key of the deployment package."""
        pulumi.set(self, "s3_object_key", value)
    @property
    @pulumi.getter(name="s3ObjectVersion")
    def s3_object_version(self) -> Optional[pulumi.Input[str]]:
        """
        For versioned objects, the version of the deployment package object to use. If not specified, the current object version will be used.
        """
        return pulumi.get(self, "s3_object_version")
    @s3_object_version.setter
    def s3_object_version(self, value: Optional[pulumi.Input[str]]):
        """Set the version of the deployment package object to use (versioned objects only)."""
        pulumi.set(self, "s3_object_version", value)
@pulumi.input_type
class BotSSMLMessageArgs:
    def __init__(__self__, *,
                 value: pulumi.Input[str]):
        """
        A message in Speech Synthesis Markup Language (SSML).
        :param pulumi.Input[str] value: The SSML text that defines the prompt.
        """
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The SSML text that defines the prompt.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        """Set the SSML text that defines the prompt."""
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotSampleUtteranceArgs:
    def __init__(__self__, *,
                 utterance: pulumi.Input[str]):
        """
        A sample utterance that invokes an intent or responds to a slot elicitation prompt.
        :param pulumi.Input[str] utterance: The sample utterance text.
        """
        pulumi.set(__self__, "utterance", utterance)
    @property
    @pulumi.getter
    def utterance(self) -> pulumi.Input[str]:
        """
        The sample utterance text.
        """
        return pulumi.get(self, "utterance")
    @utterance.setter
    def utterance(self, value: pulumi.Input[str]):
        pulumi.set(self, "utterance", value)
@pulumi.input_type
class BotSampleValueArgs:
    def __init__(__self__, *,
                 value: pulumi.Input[str]):
        """
        Defines one of the values for a slot type.
        :param pulumi.Input[str] value: The value that can be used for a slot type.
        """
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value that can be used for a slot type.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        """Set the value that can be used for a slot type."""
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotSlotDefaultValueSpecificationArgs:
    def __init__(__self__, *,
                 default_value_list: pulumi.Input[Sequence[pulumi.Input['BotSlotDefaultValueArgs']]]):
        """
        A list of values that Amazon Lex should use as the default value for a slot.
        :param pulumi.Input[Sequence[pulumi.Input['BotSlotDefaultValueArgs']]] default_value_list: A list of slot default values
        """
        pulumi.set(__self__, "default_value_list", default_value_list)
    @property
    @pulumi.getter(name="defaultValueList")
    def default_value_list(self) -> pulumi.Input[Sequence[pulumi.Input['BotSlotDefaultValueArgs']]]:
        """
        A list of slot default values
        """
        return pulumi.get(self, "default_value_list")
    @default_value_list.setter
    def default_value_list(self, value: pulumi.Input[Sequence[pulumi.Input['BotSlotDefaultValueArgs']]]):
        """Set the list of slot default values."""
        pulumi.set(self, "default_value_list", value)
@pulumi.input_type
class BotSlotDefaultValueArgs:
    def __init__(__self__, *,
                 default_value: pulumi.Input[str]):
        """
        The default value to use when a user doesn't provide a value for a slot.
        :param pulumi.Input[str] default_value: The default value to use when a user doesn't provide a value for a slot.
        """
        pulumi.set(__self__, "default_value", default_value)
    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> pulumi.Input[str]:
        """
        The default value to use when a user doesn't provide a value for a slot.
        """
        return pulumi.get(self, "default_value")
    @default_value.setter
    def default_value(self, value: pulumi.Input[str]):
        """Set the default value to use when a user doesn't provide a value for a slot."""
        pulumi.set(self, "default_value", value)
@pulumi.input_type
class BotSlotPriorityArgs:
    def __init__(__self__, *,
                 priority: pulumi.Input[int],
                 slot_name: pulumi.Input[str]):
        """
        The priority that Amazon Lex should use when eliciting slot values from a user.
        :param pulumi.Input[int] priority: The priority that Amazon Lex should use when eliciting the slot value from the user.
        :param pulumi.Input[str] slot_name: The name of the slot.
        """
        pulumi.set(__self__, "priority", priority)
        pulumi.set(__self__, "slot_name", slot_name)
    @property
    @pulumi.getter
    def priority(self) -> pulumi.Input[int]:
        """
        The priority that Amazon Lex should use when eliciting the slot value from the user.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: pulumi.Input[int]):
        pulumi.set(self, "priority", value)
    @property
    @pulumi.getter(name="slotName")
    def slot_name(self) -> pulumi.Input[str]:
        """
        The name of the slot.
        """
        return pulumi.get(self, "slot_name")
    @slot_name.setter
    def slot_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "slot_name", value)
@pulumi.input_type
class BotSlotTypeValueArgs:
    def __init__(__self__, *,
                 sample_value: pulumi.Input['BotSampleValueArgs'],
                 synonyms: Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleValueArgs']]]] = None):
        """
        Value that the slot type can take.
        :param pulumi.Input['BotSampleValueArgs'] sample_value: The value that the slot type can take.
        :param pulumi.Input[Sequence[pulumi.Input['BotSampleValueArgs']]] synonyms: Additional values (synonyms) for the sample value (inferred from the field name — verify against the Lex V2 API).
        """
        pulumi.set(__self__, "sample_value", sample_value)
        if synonyms is not None:
            pulumi.set(__self__, "synonyms", synonyms)
    @property
    @pulumi.getter(name="sampleValue")
    def sample_value(self) -> pulumi.Input['BotSampleValueArgs']:
        """
        The value that the slot type can take.
        """
        return pulumi.get(self, "sample_value")
    @sample_value.setter
    def sample_value(self, value: pulumi.Input['BotSampleValueArgs']):
        pulumi.set(self, "sample_value", value)
    @property
    @pulumi.getter
    def synonyms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleValueArgs']]]]:
        """
        Additional values (synonyms) for the sample value (inferred from the field name).
        """
        return pulumi.get(self, "synonyms")
    @synonyms.setter
    def synonyms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleValueArgs']]]]):
        pulumi.set(self, "synonyms", value)
@pulumi.input_type
class BotSlotTypeArgs:
    """A custom, extended built-in or a grammar slot type."""
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 external_source_setting: Optional[pulumi.Input['BotExternalSourceSettingArgs']] = None,
                 parent_slot_type_signature: Optional[pulumi.Input[str]] = None,
                 slot_type_values: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotTypeValueArgs']]]] = None,
                 value_selection_setting: Optional[pulumi.Input['BotSlotValueSelectionSettingArgs']] = None):
        """
        A custom, extended built-in or a grammar slot type.
        :param pulumi.Input[str] name: The name of the slot type.
        :param pulumi.Input[str] description: A description of the slot type.
        :param pulumi.Input['BotExternalSourceSettingArgs'] external_source_setting: Settings for an external source of slot values.
        :param pulumi.Input[str] parent_slot_type_signature: The built-in slot type used as a parent of this slot type.
        :param pulumi.Input[Sequence[pulumi.Input['BotSlotTypeValueArgs']]] slot_type_values: The values that the slot type can take.
        :param pulumi.Input['BotSlotValueSelectionSettingArgs'] value_selection_setting: Settings used to select a slot value.
        """
        pulumi.set(__self__, "name", name)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if external_source_setting is not None:
            pulumi.set(__self__, "external_source_setting", external_source_setting)
        if parent_slot_type_signature is not None:
            pulumi.set(__self__, "parent_slot_type_signature", parent_slot_type_signature)
        if slot_type_values is not None:
            pulumi.set(__self__, "slot_type_values", slot_type_values)
        if value_selection_setting is not None:
            pulumi.set(__self__, "value_selection_setting", value_selection_setting)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the slot type.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the slot type.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="externalSourceSetting")
    def external_source_setting(self) -> Optional[pulumi.Input['BotExternalSourceSettingArgs']]:
        """
        Settings for an external source of slot values.
        """
        return pulumi.get(self, "external_source_setting")
    @external_source_setting.setter
    def external_source_setting(self, value: Optional[pulumi.Input['BotExternalSourceSettingArgs']]):
        pulumi.set(self, "external_source_setting", value)
    @property
    @pulumi.getter(name="parentSlotTypeSignature")
    def parent_slot_type_signature(self) -> Optional[pulumi.Input[str]]:
        """
        The built-in slot type used as a parent of this slot type.
        """
        return pulumi.get(self, "parent_slot_type_signature")
    @parent_slot_type_signature.setter
    def parent_slot_type_signature(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_slot_type_signature", value)
    @property
    @pulumi.getter(name="slotTypeValues")
    def slot_type_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotTypeValueArgs']]]]:
        """
        The values that the slot type can take.
        """
        return pulumi.get(self, "slot_type_values")
    @slot_type_values.setter
    def slot_type_values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSlotTypeValueArgs']]]]):
        pulumi.set(self, "slot_type_values", value)
    @property
    @pulumi.getter(name="valueSelectionSetting")
    def value_selection_setting(self) -> Optional[pulumi.Input['BotSlotValueSelectionSettingArgs']]:
        """
        Settings used to select a slot value.
        """
        return pulumi.get(self, "value_selection_setting")
    @value_selection_setting.setter
    def value_selection_setting(self, value: Optional[pulumi.Input['BotSlotValueSelectionSettingArgs']]):
        pulumi.set(self, "value_selection_setting", value)
@pulumi.input_type
class BotSlotValueElicitationSettingArgs:
    """Settings that you can use for eliciting a slot value."""
    def __init__(__self__, *,
                 slot_constraint: pulumi.Input['BotSlotConstraint'],
                 default_value_specification: Optional[pulumi.Input['BotSlotDefaultValueSpecificationArgs']] = None,
                 prompt_specification: Optional[pulumi.Input['BotPromptSpecificationArgs']] = None,
                 sample_utterances: Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]]] = None,
                 wait_and_continue_specification: Optional[pulumi.Input['BotWaitAndContinueSpecificationArgs']] = None):
        """
        Settings that you can use for eliciting a slot value.
        :param pulumi.Input['BotSlotConstraint'] slot_constraint: Specifies whether the slot is required or optional.
        :param pulumi.Input['BotSlotDefaultValueSpecificationArgs'] default_value_specification: A list of default values for a slot.
        :param pulumi.Input['BotPromptSpecificationArgs'] prompt_specification: The prompt that Amazon Lex uses to elicit the slot value from the user.
        :param pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]] sample_utterances: If you know a specific pattern that users might respond to an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy.
        :param pulumi.Input['BotWaitAndContinueSpecificationArgs'] wait_and_continue_specification: Specifies the prompts that Amazon Lex uses while a bot is waiting for customer input.
        """
        pulumi.set(__self__, "slot_constraint", slot_constraint)
        if default_value_specification is not None:
            pulumi.set(__self__, "default_value_specification", default_value_specification)
        if prompt_specification is not None:
            pulumi.set(__self__, "prompt_specification", prompt_specification)
        if sample_utterances is not None:
            pulumi.set(__self__, "sample_utterances", sample_utterances)
        if wait_and_continue_specification is not None:
            pulumi.set(__self__, "wait_and_continue_specification", wait_and_continue_specification)
    @property
    @pulumi.getter(name="slotConstraint")
    def slot_constraint(self) -> pulumi.Input['BotSlotConstraint']:
        """
        Specifies whether the slot is required or optional.
        """
        return pulumi.get(self, "slot_constraint")
    @slot_constraint.setter
    def slot_constraint(self, value: pulumi.Input['BotSlotConstraint']):
        pulumi.set(self, "slot_constraint", value)
    @property
    @pulumi.getter(name="defaultValueSpecification")
    def default_value_specification(self) -> Optional[pulumi.Input['BotSlotDefaultValueSpecificationArgs']]:
        """
        A list of default values for a slot.
        """
        return pulumi.get(self, "default_value_specification")
    @default_value_specification.setter
    def default_value_specification(self, value: Optional[pulumi.Input['BotSlotDefaultValueSpecificationArgs']]):
        pulumi.set(self, "default_value_specification", value)
    @property
    @pulumi.getter(name="promptSpecification")
    def prompt_specification(self) -> Optional[pulumi.Input['BotPromptSpecificationArgs']]:
        """
        The prompt that Amazon Lex uses to elicit the slot value from the user.
        """
        return pulumi.get(self, "prompt_specification")
    @prompt_specification.setter
    def prompt_specification(self, value: Optional[pulumi.Input['BotPromptSpecificationArgs']]):
        pulumi.set(self, "prompt_specification", value)
    @property
    @pulumi.getter(name="sampleUtterances")
    def sample_utterances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]]]:
        """
        If you know a specific pattern that users might respond to an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy.
        """
        return pulumi.get(self, "sample_utterances")
    @sample_utterances.setter
    def sample_utterances(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BotSampleUtteranceArgs']]]]):
        pulumi.set(self, "sample_utterances", value)
    @property
    @pulumi.getter(name="waitAndContinueSpecification")
    def wait_and_continue_specification(self) -> Optional[pulumi.Input['BotWaitAndContinueSpecificationArgs']]:
        """
        Specifies the prompts that Amazon Lex uses while a bot is waiting for customer input.
        """
        return pulumi.get(self, "wait_and_continue_specification")
    @wait_and_continue_specification.setter
    def wait_and_continue_specification(self, value: Optional[pulumi.Input['BotWaitAndContinueSpecificationArgs']]):
        pulumi.set(self, "wait_and_continue_specification", value)
@pulumi.input_type
class BotSlotValueRegexFilterArgs:
    """A regular expression used to validate the value of a slot."""
    def __init__(__self__, *,
                 pattern: pulumi.Input[str]):
        """
        A regular expression used to validate the value of a slot.
        :param pulumi.Input[str] pattern: Regex pattern
        """
        pulumi.set(__self__, "pattern", pattern)
    @property
    @pulumi.getter
    def pattern(self) -> pulumi.Input[str]:
        """
        Regex pattern
        """
        return pulumi.get(self, "pattern")
    @pattern.setter
    def pattern(self, value: pulumi.Input[str]):
        pulumi.set(self, "pattern", value)
@pulumi.input_type
class BotSlotValueSelectionSettingArgs:
    """Contains settings used by Amazon Lex to select a slot value."""
    def __init__(__self__, *,
                 resolution_strategy: pulumi.Input['BotSlotValueResolutionStrategy'],
                 regex_filter: Optional[pulumi.Input['BotSlotValueRegexFilterArgs']] = None):
        """
        Contains settings used by Amazon Lex to select a slot value.
        :param pulumi.Input['BotSlotValueResolutionStrategy'] resolution_strategy: The strategy used to resolve the slot value.
        :param pulumi.Input['BotSlotValueRegexFilterArgs'] regex_filter: A regular expression used to validate the slot value.
        """
        pulumi.set(__self__, "resolution_strategy", resolution_strategy)
        if regex_filter is not None:
            pulumi.set(__self__, "regex_filter", regex_filter)
    @property
    @pulumi.getter(name="resolutionStrategy")
    def resolution_strategy(self) -> pulumi.Input['BotSlotValueResolutionStrategy']:
        """
        The strategy used to resolve the slot value.
        """
        return pulumi.get(self, "resolution_strategy")
    @resolution_strategy.setter
    def resolution_strategy(self, value: pulumi.Input['BotSlotValueResolutionStrategy']):
        pulumi.set(self, "resolution_strategy", value)
    @property
    @pulumi.getter(name="regexFilter")
    def regex_filter(self) -> Optional[pulumi.Input['BotSlotValueRegexFilterArgs']]:
        """
        A regular expression used to validate the slot value.
        """
        return pulumi.get(self, "regex_filter")
    @regex_filter.setter
    def regex_filter(self, value: Optional[pulumi.Input['BotSlotValueRegexFilterArgs']]):
        pulumi.set(self, "regex_filter", value)
@pulumi.input_type
class BotSlotArgs:
    """A slot is a variable needed to fulfill an intent, where an intent can require zero or more slots."""
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 slot_type_name: pulumi.Input[str],
                 value_elicitation_setting: pulumi.Input['BotSlotValueElicitationSettingArgs'],
                 description: Optional[pulumi.Input[str]] = None,
                 multiple_values_setting: Optional[pulumi.Input['BotMultipleValuesSettingArgs']] = None,
                 obfuscation_setting: Optional[pulumi.Input['BotObfuscationSettingArgs']] = None):
        """
        A slot is a variable needed to fulfill an intent, where an intent can require zero or more slots.
        :param pulumi.Input[str] name: The name of the slot.
        :param pulumi.Input[str] slot_type_name: The name of the slot type this slot is based on.
        :param pulumi.Input['BotSlotValueElicitationSettingArgs'] value_elicitation_setting: Settings used to elicit the slot value.
        :param pulumi.Input[str] description: A description of the slot.
        :param pulumi.Input['BotMultipleValuesSettingArgs'] multiple_values_setting: Settings for whether the slot can hold multiple values.
        :param pulumi.Input['BotObfuscationSettingArgs'] obfuscation_setting: Settings for obfuscating the slot value in logs.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "slot_type_name", slot_type_name)
        pulumi.set(__self__, "value_elicitation_setting", value_elicitation_setting)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if multiple_values_setting is not None:
            pulumi.set(__self__, "multiple_values_setting", multiple_values_setting)
        if obfuscation_setting is not None:
            pulumi.set(__self__, "obfuscation_setting", obfuscation_setting)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the slot.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="slotTypeName")
    def slot_type_name(self) -> pulumi.Input[str]:
        """
        The name of the slot type this slot is based on.
        """
        return pulumi.get(self, "slot_type_name")
    @slot_type_name.setter
    def slot_type_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "slot_type_name", value)
    @property
    @pulumi.getter(name="valueElicitationSetting")
    def value_elicitation_setting(self) -> pulumi.Input['BotSlotValueElicitationSettingArgs']:
        """
        Settings used to elicit the slot value.
        """
        return pulumi.get(self, "value_elicitation_setting")
    @value_elicitation_setting.setter
    def value_elicitation_setting(self, value: pulumi.Input['BotSlotValueElicitationSettingArgs']):
        pulumi.set(self, "value_elicitation_setting", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the slot.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="multipleValuesSetting")
    def multiple_values_setting(self) -> Optional[pulumi.Input['BotMultipleValuesSettingArgs']]:
        """
        Settings for whether the slot can hold multiple values.
        """
        return pulumi.get(self, "multiple_values_setting")
    @multiple_values_setting.setter
    def multiple_values_setting(self, value: Optional[pulumi.Input['BotMultipleValuesSettingArgs']]):
        pulumi.set(self, "multiple_values_setting", value)
    @property
    @pulumi.getter(name="obfuscationSetting")
    def obfuscation_setting(self) -> Optional[pulumi.Input['BotObfuscationSettingArgs']]:
        """
        Settings for obfuscating the slot value in logs.
        """
        return pulumi.get(self, "obfuscation_setting")
    @obfuscation_setting.setter
    def obfuscation_setting(self, value: Optional[pulumi.Input['BotObfuscationSettingArgs']]):
        pulumi.set(self, "obfuscation_setting", value)
@pulumi.input_type
class BotStillWaitingResponseSpecificationArgs:
    """The periodic response a bot sends while still waiting for customer input."""
    def __init__(__self__, *,
                 frequency_in_seconds: pulumi.Input[int],
                 message_groups_list: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]],
                 timeout_in_seconds: pulumi.Input[int],
                 allow_interrupt: Optional[pulumi.Input[bool]] = None):
        """
        StillWaitingResponseSpecification.
        :param pulumi.Input[int] frequency_in_seconds: How often (in seconds) the still-waiting response is sent.
        :param pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]] message_groups_list: The message groups to send as the still-waiting response.
        :param pulumi.Input[int] timeout_in_seconds: How long (in seconds) to keep sending the response before giving up.
        :param pulumi.Input[bool] allow_interrupt: Indicates whether the user can interrupt a speech prompt from the bot.
        """
        pulumi.set(__self__, "frequency_in_seconds", frequency_in_seconds)
        pulumi.set(__self__, "message_groups_list", message_groups_list)
        pulumi.set(__self__, "timeout_in_seconds", timeout_in_seconds)
        if allow_interrupt is not None:
            pulumi.set(__self__, "allow_interrupt", allow_interrupt)
    @property
    @pulumi.getter(name="frequencyInSeconds")
    def frequency_in_seconds(self) -> pulumi.Input[int]:
        """
        How often (in seconds) the still-waiting response is sent.
        """
        return pulumi.get(self, "frequency_in_seconds")
    @frequency_in_seconds.setter
    def frequency_in_seconds(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_in_seconds", value)
    @property
    @pulumi.getter(name="messageGroupsList")
    def message_groups_list(self) -> pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]:
        """
        The message groups to send as the still-waiting response.
        """
        return pulumi.get(self, "message_groups_list")
    @message_groups_list.setter
    def message_groups_list(self, value: pulumi.Input[Sequence[pulumi.Input['BotMessageGroupArgs']]]):
        pulumi.set(self, "message_groups_list", value)
    @property
    @pulumi.getter(name="timeoutInSeconds")
    def timeout_in_seconds(self) -> pulumi.Input[int]:
        """
        How long (in seconds) to keep sending the response before giving up.
        """
        return pulumi.get(self, "timeout_in_seconds")
    @timeout_in_seconds.setter
    def timeout_in_seconds(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout_in_seconds", value)
    @property
    @pulumi.getter(name="allowInterrupt")
    def allow_interrupt(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the user can interrupt a speech prompt from the bot.
        """
        return pulumi.get(self, "allow_interrupt")
    @allow_interrupt.setter
    def allow_interrupt(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_interrupt", value)
@pulumi.input_type
class BotTagArgs:
    """A key-value pair for tagging Lex resources."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A key-value pair for tagging Lex resources
        :param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param pulumi.Input[str] value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class BotVersionLocaleDetailsArgs:
    """The version of a bot used for a bot locale."""
    def __init__(__self__, *,
                 source_bot_version: pulumi.Input[str]):
        """
        The version of a bot used for a bot locale.
        :param pulumi.Input[str] source_bot_version: The bot version from which this locale is taken.
        """
        pulumi.set(__self__, "source_bot_version", source_bot_version)
    @property
    @pulumi.getter(name="sourceBotVersion")
    def source_bot_version(self) -> pulumi.Input[str]:
        """
        The bot version from which this locale is taken.
        """
        return pulumi.get(self, "source_bot_version")
    @source_bot_version.setter
    def source_bot_version(self, value: pulumi.Input[str]):
        pulumi.set(self, "source_bot_version", value)
@pulumi.input_type
class BotVersionLocaleSpecificationArgs:
    """Pairs a locale identifier with the bot-version details used for that locale."""
    def __init__(__self__, *,
                 bot_version_locale_details: pulumi.Input['BotVersionLocaleDetailsArgs'],
                 locale_id: pulumi.Input[str]):
        """
        :param pulumi.Input['BotVersionLocaleDetailsArgs'] bot_version_locale_details: The version details used for the locale.
        :param pulumi.Input[str] locale_id: The identifier of the locale.
        """
        pulumi.set(__self__, "bot_version_locale_details", bot_version_locale_details)
        pulumi.set(__self__, "locale_id", locale_id)
    @property
    @pulumi.getter(name="botVersionLocaleDetails")
    def bot_version_locale_details(self) -> pulumi.Input['BotVersionLocaleDetailsArgs']:
        """
        The version details used for the locale.
        """
        return pulumi.get(self, "bot_version_locale_details")
    @bot_version_locale_details.setter
    def bot_version_locale_details(self, value: pulumi.Input['BotVersionLocaleDetailsArgs']):
        pulumi.set(self, "bot_version_locale_details", value)
    @property
    @pulumi.getter(name="localeId")
    def locale_id(self) -> pulumi.Input[str]:
        """
        The identifier of the locale.
        """
        return pulumi.get(self, "locale_id")
    @locale_id.setter
    def locale_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "locale_id", value)
@pulumi.input_type
class BotVoiceSettingsArgs:
    """Settings for using an Amazon Polly voice to communicate with a user."""
    def __init__(__self__, *,
                 voice_id: pulumi.Input[str]):
        """
        Settings for using an Amazon Polly voice to communicate with a user.
        :param pulumi.Input[str] voice_id: The Amazon Polly voice ID that Amazon Lex uses for voice interaction with the user.
        """
        pulumi.set(__self__, "voice_id", voice_id)
    @property
    @pulumi.getter(name="voiceId")
    def voice_id(self) -> pulumi.Input[str]:
        """
        The Amazon Polly voice ID that Amazon Lex uses for voice interaction with the user.
        """
        return pulumi.get(self, "voice_id")
    @voice_id.setter
    def voice_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "voice_id", value)
@pulumi.input_type
class BotWaitAndContinueSpecificationArgs:
    """The prompts that Amazon Lex uses while a bot is waiting for customer input."""
    def __init__(__self__, *,
                 continue_response: pulumi.Input['BotResponseSpecificationArgs'],
                 waiting_response: pulumi.Input['BotResponseSpecificationArgs'],
                 is_active: Optional[pulumi.Input[bool]] = None,
                 still_waiting_response: Optional[pulumi.Input['BotStillWaitingResponseSpecificationArgs']] = None):
        """
        The prompts that Amazon Lex uses while a bot is waiting for customer input.
        :param pulumi.Input['BotResponseSpecificationArgs'] continue_response: The response that Amazon Lex sends to indicate that the bot is ready to continue the conversation.
        :param pulumi.Input['BotResponseSpecificationArgs'] waiting_response: The response that Amazon Lex sends to indicate that the bot is waiting for the conversation to continue.
        :param pulumi.Input[bool] is_active: Specifies whether the bot will wait for a user to respond.
        :param pulumi.Input['BotStillWaitingResponseSpecificationArgs'] still_waiting_response: The response that Amazon Lex sends periodically to the user to indicate that the bot is still waiting for input from the user.
        """
        pulumi.set(__self__, "continue_response", continue_response)
        pulumi.set(__self__, "waiting_response", waiting_response)
        if is_active is not None:
            pulumi.set(__self__, "is_active", is_active)
        if still_waiting_response is not None:
            pulumi.set(__self__, "still_waiting_response", still_waiting_response)
    @property
    @pulumi.getter(name="continueResponse")
    def continue_response(self) -> pulumi.Input['BotResponseSpecificationArgs']:
        """
        The response that Amazon Lex sends to indicate that the bot is ready to continue the conversation.
        """
        return pulumi.get(self, "continue_response")
    @continue_response.setter
    def continue_response(self, value: pulumi.Input['BotResponseSpecificationArgs']):
        pulumi.set(self, "continue_response", value)
    @property
    @pulumi.getter(name="waitingResponse")
    def waiting_response(self) -> pulumi.Input['BotResponseSpecificationArgs']:
        """
        The response that Amazon Lex sends to indicate that the bot is waiting for the conversation to continue.
        """
        return pulumi.get(self, "waiting_response")
    @waiting_response.setter
    def waiting_response(self, value: pulumi.Input['BotResponseSpecificationArgs']):
        pulumi.set(self, "waiting_response", value)
    @property
    @pulumi.getter(name="isActive")
    def is_active(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the bot will wait for a user to respond.
        """
        return pulumi.get(self, "is_active")
    @is_active.setter
    def is_active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_active", value)
    @property
    @pulumi.getter(name="stillWaitingResponse")
    def still_waiting_response(self) -> Optional[pulumi.Input['BotStillWaitingResponseSpecificationArgs']]:
        """
        The response that Amazon Lex sends periodically to the user to indicate that the bot is still waiting for input from the user.
        """
        return pulumi.get(self, "still_waiting_response")
    @still_waiting_response.setter
    def still_waiting_response(self, value: Optional[pulumi.Input['BotStillWaitingResponseSpecificationArgs']]):
        pulumi.set(self, "still_waiting_response", value)
@pulumi.input_type
class DataPrivacyPropertiesArgs:
    """Data privacy setting of the Bot."""
    def __init__(__self__, *,
                 child_directed: pulumi.Input[bool]):
        """
        Data privacy setting of the Bot.
        :param pulumi.Input[bool] child_directed: Whether the bot is directed at children. See the AWS Lex DataPrivacy documentation for the exact legal semantics.
        """
        pulumi.set(__self__, "child_directed", child_directed)
    @property
    @pulumi.getter(name="childDirected")
    def child_directed(self) -> pulumi.Input[bool]:
        """
        Whether the bot is directed at children. See the AWS Lex DataPrivacy documentation for the exact legal semantics.
        """
        return pulumi.get(self, "child_directed")
    @child_directed.setter
    def child_directed(self, value: pulumi.Input[bool]):
        pulumi.set(self, "child_directed", value)
@pulumi.input_type
class ResourcePolicyPolicyArgs:
    """A resource policy to attach to a resource; the document itself is free-form JSON."""
    def __init__(__self__):
        """
        A resource policy to add to the resource. The policy is a JSON structure following the IAM syntax that contains one or more statements that define the policy.
        """
        # No typed fields are declared here; the policy document is free-form
        # JSON (IAM syntax) and is presumably handled outside this input type.
        pass
@pulumi.input_type
class SentimentAnalysisSettingsPropertiesArgs:
    """Determines whether Amazon Lex will use Amazon Comprehend to detect the sentiment of user utterances."""
    def __init__(__self__, *,
                 detect_sentiment: pulumi.Input[bool]):
        """
        Determines whether Amazon Lex will use Amazon Comprehend to detect the sentiment of user utterances.
        :param pulumi.Input[bool] detect_sentiment: Enable to call Amazon Comprehend for Sentiment natively within Lex
        """
        pulumi.set(__self__, "detect_sentiment", detect_sentiment)
    @property
    @pulumi.getter(name="detectSentiment")
    def detect_sentiment(self) -> pulumi.Input[bool]:
        """
        Enable to call Amazon Comprehend for Sentiment natively within Lex
        """
        return pulumi.get(self, "detect_sentiment")
    @detect_sentiment.setter
    def detect_sentiment(self, value: pulumi.Input[bool]):
        pulumi.set(self, "detect_sentiment", value)
| 40.354456
| 314
| 0.683558
|
4a0936a0c9439087d412f3865abec9af99cba83d
| 63,562
|
py
|
Python
|
src/panoptes/pocs/camera/sbigudrv.py
|
ASTROGBAE/POCS
|
ddbc716ba375be92c7af1c8ebd536f9cdbc899da
|
[
"MIT"
] | 69
|
2015-08-27T01:17:26.000Z
|
2022-01-05T19:11:09.000Z
|
src/panoptes/pocs/camera/sbigudrv.py
|
ASTROGBAE/POCS
|
ddbc716ba375be92c7af1c8ebd536f9cdbc899da
|
[
"MIT"
] | 1,094
|
2016-01-19T18:18:06.000Z
|
2022-03-17T04:28:38.000Z
|
src/panoptes/pocs/camera/sbigudrv.py
|
ASTROGBAE/POCS
|
ddbc716ba375be92c7af1c8ebd536f9cdbc899da
|
[
"MIT"
] | 65
|
2015-08-27T01:17:28.000Z
|
2021-02-24T04:12:03.000Z
|
"""
Low level interface to the SBIG Universal Driver/Library.
Reproduces in Python (using ctypes) the C interface provided by SBIG's shared
library, i.e. 1 function that does 72 different things selected by passing an
integer as the first argument. This is basically a direct translation of the
enums and structs defined in the library C-header to Python dicts and
ctypes.Structures, plus a class (SBIGDriver) to load the library
and call the single command function (SBIGDriver._send_command()).
"""
import ctypes
import enum
import threading
import time
import numpy as np
from astropy import units as u
from numpy.ctypeslib import as_ctypes
from panoptes.pocs.camera.sdk import AbstractSDKDriver
from panoptes.utils import error
from panoptes.utils.time import CountdownTimer
from panoptes.utils.utils import get_quantity_value
################################################################################
# Main SBIGDriver class
################################################################################
class SBIGDriver(AbstractSDKDriver):
    def __init__(self, library_path=None, retries=1, **kwargs):
        """
        Main class representing the SBIG Universal Driver/Library interface.
        On construction loads SBIG's shared library which must have already
        been installed (see http://archive.sbig.com/sbwhtmls/devsw.htm). The
        name and location of the shared library can be manually specified with
        the library_path argument, otherwise the ctypes.util.find_library function
        will be used to locate it.
        Args:
            library_path (str, optional): path to the library e.g. '/usr/local/lib/libsbigudrv.so'.
            retries (int, optional): maximum number of times to attempt to send
                a command to a camera in case of failures. Default 1, i.e. only
                send a command once.
        Returns:
            `~pocs.camera.sbigudrv.SBIGDriver`
        Raises:
            panoptes.utils.error.NotFound: raised if library_path not given & find_library fails to
                locate the library.
            OSError: raised if the ctypes.CDLL loader cannot load the library.
        """
        # Create a Lock that will be used to prevent simultaneous commands from multiple
        # cameras. Main reason for this is preventing overlapping readouts.
        self._command_lock = threading.Lock()
        self._retries = retries
        super().__init__(name='sbigudrv', library_path=library_path, **kwargs)
# Properties
    @property
    def retries(self):
        """int: Maximum number of attempts when sending a command to a camera."""
        return self._retries
@retries.setter
def retries(self, retries):
retries = int(retries)
if retries < 1:
raise ValueError("retries should be 1 or greater, got {}!".format(retries))
self._retries = retries
# Methods
def get_SDK_version(self, request_type='DRIVER_STD'):
driver_info_params = GetDriverInfoParams(driver_request_codes[request_type])
driver_info_results = GetDriverInfoResults0()
self.open_driver() # Make sure driver is open
with self._command_lock:
self._send_command('CC_GET_DRIVER_INFO', driver_info_params, driver_info_results)
version_string = "{}, {}".format(driver_info_results.name.decode('ascii'),
self._bcd_to_string(driver_info_results.version))
return version_string
def get_devices(self):
"""Gets currently connected camera inf.
Returns:
dict: All currently connected camera serial numbers with corresponding handles.
"""
camera_info = QueryUSBResults2()
with self._command_lock:
self._send_command('CC_QUERY_USB2', results=camera_info)
if not camera_info.camerasFound:
raise error.PanError("No SBIG camera devices found.")
cameras = {}
for i in range(camera_info.camerasFound):
serial_number = camera_info.usbInfo[i].serialNumber.decode('ascii')
device_type = "DEV_USB{}".format(i + 1)
cameras[serial_number] = device_type
return cameras
    def open_driver(self):
        """Open the SBIG driver (CC_OPEN_DRIVER)."""
        with self._command_lock:
            self._send_command('CC_OPEN_DRIVER')
    def open_device(self, device_type):
        """Open the camera device of the given type, e.g. 'DEV_USB1' (CC_OPEN_DEVICE)."""
        odp = OpenDeviceParams(device_type_codes[device_type], 0, 0)
        with self._command_lock:
            self._send_command('CC_OPEN_DEVICE', params=odp)
    def establish_link(self):
        """Establish the communications link with the open camera (CC_ESTABLISH_LINK)."""
        elp = EstablishLinkParams()
        elr = EstablishLinkResults()
        with self._command_lock:
            self._send_command('CC_ESTABLISH_LINK', params=elp, results=elr)
def get_link_status(self):
lsr = GetLinkStatusResults()
with self._command_lock:
self._send_command('CC_GET_LINK_STATUS', results=lsr)
link_status = {'established': bool(lsr.linkEstablished),
'base_address': int(lsr.baseAddress),
'camera_type': camera_types[lsr.cameraType],
'com_total': int(lsr.comTotal),
'com_failed': int(lsr.comFailed)}
return link_status
    def get_driver_handle(self):
        """Return the handle of the currently active driver (CC_GET_DRIVER_HANDLE)."""
        ghr = GetDriverHandleResults()
        with self._command_lock:
            self._send_command('CC_GET_DRIVER_HANDLE', results=ghr)
        return ghr.handle
    def set_handle(self, handle):
        """Make the camera with the given handle the active one (CC_SET_DRIVER_HANDLE).

        Deliberately does not take the command lock: callers hold
        self._command_lock around set_handle() plus the subsequent command(s),
        as seen in e.g. get_ccd_info() and query_temp_status().
        """
        set_handle_params = SetDriverHandleParams(handle)
        self._send_command('CC_SET_DRIVER_HANDLE', params=set_handle_params)
    def get_ccd_info(self, handle):
        """
        Use Get CCD Info to gather all relevant info about CCD capabilities. Already
        have camera type, 'name' and serial number, this gets the rest.

        Args:
            handle: driver handle of the camera to query.

        Returns:
            dict: CCD capabilities, including a 'readout modes' entry parsed from
                the camera's readout info.
        """
        # 'CCD_INFO_IMAGING' will get firmware version, and a list of readout modes (binning)
        # with corresponding image widths, heights, gains and also physical pixel width, height.
        ccd_info_params0 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_IMAGING'])
        ccd_info_results0 = GetCCDInfoResults0()
        # 'CCD_INFO_EXTENDED' will get bad column info, and whether the CCD has ABG or not.
        ccd_info_params2 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED'])
        ccd_info_results2 = GetCCDInfoResults2()
        # 'CCD_INFO_EXTENDED2_IMAGING' will get info like full frame/frame transfer, interline or
        # not, presence of internal frame buffer, etc.
        ccd_info_params4 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED2_IMAGING'])
        ccd_info_results4 = GetCCDInfoResults4()
        # 'CCD_INFO_EXTENDED3' will get info like mechanical shutter or not, mono/colour,
        # Bayer/Truesense.
        ccd_info_params6 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED3'])
        ccd_info_results6 = GetCCDInfoResults6()
        # All four queries are made under a single lock acquisition so no other
        # camera command can change the active handle between them.
        with self._command_lock:
            self.set_handle(handle)
            self._send_command('CC_GET_CCD_INFO',
                               params=ccd_info_params0,
                               results=ccd_info_results0)
            self._send_command('CC_GET_CCD_INFO',
                               params=ccd_info_params2,
                               results=ccd_info_results2)
            self._send_command('CC_GET_CCD_INFO',
                               params=ccd_info_params4,
                               results=ccd_info_results4)
            self._send_command('CC_GET_CCD_INFO',
                               params=ccd_info_params6,
                               results=ccd_info_results6)
        # Now to convert all this ctypes stuff into Pythonic data structures.
        ccd_info = {'firmware version': self._bcd_to_string(ccd_info_results0.firmwareVersion),
                    'camera type': camera_types[ccd_info_results0.cameraType],
                    'camera name': str(ccd_info_results0.name, encoding='ascii'),
                    'bad columns': ccd_info_results2.columns[0:ccd_info_results2.badColumns],
                    'imaging ABG': bool(ccd_info_results2.imagingABG),
                    'serial number': str(ccd_info_results2.serialNumber, encoding='ascii'),
                    'frame transfer': bool(ccd_info_results4.capabilities_b0),
                    'electronic shutter': bool(ccd_info_results4.capabilities_b1),
                    'remote guide head support': bool(ccd_info_results4.capabilities_b2),
                    'Biorad TDI support': bool(ccd_info_results4.capabilities_b3),
                    'AO8': bool(ccd_info_results4.capabilities_b4),
                    'frame buffer': bool(ccd_info_results4.capabilities_b5),
                    'dump extra': ccd_info_results4.dumpExtra,
                    'STXL': bool(ccd_info_results6.camera_b0),
                    'mechanical shutter': not bool(ccd_info_results6.camera_b1),
                    'colour': bool(ccd_info_results6.ccd_b0),
                    'Truesense': bool(ccd_info_results6.ccd_b1)}
        readout_mode_info = self._parse_readout_info(
            ccd_info_results0.readoutInfo[0:ccd_info_results0.readoutModes])
        ccd_info['readout modes'] = readout_mode_info
        return ccd_info
    def disable_vdd_optimized(self, handle):
        """
        Stops selective lowering of the CCD's Vdd voltage to ensure consistent bias structures.
        There are many driver control parameters, almost all of which we would not want to change
        from their default values. The one exception is DCP_VDD_OPTIMIZED. From the SBIG manual:
        The DCP_VDD_OPTIMIZED parameter defaults to TRUE which lowers the CCD’s Vdd (which reduces
        amplifier glow) only for images 3 seconds and longer. This was done to increase the image
        throughput for short exposures as raising and lowering Vdd takes 100s of milliseconds. The
        lowering and subsequent raising of Vdd delays the image readout slightly which causes short
        exposures to have a different bias structure than long exposures. Setting this parameter to
        FALSE stops the short exposure optimization from occurring.
        The default behaviour will improve image throughput for exposure times of 3 seconds or less
        but at the penalty of altering the bias structure between short and long exposures. This
        could cause systematic errors in bias frames, dark current measurements, etc. It's probably
        not worth it.

        Args:
            handle: driver handle of the camera to configure.
        """
        # Setting the control parameter to 0 (FALSE) disables the optimisation.
        set_driver_control_params = SetDriverControlParams(
            driver_control_codes['DCP_VDD_OPTIMIZED'], 0)
        self.logger.debug('Disabling DCP_VDD_OPTIMIZE on {}'.format(handle))
        with self._command_lock:
            self.set_handle(handle)
            self._send_command('CC_SET_DRIVER_CONTROL', params=set_driver_control_params)
def query_temp_status(self, handle):
    """Return a dict of temperature and cooling telemetry for the camera with the given handle.

    Uses the TEMP_STATUS_ADVANCED2 request, which reports cooling/fan state plus
    temperatures and cooler power levels as astropy Quantities.
    """
    request = QueryTemperatureStatusParams(temp_status_request_codes['TEMP_STATUS_ADVANCED2'])
    results = QueryTemperatureStatusResults2()
    with self._command_lock:
        self.set_handle(handle)
        self._send_command('CC_QUERY_TEMPERATURE_STATUS', request, results)
    return {'cooling_enabled': bool(results.coolingEnabled),
            'fan_enabled': bool(results.fanEnabled),
            'ccd_set_point': results.ccdSetpoint * u.Celsius,
            'imaging_ccd_temperature': results.imagingCCDTemperature * u.Celsius,
            'tracking_ccd_temperature': results.trackingCCDTemperature * u.Celsius,
            'external_ccd_temperature': results.externalTrackingCCDTemperature * u.Celsius,
            'ambient_temperature': results.ambientTemperature * u.Celsius,
            'imaging_ccd_power': results.imagingCCDPower * u.percent,
            'tracking_ccd_power': results.trackingCCDPower * u.percent,
            'external_ccd_power': results.externalTrackingCCDPower * u.percent,
            'heatsink_temperature': results.heatsinkTemperature * u.Celsius,
            'fan_power': results.fanPower * u.percent,
            'fan_speed': results.fanSpeed / u.minute,
            'tracking_ccd_set_point': results.trackingCCDSetpoint * u.Celsius}
def set_temp_regulation(self, handle, target_temperature, enabled):
    """Enable or disable CCD temperature regulation with the given set point.

    Args:
        handle (int): handle of the camera to control.
        target_temperature (u.Quantity or float): cooling set point; assumed Celsius
            if given without units.
        enabled (bool): whether temperature regulation should be switched on.
    """
    set_point = get_quantity_value(target_temperature, unit=u.Celsius)
    regulation = 'REGULATION_ON' if enabled else 'REGULATION_OFF'
    regulation_params = SetTemperatureRegulationParams2(
        temperature_regulation_codes[regulation], set_point)
    # Use temperature regulation autofreeze, if available (might marginally reduce read noise).
    autofreeze_params = SetTemperatureRegulationParams2(
        temperature_regulation_codes['REGULATION_ENABLE_AUTOFREEZE'], set_point)
    with self._command_lock:
        self.set_handle(handle)
        self._send_command('CC_SET_TEMPERATURE_REGULATION2', params=regulation_params)
        self._send_command('CC_SET_TEMPERATURE_REGULATION2', params=autofreeze_params)
def get_exposure_status(self, handle):
    """Returns the current exposure status of the camera, e.g. 'CS_IDLE', 'CS_INTEGRATING' """
    # Query the status of the CC_START_EXPOSURE2 command to learn the exposure state.
    status_params = QueryCommandStatusParams(command_codes['CC_START_EXPOSURE2'])
    status_results = QueryCommandStatusResults()
    with self._command_lock:
        self.set_handle(handle)
        self._send_command('CC_QUERY_COMMAND_STATUS',
                           params=status_params,
                           results=status_results)
    return statuses[status_results.status]
def start_exposure(self,
                   handle,
                   seconds,
                   dark,
                   antiblooming,
                   readout_mode,
                   top,
                   left,
                   height,
                   width):
    """Begin an exposure on the imaging CCD of the camera with the given handle.

    Args:
        handle (int): handle of the camera to expose with.
        seconds (u.Quantity or float): exposure time (seconds assumed if unitless).
        dark (bool): if True keep the shutter closed for a dark frame.
        antiblooming (bool): whether the camera supports anti-blooming gates.
        readout_mode (str): name of the readout mode, e.g. 'RM_1X1'.
        top, left, height, width: readout window, in pixels.
    """
    # SBIG driver expects exposure time in 100ths of a second.
    centiseconds = int(get_quantity_value(seconds, unit=u.second) * 100)
    # The ABG setting is ignored by most cameras (even if they do have ABG); the only
    # exceptions are the TC211 versions of the Tracking CCD on the ST-7/8/etc. and the
    # Imaging CCD of the PixCel255. Use medium ABG clocking if supported, else none.
    if antiblooming:
        abg_code = abg_state_codes['ABG_CLK_MED7']
    else:
        abg_code = abg_state_codes['ABG_LOW7']
    # Dark frames keep the shutter closed throughout; normal exposures open (and close) it.
    if dark:
        shutter_code = shutter_command_codes['SC_CLOSE_SHUTTER']
    else:
        shutter_code = shutter_command_codes['SC_OPEN_SHUTTER']
    exposure_params = StartExposureParams2(ccd_codes['CCD_IMAGING'],
                                           centiseconds,
                                           abg_code,
                                           shutter_code,
                                           readout_mode_codes[readout_mode],
                                           int(get_quantity_value(top, u.pixel)),
                                           int(get_quantity_value(left, u.pixel)),
                                           int(get_quantity_value(height, u.pixel)),
                                           int(get_quantity_value(width, u.pixel)))
    with self._command_lock:
        self.set_handle(handle)
        self._send_command('CC_START_EXPOSURE2', params=exposure_params)
def readout(self,
            handle,
            readout_mode,
            top,
            left,
            height,
            width):
    """End the current exposure and read out the image data.

    Args:
        handle (int): handle of the camera to read out.
        readout_mode (str): name of the readout mode used for the exposure.
        top, left, height, width: readout window, in pixels.

    Returns:
        numpy.ndarray: image data as a (height, width) array of uint16.

    Raises:
        RuntimeError: if the readout fails part way through, or the readout
            cannot be cleanly ended.
    """
    # Set up all the parameter and result Structures that will be needed.
    readout_mode_code = readout_mode_codes[readout_mode]
    top = int(get_quantity_value(top, unit=u.pixel))
    left = int(get_quantity_value(left, unit=u.pixel))
    height = int(get_quantity_value(height, unit=u.pixel))
    width = int(get_quantity_value(width, unit=u.pixel))
    end_exposure_params = EndExposureParams(ccd_codes['CCD_IMAGING'])
    start_readout_params = StartReadoutParams(ccd_codes['CCD_IMAGING'],
                                              readout_mode_code,
                                              top, left,
                                              height, width)
    readout_line_params = ReadoutLineParams(ccd_codes['CCD_IMAGING'],
                                            readout_mode_code,
                                            left, width)
    end_readout_params = EndReadoutParams(ccd_codes['CCD_IMAGING'])
    # Array to hold the image data; each CC_READOUT_LINE writes one row in place.
    image_data = np.zeros((height, width), dtype=np.uint16)
    rows_got = 0
    # Readout data
    with self._command_lock:
        self.set_handle(handle)
        self._send_command('CC_END_EXPOSURE', params=end_exposure_params)
        self._send_command('CC_START_READOUT', params=start_readout_params)
        try:
            for i in range(height):
                self._send_command('CC_READOUT_LINE',
                                   params=readout_line_params,
                                   results=as_ctypes(image_data[i]))
                rows_got += 1
        except RuntimeError as err:
            message = 'expected {} rows, got {}: {}'.format(height, rows_got, err)
            # Chain the original driver error so the root cause is preserved.
            raise RuntimeError(message) from err
        try:
            self._send_command('CC_END_READOUT', params=end_readout_params)
        except RuntimeError as err:
            message = "error ending readout: {}".format(err)
            raise RuntimeError(message) from err
    return image_data
def cfw_init(self, handle, model='AUTO', timeout=10 * u.second):
    """
    Initialise colour filter wheel

    Sends the initialise command to the colour filter wheel attached to the camera
    specified with handle. This will generally not be required because all SBIG filter
    wheels initialise themselves on power up.

    Args:
        handle (int): handle of the camera that the filter wheel is connected to.
        model (str, optional): Model of the filter wheel to control. Default is 'AUTO', which
            asks the driver to autodetect the model.
        timeout (u.Quantity, optional): maximum time to wait for the move to complete. Should be
            a Quantity with time units. If a numeric type without units is given seconds will be
            assumed. Default is 10 seconds.

    Returns:
        dict: dictionary containing the 'model', 'position', 'status' and 'error' values
            returned by the driver.

    Raises:
        RuntimeError: raised if the driver returns an error
    """
    self.logger.debug("Initialising filter wheel on {}".format(handle))
    # Fixed: the CFW INIT command is sent via the _cfw_command helper, the same as
    # cfw_query/cfw_get_info/cfw_goto (previously called nonexistent _cfw_params).
    init_results = self._cfw_command(handle, model, CFWCommand.INIT)
    # The filterwheel init command does not block until complete, but this method should.
    # Need to poll.
    init_event = threading.Event()
    # Expect filter wheel to end up in position 1 after initialisation
    poll_thread = threading.Thread(target=self._cfw_poll,
                                   args=(handle, 1, model, init_event, timeout),
                                   daemon=True)
    poll_thread.start()
    init_event.wait()
    return self._cfw_parse_results(init_results)
def cfw_query(self, handle, model='AUTO'):
    """
    Query status of the colour filter wheel

    This is mostly used to poll the filter wheel status after asking the filter wheel to move
    in order to find out when the move has completed.

    Args:
        handle (int): handle of the camera that the filter wheel is connected to.
        model (str, optional): Model of the filter wheel to control. Default is 'AUTO', which
            asks the driver to autodetect the model.

    Returns:
        dict: dictionary containing the 'model', 'position', 'status' and 'error' values
            returned by the driver.

    Raises:
        RuntimeError: raised if the driver returns an error
    """
    query_results = self._cfw_command(handle, model, CFWCommand.QUERY)
    return self._cfw_parse_results(query_results)
def cfw_get_info(self, handle, model='AUTO'):
    """
    Get info from the colour filter wheel

    This will return the usual status information plus the firmware version and the number
    of filter wheel positions.

    Args:
        handle (int): handle of the camera that the filter wheel is connected to.
        model (str, optional): Model of the filter wheel to control. Default is 'AUTO', which
            asks the driver to autodetect the model.

    Returns:
        dict: dictionary containing the 'model', 'firmware_version' and 'n_positions' for the
            filter wheel.

    Raises:
        RuntimeError: raised if the driver returns an error
    """
    info_results = self._cfw_command(handle,
                                     model,
                                     CFWCommand.GET_INFO,
                                     CFWGetInfoSelect.FIRMWARE_VERSION)
    # cfwResults1 holds the firmware version, cfwResults2 the number of positions.
    info = {'model': CFWModelSelect(info_results.cfwModel).name,
            'firmware_version': int(info_results.cfwResults1),
            'n_positions': int(info_results.cfwResults2)}
    msg = "Filter wheel on {}, model: {}, firmware version: {}, number of positions: {}".format(
        handle,
        info['model'],
        info['firmware_version'],
        info['n_positions'])
    self.logger.debug(msg)
    return info
def cfw_goto(self, handle, position, model='AUTO', cfw_event=None, timeout=10 * u.second):
    """
    Move colour filter wheel to a given position

    This function returns immediately after starting the move but spawns a thread to poll the
    filter wheel until the move completes (see _cfw_poll method for details). This thread will
    log the result of the move, and optionally set a threading.Event to signal that it has
    completed.

    Args:
        handle (int): handle of the camera that the filter wheel is connected to.
        position (int): position to move the filter wheel. Must be an integer >= 1.
        model (str, optional): Model of the filter wheel to control. Default is 'AUTO', which
            asks the driver to autodetect the model.
        cfw_event (threading.Event, optional): Event to set once the move is complete
        timeout (u.Quantity, optional): maximum time to wait for the move to complete. Should be
            a Quantity with time units. If a numeric type without units is given seconds will be
            assumed. Default is 10 seconds.

    Returns:
        dict: dictionary containing the 'model', 'position', 'status' and 'error' values
            returned by the driver.

    Raises:
        RuntimeError: raised if the driver returns an error
    """
    self.logger.debug("Moving filter wheel on {} to position {}".format(handle, position))
    # First check that the filter wheel isn't currently moving, and that the requested
    # position is valid.
    info = self.cfw_get_info(handle, model)
    if position < 1 or position > info['n_positions']:
        msg = "Position must be between 1 and {}, got {}".format(
            info['n_positions'], position)
        self.logger.error(msg)
        raise RuntimeError(msg)

    query = self.cfw_query(handle, model)
    # Fixed: _cfw_parse_results returns the status as a string (CFWStatus(...).name),
    # so compare against the string 'BUSY' (as _cfw_poll does), not the enum member —
    # the enum comparison was always False, so this guard never fired.
    if query['status'] == 'BUSY':
        msg = "Attempt to move filter wheel when already moving"
        self.logger.error(msg)
        raise RuntimeError(msg)

    cfw_goto_results = self._cfw_command(handle, model, CFWCommand.GOTO, position)
    # Poll filter wheel in order to set cfw_event once move is complete
    poll_thread = threading.Thread(target=self._cfw_poll,
                                   args=(handle, position, model, cfw_event, timeout),
                                   daemon=True)
    poll_thread.start()
    return self._cfw_parse_results(cfw_goto_results)
# Private methods
def _cfw_poll(self, handle, position, model='AUTO', cfw_event=None, timeout=None):
    """
    Polls filter wheel until the current move is complete.

    Also monitors for errors while polling and checks status and position after the move is
    complete. Optionally sets a threading.Event to signal the end of the move. Has an optional
    timeout to raise a TimeoutError if the move takes longer than expected.

    Args:
        handle (int): handle of the camera that the filter wheel is connected to.
        position (int): position to move the filter wheel. Must be an integer >= 1.
        model (str, optional): Model of the filter wheel to control. Default is 'AUTO', which
            asks the driver to autodetect the model.
        cfw_event (threading.Event, optional): Event to set once the move is complete
        timeout (u.Quantity, optional): maximum time to wait for the move to complete. Should be
            a Quantity with time units. If a numeric type without units is given seconds will be
            assumed.

    Raises:
        RuntimeError: raised if the driver returns an error or if the final status and position
            are not as expected.
        panoptes.utils.error.Timeout: raised if the move does not end within the period of time
            specified by the timeout argument.
    """
    if timeout is not None:
        timer = CountdownTimer(duration=timeout)
    try:
        # Poll at 10 Hz until the driver reports the wheel is no longer busy.
        query = self.cfw_query(handle, model)
        while query['status'] == 'BUSY':
            if timeout is not None and timer.expired():
                msg = "Timeout waiting for filter wheel {} to move to {}".format(
                    handle, position)
                raise error.Timeout(msg)
            time.sleep(0.1)
            query = self.cfw_query(handle, model)
    except RuntimeError as err:
        # Error returned by driver at some point while polling
        self.logger.error('Error while moving filter wheel on {} to {}: {}'.format(
            handle, position, err))
        raise err
    else:
        # No driver errors, but still check status and position
        if query['status'] == 'IDLE' and query['position'] == position:
            self.logger.debug('Filter wheel on {} moved to position {}'.format(
                handle, query['position']))
        else:
            msg = 'Problem moving filter wheel on {} to {} - status: {}, position: {}'.format(
                handle,
                position,
                query['status'],
                query['position'])
            self.logger.error(msg)
            raise RuntimeError(msg)
    finally:
        # Regardless must always set the Event when the move has stopped, otherwise
        # callers blocked on the Event (e.g. cfw_init) would wait forever.
        if cfw_event is not None:
            cfw_event.set()
def _cfw_parse_results(self, cfw_results):
    """
    Converts filter wheel results Structure into something more Pythonic

    Returns a dict with the model, position, status and error names; a reported
    position of 0 means 'unknown' and is mapped to NaN.
    """
    position = int(cfw_results.cfwPosition)
    if position == 0:
        position = float('nan')  # 0 means position unknown
    return {'model': CFWModelSelect(cfw_results.cfwModel).name,
            'position': position,
            'status': CFWStatus(cfw_results.cfwStatus).name,
            'error': CFWError(cfw_results.cfwError).name}
def _cfw_command(self, handle, model, *args):
    """
    Helper function to send filter wheel commands

    Args:
        handle (int): handle of the camera that the filter wheel is connected to.
        model (str): Model of the filter wheel to control.
        *args: remaining parameters for the filter wheel command

    Returns:
        CFWResults: ctypes Structure containing results of the command
    """
    params = CFWParams(CFWModelSelect[model], *args)
    results = CFWResults()
    with self._command_lock:
        self.set_handle(handle)
        self._send_command('CC_CFW', params, results)
    return results
def _bcd_to_int(self, bcd, int_type='ushort'):
"""
Function to decode the Binary Coded Decimals returned by the Get CCD Info command.
These will be integers of C types ushort or ulong, encoding decimal numbers of the form
XX.XX or XXXXXX.XX, i.e. when converting to a numerical value they will need dividing by
100.
"""
# BCD has been automatically converted by ctypes to a Python int. Need to convert to
# bytes sequence of correct length and byte order. SBIG library seems to use
# big endian byte order for the BCDs regardless of platform.
if int_type == 'ushort':
bcd = bcd.to_bytes(ctypes.sizeof(ctypes.c_ushort), byteorder='big')
elif int_type == 'ulong':
bcd = bcd.to_bytes(ctypes.sizeof(ctypes.c_ulong), byteorder='big')
else:
self.logger.error('Unknown integer type {}!'.format(int_type))
return
# Convert bytes sequence to hexadecimal string representation, which will also be the
# string representation of the decoded binary coded decimal, apart from possible
# leading zeros. Convert back to an int to strip the leading zeros.
return int(bcd.hex())
def _bcd_to_float(self, bcd, int_type='ushort'):
    """Decode a BCD and apply the implied division by 100 to get the numerical value."""
    decoded = self._bcd_to_int(bcd, int_type)
    return decoded / 100.0
def _bcd_to_string(self, bcd, int_type='ushort'):
    """
    Decode a BCD into its decimal string representation, e.g. 0x1234 -> '12.34'.

    Inserts the decimal point implied by the XX.XX / XXXXXX.XX BCD formats rather than
    dividing by 100, preserving trailing zeros (e.g. '2.30' rather than '2.3').
    """
    # Zero-pad to at least 3 digits so values below 1.00 format correctly,
    # e.g. 5 -> '005' -> '0.05' (previously produced '.5').
    digits = str(self._bcd_to_int(bcd, int_type)).zfill(3)
    return "{}.{}".format(digits[:-2], digits[-2:])
def _parse_readout_info(self, infos):
    """Convert an array of ReadoutInfo structs into a dict keyed by readout mode name.

    Each entry holds the image dimensions, gain and pixel size for that mode, with
    BCD-encoded fields decoded and astropy units attached.
    """
    parsed = {}
    for info in infos:
        mode_name = readout_modes[info.mode]
        parsed[mode_name] = {
            'width': info.width * u.pixel,
            'height': info.height * u.pixel,
            'gain': self._bcd_to_float(info.gain) * u.electron / u.adu,
            'pixel width': self._bcd_to_float(info.pixelWidth, int_type='ulong') * u.um,
            'pixel height': self._bcd_to_float(info.pixelHeight, int_type='ulong') * u.um}
    return parsed
def _send_command(self, command, params=None, results=None):
    """
    Function for sending a command to the SBIG Universal Driver/Library.

    Args:
        command (string): Name of command to send
        params (ctypes.Structure, optional): Subclass of Structure
            containing command parameters
        results (ctypes.Structure, optional): Subclass of Structure to
            store command results

    Returns:
        error (str): error message received from the SBIG driver, will be
            'CE_NO_ERROR' if no error occurs.

    Raises:
        KeyError: Raised if command not in SBIG command list
        RuntimeError: Raised if return code indicates a fatal error, or is
            not recognised
    """
    # Look up integer command code for the given command string, raises
    # KeyError if no matches found.
    try:
        command_code = command_codes[command]
    except KeyError:
        msg = "Invalid SBIG command '{}'!".format(command)
        self.logger.error(msg)
        raise KeyError(msg)

    error = None
    retries_remaining = self.retries
    # Retry until the driver reports success or the retry budget is used up.
    while error != 'CE_NO_ERROR' and retries_remaining > 0:
        # Send the command to the driver. Need to pass pointers to params,
        # results structs or None (which gets converted to a null pointer).
        return_code = self._CDLL.SBIGUnivDrvCommand(
            command_code,
            (ctypes.byref(params) if params else None),
            (ctypes.byref(results) if results else None))
        # Look up the error message for the return code, raises Error if no
        # match found. This should never happen, and if it does it probably
        # indicates a serious problem such an outdated driver that is
        # incompatible with the camera in use.
        try:
            error = errors[return_code]
        except KeyError:
            msg = "SBIG Driver returned unknown error code '{}'".format(return_code)
            self.logger.error(msg)
            raise RuntimeError(msg)
        retries_remaining -= 1

    # Raise a RuntimeError exception if return code is not 0 (no error).
    # This is probably excessively cautious and will need to be relaxed,
    # there are likely to be situations where other return codes don't
    # necessarily indicate a fatal error.
    # Will not raise a RuntimeError if the error is 'CE_DRIVER_NOT_CLOSED'
    # because this only indicates an attempt to open the driver when it is
    # already open.
    if error not in ('CE_NO_ERROR', 'CE_DRIVER_NOT_CLOSED'):
        if error == 'CE_CFW_ERROR':
            # Filter wheel errors carry a more specific sub-code in the results struct.
            cfw_error_code = results.cfwError
            try:
                error = "CFW {}".format(CFWError(cfw_error_code).name)
            except ValueError:
                msg = "SBIG Driver return unknown CFW error code '{}'".format(cfw_error_code)
                self.logger.error(msg)
                raise RuntimeError(msg)
        msg = "SBIG Driver returned error '{}'!".format(error)
        self.logger.error(msg)
        raise RuntimeError(msg)

    return error
#################################################################################
# Commands and error messages
#################################################################################
# Camera command codes. Doesn't include the 'SBIG only' commands.
# The codes are consecutive integers starting from zero, in the order listed here.
command_codes = {command: code for code, command in enumerate((
    'CC_NULL',
    'CC_START_EXPOSURE',
    'CC_END_EXPOSURE',
    'CC_READOUT_LINE',
    'CC_DUMP_LINES',
    'CC_SET_TEMPERATURE_REGULATION',
    'CC_QUERY_TEMPERATURE_STATUS',
    'CC_ACTIVATE_RELAY',
    'CC_PULSE_OUT',
    'CC_ESTABLISH_LINK',
    'CC_GET_DRIVER_INFO',
    'CC_GET_CCD_INFO',
    'CC_QUERY_COMMAND_STATUS',
    'CC_MISCELLANEOUS_CONTROL',
    'CC_READ_SUBTRACT_LINE',
    'CC_UPDATE_CLOCK',
    'CC_READ_OFFSET',
    'CC_OPEN_DRIVER',
    'CC_CLOSE_DRIVER',
    'CC_TX_SERIAL_BYTES',
    'CC_GET_SERIAL_STATUS',
    'CC_AO_TIP_TILT',
    'CC_AO_SET_FOCUS',
    'CC_AO_DELAY',
    'CC_GET_TURBO_STATUS',
    'CC_END_READOUT',
    'CC_GET_US_TIMER',
    'CC_OPEN_DEVICE',
    'CC_CLOSE_DEVICE',
    'CC_SET_IRQL',
    'CC_GET_IRQL',
    'CC_GET_LINE',
    'CC_GET_LINK_STATUS',
    'CC_GET_DRIVER_HANDLE',
    'CC_SET_DRIVER_HANDLE',
    'CC_START_READOUT',
    'CC_GET_ERROR_STRING',
    'CC_SET_DRIVER_CONTROL',
    'CC_GET_DRIVER_CONTROL',
    'CC_USB_AD_CONTROL',
    'CC_QUERY_USB',
    'CC_GET_PENTIUM_CYCLE_COUNT',
    'CC_RW_USB_I2C',
    'CC_CFW',
    'CC_BIT_IO',
    'CC_USER_EEPROM',
    'CC_AO_CENTER',
    'CC_BTDI_SETUP',
    'CC_MOTOR_FOCUS',
    'CC_QUERY_ETHERNET',
    'CC_START_EXPOSURE2',
    'CC_SET_TEMPERATURE_REGULATION2',
    'CC_READ_OFFSET2',
    'CC_DIFF_GUIDER',
    'CC_COLUMN_EEPROM',
    'CC_CUSTOMER_OPTIONS',
    'CC_DEBUG_LOG',
    'CC_QUERY_USB2',
    'CC_QUERY_ETHERNET2',
    'CC_GET_AO_MODEL',
    'CC_QUERY_USB3',
    'CC_QUERY_COMMAND_STATUS2'))}

# Reversed dictionary, just in case you ever need to look up a command given a
# command code.
commands = {code: command for command, code in command_codes.items()}
# Camera error messages, indexed by the return code of SBIGUnivDrvCommand.
# The codes are consecutive integers starting from zero, in the order listed here.
errors = dict(enumerate((
    'CE_NO_ERROR',
    'CE_CAMERA_NOT_FOUND',
    'CE_EXPOSURE_IN_PROGRESS',
    'CE_NO_EXPOSURE_IN_PROGRESS',
    'CE_UNKNOWN_COMMAND',
    'CE_BAD_CAMERA_COMMAND',
    'CE_BAD_PARAMETER',
    'CE_TX_TIMEOUT',
    'CE_RX_TIMEOUT',
    'CE_NAK_RECEIVED',
    'CE_CAN_RECEIVED',
    'CE_UNKNOWN_RESPONSE',
    'CE_BAD_LENGTH',
    'CE_AD_TIMEOUT',
    'CE_KBD_ESC',
    'CE_CHECKSUM_ERROR',
    'CE_EEPROM_ERROR',
    'CE_SHUTTER_ERROR',
    'CE_UNKNOWN_CAMERA',
    'CE_DRIVER_NOT_FOUND',
    'CE_DRIVER_NOT_OPEN',
    'CE_DRIVER_NOT_CLOSED',
    'CE_SHARE_ERROR',
    'CE_TCE_NOT_FOUND',
    'CE_AO_ERROR',
    'CE_ECP_ERROR',
    'CE_MEMORY_ERROR',
    'CE_DEVICE_NOT_FOUND',
    'CE_DEVICE_NOT_OPEN',
    'CE_DEVICE_NOT_CLOSED',
    'CE_DEVICE_NOT_IMPLEMENTED',
    'CE_DEVICE_DISABLED',
    'CE_OS_ERROR',
    'CE_SOCK_ERROR',
    'CE_SERVER_NOT_FOUND',
    'CE_CFW_ERROR',
    'CE_MF_ERROR',
    'CE_FIRMWARE_ERROR',
    'CE_DIFF_GUIDER_ERROR',
    'CE_RIPPLE_CORRECTION_ERROR',
    'CE_EZUSB_RESET',
    'CE_INCOMPATIBLE_FIRMWARE',
    'CE_INVALID_HANDLE',
    'CE_NEXT_ERROR')))

# Reverse dictionary, just in case you ever need to look up an error code given
# an error name
error_codes = {error: error_code for error_code, error in errors.items()}
#################################################################################
# Query USB Info related.
#################################################################################
class QueryUSBInfo(ctypes.Structure):
    """
    ctypes (Sub-)Structure used to hold details of individual cameras returned
    by 'CC_QUERY_USB' command
    """
    # Rather than use C99 _Bool type SBIG library uses 0 = False, 1 = True
    _fields_ = [('cameraFound', ctypes.c_ushort),   # 0/1 flag: is there a camera in this slot
                ('cameraType', ctypes.c_ushort),    # code, see camera_types dict
                ('name', ctypes.c_char * 64),
                ('serialNumber', ctypes.c_char * 10)]
class QueryUSBResults(ctypes.Structure):
    """
    ctypes Structure used to hold the results from 'CC_QUERY_USB' command (max 4 cameras).
    """
    _fields_ = [('camerasFound', ctypes.c_ushort),  # number of cameras detected
                ('usbInfo', QueryUSBInfo * 4)]      # fixed-size array, one entry per slot
class QueryUSBResults2(ctypes.Structure):
    """
    ctypes Structure used to hold the results from 'CC_QUERY_USB2' command (max 8 cameras).
    """
    _fields_ = [('camerasFound', ctypes.c_ushort),  # number of cameras detected
                ('usbInfo', QueryUSBInfo * 8)]      # fixed-size array, one entry per slot
class QueryUSBResults3(ctypes.Structure):
    """
    ctypes Structure used to hold the results from 'CC_QUERY_USB3' command (max 24 cameras).
    """
    _fields_ = [('camerasFound', ctypes.c_ushort),  # number of cameras detected
                ('usbInfo', QueryUSBInfo * 24)]     # fixed-size array, one entry per slot
# Camera type codes, returned by Query USB Info, Establish Link, Get CCD Info, etc.
# Model codes are consecutive integers starting at 4; 0xFFFF means no camera present.
camera_types = dict(enumerate(("ST7_CAMERA",
                               "ST8_CAMERA",
                               "ST5C_CAMERA",
                               "TCE_CONTROLLER",
                               "ST237_CAMERA",
                               "STK_CAMERA",
                               "ST9_CAMERA",
                               "STV_CAMERA",
                               "ST10_CAMERA",
                               "ST1K_CAMERA",
                               "ST2K_CAMERA",
                               "STL_CAMERA",
                               "ST402_CAMERA",
                               "STX_CAMERA",
                               "ST4K_CAMERA",
                               "STT_CAMERA",
                               "STI_CAMERA",
                               "STF_CAMERA",
                               "NEXT_CAMERA"), start=4))
camera_types[0xFFFF] = "NO_CAMERA"

# Reverse dictionary
camera_type_codes = {camera: code for code, camera in camera_types.items()}
#################################################################################
# Open Device, Establish Link, Get Link status related
#################################################################################
# Device types by code. Used with Open Device, etc.
device_types = {0: "DEV_NONE",
                1: "DEV_LPT1",
                2: "DEV_LPT2",
                3: "DEV_LPT3",
                0x7F00: "DEV_USB",
                0x7F01: "DEV_ETH"}
# DEV_USB1 .. DEV_USB24 occupy consecutive codes starting at 0x7F02.
device_types.update({0x7F02 + i: "DEV_USB{}".format(i + 1) for i in range(24)})

# Reverse dictionary
device_type_codes = {device: code for code, device in device_types.items()}
class OpenDeviceParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Open Device command.
    """
    _fields_ = [('deviceType', ctypes.c_ushort),      # code from device_types
                ('lptBaseAddress', ctypes.c_ushort),  # only relevant for DEV_LPTn devices
                ('ipAddress', ctypes.c_ulong)]        # only relevant for DEV_ETH devices
class EstablishLinkParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Establish Link command.
    """
    # Reserved field; not for application use.
    _fields_ = [('sbigUseOnly', ctypes.c_ushort)]
class EstablishLinkResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Establish Link command.
    """
    # Code of the connected camera model, see camera_types dict.
    _fields_ = [('cameraType', ctypes.c_ushort)]
class GetLinkStatusResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get Link Status command.
    """
    _fields_ = [('linkEstablished', ctypes.c_ushort),  # 0/1 flag
                ('baseAddress', ctypes.c_ushort),
                ('cameraType', ctypes.c_ushort),       # code from camera_types
                ('comTotal', ctypes.c_ulong),          # total communications with camera
                ('comFailed', ctypes.c_ulong)]         # failed communications with camera
#################################################################################
# Get Driver Handle, Set Driver Handle related
#################################################################################
class GetDriverHandleResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get Driver Handle command.

    The handle is the camera ID used when switching control between connected
    cameras with the Set Driver Handle command.
    """
    _fields_ = [('handle', ctypes.c_short)]
# Used to disconnect from a camera in order to get the handle for another.
# NOTE: this value is NOT in sbigudrv.h or the SBIG Universal Driver docs;
# it had to be found from third-party sources.
INVALID_HANDLE_VALUE = -1
class SetDriverHandleParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameter for the Set Driver Handle command.
    """
    # Handle previously obtained from Get Driver Handle, or INVALID_HANDLE_VALUE.
    _fields_ = [('handle', ctypes.c_short)]
#################################################################################
# Temperature and cooling control related
#################################################################################
class QueryTemperatureStatusParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the
    Query Temperature Status command.
    """
    # Request code from temp_status_request_codes.
    _fields_ = [('request', ctypes.c_ushort)]
# Request codes for the Query Temperature Status command, consecutive from zero.
temp_status_requests = dict(enumerate(('TEMP_STATUS_STANDARD',
                                       'TEMP_STATUS_ADVANCED',
                                       'TEMP_STATUS_ADVANCED2')))
temp_status_request_codes = {request: code for code, request in temp_status_requests.items()}
class QueryTemperatureStatusResults(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the Query Temperature Status
    command (standard version).
    """
    _fields_ = [('enabled', ctypes.c_ushort),            # 0/1: cooling enabled
                ('ccdSetpoint', ctypes.c_ushort),
                ('power', ctypes.c_ushort),
                ('ccdThermistor', ctypes.c_ushort),      # raw thermistor readings
                ('ambientThermistor', ctypes.c_ushort)]
class QueryTemperatureStatusResults2(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the Query Temperature Status
    command (extended version, request TEMP_STATUS_ADVANCED2).
    """
    _fields_ = [('coolingEnabled', ctypes.c_ushort),
                ('fanEnabled', ctypes.c_ushort),
                ('ccdSetpoint', ctypes.c_double),
                ('imagingCCDTemperature', ctypes.c_double),
                ('trackingCCDTemperature', ctypes.c_double),
                ('externalTrackingCCDTemperature', ctypes.c_double),
                ('ambientTemperature', ctypes.c_double),
                ('imagingCCDPower', ctypes.c_double),
                ('trackingCCDPower', ctypes.c_double),
                ('externalTrackingCCDPower', ctypes.c_double),
                ('heatsinkTemperature', ctypes.c_double),
                ('fanPower', ctypes.c_double),
                ('fanSpeed', ctypes.c_double),
                ('trackingCCDSetpoint', ctypes.c_double)]
# Regulation codes for the Set Temperature Regulation commands, consecutive from zero.
temperature_regulations = dict(enumerate(("REGULATION_OFF",
                                          "REGULATION_ON",
                                          "REGULATION_OVERRIDE",
                                          "REGULATION_FREEZE",
                                          "REGULATION_UNFREEZE",
                                          "REGULATION_ENABLE_AUTOFREEZE",
                                          "REGULATION_DISABLE_AUTOFREEZE")))
temperature_regulation_codes = {regulation: code
                                for code, regulation in temperature_regulations.items()}
class SetTemperatureRegulationParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the
    Set Temperature Regulation command.
    """
    _fields_ = [('regulation', ctypes.c_ushort),   # code from temperature_regulation_codes
                ('ccdSetpoint', ctypes.c_ushort)]
class SetTemperatureRegulationParams2(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the
    Set Temperature Regulation 2 command.
    """
    _fields_ = [('regulation', ctypes.c_ushort),   # code from temperature_regulation_codes
                ('ccdSetpoint', ctypes.c_double)]  # set point, as a floating point value
################################################################################
# Get CCD Info related
################################################################################
class GetCCDInfoParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Get CCD Info command,
    used obtain the details & capabilities of the connected camera.
    """
    # Request code from ccd_info_request_codes; determines which Results struct to use.
    _fields_ = [('request', ctypes.c_ushort)]
# Request codes for the Get CCD Info command, consecutive from zero.
ccd_info_requests = dict(enumerate(('CCD_INFO_IMAGING',
                                    'CCD_INFO_TRACKING',
                                    'CCD_INFO_EXTENDED',
                                    'CCD_INFO_EXTENDED_5C',
                                    'CCD_INFO_EXTENDED2_IMAGING',
                                    'CCD_INFO_EXTENDED2_TRACKING',
                                    'CCD_INFO_EXTENDED3')))
ccd_info_request_codes = {request: code for code, request in ccd_info_requests.items()}
class ReadoutInfo(ctypes.Structure):
    """
    ctypes Structure to store details of an individual readout mode. An array of up
    to 20 of these will be returned as part of the GetCCDInfoResults0 struct when the
    Get CCD Info command is used with request 'CCD_INFO_IMAGING'.

    The gain field is a 4 digit Binary Coded Decimal (yes, really) of the form XX.XX,
    in units of electrons/ADU.

    The pixel_width and pixel_height fields are 6 digit Binary Coded Decimals of the
    form XXXXXX.XX in units of microns, helpfully supporting pixels up to 1 metre across.
    """
    _fields_ = [('mode', ctypes.c_ushort),        # code, see readout_modes
                ('width', ctypes.c_ushort),       # image dimensions in this mode, pixels
                ('height', ctypes.c_ushort),
                ('gain', ctypes.c_ushort),        # BCD, electrons/ADU
                ('pixelWidth', ctypes.c_ulong),   # BCD, microns
                ('pixelHeight', ctypes.c_ulong)]  # BCD, microns
class GetCCDInfoResults0(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    requests 'CCD_INFO_IMAGING' or 'CCD_INFO_TRACKING'.

    The firmwareVersion field is 4 digit binary coded decimal of the form XX.XX.
    """
    _fields_ = [('firmwareVersion', ctypes.c_ushort),  # BCD, XX.XX
                ('cameraType', ctypes.c_ushort),       # code from camera_types
                ('name', ctypes.c_char * 64),
                ('readoutModes', ctypes.c_ushort),     # number of valid entries in readoutInfo
                ('readoutInfo', ReadoutInfo * 20)]
class GetCCDInfoResults2(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    request 'CCD_INFO_EXTENDED'.
    """
    _fields_ = [('badColumns', ctypes.c_ushort),     # number of valid entries in columns
                ('columns', ctypes.c_ushort * 4),    # indices of the bad columns
                ('imagingABG', ctypes.c_ushort),     # whether the CCD has anti-blooming gates
                ('serialNumber', ctypes.c_char * 10)]
class GetCCDInfoResults4(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    requests 'CCD_INFO_EXTENDED2_IMAGING' or 'CCD_INFO_EXTENDED2_TRACKING'.

    The capabilitiesBits is a bitmap, yay. The individual bits are mapped to
    single-bit fields here; the remaining bits of the ushort are unused padding.
    """
    _fields_ = [('capabilities_b0', ctypes.c_int, 1),
                ('capabilities_b1', ctypes.c_int, 1),
                ('capabilities_b2', ctypes.c_int, 1),
                ('capabilities_b3', ctypes.c_int, 1),
                ('capabilities_b4', ctypes.c_int, 1),
                ('capabilities_b5', ctypes.c_int, 1),
                ('capabilities_unusued', ctypes.c_int, ctypes.sizeof(ctypes.c_ushort) * 8 - 6),
                ('dumpExtra', ctypes.c_ushort)]
class GetCCDInfoResults6(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    the request 'CCD_INFO_EXTENDED3'.
    The sbigudrv.h C header says there should be three bitmask fields, each of type
    ulong, which would be 64 bits on this platform (OS X), BUT trial and error has
    determined they're actually 32 bits long.
    """
    # Each 32-bit bitmask is split into its two defined flag bits plus an
    # unused filler field; `extraBits` is reserved by the driver.
    _fields_ = [('camera_b0', ctypes.c_int, 1),
                ('camera_b1', ctypes.c_int, 1),
                ('camera_unused', ctypes.c_int, 30),
                ('ccd_b0', ctypes.c_int, 1),
                ('ccd_b1', ctypes.c_int, 1),
                ('ccd_unused', ctypes.c_int, 30),
                ('extraBits', ctypes.c_int, 32)]
#################################################################################
# Get Driver Control, Set Driver Control related
#################################################################################
# Driver control parameters, indexed by the numeric codes the SBIG driver
# assigns them (0 .. DCP_LAST, in declaration order).
driver_control_params = dict(enumerate(('DCP_USB_FIFO_ENABLE',
                                        'DCP_CALL_JOURNAL_ENABLE',
                                        'DCP_IVTOH_RATIO',
                                        'DCP_USB_FIFO_SIZE',
                                        'DCP_USB_DRIVER',
                                        'DCP_KAI_RELGAIN',
                                        'DCP_USB_PIXEL_DL_ENABLE',
                                        'DCP_HIGH_THROUGHPUT',
                                        'DCP_VDD_OPTIMIZED',
                                        'DCP_AUTO_AD_GAIN',
                                        'DCP_NO_HCLKS_FOR_INTEGRATION',
                                        'DCP_TDI_MODE_ENABLE',
                                        'DCP_VERT_FLUSH_CONTROL_ENABLE',
                                        'DCP_ETHERNET_PIPELINE_ENABLE',
                                        'DCP_FAST_LINK',
                                        'DCP_OVERSCAN_ROWSCOLS',
                                        'DCP_PIXEL_PIPELINE_ENABLE',
                                        'DCP_COLUMN_REPAIR_ENABLE',
                                        'DCP_WARM_PIXEL_REPAIR_ENABLE',
                                        'DCP_WARM_PIXEL_REPAIR_COUNT',
                                        'DCP_TDI_MODE_DRIFT_RATE',
                                        'DCP_OVERRIDE_AD_GAIN',
                                        'DCP_ENABLE_AUTO_OFFSET',
                                        'DCP_LAST')))
# Reverse lookup: parameter name -> numeric driver control code.
driver_control_codes = {param: code for code, param in driver_control_params.items()}
class GetDriverControlParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Get Driver Control command,
    used to query the value of a specific driver control parameter.
    """
    # Numeric code of the parameter to query; see `driver_control_codes`.
    _fields_ = [('controlParameter', ctypes.c_ushort), ]
class GetDriverControlResults(ctypes.Structure):
    """
    ctypes Structure to hold the result from the Get Driver Control command,
    used to query the value of a specific driver control parameter
    """
    # Current value of the queried driver control parameter.
    _fields_ = [('controlValue', ctypes.c_ulong), ]
class SetDriverControlParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Set Driver Control command,
    used to set the value of a specific driver control parameter
    """
    # Parameter code (see `driver_control_codes`) and the value to assign.
    _fields_ = [('controlParameter', ctypes.c_ushort),
                ('controlValue', ctypes.c_ulong)]
#################################################################################
# Start Exposure, Query Command Status, End Exposure related
#################################################################################
class StartExposureParams2(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Start Exposure 2 command.
    (The Start Exposure command is deprecated.)
    """
    # `ccd` selects the sensor (see `ccds`); `abgState`, `openShutter` and
    # `readoutMode` use the module-level code dicts. `top`/`left`/`height`/
    # `width` define the exposure window. exposureTime units are
    # driver-defined (hundredths of a second per SBIG convention) - TODO
    # confirm against the SBIG Universal Driver documentation.
    _fields_ = [('ccd', ctypes.c_ushort),
                ('exposureTime', ctypes.c_ulong),
                ('abgState', ctypes.c_ushort),
                ('openShutter', ctypes.c_ushort),
                ('readoutMode', ctypes.c_ushort),
                ('top', ctypes.c_ushort),
                ('left', ctypes.c_ushort),
                ('height', ctypes.c_ushort),
                ('width', ctypes.c_ushort)]
# CCD selection for cameras with built in or connected tracking CCDs
ccds = dict(enumerate(('CCD_IMAGING', 'CCD_TRACKING', 'CCD_EXT_TRACKING')))
ccd_codes = {name: code for code, name in ccds.items()}

# Anti-Blooming Gate states
abg_states = dict(enumerate(('ABG_LOW7', 'ABG_CLK_LOW7',
                             'ABG_CLK_MED7', 'ABG_CLK_HI7')))
abg_state_codes = {name: code for code, name in abg_states.items()}

# Shutter mode commands
shutter_commands = dict(enumerate(('SC_LEAVE_SHUTTER', 'SC_OPEN_SHUTTER',
                                   'SC_CLOSE_SHUTTER', 'SC_INITIALIZE_SHUTTER',
                                   'SC_OPEN_EXP_SHUTTER', 'SC_CLOSE_EXT_SHUTTER')))
shutter_command_codes = {name: code for code, name in shutter_commands.items()}

# Readout binning modes
readout_modes = dict(enumerate(('RM_1X1', 'RM_2X2', 'RM_3X3',
                                'RM_NX1', 'RM_NX2', 'RM_NX3',
                                'RM_1X1_VOFFCHIP', 'RM_2X2_VOFFCHIP',
                                'RM_3X3_VOFFCHIP', 'RM_9X9', 'RM_NXN')))
readout_mode_codes = {name: code for code, name in readout_modes.items()}

# Command status codes and corresponding messages as returned by
# Query Command Status
statuses = dict(enumerate(("CS_IDLE", "CS_IN_PROGRESS",
                           "CS_INTEGRATING", "CS_INTEGRATION_COMPLETE")))
# Reverse dictionary
status_codes = {name: code for code, name in statuses.items()}
class QueryCommandStatusParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Query Command Status
    command.
    """
    # Code of the command whose progress is being queried.
    _fields_ = [('command', ctypes.c_ushort)]
class QueryCommandStatusResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Query Command Status command.
    """
    # Status code; see the module-level `statuses` dict for meanings.
    _fields_ = [('status', ctypes.c_ushort)]
class EndExposureParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the End Exposure command.
    """
    # Sensor whose exposure is being ended; see the module-level `ccds` dict.
    _fields_ = [('ccd', ctypes.c_ushort)]
#################################################################################
# Start Readout, Readout Line, End Readout related
#################################################################################
class StartReadoutParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Start Readout command.
    """
    # `ccd` and `readoutMode` use the module-level code dicts;
    # `top`/`left`/`height`/`width` define the readout window.
    _fields_ = [('ccd', ctypes.c_ushort),
                ('readoutMode', ctypes.c_ushort),
                ('top', ctypes.c_ushort),
                ('left', ctypes.c_ushort),
                ('height', ctypes.c_ushort),
                ('width', ctypes.c_ushort)]
class ReadoutLineParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Readout Line command.
    """
    # Reads `pixelLength` pixels of one line starting at `pixelStart`,
    # using the given sensor and binning mode.
    _fields_ = [('ccd', ctypes.c_ushort),
                ('readoutMode', ctypes.c_ushort),
                ('pixelStart', ctypes.c_ushort),
                ('pixelLength', ctypes.c_ushort)]
class EndReadoutParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the End Readout command.
    """
    # Sensor whose readout is being ended; see the module-level `ccds` dict.
    _fields_ = [('ccd', ctypes.c_ushort)]
#################################################################################
# Get Driver Info related
#################################################################################
# Requests relevant to Get Driver Info command
driver_requests = dict(enumerate(("DRIVER_STD", "DRIVER_EXTENDED",
                                  "DRIVER_USB_LOADER")))
# Reverse dictionary
driver_request_codes = {name: code for code, name in driver_requests.items()}
class GetDriverInfoParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the Get Driver Info command
    """
    # Request code; see the module-level `driver_request_codes` dict.
    _fields_ = [('request', ctypes.c_ushort)]
class GetDriverInfoResults0(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the Get Driver Info command
    """
    # `version` is presumably BCD like the other version fields - TODO confirm.
    _fields_ = [('version', ctypes.c_ushort),
                ('name', ctypes.c_char * 64),
                ('maxRequest', ctypes.c_ushort)]
#################################################################################
# Filter wheel related
#################################################################################
class CFWParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the CFW (colour filter wheel) command
    """
    # Model/command use the CFWModelSelect/CFWCommand enums; the two params
    # are command-specific. in/out pointers with lengths describe optional
    # caller-supplied data buffers.
    _fields_ = [('cfwModel', ctypes.c_ushort),
                ('cfwCommand', ctypes.c_ushort),
                ('cfwParam1', ctypes.c_ulong),
                ('cfwParam2', ctypes.c_ulong),
                ('outLength', ctypes.c_ushort),
                ('outPtr', ctypes.c_char_p),
                ('inLength', ctypes.c_ushort),
                ('inPtr', ctypes.c_char_p)]
class CFWResults(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the CFW (colour filter wheel) command
    """
    # Position/status/error map onto the CFWStatus and CFWError enums;
    # the two results fields are command-specific.
    _fields_ = [('cfwModel', ctypes.c_ushort),
                ('cfwPosition', ctypes.c_ushort),
                ('cfwStatus', ctypes.c_ushort),
                ('cfwError', ctypes.c_ushort),
                ('cfwResults1', ctypes.c_ulong),
                ('cfwResults2', ctypes.c_ulong)]
@enum.unique
class CFWModelSelect(enum.IntEnum):
    """
    Filter wheel model selection enum.

    Values are written out explicitly (matching the sequence enum.auto()
    would generate) so the driver codes are visible at a glance.
    """
    UNKNOWN = 0
    CFW2 = 1
    CFW5 = 2
    CFW8 = 3
    CFWL = 4
    CFW402 = 5
    AUTO = 6
    CFW6A = 7
    CFW10 = 8
    CFW10_SERIAL = 9
    CFW9 = 10
    CFWL8 = 11
    CFWL8G = 12
    CFW1603 = 13
    FW5_STX = 14
    FW5_8300 = 15
    FW8_8300 = 16
    FW7_STX = 17
    FW8_STT = 18
    FW5_STF_DETENT = 19
@enum.unique
class CFWCommand(enum.IntEnum):
    """
    Filter wheel command enum, with the driver codes written out explicitly
    (same sequence enum.auto() would generate).
    """
    QUERY = 0
    GOTO = 1
    INIT = 2
    GET_INFO = 3
    OPEN_DEVICE = 4
    CLOSE_DEVICE = 5
@enum.unique
class CFWStatus(enum.IntEnum):
    """
    Filter wheel status enum, with the driver codes written out explicitly
    (same sequence enum.auto() would generate).
    """
    UNKNOWN = 0
    IDLE = 1
    BUSY = 2
@enum.unique
class CFWError(enum.IntEnum):
    """
    Filter wheel errors enum, with the driver codes written out explicitly
    (same sequence enum.auto() would generate).
    """
    NONE = 0
    BUSY = 1
    BAD_COMMAND = 2
    CAL_ERROR = 3
    MOTOR_TIMEOUT = 4
    BAD_MODEL = 5
    DEVICE_NOT_CLOSED = 6
    DEVICE_NOT_OPEN = 7
    I2C_ERROR = 8
@enum.unique
class CFWGetInfoSelect(enum.IntEnum):
    """
    Filter wheel get info select enum, with the driver codes written out
    explicitly (same sequence enum.auto() would generate).
    """
    FIRMWARE_VERSION = 0
    CAL_DATA = 1
    DATA_REGISTERS = 2
| 41.060724
| 100
| 0.572669
|
4a0936b81fc3283200d64b406c2d029b78698bc2
| 6,642
|
py
|
Python
|
experiments/util.py
|
shlu2019/meta_learning_pacoh
|
376349e66bdd782e3d06b4bac2ecb56a2a10bcf6
|
[
"MIT"
] | 23
|
2020-02-13T12:45:42.000Z
|
2022-03-07T20:37:14.000Z
|
experiments/util.py
|
JeremyAlain/meta_learning_pacoh
|
b4c2c37d9715e74542bab556ac1f5d778cc3409c
|
[
"MIT"
] | 3
|
2020-09-01T15:24:04.000Z
|
2021-06-03T10:39:16.000Z
|
experiments/util.py
|
JeremyAlain/meta_learning_pacoh
|
b4c2c37d9715e74542bab556ac1f5d778cc3409c
|
[
"MIT"
] | 9
|
2020-04-15T09:43:22.000Z
|
2021-07-18T13:37:38.000Z
|
import os
import copy
import json
import hashlib
import sys
import glob
import collections
import itertools
import multiprocessing
import pandas as pd
from absl import flags
from meta_learn.util import get_logger
# absl defines these flags on every program; they are configuration noise
# and are stripped from the experiment's hyperparameter record.
DEFAULT_FLAGS = ['logtostderr', 'alsologtostderr', 'v', 'verbosity',
                 'stderrthreshold', 'showprefixforinfo', 'run_with_pdb', 'pdb_post_mortem',
                 'run_with_profiling', 'profile_file', 'use_cprofile_for_profiling',
                 'only_check_args', '?', 'help', 'helpshort', 'helpfull', 'helpxml']
# Repository-level data directory: <repo root>/data, resolved relative to this file.
DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'data')
def setup_exp_doc(exp_name, data_dir=None):
    """Prepare the bookkeeping for one experiment task.

    Collects the current absl flag configuration, derives a unique task hash
    from it, creates the task directory, writes the configuration there as
    config.json and logs it.

    Args:
        exp_name: name of the experiment (parent directory name).
        data_dir: optional root data directory; defaults to DATA_DIR.

    Returns:
        Tuple (logger, exp_dir): the configured logger and the path of the
        created task directory.
    """
    # create dictionary of flags / hyperparams
    flags_dict = get_flags_dict()
    flags_dict['exp_name'] = exp_name
    # generate unique task identifier
    task_hash = hash_dict(flags_dict)
    flags_dict['task_hash'] = task_hash
    # create directory for experiment task, initialize logger and save the flags_dict
    exp_dir = create_exp_dir(exp_name, task_hash, data_dir=data_dir)
    logger = get_logger(log_dir=exp_dir, expname=exp_name)
    save_dict(flags_dict, os.path.join(exp_dir, 'config.json'))
    flags_table_str = dict_to_tabular_str(flags_dict)
    logger.info(" ------ Starting experiment: %s ------ \n"%exp_name+\
                "----------------------------------------\n"+\
                " Configuration \n"+\
                "----------------------------------------"+\
                "%s"%flags_table_str+\
                "----------------------------------------\n")
    return logger, exp_dir
def save_results(results_dict, exp_dir, log=True):
    """Write results_dict to <exp_dir>/results.json and optionally log it.

    Args:
        results_dict: metrics/results of the experiment task.
        exp_dir: task directory to write results.json into.
        log: if True, also pretty-print the results via the task logger.
    """
    results_file = os.path.join(exp_dir, 'results.json')
    save_dict(results_dict, results_file)
    if log:
        logger = get_logger(log_dir=exp_dir)
        results_table_str = dict_to_tabular_str(results_dict)
        logger.info("\n"+
                    "----------------------------------------\n" + \
                    " Results \n" + \
                    "----------------------------------------" + \
                    "%s" % results_table_str + \
                    "----------------------------------------\n")
def create_exp_parent_dir(exp_name, data_dir=None):
    """Create (if needed) and return the parent directory of an experiment.

    Args:
        exp_name: experiment name; becomes the directory name under data_dir.
        data_dir: root data directory; defaults to the module-level DATA_DIR.

    Returns:
        Path of the experiment parent directory.
    """
    if data_dir is None:
        data_dir = DATA_DIR
    exp_parent_dir = os.path.join(data_dir, exp_name)
    # makedirs(exist_ok=True) is race-free (the previous isdir()+mkdir()
    # pair could fail when two tasks started concurrently) and also creates
    # a missing data_dir instead of raising FileNotFoundError.
    os.makedirs(exp_parent_dir, exist_ok=True)
    return exp_parent_dir
def create_exp_dir(exp_name, task_hash, data_dir=None):
    """Create (if needed) and return the directory of one experiment task.

    The task directory lives under the experiment parent directory and is
    named after the task's configuration hash.

    Args:
        exp_name: experiment name (parent directory).
        task_hash: unique task identifier (directory name).
        data_dir: root data directory; defaults to DATA_DIR.

    Returns:
        Path of the task directory.
    """
    exp_parent_dir = create_exp_parent_dir(exp_name, data_dir=data_dir)
    exp_dir = os.path.join(exp_parent_dir, str(task_hash))
    # exist_ok avoids the check-then-create race of the old isdir()+mkdir().
    os.makedirs(exp_dir, exist_ok=True)
    return exp_dir
def get_flags_dict():
    """Return the current absl flag values minus absl's built-in flags.

    Returns:
        Dict mapping flag name -> value, deep-copied so callers may mutate it.
    """
    flags_dict = copy.deepcopy(flags.FLAGS.flag_values_dict())
    # remove absl's default flags from the dict; pop(..., None) tolerates
    # flags that are absent in the running absl version (the previous
    # map(__delitem__) raised KeyError and abused map() for side effects).
    for flag_name in DEFAULT_FLAGS:
        flags_dict.pop(flag_name, None)
    return flags_dict
def hash_dict(dict):
    """Return the MD5 hex digest of the dict's canonical (key-sorted) JSON."""
    canonical = json.dumps(dict, sort_keys=True)
    return hashlib.md5(canonical.encode()).hexdigest()
def save_dict(dict, dump_path):
    """Serialize *dict* to *dump_path* as pretty-printed, key-sorted JSON."""
    serialized = json.dumps(dict, indent=4, sort_keys=True)
    with open(dump_path, 'w') as json_file:
        json_file.write(serialized)
def dict_to_tabular_str(dict):
    """Render *dict* as an aligned two-column (key, value) text table.

    Values are passed through ``str`` first so unformattable values such as
    ``None`` no longer raise TypeError (and booleans render as True/False
    rather than 1/0). The redundant OrderedDict wrapper was dropped: plain
    dicts preserve insertion order in Python 3.7+.

    Returns:
        Multi-line string, one left-aligned "key value" row per entry.
    """
    row_format = "{:<25}{:<10}"  # avoid shadowing the builtin `format`
    s = "\n"
    for key, value in dict.items():
        s += row_format.format(key, str(value)) + '\n'
    return s
def collect_exp_results(exp_name, verbose=True):
    """Aggregate config and results of all finished tasks of an experiment.

    Scans every task folder under DATA_DIR/<exp_name>. Folders containing
    both config.json and results.json contribute one row; folders lacking
    either file are only counted.

    Args:
        exp_name: experiment whose task folders are scanned.
        verbose: if True, log how many folders had / lacked results.

    Returns:
        pandas.DataFrame with one row per completed task (config keys merged
        with result keys; results overwrite clashing config keys).
    """
    exp_dir = os.path.join(DATA_DIR, exp_name)
    no_results_counter = 0
    exp_dicts = []
    for exp_sub_dir in glob.glob(exp_dir + '/*'):
        config_file = os.path.join(exp_sub_dir, 'config.json')
        results_file = os.path.join(exp_sub_dir, 'results.json')
        if os.path.isfile(config_file) and os.path.isfile(results_file):
            with open(config_file, 'r') as f:
                exp_dict = json.load(f)
            with open(results_file, 'r') as f:
                exp_dict.update(json.load(f))
            exp_dicts.append(exp_dict)
        else:
            no_results_counter += 1
    if verbose:
        logger = get_logger()
        logger.info('Parsed results %s - found %i folders with results and %i folders without results'
                    %(exp_name, len(exp_dicts), no_results_counter))
    return pd.DataFrame(data=exp_dicts)
def generate_launch_commands(module, exp_config, check_flags=True):
    """Build launch commands for the cartesian product of hyperparameters.

    Args:
        module: experiment module; its file path becomes the script to run.
        exp_config: dict mapping flag name -> iterable of candidate values.
        check_flags: if True, verify each key is a flag defined by `module`.

    Returns:
        List of command strings, one per configuration combination, each of
        the form "<python> <script> --key=value ...".
    """
    # create base command without flags
    base_cmd = generate_base_command(module)
    if check_flags:
        allowed_flags = set(module.FLAGS.flag_values_dict().keys())
        for key, value in exp_config.items():
            assert hasattr(value, '__iter__')
            assert key in allowed_flags, "%s is not a flag in %s"%(key, str(module))
    # one tuple per combination, in the iteration order of exp_config
    config_product = list(itertools.product(*list(exp_config.values())))
    config_product_dicts = [(dict(zip(exp_config.keys(), conf))) for conf in config_product]
    # add flags to the base command
    cmds = []
    for config_dict in config_product_dicts:
        cmd = base_cmd
        for (key, value) in config_dict.items():
            cmd += " --%s=%s"%(str(key), str(value))
        cmds.append(cmd)
    return cmds
def generate_base_command(module):
    """Return "<current python interpreter> <absolute path of module's file>"."""
    return '{} {}'.format(sys.executable, os.path.abspath(module.__file__))
class AsyncExecutor:
    """Simple multiprocessing-based task executor.

    Keeps a fixed-size pool of worker processes; `run` feeds tasks to free
    slots until every task argument tuple has been consumed and all workers
    have finished.
    """
    def __init__(self, n_jobs=1):
        # n_jobs <= 0 means "use all available CPU cores".
        self.num_workers = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
        self._pool = []
        self._populate_pool()
    def run(self, target, *args_iter, verbose=False):
        """Run target(*args) once per tuple zipped from args_iter.

        Blocks until all tasks have completed.
        NOTE(review): the loop below polls is_alive() without sleeping, so
        it busy-waits on one CPU while workers run - consider a short sleep.
        """
        workers_idle = [False] * self.num_workers
        tasks = list(zip(*args_iter))
        n_tasks = len(tasks)
        while not all(workers_idle):
            for i in range(self.num_workers):
                if not self._pool[i].is_alive():
                    # reap the finished process, then reuse its slot
                    self._pool[i].terminate()
                    if len(tasks) > 0:
                        if verbose:
                            print(n_tasks-len(tasks))
                        next_task = tasks.pop(0)
                        self._pool[i] = _start_process(target, next_task)
                    else:
                        workers_idle[i] = True
    def _populate_pool(self):
        # Fill the pool with no-op processes so every slot holds a Process
        # object before run() starts polling them.
        self._pool = [_start_process(_dummy_fun) for _ in range(self.num_workers)]
def _start_process(target, args=None):
if args:
p = multiprocessing.Process(target=target, args=args)
else:
p = multiprocessing.Process(target=target)
p.start()
return p
def _dummy_fun():
pass
| 33.715736
| 102
| 0.604938
|
4a0936de6594f4c716776302ac76529180f83ebf
| 6,338
|
py
|
Python
|
ipclassifier/syllabify/syllabify.py
|
Chad-Mowbray/iamb-classifier
|
9da3bbfa2cac4518dae5b01de7c62a100a785a82
|
[
"MIT"
] | 2
|
2021-09-11T13:12:38.000Z
|
2022-03-16T23:42:34.000Z
|
ipclassifier/syllabify/syllabify.py
|
redova87/iamb-classifier
|
9da3bbfa2cac4518dae5b01de7c62a100a785a82
|
[
"MIT"
] | 2
|
2021-09-07T02:35:37.000Z
|
2022-02-06T08:29:18.000Z
|
ipclassifier/syllabify/syllabify.py
|
redova87/iamb-classifier
|
9da3bbfa2cac4518dae5b01de7c62a100a785a82
|
[
"MIT"
] | 2
|
2022-02-06T08:27:47.000Z
|
2022-02-06T17:46:34.000Z
|
# slightly modified from https://github.com/kylebgorman/syllabify
from itertools import chain
## constants
# Lax (checked) vowels; the "Alaska rule" in syllabify() lets these attract
# a following /s/ into their coda.
SLAX = {'IH1', 'IH2', 'EH1', 'EH2', 'AE1', 'AE2', 'AH1', 'AH2',
        'UH1', 'UH2',}
# All ARPABET vowel nuclei, with and without stress digits.
VOWELS = {'IY1', 'IY2', 'IY0', 'EY1', 'EY2', 'EY0', 'AA1', 'AA2', 'AA0',
          'ER1', 'ER2', 'ER0', 'AW1', 'AW2', 'AW0', 'AO1', 'AO2', 'AO0',
          'AY1', 'AY2', 'AY0', 'OW1', 'OW2', 'OW0', 'OY1', 'OY2', 'OY0',
          'IH0', 'EH0', 'AE0', 'AH0', 'UH0', 'UW1', 'UW2', 'UW0', 'UW',
          'IY', 'EY', 'AA', 'ER', 'AW', 'AO', 'AY', 'OW', 'OY',
          'UH', 'IH', 'EH', 'AE', 'AH', 'UH',} | SLAX
## licit medial onsets
# Two-consonant clusters that may begin a word-medial syllable.
O2 = {('P', 'R'), ('T', 'R'), ('K', 'R'), ('B', 'R'), ('D', 'R'),
      ('G', 'R'), ('F', 'R'), ('TH', 'R'),
      ('P', 'L'), ('K', 'L'), ('B', 'L'), ('G', 'L'),
      ('F', 'L'), ('S', 'L'),
      ('K', 'W'), ('G', 'W'), ('S', 'W'),
      ('S', 'P'), ('S', 'T'), ('S', 'K'),
      ('HH', 'Y'), # "clerihew"
      ('R', 'W'),}
# Three-consonant clusters that may begin a word-medial syllable.
O3 = {('S', 'T', 'R'), ('S', 'K', 'L'), ('T', 'R', 'W')} # "octroi"
# This does not represent anything like a complete list of onsets, but
# merely those that need to be maximized in medial position.
def syllabify(pron, alaska_rule=True):
    """
    Syllabifies a CMU dictionary (ARPABET) word string
    # Alaska rule:
    >>> pprint(syllabify('AH0 L AE1 S K AH0'.split())) # Alaska
    '-AH0-.L-AE1-S.K-AH0-'
    >>> pprint(syllabify('AH0 L AE1 S K AH0'.split(), 0)) # Alaska
    '-AH0-.L-AE1-.S K-AH0-'
    # huge medial onsets:
    >>> pprint(syllabify('M IH1 N S T R AH0 L'.split())) # minstrel
    'M-IH1-N.S T R-AH0-L'
    >>> pprint(syllabify('AA1 K T R W AA0 R'.split())) # octroi
    '-AA1-K.T R W-AA0-R'
    # destressing
    >>> pprint(destress(syllabify('M IH1 L AH0 T EH2 R IY0'.split())))
    'M-IH-.L-AH-.T-EH-.R-IY-'
    # normal treatment of 'j':
    >>> pprint(syllabify('M EH1 N Y UW0'.split())) # menu
    'M-EH1-N.Y-UW0-'
    >>> pprint(syllabify('S P AE1 N Y AH0 L'.split())) # spaniel
    'S P-AE1-N.Y-AH0-L'
    >>> pprint(syllabify('K AE1 N Y AH0 N'.split())) # canyon
    'K-AE1-N.Y-AH0-N'
    >>> pprint(syllabify('M IH0 N Y UW2 EH1 T'.split())) # minuet
    'M-IH0-N.Y-UW2-.-EH1-T'
    >>> pprint(syllabify('JH UW1 N Y ER0'.split())) # junior
    'JH-UW1-N.Y-ER0-'
    >>> pprint(syllabify('K L EH R IH HH Y UW'.split())) # clerihew
    'K L-EH-.R-IH-.HH Y-UW-'
    # nuclear treatment of 'j'
    >>> pprint(syllabify('R EH1 S K Y UW0'.split())) # rescue
    'R-EH1-S.K-Y UW0-'
    >>> pprint(syllabify('T R IH1 B Y UW0 T'.split())) # tribute
    'T R-IH1-B.Y-UW0-T'
    >>> pprint(syllabify('N EH1 B Y AH0 L AH0'.split())) # nebula
    'N-EH1-B.Y-AH0-.L-AH0-'
    >>> pprint(syllabify('S P AE1 CH UH0 L AH0'.split())) # spatula
    'S P-AE1-.CH-UH0-.L-AH0-'
    >>> pprint(syllabify('AH0 K Y UW1 M AH0 N'.split())) # acumen
    '-AH0-K.Y-UW1-.M-AH0-N'
    >>> pprint(syllabify('S AH1 K Y AH0 L IH0 N T'.split())) # succulent
    'S-AH1-K.Y-AH0-.L-IH0-N T'
    >>> pprint(syllabify('F AO1 R M Y AH0 L AH0'.split())) # formula
    'F-AO1 R-M.Y-AH0-.L-AH0-'
    >>> pprint(syllabify('V AE1 L Y UW0'.split())) # value
    'V-AE1-L.Y-UW0-'
    # everything else
    >>> pprint(syllabify('N AO0 S T AE1 L JH IH0 K'.split())) # nostalgic
    'N-AO0-.S T-AE1-L.JH-IH0-K'
    >>> pprint(syllabify('CH ER1 CH M AH0 N'.split())) # churchmen
    'CH-ER1-CH.M-AH0-N'
    >>> pprint(syllabify('K AA1 M P AH0 N S EY2 T'.split())) # compensate
    'K-AA1-M.P-AH0-N.S-EY2-T'
    >>> pprint(syllabify('IH0 N S EH1 N S'.split())) # inCENSE
    '-IH0-N.S-EH1-N S'
    >>> pprint(syllabify('IH1 N S EH2 N S'.split())) # INcense
    '-IH1-N.S-EH2-N S'
    >>> pprint(syllabify('AH0 S EH1 N D'.split())) # ascend
    '-AH0-.S-EH1-N D'
    >>> pprint(syllabify('R OW1 T EY2 T'.split())) # rotate
    'R-OW1-.T-EY2-T'
    >>> pprint(syllabify('AA1 R T AH0 S T'.split())) # artist
    '-AA1 R-.T-AH0-S T'
    >>> pprint(syllabify('AE1 K T ER0'.split())) # actor
    '-AE1-K.T-ER0-'
    >>> pprint(syllabify('P L AE1 S T ER0'.split())) # plaster
    'P L-AE1-S.T-ER0-'
    >>> pprint(syllabify('B AH1 T ER0'.split())) # butter
    'B-AH1-.T-ER0-'
    >>> pprint(syllabify('K AE1 M AH0 L'.split())) # camel
    'K-AE1-.M-AH0-L'
    >>> pprint(syllabify('AH1 P ER0'.split())) # upper
    '-AH1-.P-ER0-'
    >>> pprint(syllabify('B AH0 L UW1 N'.split())) # balloon
    'B-AH0-.L-UW1-N'
    >>> pprint(syllabify('P R OW0 K L EY1 M'.split())) # proclaim
    'P R-OW0-.K L-EY1-M'
    >>> pprint(syllabify('IH0 N S EY1 N'.split())) # insane
    '-IH0-N.S-EY1-N'
    >>> pprint(syllabify('IH0 K S K L UW1 D'.split())) # exclude
    '-IH0-K.S K L-UW1-D'
    """
    # NOTE(review): the doctests above call `pprint`, which is not defined
    # in this module - presumably an alias for syllabify_pprint; confirm
    # before running them under doctest.
    # Returns a list of (onset, nucleus, coda) triples, one per syllable,
    # where each element is a list of ARPABET phone strings.
    ## main pass
    mypron = list(pron)
    nuclei = []
    onsets = []
    # i tracks the index of the most recently seen vowel (-1 = none yet)
    i = -1
    for (j, seg) in enumerate(mypron):
        if seg in VOWELS:
            nuclei.append([seg])
            onsets.append(mypron[i + 1:j]) # actually interludes, r.n.
            i = j
    # everything after the last vowel is the final coda
    codas = [mypron[i + 1:]]
    ## resolve disputes and compute coda
    # Each "interlude" between two nuclei is split: part joins the previous
    # syllable's coda, the rest stays as the next syllable's onset.
    for i in range(1, len(onsets)):
        coda = []
        # boundary cases
        # postvocalic /r/ attaches to the preceding nucleus ("artist")
        if len(onsets[i]) > 1 and onsets[i][0] == 'R':
            nuclei[i - 1].append(onsets[i].pop(0))
        # a /j/ glide before the vowel joins the nucleus ("rescue")
        if len(onsets[i]) > 2 and onsets[i][-1] == 'Y':
            nuclei[i].insert(0, onsets[i].pop())
        # Alaska rule: /s/ after a lax vowel closes that syllable
        if len(onsets[i]) > 1 and alaska_rule and nuclei[i-1][-1] in SLAX \
                and onsets[i][0] == 'S':
            coda.append(onsets[i].pop(0))
        # onset maximization
        # keep the longest licit onset (1, 2 or 3 consonants); the rest of
        # the interlude becomes the previous syllable's coda
        depth = 1
        if len(onsets[i]) > 1:
            if tuple(onsets[i][-2:]) in O2:
                depth = 3 if tuple(onsets[i][-3:]) in O3 else 2
        for j in range(len(onsets[i]) - depth):
            coda.append(onsets[i].pop(0))
        codas.insert(i - 1, coda)
    output = list(zip(onsets, nuclei, codas))
    return output
def syllabify_pprint(syllab):
    """Render a syllabification as text: phones are joined by spaces,
    onset/nucleus/coda by '-', and syllables by '.'."""
    syllable_strings = []
    for syllable in syllab:
        syllable_strings.append('-'.join(' '.join(part) for part in syllable))
    return '.'.join(syllable_strings)
def destress(syllab):
    """Return a copy of the syllabification with the numeric stress marker
    (0/1/2) stripped from every nuclear phone."""
    stress_marks = {'0', '1', '2'}
    destressed = []
    for onset, nucleus, coda in syllab:
        bare_nucleus = [phone[:-1] if phone[-1] in stress_marks else phone
                        for phone in nucleus]
        destressed.append((onset, bare_nucleus, coda))
    return destressed
| 37.502959
| 75
| 0.508993
|
4a0937c909b86b0b1c344569fb707e91f1213b25
| 606
|
py
|
Python
|
model/keras_layer.py
|
yoshiinet/dcase2021_task2_ar_frame_seq_model
|
acc2fe8c7ef5be873e42a4b3b85dbf5ce30f4e01
|
[
"MIT"
] | null | null | null |
model/keras_layer.py
|
yoshiinet/dcase2021_task2_ar_frame_seq_model
|
acc2fe8c7ef5be873e42a4b3b85dbf5ce30f4e01
|
[
"MIT"
] | 1
|
2022-03-30T11:29:28.000Z
|
2022-03-30T11:29:28.000Z
|
model/keras_layer.py
|
yoshiinet/dcase2021_task2_ar_frame_seq_model
|
acc2fe8c7ef5be873e42a4b3b85dbf5ce30f4e01
|
[
"MIT"
] | 1
|
2021-09-22T11:25:22.000Z
|
2021-09-22T11:25:22.000Z
|
# Copyright (c) 2021 ralabo.jp
# This software is released under the MIT License.
# see https://opensource.org/licenses/mit-license.php
# ====================================================
import torch
from torch import nn
from torch.nn import functional as F
def Dense(dim_in, dim_out):
    """Create an ``nn.Linear`` layer initialized like Keras' ``Dense``:
    Glorot/Xavier-uniform weights and zero biases."""
    layer = nn.Linear(dim_in, dim_out)
    nn.init.xavier_uniform_(layer.weight, gain=1)
    nn.init.zeros_(layer.bias)
    return layer
def BatchNormalization(dim_out):
    """Create an ``nn.BatchNorm1d`` matching Keras' ``BatchNormalization``
    defaults (``epsilon=0.001``, ``momentum=0.99``).

    Keras updates running statistics as ``running = 0.99 * running +
    0.01 * batch``, while PyTorch's ``momentum`` is the weight of the *new*
    observation. Keras' 0.99 therefore corresponds to 0.01 here; passing
    0.99 (as before) made the running statistics track almost only the
    latest batch.
    """
    m = nn.BatchNorm1d(dim_out, eps=0.001, momentum=0.01)
    return m
| 28.857143
| 58
| 0.623762
|
4a09381dddef13763a8eeda79936a9813b3c17b0
| 3,868
|
py
|
Python
|
modules/food.py
|
pyjka/yoda
|
19e55ace193cf157ea10ffe23d6b3201a140fb3f
|
[
"MIT"
] | null | null | null |
modules/food.py
|
pyjka/yoda
|
19e55ace193cf157ea10ffe23d6b3201a140fb3f
|
[
"MIT"
] | null | null | null |
modules/food.py
|
pyjka/yoda
|
19e55ace193cf157ea10ffe23d6b3201a140fb3f
|
[
"MIT"
] | null | null | null |
import click
import requests
# TheMealDB public API endpoints (v1, test API key "1").
FOOD_URL = 'https://www.themealdb.com/api/json/v1/1/search.php?s='
RANDOM_FOOD_URL = 'https://www.themealdb.com/api/json/v1/1/random.php'
# Module-level accumulator of formatted ingredient strings; appended to by
# get_ingridients() and never cleared (shared mutable state).
INGREDIENTS = []
def food_request(food):
    """Query TheMealDB search endpoint for *food* and return the raw
    'meals' list from the JSON response (None when nothing matches)."""
    response = requests.get(FOOD_URL + food)
    return response.json()['meals']
def get_meal_instructions(meal):
    """Echo the preparation instructions of the first match for *meal*."""
    meal_data = food_request(meal)
    click.echo(meal_data[0]['strInstructions'])
def get_ingridients(meal):
    """Echo and collect the ingredient list (with measures) for *meal*.

    Fetches the meal record once up front; the previous implementation
    issued two HTTP requests per loop iteration (38 requests per meal).
    The loop also now covers all 20 strIngredientN/strMeasureN slots that
    TheMealDB provides (the old range(1, 20) silently skipped slot 20),
    and uses .get() so a missing slot key cannot raise KeyError.
    """
    meal_data = food_request(meal)[0]
    for ing_number in range(1, 21):
        ingredient = meal_data.get('strIngredient' + str(ing_number))
        qty = meal_data.get('strMeasure' + str(ing_number))
        if ingredient:
            if not qty:
                output_str = "{} (as needed)".format(ingredient)
            else:
                output_str = "{} x {}".format(ingredient, qty)
            click.echo(output_str)
            INGREDIENTS.append(output_str)
@click.group()
def food():
    """
    Food module... yum
    Suggest recipes for food, drinks, and restaurants
    """
    # Click group entry point: subcommands attach below via @food.command().
@food.command()
def suggest_food():
    """
    Suggests a random meal from the meal DB
    """
    def get_meal_suggestion():
        # Fetch one random meal from TheMealDB and echo its full recipe.
        req = requests.get(RANDOM_FOOD_URL)
        parsed_response = req.json()
        food_json = parsed_response['meals']
        meal = food_json[0]['strMeal']
        click.echo('Try this amazing ' + meal + ', it\'s delicious!')
        click.echo("You will need following :")
        click.echo('------------')
        # NOTE(review): the helpers below re-query the API by meal name even
        # though the random response above already holds the full record.
        get_ingridients(meal)
        click.echo('------Follow the instructions below------ :')
        get_meal_instructions(meal)
        click.echo('Bon appetit ! =) ')
    get_meal_suggestion()
@food.command()
@click.argument('meal')
def food_select(meal):
    """
    Displays recipe with measurements and instructions for selected meal preparation
    """
    def chosen_food(meal):
        # Echo ingredients first, then the preparation instructions.
        click.echo('-----Here are ingridients for your {0}!-----'.format(meal))
        get_ingridients(meal)
        click.echo('-----And here are the instructions for {0}-----'.format(meal))
        get_meal_instructions(meal)
    chosen_food(meal)
@food.command()
def suggest_drinks():
    """
    Get suggested a random drink recipe from the Cocktail DB API.
    """
    # TheCocktailDB endpoints (v1, test API key "1").
    drinkURL = "https://www.thecocktaildb.com/api/json/v1/1/search.php?s="
    randomDrinkURL = "https://www.thecocktaildb.com/api/json/v1/1/random.php"
    # Accumulates ingredient names of the suggested drink.
    drinkIngredients = []
    def getDrinkSuggestion():
        # Pick a random drink, then echo its ingredients and instructions.
        req = requests.get(randomDrinkURL)
        parsed_response = req.json()
        drinkInfoJSON = parsed_response["drinks"]
        drink = drinkInfoJSON[0]["strDrink"]
        click.echo("Like you need a drink you look. Hmmmmmm.")
        click.echo("---------------------" + drink + "---------------------")
        # NOTE(review): both helpers below re-fetch the drink by name even
        # though the random response above already holds the full record.
        getIngredients(drink)
        getDrinkInstructions(drink)
    def getDrinkInstructions(drink):
        # Look the drink up by name and echo its mixing instructions.
        req = requests.get(drinkURL + drink)
        parsed_response = req.json()
        drinkInfoJSON = parsed_response["drinks"]
        drinkInstructions = drinkInfoJSON[0]["strInstructions"]
        click.echo("Instructions: " + drinkInstructions)
    def getIngredients(drink):
        # Look the drink up by name and echo its 15 ingredient/measure slots.
        req = requests.get(drinkURL + drink)
        parsed_response = req.json()
        drinkInfoJSON = parsed_response["drinks"]
        click.echo("Ingredients: ")
        for ingNumber in range(1, 16):
            ingredient = drinkInfoJSON[0]["strIngredient" + str(ingNumber)]
            qty = drinkInfoJSON[0]["strMeasure" + str(ingNumber)]
            if ingredient:
                if not qty:
                    output_str = "{} (as needed)".format(ingredient)
                else:
                    output_str = "{} x {}".format(ingredient, qty)
                click.echo(output_str)
                drinkIngredients.append(ingredient)
    getDrinkSuggestion()
| 30.944
| 84
| 0.605739
|
4a0938c32763821352afcebe8be61e3f517d05c2
| 4,228
|
py
|
Python
|
display_layouts/views/image.py
|
FoamyGuy/circuitpython_display_layouts
|
d4380236d0613af8b77aaeeeb8d71dbd0a14dbd7
|
[
"MIT"
] | 3
|
2020-06-29T19:00:23.000Z
|
2021-05-06T21:56:07.000Z
|
display_layouts/views/image.py
|
FoamyGuy/circuitpython_display_layouts
|
d4380236d0613af8b77aaeeeb8d71dbd0a14dbd7
|
[
"MIT"
] | null | null | null |
display_layouts/views/image.py
|
FoamyGuy/circuitpython_display_layouts
|
d4380236d0613af8b77aaeeeb8d71dbd0a14dbd7
|
[
"MIT"
] | null | null | null |
import displayio
import adafruit_imageload
from display_layouts.layout_exceptions import MissingTypeError, IncorrectTypeError, MissingRequiredAttributesError
from display_layouts.views.view import View
# Attribute names that must be present in the layout JSON; currently none.
REQUIRED_ATTRIBUTES = []
class ImageView(View):
    """Layout view that renders a bitmap image, optionally on a padded,
    colored background, positioned via x/y or anchor attributes."""
    def __init__(self, display, layout_json):
        """Build the displayio group for an "Image" layout node.

        Args:
            display: the target display (stored for use by the base View).
            layout_json: parsed layout dict; must have view_type == "Image".

        Raises:
            MissingTypeError: "view_type" key is absent.
            IncorrectTypeError: view_type is not "Image".
            MissingRequiredAttributesError: a required attribute is missing.
        """
        self.json = layout_json
        if "view_type" not in layout_json:
            raise MissingTypeError
        if layout_json["view_type"] != "Image":
            raise IncorrectTypeError(
                "view_type '{}' does not match Layout Class 'Image'".format(layout_json["view_type"])
            )
        self._display = display
        if "attributes" in layout_json:
            _missing_attrs = []
            for attribute in REQUIRED_ATTRIBUTES:
                if attribute not in layout_json:
                    _missing_attrs.append(attribute)
            if len(_missing_attrs) > 0:
                raise MissingRequiredAttributesError("Missing required attributes: {}".format(_missing_attrs))
            _image_filepath = None
            if "image_file" in layout_json["attributes"]:
                _image_filepath = layout_json["attributes"]["image_file"]
            _background_color = None
            if "background_color" in layout_json["attributes"]:
                # Color is given as a hex string, e.g. "0x00FF00".
                _background_color = int(layout_json["attributes"]["background_color"], 16)
            # NOTE(review): the four per-side paddings below are parsed but
            # never used; only the symmetric "padding" attribute has effect.
            _padding_top = 0
            if "padding_top" in layout_json["attributes"]:
                _padding_top = int(layout_json["attributes"]["padding_top"])
            _padding_right = 0
            if "padding_right" in layout_json["attributes"]:
                _padding_right = int(layout_json["attributes"]["padding_right"])
            _padding_left = 0
            if "padding_left" in layout_json["attributes"]:
                _padding_left = int(layout_json["attributes"]["padding_left"])
            _padding_bottom = 0
            if "padding_bottom" in layout_json["attributes"]:
                _padding_bottom = int(layout_json["attributes"]["padding_bottom"])
            _padding = 0
            if "padding" in layout_json["attributes"]:
                _padding= int(layout_json["attributes"]["padding"])
            # NOTE(review): load() is called even when "image_file" is
            # missing (_image_filepath stays None) - confirm intent.
            image, palette = adafruit_imageload.load(
                _image_filepath, bitmap=displayio.Bitmap, palette=displayio.Palette
            )
            img_tile_grid = displayio.TileGrid(image, pixel_shader=palette)
            group = displayio.Group()
            # Offset the image so the padding is split evenly on both sides.
            img_tile_grid.x = _padding // 2
            img_tile_grid.y = _padding // 2
            _width = image.width
            _height = image.height
            self.width = _width
            self.height = _height
            if _padding and _background_color:
                # Background rectangle covering the image plus padding.
                bg_bitmap = displayio.Bitmap(image.width + _padding, image.height + _padding, 1)
                bg_palette = displayio.Palette(1)
                bg_palette[0] = _background_color
                # width/height used for x/y keyword resolution now include padding
                _width = bg_bitmap.width
                _height = bg_bitmap.height
                bg_sprite = displayio.TileGrid(bg_bitmap, pixel_shader=bg_palette, x=0, y=0)
                group.append(bg_sprite)
            _x = 0
            if "x" in layout_json["attributes"]:
                # keyword_compiler (presumably provided by the base View)
                # resolves expressions against the given variables - confirm.
                _x = self.keyword_compiler(layout_json["attributes"]["x"], {"WIDTH":_width, "HEIGHT": _height})
            _y = 0
            if "y" in layout_json["attributes"]:
                _y = self.keyword_compiler(layout_json["attributes"]["y"], {"WIDTH":_width, "HEIGHT": _height})
            group.x = _x
            group.y = _y
            group.append(img_tile_grid)
            self.image = group
            if "anchor_point" in layout_json["attributes"]:
                point = layout_json["attributes"]["anchor_point"]
                self.image.anchor_point = (point[0], point[1])
            if "anchored_position" in layout_json["attributes"]:
                pos = layout_json["attributes"]["anchored_position"]
                self.image.anchored_position = (self.keyword_compiler(pos[0]), self.keyword_compiler(pos[1]))
            self.view = self.image
        else:
            # NOTE(review): a node without "attributes" builds nothing and
            # leaves self.view unset - confirm whether this is reachable.
            #default attributes
            pass
| 41.048544
| 114
| 0.594134
|
4a0938c8b877bbfb7d6f2801d0dd25cf7a29bdb4
| 1,087
|
py
|
Python
|
bin/merge_jsons.py
|
skrakau/epitopeprediction
|
a4c32a0ad83f6d7b8e9f36829af6bb75379ab60d
|
[
"MIT"
] | null | null | null |
bin/merge_jsons.py
|
skrakau/epitopeprediction
|
a4c32a0ad83f6d7b8e9f36829af6bb75379ab60d
|
[
"MIT"
] | null | null | null |
bin/merge_jsons.py
|
skrakau/epitopeprediction
|
a4c32a0ad83f6d7b8e9f36829af6bb75379ab60d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import json
import argparse
from collections import Counter
def __main__():
    """Merge per-sample JSON prediction reports into prediction_report.json.

    Reads every ``*.json`` file in the input directory, sums their contents
    as Counters, post-processes a few aggregate fields and writes the merged
    report to ``prediction_report.json`` in the current working directory.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('-i', "--input", help='Input directory with JSON reports')
    args = parser.parse_args()

    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)

    # read in json reports
    data = []
    for file in os.listdir(args.input):
        if file.endswith(".json"):
            # os.listdir() yields bare file names: join with the input
            # directory instead of relying on the current working directory
            # (the old code only worked when run from inside the input dir).
            with open(os.path.join(args.input, file), "r") as infile:
                data.append(Counter(json.load(infile)))

    # merge and write json report
    merged_data = sum(data, Counter())
    # NOTE(review): Counter addition requires numeric values; if the reports
    # store strings/lists under these keys, sum() raises - confirm schema.
    merged_data['prediction_methods'] = ''.join(set(merged_data['prediction_methods']))
    merged_data['number_of_unique_nonbinders'] = len(set(merged_data['number_of_unique_nonbinders']))
    merged_data['number_of_unique_binders'] = len(set(merged_data['number_of_unique_binders']))

    with open('prediction_report.json', 'w') as outfile:
        json.dump(merged_data, outfile)


if __name__ == "__main__":
    __main__()
| 29.378378
| 101
| 0.669733
|
4a0939e224ad253a48d287e5a50423c32bc94844
| 13,137
|
py
|
Python
|
tests/apps/courses/test_cms_plugins_organization.py
|
leduong/richie
|
bf7ed379b7e2528cd790dadcec10ac2656efd189
|
[
"MIT"
] | null | null | null |
tests/apps/courses/test_cms_plugins_organization.py
|
leduong/richie
|
bf7ed379b7e2528cd790dadcec10ac2656efd189
|
[
"MIT"
] | null | null | null |
tests/apps/courses/test_cms_plugins_organization.py
|
leduong/richie
|
bf7ed379b7e2528cd790dadcec10ac2656efd189
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Unit tests for the Organization plugin and its model
"""
import re
from django import forms
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from cms.api import add_plugin, create_page
from cms.plugin_rendering import ContentRenderer
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.courses.cms_plugins import OrganizationPlugin
from richie.apps.courses.factories import OrganizationFactory
from richie.apps.courses.models import OrganizationPluginModel
class OrganizationPluginTestCase(CMSTestCase):
    """
    Test that OrganizationPlugin correctly displays a Organization's page placeholders content
    """
    def test_cms_plugins_organization_form_page_choices(self):
        """
        The form to create a organization plugin should only list organization pages
        in the select box. There shouldn't be any duplicate because of published status.
        """
        class OrganizationPluginModelForm(forms.ModelForm):
            """A form for testing the choices in the select box"""
            class Meta:
                model = OrganizationPluginModel
                fields = ["page"]
        organization = OrganizationFactory(should_publish=True)
        other_page_title = "other page"
        create_page(
            other_page_title, "richie/single_column.html", settings.LANGUAGE_CODE
        )
        plugin_form = OrganizationPluginModelForm()
        rendered_form = plugin_form.as_table()
        # The organization title must appear exactly once even though the page
        # exists in both a draft and a published version.
        self.assertEqual(
            rendered_form.count(organization.extended_object.get_title()), 1
        )
        self.assertNotIn(other_page_title, plugin_form.as_table())
    def test_cms_plugins_organization_render_on_public_page(self):
        """
        The organization plugin should render as expected on a public page.
        """
        # Create an organization
        organization = OrganizationFactory(
            page_title={"en": "public title", "fr": "titre public"},
            fill_logo={"original_filename": "logo.jpg", "default_alt_text": "my logo"},
        )
        organization_page = organization.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, OrganizationPlugin, "en", **{"page": organization_page})
        add_plugin(placeholder, OrganizationPlugin, "fr", **{"page": organization_page})
        organization_page.publish("en")
        organization_page.publish("fr")
        organization.refresh_from_db()
        page.publish("en")
        page.publish("fr")
        # Check the page content in English
        url = page.get_absolute_url(language="en")
        # The organization plugin should not be visible on the public page before it is published
        organization_page.unpublish("en")
        response = self.client.get(url)
        self.assertNotContains(response, "public title")
        # # Republish the plugin
        organization_page.publish("en")
        # Now modify the organization to have a draft different from the public version
        title_obj = organization_page.get_title_obj(language="en")
        title_obj.title = "draft title"
        title_obj.save()
        # Publishing the page again should make the plugin public
        page.publish("en")
        # Check the page content in English
        response = self.client.get(url)
        # The organization's name should be present as a link to the cms page
        # And CMS page title should be in title attribute of the link
        self.assertIn(
            (
                '<a class="organization-glimpse" href="/en/public-title/" '
                'title="public title">'
            ),
            re.sub(" +", " ", str(response.content).replace("\\n", "")),
        )
        # The organization's title should be wrapped in a div
        self.assertContains(
            response,
            '<div class="organization-glimpse__title">{:s}</div>'.format(
                organization.public_extension.extended_object.get_title()
            ),
            html=True,
        )
        self.assertNotContains(response, "draft title")
        # Organization's logo should be present
        pattern = (
            r'<div class="organization-glimpse__logo">'
            r'<img src="/media/filer_public_thumbnails/filer_public/.*logo\.jpg__200x113'
            r'.*alt=""'
        )
        self.assertIsNotNone(re.search(pattern, str(response.content)))
        # Same checks in French
        url = page.get_absolute_url(language="fr")
        response = self.client.get(url)
        self.assertIn(
            '<a class="organization-glimpse" href="/fr/titre-public/" '
            'title="titre public"',
            re.sub(" +", " ", str(response.content).replace("\\n", "")),
        )
        pattern = (
            r'<div class="organization-glimpse__logo">'
            r'<img src="/media/filer_public_thumbnails/filer_public/.*logo\.jpg__200x113'
            r'.*alt=""'
        )
        self.assertIsNotNone(re.search(pattern, str(response.content)))
    def test_cms_plugins_organization_render_on_draft_page(self):
        """
        The organization plugin should render as expected on a draft page.
        """
        staff = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=staff.username, password="password")
        # Create a Organization
        organization = OrganizationFactory(page_title="public title")
        organization_page = organization.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, OrganizationPlugin, "en", **{"page": organization_page})
        organization_page.publish("en")
        organization_page.unpublish("en")
        organization_page.refresh_from_db()
        url = "{:s}?edit".format(page.get_absolute_url(language="en"))
        # The unpublished organization plugin should not be visible on the draft page
        response = self.client.get(url)
        self.assertNotContains(response, "public title")
        # Now modify the organization to have a draft different from the public version
        organization_page.publish("en")
        title_obj = organization_page.get_title_obj(language="en")
        title_obj.title = "draft title"
        title_obj.save()
        # The draft version of the organization plugin should not be visible
        response = self.client.get(url)
        self.assertNotContains(response, "draft title")
        self.assertContains(response, "public title")
    def test_cms_plugins_organization_render_instance_variant(self):
        """
        The organization plugin should render according to variant variable
        eventually present in the context of its container.
        """
        staff = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=staff.username, password="password")
        # Create an Organization
        organization = OrganizationFactory(
            page_title="public title", should_publish=True
        )
        organization_page = organization.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        # Add organization plugin with default variant
        add_plugin(placeholder, OrganizationPlugin, "en", page=organization_page)
        url = "{:s}?edit".format(page.get_absolute_url(language="en"))
        # The organization-glimpse default variant should not have the small attribute
        response = self.client.get(url)
        self.assertNotContains(response, "--small")
        # Add organization plugin with small variant
        add_plugin(
            placeholder,
            OrganizationPlugin,
            "en",
            page=organization_page,
            variant="small",
        )
        # The new organization-glimpse should have the small attribute
        response = self.client.get(url)
        self.assertContains(response, "organization-small")
    def test_cms_plugins_organization_render_context_variant(self):
        """
        The organization plugin should render according to the variant plugin
        option.
        """
        # Create an organization
        organization = OrganizationFactory(
            page_title="public title", should_publish=True
        )
        organization_page = organization.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        # Add organization plugin with default template
        model_instance = add_plugin(
            placeholder,
            OrganizationPlugin,
            "en",
            page=organization_page,
            variant="small",
        )
        # Get generated html
        request = RequestFactory()
        request.current_page = page
        request.path_info = "/en/my-path/"
        request.user = AnonymousUser()
        # NOTE(review): the context sets organization_variant="xxl" but the
        # plugin instance variant="small" wins — that precedence is what the
        # final assertion pins down.
        context = {
            "current_page": page,
            "organization_variant": "xxl",
            "request": request,
        }
        renderer = ContentRenderer(request=request)
        html = renderer.render_plugin(model_instance, context)
        self.assertIn("organization-small", html)
    def test_cms_plugins_organization_fallback_when_never_published(self):
        """
        The organization plugin should render in the fallback language when the organization
        page has never been published in the current language.
        """
        # Create a organization
        organization = OrganizationFactory(
            page_title={"en": "public organization", "fr": "organisation publique"},
            fill_logo={"original_filename": "logo.jpg", "default_alt_text": "my logo"},
        )
        organization_page = organization.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, OrganizationPlugin, "en", **{"page": organization_page})
        add_plugin(placeholder, OrganizationPlugin, "fr", **{"page": organization_page})
        # Publish only the French version of the organization
        organization_page.publish("fr")
        # Check the page content in English
        page.publish("en")
        url = page.get_absolute_url(language="en")
        response = self.client.get(url)
        # Organization's name should be present as a link to the cms page
        self.assertContains(
            response,
            (
                '<a class="organization-glimpse" href="/en/organisation-publique/" '
                'title="organisation publique">'
            ),
            status_code=200,
        )
        # The organization's full name should be wrapped in a h2
        self.assertContains(
            response,
            '<div class="organization-glimpse__title">organisation publique</div>',
            html=True,
        )
        self.assertNotContains(response, "public organization")
        # Organization's logo should be present
        pattern = (
            r'<div class="organization-glimpse__logo">'
            r'<img src="/media/filer_public_thumbnails/filer_public/.*logo\.jpg__200x113'
            r'.*alt=""'
        )
        self.assertIsNotNone(re.search(pattern, str(response.content)))
    def test_cms_plugins_organization_fallback_when_published_unpublished(self):
        """
        The organization plugin should not render when the organization was voluntarily
        unpublished in the current language.
        """
        # Create a organization
        organization = OrganizationFactory(
            page_title={"en": "public title", "fr": "titre public"},
            fill_logo={"original_filename": "logo.jpg", "default_alt_text": "my logo"},
        )
        organization_page = organization.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, OrganizationPlugin, "en", **{"page": organization_page})
        add_plugin(placeholder, OrganizationPlugin, "fr", **{"page": organization_page})
        # Publish only the French version of the organization
        organization_page.publish("fr")
        # Publishing then unpublishing "en" marks it deliberately unpublished,
        # which (unlike never-published) must disable the fallback rendering.
        organization_page.publish("en")
        organization_page.unpublish("en")
        # Check the page content in English
        page.publish("en")
        url = page.get_absolute_url(language="en")
        response = self.client.get(url)
        self.assertNotContains(response, "glimpse")
| 38.982196
| 97
| 0.644439
|
4a093b3268c02631f28aa5566b08ec71256c560c
| 8,683
|
py
|
Python
|
smdebug/profiler/analysis/notebook_utils/heatmap.py
|
jsspric/sagemaker-debugger
|
d7010869e19ae49c4f371935f27afcb585195f79
|
[
"Apache-2.0"
] | 133
|
2019-12-03T18:56:27.000Z
|
2022-03-18T19:54:49.000Z
|
smdebug/profiler/analysis/notebook_utils/heatmap.py
|
jsspric/sagemaker-debugger
|
d7010869e19ae49c4f371935f27afcb585195f79
|
[
"Apache-2.0"
] | 384
|
2019-12-04T03:04:14.000Z
|
2022-03-31T20:42:48.000Z
|
smdebug/profiler/analysis/notebook_utils/heatmap.py
|
jsspric/sagemaker-debugger
|
d7010869e19ae49c4f371935f27afcb585195f79
|
[
"Apache-2.0"
] | 64
|
2019-12-05T20:39:51.000Z
|
2022-03-25T13:30:54.000Z
|
# Standard Library
import re
from copy import deepcopy
# Third Party
import bokeh
import numpy as np
from bokeh.io import output_notebook, show
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.models.glyphs import Image
from bokeh.models.tickers import FixedTicker
from bokeh.plotting import figure, show
output_notebook(hide_banner=True)
class Heatmap:
    """Render recorded system metrics as a Bokeh heatmap in a notebook.

    Events are read from ``metrics_reader``, grouped by worker node /
    dimension / metric name, normalized to percentages for selected
    dimensions, augmented with per-dimension and per-node ``"total"``
    series, filtered by the ``select_*`` regex lists, and finally drawn
    as one heatmap row per remaining series.
    """

    def __init__(
        self,
        metrics_reader,
        select_metrics=[],  # NOTE(review): unused; kept (mutable default, never mutated) for interface compatibility
        starttime=0,
        endtime=None,
        select_dimensions=[".*CPU", ".*GPU"],
        select_events=[".*"],
        plot_height=350,
        show_workers=True,
    ):
        """Read events between *starttime* and *endtime* and build the plot.

        :param metrics_reader: object providing ``get_events`` and
            ``get_timestamp_of_latest_available_file``
        :param starttime: first timestamp (inclusive) to read events from
        :param endtime: last timestamp; defaults to the latest available file
        :param select_dimensions: regex patterns matched against dimension names
        :param select_events: regex patterns matched against metric names
        :param plot_height: pixel height of the Bokeh figure
        :param show_workers: kept as an attribute for callers; not used here
        """
        self.select_dimensions = select_dimensions
        self.select_events = select_events
        self.show_workers = show_workers
        self.metrics_reader = metrics_reader
        self.available_dimensions = []
        self.available_events = []
        self.start = 0
        # Fix: compare against None with "is", not "==" (PEP 8).
        if endtime is None:
            # get timestamp of latest file and events
            self.last_timestamp_system_metrics = (
                self.metrics_reader.get_timestamp_of_latest_available_file()
            )
        else:
            self.last_timestamp_system_metrics = endtime
        events = self.metrics_reader.get_events(starttime, self.last_timestamp_system_metrics)
        self.plot_height = plot_height
        # get timestamp of latest file and events
        self.last_timestamp = self.metrics_reader.get_timestamp_of_latest_available_file()
        self.system_metrics = self.preprocess_system_metrics(events, system_metrics={})
        self.create_plot()

    def preprocess_system_metrics(self, events, system_metrics):
        """Group raw events into nested dicts and compute filtered totals.

        Returns ``system_metrics`` shaped as
        ``{node: {dimension: {metric_name: np.ndarray}}}`` with an added
        ``"total"`` series per dimension and a synthetic ``"node_total"``
        node averaging totals across workers.  Side effects: populates
        ``self.width``, ``self.available_*`` and ``self.filtered_*``.
        """
        # read all available system metric events and store them in dict
        for event in events:
            if event.node_id not in system_metrics:
                system_metrics[event.node_id] = {}
            if event.dimension not in system_metrics[event.node_id]:
                system_metrics[event.node_id][event.dimension] = {}
            if event.name not in system_metrics[event.node_id][event.dimension]:
                system_metrics[event.node_id][event.dimension][event.name] = []
            system_metrics[event.node_id][event.dimension][event.name].append(event.value)
        # number of datapoints (minimum series length across all metrics)
        self.width = np.inf
        # preprocess data
        for node in system_metrics:
            for dimension in system_metrics[node]:
                if dimension not in self.available_dimensions:
                    self.available_dimensions.append(dimension)
                for event in system_metrics[node][dimension]:
                    # list of available events
                    if event not in self.available_events:
                        self.available_events.append(event)
                    # convert to numpy
                    system_metrics[node][dimension][event] = np.array(
                        system_metrics[node][dimension][event]
                    )
                    # we may not have the exact same number of measurements per metric
                    if system_metrics[node][dimension][event].shape[0] < self.width:
                        self.width = system_metrics[node][dimension][event].shape[0]
                    # convert metrics to percentages (scale each series to 0..100)
                    if dimension in ["Algorithm", "Platform", ""]:
                        max_value = np.max(system_metrics[node][dimension][event])
                        if max_value != 0:
                            system_metrics[node][dimension][event] = (
                                system_metrics[node][dimension][event] / max_value
                            )
                        system_metrics[node][dimension][event] = (
                            system_metrics[node][dimension][event] * 100
                        )
        # compute total utilization per event dimension (mean across metrics)
        for node in system_metrics:
            for dimension in system_metrics[node]:
                n = len(system_metrics[node][dimension])
                total = [sum(x) for x in zip(*system_metrics[node][dimension].values())]
                system_metrics[node][dimension]["total"] = np.array(total) / n
                self.available_events.append("total")
        nodes = list(system_metrics.keys())
        system_metrics["node_total"] = {}
        # compute total utilization per worker node (truncate to shortest series)
        for dimension in system_metrics[nodes[0]]:
            system_metrics["node_total"][dimension] = {}
            node_total = []
            for node in nodes:
                len2 = len(node_total)
                if len2 > 0:
                    len1 = system_metrics[node][dimension]["total"].shape[0]
                    if len1 < len2:
                        node_total[:len1] = (
                            node_total[:len1] + system_metrics[node][dimension]["total"]
                        )
                    else:
                        node_total = node_total + system_metrics[node][dimension]["total"][:len2]
                else:
                    node_total = deepcopy(system_metrics[node][dimension]["total"])
            system_metrics["node_total"][dimension]["total"] = node_total / (len(nodes))
        # filter events and dimensions by the user-supplied regex lists
        self.filtered_events = []
        print(f"select events:{self.select_events}")
        self.filtered_dimensions = []
        print(f"select dimensions:{self.select_dimensions}")
        for metric in self.select_events:
            r = re.compile(r".*" + metric)
            self.filtered_events.extend(list(filter(r.search, self.available_events)))
        self.filtered_events = set(self.filtered_events)
        print(f"filtered_events:{self.filtered_events}")
        for metric in self.select_dimensions:
            r = re.compile(metric)  # + r".*")
            self.filtered_dimensions.extend(list(filter(r.search, self.available_dimensions)))
        self.filtered_dimensions = set(self.filtered_dimensions)
        print(f"filtered_dimensions:{self.filtered_dimensions}")
        return system_metrics

    def create_plot(self):
        """Build the Bokeh figure: one image row per filtered metric series."""
        # define list of metric names (needed for tooltip)
        tmp = []
        metric_names = []
        yaxis = {}
        for node in self.system_metrics:
            for dimension in self.system_metrics[node]:
                if dimension in self.filtered_dimensions:
                    for event in self.system_metrics[node][dimension]:
                        if event in self.filtered_events:
                            values = self.system_metrics[node][dimension][event][: self.width]
                            tmp.append(values)
                            metric_names.append(dimension + "_" + event + "_" + node)
                            yaxis[len(tmp)] = dimension + "_" + event + "_" + node
        ymax = len(tmp)
        yaxis[ymax] = ""
        # define figure; show at most the last 1000 datapoints initially
        start = 0
        if self.width > 1000:
            start = self.width - 1000
        self.plot = figure(
            plot_height=self.plot_height,
            x_range=(start, self.width),
            y_range=(0, ymax),
            plot_width=1000,
            tools="crosshair,reset,xwheel_zoom, box_edit",
        )
        self.plot.xaxis.axis_label = "Indices"
        # tooltip
        hover = HoverTool(
            tooltips=[("usage", "@image"), ("metric", "@metric"), ("index", "$x{10}")]
        )
        # map colors to values between 0 and 100
        color_mapper = bokeh.models.LinearColorMapper(bokeh.palettes.viridis(100))
        color_mapper.high = 100
        color_mapper.low = 0
        tmp = np.array(tmp)
        # create column data source (one 1xN image strip per metric)
        self.source = ColumnDataSource(
            data=dict(
                image=[np.array(tmp[i]).reshape(1, -1) for i in range(len(tmp))],
                x=[0] * ymax,
                y=[i for i in range(ymax)],
                dw=[self.width] * (ymax),
                dh=[1.3] * (ymax),
                metric=[i for i in metric_names],
            )
        )
        # heatmap placeholder
        images = Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper)
        # plot
        self.plot.add_glyph(self.source, images)
        self.plot.add_tools(hover)
        self.plot.xgrid.visible = False
        self.plot.ygrid.visible = False
        self.plot.yaxis.ticker = FixedTicker(ticks=np.arange(0, ymax).tolist())
        self.plot.yaxis.major_label_text_font_size = "7pt"
        self.plot.yaxis.major_label_overrides = yaxis
        self.plot.xaxis.major_label_text_font_size = "0pt"
        self.target = show(self.plot, notebook_handle=True)
| 39.648402
| 97
| 0.575838
|
4a093b937f319b0d8b8afef1debe821c4fe3d91b
| 3,795
|
py
|
Python
|
tests/test_trimming.py
|
stephenkraemer/bistro
|
c9f63e948d20f8f1e59163f6267ad83cb70caa9d
|
[
"BSD-3-Clause"
] | 1
|
2020-11-09T13:41:46.000Z
|
2020-11-09T13:41:46.000Z
|
tests/test_trimming.py
|
stephenkraemer/bistro
|
c9f63e948d20f8f1e59163f6267ad83cb70caa9d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_trimming.py
|
stephenkraemer/bistro
|
c9f63e948d20f8f1e59163f6267ad83cb70caa9d
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test Trimmer"""
import numpy as np
import pandas as pd
import pytest
from mqc.trimming import Trimmer
from mqc.flag_and_index_values import (
bsseq_strand_indices as b_inds,
methylation_status_flags as mflags,
)
class AlignedSegmentStub:
    """Minimal stand-in for a pysam ``AlignedSegment``.

    Exposes only the ``template_length`` attribute that the trimming
    code reads from an alignment.
    """

    def __init__(self, tlen):
        # Mirror pysam's attribute name for the template (fragment) length.
        self.template_length = tlen
class PileupReadStub:
    """Test double for a pileup read.

    ``exp_tr_flag`` is not part of the real read interface: it records the
    trimming flag the test EXPECTS the Trimmer to set on this read.
    Note: the ``mflag`` default is evaluated once at definition time from
    the project constant ``mflags.is_methylated``.
    """
    def __init__(self, pos, flen, strand,
                 exp_tr_flag,
                 mflag=mflags.is_methylated, qcflag=0, trimm_flag=0):
        self.meth_status_flag = mflag
        self.exp_tr_flag = exp_tr_flag  # expected outcome, asserted later
        self.pos_in_read = pos
        self.alignment = AlignedSegmentStub(flen)  # carries template_length
        self.qc_fail_flag = qcflag
        self.trimm_flag = trimm_flag  # mutated in place by Trimmer.process
        self.bsseq_strand_ind = strand
# Short alias so the read_properties table below stays compact.
pread = PileupReadStub
class MotifPileupStub:
    """Bare-bones MotifPileup replacement carrying only a ``reads`` list."""

    def __init__(self, reads):
        self.reads = reads
class CuttingSitesStub:
    """Test double for the cutting-sites table consumed by Trimmer.

    ``as_array`` returns plateau boundaries indexed as
    ``[bsseq_strand, flen, start/end]`` — presumably zero-based,
    left-closed / right-open slice bounds (see read_properties below);
    confirm against the real CuttingSites implementation.
    """
    def __init__(self):
        # dataframe index used to find flen dimension
        self.df = pd.DataFrame(index=pd.MultiIndex.from_product(
            ['c_bc c_bc_rv'.split(), range(30)], names='bs_strand flen'.split()))
    # noinspection PyMethodMayBeStatic
    def as_array(self):
        # shape: (4 strands, max_flen 110 + 1, [plateau_start, plateau_end])
        arr = np.zeros((4, 110 + 1, 2))
        arr[b_inds.w_bc, 100, 0] = 10
        arr[b_inds.w_bc, 100, 1] = 91
        arr[b_inds.w_bc, 110, 0] = 10
        arr[b_inds.w_bc, 110, 1] = 101
        arr[b_inds.c_bc, 110, 0] = 0
        arr[b_inds.c_bc, 110, 1] = 81
        return arr
# Module-level fixture: build stub reads, run Trimmer.process once at import
# time, and let the parametrized tests below assert on the mutated flags.
config_stub = {'trimming': {'max_flen_considered_for_trimming': 110}}
read_properties = [
    # trimming boundaries are zero-based slice definitions of the plateau
    # left-closed
    {'pos': 9, 'flen': 100, 'strand': b_inds.w_bc, 'exp_tr_flag': 1},
    {'pos': 10, 'flen': 100, 'strand': b_inds.w_bc, 'exp_tr_flag': 0},
    {'pos': 50, 'flen': 100, 'strand': b_inds.w_bc, 'exp_tr_flag': 0},
    # right-open
    {'pos': 100, 'flen': 110, 'strand': b_inds.w_bc, 'exp_tr_flag': 0},
    {'pos': 101, 'flen': 110, 'strand': b_inds.w_bc, 'exp_tr_flag': 1},
    # Different strands have different cuttings sites
    {'pos': 3, 'flen': 110, 'strand': b_inds.c_bc, 'exp_tr_flag': 0},
    {'pos': 90, 'flen': 110, 'strand': b_inds.c_bc, 'exp_tr_flag': 1},
    # the next two reads only differ in the qc_fail_flag or meth_na flag
    # the failing/NA read should also be processed
    {'pos': 100, 'flen': 100, 'strand': b_inds.w_bc, 'exp_tr_flag': 1},
    {'pos': 100, 'flen': 100, 'strand': b_inds.w_bc, 'qcflag': 1, 'exp_tr_flag': 1},
    {'pos': 100, 'flen': 100, 'strand': b_inds.w_bc, 'mflag': mflags.is_na, 'exp_tr_flag': 1},
    # this read exceeds max_flen
    {'pos': 101, 'flen': 200, 'strand': b_inds.w_bc, 'qcflag': 0, 'exp_tr_flag': 1},
]
base_reads = [pread(**i) for i in read_properties]
motif_pileup = MotifPileupStub(base_reads)
cutting_sites = CuttingSitesStub()
# noinspection PyTypeChecker
trimmer = Trimmer(cutting_sites=cutting_sites)
# noinspection PyTypeChecker
# Trimmer.process mutates trimm_flag on each stub read in place.
trimmer.process(motif_pileup)
# Reads 0-7 cover plateau-boundary cases (left-closed, right-open, per-strand).
@pytest.mark.parametrize('idx', range(8))
def test_out_of_bounds_read_positions_are_discarded(idx):
    # order of reads in MotifPileup.reads is currently not guaranteed
    # use original read list to find reads by indexing
    curr_read = base_reads[idx]
    assert curr_read.trimm_flag == curr_read.exp_tr_flag
# Reads 8 and 9 carry qc_fail_flag / meth NA; they must still be trimmed.
@pytest.mark.parametrize('idx', (8, 9))
def test_qcfail_NAmeth_reads_are_also_processed(idx):
    # order of reads in MotifPileup.reads is currently not guaranteed
    # use original read list to find reads by indexing
    curr_read = base_reads[idx]
    assert curr_read.trimm_flag == curr_read.exp_tr_flag
# Read 10 has flen=200 > max_flen=110 and must be clamped to the max_flen row.
@pytest.mark.parametrize('idx', (10,))
def test_flen_exceeding_max_flen_are_trimmed_like_max_flen(idx):
    curr_read = base_reads[idx]
    assert curr_read.trimm_flag == curr_read.exp_tr_flag
| 35.801887
| 94
| 0.676943
|
4a093d35c93be3545dc7d90d11fc5ee84dc26664
| 13,950
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20190401/get_security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190401/get_security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190401/get_security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSecurityRuleResult',
'AwaitableGetSecurityRuleResult',
'get_security_rule',
]
# NOTE(review): generated by the Pulumi SDK Generator — keep changes to
# comments only; hand edits to the logic will be lost on regeneration.
@pulumi.output_type
class GetSecurityRuleResult:
    """
    Network security rule.
    """
    # Each constructor argument is runtime-type-checked and stored via
    # pulumi.set so the @pulumi.getter properties can read it back.
    def __init__(__self__, access=None, description=None, destination_address_prefix=None, destination_address_prefixes=None, destination_application_security_groups=None, destination_port_range=None, destination_port_ranges=None, direction=None, etag=None, id=None, name=None, priority=None, protocol=None, provisioning_state=None, source_address_prefix=None, source_address_prefixes=None, source_application_security_groups=None, source_port_range=None, source_port_ranges=None):
        if access and not isinstance(access, str):
            raise TypeError("Expected argument 'access' to be a str")
        pulumi.set(__self__, "access", access)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if destination_address_prefix and not isinstance(destination_address_prefix, str):
            raise TypeError("Expected argument 'destination_address_prefix' to be a str")
        pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
        if destination_address_prefixes and not isinstance(destination_address_prefixes, list):
            raise TypeError("Expected argument 'destination_address_prefixes' to be a list")
        pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes)
        if destination_application_security_groups and not isinstance(destination_application_security_groups, list):
            raise TypeError("Expected argument 'destination_application_security_groups' to be a list")
        pulumi.set(__self__, "destination_application_security_groups", destination_application_security_groups)
        if destination_port_range and not isinstance(destination_port_range, str):
            raise TypeError("Expected argument 'destination_port_range' to be a str")
        pulumi.set(__self__, "destination_port_range", destination_port_range)
        if destination_port_ranges and not isinstance(destination_port_ranges, list):
            raise TypeError("Expected argument 'destination_port_ranges' to be a list")
        pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
        if direction and not isinstance(direction, str):
            raise TypeError("Expected argument 'direction' to be a str")
        pulumi.set(__self__, "direction", direction)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if priority and not isinstance(priority, int):
            raise TypeError("Expected argument 'priority' to be a int")
        pulumi.set(__self__, "priority", priority)
        if protocol and not isinstance(protocol, str):
            raise TypeError("Expected argument 'protocol' to be a str")
        pulumi.set(__self__, "protocol", protocol)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if source_address_prefix and not isinstance(source_address_prefix, str):
            raise TypeError("Expected argument 'source_address_prefix' to be a str")
        pulumi.set(__self__, "source_address_prefix", source_address_prefix)
        if source_address_prefixes and not isinstance(source_address_prefixes, list):
            raise TypeError("Expected argument 'source_address_prefixes' to be a list")
        pulumi.set(__self__, "source_address_prefixes", source_address_prefixes)
        if source_application_security_groups and not isinstance(source_application_security_groups, list):
            raise TypeError("Expected argument 'source_application_security_groups' to be a list")
        pulumi.set(__self__, "source_application_security_groups", source_application_security_groups)
        if source_port_range and not isinstance(source_port_range, str):
            raise TypeError("Expected argument 'source_port_range' to be a str")
        pulumi.set(__self__, "source_port_range", source_port_range)
        if source_port_ranges and not isinstance(source_port_ranges, list):
            raise TypeError("Expected argument 'source_port_ranges' to be a list")
        pulumi.set(__self__, "source_port_ranges", source_port_ranges)
    @property
    @pulumi.getter
    def access(self) -> str:
        """
        The network traffic is allowed or denied.
        """
        return pulumi.get(self, "access")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        A description for this rule. Restricted to 140 chars.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="destinationAddressPrefix")
    def destination_address_prefix(self) -> Optional[str]:
        """
        The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        """
        return pulumi.get(self, "destination_address_prefix")
    @property
    @pulumi.getter(name="destinationAddressPrefixes")
    def destination_address_prefixes(self) -> Optional[Sequence[str]]:
        """
        The destination address prefixes. CIDR or destination IP ranges.
        """
        return pulumi.get(self, "destination_address_prefixes")
    @property
    @pulumi.getter(name="destinationApplicationSecurityGroups")
    def destination_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
        """
        The application security group specified as destination.
        """
        return pulumi.get(self, "destination_application_security_groups")
    @property
    @pulumi.getter(name="destinationPortRange")
    def destination_port_range(self) -> Optional[str]:
        """
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        """
        return pulumi.get(self, "destination_port_range")
    @property
    @pulumi.getter(name="destinationPortRanges")
    def destination_port_ranges(self) -> Optional[Sequence[str]]:
        """
        The destination port ranges.
        """
        return pulumi.get(self, "destination_port_ranges")
    @property
    @pulumi.getter
    def direction(self) -> str:
        """
        The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic.
        """
        return pulumi.get(self, "direction")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """
        The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
        """
        return pulumi.get(self, "priority")
    @property
    @pulumi.getter
    def protocol(self) -> str:
        """
        Network protocol this rule applies to.
        """
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="sourceAddressPrefix")
    def source_address_prefix(self) -> Optional[str]:
        """
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
        """
        return pulumi.get(self, "source_address_prefix")
    @property
    @pulumi.getter(name="sourceAddressPrefixes")
    def source_address_prefixes(self) -> Optional[Sequence[str]]:
        """
        The CIDR or source IP ranges.
        """
        return pulumi.get(self, "source_address_prefixes")
    @property
    @pulumi.getter(name="sourceApplicationSecurityGroups")
    def source_application_security_groups(self) -> Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]:
        """
        The application security group specified as source.
        """
        return pulumi.get(self, "source_application_security_groups")
    @property
    @pulumi.getter(name="sourcePortRange")
    def source_port_range(self) -> Optional[str]:
        """
        The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        """
        return pulumi.get(self, "source_port_range")
    @property
    @pulumi.getter(name="sourcePortRanges")
    def source_port_ranges(self) -> Optional[Sequence[str]]:
        """
        The source port ranges.
        """
        return pulumi.get(self, "source_port_ranges")
# NOTE(review): generated code — awaitable wrapper so the result can be used
# with "await" in Pulumi programs; __await__ yields once then returns a copy.
class AwaitableGetSecurityRuleResult(GetSecurityRuleResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSecurityRuleResult(
            access=self.access,
            description=self.description,
            destination_address_prefix=self.destination_address_prefix,
            destination_address_prefixes=self.destination_address_prefixes,
            destination_application_security_groups=self.destination_application_security_groups,
            destination_port_range=self.destination_port_range,
            destination_port_ranges=self.destination_port_ranges,
            direction=self.direction,
            etag=self.etag,
            id=self.id,
            name=self.name,
            priority=self.priority,
            protocol=self.protocol,
            provisioning_state=self.provisioning_state,
            source_address_prefix=self.source_address_prefix,
            source_address_prefixes=self.source_address_prefixes,
            source_application_security_groups=self.source_application_security_groups,
            source_port_range=self.source_port_range,
            source_port_ranges=self.source_port_ranges)
def get_security_rule(network_security_group_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      security_rule_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityRuleResult:
    """
    Network security rule.


    :param str network_security_group_name: The name of the network security group.
    :param str resource_group_name: The name of the resource group.
    :param str security_rule_name: The name of the security rule.
    """
    __args__ = {
        'networkSecurityGroupName': network_security_group_name,
        'resourceGroupName': resource_group_name,
        'securityRuleName': security_rule_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20190401:getSecurityRule', __args__, opts=opts, typ=GetSecurityRuleResult).value
    # Copy every result field onto the awaitable wrapper.
    field_names = (
        'access', 'description', 'destination_address_prefix',
        'destination_address_prefixes',
        'destination_application_security_groups',
        'destination_port_range', 'destination_port_ranges',
        'direction', 'etag', 'id', 'name', 'priority', 'protocol',
        'provisioning_state', 'source_address_prefix',
        'source_address_prefixes',
        'source_application_security_groups', 'source_port_range',
        'source_port_ranges')
    return AwaitableGetSecurityRuleResult(
        **{attr: getattr(__ret__, attr) for attr in field_names})
| 45.888158
| 481
| 0.698065
|
4a093e102cb376bd2c4d6785d9a38e5bef777fc7
| 18,253
|
py
|
Python
|
VideoSource.py
|
rommyhsu/python-video-class
|
ab9abaff213191d162393f6c12f32102ef6f3104
|
[
"Apache-2.0"
] | null | null | null |
VideoSource.py
|
rommyhsu/python-video-class
|
ab9abaff213191d162393f6c12f32102ef6f3104
|
[
"Apache-2.0"
] | null | null | null |
VideoSource.py
|
rommyhsu/python-video-class
|
ab9abaff213191d162393f6c12f32102ef6f3104
|
[
"Apache-2.0"
] | null | null | null |
# Video module
import ctypes
import datetime
import os
import queue
import sys
import time
import cv2
import numpy as np
import vlc
from PIL import Image
# Configure the VLC native-library search path (PYTHON_VLC_MODULE_PATH).
# NOTE(review): the original comment says this must be set *before*
# `import vlc`, but the `import vlc` above has already run at this point —
# confirm whether this assignment still has any effect here.
# os.environ['PYTHON_VLC_MODULE_PATH'] = "C:/Users/Rommy/python/vlc-3.0.6-win64"
# os.environ['PYTHON_VLC_MODULE_PATH'] = 'C:/Program Files/VideoLAN/VLC'
os.environ['PYTHON_VLC_MODULE_PATH'] = '../vlc-3.0.6-win64'
class Camera:
    """Thin wrapper around cv2.VideoCapture for a live webcam device."""

    def __init__(self, cam_num):
        # Device index handed to cv2.VideoCapture (0 = default webcam).
        self.cam_num = cam_num
        self.cap = None
        self.width = 640
        self.height = 480
        self.last_frame = np.zeros((1, 1))  # placeholder until first capture
        self.ifRun = True
        self.initFrame = None
        self.ret = False

    def initialize(self):
        """Open the device, cache its frame size, and grab an initial frame."""
        self.cap = cv2.VideoCapture(self.cam_num)
        self.get_capture_size()
        self.ret, self.initFrame = self.get_frame()
        self.ifRun = True

    def get_frame(self):
        """Read one frame; returns (ok, frame) and caches it as last_frame."""
        self.ret, self.last_frame = self.cap.read()
        return self.ret, self.last_frame

    def acquire_movie(self, num_frames=None):
        """Capture frames continuously.

        BUG FIX: this class previously defined two same-named
        ``acquire_movie`` methods, so the ``num_frames`` variant was silently
        shadowed and unreachable. They are merged here: with ``num_frames``
        given, capture that many frames and return them as a list; without
        it, loop until a read fails or close_camera() clears ``ifRun``
        (returning None), which matches the previous effective behaviour.
        """
        if num_frames is not None:
            movie = []
            for _ in range(num_frames):
                self.ret, frame = self.get_frame()
                movie.append(frame)
            return movie
        while self.ifRun:
            self.ret, frame = self.get_frame()
            self.last_frame = frame
            if not self.ret:
                break

    def set_brightness(self, value):
        self.cap.set(cv2.CAP_PROP_BRIGHTNESS, value)

    def get_brightness(self):
        return self.cap.get(cv2.CAP_PROP_BRIGHTNESS)

    def __str__(self):
        return 'OpenCV Camera {}'.format(self.cam_num)

    def close_camera(self):
        """Stop the acquisition loop and release the device."""
        self.ifRun = False
        self.cap.release()

    def decode_fourcc(self):
        """Return the capture codec as a human-readable 'codec:XXXX' string."""
        fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
        return 'codec:' + "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])

    def get_capture_size(self):
        """Cache and report the capture resolution."""
        self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        return "Image Size: %d x %d" % (self.width, self.height)

    def set_capture_size(self, width, height):
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
class Video:
    """Thin wrapper around cv2.VideoCapture for a video-file source."""

    def __init__(self, cam_num):
        # File path (or device index) handed to cv2.VideoCapture.
        self.cam_num = cam_num
        self.cap = None
        self.width = 640
        self.height = 480
        self.last_frame = np.zeros((1, 1))  # placeholder until first read
        self.ifRun = True
        self.frames = 0  # total frame count, filled in by initialize()
        self.initFrame = None
        self.ret = False
        self.ifCameraInitial = False
        self.ifLoop = False  # when True, acquire_movie() rewinds at EOF

    def initialize(self):
        """Open the file, cache size/frame count, and grab an initial frame."""
        self.cap = cv2.VideoCapture(self.cam_num)
        self.get_capture_size()
        self.frames = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.ifRun = True
        self.ret, self.initFrame = self.get_frame()

    def get_frame(self):
        """Read one frame resized to 640x480; returns (ok, frame).

        On a failed read the previously cached frame is returned with
        ok == False.
        """
        self.ret, _frame = self.cap.read()
        if self.ret:
            self.last_frame = cv2.resize(_frame, (640, 480))
        return self.ret, self.last_frame

    def acquire_movie(self, num_frames=None):
        """Pump frames from the file at ~30 ms intervals.

        BUG FIX: two same-named ``acquire_movie`` methods used to shadow
        each other; they are merged here. With ``num_frames`` given, read up
        to that many frames and return the successful ones as a list.
        Without it, loop until EOF — rewinding to the start first when
        ``ifLoop`` is set — matching the previous effective behaviour.
        """
        if num_frames is not None:
            movie = []
            for _ in range(num_frames):
                ok, frame = self.get_frame()
                if ok:
                    movie.append(frame)
            return movie
        while self.ifRun:
            self.ret, _frame = self.get_frame()
            if not self.ret:
                if self.ifLoop:
                    print("movie rerun")
                    self.cap.set(cv2.CAP_PROP_POS_FRAMES, 1)
                    continue  # skip the sleep after a rewind
                else:
                    self.ifRun = False
                    print("movie end")
                    self.cap.release()
                    break
            time.sleep(0.030)  # ~30 fps pacing

    def set_brightness(self, value):
        self.cap.set(cv2.CAP_PROP_BRIGHTNESS, value)

    def get_brightness(self):
        return self.cap.get(cv2.CAP_PROP_BRIGHTNESS)

    def __str__(self):
        return 'OpenCV Camera {}'.format(self.cam_num)

    def close_camera(self):
        """Stop the acquisition loop and release the capture."""
        self.ifRun = False
        self.cap.release()

    def decode_fourcc(self):
        """Return the capture codec as a human-readable 'codec:XXXX' string."""
        fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
        return 'codec:' + "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])

    def get_capture_size(self):
        """Cache and report the source resolution."""
        self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        return "Image Size: %d x %d" % (self.width, self.height)

    def set_capture_size(self, width, height):
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
class IPcam:
    """VLC-backed video source mirroring the Camera/Video wrapper API.

    libvlc decodes into a raw BGRA buffer via the lock/display callbacks
    registered in acquire_movie(); get_frame() republishes the most recent
    decoded frame resized to 640x480.
    """

    def __init__(self, *args):
        # Optional *args are passed straight to vlc.Instance as libvlc options.
        if args:
            instance = vlc.Instance(*args)
            self.media = instance.media_player_new()
        else:
            self.media = vlc.MediaPlayer()
        self.frameQueue = queue.Queue(10)  # bounded frame queue (see enqueue/dequeue)
        self.width = 640
        self.height = 480
        self.last_frame = np.zeros((1, 1))  # placeholder until first decode
        self.ifRun = True
        self.frames = 0
        self.initFrame = None
        self.ret = False
        self.ifCameraInitial = False
        self.ifLoop = False
        self.rtsp_url = ''
        # ctypes prototype for the libvlc video "lock" callback.
        self.CorrectVideoLockCb = ctypes.CFUNCTYPE(
            ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p))

    def initialize(self, rtsp_url):
        """Allocate a fixed-size BGRA buffer and tell VLC to decode into it."""
        self.rtsp_url = rtsp_url
        self.VIDEOWIDTH = 640
        self.VIDEOHEIGHT = 480
        # size in bytes when RV32 (4 bytes per pixel)
        self.size = self.VIDEOWIDTH * self.VIDEOHEIGHT * 4
        # allocate buffer and keep a raw pointer for the lock callback
        self.buf = (ctypes.c_ubyte * self.size)()
        self.buf_p = ctypes.cast(self.buf, ctypes.c_void_p)
        self.video_set_format("BGRA", self.VIDEOWIDTH,
                              self.VIDEOHEIGHT, self.VIDEOWIDTH * 4)

    # Set the URL / local file path to play; every call reloads the resource.
    def set_uri(self, uri):
        self.media.set_mrl(uri)

    # Play. Returns 0 on success, -1 on failure.
    def play(self, path=None):
        if path:
            self.set_uri(path)
        return self.media.play()

    # Pause playback.
    def pause(self):
        self.media.pause()

    # Resume playback.
    def resume(self):
        self.media.set_pause(0)

    # Stop playback.
    def stop(self):
        self.media.stop()

    # Release the underlying player resources.
    def release(self):
        return self.media.release()

    # Whether playback is in progress.
    def is_playing(self):
        return self.media.is_playing()

    # Elapsed playback time in milliseconds.
    def get_time(self):
        return self.media.get_time()

    # Seek to the given millisecond position. Returns 0 on success, -1 on
    # failure (only effective when the media format / protocol supports it).
    def set_time(self, ms):
        # BUG FIX: previously called get_time() and ignored `ms`, so
        # seeking silently never happened.
        return self.media.set_time(ms)

    # Total media length in milliseconds.
    def get_length(self):
        return self.media.get_length()

    # Current volume (0~100).
    def get_volume(self):
        return self.media.audio_get_volume()

    # Set volume (0~100).
    def set_volume(self, volume):
        return self.media.audio_set_volume(volume)

    # Current state: 1 playing, 0 paused, -1 otherwise.
    def get_state(self):
        state = self.media.get_state()
        if state == vlc.State.Playing:
            return 1
        elif state == vlc.State.Paused:
            return 0
        else:
            return -1

    # Playback position as a float in [0.0, 1.0].
    def get_position(self):
        return self.media.get_position()

    # Seek by ratio in [0.0, 1.0] (support depends on media / protocol).
    def set_position(self, float_val):
        return self.media.set_position(float_val)

    # Current playback rate.
    def get_rate(self):
        return self.media.get_rate()

    # Set playback rate (e.g. 1.2 = 1.2x speed).
    def set_rate(self, rate):
        return self.media.set_rate(rate)

    # Set aspect ratio (e.g. "16:9", "4:3").
    def set_ratio(self, ratio):
        self.media.video_set_scale(0)  # must be 0, otherwise size cannot change
        self.media.video_set_aspect_ratio(ratio)

    # Attach an event listener.
    def add_callback(self, event_type, callback):
        self.media.event_manager().event_attach(event_type, callback)

    # Detach an event listener.
    def remove_callback(self, event_type, callback):
        self.media.event_manager().event_detach(event_type, callback)

    def libvlc_video_set_callbacks(self, _lockcb, _display, _unLockcb=None, _opaque=None):
        """Register raw-buffer lock/display callbacks with libvlc."""
        vlc.libvlc_video_set_callbacks(
            self.media, _lockcb, _unLockcb, _display, None)

    def video_set_format(self, codec, videowidth, videoheight, datasize):
        self.media.video_set_format(codec, videowidth, videoheight, datasize)

    def enqueue(self, _frame):
        """Push a frame into the bounded queue (blocks when full)."""
        self.frameQueue.put(_frame)

    def dequeue(self):
        """Pop a frame; returns (False, None) when the queue is empty."""
        if not self.frameQueue.empty():
            return True, self.frameQueue.get()
        else:
            return False, None

    def get_frame(self):
        """Return (ok, frame): the latest decoded frame resized to 640x480.

        ok is False when VLC is not currently playing; the previous frame is
        still returned in that case.
        """
        ret = self.is_playing()
        if ret:
            _frame = self.last_frame
            self.last_frame = cv2.resize(_frame, (640, 480))
            self.ret = True
            print("dequeue")
            return self.ret, self.last_frame
        self.ret = False
        return self.ret, self.last_frame

    def acquire_movie(self, num_frames=None):
        """Start playback and pump frames until VLC stops.

        ``num_frames`` is accepted for API parity with Camera/Video but is
        ignored — the original same-named overload taking it was a shadowed
        no-op stub, so this matches historical behaviour.
        """
        # Lock callback: hand VLC our pre-allocated buffer for one frame.
        @self.CorrectVideoLockCb
        def _lockcb(opaque, planes):
            time_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S.%f')
            print("lock " + time_str, file=sys.stderr)
            planes[0] = self.buf_p

        # Display callback: convert the decoded BGRA buffer to BGR and
        # publish it as the latest frame.
        @vlc.CallbackDecorators.VideoDisplayCb
        def _display(opaque, picture):
            # NOTE(original author): the buffer should really be copied fast
            # and processed in a separate thread (or double-buffered) rather
            # than converted here inside the callback.
            img = Image.frombuffer(
                "RGBA", (self.VIDEOWIDTH, self.VIDEOHEIGHT), self.buf, "raw", "BGRA", 0, 1)
            cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
            self.last_frame = cv_img.copy()
            print('enqueue')
            self.get_frame()

        self.libvlc_video_set_callbacks(_lockcb, _display, None, None)
        self.play(self.rtsp_url)
        time.sleep(10)  # give VLC time to open the stream before polling
        while True:
            if self.is_playing():
                cv2.waitKey(30)
            else:
                print('vlc end')
                self.ret = False
                break
        self.close_camera()

    def close_camera(self):
        """Stop the acquisition loop and stop VLC playback."""
        self.ifRun = False
        self.stop()
def main():
    """Smoke-test the Video wrapper: play a local file until 'q', EOF, or error."""
    filename = "D:/movie/WIN_20191023_14_28_01_Pro.mp4"
    cam = Video(filename)
    cam.initialize()
    print('Brightness:' + str(cam.get_brightness()))
    while True:
        ok, frame = cam.get_frame()
        if not ok:
            break
        cv2.imshow('VideoSourceTest ', frame)
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    cam.close_camera()
def main_Webcam():
    """Smoke-test the Camera wrapper: show webcam frames until 'q' or failure."""
    cam = Camera(0)
    cam.initialize()
    print('Brightness:' + str(cam.get_brightness()))
    while True:
        ok, frame = cam.get_frame()
        if not ok:
            break
        cv2.imshow('VideoSourceTest ', frame)
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    cam.close_camera()
def main_IPcam():
    """Smoke-test the IPcam wrapper using the module-level IP_cam instance."""
    rtsp_url = 'D:/movie/Pepper_short.mp4'
    IP_cam.initialize(rtsp_url)
    IP_cam.VIDEOWIDTH = 640
    IP_cam.VIDEOHEIGHT = 480
    IP_cam.play(rtsp_url)
    time.sleep(10)  # let VLC open the media before polling frames
    while True:
        ok, frame = IP_cam.get_frame()
        if ok:
            cv2.imshow('IPcamSourceTest ', frame)
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break
        else:
            print('vlc end')
            break
    cv2.destroyAllWindows()
    IP_cam.close_camera()
# Unit Test
if __name__ == '__main__':
# mark
# filename = "D:\movie\peper.mp4"
# cam = Video(filename)
# cam.initialize()
# # cam.acquire_movie()
# # while(True):
# # # if (cam.last_frame.Size ==1):
# # # break
# #
# # cv2.imshow('VideoSourceTest ', cam.last_frame)
# # # # print(frame)
# # if cv2.waitKey(1) & 0xFF == ord('q'):
# # break
# # cam = Camera(0)
# # cam.initialize()
# # cam.set_brightness(1)
# # print(cam.get_brightness())
# # cam.set_brightness(0.5)
# # print(cam.get_brightness())
# # print(cam)
# # print(cam.decode_fourcc())
# # camStr = str(cam.width) + 'x' + str(cam.height)
# # caminfo = camsource + ' ' + camdecode + ' ' + cam_size
# # caminfo = str(cam.__str__()) + ' ' + str(cam.decode_fourcc()) + ' ' + str(cam.get_capture_size())
# print('Brightness:' + str(cam.get_brightness()))
# while True:
# ret, frame = cam.get_frame()
# if ret:
# # cv2.imshow('VideoSourceTest ' + caminfo, frame)
# cv2.imshow('VideoSourceTest ', frame)
# # print(frame)
# if cv2.waitKey(10) & 0xFF == ord('q'):
# break
# else:
# break
# cv2.destroyAllWindows()
# cam.close_camera()
# main()
# main_Webcam()
# global cam
# mark
global IP_cam
IP_cam = IPcam()
@IP_cam.CorrectVideoLockCb
def _lockcb(opaque, planes):
time_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S.%f')
# time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
print("lock " + time_str, file=sys.stderr)
planes[0] = IP_cam.buf_p
@vlc.CallbackDecorators.VideoDisplayCb
def _display(opaque, picture):
if True: # framenr % 24 == 0:
# shouldn't do this here! copy buffer fast and process in our own thread, or maybe cycle
# through a couple of buffers, passing one of them in _lockcb while we read from the other(s).
img = Image.frombuffer(
"RGBA", (IP_cam.VIDEOWIDTH, IP_cam.VIDEOHEIGHT), IP_cam.buf, "raw", "BGRA", 0, 1)
cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
# cv2.putText(cv_img, "Time:" + str(time_str), (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
# cv2.LINE_AA)
# IP_cam.enqueue(cv_img)
IP_cam.last_frame = cv_img.copy()
print('enqueue')
IP_cam.libvlc_video_set_callbacks(_lockcb, _display, None, None)
main_IPcam()
| 31.201709
| 118
| 0.562045
|
4a093f86880e87bd1148c71443d7d41f16a179ee
| 1,316
|
py
|
Python
|
setup.py
|
aeby/localkhan
|
01c2d5b020f158c1d6e0ec3268181364ec035c95
|
[
"MIT"
] | 3
|
2015-03-22T16:07:11.000Z
|
2015-07-02T18:44:51.000Z
|
setup.py
|
aeby/localkhan
|
01c2d5b020f158c1d6e0ec3268181364ec035c95
|
[
"MIT"
] | 1
|
2015-05-10T19:52:11.000Z
|
2015-05-13T17:58:05.000Z
|
setup.py
|
aeby/localkhan
|
01c2d5b020f158c1d6e0ec3268181364ec035c95
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
from os import path
import localkhan
# Resolve paths relative to this setup.py so builds work from any CWD.
here = path.abspath(path.dirname(__file__))

# Reuse the README as the PyPI long description.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='localkhan',
    version=localkhan.__version__,
    url='http://github.com/aeby/localkhan/',
    license=localkhan.__license__,
    author='Reto Aebersold',
    author_email='aeby@substyle.ch',
    description='Download and distribute Khan content',
    long_description=long_description,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    # Pinned runtime dependencies.
    install_requires=[
        'requests==2.7.0',
        'docopt==0.6.2',
        'schema==0.3.1',
        'clint==0.4.1',
        'Flask==0.10.1',
        'netifaces==0.10.4',
        'pytube==0.2.0'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Education'
    ],
    # Console entry point: `localkhan` -> localkhan.cli:main
    entry_points='''
        [console_scripts]
        localkhan=localkhan.cli:main
    '''
)
| 27.416667
| 64
| 0.62234
|
4a09415993efcf367b628c27d133663b5d800d0d
| 1,614
|
py
|
Python
|
codereef/misc.py
|
codereef-ai/client
|
9e28b988c7b10e5c1f45a0c21d6b5f66a4a17dd9
|
[
"Apache-2.0"
] | 10
|
2019-12-13T16:05:08.000Z
|
2021-03-30T07:13:03.000Z
|
codereef/misc.py
|
codereef-ai/client
|
9e28b988c7b10e5c1f45a0c21d6b5f66a4a17dd9
|
[
"Apache-2.0"
] | 12
|
2019-12-17T15:10:46.000Z
|
2019-12-30T15:51:04.000Z
|
codereef/misc.py
|
codereef-ai/client
|
9e28b988c7b10e5c1f45a0c21d6b5f66a4a17dd9
|
[
"Apache-2.0"
] | 2
|
2020-01-07T09:17:50.000Z
|
2020-01-08T18:28:52.000Z
|
#
# Low-level API
# Developer: Grigori Fursin
#
import urllib
import json
import sys
try:
import urllib.request as urllib2
except:
import urllib2
try:
from urllib.parse import urlencode
from urllib.parse import quote_plus
except:
from urllib import urlencode
from urllib import quote_plus
def request(i):
    """
    Input:  {
              url  - URL
              get  - get parameters
              post - post parameters
            }

    Output: {
              return  - return code = 0 if success or >0 if error
              (error) - error string if return>0
            }
    """
    url = i['url']

    # Append GET parameters '&'-joined directly onto the URL (the caller is
    # expected to supply any leading '?' in `url`).
    params = i.get('get', {})
    url += '&'.join(key + '=' + quote_plus(params[key]) for key in params)

    req = urllib2.Request(url)

    try:
        stream = urllib2.urlopen(req)
    except Exception as e:
        return {'return': 1, 'error': 'Access failed (' + format(e) + ')'}

    try:
        body = stream.read()
    except Exception as e:
        return {'return': 1, 'error': 'Failed to read stream (' + format(e) + ')'}

    try:
        stream.close()
    except Exception as e:
        return {'return': 1, 'error': 'Failed to close stream (' + format(e) + ')'}

    # Best-effort UTF-8 decode; leave raw bytes on failure.
    try:
        body = body.decode('utf8')
    except Exception:
        pass

    # Best-effort JSON parse; empty dict on failure.
    parsed = {}
    try:
        parsed = json.loads(body)
    except Exception:
        pass

    return {'return': 0, 'string': body, 'dict': parsed}
| 18.551724
| 76
| 0.547088
|
4a09417630aa6daeef8de283c5d44104dc4f1839
| 10,289
|
py
|
Python
|
tensorflow/python/estimator/run_config_test.py
|
DHsLc/test
|
f286c78b619b81ca95ba9f738cc0de4e14440e44
|
[
"Apache-2.0"
] | 5
|
2021-01-11T01:51:57.000Z
|
2021-12-11T17:19:08.000Z
|
tensorflow/python/estimator/run_config_test.py
|
radi2015/tensorflow
|
4b2fb49fd7578afe7e289936f347af581b5bdab1
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/estimator/run_config_test.py
|
radi2015/tensorflow
|
4b2fb49fd7578afe7e289936f347af581b5bdab1
|
[
"Apache-2.0"
] | 3
|
2020-07-02T13:46:32.000Z
|
2021-01-11T01:52:01.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RunConfig tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.platform import test
# Shared fixtures for the tests below.
_TEST_DIR = 'test_dir'
_MASTER = 'master_'
# Regex fragments matched against the ValueError messages raised by RunConfig.
_NOT_SUPPORTED_REPLACE_PROPERTY_MSG = 'Replacing .*is not supported'
_SAVE_CKPT_ERR = (
    '`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
_MODEL_DIR_ERR = 'model_dir should be non-empty'
_SAVE_SUMMARY_STEPS_ERR = 'save_summary_steps should be >= 0'
_SAVE_CKPT_STEPS_ERR = 'save_checkpoints_steps should be >= 0'
_SAVE_CKPT_SECS_ERR = 'save_checkpoints_secs should be >= 0'
_SESSION_CONFIG_ERR = 'session_config must be instance of ConfigProto'
_KEEP_CKPT_MAX_ERR = 'keep_checkpoint_max should be >= 0'
_KEEP_CKPT_HOURS_ERR = 'keep_checkpoint_every_n_hours should be > 0'
_TF_RANDOM_SEED_ERR = 'tf_random_seed must be integer'
class RunConfigTest(test.TestCase):
  """Tests RunConfig defaults, `replace`, and constructor validation."""

  def test_default_property_values(self):
    config = run_config_lib.RunConfig()
    self.assertIsNone(config.model_dir)
    self.assertIsNone(config.session_config)
    self.assertEqual(1, config.tf_random_seed)
    self.assertEqual(100, config.save_summary_steps)
    self.assertEqual(600, config.save_checkpoints_secs)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertEqual(5, config.keep_checkpoint_max)
    self.assertEqual(10000, config.keep_checkpoint_every_n_hours)

  def test_model_dir(self):
    empty_config = run_config_lib.RunConfig()
    self.assertIsNone(empty_config.model_dir)
    new_config = empty_config.replace(model_dir=_TEST_DIR)
    self.assertEqual(_TEST_DIR, new_config.model_dir)

  def test_replace_with_allowed_properties(self):
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    config = run_config_lib.RunConfig().replace(
        tf_random_seed=11,
        save_summary_steps=12,
        save_checkpoints_secs=14,
        session_config=session_config,
        keep_checkpoint_max=16,
        keep_checkpoint_every_n_hours=17)
    self.assertEqual(11, config.tf_random_seed)
    self.assertEqual(12, config.save_summary_steps)
    self.assertEqual(14, config.save_checkpoints_secs)
    self.assertEqual(session_config, config.session_config)
    self.assertEqual(16, config.keep_checkpoint_max)
    self.assertEqual(17, config.keep_checkpoint_every_n_hours)

  def test_replace_none_value(self):
    config = run_config_lib.RunConfig().replace(
        tf_random_seed=None,
        model_dir=None,
        save_summary_steps=None,
        save_checkpoints_secs=None,
        save_checkpoints_steps=None,
        session_config=None,
        keep_checkpoint_max=None,
        keep_checkpoint_every_n_hours=None)
    self.assertIsNone(config.tf_random_seed)
    self.assertIsNone(config.model_dir)
    self.assertIsNone(config.save_summary_steps)
    self.assertIsNone(config.save_checkpoints_secs)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.session_config)
    self.assertIsNone(config.keep_checkpoint_max)
    self.assertIsNone(config.keep_checkpoint_every_n_hours)

  def test_replace_with_disallowallowed_properties(self):
    config = run_config_lib.RunConfig()
    with self.assertRaises(ValueError):
      # master is not allowed to be replaced.
      config.replace(master='_master')
    with self.assertRaises(ValueError):
      config.replace(some_undefined_property=123)

  def test_replace(self):
    config = run_config_lib.RunConfig()
    with self.assertRaisesRegexp(
        ValueError, _NOT_SUPPORTED_REPLACE_PROPERTY_MSG):
      # master is not allowed to be replaced.
      config.replace(master=_MASTER)
    with self.assertRaisesRegexp(
        ValueError, _NOT_SUPPORTED_REPLACE_PROPERTY_MSG):
      config.replace(some_undefined_property=_MASTER)

  def test_replace_invalid_values(self):
    config = run_config_lib.RunConfig()
    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
      config.replace(model_dir='')
    with self.assertRaisesRegexp(ValueError, _SAVE_SUMMARY_STEPS_ERR):
      config.replace(save_summary_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_STEPS_ERR):
      config.replace(save_checkpoints_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_SECS_ERR):
      config.replace(save_checkpoints_secs=-1)
    with self.assertRaisesRegexp(ValueError, _SESSION_CONFIG_ERR):
      config.replace(session_config={})
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_MAX_ERR):
      config.replace(keep_checkpoint_max=-1)
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_HOURS_ERR):
      config.replace(keep_checkpoint_every_n_hours=0)
    with self.assertRaisesRegexp(ValueError, _TF_RANDOM_SEED_ERR):
      config.replace(tf_random_seed=1.0)

  def test_init_with_allowed_properties(self):
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    config = run_config_lib.RunConfig(
        tf_random_seed=11,
        save_summary_steps=12,
        save_checkpoints_secs=14,
        session_config=session_config,
        keep_checkpoint_max=16,
        keep_checkpoint_every_n_hours=17)
    self.assertEqual(11, config.tf_random_seed)
    self.assertEqual(12, config.save_summary_steps)
    self.assertEqual(14, config.save_checkpoints_secs)
    self.assertEqual(session_config, config.session_config)
    self.assertEqual(16, config.keep_checkpoint_max)
    self.assertEqual(17, config.keep_checkpoint_every_n_hours)

  def test_init_none_value(self):
    config = run_config_lib.RunConfig(
        tf_random_seed=None,
        model_dir=None,
        save_summary_steps=None,
        save_checkpoints_secs=None,
        save_checkpoints_steps=None,
        session_config=None,
        keep_checkpoint_max=None,
        keep_checkpoint_every_n_hours=None)
    self.assertIsNone(config.tf_random_seed)
    self.assertIsNone(config.model_dir)
    self.assertIsNone(config.save_summary_steps)
    self.assertIsNone(config.save_checkpoints_secs)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.session_config)
    self.assertIsNone(config.keep_checkpoint_max)
    self.assertIsNone(config.keep_checkpoint_every_n_hours)

  def test_init_invalid_values(self):
    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
      run_config_lib.RunConfig(model_dir='')
    with self.assertRaisesRegexp(ValueError, _SAVE_SUMMARY_STEPS_ERR):
      run_config_lib.RunConfig(save_summary_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_STEPS_ERR):
      run_config_lib.RunConfig(save_checkpoints_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_SECS_ERR):
      run_config_lib.RunConfig(save_checkpoints_secs=-1)
    with self.assertRaisesRegexp(ValueError, _SESSION_CONFIG_ERR):
      run_config_lib.RunConfig(session_config={})
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_MAX_ERR):
      run_config_lib.RunConfig(keep_checkpoint_max=-1)
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_HOURS_ERR):
      run_config_lib.RunConfig(keep_checkpoint_every_n_hours=0)
    with self.assertRaisesRegexp(ValueError, _TF_RANDOM_SEED_ERR):
      run_config_lib.RunConfig(tf_random_seed=1.0)
class RunConfigSaveCheckpointsTest(test.TestCase):
  """Tests the mutual exclusivity of save_checkpoints_steps/_secs."""

  def test_save_checkpoint(self):
    empty_config = run_config_lib.RunConfig()
    self.assertEqual(600, empty_config.save_checkpoints_secs)
    self.assertIsNone(empty_config.save_checkpoints_steps)
    # Setting steps clears secs, and vice versa.
    config_with_steps = empty_config.replace(save_checkpoints_steps=100)
    del empty_config
    self.assertEqual(100, config_with_steps.save_checkpoints_steps)
    self.assertIsNone(config_with_steps.save_checkpoints_secs)
    config_with_secs = config_with_steps.replace(save_checkpoints_secs=200)
    del config_with_steps
    self.assertEqual(200, config_with_secs.save_checkpoints_secs)
    self.assertIsNone(config_with_secs.save_checkpoints_steps)

  def test_save_checkpoint_both_steps_and_secs_are_not_none(self):
    empty_config = run_config_lib.RunConfig()
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_ERR):
      empty_config.replace(save_checkpoints_steps=100,
                           save_checkpoints_secs=200)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_ERR):
      run_config_lib.RunConfig(save_checkpoints_steps=100,
                               save_checkpoints_secs=200)

  def test_save_checkpoint_both_steps_and_secs_are_none(self):
    config_with_secs = run_config_lib.RunConfig()
    config_without_ckpt = config_with_secs.replace(
        save_checkpoints_steps=None, save_checkpoints_secs=None)
    self.assertIsNone(config_without_ckpt.save_checkpoints_steps)
    self.assertIsNone(config_without_ckpt.save_checkpoints_secs)

  def test_save_checkpoint_flip_secs_to_none(self):
    config_with_secs = run_config_lib.RunConfig()
    config_without_ckpt = config_with_secs.replace(save_checkpoints_secs=None)
    self.assertIsNone(config_without_ckpt.save_checkpoints_steps)
    self.assertIsNone(config_without_ckpt.save_checkpoints_secs)

  def test_save_checkpoint_flip_steps_to_none(self):
    config_with_steps = run_config_lib.RunConfig().replace(
        save_checkpoints_steps=100)
    config_without_ckpt = config_with_steps.replace(save_checkpoints_steps=None)
    self.assertIsNone(config_without_ckpt.save_checkpoints_steps)
    self.assertIsNone(config_without_ckpt.save_checkpoints_secs)
# Run all test cases via the TF test runner when executed directly.
if __name__ == '__main__':
  test.main()
| 42.516529
| 80
| 0.777335
|
4a0941d2050418523c43bdefadc0618cfb3a6c0d
| 298
|
py
|
Python
|
training/components/training/blessed_model.py
|
anifort/vertex-mlops-airlines
|
5a213836070bcbe72419239f05dd15a42bdebd19
|
[
"MIT"
] | null | null | null |
training/components/training/blessed_model.py
|
anifort/vertex-mlops-airlines
|
5a213836070bcbe72419239f05dd15a42bdebd19
|
[
"MIT"
] | null | null | null |
training/components/training/blessed_model.py
|
anifort/vertex-mlops-airlines
|
5a213836070bcbe72419239f05dd15a42bdebd19
|
[
"MIT"
] | null | null | null |
from kfp.v2.dsl import (
component,
Input,
Metrics,
)
@component()
def blessed_model_comp(
    model_1: Input[Metrics],
    model_2: Input[Metrics]
) -> int:
    """Pick the blessed model: 1 when the BT model's test F1 beats the SVM's, else 2."""
    bt_score = model_1.metadata['bt_f1_test_score']
    svm_score = model_2.metadata['svm_f1_test_score']
    return 1 if bt_score > svm_score else 2
| 18.625
| 82
| 0.647651
|
4a0941de8628274736b119e31440776f970e4385
| 31,935
|
py
|
Python
|
dogechia/server/server.py
|
hagbardcelene/doge-chia
|
72bdf0a7b20a579fe4645f0cb132955e181e1c44
|
[
"Apache-2.0"
] | 27
|
2021-07-06T16:33:50.000Z
|
2022-02-19T21:11:25.000Z
|
dogechia/server/server.py
|
hagbardcelene/doge-chia
|
72bdf0a7b20a579fe4645f0cb132955e181e1c44
|
[
"Apache-2.0"
] | 15
|
2021-07-07T02:32:59.000Z
|
2021-10-15T21:19:51.000Z
|
dogechia/server/server.py
|
hagbardcelene/doge-chia
|
72bdf0a7b20a579fe4645f0cb132955e181e1c44
|
[
"Apache-2.0"
] | 12
|
2021-07-08T15:36:20.000Z
|
2022-03-15T08:34:01.000Z
|
import asyncio
import logging
import ssl
import time
import traceback
from ipaddress import IPv6Address, ip_address, ip_network, IPv4Network, IPv6Network
from pathlib import Path
from secrets import token_bytes
from typing import Any, Callable, Dict, List, Optional, Union, Set, Tuple
from aiohttp import ClientSession, ClientTimeout, ServerDisconnectedError, WSCloseCode, client_exceptions, web
from aiohttp.web_app import Application
from aiohttp.web_runner import TCPSite
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from dogechia.protocols.protocol_message_types import ProtocolMessageTypes
from dogechia.protocols.shared_protocol import protocol_version
from dogechia.server.introducer_peers import IntroducerPeers
from dogechia.server.outbound_message import Message, NodeType
from dogechia.server.ssl_context import private_ssl_paths, public_ssl_paths
from dogechia.server.ws_connection import WSDogeChiaConnection
from dogechia.types.blockchain_format.sized_bytes import bytes32
from dogechia.types.peer_info import PeerInfo
from dogechia.util.errors import Err, ProtocolError
from dogechia.util.ints import uint16
from dogechia.util.network import is_localhost, is_in_network
def ssl_context_for_server(
    ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path
) -> Optional[ssl.SSLContext]:
    """Build the TLS context used for inbound peer connections.

    ``ca_key`` is accepted for signature symmetry but is not used here.
    NOTE(review): ``ssl._create_unverified_context`` is a CPython-private
    helper; it skips default verification setup, after which the context is
    re-tightened below to demand a client cert signed by ``ca_cert``.
    """
    ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert))
    ssl_context.check_hostname = False  # peers are identified by cert fingerprint, not hostname
    ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path))
    ssl_context.verify_mode = ssl.CERT_REQUIRED  # mutual TLS: the client must present a cert
    return ssl_context
def ssl_context_for_root(
    ca_cert_file: str,
) -> Optional[ssl.SSLContext]:
    """Return a default-verifying client-side context that trusts *ca_cert_file* as CA."""
    return ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_cert_file)
def ssl_context_for_client(
    ca_cert: Path,
    ca_key: Path,
    private_cert_path: Path,
    private_key_path: Path,
) -> Optional[ssl.SSLContext]:
    """Build the TLS context used for outbound peer connections.

    Functionally identical to ``ssl_context_for_server`` (same private-API
    caveat: ``ssl._create_unverified_context`` is CPython-internal), kept
    separate so client/server policies can diverge later. ``ca_key`` is unused.
    """
    ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert))
    ssl_context.check_hostname = False  # peer identity is the cert fingerprint, not a hostname
    ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path))
    ssl_context.verify_mode = ssl.CERT_REQUIRED  # require the server's cert to chain to ca_cert
    return ssl_context
class DogeChiaServer:
def __init__(
    self,
    port: int,
    node: Any,
    api: Any,
    local_type: NodeType,
    ping_interval: int,
    network_id: str,
    inbound_rate_limit_percent: int,
    outbound_rate_limit_percent: int,
    root_path: Path,
    config: Dict,
    private_ca_crt_key: Tuple[Path, Path],
    dogechia_ca_crt_key: Tuple[Path, Path],
    name: str = None,
    introducer_peers: Optional[IntroducerPeers] = None,
):
    """Set up connection bookkeeping, certificate paths, and background tasks.

    Must be constructed while an asyncio event loop is running: it spawns the
    incoming-message dispatcher and the connection garbage collector via
    ``asyncio.create_task``.

    NOTE(review): the ``introducer_peers`` parameter is never read; an
    introducer always builds a fresh ``IntroducerPeers()`` below.
    """
    # Keeps track of all connections to and from this node.
    # NOTE(review): basicConfig here reconfigures the root logger as a side
    # effect of constructing a server — surprising for library code.
    logging.basicConfig(level=logging.DEBUG)
    self.all_connections: Dict[bytes32, WSDogeChiaConnection] = {}
    self.tasks: Set[asyncio.Task] = set()
    # Secondary index of the same connections, bucketed by peer role.
    self.connection_by_type: Dict[NodeType, Dict[bytes32, WSDogeChiaConnection]] = {
        NodeType.FULL_NODE: {},
        NodeType.WALLET: {},
        NodeType.HARVESTER: {},
        NodeType.FARMER: {},
        NodeType.TIMELORD: {},
        NodeType.INTRODUCER: {},
    }
    self._port = port  # TCP port to identify our node
    self._local_type: NodeType = local_type
    self._ping_interval = ping_interval
    self._network_id = network_id
    self._inbound_rate_limit_percent = inbound_rate_limit_percent
    self._outbound_rate_limit_percent = outbound_rate_limit_percent
    # Task list to keep references to tasks, so they don't get GCd
    self._tasks: List[asyncio.Task] = []
    self.log = logging.getLogger(name if name else __name__)
    # Our unique random node id that we will send to other peers, regenerated on launch
    self.api = api
    self.node = node
    self.root_path = root_path
    self.config = config
    self.on_connect: Optional[Callable] = None
    self.incoming_messages: asyncio.Queue = asyncio.Queue()
    self.shut_down_event = asyncio.Event()
    if self._local_type is NodeType.INTRODUCER:
        self.introducer_peers = IntroducerPeers()
    if self._local_type is not NodeType.INTRODUCER:
        # Private (intra-node) certs exist for every role except introducer.
        self._private_cert_path, self._private_key_path = private_ssl_paths(root_path, config)
    if self._local_type is not NodeType.HARVESTER:
        # Public (p2p) certs; a harvester only talks to its local farmer.
        self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(root_path, config)
    else:
        self.p2p_crt_path, self.p2p_key_path = None, None
    self.ca_private_crt_path, self.ca_private_key_path = private_ca_crt_key
    self.dogechia_ca_crt_path, self.dogechia_ca_key_path = dogechia_ca_crt_key
    # Node id = SHA-256 fingerprint of our own certificate (see my_id()).
    self.node_id = self.my_id()
    self.incoming_task = asyncio.create_task(self.incoming_api_task())
    self.gc_task: asyncio.Task = asyncio.create_task(self.garbage_collect_connections_task())
    self.app: Optional[Application] = None
    self.runner: Optional[web.AppRunner] = None
    self.site: Optional[TCPSite] = None
    self.connection_close_task: Optional[asyncio.Task] = None
    self.site_shutdown_task: Optional[asyncio.Task] = None
    self.app_shut_down_task: Optional[asyncio.Task] = None
    self.received_message_callback: Optional[Callable] = None
    self.api_tasks: Dict[bytes32, asyncio.Task] = {}
    # Tasks flagged @execute_task run to completion: no timeout, not cancellable per peer.
    self.execute_tasks: Set[bytes32] = set()
    self.tasks_from_peer: Dict[bytes32, Set[bytes32]] = {}
    # host -> unix timestamp until which the peer is banned.
    self.banned_peers: Dict[str, float] = {}
    self.invalid_protocol_ban_seconds = 10
    self.api_exception_ban_seconds = 10
    # Networks whose peers bypass the inbound-connection limit.
    self.exempt_peer_networks: List[Union[IPv4Network, IPv6Network]] = [
        ip_network(net, strict=False) for net in config.get("exempt_peer_networks", [])
    ]
def my_id(self) -> bytes32:
    """If node has public cert use that one for id, if not use private."""
    cert_path = self.p2p_crt_path if self.p2p_crt_path is not None else self._private_cert_path
    pem_cert = x509.load_pem_x509_certificate(cert_path.read_bytes(), default_backend())
    # Fingerprint the DER form so the id is stable regardless of PEM encoding details.
    der_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
    der_cert = x509.load_der_x509_certificate(der_bytes, default_backend())
    return bytes32(der_cert.fingerprint(hashes.SHA256()))
def set_received_message_callback(self, callback: Callable):
    """Register a coroutine that is awaited for every received message,
    before the message is dispatched to the API (see incoming_api_task)."""
    self.received_message_callback = callback
async def garbage_collect_connections_task(self) -> None:
    """
    Periodically checks for connections with no activity (have not sent us any data), and removes them,
    to allow room for other peers.
    """
    while True:
        await asyncio.sleep(600)  # sweep every 10 minutes
        to_remove: List[WSDogeChiaConnection] = []
        for connection in self.all_connections.values():
            # Only full-node <-> full-node links are subject to idle GC.
            if self._local_type == NodeType.FULL_NODE and connection.connection_type == NodeType.FULL_NODE:
                if time.time() - connection.last_message_time > 1800:  # 30 min idle
                    to_remove.append(connection)
        # Close outside the loop above: close() mutates all_connections.
        for connection in to_remove:
            self.log.debug(f"Garbage collecting connection {connection.peer_host} due to inactivity")
            await connection.close()
        # Also garbage collect banned_peers dict
        to_remove_ban = []
        for peer_ip, ban_until_time in self.banned_peers.items():
            if time.time() > ban_until_time:
                to_remove_ban.append(peer_ip)
        for peer_ip in to_remove_ban:
            del self.banned_peers[peer_ip]
async def start_server(self, on_connect: Callable = None):
    """Start the aiohttp websocket server listening on ``self._port``.

    No-op for roles that never accept inbound connections (wallet,
    harvester, timelord). ``on_connect`` is awaited for each accepted peer.
    """
    if self._local_type in [NodeType.WALLET, NodeType.HARVESTER, NodeType.TIMELORD]:
        return None
    self.app = web.Application()
    self.on_connect = on_connect
    routes = [
        web.get("/ws", self.incoming_connection),
    ]
    self.app.add_routes(routes)
    self.runner = web.AppRunner(self.app, access_log=None, logger=self.log)
    await self.runner.setup()
    # Full nodes and introducers accept anyone on the public network CA;
    # every other role requires certs signed by our private CA.
    authenticate = self._local_type not in (NodeType.FULL_NODE, NodeType.INTRODUCER)
    if authenticate:
        ssl_context = ssl_context_for_server(
            self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path
        )
    else:
        self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(self.root_path, self.config)
        ssl_context = ssl_context_for_server(
            self.dogechia_ca_crt_path, self.dogechia_ca_key_path, self.p2p_crt_path, self.p2p_key_path
        )
    self.site = web.TCPSite(
        self.runner,
        port=self._port,
        shutdown_timeout=3,
        ssl_context=ssl_context,
    )
    await self.site.start()
    self.log.info(f"Started listening on port: {self._port}")
async def incoming_connection(self, request):
    """aiohttp handler for inbound websocket peers: authenticate by TLS cert
    fingerprint, perform the protocol handshake, register the connection, and
    keep the handler alive until the connection closes."""
    if request.remote in self.banned_peers and time.time() < self.banned_peers[request.remote]:
        self.log.warning(f"Peer {request.remote} is banned, refusing connection")
        return None
    ws = web.WebSocketResponse(max_msg_size=50 * 1024 * 1024)  # 50 MiB message cap
    await ws.prepare(request)
    close_event = asyncio.Event()
    # Peer id = SHA-256 fingerprint of the client's TLS certificate.
    # NOTE(review): relies on private aiohttp/asyncio transport attributes.
    cert_bytes = request.transport._ssl_protocol._extra["ssl_object"].getpeercert(True)
    der_cert = x509.load_der_x509_certificate(cert_bytes)
    peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
    if peer_id == self.node_id:
        return ws  # never talk to ourselves
    connection: Optional[WSDogeChiaConnection] = None
    try:
        connection = WSDogeChiaConnection(
            self._local_type,
            ws,
            self._port,
            self.log,
            False,
            False,
            request.remote,
            self.incoming_messages,
            self.connection_closed,
            peer_id,
            self._inbound_rate_limit_percent,
            self._outbound_rate_limit_percent,
            close_event,
        )
        handshake = await connection.perform_handshake(
            self._network_id,
            protocol_version,
            self._port,
            self._local_type,
        )
        assert handshake is True
        # Limit inbound connections to config's specifications.
        if not self.accept_inbound_connections(connection.connection_type) and not is_in_network(
            connection.peer_host, self.exempt_peer_networks
        ):
            self.log.info(f"Not accepting inbound connection: {connection.get_peer_info()}.Inbound limit reached.")
            await connection.close()
            close_event.set()
        else:
            await self.connection_added(connection, self.on_connect)
            if self._local_type is NodeType.INTRODUCER and connection.connection_type is NodeType.FULL_NODE:
                self.introducer_peers.add(connection.get_peer_info())
    except ProtocolError as e:
        if connection is not None:
            await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code)
        if e.code == Err.INVALID_HANDSHAKE:
            self.log.warning("Invalid handshake with peer. Maybe the peer is running old software.")
            close_event.set()
        elif e.code == Err.INCOMPATIBLE_NETWORK_ID:
            self.log.warning("Incompatible network ID. Maybe the peer is on another network")
            close_event.set()
        elif e.code == Err.SELF_CONNECTION:
            close_event.set()
        else:
            error_stack = traceback.format_exc()
            self.log.error(f"Exception {e}, exception Stack: {error_stack}")
            close_event.set()
    except Exception as e:
        if connection is not None:
            await connection.close(ws_close_code=WSCloseCode.PROTOCOL_ERROR, error=Err.UNKNOWN)
        error_stack = traceback.format_exc()
        self.log.error(f"Exception {e}, exception Stack: {error_stack}")
        close_event.set()
    # Block here so aiohttp keeps the websocket open until the connection closes.
    await close_event.wait()
    return ws
async def connection_added(self, connection: WSDogeChiaConnection, on_connect=None):
    """Register *connection* in the bookkeeping maps and fire *on_connect*."""
    # Peer ids derive from TLS public keys, so an id collision means the same
    # peer reconnected: close the stale connection before adopting the new one.
    if connection.peer_node_id in self.all_connections:
        await self.all_connections[connection.peer_node_id].close()
    self.all_connections[connection.peer_node_id] = connection
    if connection.connection_type is None:
        self.log.error(f"Invalid connection type for connection {connection}")
        return
    self.connection_by_type[connection.connection_type][connection.peer_node_id] = connection
    if on_connect is not None:
        await on_connect(connection)
def is_duplicate_or_self_connection(self, target_node: PeerInfo) -> bool:
    """Return True when *target_node* is ourselves or we already hold a
    connection to the same host/port pair."""
    if is_localhost(target_node.host) and target_node.port == self._port:
        # Don't connect to self
        self.log.debug(f"Not connecting to {target_node}")
        return True
    duplicate = any(
        conn.host == target_node.host and conn.peer_server_port == target_node.port
        for conn in self.all_connections.values()
    )
    if duplicate:
        self.log.debug(f"Not connecting to {target_node}, duplicate connection")
    return duplicate
async def start_client(
    self,
    target_node: PeerInfo,
    on_connect: Callable = None,
    auth: bool = False,
    is_feeler: bool = False,
) -> bool:
    """
    Tries to connect to the target node, adding one connection into the pipeline, if successful.
    An on connect method can also be specified, and this will be saved into the instance variables.

    Returns True only when the connection was fully established and registered.
    """
    if self.is_duplicate_or_self_connection(target_node):
        return False
    if target_node.host in self.banned_peers and time.time() < self.banned_peers[target_node.host]:
        self.log.warning(f"Peer {target_node.host} is still banned, not connecting to it")
        return False
    if auth:
        # auth=True: mutual TLS against our private CA (same-owner services).
        ssl_context = ssl_context_for_client(
            self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path
        )
    else:
        # Public network CA for regular p2p peers.
        ssl_context = ssl_context_for_client(
            self.dogechia_ca_crt_path, self.dogechia_ca_key_path, self.p2p_crt_path, self.p2p_key_path
        )
    session = None
    connection: Optional[WSDogeChiaConnection] = None
    try:
        timeout = ClientTimeout(total=30)
        session = ClientSession(timeout=timeout)
        try:
            # IPv6 literals must be bracketed inside a URL.
            if type(ip_address(target_node.host)) is IPv6Address:
                target_node = PeerInfo(f"[{target_node.host}]", target_node.port)
        except ValueError:
            pass  # host is a DNS name rather than an IP literal
        url = f"wss://{target_node.host}:{target_node.port}/ws"
        self.log.debug(f"Connecting: {url}, Peer info: {target_node}")
        try:
            ws = await session.ws_connect(
                url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=50 * 1024 * 1024
            )
        except ServerDisconnectedError:
            self.log.debug(f"Server disconnected error connecting to {url}. Perhaps we are banned by the peer.")
            return False
        except asyncio.TimeoutError:
            self.log.debug(f"Timeout error connecting to {url}")
            return False
        if ws is None:
            return False
        # Derive the peer id from the server's TLS certificate fingerprint.
        # NOTE(review): reaches into private aiohttp/asyncio attributes.
        assert ws._response.connection is not None and ws._response.connection.transport is not None
        transport = ws._response.connection.transport  # type: ignore
        cert_bytes = transport._ssl_protocol._extra["ssl_object"].getpeercert(True)  # type: ignore
        der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend())
        peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
        if peer_id == self.node_id:
            raise RuntimeError(f"Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}")
        connection = WSDogeChiaConnection(
            self._local_type,
            ws,
            self._port,
            self.log,
            True,
            False,
            target_node.host,
            self.incoming_messages,
            self.connection_closed,
            peer_id,
            self._inbound_rate_limit_percent,
            self._outbound_rate_limit_percent,
            session=session,
        )
        handshake = await connection.perform_handshake(
            self._network_id,
            protocol_version,
            self._port,
            self._local_type,
        )
        assert handshake is True
        await self.connection_added(connection, on_connect)
        # the session has been adopted by the connection, don't close it at
        # the end of the function
        session = None
        connection_type_str = ""
        if connection.connection_type is not None:
            connection_type_str = connection.connection_type.name.lower()
        self.log.info(f"Connected with {connection_type_str} {target_node}")
        if is_feeler:
            # Feeler connections only probe reachability; close right away.
            asyncio.create_task(connection.close())
        return True
    except client_exceptions.ClientConnectorError as e:
        self.log.info(f"{e}")
    except ProtocolError as e:
        if connection is not None:
            await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code)
        if e.code == Err.INVALID_HANDSHAKE:
            self.log.warning(f"Invalid handshake with peer {target_node}. Maybe the peer is running old software.")
        elif e.code == Err.INCOMPATIBLE_NETWORK_ID:
            self.log.warning("Incompatible network ID. Maybe the peer is on another network")
        elif e.code == Err.SELF_CONNECTION:
            pass
        else:
            error_stack = traceback.format_exc()
            self.log.error(f"Exception {e}, exception Stack: {error_stack}")
    except Exception as e:
        if connection is not None:
            await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN)
        error_stack = traceback.format_exc()
        self.log.error(f"Exception {e}, exception Stack: {error_stack}")
    finally:
        # Only reached with session != None when the connection never adopted it.
        if session is not None:
            await session.close()
    return False
def connection_closed(self, connection: WSDogeChiaConnection, ban_time: int):
    """Callback invoked when a connection closes: optionally ban the peer for
    ``ban_time`` seconds, drop it from the bookkeeping maps, notify the node,
    and cancel its in-flight API tasks."""
    if is_localhost(connection.peer_host) and ban_time != 0:
        self.log.warning(f"Trying to ban localhost for {ban_time}, but will not ban")
        ban_time = 0
    self.log.info(f"Connection closed: {connection.peer_host}, node id: {connection.peer_node_id}")
    if ban_time > 0:
        ban_until: float = time.time() + ban_time
        self.log.warning(f"Banning {connection.peer_host} for {ban_time} seconds")
        if connection.peer_host in self.banned_peers:
            # Never shorten an existing ban.
            if ban_until > self.banned_peers[connection.peer_host]:
                self.banned_peers[connection.peer_host] = ban_until
        else:
            self.banned_peers[connection.peer_host] = ban_until
    if connection.peer_node_id in self.all_connections:
        self.all_connections.pop(connection.peer_node_id)
    if connection.connection_type is not None:
        if connection.peer_node_id in self.connection_by_type[connection.connection_type]:
            self.connection_by_type[connection.connection_type].pop(connection.peer_node_id)
    else:
        # This means the handshake was never finished with this peer
        self.log.debug(
            f"Invalid connection type for connection {connection.peer_host},"
            f" while closing. Handshake never finished."
        )
    on_disconnect = getattr(self.node, "on_disconnect", None)
    if on_disconnect is not None:
        on_disconnect(connection)
    self.cancel_tasks_from_peer(connection.peer_node_id)
def cancel_tasks_from_peer(self, peer_id: bytes32):
    """Cancel every pending API task spawned for *peer_id*, sparing tasks
    marked execute_task (those must run to completion)."""
    for task_id in self.tasks_from_peer.get(peer_id, ()):
        if task_id in self.execute_tasks:
            continue
        self.api_tasks[task_id].cancel()
async def incoming_api_task(self) -> None:
    """Main dispatch loop: pull (message, connection) pairs off
    ``incoming_messages`` and spawn one API task per message, tracking each
    task by a random id so it can be cancelled per peer."""
    self.tasks = set()
    while True:
        payload_inc, connection_inc = await self.incoming_messages.get()
        if payload_inc is None or connection_inc is None:
            continue

        async def api_call(full_message: Message, connection: WSDogeChiaConnection, task_id):
            start_time = time.time()
            try:
                if self.received_message_callback is not None:
                    await self.received_message_callback(connection)
                connection.log.debug(
                    f"<- {ProtocolMessageTypes(full_message.type).name} from peer "
                    f"{connection.peer_node_id} {connection.peer_host}"
                )
                # Resolve the handler by message-type name on the API object.
                message_type: str = ProtocolMessageTypes(full_message.type).name
                f = getattr(self.api, message_type, None)
                if f is None:
                    self.log.error(f"Non existing function: {message_type}")
                    raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type])
                # Only methods explicitly marked as api functions may be called by peers.
                if not hasattr(f, "api_function"):
                    self.log.error(f"Peer trying to call non api function {message_type}")
                    raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type])
                # If api is not ready ignore the request
                if hasattr(self.api, "api_ready"):
                    if self.api.api_ready is False:
                        return None
                timeout: Optional[int] = 600
                if hasattr(f, "execute_task"):
                    # Don't timeout on methods with execute_task decorator, these need to run fully
                    self.execute_tasks.add(task_id)
                    timeout = None
                if hasattr(f, "peer_required"):
                    coroutine = f(full_message.data, connection)
                else:
                    coroutine = f(full_message.data)

                async def wrapped_coroutine() -> Optional[Message]:
                    try:
                        result = await coroutine
                        return result
                    except asyncio.CancelledError:
                        pass
                    except Exception as e:
                        tb = traceback.format_exc()
                        connection.log.error(f"Exception: {e}, {connection.get_peer_info()}. {tb}")
                        raise e
                    return None

                response: Optional[Message] = await asyncio.wait_for(wrapped_coroutine(), timeout=timeout)
                connection.log.debug(
                    f"Time taken to process {message_type} from {connection.peer_node_id} is "
                    f"{time.time() - start_time} seconds"
                )
                if response is not None:
                    # Echo the request id so the peer can match the reply.
                    response_message = Message(response.type, full_message.id, response.data)
                    await connection.reply_to_request(response_message)
            except Exception as e:
                if self.connection_close_task is None:
                    tb = traceback.format_exc()
                    connection.log.error(
                        f"Exception: {e} {type(e)}, closing connection {connection.get_peer_info()}. {tb}"
                    )
                else:
                    connection.log.debug(f"Exception: {e} while closing connection")
                # TODO: actually throw one of the errors from errors.py and pass this to close
                await connection.close(self.api_exception_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN)
            finally:
                # Always unregister the task from the tracking maps.
                if task_id in self.api_tasks:
                    self.api_tasks.pop(task_id)
                if task_id in self.tasks_from_peer[connection.peer_node_id]:
                    self.tasks_from_peer[connection.peer_node_id].remove(task_id)
                if task_id in self.execute_tasks:
                    self.execute_tasks.remove(task_id)

        task_id = token_bytes()
        api_task = asyncio.create_task(api_call(payload_inc, connection_inc, task_id))
        self.api_tasks[task_id] = api_task
        if connection_inc.peer_node_id not in self.tasks_from_peer:
            self.tasks_from_peer[connection_inc.peer_node_id] = set()
        self.tasks_from_peer[connection_inc.peer_node_id].add(task_id)
async def send_to_others(
    self,
    messages: List[Message],
    node_type: NodeType,
    origin_peer: WSDogeChiaConnection,
):
    """Broadcast *messages* to every connected peer of *node_type* except the originator."""
    for node_id, connection in self.all_connections.items():
        if node_id == origin_peer.peer_node_id or connection.connection_type is not node_type:
            continue
        for message in messages:
            await connection.send_message(message)
async def send_to_all(self, messages: List[Message], node_type: NodeType):
    """Broadcast *messages* to every connected peer of *node_type*."""
    for connection in self.all_connections.values():
        if connection.connection_type is not node_type:
            continue
        for message in messages:
            await connection.send_message(message)
async def send_to_all_except(self, messages: List[Message], node_type: NodeType, exclude: bytes32):
    """Like send_to_all, but skip the peer whose node id equals *exclude*."""
    for connection in self.all_connections.values():
        if connection.connection_type is not node_type or connection.peer_node_id == exclude:
            continue
        for message in messages:
            await connection.send_message(message)
async def send_to_specific(self, messages: List[Message], node_id: bytes32):
    """Send *messages* to the single peer identified by *node_id*, if connected."""
    connection = self.all_connections.get(node_id)
    if connection is None:
        return
    for message in messages:
        await connection.send_message(message)
def get_outgoing_connections(self) -> List[WSDogeChiaConnection]:
    """Return every connection this node initiated (outbound), of any type."""
    # Idiom fix: comprehension instead of a manual append loop.
    return [conn for conn in self.all_connections.values() if conn.is_outbound]
def get_full_node_outgoing_connections(self) -> List[WSDogeChiaConnection]:
    """Return only the outbound subset of our FULL_NODE connections."""
    # Idiom fix: comprehension instead of a manual append loop.
    return [conn for conn in self.get_full_node_connections() if conn.is_outbound]
def get_full_node_connections(self) -> List[WSDogeChiaConnection]:
    """Return all current FULL_NODE connections, inbound and outbound."""
    return list(self.connection_by_type[NodeType.FULL_NODE].values())
def get_connections(self) -> List[WSDogeChiaConnection]:
    """Return all current connections, of every node type."""
    # Idiom fix: the original appended each value in a loop; list() is equivalent.
    return list(self.all_connections.values())
async def close_all_connections(self) -> None:
    """Close every active connection, logging (not raising) per-connection failures."""
    # Snapshot the ids first: close() mutates all_connections via connection_closed.
    # Idiom fix: list(dict) replaces the original key-harvesting comprehension.
    for node_id in list(self.all_connections):
        try:
            connection = self.all_connections.get(node_id)
            # The connection may already have been removed by an earlier close.
            if connection is not None:
                await connection.close()
        except Exception as e:
            self.log.error(f"Exception while closing connection {e}")
def close_all(self) -> None:
    """Kick off shutdown: close all connections, tear down the web app/runner,
    cancel every API task and both background loops. Completion of the spawned
    tasks is awaited in await_closed()."""
    self.connection_close_task = asyncio.create_task(self.close_all_connections())
    if self.runner is not None:
        self.site_shutdown_task = asyncio.create_task(self.runner.cleanup())
    if self.app is not None:
        self.app_shut_down_task = asyncio.create_task(self.app.shutdown())
    # Idiom fix: the original iterated .items() and ignored the key (PERF102).
    for task in self.api_tasks.values():
        task.cancel()
    self.shut_down_event.set()
    self.incoming_task.cancel()
    self.gc_task.cancel()
async def await_closed(self) -> None:
    """Block until shutdown is signalled, then drain the shutdown tasks
    spawned by close_all() in the same order they were created."""
    self.log.debug("Await Closed")
    await self.shut_down_event.wait()
    for task in (self.connection_close_task, self.app_shut_down_task, self.site_shutdown_task):
        if task is not None:
            await task
async def get_peer_info(self) -> Optional[PeerInfo]:
    """Discover our externally visible IP via checkip.amazonaws.com and return
    it paired with our listening port, or None if it cannot be determined."""
    ip = None
    port = self._port
    try:
        async with ClientSession() as session:
            async with session.get("https://checkip.amazonaws.com/") as resp:
                if resp.status == 200:
                    ip = str(await resp.text())
                    ip = ip.rstrip()  # the service appends a trailing newline
    except Exception:
        ip = None  # best effort: any network failure means "unknown"
    if ip is None:
        return None
    peer = PeerInfo(ip, uint16(port))
    if not peer.is_valid():
        return None
    return peer
def accept_inbound_connections(self, node_type: NodeType) -> bool:
    """Return True if this node may accept one more inbound connection of
    *node_type*. Only full nodes enforce limits; other roles always accept."""
    # Idiom fix: `not x == y` -> `x != y`.
    if self._local_type != NodeType.FULL_NODE:
        return True
    # Idiom/perf fix: count with a generator instead of len() over a temp list.
    inbound_count = sum(
        1 for conn in self.connection_by_type[node_type].values() if not conn.is_outbound
    )
    if node_type == NodeType.FULL_NODE:
        # Reserve target_outbound_peer_count slots for connections we initiate.
        return inbound_count < self.config["target_peer_count"] - self.config["target_outbound_peer_count"]
    if node_type == NodeType.WALLET:
        return inbound_count < self.config["max_inbound_wallet"]
    if node_type == NodeType.FARMER:
        return inbound_count < self.config["max_inbound_farmer"]
    if node_type == NodeType.TIMELORD:
        return inbound_count < self.config["max_inbound_timelord"]
    return True
def is_trusted_peer(self, peer: WSDogeChiaConnection, trusted_peers: Dict) -> bool:
    """Return True when *peer*'s node id matches the fingerprint of any
    certificate listed in *trusted_peers* (values are cert paths under root_path)."""
    if trusted_peers is None:
        return False
    for cert_name in trusted_peers.values():
        cert_path = self.root_path / cert_name
        pem_cert = x509.load_pem_x509_certificate(cert_path.read_bytes())
        # Fingerprint the DER form, matching how node ids are derived.
        der_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
        der_cert = x509.load_der_x509_certificate(der_bytes)
        if bytes32(der_cert.fingerprint(hashes.SHA256())) == peer.peer_node_id:
            self.log.debug(f"trusted node {peer.peer_node_id} {peer.peer_host}")
            return True
    return False
| 45.491453
| 119
| 0.625896
|
4a094296451a8a3646324a9909cc22d3b6f0bf06
| 2,181
|
py
|
Python
|
sdk/botservice/azure-mgmt-botservice/azure/mgmt/botservice/models/sms_channel_properties.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/botservice/azure-mgmt-botservice/azure/mgmt/botservice/models/sms_channel_properties.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/botservice/azure-mgmt-botservice/azure/mgmt/botservice/models/sms_channel_properties.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SmsChannelProperties(Model):
    """The parameters to provide for the Sms channel.

    All required parameters must be populated in order to send to Azure.

    :param phone: Required. The Sms phone
    :type phone: str
    :param account_sid: Required. The Sms account SID. Value only returned
     through POST to the action Channel List API, otherwise empty.
    :type account_sid: str
    :param auth_token: Required. The Sms auth token. Value only returned
     through POST to the action Channel List API, otherwise empty.
    :type auth_token: str
    :param is_validated: Whether this channel is validated for the bot
    :type is_validated: bool
    :param is_enabled: Required. Whether this channel is enabled for the bot
    :type is_enabled: bool
    """

    # msrest validation rules: these attributes must be set before serialization.
    _validation = {
        'phone': {'required': True},
        'account_sid': {'required': True},
        'auth_token': {'required': True},
        'is_enabled': {'required': True},
    }

    # Maps python attribute name -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'phone': {'key': 'phone', 'type': 'str'},
        'account_sid': {'key': 'accountSID', 'type': 'str'},
        'auth_token': {'key': 'authToken', 'type': 'str'},
        'is_validated': {'key': 'isValidated', 'type': 'bool'},
        'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        """Accept the mapped attributes as keyword arguments; all default to None."""
        super(SmsChannelProperties, self).__init__(**kwargs)
        self.phone = kwargs.get('phone', None)
        self.account_sid = kwargs.get('account_sid', None)
        self.auth_token = kwargs.get('auth_token', None)
        self.is_validated = kwargs.get('is_validated', None)
        self.is_enabled = kwargs.get('is_enabled', None)
| 38.946429
| 76
| 0.615314
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.