max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
src/helper.py | pqtrng/f2b | 0 | 6618251 | <reponame>pqtrng/f2b
import contextlib
import datetime
import glob
import math
import os
import pathlib
import shutil
import cv2
import matplotlib.pyplot as plt
import natsort
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_addons
import tqdm
from src.config import Config
def plot_idx(idx, dataframe):
    """Plot the image stored at a given dataframe row.

    Args:
        idx (int): Row index of the image.
        dataframe (DataFrame): Dataframe with a ``path`` column of image files.
    """
    # OpenCV loads in BGR order; convert so matplotlib shows true colors.
    bgr_image = cv2.imread(dataframe.iloc[idx].path)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb_image)
def plot_batch_images(batch_size, dataframe, offset=505):
    """Plot a batch of images in a 4-row grid, along with their labels.

    Args:
        batch_size (int): Number of images to plot (should be divisible by 4).
        dataframe (DataFrame): Dataframe holding image paths and labels.
        offset (int, optional): Row offset into the dataframe from which the
            batch is taken. Defaults to 505 (the value previously hard-coded;
            TODO confirm why this particular starting row was chosen).
    """
    fig = plt.figure(figsize=(20, batch_size))
    for idx in np.arange(batch_size):
        ax = fig.add_subplot(4, batch_size // 4, idx + 1, xticks=[], yticks=[])
        plot_idx(idx + offset, dataframe)
        row = dataframe.iloc[idx + offset]
        if "height" in dataframe.columns and "weight" in dataframe.columns:
            ax.set_title(
                "H:{:.1f} W:{:.1f} BMI:{:.2f}".format(row.height, row.weight, row.BMI)
            )
        else:
            # Bug fix: the original titled this subplot with dataframe.iloc[idx],
            # labelling a different row than the image actually plotted above.
            ax.set_title("BMI:{:.2f}".format(row.BMI))
def checking_dir(dir_name):
    """Ensure a directory exists, creating it (and any parents) if needed.

    Args:
        dir_name (str): Path of the directory to check/create.

    Returns:
        str: The same path, guaranteed to exist.
    """
    # exist_ok avoids the race between the existence check and the creation
    # that the original's "if not os.path.exists" guard was vulnerable to.
    os.makedirs(dir_name, exist_ok=True)
    return dir_name
def create_output_path(output_filepath, dataset_dir_name):
    """Create the train/valid output directories for a dataset.

    Args:
        output_filepath (str): Root output directory.
        dataset_dir_name (str): Name of the dataset.

    Returns:
        tuple: (train dir, train images dir, valid dir, valid images dir),
            each created if it did not already exist.
    """
    split_dirs = {}
    for split in ("train", "valid"):
        split_root = os.path.join(output_filepath, f"{dataset_dir_name}_{split}")
        images_dir = os.path.join(split_root, Config.default_images_directory_name)
        checking_dir(dir_name=split_root)
        checking_dir(dir_name=images_dir)
        split_dirs[split] = (split_root, images_dir)
    return (
        split_dirs["train"][0],
        split_dirs["train"][1],
        split_dirs["valid"][0],
        split_dirs["valid"][1],
    )
def get_subfolder_name(dir_name):
    """Get the single sub-directory inside the given directory.

    Args:
        dir_name (str): Directory to inspect.

    Returns:
        pathlib.Path: Absolute path of the unique sub-directory.

    Raises:
        ValueError: If there is not exactly one sub-directory.
    """
    # NOTE(review): the chdir side effect is kept for parity with callers that
    # may rely on the changed working directory — confirm before removing.
    os.chdir(dir_name)
    sub_dirs = [d for d in pathlib.Path(dir_name).iterdir() if d.is_dir()]
    if len(sub_dirs) == 1:
        return pathlib.Path(sub_dirs[0]).absolute()
    # Bug fix: the original claimed "more than one sub-directories" even when
    # none were found; report the actual count instead.
    raise ValueError(
        f"Expected exactly one sub-directory in {dir_name}, found {len(sub_dirs)}!"
    )
def get_annotation_file(annotation_file_path):
    """Get the annotation (csv) file of a dataset directory.

    Args:
        annotation_file_path (str): Path to the dataset directory.

    Returns:
        pathlib.Path | None: Absolute path to the csv file, or None if absent.

    Raises:
        ValueError: If more than one csv file is present.
    """
    os.chdir(annotation_file_path)
    # Scan the directory once; the original re-ran glob for every branch.
    csv_files = glob.glob("*.csv")
    if len(csv_files) == 1:
        return pathlib.Path(csv_files[0]).absolute()
    if not csv_files:
        return None
    raise ValueError("There are more than 1 annotation file in path!")
def get_all_files_in_dir(dir_name, extension=None, must_sort=False):
    """Find all files in a directory.

    Args:
        dir_name (str): Working directory (the process cwd is changed to it).
        extension (str, optional): File extension filter, e.g. ".csv".
            Defaults to None (match everything).
        must_sort (bool, optional): Natural-sort the result. Defaults to False.

    Returns:
        list: Names of matching files in the working directory.
    """
    # The module-level ``glob`` import suffices; the original redundantly
    # re-imported it inside the function on every call.
    os.chdir(dir_name)
    pattern = "*" + extension if extension else "*"
    all_files = glob.glob(pattern)
    return natsort.natsorted(all_files) if must_sort else all_files
def get_images_name(dirname, column_name=None):
    """Get all images in a directory as a two-column dataframe.

    Args:
        dirname (str): Directory to scan.
        column_name (list, optional): Column names for the output dataframe.
            Defaults to [Config.default_path_column_name, Config.x_col].

    Returns:
        DataFrame: One row per image: its absolute path and its file name.
    """
    # Avoid a mutable default argument (the original's list literal was built
    # once at definition time); resolve the Config-based default per call.
    if column_name is None:
        column_name = [Config.default_path_column_name, Config.x_col]
    all_files = get_all_files_in_dir(dir_name=dirname, must_sort=True)
    file_paths = [pathlib.Path(file_name).absolute() for file_name in all_files]
    return pd.DataFrame(list(zip(file_paths, all_files)), columns=column_name)
def create_dataframe(annotation_file_path, images_dir_name):
    """Build a dataframe joining image files with their annotation records.

    Args:
        annotation_file_path (str): Directory containing the annotation csv.
        images_dir_name (str): Name of the image sub-directory.

    Returns:
        DataFrame: Image paths/names merged (by position) with the
            annotation columns, all column names lower-cased.
    """
    annotations = pd.read_csv(get_annotation_file(annotation_file_path))
    # Drop any auto-generated "Unnamed" index columns left over from the csv.
    wanted_columns = ~annotations.columns.str.contains("^Unnamed")
    annotations = annotations.loc[:, wanted_columns]
    images = get_images_name(os.path.join(annotation_file_path, images_dir_name))
    # Row i of the image listing corresponds to row i of the annotations.
    full_df = images.merge(annotations, left_index=True, right_index=True)
    full_df.columns = full_df.columns.str.lower()
    print(full_df.head())
    return full_df
def split_dataframe(dataframe, first_dest_path, second_dest_path):
    """Split a dataframe into two sets, stratified by gender prefix.

    Sampling is done separately for images whose name starts with 'f'
    (female) and 'm' (male) so both sets keep the gender ratio.

    Args:
        dataframe (DataFrame): Dataframe to split (``image`` column expected).
        first_dest_path (str): Directory to save the first part's csv.
        second_dest_path (str): Directory to save the second part's csv.

    Returns:
        tuple: The two resulting dataframes.
    """
    sampled_parts = [
        dataframe[dataframe.image.str.contains(prefix)].sample(
            frac=Config.train_test_split,
            random_state=Config.seed,
        )
        for prefix in ("^f", "^m")
    ]
    first_dataframe = pd.concat(sampled_parts)
    second_dataframe = dataframe.drop(first_dataframe.index)
    print(
        f"Splitting dataframe into \n\tfirst_set: {len(first_dataframe)} files. \n\t\tNumber of males: {len(first_dataframe[first_dataframe.image.str.contains('^m')])} files.\n\t\tNumber of females: {len(first_dataframe[first_dataframe.image.str.contains('^f')])} files.\n\tsecond_set: {len(second_dataframe)} files.\n\t\tNumber of males: {len(second_dataframe[second_dataframe.image.str.contains('^m')])} files. \n\t\tNumber of females: {len(second_dataframe[second_dataframe.image.str.contains('^f')])} files."
    )
    for part, dest in (
        (first_dataframe, first_dest_path),
        (second_dataframe, second_dest_path),
    ):
        part.to_csv(
            os.path.join(dest, Config.default_annotation_file_name),
            index=False,
            header=True,
        )
    return first_dataframe, second_dataframe
def copy_image_from_dataframe(
    destination, dataframe, column_name=Config.default_path_column_name
):
    """Copy every image listed in the dataframe into a destination directory.

    Args:
        destination (str): Directory to copy into.
        dataframe (DataFrame): Dataframe holding the image paths.
        column_name (str, optional): Column holding each image's path.
            Defaults to Config.default_path_column_name.
    """
    # Progress bar over every row of the path column.
    progress = tqdm.tqdm(dataframe[column_name], total=len(dataframe.index))
    for source_file in progress:
        shutil.copy(src=source_file, dst=destination)
def get_dataset_info(
    dataset, purpose, data_dir_name=Config.default_images_directory_name
):
    """Load a dataset's annotation dataframe and locate its image directory.

    Args:
        dataset (str): Name of the dataset.
        purpose (str): Purpose suffix of the dataset, e.g. "train" or "valid".
        data_dir_name (str, optional): Name of the directory containing the
            images. Defaults to Config.default_images_directory_name.

    Returns:
        DataFrame: Annotations read from the dataset's csv file.
        str: Path of the images directory.
    """
    dataset_root = os.path.join(Config.processed_data_path, f"{dataset}_{purpose}")
    annotation_csv = os.path.join(dataset_root, Config.default_annotation_file_name)
    return pd.read_csv(annotation_csv), os.path.join(dataset_root, data_dir_name)
def keras_augment_func(x):
    """Augment an image: random crop, random horizontal flip, ResNet50 scaling.

    Both random ops use the same fixed stateless seed from Config, so the
    augmentation is reproducible across runs.

    Args:
        x (Tensor): Image to augment.

    Returns:
        Tensor: Augmented, ResNet50-preprocessed image.
    """
    fixed_seed = (Config.seed, Config.seed)
    out = tf.image.stateless_random_crop(
        value=x,
        size=[Config.image_default_size, Config.image_default_size, 3],
        seed=fixed_seed,
    )
    out = tf.image.stateless_random_flip_left_right(image=out, seed=fixed_seed)
    return tf.keras.applications.resnet50.preprocess_input(out)
def get_image_processor(purpose, augment_func=keras_augment_func):
    """Create the image processor corresponding to a dataset purpose.

    Args:
        purpose (str): Purpose of dataset: 'train', 'valid' or 'test'.
        augment_func (callable, optional): Preprocessing used for training.
            Defaults to keras_augment_func.

    Raises:
        ValueError: In case purpose is unknown.

    Returns:
        ImageDataGenerator: The configured processor.
    """
    if purpose == "train":
        return tf.keras.preprocessing.image.ImageDataGenerator(
            preprocessing_function=augment_func,
        )
    # 'valid' and 'test' share the same non-augmenting preprocessing, so the
    # original's duplicated branches are merged.
    if purpose in ("valid", "test"):
        return tf.keras.preprocessing.image.ImageDataGenerator(
            preprocessing_function=tf.keras.applications.resnet50.preprocess_input
        )
    # Bug fix: the original message suggested 'validate', a value this
    # function never accepts; tell the caller the value that actually works.
    raise ValueError(
        "Unknown purpose. Please set purpose to 'train', 'valid' or 'test'."
    )
def create_generator(dataframe, img_dir, purpose, processor, seed=None):
    """Create a data generator for the model.

    Args:
        dataframe (DataFrame): Input data.
        img_dir (str): Name of the images directory.
        purpose (str): One of 'train', 'valid', 'test'.
        processor (ImageDataGenerator): Processor for the dataset.
        seed (int, optional): Random seed for reproducing experiments.
            Defaults to None.

    Raises:
        ValueError: Raised in case of a wrong purpose.

    Returns:
        DataFrameIterator: A data generator for the given data and purpose.
    """
    if purpose not in ("train", "valid", "test"):
        raise ValueError("Unknown purpose")
    # Only the training data is shuffled; validation/test keep file order.
    will_shuffle = purpose == "train"
    return processor.flow_from_dataframe(
        dataframe=dataframe,
        directory=img_dir,
        x_col=Config.x_col,
        y_col=Config.y_col,
        class_mode=Config.class_mode,
        color_mode=Config.color_mode,
        target_size=(Config.image_default_size, Config.image_default_size),
        batch_size=Config.batch_size,
        seed=seed,
        shuffle=will_shuffle,
    )
def plot_image_from_generator(generator, number_imgs_to_show=9):
    """Plot the first images of one generator batch in a square grid.

    Args:
        generator (Iterator): Yields (images, labels) batches.
        number_imgs_to_show (int, optional): Number of images to plot.
            Defaults to 9.
    """
    print("Plotting images...")
    # Smallest square grid that fits the requested number of images.
    grid_side = int(math.ceil(math.sqrt(number_imgs_to_show)))
    images, _ = next(generator)
    for position in range(1, number_imgs_to_show + 1):
        plt.subplot(grid_side, grid_side, position)
        plt.imshow((images[position - 1] * 255).astype(np.uint8))
    plt.show()
def set_training_type_for_model(model, training_type, num_of_untrained_layers):
    """Freeze or unfreeze model layers according to the training type.

    Args:
        model: Model whose layers are updated in place.
        training_type (str): "top" freezes the first
            ``num_of_untrained_layers`` layers; "all" makes every layer
            trainable.
        num_of_untrained_layers (int): Number of layers to freeze for "top".

    Raises:
        ValueError: Raised if the training type is unknown.
    """
    print(f"Training model with '{training_type}' type")
    if training_type == "top":
        for frozen_layer in model.layers[:num_of_untrained_layers]:
            frozen_layer.trainable = False
    elif training_type == "all":
        for layer in model.layers:
            layer.trainable = True
    else:
        raise ValueError(
            f"{training_type} is not available. Please choose between 'top' and 'all'"
        )
def compile_model(model, loss, optimizer, metrics):
    """Compile a model for training with a single target metric.

    Args:
        model: Model to compile.
        loss: Loss function.
        optimizer: Optimizer used for training.
        metrics: Target metric (wrapped in a list as Keras expects).
    """
    model.compile(optimizer=optimizer, loss=loss, metrics=[metrics])
def create_train_log_path(
    training_type="top", dataset="original", output_network_type="current"
):
    """Build the checkpoint, tensorboard-log and trained-model paths.

    Args:
        training_type (str, optional): Type of training. Defaults to "top".
        dataset (str, optional): Name of dataset. Defaults to "original".
        output_network_type (str, optional): Type of output network.
            Defaults to "current".

    Returns:
        tuple: (checkpoint path, tensorboard log dir, trained-model path),
            each rooted at the corresponding Config base path.
    """
    run_subpath = (training_type, dataset, output_network_type)
    checkpoint_path = os.path.join(Config.checkpoint_model_path, *run_subpath)
    tensorboard_log_dir = os.path.join(Config.log_path, *run_subpath)
    model_path = os.path.join(Config.trained_model_path, *run_subpath)
    return checkpoint_path, tensorboard_log_dir, model_path
def get_best_model(dir_name):
    """Get the best model file from a checkpoint folder.

    File names are expected to end with ":<metric>.h5" (e.g. "...:0.42.h5");
    the file with the lowest metric value is considered best.

    Args:
        dir_name (str): Directory searched recursively for .h5 files.

    Returns:
        str: File name of the best model.

    Raises:
        ValueError: If no .h5 file is found in the directory.
    """
    h5_files = []
    for _, _, files in os.walk(dir_name):
        # Match the suffix exactly; the original's substring test would also
        # accept names that merely contain ".h5" anywhere.
        h5_files.extend(f for f in files if f.endswith(".h5"))
    if not h5_files:
        # Clearer than the IndexError the original raised on an empty folder.
        raise ValueError(f"No .h5 model files found in {dir_name}")
    return min(h5_files, key=lambda name: float(name.split(":")[-1][:-3]))
def save_trained_model(training_type, dataset, output_network_type):
    """Move the best checkpointed model into the trained-model folder.

    Args:
        training_type (str): Type of training.
        dataset (str): Name of dataset.
        output_network_type (str): Type of output network.

    Returns:
        str: Timestamp prefixed to the saved file name.
        str: Metric value parsed from the model file name, for later retrieval.
    """
    run_subpath = (training_type, dataset, output_network_type)
    source = os.path.join(Config.checkpoint_model_path, *run_subpath)
    destination = checking_dir(os.path.join(Config.trained_model_path, *run_subpath))
    best_model = get_best_model(dir_name=source)
    time_slot = datetime.datetime.now().strftime("%Y.%m.%d.%H.%M")
    print(f"Moving best model {best_model} from {source} to {destination}")
    os.rename(
        os.path.join(source, best_model),
        os.path.join(destination, time_slot + "-" + best_model),
    )
    # The metric is the text after the final ':' with the ".h5" suffix dropped.
    return time_slot, best_model.split(":")[-1][:-3]
def save_training_log(
    time_slot, training_type, dataset, output_network_type, metric, history, save_path
):
    """Save a training history as a csv file.

    Args:
        time_slot (str): Timestamp used in the file name.
        training_type (str): Type of training.
        dataset (str): Name of dataset.
        output_network_type (str): Type of output network.
        metric (str): Target metric.
        history (History): Object whose ``history`` dict holds the logs.
        save_path (str): Directory to save into, normally the model path.
    """
    file_stem = (
        f"{time_slot}-type:{training_type}-data:{dataset}"
        f"-network:{output_network_type}-metric:{metric}-"
    )
    log_dataframe = pd.DataFrame.from_dict(history.history)
    log_dataframe.to_csv(os.path.join(save_path, file_stem + "history.csv"), index=False)
def clean_up_dir(path_to_dir):
    """Delete a directory tree and everything inside it.

    Failures (e.g. the directory does not exist) are reported but not raised.

    Args:
        path_to_dir (str): Path to the directory.
    """
    try:
        print(f"\n\n\tClean up{path_to_dir}\n\n")
        shutil.rmtree(path_to_dir)
    except OSError as err:
        print("Error: %s - %s." % (err.filename, err.strerror))
def get_trained_model(training_type, dataset, output_network_type):
    """Load the trained model saved for the given configuration.

    Args:
        training_type (str): Type of training.
        dataset (str): Name of dataset.
        output_network_type (str): Type of output network.

    Returns:
        Model: The loaded model.
        str: Directory the model was loaded from.
    """
    dir_name = os.path.join(
        Config.trained_model_path, training_type, dataset, output_network_type
    )
    # Assumes a single saved .h5 file per configuration; the first match wins.
    h5_name = get_all_files_in_dir(dir_name=dir_name, extension=".h5")[0]
    model = tf.keras.models.load_model(
        filepath=os.path.join(dir_name, h5_name),
        custom_objects={"Addons>SGDW": tensorflow_addons.optimizers.SGDW},
    )
    return model, dir_name
def draw_label(
    image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1, thickness=2
):
    """Draw a text label over a filled rectangle at the given point.

    Args:
        image (ndarray): Image drawn on in place.
        point (tuple): (x, y) position of the label.
        label (str): Text to draw, e.g. a BMI value.
        font (int, optional): OpenCV font. Defaults to cv2.FONT_HERSHEY_SIMPLEX.
        font_scale (int, optional): Defaults to 1.
        thickness (int, optional): Defaults to 2.
    """
    (text_width, text_height), _ = cv2.getTextSize(label, font, font_scale, thickness)
    x, y = point
    # Blue filled background box sized to the text, then white text on top.
    cv2.rectangle(
        image, (x, y - text_height), (x + text_width, y), (255, 0, 0), cv2.FILLED
    )
    cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness)
@contextlib.contextmanager
def video_capture(*args, **kwargs):
    """Context manager yielding a cv2.VideoCapture, released on exit.

    Args:
        *args: Forwarded to cv2.VideoCapture (e.g. a device index or file path).
        **kwargs: Forwarded to cv2.VideoCapture.

    Yields:
        cv2.VideoCapture: The opened capture object.
    """
    cap = cv2.VideoCapture(*args, **kwargs)
    try:
        yield cap
    finally:
        # Always release the device, even if the body raises.
        cap.release()
def yield_images_from_camera():
    """Yield frames from the default camera (device 0) indefinitely.

    A 640x480 frame size is requested from the driver before capturing.

    Yields:
        ndarray: The next captured frame.

    Raises:
        RuntimeError: If a frame cannot be captured.
    """
    with video_capture(0) as cap:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            ret, img = cap.read()
            if not ret:
                raise RuntimeError("Failed to capture image")
            yield img
| import contextlib
import datetime
import glob
import math
import os
import pathlib
import shutil
import cv2
import matplotlib.pyplot as plt
import natsort
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_addons
import tqdm
from src.config import Config
def plot_idx(idx, dataframe):
"""Plot a single image at index.
Args:
idx (Int): Index of image
dataframe (DataFrame): Dataframe
"""
original_image = cv2.imread(dataframe.iloc[idx].path)
reverse_color = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
plt.imshow(reverse_color)
def plot_batch_images(batch_size, dataframe):
"""Plot 16 images in the batch, along with the corresponding labels.
Args:
batch_size (Int): [description]
dataframe (Dataframe): [description]
"""
fig = plt.figure(figsize=(20, batch_size))
for idx in np.arange(batch_size):
ax = fig.add_subplot(4, batch_size // 4, idx + 1, xticks=[], yticks=[])
plot_idx(idx + 505, dataframe)
if "height" in dataframe.columns and "weight" in dataframe.columns:
ax.set_title(
"H:{:.1f} W:{:.1f} BMI:{:.2f}".format(
dataframe.iloc[idx + 505].height,
dataframe.iloc[idx + 505].weight,
dataframe.iloc[idx + 505].BMI,
)
)
else:
ax.set_title("BMI:{:.2f}".format(dataframe.iloc[idx].BMI))
def checking_dir(dir_name):
"""Checking if a directory is existed, if not create one.
Args:
dir_name (str): parent directory
folder (str, optional): Name of folder. Defaults to "data".
Returns:
dir (str): checked directory
"""
if not os.path.exists(dir_name):
# print(f"{dir_name} is not existed. Creating it!")
os.makedirs(dir_name)
return dir_name
def create_output_path(output_filepath, dataset_dir_name):
"""Create output directory.
Args:
output_filepath (str): output directory
dataset_dir_name (str): name of dataset
Returns:
str: path of created directories
"""
train_output_filepath = os.path.join(output_filepath, dataset_dir_name + "_train")
valid_output_filepath = os.path.join(output_filepath, dataset_dir_name + "_valid")
train_images_output_filepath = os.path.join(
train_output_filepath, Config.default_images_directory_name
)
valid_images_output_filepath = os.path.join(
valid_output_filepath, Config.default_images_directory_name
)
for dir_name in [
train_output_filepath,
valid_output_filepath,
train_images_output_filepath,
valid_images_output_filepath,
]:
_ = checking_dir(dir_name=dir_name)
return (
train_output_filepath,
train_images_output_filepath,
valid_output_filepath,
valid_images_output_filepath,
)
def get_subfolder_name(dir_name):
"""Get the sub-directory inside the given directory.
Args:
dir_name (str): given directory to check
Returns:
str: path of sub-directory if exist else raise ValueError
"""
os.chdir(dir_name)
sub_dirs = [d for d in pathlib.Path(dir_name).iterdir() if d.is_dir()]
if len(sub_dirs) == 1:
return pathlib.Path(sub_dirs[0]).absolute()
else:
raise ValueError(f"There are more than one sub-directories in {dir_name}!")
def get_annotation_file(annotation_file_path):
"""Get the annotation file of dataset.
Args:
annotation_file_path (str): Path to the dataset
Returns:
str: absolute path to the file
"""
os.chdir(annotation_file_path)
if len(glob.glob("*.csv")) == 1:
return pathlib.Path(glob.glob("*.csv")[0]).absolute()
elif len(glob.glob("*.csv")) == 0:
return None
else:
raise ValueError("There are more than 1 annotation file in path!")
def get_all_files_in_dir(dir_name, extension=None, must_sort=False):
"""Find all file in a directory.
Args:
dir_name (str): Working directory
extension (string): extension of file
Returns:
all_files (list): a list names of files in working directory
"""
import glob
os.chdir(dir_name)
file_name = "*" + extension if extension else "*"
all_files = glob.glob(file_name)
return natsort.natsorted(all_files) if must_sort else all_files
def get_images_name(
dirname, column_name=[Config.default_path_column_name, Config.x_col]
):
"""Get all images in a directory and return.
Args:
dirname (str): name of directory to check
column_name (list, optional): Name of columns to create output dataframe. Defaults to [Config.default_path_column_name, Config.x_col].
Returns:
dataframe: An dataframe with information of all image in directory
"""
all_files = get_all_files_in_dir(dir_name=dirname, must_sort=True)
# print(f"Total {len(all_files)} photos.")
file_paths = [pathlib.Path(file_name).absolute() for file_name in all_files]
image_path_name_df = pd.DataFrame(
list(zip(file_paths, all_files)), columns=column_name
)
return image_path_name_df
def create_dataframe(annotation_file_path, images_dir_name):
"""Create dataframe with images directory and annotation file.
Args:
annotation_file_path (str): path to the annotation file
images_dirname (str): name of image directory
Returns:
dataframe: Dataframe from given infomation
"""
annotation_file = get_annotation_file(annotation_file_path)
annotation_dataframe = pd.read_csv(annotation_file)
# Get rid of Unnamed colum
annotation_dataframe = annotation_dataframe.loc[
:, ~annotation_dataframe.columns.str.contains("^Unnamed")
]
image_df = get_images_name(os.path.join(annotation_file_path, images_dir_name))
full_df = image_df.merge(annotation_dataframe, left_index=True, right_index=True)
# Rename all columns to lower case
full_df.columns = full_df.columns.str.lower()
# print(f"Full dataframe has shape: {full_df.shape}.")
print(full_df.head())
return full_df
def split_dataframe(dataframe, first_dest_path, second_dest_path):
"""Split a dataframe into 2 set.
Args:
dataframe (dataframe): Dataframe to split
first_dest_path (str): Path to save first part
second_dest_path (str): Path to save second part
Returns:
dataframe: 2 new dataframes
"""
train_df_male = dataframe[dataframe.image.str.contains("^m")].sample(
frac=Config.train_test_split,
random_state=Config.seed,
)
train_df_female = dataframe[dataframe.image.str.contains("^f")].sample(
frac=Config.train_test_split,
random_state=Config.seed,
)
first_dataframe = pd.concat([train_df_female, train_df_male])
second_dataframe = dataframe.drop(first_dataframe.index)
print(
f"Splitting dataframe into \n\tfirst_set: {len(first_dataframe)} files. \n\t\tNumber of males: {len(first_dataframe[first_dataframe.image.str.contains('^m')])} files.\n\t\tNumber of females: {len(first_dataframe[first_dataframe.image.str.contains('^f')])} files.\n\tsecond_set: {len(second_dataframe)} files.\n\t\tNumber of males: {len(second_dataframe[second_dataframe.image.str.contains('^m')])} files. \n\t\tNumber of females: {len(second_dataframe[second_dataframe.image.str.contains('^f')])} files."
)
first_dataframe.to_csv(
os.path.join(first_dest_path, Config.default_annotation_file_name),
index=False,
header=True,
)
second_dataframe.to_csv(
os.path.join(second_dest_path, Config.default_annotation_file_name),
index=False,
header=True,
)
return first_dataframe, second_dataframe
def copy_image_from_dataframe(
destination, dataframe, column_name=Config.default_path_column_name
):
"""Copy images with information from dataframe to destination.
Args:
destination (str): Destination to copy
dataframe (dataframe): Dataframe contains images information
column_name (str, optional): Name of column contains image's path. Defaults to Config.default_path_column_name.
"""
for file_name in tqdm.tqdm(dataframe[column_name], total=len(dataframe.index)):
shutil.copy(src=file_name, dst=destination)
def get_dataset_info(
dataset, purpose, data_dir_name=Config.default_images_directory_name
):
"""Get dataset information from a given folder and purpose.
Args:
dataset (str): Name of dataset
purpose (str): Purpose of dataset
data_dir_name (str, optional): Name of directory which contains images. Defaults to Config.default_images_directory_name.
Returns:
dataframe: dataframe from from given info
str : path of images directory
"""
dateset_path = os.path.join(Config.processed_data_path, dataset + "_" + purpose)
annotation_dataframe = pd.read_csv(
os.path.join(dateset_path, Config.default_annotation_file_name)
)
images_dir_name = os.path.join(dateset_path, data_dir_name)
return annotation_dataframe, images_dir_name
def keras_augment_func(x):
"""Pre processing image.
Args:
x (Tensor): Image for pre processing
Returns:
Tensor: Processed image
"""
cropped_image = tf.image.stateless_random_crop(
value=x,
size=[Config.image_default_size, Config.image_default_size, 3],
seed=(Config.seed, Config.seed),
)
flipped_image = tf.image.stateless_random_flip_left_right(
image=cropped_image, seed=(Config.seed, Config.seed)
)
augmented_image = tf.keras.applications.resnet50.preprocess_input(flipped_image)
return augmented_image
def get_image_processor(purpose, augment_func=keras_augment_func):
"""Create correspond image processor for type of dataset.
Args:
purpose (str): Purpose of dataset, can be 'train', 'valid', 'test'
Raises:
ValueError: In case purpose in not specified or unknown.
Returns:
ImageDataGenerator: processor
"""
if purpose == "train":
return tf.keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=augment_func,
)
elif purpose == "valid":
return tf.keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=tf.keras.applications.resnet50.preprocess_input
)
elif purpose == "test":
return tf.keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=tf.keras.applications.resnet50.preprocess_input
)
else:
raise ValueError(
"Unknown purpose. Please set purpose to 'train', 'validate' or 'test'."
)
def create_generator(dataframe, img_dir, purpose, processor, seed=None):
"""Create a data generator for model.
Args:
dataframe (dataframe): dataframe of input data
img_dir (str): name of images directory
purpose (str): purpose to use this data
processor (Processor): Processor for dataset
seed (int, optional): Random seeding number for reproducing experiment. Defaults to None.
Raises:
ValueError: raise in case of wrong purpose
Returns:
DataGenerator: a data generator for given data and purpose
"""
if purpose == "train":
will_shuffle = True
elif purpose == "valid":
will_shuffle = False
elif purpose == "test":
will_shuffle = False
else:
raise ValueError("Unknown purpose")
generator = processor.flow_from_dataframe(
dataframe=dataframe,
directory=img_dir,
x_col=Config.x_col,
y_col=Config.y_col,
class_mode=Config.class_mode,
color_mode=Config.color_mode,
target_size=(Config.image_default_size, Config.image_default_size),
batch_size=Config.batch_size,
seed=seed,
shuffle=will_shuffle,
)
return generator
def plot_image_from_generator(generator, number_imgs_to_show=9):
"""Plotting data from a generator.
Args:
generator (ImageGenerator): Generator to plot
number_imgs_to_show (int, optional): Number of image to plot. Defaults to 9.
"""
print("Plotting images...")
n_rows_cols = int(math.ceil(math.sqrt(number_imgs_to_show)))
plot_index = 1
x_batch, _ = next(generator)
while plot_index <= number_imgs_to_show:
plt.subplot(n_rows_cols, n_rows_cols, plot_index)
plt.imshow((x_batch[plot_index - 1] * 255).astype(np.uint8))
plot_index += 1
plt.show()
def set_training_type_for_model(model, training_type, num_of_untrained_layers):
"""Set training type for model. Train all layer or train only part of it.
Args:
model (model): Model to train
training_type (str): Type of training, "top" or "all"
Raises:
ValueError: Raise if training type is unknown
"""
print(f"Training model with '{training_type}' type")
if training_type == "top":
for l in model.layers[:num_of_untrained_layers]:
l.trainable = False
elif training_type == "all":
for l in model.layers:
l.trainable = True
else:
raise ValueError(
f"{training_type} is not available. Please choose between 'top' and 'all'"
)
def compile_model(model, loss, optimizer, metrics):
"""Compile model for training.
Args:
model : Model to compile
loss : Loss function
optimizer : Optimizer of training
metrics : target metric
"""
model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
def create_train_log_path(
training_type="top", dataset="original", output_network_type="current"
):
checkpoint_path = os.path.join(
Config.checkpoint_model_path, training_type, dataset, output_network_type
)
tensorboard_log_dir = os.path.join(
Config.log_path, training_type, dataset, output_network_type
)
model_path = os.path.join(
Config.trained_model_path, training_type, dataset, output_network_type
)
return checkpoint_path, tensorboard_log_dir, model_path
def get_best_model(dir_name):
"""Get the best model from model checkpoint folder. Normally it's the last
saved model in folder.
Args:
dir (String, optional): Directory to get the best trained model. Defaults to Config.checkpoint_model_path.
Returns:
str: name of best model
"""
h5_files = []
for _, _, files in os.walk(dir_name):
for file_name in files:
if ".h5" in file_name:
h5_files.append(file_name)
h5_files = sorted(h5_files, key=lambda x: float(x.split(":")[-1][:-3]))
return h5_files[0]
def save_trained_model(training_type, dataset, output_network_type):
"""Save the best model after training to trained folder for later evaluate.
Args:
training_type (str): type of training
dataset (str): name of dataset
output_network_type (str): type of output network
Returns:
str: time of saved
str: target value for retrieving later
"""
source = os.path.join(
Config.checkpoint_model_path, training_type, dataset, output_network_type
)
destination = checking_dir(
os.path.join(
Config.trained_model_path, training_type, dataset, output_network_type
)
)
best_model = get_best_model(dir_name=source)
time_slot = datetime.datetime.now().strftime("%Y.%m.%d.%H.%M")
print(f"Moving best model {best_model} from {source} to {destination}")
os.rename(
os.path.join(source, best_model),
os.path.join(destination, time_slot + "-" + best_model),
)
return time_slot, best_model.split(":")[-1][:-3]
def save_training_log(
time_slot, training_type, dataset, output_network_type, metric, history, save_path
):
"""Save training history as csv file.
Args:
time_slot (str): time of saving
training_type (str): type of training
dataset (str): name of datasset
output_network_type (str): type of output network
metric (str): target metric
history (history): Logs return from training
save_path (str): save path, normally with saved model path
"""
history_file_name = f"{time_slot}-type:{training_type}-data:{dataset}-network:{output_network_type}-metric:{metric}-"
pd.DataFrame.from_dict(history.history).to_csv(
os.path.join(
save_path,
history_file_name + "history.csv",
),
index=False,
)
def clean_up_dir(path_to_dir):
"""Delete all files in the directory.
Args:
path_to_dir (str): Path to directory
"""
try:
print(f"\n\n\tClean up{path_to_dir}\n\n")
shutil.rmtree(path_to_dir)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
def get_trained_model(training_type, dataset, output_network_type):
"""Get trained model.
Args:
training_type (str): Type of training
dataset (str): name of dataset
output_network_type (str): Type of output network
Returns:
model: Trained model
str: path to saved model
"""
dir_name = os.path.join(
Config.trained_model_path, training_type, dataset, output_network_type
)
file_name = get_all_files_in_dir(dir_name=dir_name, extension=".h5")[0]
model = tf.keras.models.load_model(
filepath=os.path.join(dir_name, file_name),
custom_objects={"Addons>SGDW": tensorflow_addons.optimizers.SGDW},
)
return model, dir_name
def draw_label(
image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1, thickness=2
):
"""Draw label on image.
Args:
image (Image): Input image
point (Tuple): Index of faces
label (str): BMI value
font (str, optional): Defaults to cv2.FONT_HERSHEY_SIMPLEX.
font_scale (int, optional): Defaults to 1.
thickness (int, optional): Defaults to 2.
"""
size = cv2.getTextSize(label, font, font_scale, thickness)[0]
x, y = point
cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness)
@contextlib.contextmanager
def video_capture(*args, **kwargs):
cap = cv2.VideoCapture(*args, **kwargs)
try:
yield cap
finally:
cap.release()
def yield_images_from_camera():
with video_capture(0) as cap:
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
ret, img = cap.read()
if not ret:
raise RuntimeError("Failed to capture image")
yield img | en | 0.641146 | Plot a single image at index. Args: idx (Int): Index of image dataframe (DataFrame): Dataframe Plot 16 images in the batch, along with the corresponding labels. Args: batch_size (Int): [description] dataframe (Dataframe): [description] Checking if a directory is existed, if not create one. Args: dir_name (str): parent directory folder (str, optional): Name of folder. Defaults to "data". Returns: dir (str): checked directory # print(f"{dir_name} is not existed. Creating it!") Create output directory. Args: output_filepath (str): output directory dataset_dir_name (str): name of dataset Returns: str: path of created directories Get the sub-directory inside the given directory. Args: dir_name (str): given directory to check Returns: str: path of sub-directory if exist else raise ValueError Get the annotation file of dataset. Args: annotation_file_path (str): Path to the dataset Returns: str: absolute path to the file Find all file in a directory. Args: dir_name (str): Working directory extension (string): extension of file Returns: all_files (list): a list names of files in working directory Get all images in a directory and return. Args: dirname (str): name of directory to check column_name (list, optional): Name of columns to create output dataframe. Defaults to [Config.default_path_column_name, Config.x_col]. Returns: dataframe: An dataframe with information of all image in directory # print(f"Total {len(all_files)} photos.") Create dataframe with images directory and annotation file. Args: annotation_file_path (str): path to the annotation file images_dirname (str): name of image directory Returns: dataframe: Dataframe from given infomation # Get rid of Unnamed colum # Rename all columns to lower case # print(f"Full dataframe has shape: {full_df.shape}.") Split a dataframe into 2 set. 
Args: dataframe (dataframe): Dataframe to split first_dest_path (str): Path to save first part second_dest_path (str): Path to save second part Returns: dataframe: 2 new dataframes Copy images with information from dataframe to destination. Args: destination (str): Destination to copy dataframe (dataframe): Dataframe contains images information column_name (str, optional): Name of column contains image's path. Defaults to Config.default_path_column_name. Get dataset information from a given folder and purpose. Args: dataset (str): Name of dataset purpose (str): Purpose of dataset data_dir_name (str, optional): Name of directory which contains images. Defaults to Config.default_images_directory_name. Returns: dataframe: dataframe from from given info str : path of images directory Pre processing image. Args: x (Tensor): Image for pre processing Returns: Tensor: Processed image Create correspond image processor for type of dataset. Args: purpose (str): Purpose of dataset, can be 'train', 'valid', 'test' Raises: ValueError: In case purpose in not specified or unknown. Returns: ImageDataGenerator: processor Create a data generator for model. Args: dataframe (dataframe): dataframe of input data img_dir (str): name of images directory purpose (str): purpose to use this data processor (Processor): Processor for dataset seed (int, optional): Random seeding number for reproducing experiment. Defaults to None. Raises: ValueError: raise in case of wrong purpose Returns: DataGenerator: a data generator for given data and purpose Plotting data from a generator. Args: generator (ImageGenerator): Generator to plot number_imgs_to_show (int, optional): Number of image to plot. Defaults to 9. Set training type for model. Train all layer or train only part of it. Args: model (model): Model to train training_type (str): Type of training, "top" or "all" Raises: ValueError: Raise if training type is unknown Compile model for training. 
Args: model : Model to compile loss : Loss function optimizer : Optimizer of training metrics : target metric Get the best model from model checkpoint folder. Normally it's the last saved model in folder. Args: dir (String, optional): Directory to get the best trained model. Defaults to Config.checkpoint_model_path. Returns: str: name of best model Save the best model after training to trained folder for later evaluate. Args: training_type (str): type of training dataset (str): name of dataset output_network_type (str): type of output network Returns: str: time of saved str: target value for retrieving later Save training history as csv file. Args: time_slot (str): time of saving training_type (str): type of training dataset (str): name of datasset output_network_type (str): type of output network metric (str): target metric history (history): Logs return from training save_path (str): save path, normally with saved model path Delete all files in the directory. Args: path_to_dir (str): Path to directory Get trained model. Args: training_type (str): Type of training dataset (str): name of dataset output_network_type (str): Type of output network Returns: model: Trained model str: path to saved model Draw label on image. Args: image (Image): Input image point (Tuple): Index of faces label (str): BMI value font (str, optional): Defaults to cv2.FONT_HERSHEY_SIMPLEX. font_scale (int, optional): Defaults to 1. thickness (int, optional): Defaults to 2. | 2.350398 | 2 |
DFT_1d/tests/non_interacting_solver_test.py | Chriscrosser3310/Kohn_Sham_DFT_1d | 2 | 6618252 | """
.. _single_electron_test:
Test for Single Electron Module
###############################
.. todo::
* Authors? -RJM
* Docs need love
* Should validate correct instiliation/completion. Right now just spits printouts. -RJM
* Ideally a single test script would test EVERY module, and can be easily run after each git commit. May need to make a another test script which calls this one and all others. -RJM
* Has this been linted yet? -RJM
"""
import sys
import os
currentpath = os.path.abspath('.')
sys.path.insert(0, os.path.dirname(currentpath))
import non_interacting_solver, ext_potentials
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial.polynomial import polyfit
from scipy import stats
import functools
import time
import warnings
def get_plotting_params():
""" Convergence plotting parameters. """
params = {'mathtext.default': 'default'}
plt.rcParams.update(params)
plt.rcParams['axes.axisbelow'] = True
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 9
fig_size[1] = 6
plt.rcParams["figure.figsize"] = fig_size
fig, ax = plt.subplots()
return fig, ax
def rsquared(x, y):
""" Return R^2 where x and y are array-like."""
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return r_value ** 2
def convergence_test(Solver,
test_range,
potential_fn,
boundary_condition,
n_point_stencil,
k_point=None,
num_grids_list=None,
analytical_energy=None,
plot_index=''):
"""Description.
.. todo::
* Please fill out docs
Args:
...
Returns:
...
"""
# start timer
t0 = time.time()
if num_grids_list is None:
num_grids_list = [40, 80, 120, 160, 200, 400, 600, 800, 1000]
# error list for plotting
E_abs_error = []
# get the name of potential function in order to save to local machine
try:
func_name = potential_fn.__name__
except AttributeError:
func_name = potential_fn.func.__name__
# choose whether include endpoints
if boundary_condition == 'periodic':
endpoint = False
else:
endpoint = True
# obtain lowest eigenvalue (level = 1) from exact/analytical result.
# When the exact answer is not known, simply run the solver with a large
# grid, e.g. N = 5000 to obtain the "exact" g.s. energy
if analytical_energy:
exact_gs_energy = analytical_energy
energy_form = 'analytical'
else:
# solve eigenvalue problem with matrix size N = 5000
exact_grids = np.linspace(*test_range, 5000, endpoint=endpoint)
exact_solver = Solver(exact_grids,
potential_fn=potential_fn,
k_point=k_point,
boundary_condition=boundary_condition,
n_point_stencil=n_point_stencil)
# solve ground state
exact_solver.solve_ground_state()
# obtain ground state energy as exact energy
exact_gs_energy = exact_solver.eigenvalues[0]
energy_form = '5000_grids'
# get error of energy for each num_grid compared to the exact energy
for num_grids in num_grids_list:
grids = np.linspace(*test_range, num_grids, endpoint=endpoint)
# solve eigenvalue problem with matrix size N = num_grids
solver = Solver(grids,
potential_fn=potential_fn,
k_point=k_point,
boundary_condition=boundary_condition,
n_point_stencil=n_point_stencil)
solver.solve_ground_state()
# obtain lowest eigenvalue from FDM
ground_state_energy = solver.eigenvalues[0]
# obtain g.s. wavefunction
# ground_state_wf = solver.wave_function[0]
# contruct absolute error
abs_error = np.abs(ground_state_energy - exact_gs_energy)
E_abs_error.append(abs_error)
# take (base 10) logs of items in list
log_ngl = [np.log10(x) for x in num_grids_list]
log_E = [np.log10(x) for x in E_abs_error]
# skip first 3 small N values for finding linear fit
log_ngl_fit = log_ngl[3:]
log_E_fit = log_E[3:]
# linear fitting
b, p = polyfit(log_ngl_fit, log_E_fit, 1)
r2 = '%.4f' % (rsquared(log_ngl_fit, log_E_fit))
yfit = [10 ** (b + p * xi) for xi in log_ngl]
p = '%.4f' % (p)
size_diff = np.abs(len(num_grids_list) - len(yfit))
# initialize figure for plots
fig, ax = get_plotting_params()
# obtain linear fit of data (skipping first 3 small N values).
# here p = slope in our fit, r^2 is a measure of how linear data is.
linfit = ax.plot(num_grids_list[size_diff:], yfit, alpha=0.4,
label='$p$ = ' + p + ', $r^2$ = ' + r2, linewidth=3)
# matplotlib trick to obtain same color of a previous plot
ax.plot(num_grids_list, E_abs_error, marker='o', linestyle='None',
color=linfit[0].get_color())
# log-log scale
plt.xscale('log')
plt.yscale('log')
ax.set_xlabel("$N$", fontsize=18)
ax.set_ylabel("|Error| (au)", fontsize=18)
plt.legend(fontsize=16)
plt.title(f'Error in ground state vs. number of grids\n{func_name}, '
f'{boundary_condition}, {test_range}, {n_point_stencil}-points, '
f'{energy_form}',
fontsize=20)
plt.grid(alpha=0.4)
plt.gca().xaxis.grid(True, which='minor', alpha=0.4)
plt.gca().yaxis.grid(True, which='minor', alpha=0.4)
# create folder if no such directory
if not os.path.isdir('convergence_test'):
os.mkdir('convergence_test')
if not os.path.isdir(f'convergence_test/{Solver.__name__}'):
os.mkdir(f'convergence_test/{Solver.__name__}')
# save fig
plt.savefig(f'convergence_test/{Solver.__name__}/{func_name}_'
f'{boundary_condition}_{test_range}_{n_point_stencil}_'
f'{energy_form}{plot_index}.png')
plt.close()
# stop timer
t1 = time.time()
# write time taken to complete the convergence test to log (txt) file
time_str = time.strftime("==== %Y-%m-%d %H:%M:%S ====", time.localtime())
finish_str = f'{Solver.__name__}: {func_name}_{boundary_condition}_' \
f'{test_range}_{n_point_stencil}_{energy_form}{plot_index}'
timer_str = f'Time: {t1 - t0}'
all_str = time_str + '\n' + finish_str + '\n' + timer_str + '\n\n'
with open("convergence_test/test_log.txt", "a") as text_file:
text_file.write(all_str)
print(all_str)
# plot the dispersion relation for a periodic potential
# TODO: move this to an example (not a test)
def plot_dispersion(Solver,
test_range,
potential_fn,
k_range=(-np.pi, np.pi),
eigenvalue_index=0,
n_point_stencil=5,
num_grids=1000,
num_k_grids=100):
warnings.warn('Warning: make sure potential_fn is a periodic function!')
# grids = np.linspace(*test_range, num_grids, endpoint = False)
k_list = np.linspace(*k_range, num_k_grids)
E_list = []
for k in k_list:
grids = np.linspace(*test_range, num_grids, endpoint=False)
solver = Solver(grids,
potential_fn=potential_fn,
k_point=k,
boundary_condition='periodic',
n_point_stencil=n_point_stencil,
tol=0)
solver.solve_ground_state()
# obtain lowest eigenvalue from FDM
energy = solver.eigenvalues[eigenvalue_index]
E_list.append(energy)
# initialize figure for plots
fig, ax = get_plotting_params()
# matplotlib trick to obtain same color of a previous plot
ax.plot(k_list, E_list, marker='o', linestyle='solid', color='blue')
ax.set_xlabel("k", fontsize=18)
ax.set_ylabel("E", fontsize=18)
# plt.legend(fontsize=16)
# plt.title(f'Dispersion relation {k_range} {eigenvalue_index}', fontsize=20)
plt.grid(alpha=0.4)
plt.gca().xaxis.grid(True, which='minor', alpha=0.4)
plt.gca().yaxis.grid(True, which='minor', alpha=0.4)
# create folder if no such directory
if not os.path.isdir('dispersion_plots'):
os.mkdir('dispersion_plots')
# save fig
plt.savefig(f'dispersion_plots/dispersion_relation_{k_range}_'
f'{eigenvalue_index}.png')
plt.close()
print(f'dispersion_relation_{k_range}_{eigenvalue_index} done')
if __name__ == "__main__":
""" Test convergence rates for various systems."""
test_potential_fn_list = [((0, 3),
functools.partial(ext_potentials.kronig_penney,
a=3, b=0.5, v0=-1),
'periodic'),
((-5, 5),
functools.partial(ext_potentials.poschl_teller,
lam=1), 'closed'),
((-20, 20),
functools.partial(ext_potentials.poschl_teller,
lam=1), 'open'),
((0, 2 * np.pi), np.sin, 'periodic')]
solvers = [non_interacting_solver.SparseEigenSolver, non_interacting_solver.EigenSolver]
# convergence test for the sin periodic potential on arbitrary k_point
r, p, b = test_potential_fn_list[3]
convergence_test(solvers[0], r, p, b, 5, k_point=1)
| """
.. _single_electron_test:
Test for Single Electron Module
###############################
.. todo::
* Authors? -RJM
* Docs need love
* Should validate correct instiliation/completion. Right now just spits printouts. -RJM
* Ideally a single test script would test EVERY module, and can be easily run after each git commit. May need to make a another test script which calls this one and all others. -RJM
* Has this been linted yet? -RJM
"""
import sys
import os
currentpath = os.path.abspath('.')
sys.path.insert(0, os.path.dirname(currentpath))
import non_interacting_solver, ext_potentials
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial.polynomial import polyfit
from scipy import stats
import functools
import time
import warnings
def get_plotting_params():
""" Convergence plotting parameters. """
params = {'mathtext.default': 'default'}
plt.rcParams.update(params)
plt.rcParams['axes.axisbelow'] = True
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 9
fig_size[1] = 6
plt.rcParams["figure.figsize"] = fig_size
fig, ax = plt.subplots()
return fig, ax
def rsquared(x, y):
""" Return R^2 where x and y are array-like."""
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return r_value ** 2
def convergence_test(Solver,
test_range,
potential_fn,
boundary_condition,
n_point_stencil,
k_point=None,
num_grids_list=None,
analytical_energy=None,
plot_index=''):
"""Description.
.. todo::
* Please fill out docs
Args:
...
Returns:
...
"""
# start timer
t0 = time.time()
if num_grids_list is None:
num_grids_list = [40, 80, 120, 160, 200, 400, 600, 800, 1000]
# error list for plotting
E_abs_error = []
# get the name of potential function in order to save to local machine
try:
func_name = potential_fn.__name__
except AttributeError:
func_name = potential_fn.func.__name__
# choose whether include endpoints
if boundary_condition == 'periodic':
endpoint = False
else:
endpoint = True
# obtain lowest eigenvalue (level = 1) from exact/analytical result.
# When the exact answer is not known, simply run the solver with a large
# grid, e.g. N = 5000 to obtain the "exact" g.s. energy
if analytical_energy:
exact_gs_energy = analytical_energy
energy_form = 'analytical'
else:
# solve eigenvalue problem with matrix size N = 5000
exact_grids = np.linspace(*test_range, 5000, endpoint=endpoint)
exact_solver = Solver(exact_grids,
potential_fn=potential_fn,
k_point=k_point,
boundary_condition=boundary_condition,
n_point_stencil=n_point_stencil)
# solve ground state
exact_solver.solve_ground_state()
# obtain ground state energy as exact energy
exact_gs_energy = exact_solver.eigenvalues[0]
energy_form = '5000_grids'
# get error of energy for each num_grid compared to the exact energy
for num_grids in num_grids_list:
grids = np.linspace(*test_range, num_grids, endpoint=endpoint)
# solve eigenvalue problem with matrix size N = num_grids
solver = Solver(grids,
potential_fn=potential_fn,
k_point=k_point,
boundary_condition=boundary_condition,
n_point_stencil=n_point_stencil)
solver.solve_ground_state()
# obtain lowest eigenvalue from FDM
ground_state_energy = solver.eigenvalues[0]
# obtain g.s. wavefunction
# ground_state_wf = solver.wave_function[0]
# contruct absolute error
abs_error = np.abs(ground_state_energy - exact_gs_energy)
E_abs_error.append(abs_error)
# take (base 10) logs of items in list
log_ngl = [np.log10(x) for x in num_grids_list]
log_E = [np.log10(x) for x in E_abs_error]
# skip first 3 small N values for finding linear fit
log_ngl_fit = log_ngl[3:]
log_E_fit = log_E[3:]
# linear fitting
b, p = polyfit(log_ngl_fit, log_E_fit, 1)
r2 = '%.4f' % (rsquared(log_ngl_fit, log_E_fit))
yfit = [10 ** (b + p * xi) for xi in log_ngl]
p = '%.4f' % (p)
size_diff = np.abs(len(num_grids_list) - len(yfit))
# initialize figure for plots
fig, ax = get_plotting_params()
# obtain linear fit of data (skipping first 3 small N values).
# here p = slope in our fit, r^2 is a measure of how linear data is.
linfit = ax.plot(num_grids_list[size_diff:], yfit, alpha=0.4,
label='$p$ = ' + p + ', $r^2$ = ' + r2, linewidth=3)
# matplotlib trick to obtain same color of a previous plot
ax.plot(num_grids_list, E_abs_error, marker='o', linestyle='None',
color=linfit[0].get_color())
# log-log scale
plt.xscale('log')
plt.yscale('log')
ax.set_xlabel("$N$", fontsize=18)
ax.set_ylabel("|Error| (au)", fontsize=18)
plt.legend(fontsize=16)
plt.title(f'Error in ground state vs. number of grids\n{func_name}, '
f'{boundary_condition}, {test_range}, {n_point_stencil}-points, '
f'{energy_form}',
fontsize=20)
plt.grid(alpha=0.4)
plt.gca().xaxis.grid(True, which='minor', alpha=0.4)
plt.gca().yaxis.grid(True, which='minor', alpha=0.4)
# create folder if no such directory
if not os.path.isdir('convergence_test'):
os.mkdir('convergence_test')
if not os.path.isdir(f'convergence_test/{Solver.__name__}'):
os.mkdir(f'convergence_test/{Solver.__name__}')
# save fig
plt.savefig(f'convergence_test/{Solver.__name__}/{func_name}_'
f'{boundary_condition}_{test_range}_{n_point_stencil}_'
f'{energy_form}{plot_index}.png')
plt.close()
# stop timer
t1 = time.time()
# write time taken to complete the convergence test to log (txt) file
time_str = time.strftime("==== %Y-%m-%d %H:%M:%S ====", time.localtime())
finish_str = f'{Solver.__name__}: {func_name}_{boundary_condition}_' \
f'{test_range}_{n_point_stencil}_{energy_form}{plot_index}'
timer_str = f'Time: {t1 - t0}'
all_str = time_str + '\n' + finish_str + '\n' + timer_str + '\n\n'
with open("convergence_test/test_log.txt", "a") as text_file:
text_file.write(all_str)
print(all_str)
# plot the dispersion relation for a periodic potential
# TODO: move this to an example (not a test)
def plot_dispersion(Solver,
test_range,
potential_fn,
k_range=(-np.pi, np.pi),
eigenvalue_index=0,
n_point_stencil=5,
num_grids=1000,
num_k_grids=100):
warnings.warn('Warning: make sure potential_fn is a periodic function!')
# grids = np.linspace(*test_range, num_grids, endpoint = False)
k_list = np.linspace(*k_range, num_k_grids)
E_list = []
for k in k_list:
grids = np.linspace(*test_range, num_grids, endpoint=False)
solver = Solver(grids,
potential_fn=potential_fn,
k_point=k,
boundary_condition='periodic',
n_point_stencil=n_point_stencil,
tol=0)
solver.solve_ground_state()
# obtain lowest eigenvalue from FDM
energy = solver.eigenvalues[eigenvalue_index]
E_list.append(energy)
# initialize figure for plots
fig, ax = get_plotting_params()
# matplotlib trick to obtain same color of a previous plot
ax.plot(k_list, E_list, marker='o', linestyle='solid', color='blue')
ax.set_xlabel("k", fontsize=18)
ax.set_ylabel("E", fontsize=18)
# plt.legend(fontsize=16)
# plt.title(f'Dispersion relation {k_range} {eigenvalue_index}', fontsize=20)
plt.grid(alpha=0.4)
plt.gca().xaxis.grid(True, which='minor', alpha=0.4)
plt.gca().yaxis.grid(True, which='minor', alpha=0.4)
# create folder if no such directory
if not os.path.isdir('dispersion_plots'):
os.mkdir('dispersion_plots')
# save fig
plt.savefig(f'dispersion_plots/dispersion_relation_{k_range}_'
f'{eigenvalue_index}.png')
plt.close()
print(f'dispersion_relation_{k_range}_{eigenvalue_index} done')
if __name__ == "__main__":
""" Test convergence rates for various systems."""
test_potential_fn_list = [((0, 3),
functools.partial(ext_potentials.kronig_penney,
a=3, b=0.5, v0=-1),
'periodic'),
((-5, 5),
functools.partial(ext_potentials.poschl_teller,
lam=1), 'closed'),
((-20, 20),
functools.partial(ext_potentials.poschl_teller,
lam=1), 'open'),
((0, 2 * np.pi), np.sin, 'periodic')]
solvers = [non_interacting_solver.SparseEigenSolver, non_interacting_solver.EigenSolver]
# convergence test for the sin periodic potential on arbitrary k_point
r, p, b = test_potential_fn_list[3]
convergence_test(solvers[0], r, p, b, 5, k_point=1)
| en | 0.75732 | .. _single_electron_test: Test for Single Electron Module ############################### .. todo:: * Authors? -RJM * Docs need love * Should validate correct instiliation/completion. Right now just spits printouts. -RJM * Ideally a single test script would test EVERY module, and can be easily run after each git commit. May need to make a another test script which calls this one and all others. -RJM * Has this been linted yet? -RJM Convergence plotting parameters. Return R^2 where x and y are array-like. Description. .. todo:: * Please fill out docs Args: ... Returns: ... # start timer # error list for plotting # get the name of potential function in order to save to local machine # choose whether include endpoints # obtain lowest eigenvalue (level = 1) from exact/analytical result. # When the exact answer is not known, simply run the solver with a large # grid, e.g. N = 5000 to obtain the "exact" g.s. energy # solve eigenvalue problem with matrix size N = 5000 # solve ground state # obtain ground state energy as exact energy # get error of energy for each num_grid compared to the exact energy # solve eigenvalue problem with matrix size N = num_grids # obtain lowest eigenvalue from FDM # obtain g.s. wavefunction # ground_state_wf = solver.wave_function[0] # contruct absolute error # take (base 10) logs of items in list # skip first 3 small N values for finding linear fit # linear fitting # initialize figure for plots # obtain linear fit of data (skipping first 3 small N values). # here p = slope in our fit, r^2 is a measure of how linear data is. 
# matplotlib trick to obtain same color of a previous plot # log-log scale # create folder if no such directory # save fig # stop timer # write time taken to complete the convergence test to log (txt) file # plot the dispersion relation for a periodic potential # TODO: move this to an example (not a test) # grids = np.linspace(*test_range, num_grids, endpoint = False) # obtain lowest eigenvalue from FDM # initialize figure for plots # matplotlib trick to obtain same color of a previous plot # plt.legend(fontsize=16) # plt.title(f'Dispersion relation {k_range} {eigenvalue_index}', fontsize=20) # create folder if no such directory # save fig Test convergence rates for various systems. # convergence test for the sin periodic potential on arbitrary k_point | 1.89319 | 2 |
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/test_constant.py | LastRemote/amazon-sagemaker-examples | 1 | 6618253 | <filename>reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/test_constant.py<gh_stars>1-10
S3_BUCKET = ""
S3_PREFIX = ""
AWS_REGION = "" | <filename>reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/test_constant.py<gh_stars>1-10
S3_BUCKET = ""
S3_PREFIX = ""
AWS_REGION = "" | none | 1 | 0.908296 | 1 | |
core/migrations/0013_auto_20180430_1644.py | araceli24/TimeWorking | 0 | 6618254 | <gh_stars>0
# Generated by Django 2.0.4 on 2018-04-30 14:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20180430_1633'),
]
operations = [
migrations.AlterField(
model_name='activityjournal',
name='start',
field=models.DateTimeField(verbose_name=datetime.datetime(2018, 4, 30, 16, 44, 2, 552191)),
),
migrations.AlterField(
model_name='registry',
name='start',
field=models.DateTimeField(verbose_name=datetime.datetime(2018, 4, 30, 16, 44, 2, 554720)),
),
]
| # Generated by Django 2.0.4 on 2018-04-30 14:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20180430_1633'),
]
operations = [
migrations.AlterField(
model_name='activityjournal',
name='start',
field=models.DateTimeField(verbose_name=datetime.datetime(2018, 4, 30, 16, 44, 2, 552191)),
),
migrations.AlterField(
model_name='registry',
name='start',
field=models.DateTimeField(verbose_name=datetime.datetime(2018, 4, 30, 16, 44, 2, 554720)),
),
] | en | 0.782873 | # Generated by Django 2.0.4 on 2018-04-30 14:44 | 1.6888 | 2 |
Optimisation Portfolios/HRP.py | BrandonAFong/Ideas | 0 | 6618255 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:46:54 2021
@author: apple
"""
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
from backtest import df_to_matrix,indexCovMatrix
def seriation(tree, points, index):
if index < points:
return [index]
else:
left = int(tree[index - points, 0])
right = int(tree[index - points, 1])
return (seriation(tree, points, left) + seriation(tree, points, right))
def compute_serial_matrix(distanceMatrix, method="ward"):
num = len(distanceMatrix)
flatDistMat = squareform(distanceMatrix)
resLinkage = linkage(flatDistMat, method=method)
resOrder = seriation(resLinkage, num, num + num - 2)
seriatedDist = np.zeros((num, num))
x,y = np.triu_indices(num, k=1)
seriatedDist[x,y] = distanceMatrix[[resOrder[i] for i in x], [resOrder[j] for j in y]]
seriatedDist[x,y] = seriatedDist[x,y]
return seriatedDist, resOrder, resLinkage
def compute_HRP_weights(covar, resOrder):
weights = pd.Series(1, index=resOrder)
alphas = [resOrder]
while len(alphas) > 0:
alphas = [cluster[start:end] for cluster in alphas
for start, end in ((0, len(cluster) // 2),
(len(cluster) // 2, len(cluster)))
if len(cluster) > 1]
for subcluster in range(0, len(alphas), 2):
lc = alphas[subcluster]
#Left Side
leftCovar = covar[lc].loc[lc]
inv_diag = 1 / np.diag(leftCovar.values)
parity_w = inv_diag * (1 / np.sum(inv_diag))
leftVar = np.dot(parity_w, np.dot(leftCovar, parity_w))
#Right Side
rc = alphas[subcluster + 1]
rightCovar = covar[rc].loc[rc]
inv_diag = 1 / np.diag(rightCovar.values)
parity_w = inv_diag * (1 / np.sum(inv_diag))
rightVar = np.dot(parity_w, np.dot(rightCovar, parity_w))
alloc_factor = 1 - leftVar / (leftVar + rightVar)
weights[lc] *= alloc_factor
weights[rc] *= 1 - alloc_factor
return weights
#Dataframe of returns
def HRP(df):
estimateCor = df.corr(method='pearson')
estimateCov, column_dic = indexCovMatrix(df)
# estimate_covar, column_dic = indexCorrMatrix(df.cov())
distances = np.sqrt((1 - estimateCor) / 2)
orderedDistanceMatrix, resOrder, linkageType = compute_serial_matrix(distances.values, method='single')
HRP_w = compute_HRP_weights(estimateCov, resOrder)
dictOrder = dict(map(reversed, column_dic.items()))
HRP_w = HRP_w.rename(index = dictOrder)
return HRP_w
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:46:54 2021
@author: apple
"""
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
from backtest import df_to_matrix,indexCovMatrix
def seriation(tree, points, index):
if index < points:
return [index]
else:
left = int(tree[index - points, 0])
right = int(tree[index - points, 1])
return (seriation(tree, points, left) + seriation(tree, points, right))
def compute_serial_matrix(distanceMatrix, method="ward"):
num = len(distanceMatrix)
flatDistMat = squareform(distanceMatrix)
resLinkage = linkage(flatDistMat, method=method)
resOrder = seriation(resLinkage, num, num + num - 2)
seriatedDist = np.zeros((num, num))
x,y = np.triu_indices(num, k=1)
seriatedDist[x,y] = distanceMatrix[[resOrder[i] for i in x], [resOrder[j] for j in y]]
seriatedDist[x,y] = seriatedDist[x,y]
return seriatedDist, resOrder, resLinkage
def compute_HRP_weights(covar, resOrder):
weights = pd.Series(1, index=resOrder)
alphas = [resOrder]
while len(alphas) > 0:
alphas = [cluster[start:end] for cluster in alphas
for start, end in ((0, len(cluster) // 2),
(len(cluster) // 2, len(cluster)))
if len(cluster) > 1]
for subcluster in range(0, len(alphas), 2):
lc = alphas[subcluster]
#Left Side
leftCovar = covar[lc].loc[lc]
inv_diag = 1 / np.diag(leftCovar.values)
parity_w = inv_diag * (1 / np.sum(inv_diag))
leftVar = np.dot(parity_w, np.dot(leftCovar, parity_w))
#Right Side
rc = alphas[subcluster + 1]
rightCovar = covar[rc].loc[rc]
inv_diag = 1 / np.diag(rightCovar.values)
parity_w = inv_diag * (1 / np.sum(inv_diag))
rightVar = np.dot(parity_w, np.dot(rightCovar, parity_w))
alloc_factor = 1 - leftVar / (leftVar + rightVar)
weights[lc] *= alloc_factor
weights[rc] *= 1 - alloc_factor
return weights
#Dataframe of returns
def HRP(df):
estimateCor = df.corr(method='pearson')
estimateCov, column_dic = indexCovMatrix(df)
# estimate_covar, column_dic = indexCorrMatrix(df.cov())
distances = np.sqrt((1 - estimateCor) / 2)
orderedDistanceMatrix, resOrder, linkageType = compute_serial_matrix(distances.values, method='single')
HRP_w = compute_HRP_weights(estimateCov, resOrder)
dictOrder = dict(map(reversed, column_dic.items()))
HRP_w = HRP_w.rename(index = dictOrder)
return HRP_w
| en | 0.426686 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Tue Aug 31 22:46:54 2021 @author: apple #Left Side #Right Side #Dataframe of returns # estimate_covar, column_dic = indexCorrMatrix(df.cov()) | 2.286414 | 2 |
model/asset.py | keremkoseoglu/Kifu | 0 | 6618256 | """ Assets """
import json
import os
from copy import deepcopy
from datetime import datetime
from util.date_time import get_formatted_date
from model.currency import CurrencyConverter
from model.income_tax import IncomeTaxCalculatorFactory
import config
_ASSET_FILE = "asset.json"
def is_liquid(asset_type: str) -> bool:
""" Returns true if asset is liquid """
return asset_type in ("STOCK", "CRYPTO")
def get_assets(deduct_income_tax: bool = False):
""" Returns all assets """
with open(_get_file_path()) as asset_file:
json_data = json.load(asset_file)
if deduct_income_tax:
inc_tax_rate = IncomeTaxCalculatorFactory.get_instance().default_tax_rate
rate = 1 - (inc_tax_rate / 100)
for asset in json_data["assets"]:
if "income_tax" in asset and asset["income_tax"]:
asset["sales_value"] = asset["sales_value"] * rate
if "value_history" in asset:
for hist in asset["value_history"]:
hist["value"] = hist["value"] * rate
return json_data
def set_assets(assets: dict):
""" Saves assets to disk """
assets_with_history = _generate_asset_value_history(assets)
with open(_get_file_path(), "w") as ass_file:
json.dump(assets_with_history, ass_file, indent=3)
def get_asset_type_resale_value_sum(only_liquid: bool = False,
deduct_income_tax: bool = False) -> []:
""" Asset type resale value sum
Used when calculating net worth
"""
result = []
assets = get_assets(deduct_income_tax=deduct_income_tax)
currency_converter = CurrencyConverter()
for asset in assets["assets"]:
if only_liquid and not is_liquid(asset["type"]):
continue
asset_unit_value = currency_converter.convert_to_local_currency(
asset["sales_value"],
asset["currency"])
asset_value = asset_unit_value * asset["quantity"]
found = False
for res in result:
if res["type"] == asset["type"]:
res["sales_value"] = res["sales_value"] + asset_value
found = True
if not found:
res = {"type": asset["type"], "sales_value": asset_value}
result.append(res)
return result
def get_asset_resale_value_sum() -> float:
""" Asset resale value sum
Used when calculating net worth
"""
result = 0
type_sum = get_asset_type_resale_value_sum()
for entry in type_sum:
result = result + entry["sales_value"]
return result
def get_liquid_assets_in_both_currencies(deduct_income_tax: bool = False) -> []:
""" Asset balances in original and home currencies """
output = []
assets = get_assets(deduct_income_tax=deduct_income_tax)
currency_converter = CurrencyConverter()
for asset in assets["assets"]:
if not is_liquid(asset["type"]):
continue
org_amount = asset["sales_value"] * asset["quantity"]
local_amount = currency_converter.convert_to_local_currency(
org_amount,
asset["currency"])
name = asset["bank"] + " - " + asset["type"]
found = False
for out in output:
if out["name"] == name and out["original_currency"] == asset["currency"]:
found = True
out["home_balance"] += local_amount
out["original_balance"] += org_amount
break
if not found:
output_dict = {
"name": asset["bank"] + " - " + asset["type"],
"home_balance": local_amount,
"original_balance": org_amount,
"original_currency": asset["currency"],
"is_investment": True
}
output.append(output_dict)
return output
def _get_file_path():
return os.path.join(config.CONSTANTS["DATA_DIR_PATH"] + _ASSET_FILE)
def _generate_asset_value_history(assets: dict) -> dict:
""" Adds value history to asset dict """
max_val_hist_size = config.CONSTANTS["ASSET_HISTORY_SIZE"]
result = deepcopy(assets)
for asset in result["assets"]:
if "value_history" in asset:
last_hist_idx = len(asset["value_history"]) - 1
if last_hist_idx >= 0:
last_hist_val = asset["value_history"][last_hist_idx]
if last_hist_val["value"] == asset["sales_value"]:
continue
new_hist_val = {
"date": get_formatted_date(datetime.now()),
"value": asset["sales_value"]
}
asset["value_history"].append(new_hist_val)
if len(asset["value_history"]) > max_val_hist_size:
len_diff = len(asset["value_history"]) - max_val_hist_size
del asset["value_history"][0 : len_diff]
return result
| """ Assets """
import json
import os
from copy import deepcopy
from datetime import datetime
from util.date_time import get_formatted_date
from model.currency import CurrencyConverter
from model.income_tax import IncomeTaxCalculatorFactory
import config
_ASSET_FILE = "asset.json"
def is_liquid(asset_type: str) -> bool:
""" Returns true if asset is liquid """
return asset_type in ("STOCK", "CRYPTO")
def get_assets(deduct_income_tax: bool = False):
""" Returns all assets """
with open(_get_file_path()) as asset_file:
json_data = json.load(asset_file)
if deduct_income_tax:
inc_tax_rate = IncomeTaxCalculatorFactory.get_instance().default_tax_rate
rate = 1 - (inc_tax_rate / 100)
for asset in json_data["assets"]:
if "income_tax" in asset and asset["income_tax"]:
asset["sales_value"] = asset["sales_value"] * rate
if "value_history" in asset:
for hist in asset["value_history"]:
hist["value"] = hist["value"] * rate
return json_data
def set_assets(assets: dict):
""" Saves assets to disk """
assets_with_history = _generate_asset_value_history(assets)
with open(_get_file_path(), "w") as ass_file:
json.dump(assets_with_history, ass_file, indent=3)
def get_asset_type_resale_value_sum(only_liquid: bool = False,
deduct_income_tax: bool = False) -> []:
""" Asset type resale value sum
Used when calculating net worth
"""
result = []
assets = get_assets(deduct_income_tax=deduct_income_tax)
currency_converter = CurrencyConverter()
for asset in assets["assets"]:
if only_liquid and not is_liquid(asset["type"]):
continue
asset_unit_value = currency_converter.convert_to_local_currency(
asset["sales_value"],
asset["currency"])
asset_value = asset_unit_value * asset["quantity"]
found = False
for res in result:
if res["type"] == asset["type"]:
res["sales_value"] = res["sales_value"] + asset_value
found = True
if not found:
res = {"type": asset["type"], "sales_value": asset_value}
result.append(res)
return result
def get_asset_resale_value_sum() -> float:
""" Asset resale value sum
Used when calculating net worth
"""
result = 0
type_sum = get_asset_type_resale_value_sum()
for entry in type_sum:
result = result + entry["sales_value"]
return result
def get_liquid_assets_in_both_currencies(deduct_income_tax: bool = False) -> []:
""" Asset balances in original and home currencies """
output = []
assets = get_assets(deduct_income_tax=deduct_income_tax)
currency_converter = CurrencyConverter()
for asset in assets["assets"]:
if not is_liquid(asset["type"]):
continue
org_amount = asset["sales_value"] * asset["quantity"]
local_amount = currency_converter.convert_to_local_currency(
org_amount,
asset["currency"])
name = asset["bank"] + " - " + asset["type"]
found = False
for out in output:
if out["name"] == name and out["original_currency"] == asset["currency"]:
found = True
out["home_balance"] += local_amount
out["original_balance"] += org_amount
break
if not found:
output_dict = {
"name": asset["bank"] + " - " + asset["type"],
"home_balance": local_amount,
"original_balance": org_amount,
"original_currency": asset["currency"],
"is_investment": True
}
output.append(output_dict)
return output
def _get_file_path():
return os.path.join(config.CONSTANTS["DATA_DIR_PATH"] + _ASSET_FILE)
def _generate_asset_value_history(assets: dict) -> dict:
""" Adds value history to asset dict """
max_val_hist_size = config.CONSTANTS["ASSET_HISTORY_SIZE"]
result = deepcopy(assets)
for asset in result["assets"]:
if "value_history" in asset:
last_hist_idx = len(asset["value_history"]) - 1
if last_hist_idx >= 0:
last_hist_val = asset["value_history"][last_hist_idx]
if last_hist_val["value"] == asset["sales_value"]:
continue
new_hist_val = {
"date": get_formatted_date(datetime.now()),
"value": asset["sales_value"]
}
asset["value_history"].append(new_hist_val)
if len(asset["value_history"]) > max_val_hist_size:
len_diff = len(asset["value_history"]) - max_val_hist_size
del asset["value_history"][0 : len_diff]
return result
| en | 0.623289 | Assets Returns true if asset is liquid Returns all assets Saves assets to disk Asset type resale value sum Used when calculating net worth Asset resale value sum Used when calculating net worth Asset balances in original and home currencies Adds value history to asset dict | 2.868536 | 3 |
odbcli/sidebar.py | dbcli/odbc-cli | 9 | 6618257 | import sys
import platform
from cyanodbc import Connection
from typing import List, Optional, Callable
from logging import getLogger
from asyncio import get_event_loop
from threading import Thread, Lock
from prompt_toolkit.layout.containers import HSplit, Window, ScrollOffsets, ConditionalContainer, Container
from prompt_toolkit.formatted_text.base import StyleAndTextTuples
from prompt_toolkit.formatted_text import fragment_list_width
from prompt_toolkit.layout.controls import FormattedTextControl, BufferControl, UIContent
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.filters import is_done, renderer_height_is_known
from prompt_toolkit.layout.margins import ScrollbarMargin
from prompt_toolkit.mouse_events import MouseEvent
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.widgets import SearchToolbar
from prompt_toolkit.filters import Condition
from .conn import sqlConnection
from .filters import ShowSidebar
from .utils import if_mousedown
from .__init__ import __version__
class myDBObject:
def __init__(
self,
my_app: "sqlApp",
conn: sqlConnection,
name: str,
otype: str,
level: Optional[int] = 0,
children: Optional[List["myDBObject"]] = None,
parent: Optional["myDBObject"] = None,
next_object: Optional["myDBObject"] = None
) -> None:
self.my_app = my_app
self.conn = conn
self.children = children
self.parent = parent
self.next_object = next_object
# Held while modifying children, parent, next_object
# As some of thes operatins (expand) happen asynchronously
self._lock = Lock()
self.name = name
self.otype = otype
self.level = level
self.selected: bool = False
def _expand_internal(self) -> None:
"""
Populates children and sets parent for children nodes
"""
raise NotImplementedError()
def expand(self) -> None:
"""
Populates children and sets parent for children nodes
"""
if self.children is not None:
return None
loop = get_event_loop()
self.my_app.show_expanding_object = True
self.my_app.application.invalidate()
def _redraw_after_io():
""" Callback, scheduled after threaded I/O
completes """
self.my_app.show_expanding_object = False
self.my_app.obj_list_changed = True
self.my_app.application.invalidate()
def _run():
""" Executes in a thread """
self._expand_internal() # Blocking I/O
loop.call_soon_threadsafe(_redraw_after_io)
# (Don't use 'run_in_executor', because daemon is ideal here.
t = Thread(target = _run, daemon = True)
t.start()
def collapse(self) -> None:
"""
Populates children and sets parent for children nodes
Note, we don't have to blow up the children; just redirect
next_object. This way we re-query the database / force re-fresh
which may be suboptimal. TODO: Codify not/refresh path
"""
if self is not self.my_app.selected_object:
return
if self.children is not None:
obj = self.children[len(self.children) - 1].next_object
while obj.level > self.level:
obj = obj.next_object
with self._lock:
self.next_object = obj
self.children = None
elif self.parent is not None:
self.my_app.selected_object = self.parent
self.parent.collapse()
self.my_app.obj_list_changed = True
def add_children(self, list_obj: List["myDBObject"]) -> None:
lst = list(filter(lambda x: x.name != "", list_obj))
if len(lst):
with self._lock:
self.children = lst
for i in range(len(self.children) - 1):
self.children[i].next_object = self.children[i + 1]
self.children[len(self.children) - 1].next_object = self.next_object
self.next_object = self.children[0]
class myDBColumn(myDBObject):
def _expand_internal(self) -> None:
return None
class myDBFunction(myDBObject):
def _expand_internal(self) -> None:
cat = "%"
schema = "%"
# https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlprocedurecolumns-function?view=sql-server-ver15
# CatalogName cannot contain a string search pattern
if self.parent is not None:
if type(self.parent).__name__ == "myDBSchema":
schema = self.conn.sanitize_search_string(self.parent.name)
elif type(self.parent).__name__ == "myDBCatalog":
cat = self.parent.name
if self.parent.parent is not None:
if type(self.parent.parent).__name__ == "myDBCatalog":
cat = self.parent.parent.name
res = self.conn.find_procedure_columns(
catalog = cat,
schema = schema,
procedure = self.conn.sanitize_search_string(self.name),
column = "%")
lst = [myDBColumn(
my_app = self.my_app,
conn = self.conn,
name = col.column,
otype = col.type_name,
parent = self,
level = self.level + 1) for col in res]
self.add_children(list_obj = lst)
return None
class myDBTable(myDBObject):
def _expand_internal(self) -> None:
cat = "%"
schema = "%"
# https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlcolumns-function?view=sql-server-ver15
# CatalogName cannot contain a string search pattern
if self.parent is not None:
if type(self.parent).__name__ == "myDBSchema":
schema = self.conn.sanitize_search_string(self.parent.name)
elif type(self.parent).__name__ == "myDBCatalog":
cat = self.parent.name
if self.parent.parent is not None:
if type(self.parent.parent).__name__ == "myDBCatalog":
cat = self.parent.parent.name
res = self.conn.find_columns(
catalog = cat,
schema = schema,
table = self.name,
column = "%")
lst = [myDBColumn(
my_app = self.my_app,
conn = self.conn,
name = col.column,
otype = col.type_name,
parent = self,
level = self.level + 1) for col in res]
self.add_children(list_obj = lst)
return None
class myDBSchema(myDBObject):
def _expand_internal(self) -> None:
cat = self.conn.sanitize_search_string(self.parent.name) if self.parent is not None else "%"
res = self.conn.find_tables(
catalog = cat,
schema = self.conn.sanitize_search_string(self.name),
table = "",
type = "")
resf = self.conn.find_procedures(
catalog = cat,
schema = self.conn.sanitize_search_string(self.name),
procedure = "")
tables = []
views = []
functions = []
lst = []
for table in res:
if table.type.lower() == 'table':
tables.append(table.name)
if table.type.lower() == 'view':
views.append(table.name)
lst.append(myDBTable(
my_app = self.my_app,
conn = self.conn,
name = table.name,
otype = table.type.lower(),
parent = self,
level = self.level + 1))
for func in resf:
functions.append(func.name)
lst.append(myDBFunction(
my_app = self.my_app,
conn = self.conn,
name = func.name,
otype = "function",
parent = self,
level = self.level + 1))
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.parent.name) if self.parent else "",
schema = self.conn.escape_name(self.name),
names = self.conn.escape_names(tables),
obj_type = "table")
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.parent.name) if self.parent else "",
schema = self.conn.escape_name(self.name),
names = self.conn.escape_names(views),
obj_type = "view")
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.parent.name) if self.parent else "",
schema = self.conn.escape_name(self.name),
names = self.conn.escape_names(functions),
obj_type = "function")
self.add_children(list_obj = lst)
return None
class myDBCatalog(myDBObject):
def _expand_internal(self) -> None:
schemas = lst = []
schemas = self.conn.list_schemas(
catalog = self.conn.sanitize_search_string(self.name))
if len(schemas) < 1 or all([s == "" for s in schemas]):
res = self.conn.find_tables(
catalog = self.conn.sanitize_search_string(self.name),
schema = "",
table = "",
type = "")
schemas = [r.schema for r in res]
self.conn.dbmetadata.extend_schemas(
catalog = self.conn.escape_name(self.name),
names = self.conn.escape_names(schemas))
if not all([s == "" for s in schemas]):
# Schemas were found either having called list_schemas
# or via the find_tables call
lst = [myDBSchema(
my_app = self.my_app,
conn = self.conn,
name = schema,
otype = "schema",
parent = self,
level = self.level + 1) for schema in sorted(set(schemas))]
elif len(schemas):
# No schemas found; but if there are tables then these are direct
# descendents, i.e. MySQL
tables = []
views = []
lst = []
for table in res:
if table.type.lower() == 'table':
tables.append(table.name)
if table.type.lower() == 'view':
views.append(table.name)
lst.append(myDBTable(
my_app = self.my_app,
conn = self.conn,
name = table.name,
otype = table.type.lower(),
parent = self,
level = self.level + 1))
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.name),
schema = "", names = self.conn.escape_names(tables),
obj_type = "table")
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.name),
schema = "", names = self.conn.escape_names(views),
obj_type = "view")
self.add_children(list_obj = lst)
return None
class myDBConn(myDBObject):
def _expand_internal(self) -> None:
if not self.conn.connected():
return None
lst = []
cat_support = self.conn.catalog_support()
if cat_support:
rows = self.conn.list_catalogs()
if len(rows):
lst = [myDBCatalog(
my_app = self.my_app,
conn = self.conn,
name = row,
otype = "catalog",
parent = self,
level = self.level + 1) for row in rows]
self.conn.dbmetadata.extend_catalogs(
self.conn.escape_names(rows))
else:
res = self.conn.find_tables(
catalog = "%",
schema = "",
table = "",
type = "")
schemas = [r.schema for r in res]
self.conn.dbmetadata.extend_schemas(catalog = "",
names = self.conn.escape_names(schemas))
if not all([s == "" for s in schemas]):
lst = [myDBSchema(
my_app = self.my_app,
conn = self.conn,
name = schema,
otype = "schema",
parent = self,
level = self.level + 1) for schema in sorted(set(schemas))]
elif len(schemas):
tables = []
views = []
lst = []
for table in res:
if table.type.lower() == 'table':
tables.append(table.name)
if table.type.lower() == 'view':
views.append(table.name)
lst.append(myDBTable(
my_app = self.my_app,
conn = self.conn,
name = table.name,
otype = table.type.lower(),
parent = self,
level = self.level + 1))
self.conn.dbmetadata.extend_objects(catalog = "",
schema = "", names = self.conn.escape_names(tables),
obj_type = "table")
self.conn.dbmetadata.extend_objects(catalog = "",
schema = "", names = self.conn.escape_names(views),
obj_type = "view")
self.add_children(list_obj = lst)
return None
def sql_sidebar_help(my_app: "sqlApp"):
"""
Create the `Layout` for the help text for the current item in the sidebar.
"""
token = "class:<PASSWORD>"
def get_current_description():
"""
Return the description of the selected option.
"""
obj = my_app.selected_object
if obj is not None:
return obj.name
return ""
def get_help_text():
return [(token, get_current_description())]
return ConditionalContainer(
content=Window(
FormattedTextControl(get_help_text), style=token, height=Dimension(min=3)
),
filter = ~is_done
& ShowSidebar(my_app)
& Condition(
lambda: not my_app.show_exit_confirmation
))
def expanding_object_notification(my_app: "sqlApp"):
"""
Create the `Layout` for the 'Expanding object' notification.
"""
def get_text_fragments():
# Show navigation info.
return [("fg:red", "Expanding object ...")]
return ConditionalContainer(
content = Window(
FormattedTextControl(get_text_fragments),
style = "class:sidebar",
width=Dimension.exact( 45 ),
height=Dimension(max = 1),
),
filter = ~is_done
& ShowSidebar(my_app)
& Condition(
lambda: my_app.show_expanding_object
))
def sql_sidebar_navigation():
"""
Create the `Layout` showing the navigation information for the sidebar.
"""
def get_text_fragments():
# Show navigation info.
return [
("class:sidebar.navigation", " "),
("class:sidebar.navigation.key", "[Up/Dn]"),
("class:sidebar.navigation", " "),
("class:sidebar.navigation.description", "Navigate"),
("class:sidebar.navigation", " "),
("class:sidebar.navigation.key", "[L/R]"),
("class:sidebar.navigation", " "),
("class:sidebar.navigation.description", "Expand/Collapse"),
("class:sidebar.navigation", "\n "),
("class:sidebar.navigation.key", "[Enter]"),
("class:sidebar.navigation", " "),
("class:sidebar.navigation.description", "Connect/Preview"),
]
return Window(
FormattedTextControl(get_text_fragments),
style = "class:sidebar.navigation",
width=Dimension.exact( 45 ),
height=Dimension(max = 2),
)
def show_sidebar_button_info(my_app: "sqlApp") -> Container:
"""
Create `Layout` for the information in the right-bottom corner.
(The right part of the status bar.)
"""
@if_mousedown
def toggle_sidebar(mouse_event: MouseEvent) -> None:
" Click handler for the menu. "
my_app.show_sidebar = not my_app.show_sidebar
# TO DO: app version rather than python
version = sys.version_info
tokens: StyleAndTextTuples = [
("class:status-toolbar.key", "[C-t]", toggle_sidebar),
("class:status-toolbar", " Object Browser", toggle_sidebar),
("class:status-toolbar", " - "),
("class:status-toolbar.cli-version", "odbcli %s" % __version__),
("class:status-toolbar", " "),
]
width = fragment_list_width(tokens)
def get_text_fragments() -> StyleAndTextTuples:
# Python version
return tokens
return ConditionalContainer(
content=Window(
FormattedTextControl(get_text_fragments),
style="class:status-toolbar",
height=Dimension.exact(1),
width=Dimension.exact(width),
),
filter=~is_done
& Condition(
lambda: not my_app.show_exit_confirmation
)
& renderer_height_is_known
)
def sql_sidebar(my_app: "sqlApp") -> Window:
"""
Create the `Layout` for the sidebar with the configurable objects.
"""
@if_mousedown
def expand_item(obj: "myDBObject") -> None:
obj.expand()
def tokenize_obj(obj: "myDBObject") -> StyleAndTextTuples:
" Recursively build the token list "
tokens: StyleAndTextTuples = []
selected = obj is my_app.selected_object
expanded = obj.children is not None
connected = obj.otype == "Connection" and obj.conn.connected()
active = my_app.active_conn is not None and my_app.active_conn is obj.conn and obj.level == 0
act = ",active" if active else ""
sel = ",selected" if selected else ""
if len(obj.name) > 24 - 2 * obj.level:
name_trim = obj.name[:24 - 2 * obj.level - 3] + "..."
else:
name_trim = ("%-" + str(24 - 2 * obj.level) + "s") % obj.name
tokens.append(("class:sidebar.label" + sel + act, " >" if connected else " "))
tokens.append(
("class:sidebar.label" + sel, " " * 2 * obj.level, expand_item)
)
tokens.append(
("class:sidebar.label" + sel + act,
name_trim,
expand_item)
)
tokens.append(("class:sidebar.status" + sel + act, " ", expand_item))
tokens.append(("class:sidebar.status" + sel + act, "%+12s" % obj.otype, expand_item))
if selected:
tokens.append(("[SetCursorPosition]", ""))
if expanded:
tokens.append(("class:sidebar.status" + sel + act, "\/"))
else:
tokens.append(("class:sidebar.status" + sel + act, " <" if selected else " "))
# Expand past the edge of the visible buffer to get an even panel
tokens.append(("class:sidebar.status" + sel + act, " " * 10))
return tokens
search_buffer = Buffer(name = "sidebarsearchbuffer")
search_field = SearchToolbar(
search_buffer = search_buffer,
ignore_case = True
)
def _buffer_pos_changed(buff):
""" This callback gets executed after cursor position changes. Most
of the time we register a key-press (up / down), we change the
selected object and as a result of that the cursor changes. By that
time we don't need to updat the selected object (cursor changed as
a result of the selected object being updated). The one exception
is when searching the sidebar buffer. When this happens the cursor
moves ahead of the selected object. When that happens, here we
update the selected object to follow suit.
"""
if buff.document.cursor_position_row != my_app.selected_object_idx[0]:
my_app.select(buff.document.cursor_position_row)
sidebar_buffer = Buffer(
name = "sidebarbuffer",
read_only = True,
on_cursor_position_changed = _buffer_pos_changed
)
class myLexer(Lexer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._obj_list = []
def add_objects(self, objects: List):
self._obj_list = objects
def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
def get_line(lineno: int) -> StyleAndTextTuples:
# TODO: raise out-of-range exception
return tokenize_obj(self._obj_list[lineno])
return get_line
sidebar_lexer = myLexer()
class myControl(BufferControl):
def move_cursor_down(self):
my_app.select_next()
# Need to figure out what do do here
# AFAICT these are only called for the mouse handler
# when events are otherwise not handled
def move_cursor_up(self):
my_app.select_previous()
def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
There is an intricate relationship between the cursor position
in the sidebar document and which object is market as 'selected'
in the linked list. Let's not muck that up by allowing the user
to change the cursor position in the sidebar document with the mouse.
"""
return NotImplemented
def create_content(self, width: int, height: Optional[int]) -> UIContent:
# Only traverse the obj_list if it has been expanded / collapsed
if not my_app.obj_list_changed:
self.buffer.cursor_position = my_app.selected_object_idx[1]
return super().create_content(width, height)
res = []
obj = my_app.obj_list[0]
res.append(obj)
while obj.next_object is not my_app.obj_list[0]:
res.append(obj.next_object)
obj = obj.next_object
self.lexer.add_objects(res)
self.buffer.set_document(Document(
text = "\n".join([a.name for a in res]), cursor_position = my_app.selected_object_idx[1]), True)
# Reset obj_list_changed flag, now that we have had a chance to
# regenerate the sidebar document content
my_app.obj_list_changed = False
return super().create_content(width, height)
sidebar_control = myControl(
buffer = sidebar_buffer,
lexer = sidebar_lexer,
search_buffer_control = search_field.control,
focusable = True,
)
return HSplit([
search_field,
Window(
sidebar_control,
right_margins = [ScrollbarMargin(display_arrows = True)],
style = "class:sidebar",
width = Dimension.exact( 45 ),
height = Dimension(min = 7, preferred = 33),
scroll_offsets = ScrollOffsets(top = 1, bottom = 1)),
Window(
height = Dimension.exact(1),
char = "\u2500",
style = "class:sidebar,separator",
),
expanding_object_notification(my_app),
sql_sidebar_navigation()])
| import sys
import platform
from cyanodbc import Connection
from typing import List, Optional, Callable
from logging import getLogger
from asyncio import get_event_loop
from threading import Thread, Lock
from prompt_toolkit.layout.containers import HSplit, Window, ScrollOffsets, ConditionalContainer, Container
from prompt_toolkit.formatted_text.base import StyleAndTextTuples
from prompt_toolkit.formatted_text import fragment_list_width
from prompt_toolkit.layout.controls import FormattedTextControl, BufferControl, UIContent
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.filters import is_done, renderer_height_is_known
from prompt_toolkit.layout.margins import ScrollbarMargin
from prompt_toolkit.mouse_events import MouseEvent
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.widgets import SearchToolbar
from prompt_toolkit.filters import Condition
from .conn import sqlConnection
from .filters import ShowSidebar
from .utils import if_mousedown
from .__init__ import __version__
class myDBObject:
def __init__(
self,
my_app: "sqlApp",
conn: sqlConnection,
name: str,
otype: str,
level: Optional[int] = 0,
children: Optional[List["myDBObject"]] = None,
parent: Optional["myDBObject"] = None,
next_object: Optional["myDBObject"] = None
) -> None:
self.my_app = my_app
self.conn = conn
self.children = children
self.parent = parent
self.next_object = next_object
# Held while modifying children, parent, next_object
# As some of thes operatins (expand) happen asynchronously
self._lock = Lock()
self.name = name
self.otype = otype
self.level = level
self.selected: bool = False
def _expand_internal(self) -> None:
"""
Populates children and sets parent for children nodes
"""
raise NotImplementedError()
def expand(self) -> None:
"""
Populates children and sets parent for children nodes
"""
if self.children is not None:
return None
loop = get_event_loop()
self.my_app.show_expanding_object = True
self.my_app.application.invalidate()
def _redraw_after_io():
""" Callback, scheduled after threaded I/O
completes """
self.my_app.show_expanding_object = False
self.my_app.obj_list_changed = True
self.my_app.application.invalidate()
def _run():
""" Executes in a thread """
self._expand_internal() # Blocking I/O
loop.call_soon_threadsafe(_redraw_after_io)
# (Don't use 'run_in_executor', because daemon is ideal here.
t = Thread(target = _run, daemon = True)
t.start()
def collapse(self) -> None:
"""
Populates children and sets parent for children nodes
Note, we don't have to blow up the children; just redirect
next_object. This way we re-query the database / force re-fresh
which may be suboptimal. TODO: Codify not/refresh path
"""
if self is not self.my_app.selected_object:
return
if self.children is not None:
obj = self.children[len(self.children) - 1].next_object
while obj.level > self.level:
obj = obj.next_object
with self._lock:
self.next_object = obj
self.children = None
elif self.parent is not None:
self.my_app.selected_object = self.parent
self.parent.collapse()
self.my_app.obj_list_changed = True
def add_children(self, list_obj: List["myDBObject"]) -> None:
lst = list(filter(lambda x: x.name != "", list_obj))
if len(lst):
with self._lock:
self.children = lst
for i in range(len(self.children) - 1):
self.children[i].next_object = self.children[i + 1]
self.children[len(self.children) - 1].next_object = self.next_object
self.next_object = self.children[0]
class myDBColumn(myDBObject):
def _expand_internal(self) -> None:
return None
class myDBFunction(myDBObject):
def _expand_internal(self) -> None:
cat = "%"
schema = "%"
# https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlprocedurecolumns-function?view=sql-server-ver15
# CatalogName cannot contain a string search pattern
if self.parent is not None:
if type(self.parent).__name__ == "myDBSchema":
schema = self.conn.sanitize_search_string(self.parent.name)
elif type(self.parent).__name__ == "myDBCatalog":
cat = self.parent.name
if self.parent.parent is not None:
if type(self.parent.parent).__name__ == "myDBCatalog":
cat = self.parent.parent.name
res = self.conn.find_procedure_columns(
catalog = cat,
schema = schema,
procedure = self.conn.sanitize_search_string(self.name),
column = "%")
lst = [myDBColumn(
my_app = self.my_app,
conn = self.conn,
name = col.column,
otype = col.type_name,
parent = self,
level = self.level + 1) for col in res]
self.add_children(list_obj = lst)
return None
class myDBTable(myDBObject):
def _expand_internal(self) -> None:
cat = "%"
schema = "%"
# https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlcolumns-function?view=sql-server-ver15
# CatalogName cannot contain a string search pattern
if self.parent is not None:
if type(self.parent).__name__ == "myDBSchema":
schema = self.conn.sanitize_search_string(self.parent.name)
elif type(self.parent).__name__ == "myDBCatalog":
cat = self.parent.name
if self.parent.parent is not None:
if type(self.parent.parent).__name__ == "myDBCatalog":
cat = self.parent.parent.name
res = self.conn.find_columns(
catalog = cat,
schema = schema,
table = self.name,
column = "%")
lst = [myDBColumn(
my_app = self.my_app,
conn = self.conn,
name = col.column,
otype = col.type_name,
parent = self,
level = self.level + 1) for col in res]
self.add_children(list_obj = lst)
return None
class myDBSchema(myDBObject):
def _expand_internal(self) -> None:
cat = self.conn.sanitize_search_string(self.parent.name) if self.parent is not None else "%"
res = self.conn.find_tables(
catalog = cat,
schema = self.conn.sanitize_search_string(self.name),
table = "",
type = "")
resf = self.conn.find_procedures(
catalog = cat,
schema = self.conn.sanitize_search_string(self.name),
procedure = "")
tables = []
views = []
functions = []
lst = []
for table in res:
if table.type.lower() == 'table':
tables.append(table.name)
if table.type.lower() == 'view':
views.append(table.name)
lst.append(myDBTable(
my_app = self.my_app,
conn = self.conn,
name = table.name,
otype = table.type.lower(),
parent = self,
level = self.level + 1))
for func in resf:
functions.append(func.name)
lst.append(myDBFunction(
my_app = self.my_app,
conn = self.conn,
name = func.name,
otype = "function",
parent = self,
level = self.level + 1))
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.parent.name) if self.parent else "",
schema = self.conn.escape_name(self.name),
names = self.conn.escape_names(tables),
obj_type = "table")
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.parent.name) if self.parent else "",
schema = self.conn.escape_name(self.name),
names = self.conn.escape_names(views),
obj_type = "view")
self.conn.dbmetadata.extend_objects(
catalog = self.conn.escape_name(self.parent.name) if self.parent else "",
schema = self.conn.escape_name(self.name),
names = self.conn.escape_names(functions),
obj_type = "function")
self.add_children(list_obj = lst)
return None
class myDBCatalog(myDBObject):
    # Sidebar node representing a database catalog.  Children are schemas
    # when the driver exposes them, otherwise tables/views directly.
    def _expand_internal(self) -> None:
        """List this catalog's schemas; if the driver reports none (or only
        empty schema names, e.g. MySQL), fall back to listing tables directly
        under the catalog."""
        # Both names start as the *same* empty list; each is rebound below,
        # so lst only stays [] when neither branch produces children.
        schemas = lst = []
        schemas = self.conn.list_schemas(
            catalog = self.conn.sanitize_search_string(self.name))
        if len(schemas) < 1 or all([s == "" for s in schemas]):
            # No usable schema list from the driver; derive schema names
            # from the tables instead.
            res = self.conn.find_tables(
                catalog = self.conn.sanitize_search_string(self.name),
                schema = "",
                table = "",
                type = "")
            schemas = [r.schema for r in res]
        self.conn.dbmetadata.extend_schemas(
            catalog = self.conn.escape_name(self.name),
            names = self.conn.escape_names(schemas))
        if not all([s == "" for s in schemas]):
            # Schemas were found either having called list_schemas
            # or via the find_tables call
            lst = [myDBSchema(
                my_app = self.my_app,
                conn = self.conn,
                name = schema,
                otype = "schema",
                parent = self,
                level = self.level + 1) for schema in sorted(set(schemas))]
        elif len(schemas):
            # No schemas found; but if there are tables then these are direct
            # descendents, i.e. MySQL
            # NOTE: `res` is only assigned in the fallback branch above, but
            # this branch is reachable only when all schema names were empty,
            # which forces that branch to have run — so `res` is defined here.
            tables = []
            views = []
            lst = []
            for table in res:
                if table.type.lower() == 'table':
                    tables.append(table.name)
                if table.type.lower() == 'view':
                    views.append(table.name)
                lst.append(myDBTable(
                    my_app = self.my_app,
                    conn = self.conn,
                    name = table.name,
                    otype = table.type.lower(),
                    parent = self,
                    level = self.level + 1))
            self.conn.dbmetadata.extend_objects(
                catalog = self.conn.escape_name(self.name),
                schema = "", names = self.conn.escape_names(tables),
                obj_type = "table")
            self.conn.dbmetadata.extend_objects(
                catalog = self.conn.escape_name(self.name),
                schema = "", names = self.conn.escape_names(views),
                obj_type = "view")
        self.add_children(list_obj = lst)
        return None
class myDBConn(myDBObject):
    # Sidebar root node for a single database connection.
    def _expand_internal(self) -> None:
        """Populate children for the connection: catalogs when the driver
        supports them; otherwise schemas derived from the table list; or,
        failing that, tables directly under the connection (MySQL-style)."""
        if not self.conn.connected():
            # Nothing to enumerate without a live connection.
            return None
        lst = []
        cat_support = self.conn.catalog_support()
        if cat_support:
            rows = self.conn.list_catalogs()
            if len(rows):
                lst = [myDBCatalog(
                    my_app = self.my_app,
                    conn = self.conn,
                    name = row,
                    otype = "catalog",
                    parent = self,
                    level = self.level + 1) for row in rows]
                self.conn.dbmetadata.extend_catalogs(
                    self.conn.escape_names(rows))
        else:
            # No catalog support: derive schema names from all visible tables.
            res = self.conn.find_tables(
                catalog = "%",
                schema = "",
                table = "",
                type = "")
            schemas = [r.schema for r in res]
            self.conn.dbmetadata.extend_schemas(catalog = "",
                names = self.conn.escape_names(schemas))
            if not all([s == "" for s in schemas]):
                lst = [myDBSchema(
                    my_app = self.my_app,
                    conn = self.conn,
                    name = schema,
                    otype = "schema",
                    parent = self,
                    level = self.level + 1) for schema in sorted(set(schemas))]
            elif len(schemas):
                # All schema names empty: tables hang directly off the
                # connection node.
                tables = []
                views = []
                lst = []
                for table in res:
                    if table.type.lower() == 'table':
                        tables.append(table.name)
                    if table.type.lower() == 'view':
                        views.append(table.name)
                    lst.append(myDBTable(
                        my_app = self.my_app,
                        conn = self.conn,
                        name = table.name,
                        otype = table.type.lower(),
                        parent = self,
                        level = self.level + 1))
                self.conn.dbmetadata.extend_objects(catalog = "",
                    schema = "", names = self.conn.escape_names(tables),
                    obj_type = "table")
                self.conn.dbmetadata.extend_objects(catalog = "",
                    schema = "", names = self.conn.escape_names(views),
                    obj_type = "view")
        self.add_children(list_obj = lst)
        return None
def sql_sidebar_help(my_app: "sqlApp"):
    """
    Create the `Layout` for the help text for the current item in the sidebar.

    Shown below the sidebar whenever the sidebar is visible and the exit
    confirmation is not.
    """
    # FIX: the style string here read "class:<PASSWORD>" — an automated
    # redaction artifact, not valid prompt_toolkit style syntax.  Restore the
    # sidebar help-text style class (same naming convention as the other
    # "class:sidebar.*" styles in this module, following ptpython's sidebar).
    token = "class:sidebar.helptext"
    def get_current_description():
        """
        Return the description of the selected option.
        """
        obj = my_app.selected_object
        if obj is not None:
            return obj.name
        return ""
    def get_help_text():
        # Single fragment: the selected object's name in the help style.
        return [(token, get_current_description())]
    return ConditionalContainer(
        content=Window(
            FormattedTextControl(get_help_text), style=token, height=Dimension(min=3)
        ),
        filter = ~is_done
            & ShowSidebar(my_app)
            & Condition(
                lambda: not my_app.show_exit_confirmation
            ))
def expanding_object_notification(my_app: "sqlApp"):
    """
    Create the `Layout` for the 'Expanding object' notification.

    Rendered while a sidebar node is being expanded asynchronously.
    """
    def _fragments():
        # Static red notification text.
        return [("fg:red", "Expanding object ...")]
    visible = (
        ~is_done
        & ShowSidebar(my_app)
        & Condition(lambda: my_app.show_expanding_object)
    )
    notification = Window(
        FormattedTextControl(_fragments),
        style = "class:sidebar",
        width = Dimension.exact(45),
        height = Dimension(max = 1),
    )
    return ConditionalContainer(content = notification, filter = visible)
def sql_sidebar_navigation():
    """
    Create the `Layout` showing the navigation information for the sidebar.
    """
    base = "class:sidebar.navigation"
    def _fragments():
        # Two lines of key-binding hints: each entry is (separator, key,
        # description); the second and third share the same layout.
        hints = (
            (" ", "[Up/Dn]", "Navigate"),
            (" ", "[L/R]", "Expand/Collapse"),
            ("\n ", "[Enter]", "Connect/Preview"),
        )
        fragments = []
        for separator, key, description in hints:
            fragments.append((base, separator))
            fragments.append((base + ".key", key))
            fragments.append((base, " "))
            fragments.append((base + ".description", description))
        return fragments
    return Window(
        FormattedTextControl(_fragments),
        style = base,
        width = Dimension.exact(45),
        height = Dimension(max = 2),
    )
def show_sidebar_button_info(my_app: "sqlApp") -> Container:
    """
    Create `Layout` for the information in the right-bottom corner.
    (The right part of the status bar.)
    """
    @if_mousedown
    def toggle_sidebar(mouse_event: MouseEvent) -> None:
        " Click handler for the menu. "
        my_app.show_sidebar = not my_app.show_sidebar
    # FIX: removed the unused local `version = sys.version_info` (and the
    # stale "python version" comments referring to it) — the toolbar shows
    # the odbcli __version__, not the interpreter version.
    # The fragments are static, so build them once; clicking the key/label
    # fragments toggles the sidebar via the attached mouse handler.
    tokens: StyleAndTextTuples = [
        ("class:status-toolbar.key", "[C-t]", toggle_sidebar),
        ("class:status-toolbar", " Object Browser", toggle_sidebar),
        ("class:status-toolbar", " - "),
        ("class:status-toolbar.cli-version", "odbcli %s" % __version__),
        ("class:status-toolbar", " "),
    ]
    width = fragment_list_width(tokens)
    def get_text_fragments() -> StyleAndTextTuples:
        return tokens
    return ConditionalContainer(
        content=Window(
            FormattedTextControl(get_text_fragments),
            style="class:status-toolbar",
            height=Dimension.exact(1),
            width=Dimension.exact(width),
        ),
        filter=~is_done
        & Condition(
            lambda: not my_app.show_exit_confirmation
        )
        & renderer_height_is_known
    )
def sql_sidebar(my_app: "sqlApp") -> Window:
    """
    Create the `Layout` for the sidebar with the configurable objects.
    """
    @if_mousedown
    def expand_item(obj: "myDBObject") -> None:
        # Mouse handler attached to each row's fragments: expand/collapse.
        obj.expand()
    def tokenize_obj(obj: "myDBObject") -> StyleAndTextTuples:
        " Recursively build the token list "
        tokens: StyleAndTextTuples = []
        selected = obj is my_app.selected_object
        expanded = obj.children is not None
        connected = obj.otype == "Connection" and obj.conn.connected()
        active = my_app.active_conn is not None and my_app.active_conn is obj.conn and obj.level == 0
        act = ",active" if active else ""
        sel = ",selected" if selected else ""
        # Fit the name into a 24-character field, losing 2 columns of width
        # per nesting level; overlong names get a "..." suffix.
        if len(obj.name) > 24 - 2 * obj.level:
            name_trim = obj.name[:24 - 2 * obj.level - 3] + "..."
        else:
            name_trim = ("%-" + str(24 - 2 * obj.level) + "s") % obj.name
        # ">" marks a connected connection node.
        tokens.append(("class:sidebar.label" + sel + act, " >" if connected else " "))
        tokens.append(
            ("class:sidebar.label" + sel, " " * 2 * obj.level, expand_item)
        )
        tokens.append(
            ("class:sidebar.label" + sel + act,
                name_trim,
                expand_item)
        )
        tokens.append(("class:sidebar.status" + sel + act, " ", expand_item))
        tokens.append(("class:sidebar.status" + sel + act, "%+12s" % obj.otype, expand_item))
        if selected:
            tokens.append(("[SetCursorPosition]", ""))
        if expanded:
            tokens.append(("class:sidebar.status" + sel + act, "\/"))
        else:
            tokens.append(("class:sidebar.status" + sel + act, " <" if selected else " "))
        # Expand past the edge of the visible buffer to get an even panel
        tokens.append(("class:sidebar.status" + sel + act, " " * 10))
        return tokens
    search_buffer = Buffer(name = "sidebarsearchbuffer")
    search_field = SearchToolbar(
        search_buffer = search_buffer,
        ignore_case = True
    )
    def _buffer_pos_changed(buff):
        """ This callback gets executed after cursor position changes. Most
            of the time we register a key-press (up / down), we change the
            selected object and as a result of that the cursor changes. By that
            time we don't need to update the selected object (cursor changed as
            a result of the selected object being updated). The one exception
            is when searching the sidebar buffer. When this happens the cursor
            moves ahead of the selected object. When that happens, here we
            update the selected object to follow suit.
        """
        if buff.document.cursor_position_row != my_app.selected_object_idx[0]:
            my_app.select(buff.document.cursor_position_row)
    sidebar_buffer = Buffer(
        name = "sidebarbuffer",
        read_only = True,
        on_cursor_position_changed = _buffer_pos_changed
    )
    class myLexer(Lexer):
        # Lexer that styles each sidebar line by tokenizing the corresponding
        # myDBObject (one object per document line).
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._obj_list = []
        def add_objects(self, objects: List):
            # Keep the flat object list in sync with the document lines.
            self._obj_list = objects
        def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
            def get_line(lineno: int) -> StyleAndTextTuples:
                # TODO: raise out-of-range exception
                return tokenize_obj(self._obj_list[lineno])
            return get_line
    sidebar_lexer = myLexer()
    class myControl(BufferControl):
        def move_cursor_down(self):
            my_app.select_next()
        # Need to figure out what to do here
        # AFAICT these are only called for the mouse handler
        # when events are otherwise not handled
        def move_cursor_up(self):
            my_app.select_previous()
        def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
            """
            There is an intricate relationship between the cursor position
            in the sidebar document and which object is marked as 'selected'
            in the linked list. Let's not muck that up by allowing the user
            to change the cursor position in the sidebar document with the mouse.
            """
            return NotImplemented
        def create_content(self, width: int, height: Optional[int]) -> UIContent:
            # Only traverse the obj_list if it has been expanded / collapsed
            if not my_app.obj_list_changed:
                self.buffer.cursor_position = my_app.selected_object_idx[1]
                return super().create_content(width, height)
            # Walk the circular linked list of sidebar objects once to build
            # the flat list backing the document (one line per object).
            res = []
            obj = my_app.obj_list[0]
            res.append(obj)
            while obj.next_object is not my_app.obj_list[0]:
                res.append(obj.next_object)
                obj = obj.next_object
            self.lexer.add_objects(res)
            self.buffer.set_document(Document(
                text = "\n".join([a.name for a in res]), cursor_position = my_app.selected_object_idx[1]), True)
            # Reset obj_list_changed flag, now that we have had a chance to
            # regenerate the sidebar document content
            my_app.obj_list_changed = False
            return super().create_content(width, height)
    sidebar_control = myControl(
        buffer = sidebar_buffer,
        lexer = sidebar_lexer,
        search_buffer_control = search_field.control,
        focusable = True,
    )
    return HSplit([
        search_field,
        Window(
            sidebar_control,
            right_margins = [ScrollbarMargin(display_arrows = True)],
            style = "class:sidebar",
            width = Dimension.exact( 45 ),
            height = Dimension(min = 7, preferred = 33),
            scroll_offsets = ScrollOffsets(top = 1, bottom = 1)),
        Window(
            height = Dimension.exact(1),
            char = "\u2500",
            style = "class:sidebar,separator",
        ),
        expanding_object_notification(my_app),
        sql_sidebar_navigation()])
| en | 0.833659 | # Held while modifying children, parent, next_object # As some of thes operatins (expand) happen asynchronously Populates children and sets parent for children nodes Populates children and sets parent for children nodes Callback, scheduled after threaded I/O completes Executes in a thread # Blocking I/O # (Don't use 'run_in_executor', because daemon is ideal here. Populates children and sets parent for children nodes Note, we don't have to blow up the children; just redirect next_object. This way we re-query the database / force re-fresh which may be suboptimal. TODO: Codify not/refresh path # https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlprocedurecolumns-function?view=sql-server-ver15 # CatalogName cannot contain a string search pattern # https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlcolumns-function?view=sql-server-ver15 # CatalogName cannot contain a string search pattern # Schemas were found either having called list_schemas # or via the find_tables call # No schemas found; but if there are tables then these are direct # descendents, i.e. MySQL Create the `Layout` for the help text for the current item in the sidebar. Return the description of the selected option. Create the `Layout` for the 'Expanding object' notification. # Show navigation info. Create the `Layout` showing the navigation information for the sidebar. # Show navigation info. Create `Layout` for the information in the right-bottom corner. (The right part of the status bar.) # TO DO: app version rather than python # Python version Create the `Layout` for the sidebar with the configurable objects. # Expand past the edge of the visible buffer to get an even panel This callback gets executed after cursor position changes. Most of the time we register a key-press (up / down), we change the selected object and as a result of that the cursor changes. 
By that time we don't need to updat the selected object (cursor changed as a result of the selected object being updated). The one exception is when searching the sidebar buffer. When this happens the cursor moves ahead of the selected object. When that happens, here we update the selected object to follow suit. # TODO: raise out-of-range exception # Need to figure out what do do here # AFAICT these are only called for the mouse handler # when events are otherwise not handled There is an intricate relationship between the cursor position in the sidebar document and which object is market as 'selected' in the linked list. Let's not muck that up by allowing the user to change the cursor position in the sidebar document with the mouse. # Only traverse the obj_list if it has been expanded / collapsed # Reset obj_list_changed flag, now that we have had a chance to # regenerate the sidebar document content | 1.87522 | 2 |
dev.py | nitipit/icon | 0 | 6618258 | import asyncio
async def parcel():
    """Start Parcel in watch mode for the docs target and wait for it."""
    command = "npx parcel watch --target=docs --no-cache 'docs-src/**/*.(ts|js|svg)'"
    print(command)
    process = await asyncio.create_subprocess_shell(command)
    await process.communicate()
async def engrave():
    """Run the engrave dev server for the docs and wait for it to exit."""
    command = "engrave dev docs-src docs --server=0.0.0.0:8000"
    print(command)
    process = await asyncio.create_subprocess_shell(command)
    await process.communicate()
async def main():
    """Run the active watcher coroutines concurrently until they exit."""
    # Note: engrave() exists but is intentionally not started here.
    await asyncio.gather(
        parcel(),
    )
asyncio.run(main()) | import asyncio
async def parcel():
    # Watch-and-rebuild the docs bundle via Parcel (cache disabled).
    cmd = "npx parcel watch --target=docs --no-cache 'docs-src/**/*.(ts|js|svg)'"
    print(cmd)
    proc = await asyncio.create_subprocess_shell(cmd)
    await proc.communicate()
async def engrave():
    # Serve the docs with the `engrave` dev server on 0.0.0.0:8000.
    cmd = "engrave dev docs-src docs --server=0.0.0.0:8000"
    print(cmd)
    proc = await asyncio.create_subprocess_shell(cmd)
    await proc.communicate()
async def main():
    # Run the watcher(s) concurrently; engrave is currently disabled.
    await asyncio.gather(
        parcel(),
        # engrave(),
    )
asyncio.run(main()) | de | 0.302832 | # engrave(), | 2.560009 | 3 |
sdk/python/pulumi_google_native/bigtableadmin/v2/table.py | AaronFriel/pulumi-google-native | 44 | 6618259 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TableArgs', 'Table']
@pulumi.input_type
class TableArgs:
    # NOTE: auto-generated by the Pulumi SDK generator (see file header) —
    # change the generator/schema rather than editing this class by hand.
    def __init__(__self__, *,
                 instance_id: pulumi.Input[str],
                 table_id: pulumi.Input[str],
                 column_families: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 granularity: Optional[pulumi.Input['TableGranularity']] = None,
                 initial_splits: Optional[pulumi.Input[Sequence[pulumi.Input['SplitArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Table resource.
        :param pulumi.Input[str] table_id: The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_families: The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        :param pulumi.Input['TableGranularity'] granularity: Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        :param pulumi.Input[Sequence[pulumi.Input['SplitArgs']]] initial_splits: The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.`
        :param pulumi.Input[str] name: The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        """
        # Required arguments are always set; optionals only when provided.
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "table_id", table_id)
        if column_families is not None:
            pulumi.set(__self__, "column_families", column_families)
        if granularity is not None:
            pulumi.set(__self__, "granularity", granularity)
        if initial_splits is not None:
            pulumi.set(__self__, "initial_splits", initial_splits)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        # Presumably the ID of the parent Bigtable instance — the provider
        # schema supplies no description for this property.
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="tableId")
    def table_id(self) -> pulumi.Input[str]:
        """
        The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.
        """
        return pulumi.get(self, "table_id")
    @table_id.setter
    def table_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "table_id", value)
    @property
    @pulumi.getter(name="columnFamilies")
    def column_families(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        """
        return pulumi.get(self, "column_families")
    @column_families.setter
    def column_families(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "column_families", value)
    @property
    @pulumi.getter
    def granularity(self) -> Optional[pulumi.Input['TableGranularity']]:
        """
        Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        """
        return pulumi.get(self, "granularity")
    @granularity.setter
    def granularity(self, value: Optional[pulumi.Input['TableGranularity']]):
        pulumi.set(self, "granularity", value)
    @property
    @pulumi.getter(name="initialSplits")
    def initial_splits(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SplitArgs']]]]:
        """
        The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.`
        """
        return pulumi.get(self, "initial_splits")
    @initial_splits.setter
    def initial_splits(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SplitArgs']]]]):
        pulumi.set(self, "initial_splits", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        # Presumably the GCP project to create the table in — the provider
        # schema supplies no description for this property.
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
class Table(pulumi.CustomResource):
    # NOTE: auto-generated resource class — regenerate via the SDK generator
    # rather than editing by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 column_families: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 granularity: Optional[pulumi.Input['TableGranularity']] = None,
                 initial_splits: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 table_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_families: The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        :param pulumi.Input['TableGranularity'] granularity: Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]] initial_splits: The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.`
        :param pulumi.Input[str] name: The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        :param pulumi.Input[str] table_id: The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TableArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request.
        :param str resource_name: The name of the resource.
        :param TableArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: a single TableArgs bundle
        # or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(TableArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 column_families: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 granularity: Optional[pulumi.Input['TableGranularity']] = None,
                 initial_splits: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 table_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TableArgs.__new__(TableArgs)
            __props__.__dict__["column_families"] = column_families
            __props__.__dict__["granularity"] = granularity
            __props__.__dict__["initial_splits"] = initial_splits
            # instance_id/table_id are required unless looking up by URN.
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            __props__.__dict__["name"] = name
            __props__.__dict__["project"] = project
            if table_id is None and not opts.urn:
                raise TypeError("Missing required property 'table_id'")
            __props__.__dict__["table_id"] = table_id
            # Output-only properties start as None (presumably resolved by
            # the Pulumi engine after creation).
            __props__.__dict__["cluster_states"] = None
            __props__.__dict__["restore_info"] = None
        super(Table, __self__).__init__(
            'google-native:bigtableadmin/v2:Table',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Table':
        """
        Get an existing Table resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; they are filled in from provider
        # state when the resource is read back.
        __props__ = TableArgs.__new__(TableArgs)
        __props__.__dict__["cluster_states"] = None
        __props__.__dict__["column_families"] = None
        __props__.__dict__["granularity"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["restore_info"] = None
        return Table(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="clusterStates")
    def cluster_states(self) -> pulumi.Output[Mapping[str, str]]:
        """
        Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN `replication_status`. Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL`
        """
        return pulumi.get(self, "cluster_states")
    @property
    @pulumi.getter(name="columnFamilies")
    def column_families(self) -> pulumi.Output[Mapping[str, str]]:
        """
        The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        """
        return pulumi.get(self, "column_families")
    @property
    @pulumi.getter
    def granularity(self) -> pulumi.Output[str]:
        """
        Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        """
        return pulumi.get(self, "granularity")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="restoreInfo")
    def restore_info(self) -> pulumi.Output['outputs.RestoreInfoResponse']:
        """
        If this table was restored from another data source (e.g. a backup), this field will be populated with information about the restore.
        """
        return pulumi.get(self, "restore_info")
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TableArgs', 'Table']
@pulumi.input_type
class TableArgs:
def __init__(__self__, *,
instance_id: pulumi.Input[str],
table_id: pulumi.Input[str],
column_families: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
granularity: Optional[pulumi.Input['TableGranularity']] = None,
initial_splits: Optional[pulumi.Input[Sequence[pulumi.Input['SplitArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Table resource.
:param pulumi.Input[str] table_id: The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_families: The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
:param pulumi.Input['TableGranularity'] granularity: Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
:param pulumi.Input[Sequence[pulumi.Input['SplitArgs']]] initial_splits: The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.`
:param pulumi.Input[str] name: The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
"""
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "table_id", table_id)
if column_families is not None:
pulumi.set(__self__, "column_families", column_families)
if granularity is not None:
pulumi.set(__self__, "granularity", granularity)
if initial_splits is not None:
pulumi.set(__self__, "initial_splits", initial_splits)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
    # Required input: resource construction raises TypeError when omitted.
    @property
    @pulumi.getter(name="tableId")
    def table_id(self) -> pulumi.Input[str]:
        """
        The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.
        """
        return pulumi.get(self, "table_id")
    @table_id.setter
    def table_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "table_id", value)
    # Optional input; only forwarded to the provider when not None.
    @property
    @pulumi.getter(name="columnFamilies")
    def column_families(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        """
        return pulumi.get(self, "column_families")
    @column_families.setter
    def column_families(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "column_families", value)
    # Optional input; defaults server-side to MILLIS when unspecified.
    @property
    @pulumi.getter
    def granularity(self) -> Optional[pulumi.Input['TableGranularity']]:
        """
        Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        """
        return pulumi.get(self, "granularity")
    @granularity.setter
    def granularity(self, value: Optional[pulumi.Input['TableGranularity']]):
        pulumi.set(self, "granularity", value)
    # Optional input; used only at table creation time.
    @property
    @pulumi.getter(name="initialSplits")
    def initial_splits(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SplitArgs']]]]:
        """
        The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.`
        """
        return pulumi.get(self, "initial_splits")
    @initial_splits.setter
    def initial_splits(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SplitArgs']]]]):
        pulumi.set(self, "initial_splits", value)
    # Optional input; the provider derives the full resource name when omitted.
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    # Optional input. Not documented in the generated schema; presumably the
    # Google Cloud project for the resource — confirm against the provider.
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
class Table(pulumi.CustomResource):
    # NOTE: generated by the Pulumi SDK generator; keep edits to comments only.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 column_families: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 granularity: Optional[pulumi.Input['TableGranularity']] = None,
                 initial_splits: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 table_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_families: The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        :param pulumi.Input['TableGranularity'] granularity: Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]] initial_splits: The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.`
        :param pulumi.Input[str] name: The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        :param pulumi.Input[str] table_id: The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TableArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request.
        :param str resource_name: The name of the resource.
        :param TableArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above depending on whether the
        # caller passed a TableArgs bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(TableArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       column_families: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       granularity: Optional[pulumi.Input['TableGranularity']] = None,
                       initial_splits: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]]] = None,
                       instance_id: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       table_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the input property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TableArgs.__new__(TableArgs)
            __props__.__dict__["column_families"] = column_families
            __props__.__dict__["granularity"] = granularity
            __props__.__dict__["initial_splits"] = initial_splits
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            __props__.__dict__["name"] = name
            __props__.__dict__["project"] = project
            if table_id is None and not opts.urn:
                raise TypeError("Missing required property 'table_id'")
            __props__.__dict__["table_id"] = table_id
            # Output-only properties start as None; the engine resolves them.
            __props__.__dict__["cluster_states"] = None
            __props__.__dict__["restore_info"] = None
        super(Table, __self__).__init__(
            'google-native:bigtableadmin/v2:Table',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Table':
        """
        Get an existing Table resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = TableArgs.__new__(TableArgs)
        # All properties are unknown until refreshed from the provider.
        __props__.__dict__["cluster_states"] = None
        __props__.__dict__["column_families"] = None
        __props__.__dict__["granularity"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["restore_info"] = None
        return Table(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="clusterStates")
    def cluster_states(self) -> pulumi.Output[Mapping[str, str]]:
        """
        Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN `replication_status`. Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL`
        """
        return pulumi.get(self, "cluster_states")
    @property
    @pulumi.getter(name="columnFamilies")
    def column_families(self) -> pulumi.Output[Mapping[str, str]]:
        """
        The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
        """
        return pulumi.get(self, "column_families")
    @property
    @pulumi.getter
    def granularity(self) -> pulumi.Output[str]:
        """
        Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`.
        """
        return pulumi.get(self, "granularity")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="restoreInfo")
    def restore_info(self) -> pulumi.Output['outputs.RestoreInfoResponse']:
        """
        If this table was restored from another data source (e.g. a backup), this field will be populated with information about the restore.
        """
        return pulumi.get(self, "restore_info")
| en | 0.723508 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Table resource. :param pulumi.Input[str] table_id: The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_families: The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL` :param pulumi.Input['TableGranularity'] granularity: Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`. :param pulumi.Input[Sequence[pulumi.Input['SplitArgs']]] initial_splits: The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.` :param pulumi.Input[str] name: The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. 
Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters. The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL` Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`. The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.` The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_families: The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL` :param pulumi.Input['TableGranularity'] granularity: Immutable. The granularity (i.e. 
`MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SplitArgs']]]] initial_splits: The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, `s1` and `s2`, three tablets will be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`. Example: * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` `"other", "zz"]` * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` * Key assignment: - Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other) => {"customer_2"}.` - Tablet 5 `[other, ) => {"other", "zz"}.` :param pulumi.Input[str] name: The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` :param pulumi.Input[str] table_id: The name by which the new table should be referred to within the parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. Maximum 50 characters. Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. :param str resource_name: The name of the resource. :param TableArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing Table resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. 
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN `replication_status`. Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL` The column families configured for this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL` Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`. The unique name of the table. Values are of the form `projects/{project}/instances/{instance}/tables/_a-zA-Z0-9*`. Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` If this table was restored from another data source (e.g. a backup), this field will be populated with information about the restore. | 1.753958 | 2 |
server.py | khalilMejri/TalkyWalky | 8 | 6618260 | <reponame>khalilMejri/TalkyWalky
import pika
from Crypto.PublicKey import RSA
from encryption_decryption import rsa_encrypt, rsa_decrypt, get_rsa_key
class Server:
    """RabbitMQ chat server.

    Consumes ``::``-delimited commands from the shared ``main_queue`` and
    replies to each client on its own exclusive queue through the
    ``users_exchange`` direct exchange.  Room messages arrive encrypted
    with the room's public key and are re-encrypted per recipient with
    that recipient's RSA public key.
    """

    def __init__(self):
        # queue name -> {'username': ..., 'pubkey': ...} for logged-in clients.
        self.connected_users = {}
        # Fixed set of chat rooms; each maps to its member queue names.
        self.rooms = {'room1': [], 'room2': [], 'room3': [], 'room4': []}

    def connect(self):
        """Open the AMQP connection, grab a channel and start serving."""
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='localhost'))
        self.channel = self.connection.channel()
        self.receive()

    def receive(self):
        """Declare ``main_queue`` and consume commands forever."""
        self.channel.queue_declare(queue='main_queue')

        def callback(ch, method, properties, body):
            # A command looks like "action::sender_queue::arg1::arg2...".
            tokens = body.decode().split('::')
            action = tokens[0]
            # Clients strip the broker's "amq." prefix from their queue name
            # before sending; restore it so replies can be routed.
            tokens[1] = 'amq.' + tokens[1]
            print("[+] Received this ", body)
            self.handleAction(action, tokens[1:])
            ch.basic_ack(delivery_tag=method.delivery_tag)

        self.channel.basic_consume(queue='main_queue', on_message_callback=callback)
        print('Server Started !! Listening')
        self.channel.start_consuming()

    def handleAction(self, action, tokens):
        """Execute one client command.

        ``tokens[0]`` is always the sender's (already prefixed) queue name.
        Returns True/False for commands that report a status, None for
        'login' and unknown actions.
        """
        if action == 'login':
            # tokens: [queue_name, user_name, rsa_public_key]
            queue_name = tokens[0]
            user_name = tokens[1]
            pubkey = tokens[2].encode()
            self.connected_users.setdefault(queue_name, {'username': user_name, 'pubkey': pubkey})
            self.send(queue_name, "connected::")
            self._broadcast_user_list(skip=queue_name)
        elif action == 'quit':
            queue_name = tokens[0]
            if queue_name in self.connected_users.keys():
                del self.connected_users[queue_name]
                self.send(queue_name, "disconnected::")
                self._broadcast_user_list(skip=queue_name)
                return True
            else:
                self.send(queue_name, "invalid::")
                return False
        elif action == 'getConnectedUsers':
            queue_name = tokens[0]
            if queue_name in self.connected_users.keys():
                self.send(queue_name, "connectedUsers::" + self._user_names())
                return True
            else:
                self.send(queue_name, "notfound::")
                return False
        elif action == 'getUserData':
            # Look up a peer by display name and introduce both users to
            # each other (name, queue and public key in each direction).
            queue_name = tokens[0]
            demanded_user_name = tokens[1]
            for key, val in self.connected_users.items():
                if val['username'] == demanded_user_name:
                    me = self.connected_users[queue_name]
                    self.send(key, "chosen::" + me['username'] + '::' + me['pubkey'].decode() + '::' + queue_name)
                    self.send(queue_name, "username::" + str(val['username']) + "::" + str(key) + "::" + val['pubkey'].decode())
                    return True
            self.send(queue_name, "notfound::")
            return False
        elif action == 'getRooms':
            queue_name = tokens[0]
            if queue_name in self.connected_users.keys():
                self.send(queue_name, "rooms::" + ','.join(self.rooms.keys()))
                return True
            self.send(queue_name, 'notfound::')
            return False
        elif action == 'joinRoom':
            queue_name = tokens[0]
            room = tokens[1]
            # Guard the room name too: an unknown room previously raised
            # KeyError and killed the consumer.
            if queue_name in self.connected_users.keys() and room in self.rooms:
                self.rooms[room].append(queue_name)
                self.send(queue_name, "joinedRoom::" + room + '::')
                return True
            self.send(queue_name, "notfound::")
            return False
        elif action == 'sendToRoom':
            queue_name = tokens[0]
            room = tokens[1]
            # BUGFIX: the sender was looked up (and the payload decrypted)
            # *before* the membership check, so an unknown sender or room
            # crashed the server with KeyError instead of replying
            # 'notfound::'.  Validate first, then do the crypto work.
            if queue_name in self.connected_users.keys() and queue_name in self.rooms.get(room, []):
                user_name = self.connected_users[queue_name]['username']
                # The payload is encrypted with the room's public key;
                # decrypt it with the room's private key first.
                roomPrivateKey = get_rsa_key("./chatrooms-keys/" + room).export_key()
                message = rsa_decrypt(tokens[2].encode(), roomPrivateKey).decode()
                for queue in self.rooms[room]:
                    # Re-encrypt the plaintext per member with their own key.
                    destPubKey = self.connected_users[queue]['pubkey']
                    print("This is the pubKey of " + self.connected_users[queue]['username'] + ": " + destPubKey.decode()[:40])
                    encrypted_msg = rsa_encrypt(message, destPubKey)
                    self.send(queue, 'roomReceive::' + room + '::' + user_name + '::' + encrypted_msg.decode())
                return True
            else:
                self.send(queue_name, 'notfound::')
                return False
        elif action == 'leaveRoom':
            queue_name = tokens[0]
            room = tokens[1]
            if queue_name in self.connected_users.keys() and queue_name in self.rooms.get(room, []):
                self.rooms[room].remove(queue_name)
                self.send(queue_name, "left::" + room + '::')
                return True
            self.send(queue_name, "notfound::")
            return False

    def _user_names(self):
        """Comma-joined display names of all connected users."""
        return ','.join([obj['username'] for obj in self.connected_users.values()])

    def _broadcast_user_list(self, skip):
        """Push the current user list to every client except *skip*."""
        for queue in self.connected_users.keys():
            if queue != skip:
                self.send(queue, "connectedUsers::" + self._user_names())

    def send(self, client_queue, msg):
        """Publish *msg* to *client_queue* (without its 'amq.' prefix) via
        the ``users_exchange`` direct exchange."""
        self.channel.exchange_declare(exchange='users_exchange', exchange_type='direct')
        self.channel.basic_publish(
            exchange='users_exchange',
            routing_key=client_queue[4:],
            body=msg,
            properties=pika.BasicProperties(
                delivery_mode=2,  # make message persistent
            ))
s = Server()
s.connect() | import pika
from Crypto.PublicKey import RSA
from encryption_decryption import rsa_encrypt, rsa_decrypt, get_rsa_key
class Server:
def __init__(self):
self.connected_users = {}
self.rooms={'room1':[],'room2':[],'room3':[],'room4':[]}
def connect(self):
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
self.channel = self.connection.channel()
self.receive()
def receive(self):
self.channel.queue_declare(queue='main_queue')
def callback(ch, method, properties, body):
# Received a Message
tokens = body.decode().split('::')
action = tokens[0]
tokens[1] = 'amq.'+tokens[1]
print("[+] Received this ",body)
self.handleAction(action,tokens[1:])
ch.basic_ack(delivery_tag=method.delivery_tag)
self.channel.basic_consume(queue='main_queue', on_message_callback=callback)
print('Server Started !! Listening')
self.channel.start_consuming()
def handleAction(self,action,tokens):
if action == 'login':
# User send this action + his queue name + his name
queue_name = tokens[0]
user_name= tokens[1]
pubkey = tokens[2].encode()
self.connected_users.setdefault(queue_name,{'username': user_name, 'pubkey': pubkey})
self.send(queue_name,"connected::")
for queue in self.connected_users.keys():
if queue != queue_name:
self.send(queue,"connectedUsers::"+','.join([obj['username'] for obj in self.connected_users.values()]))
elif action == 'quit':
# User send his queue name
queue_name = tokens[0]
if queue_name in self.connected_users.keys():
del self.connected_users[queue_name]
self.send(queue_name,"disconnected::")
for queue in self.connected_users.keys():
if queue != queue_name:
self.send(queue,"connectedUsers::"+','.join([obj['username'] for obj in self.connected_users.values()]))
return True
else:
self.send(queue_name,"invalid::")
return False
elif action == 'getConnectedUsers':
# return all connected Users names
queue_name = tokens[0]
if( queue_name in self.connected_users.keys()):
usersNames = ','.join([obj['username'] for obj in self.connected_users.values()])
self.send(queue_name,"connectedUsers::"+usersNames)
return True
else:
self.send(queue_name,"notfound::")
return False
elif action == 'getUserData':
# return a user queue name
queue_name = tokens[0]
demanded_user_name = tokens[1]
for key,val in self.connected_users.items():
if val['username'] == demanded_user_name:
self.send(key,"chosen::"+self.connected_users[queue_name]['username']+'::'+self.connected_users[queue_name]['pubkey'].decode()+'::'+queue_name)
self.send(queue_name,"username::"+str(val['username'])+"::"+str(key)+"::"+val['pubkey'].decode())
return True
self.send(queue_name,"notfound::")
return False
elif action == 'getRooms':
queue_name = tokens[0]
if(queue_name in self.connected_users.keys()):
self.send(queue_name,"rooms::"+','.join(self.rooms.keys()))
return True
self.send(queue_name,'notfound::')
return False
elif action == 'joinRoom':
queue_name = tokens[0]
room = tokens[1]
if(queue_name in self.connected_users.keys()):
self.rooms[room].append(queue_name)
self.send(queue_name,"joinedRoom::"+room+'::')
return True
self.send(queue_name,"notfound::")
return False
elif action == 'sendToRoom':
queue_name = tokens[0]
user_name = self.connected_users[queue_name]['username']
room = tokens[1]
# We decrypted the message using the room's private key first
roomPrivateKey = get_rsa_key("./chatrooms-keys/"+room).export_key()
message = rsa_decrypt(tokens[2].encode(), roomPrivateKey).decode()
if(queue_name in self.connected_users.keys() and queue_name in self.rooms[room]):
for queue in self.rooms[room]:
# Get pubkey for each user
destPubKey = self.connected_users[queue]['pubkey']
# Encrypt the message with user's public key
print("This is the pubKey of " + self.connected_users[queue]['username'] + ": "+destPubKey.decode()[:40])
encrypted_msg = rsa_encrypt(message, destPubKey)
self.send(queue,'roomReceive::'+room+'::'+user_name+'::'+encrypted_msg.decode())
return True
else :
self.send(queue_name,'notfound::')
return False
elif action == 'leaveRoom':
queue_name = tokens[0]
room = tokens[1]
if(queue_name in self.connected_users.keys() and queue_name in self.rooms[room]):
self.rooms[room].remove(queue_name)
self.send(queue_name,"left::"+room+'::')
return True
self.send(queue_name,"notfound::")
return False
def send(self,client_queue,msg):
self.channel.exchange_declare(exchange='users_exchange', exchange_type='direct')
self.channel.basic_publish(
exchange='users_exchange',
routing_key=client_queue[4:],
body=msg,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
))
s = Server()
s.connect() | en | 0.887261 | # Received a Message # User send this action + his queue name + his name # User send his queue name # return all connected Users names # return a user queue name # We decrypted the message using the room's private key first # Get pubkey for each user # Encrypt the message with user's public key # make message persistent | 2.675843 | 3 |
plasma_cash/operator.py | zatoichi-labs/plasma-cash-vyper | 7 | 6618261 | from typing import Set
from trie.smt import SparseMerkleTree
from eth_typing import AnyAddress, ChecksumAddress, Hash32
from eth_account import Account
from eth_utils import to_bytes
from web3 import Web3
from web3.middleware.signing import construct_sign_and_send_raw_middleware
from .contracts import rootchain_interface
from .transaction import Transaction
def to_bytes32(val: int) -> bytes:
    """Serialize a non-negative uint256 *val* to its 32-byte big-endian form."""
    assert val >= 0 and val < 2**256, "Value out of range!"
    return val.to_bytes(length=32, byteorder='big')
class TokenToTxnHashIdSMT(SparseMerkleTree):
    """Sparse Merkle tree mapping token uids to transaction hashes.

    Keys are uint256 token ids serialized to 32-byte big-endian words;
    the stored value for a token is a transaction's ``msg_hash``.
    """

    def __init__(self):
        # Token ids are uint256, so every key is exactly 32 bytes wide.
        super().__init__(key_size=32)

    def get(self, token_uid: int) -> Hash32:
        """Return the stored transaction hash for *token_uid*."""
        return super().get(to_bytes32(token_uid))

    def branch(self, token_uid: int) -> Set[Hash32]:
        """Return the Merkle proof branch for *token_uid*."""
        return super().branch(to_bytes32(token_uid))

    def set(self, token_uid: int, txn: Transaction) -> Set[Hash32]:
        """Record *txn*'s message hash under *token_uid*."""
        key = to_bytes32(token_uid)
        return super().set(key, txn.msg_hash)

    def exists(self, token_uid: int) -> bool:
        """True when a hash has been stored for *token_uid*."""
        return super().exists(to_bytes32(token_uid))
class Operator:
    """Plasma Cash operator.

    Watches the rootchain contract for deposit/exit events, accepts plasma
    transfer transactions from users, batches them into sparse-Merkle-tree
    blocks and periodically publishes each block's root hash on-chain.
    """
    def __init__(self,
                 w3: Web3,
                 rootchain_address: AnyAddress,
                 private_key: bytes):
        self._w3 = w3
        self._rootchain = self._w3.eth.contract(rootchain_address, **rootchain_interface)
        self._acct = Account.from_key(private_key)
        # Allow web3 to autosign with account
        middleware = construct_sign_and_send_raw_middleware(private_key)
        self._w3.middleware_onion.add(middleware)
        # Set up data structures
        self.pending_deposits = {} # Dict mapping tokenId to deposit txn in Rootchain contract
        self.deposits = {} # Dict mapping tokenId to last known txn
        self.transactions = [TokenToTxnHashIdSMT()] # Ordered list of block txn dbs
        self.last_sync_time = self._w3.eth.blockNumber
        # Add listeners (dict of filters: callbacks)
        self.listeners = {}
        # Add listener for deposits
        self.listeners[
            self._rootchain.events.DepositAdded.createFilter(
                fromBlock=self._w3.eth.blockNumber
            )
        ] = self.addDeposit
        # Add listener for deposit cancellations
        self.listeners[
            self._rootchain.events.DepositCancelled.createFilter(
                fromBlock=self._w3.eth.blockNumber,
            )
        ] = self.remDeposit
        # Add listener for challenging withdrawals
        self.listeners[
            self._rootchain.events.ExitStarted.createFilter(
                fromBlock=self._w3.eth.blockNumber,
            )
        ] = self.checkExit
        # Add listener for finalized withdrawals (a finished exit removes
        # the token from tracking just like a cancelled deposit)
        self.listeners[
            self._rootchain.events.ExitFinished.createFilter(
                fromBlock=self._w3.eth.blockNumber,
            )
        ] = self.remDeposit
    @property
    def address(self) -> ChecksumAddress:
        # The operator's own (checksummed) account address.
        return self._acct.address
    # TODO Make this async loop
    def monitor(self):
        """Poll each event filter once and dispatch its callback, then
        publish a block if more than 2 root-chain blocks have elapsed
        since the last sync."""
        for log_filter, callback_fn in self.listeners.items():
            for log in log_filter.get_new_entries():
                callback_fn(log)
        if self._w3.eth.blockNumber - self.last_sync_time > 2:
            self.publish_block()
            self.last_sync_time = self._w3.eth.blockNumber
    def addDeposit(self, log):
        """Track a DepositAdded event as a pending deposit transaction."""
        if not self.is_tracking(log.args['tokenId']):
            self.pending_deposits[log.args['tokenId']] = Transaction(
                self._w3.eth.chainId,
                self._rootchain.address,
                **log.args,
            )
    def remDeposit(self, log):
        """Stop tracking a token (deposit cancelled or exit finished)."""
        if log.args['tokenId'] in self.pending_deposits.keys():
            del self.pending_deposits[log.args['tokenId']]
        if log.args['tokenId'] in self.deposits.keys():
            del self.deposits[log.args['tokenId']]
    def checkExit(self, log):
        """Inspect an ExitStarted event for a mismatched owner."""
        # TODO Also validate that exit hasn't been challenged yet
        # NOTE(review): raises KeyError if the token is not in self.deposits —
        # confirm exits for untracked tokens cannot reach this callback.
        if self.deposits[log.args['tokenId']].newOwner != log.args['owner']:
            pass # TODO Challenge exit by looking up appropriate challenge txn
    def addTransaction(self, transaction: Transaction):
        """
        Validate and queue a user's transfer for the next plasma block.

        Accepts the transfer only if the token is tracked and the current
        holder signed it; returns True when queued, False otherwise.
        """
        # Can't transfer a token we aren't tracking in our db
        if not self.is_tracking(transaction.tokenId):
            print("Not Tracking!")
            return False
        # Holder of token didn't sign it
        if self.deposits[transaction.tokenId].newOwner != transaction.signer:
            print("Not signed by current holder!")
            return False
        # NOTE This allows multiple transactions in a single block
        self.transactions[-1].set(transaction.tokenId, transaction)
        # Update last known transaction for deposit
        self.deposits[transaction.tokenId] = transaction
        return True
    def publish_block(self):
        """Fold pending deposits into the open block, submit its root hash
        to the rootchain, then start a fresh block."""
        # Process all the pending deposits we have
        for token_id, txn in self.pending_deposits.items():
            assert not self.is_tracking(token_id)
            self.deposits[token_id] = txn
            self.transactions[-1].set(token_id, txn)
        self.pending_deposits = {}
        # Submit the roothash for transactions
        txn_hash = self._rootchain.functions.submitBlock(
            self.transactions[-1].root_hash
        ).transact({'from': self.address})
        self._w3.eth.waitForTransactionReceipt(txn_hash) # FIXME Shouldn't have to wait
        # Reset transactions db
        self.transactions.append(TokenToTxnHashIdSMT())
    def is_tracking(self, token_uid):
        # Respond to user's request of whether we are tracking this token yet
        return token_uid in self.deposits.keys()
    def get_branch(self, token_uid, block_num):
        # Merkle proof (branch) for token_uid in the given block's tree.
        return self.transactions[block_num].branch(token_uid)
| from typing import Set
from trie.smt import SparseMerkleTree
from eth_typing import AnyAddress, ChecksumAddress, Hash32
from eth_account import Account
from eth_utils import to_bytes
from web3 import Web3
from web3.middleware.signing import construct_sign_and_send_raw_middleware
from .contracts import rootchain_interface
from .transaction import Transaction
def to_bytes32(val: int) -> bytes:
    """Encode *val* as a fixed-width 32-byte big-endian value.

    :param val: non-negative integer strictly below 2**256
    :return: 32-byte big-endian representation of *val*
    :raises ValueError: if *val* is out of range.  The previous ``assert``
        was silently stripped when Python runs with ``-O``; an explicit
        raise keeps the range check in optimized runs too.
    """
    if not 0 <= val < 2**256:
        raise ValueError("Value out of range!")
    return val.to_bytes(32, byteorder='big')
class TokenToTxnHashIdSMT(SparseMerkleTree):
    """Sparse Merkle tree keyed by 32-byte token uids, storing txn hashes."""

    def __init__(self):
        # Token uids occupy the full 32-byte key space.
        super().__init__(key_size=32)

    def get(self, token_uid: int) -> Hash32:
        """Return the stored hash for *token_uid*."""
        key = to_bytes32(token_uid)
        return super().get(key)

    def branch(self, token_uid: int) -> Set[Hash32]:
        """Return the Merkle proof branch for *token_uid*."""
        key = to_bytes32(token_uid)
        return super().branch(key)

    def set(self, token_uid: int, txn: Transaction) -> Set[Hash32]:
        """Record the hash of *txn* under *token_uid*."""
        key = to_bytes32(token_uid)
        return super().set(key, txn.msg_hash)

    def exists(self, token_uid: int) -> bool:
        """Return True if a value is stored for *token_uid*."""
        key = to_bytes32(token_uid)
        return super().exists(key)
class Operator:
    """Off-chain operator for a plasma-style child chain.

    Listens to deposit/exit events on a rootchain contract, validates and
    queues token transfer transactions, and periodically publishes the
    root hash of the current transaction SMT back to the rootchain.
    """

    def __init__(self,
                 w3: Web3,
                 rootchain_address: AnyAddress,
                 private_key: bytes):
        """Connect to the rootchain contract and register event listeners.

        :param w3: connected Web3 instance
        :param rootchain_address: address of the deployed rootchain contract
        :param private_key: operator account key; signing middleware is
            installed so transactions from this account are auto-signed
        """
        self._w3 = w3
        self._rootchain = self._w3.eth.contract(rootchain_address, **rootchain_interface)
        self._acct = Account.from_key(private_key)
        # Allow web3 to autosign with account
        middleware = construct_sign_and_send_raw_middleware(private_key)
        self._w3.middleware_onion.add(middleware)
        # Set up data structures
        self.pending_deposits = {}  # Dict mapping tokenId to deposit txn in Rootchain contract
        self.deposits = {}  # Dict mapping tokenId to last known txn
        self.transactions = [TokenToTxnHashIdSMT()]  # Ordered list of block txn dbs
        # NOTE(review): despite the name, this holds a block number, not a time
        self.last_sync_time = self._w3.eth.blockNumber
        # Add listeners (dict of filters: callbacks)
        self.listeners = {}
        # Add listener for deposits
        self.listeners[
            self._rootchain.events.DepositAdded.createFilter(
                fromBlock=self._w3.eth.blockNumber
            )
        ] = self.addDeposit
        # Add listener for deposit cancellations
        self.listeners[
            self._rootchain.events.DepositCancelled.createFilter(
                fromBlock=self._w3.eth.blockNumber,
            )
        ] = self.remDeposit
        # Add listener for challenging withdrawals
        self.listeners[
            self._rootchain.events.ExitStarted.createFilter(
                fromBlock=self._w3.eth.blockNumber,
            )
        ] = self.checkExit
        # Add listener for finalized withdrawals
        self.listeners[
            self._rootchain.events.ExitFinished.createFilter(
                fromBlock=self._w3.eth.blockNumber,
            )
        ] = self.remDeposit

    @property
    def address(self) -> ChecksumAddress:
        """Checksummed address of the operator's account."""
        return self._acct.address

    # TODO Make this async loop
    def monitor(self):
        """Poll every event filter once and dispatch new logs to callbacks.

        Also publishes a new block whenever more than two rootchain blocks
        have been mined since the last publication.
        """
        for log_filter, callback_fn in self.listeners.items():
            for log in log_filter.get_new_entries():
                callback_fn(log)
        if self._w3.eth.blockNumber - self.last_sync_time > 2:
            self.publish_block()
            self.last_sync_time = self._w3.eth.blockNumber

    def addDeposit(self, log):
        """Queue a DepositAdded event as a pending deposit.

        Ignored when the token is already tracked in ``self.deposits``.
        """
        if not self.is_tracking(log.args['tokenId']):
            self.pending_deposits[log.args['tokenId']] = Transaction(
                self._w3.eth.chainId,
                self._rootchain.address,
                **log.args,
            )

    def remDeposit(self, log):
        """Stop tracking a token (deposit cancelled or exit finished)."""
        if log.args['tokenId'] in self.pending_deposits.keys():
            del self.pending_deposits[log.args['tokenId']]
        if log.args['tokenId'] in self.deposits.keys():
            del self.deposits[log.args['tokenId']]

    def checkExit(self, log):
        """Inspect an ExitStarted event; challenging bad exits is a TODO."""
        # TODO Also validate that exit hasn't been challenged yet
        # NOTE(review): raises KeyError if the exiting token isn't tracked
        if self.deposits[log.args['tokenId']].newOwner != log.args['owner']:
            pass  # TODO Challenge exit by looking up appropriate challenge txn

    def addTransaction(self, transaction: Transaction):
        """
        Sender asked for a transaction through us
        Validate with the receiver that they want it
        If valid, tracking in the transaction queue until publishing
        Don't forget to reply to the sender's request

        Returns True if the transfer was accepted, False otherwise.
        """
        # Can't transfer a token we aren't tracking in our db
        if not self.is_tracking(transaction.tokenId):
            print("Not Tracking!")
            return False
        # Holder of token didn't sign it
        if self.deposits[transaction.tokenId].newOwner != transaction.signer:
            print("Not signed by current holder!")
            return False
        # NOTE This allows multiple transactions in a single block
        self.transactions[-1].set(transaction.tokenId, transaction)
        # Update last known transaction for deposit
        self.deposits[transaction.tokenId] = transaction
        return True

    def publish_block(self):
        """Fold pending deposits into the current SMT and submit its root."""
        # Process all the pending deposits we have
        for token_id, txn in self.pending_deposits.items():
            assert not self.is_tracking(token_id)
            self.deposits[token_id] = txn
            self.transactions[-1].set(token_id, txn)
        self.pending_deposits = {}
        # Submit the roothash for transactions
        txn_hash = self._rootchain.functions.submitBlock(
            self.transactions[-1].root_hash
        ).transact({'from': self.address})
        self._w3.eth.waitForTransactionReceipt(txn_hash)  # FIXME Shouldn't have to wait
        # Reset transactions db
        self.transactions.append(TokenToTxnHashIdSMT())

    def is_tracking(self, token_uid):
        """Return True if a deposit for *token_uid* is currently tracked."""
        # Respond to user's request of whether we are tracking this token yet
        return token_uid in self.deposits.keys()

    def get_branch(self, token_uid, block_num):
        """Return the SMT proof branch for *token_uid* in block *block_num*."""
        return self.transactions[block_num].branch(token_uid)
| en | 0.860492 | # Tokens are 32 bytes big # Allow web3 to autosign with account # Set up dats structures # Dict mapping tokenId to deposit txn in Rootchain contract # Dict mapping tokenId to last known txn # Ordered list of block txn dbs # Add listeners (dict of filters: callbacks) # Add listener for deposits # Add listener for deposit cancellations # Add listener for challenging withdrawals # Add listener for finalized withdrawals # TODO Make this async loop # TODO Also validate that exit hasn't been challenged yet # TODO Challenge exit by looking up appropiate challenge txn Sender asked for a transaction through us Validate with the receiver that they want it If valid, tracking in the transaction queue until publishing Don't forget to reply to the sender's request # Can't transfer a token we aren't tracking in our db # Holder of token didn't sign it # NOTE This allows multiple transactions in a single block # Update last known transaction for deposit # Process all the pending deposits we have # Submit the roothash for transactions # FIXME Shouldn't have to wait # Reset transactions db # Respond to user's request of whether we are tracking this token yet | 2.07917 | 2 |
src/preprocessing.py | gregoryverghese/schizophrenia-twitter | 0 | 6618262 | import pandas as pd
import numpy as np
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
from gensim.models.phrases import Phrases, Phraser
from emoji import UNICODE_EMOJI
nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('english')
nltk.download('wordnet')
lemmatizer = WordNetLemmatizer()
NOOCC_TOKEN = 'NO<PASSWORD>'
MODEL_NUM=2
def getFile(fileName, column=None):
socialDf = pd.read_csv(fileName, encoding='utf-8')
return socialDf if column==None else socialDf[column]
def cleanScizAnnTwitter(annTwitter):
annTwitter = annTwitter.dropna()
annTwitter = annTwitter[annTwitter['Classification']!='o']
annTwitter = annTwitter.astype({"Classification": int})
annTwitter = annTwitter[annTwitter['Classification']!=1]
annTwitter = annTwitter.replace(2, 1)
return annTwitter
class SocialPreProcessing():
def __init__(self, text, character):
self.text = text
self.character = character
def clean(self, methods=['Tokens', 'Lemma']):
for f in methods:
print(f)
self.text = self.text.apply(lambda x: getattr(self, 'get'+f)(x))
return self.text
def getTokens(self, sentences):
tokens = word_tokenize(sentences) if not self.character else list(sentences)
return tokens
def getLemma(self, tokens):
tokens = map(lemmatizer.lemmatize, tokens)
return tokens
def getStopwords(self, tokens):
tokens = [t for t in tokens if t not in stopwords]
return tokens
def getEmoticons(self, tokens):
return [t for t in tokens if t not in UNICODE_EMOJI]
def getLowercase(self, tokens):
return [t.lower() for t in tokens]
def getPhrases(self, tokens):
phrases = Phrases(tokens, min_count=1, threshold=1)
bigrams = Phraser(phrases)
text = [sent for sent in bigrams[tokens]]
return tokens
| import pandas as pd
import numpy as np
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
from gensim.models.phrases import Phrases, Phraser
from emoji import UNICODE_EMOJI
nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('english')
nltk.download('wordnet')
lemmatizer = WordNetLemmatizer()
NOOCC_TOKEN = 'NO<PASSWORD>'
MODEL_NUM=2
def getFile(fileName, column=None):
socialDf = pd.read_csv(fileName, encoding='utf-8')
return socialDf if column==None else socialDf[column]
def cleanScizAnnTwitter(annTwitter):
annTwitter = annTwitter.dropna()
annTwitter = annTwitter[annTwitter['Classification']!='o']
annTwitter = annTwitter.astype({"Classification": int})
annTwitter = annTwitter[annTwitter['Classification']!=1]
annTwitter = annTwitter.replace(2, 1)
return annTwitter
class SocialPreProcessing():
def __init__(self, text, character):
self.text = text
self.character = character
def clean(self, methods=['Tokens', 'Lemma']):
for f in methods:
print(f)
self.text = self.text.apply(lambda x: getattr(self, 'get'+f)(x))
return self.text
def getTokens(self, sentences):
tokens = word_tokenize(sentences) if not self.character else list(sentences)
return tokens
def getLemma(self, tokens):
tokens = map(lemmatizer.lemmatize, tokens)
return tokens
def getStopwords(self, tokens):
tokens = [t for t in tokens if t not in stopwords]
return tokens
def getEmoticons(self, tokens):
return [t for t in tokens if t not in UNICODE_EMOJI]
def getLowercase(self, tokens):
return [t.lower() for t in tokens]
def getPhrases(self, tokens):
phrases = Phrases(tokens, min_count=1, threshold=1)
bigrams = Phraser(phrases)
text = [sent for sent in bigrams[tokens]]
return tokens
| none | 1 | 2.766902 | 3 | |
custom_components/selve/__init__.py | Kannix2005/homeassistant-selve | 0 | 6618263 | <gh_stars>0
"""
Support for Selve devices.
"""
from __future__ import annotations
from homeassistant.components import discovery
from homeassistant.core import HomeAssistant, callback
from .const import DOMAIN, SELVE_TYPES
from collections import defaultdict
import logging
import voluptuous as vol
from homeassistant.const import CONF_PORT
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from selve import Gateway
REQUIREMENTS = ["python-selve-new"]
PLATFORMS = ["cover"]#, "switch", "light", "climate"]
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): cv.string,
}
),
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Hello World component."""
# Ensure our name space for storing objects is a known type. A dict is
# common/preferred as it allows a separate instance of your class for each
# instance that has been created in the UI.
hass.data.setdefault(DOMAIN, {})
serial_port = config[DOMAIN][CONF_PORT]
try:
selve = Gateway(serial_port, False)
except:
_LOGGER.exception("Error when trying to connect to the selve gateway")
return False
hass.data[DOMAIN] = {"controller": selve, "devices": defaultdict(list)}
try:
await selve.discover()
devices = list(selve.devices.values())
except:
_LOGGER.exception("Error when getting devices from the Selve API")
return False
hass.data[DOMAIN] = {"controller": selve, "devices": defaultdict(list)}
for device in devices:
_device = device
device_type = map_selve_device(_device)
if device_type is None:
_LOGGER.warning(
"Unsupported type %s for Selve device %s",
_device.device_type,
_device.name,
)
continue
hass.data[DOMAIN]["devices"][device_type].append(_device)
for platform in PLATFORMS:
hass.async_create_task(
discovery.async_load_platform(hass, platform, DOMAIN, {}, config)
)
return True
def map_selve_device(selve_device):
"""Map Selve device types to Home Assistant components."""
return SELVE_TYPES.get(selve_device.device_type.value)
class SelveDevice(Entity):
"""Representation of a Selve device entity."""
def __init__(self, selve_device, controller):
"""Initialize the device."""
self.selve_device = selve_device
self.controller = controller
self._name = self.selve_device.name
@callback
def async_register_callbacks(self):
"""Register callbacks to update hass after device was changed."""
@property
def unique_id(self):
"""Return the unique id base on the id returned by gateway."""
return self.selve_device.ID
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {"selve_device_id": self.selve_device.ID}
| """
Support for Selve devices.
"""
from __future__ import annotations
from homeassistant.components import discovery
from homeassistant.core import HomeAssistant, callback
from .const import DOMAIN, SELVE_TYPES
from collections import defaultdict
import logging
import voluptuous as vol
from homeassistant.const import CONF_PORT
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from selve import Gateway
REQUIREMENTS = ["python-selve-new"]
PLATFORMS = ["cover"]#, "switch", "light", "climate"]
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): cv.string,
}
),
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Hello World component."""
# Ensure our name space for storing objects is a known type. A dict is
# common/preferred as it allows a separate instance of your class for each
# instance that has been created in the UI.
hass.data.setdefault(DOMAIN, {})
serial_port = config[DOMAIN][CONF_PORT]
try:
selve = Gateway(serial_port, False)
except:
_LOGGER.exception("Error when trying to connect to the selve gateway")
return False
hass.data[DOMAIN] = {"controller": selve, "devices": defaultdict(list)}
try:
await selve.discover()
devices = list(selve.devices.values())
except:
_LOGGER.exception("Error when getting devices from the Selve API")
return False
hass.data[DOMAIN] = {"controller": selve, "devices": defaultdict(list)}
for device in devices:
_device = device
device_type = map_selve_device(_device)
if device_type is None:
_LOGGER.warning(
"Unsupported type %s for Selve device %s",
_device.device_type,
_device.name,
)
continue
hass.data[DOMAIN]["devices"][device_type].append(_device)
for platform in PLATFORMS:
hass.async_create_task(
discovery.async_load_platform(hass, platform, DOMAIN, {}, config)
)
return True
def map_selve_device(selve_device):
"""Map Selve device types to Home Assistant components."""
return SELVE_TYPES.get(selve_device.device_type.value)
class SelveDevice(Entity):
"""Representation of a Selve device entity."""
def __init__(self, selve_device, controller):
"""Initialize the device."""
self.selve_device = selve_device
self.controller = controller
self._name = self.selve_device.name
@callback
def async_register_callbacks(self):
"""Register callbacks to update hass after device was changed."""
@property
def unique_id(self):
"""Return the unique id base on the id returned by gateway."""
return self.selve_device.ID
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {"selve_device_id": self.selve_device.ID} | en | 0.926981 | Support for Selve devices. #, "switch", "light", "climate"] Set up the Hello World component. # Ensure our name space for storing objects is a known type. A dict is # common/preferred as it allows a separate instance of your class for each # instance that has been created in the UI. Map Selve device types to Home Assistant components. Representation of a Selve device entity. Initialize the device. Register callbacks to update hass after device was changed. Return the unique id base on the id returned by gateway. Return the name of the device. Return the state attributes of the device. | 2.317954 | 2 |
py_reportit/shared/repository/crawl_item.py | fedus/py_reportit | 1 | 6618264 | <gh_stars>1-10
from typing import Optional
from sqlalchemy import select
from sqlalchemy.orm import Session
from py_reportit.shared.repository.abstract_repository import AbstractRepository
from py_reportit.shared.model.crawl_item import CrawlItem, CrawlItemState
class CrawlItemRepository(AbstractRepository[CrawlItem]):
model = CrawlItem
def get_next_waiting(self, session: Session, crawl_id: int) -> Optional[CrawlItem]:
result = session.execute(
select(CrawlItem).where(
CrawlItem.crawl_id == crawl_id,
CrawlItem.state == CrawlItemState.WAITING
).order_by(CrawlItem.scheduled_for.asc())).first()
if result:
return result[0]
return None
| from typing import Optional
from sqlalchemy import select
from sqlalchemy.orm import Session
from py_reportit.shared.repository.abstract_repository import AbstractRepository
from py_reportit.shared.model.crawl_item import CrawlItem, CrawlItemState
class CrawlItemRepository(AbstractRepository[CrawlItem]):
model = CrawlItem
def get_next_waiting(self, session: Session, crawl_id: int) -> Optional[CrawlItem]:
result = session.execute(
select(CrawlItem).where(
CrawlItem.crawl_id == crawl_id,
CrawlItem.state == CrawlItemState.WAITING
).order_by(CrawlItem.scheduled_for.asc())).first()
if result:
return result[0]
return None | none | 1 | 2.280075 | 2 | |
src/generic.py | caiofattori/PyPFSDesign | 0 | 6618265 | from PyQt5.QtWidgets import QGraphicsItem, QColorDialog, QWidget, QDialog
from PyQt5.QtGui import QPen, QColor, QBrush, QFont, QFontMetrics, QPainter, QMouseEvent
from PyQt5.QtCore import Qt, pyqtSignal, QObject, QRect, QXmlStreamWriter
from undo import PFSUndoPropertyText, PFSUndoPropertyButton, PFSUndoPropertyCombo, PFSUndoAddTag, PFSUndoRemoveTag
from dialog import PFSDialogTag
from PyQt5.QtXml import QDomNode
class PFSSenderSignal(QObject):
changed = pyqtSignal()
deleted = pyqtSignal()
penEdited = pyqtSignal(object)
def __init__(self):
super(QObject, self).__init__()
class PFSTags(QWidget):
removed = pyqtSignal(object)
def __init__(self, name, use=""):
QWidget.__init__(self)
self._name = name
self._use = use
self._font = QFont("Serif", 8)
self._rect = QRect(0,0,10,10)
self._brush = QBrush(Qt.white, Qt.SolidPattern)
def clone(self):
ans = PFSTags(self._name, self._use)
ans.removed.connect(self.removed.emit)
return ans
def simpleUse(self):
if len(self._use) > 16:
return self._use[:8] + "\n" + self._use[8:13] + "..."
if len(self._use) > 8:
return self._use[:5] + "..."
return self._use
def simpleName(self):
if len(self._name) > 30:
return self._name[:10] + "\n" + self._name[10:20] + "\n" + self._name[20:27] + "..."
if len(self._name) > 20:
return self._name[:10] + "\n" + self._name[10:17] + "..."
if len(self._name) > 10:
return self._name[:7] + "..."
return self._name
def updateRect(self):
fm =QFontMetrics(self._font)
self._useRect = fm.size(Qt.TextExpandTabs, self.simpleUse())
self._nameRect = fm.size(Qt.TextExpandTabs, self.simpleName())
self._rect = QRect(0,0, self._useRect.width() + self._nameRect.width()+23, max(self._useRect.height(), self._nameRect.height())+4)
x = self._useRect.width() + self._nameRect.width() + 12
y = self._rect.center().y() - 3
self._closeRect = QRect(x, y, 6, 6)
def paintEvent(self, ev):
self.updateRect()
p = QPainter(self)
p.setBrush(self._brush)
p.drawRoundedRect(self._rect, 10, 10)
p.drawLine(self._useRect.width() + 6, self._rect.top(), self._useRect.width() + 6, self._rect.bottom())
p.setFont(self._font)
p.drawText(3, 0, self._useRect.width(), self._rect.height(), Qt.AlignCenter, self.simpleUse())
p.drawText(self._useRect.width() + 9, 0, self._nameRect.width(), self._rect.height(), Qt.AlignCenter, self.simpleName())
p.drawLine(self._closeRect.topLeft(), self._closeRect.bottomRight())
p.drawLine(self._closeRect.bottomLeft(), self._closeRect.topRight())
def mousePressEvent(self, ev: QMouseEvent):
if self._closeRect.contains(ev.pos()):
self.removed.emit(self)
def __eq__(self, other):
if not isinstance(other, PFSTags):
return False
return self._name == other._name and self._use == other._use
class PFSBasicElement(object):
def __init__(self, id):
self._id = id
self._tags = []
def addTag(self, name, use=""):
tag = PFSTags(name, use)
tag.removed.connect(self.deleteTag)
self._tags.append(tag)
def createTag(self, net):
name, use, ans = PFSDialogTag.getTag()
if ans:
x = PFSUndoAddTag(self, name, use)
net.undoStack.push(x)
def removeTag(self, name, use):
for tag in self._tags:
if tag._name == name and tag._use == use:
self._tags.remove(tag)
return
def generateXml(self, xml: QXmlStreamWriter):
if len(self._tags) == 0:
return
xml.writeStartElement("tags")
for tag in self._tags:
xml.writeStartElement("tag")
xml.writeAttribute("name", tag._name)
xml.writeAttribute("use", tag._use)
xml.writeEndElement()
xml.writeEndElement()
def createFromXml(node: QDomNode):
ans = []
if node.nodeName() != "tags":
return ans
childs = node.childNodes()
for i in range(childs.count()):
child = childs.at(i)
if child.nodeName() != "tag":
continue
attr = child.attributes()
if not (child.hasAttributes() and attr.contains("name")):
continue
name = attr.namedItem("name").nodeValue()
use = ""
if attr.contains("use"):
use = attr.namedItem("use").nodeValue()
ans.append(PFSTags(name, use))
return ans
class PFSElement(PFSBasicElement, QGraphicsItem):
SELECTED_PEN = Qt.red
SELECTED_PEN_ALT = Qt.blue
PEN_LIST = {"Solida": Qt.SolidLine, "Tracejada": Qt.DashLine, "Pontilhada": Qt.DotLine}
def __init__(self, id: str):
PFSBasicElement.__init__(self, id)
QGraphicsItem.__init__(self)
self.setFlag(QGraphicsItem.ItemIsSelectable)
def createTag(self):
PFSBasicElement.createTag(self, self.scene()._page._net)
def deleteTag(self, tag):
x = PFSUndoRemoveTag(self, tag)
self.scene()._page._net.undoStack.push(x)
def removeTag(self, name, use):
PFSBasicElement.removeTag(self, name, use)
self.scene()._page._net.fillProperties(self.propertiesTable())
def addTag(self, name, use="", update=True):
PFSBasicElement.addTag(self, name, use)
if update:
self.scene()._page._net.fillProperties(self.propertiesTable())
def selectSingle(self):
self.scene()._page._net.showPage(self.scene()._page)
self.scene().clearSelection()
self.setSelected(True)
self.scene()._page._net.fillProperties(self.propertiesTable())
def canDelete(self):
return True
class PFSNode(PFSElement):
def __init__(self, id: str, x: int, y: int):
PFSElement.__init__(self, id)
self.setPos(x/2, y/2)
self._width = 0
self._height = 0
self._pen = QPen(Qt.black)
self._brush = QBrush(Qt.white, Qt.SolidPattern)
self.emitter = PFSSenderSignal()
self.changed = self.emitter.changed
self.deleted = self.emitter.deleted
self.penEdited = self.emitter.penEdited
def move(self, x, y):
self.moveBy(x/2, y/2)
self.changed.emit()
for it in self.scene().items():
print(str(it.__class__) + " " + str(it.shape().boundingRect()))
def setPenColor(self, color: QColor):
self._pen.setColor(color)
self.scene().update()
def setPenStyle(self, style: Qt):
self._pen.setStyle(style)
self.scene().update()
self.penEdited.emit(self)
def setPenWidth(self, width: str):
self._pen.setWidth(float(width))
self.scene().update()
def setBrushColor(self, color: QColor):
self._brush.setColor(color)
self.scene().update()
def changeElementPosX(self, prop):
x = PFSUndoPropertyText(prop, self.moveX)
self.scene()._page._net.undoStack.push(x)
def changeElementPosY(self, prop):
x = PFSUndoPropertyText(prop, self.moveY)
self.scene()._page._net.undoStack.push(x)
def changeElementWidth(self, prop):
x = PFSUndoPropertyText(prop, self.resizeWidth)
self.scene()._page._net.undoStack.push(x)
def changeElementHeight(self, prop):
x = PFSUndoPropertyText(prop, self.resizeHeight)
self.scene()._page._net.undoStack.push(x)
def changeLineColor(self):
color = QColorDialog.getColor(self._pen.color(), self.scene()._page._net, "Escolha a cor do contorno")
if color.isValid() and color != self._pen.color():
x = PFSUndoPropertyButton(color, self._pen.color(), self.setPenColor)
self.scene()._page._net.undoStack.push(x)
def changeLineStyle(self, text):
if text in self.PEN_LIST:
x = PFSUndoPropertyCombo(self.PEN_LIST[text], self._pen.style(), self.setPenStyle)
self.scene()._page._net.undoStack.push(x)
def changeLineWidth(self, prop):
x = PFSUndoPropertyText(prop, self.setPenWidth)
self.scene()._page._net.undoStack.push(x)
def changeFillColor(self):
color = QColorDialog.getColor(self._brush.color(), self.scene()._page._net, "Escolha a cor do preenchimento")
if color.isValid() and color != self._brush.color():
x = PFSUndoPropertyButton(color, self._brush.color(), self.setBrushColor)
self.scene()._page._net.undoStack.push(x)
def moveX(self, txt, update=True):
self.moveBy(float(txt)/2, 0)
if update:
self.scene().update()
def moveY(self, txt, update=True):
self.moveBy(0, float(txt)/2)
if update:
self.scene().update()
def resizeWidth(self, txt):
self._width = float(txt)
self.changed.emit()
self.scene().update()
def resizeHeight(self, txt):
self._height = float(txt)
self.changed.emit()
self.scene().update()
class PFSActive(PFSNode):
def __init__(self, id, x, y):
PFSNode.__init__(self, id, x, y)
class PFSPassive(PFSNode):
def __init__(self, id, x, y):
PFSNode.__init__(self, id, x, y) | from PyQt5.QtWidgets import QGraphicsItem, QColorDialog, QWidget, QDialog
from PyQt5.QtGui import QPen, QColor, QBrush, QFont, QFontMetrics, QPainter, QMouseEvent
from PyQt5.QtCore import Qt, pyqtSignal, QObject, QRect, QXmlStreamWriter
from undo import PFSUndoPropertyText, PFSUndoPropertyButton, PFSUndoPropertyCombo, PFSUndoAddTag, PFSUndoRemoveTag
from dialog import PFSDialogTag
from PyQt5.QtXml import QDomNode
class PFSSenderSignal(QObject):
changed = pyqtSignal()
deleted = pyqtSignal()
penEdited = pyqtSignal(object)
def __init__(self):
super(QObject, self).__init__()
class PFSTags(QWidget):
removed = pyqtSignal(object)
def __init__(self, name, use=""):
QWidget.__init__(self)
self._name = name
self._use = use
self._font = QFont("Serif", 8)
self._rect = QRect(0,0,10,10)
self._brush = QBrush(Qt.white, Qt.SolidPattern)
def clone(self):
ans = PFSTags(self._name, self._use)
ans.removed.connect(self.removed.emit)
return ans
def simpleUse(self):
if len(self._use) > 16:
return self._use[:8] + "\n" + self._use[8:13] + "..."
if len(self._use) > 8:
return self._use[:5] + "..."
return self._use
def simpleName(self):
if len(self._name) > 30:
return self._name[:10] + "\n" + self._name[10:20] + "\n" + self._name[20:27] + "..."
if len(self._name) > 20:
return self._name[:10] + "\n" + self._name[10:17] + "..."
if len(self._name) > 10:
return self._name[:7] + "..."
return self._name
def updateRect(self):
fm =QFontMetrics(self._font)
self._useRect = fm.size(Qt.TextExpandTabs, self.simpleUse())
self._nameRect = fm.size(Qt.TextExpandTabs, self.simpleName())
self._rect = QRect(0,0, self._useRect.width() + self._nameRect.width()+23, max(self._useRect.height(), self._nameRect.height())+4)
x = self._useRect.width() + self._nameRect.width() + 12
y = self._rect.center().y() - 3
self._closeRect = QRect(x, y, 6, 6)
def paintEvent(self, ev):
self.updateRect()
p = QPainter(self)
p.setBrush(self._brush)
p.drawRoundedRect(self._rect, 10, 10)
p.drawLine(self._useRect.width() + 6, self._rect.top(), self._useRect.width() + 6, self._rect.bottom())
p.setFont(self._font)
p.drawText(3, 0, self._useRect.width(), self._rect.height(), Qt.AlignCenter, self.simpleUse())
p.drawText(self._useRect.width() + 9, 0, self._nameRect.width(), self._rect.height(), Qt.AlignCenter, self.simpleName())
p.drawLine(self._closeRect.topLeft(), self._closeRect.bottomRight())
p.drawLine(self._closeRect.bottomLeft(), self._closeRect.topRight())
def mousePressEvent(self, ev: QMouseEvent):
if self._closeRect.contains(ev.pos()):
self.removed.emit(self)
def __eq__(self, other):
if not isinstance(other, PFSTags):
return False
return self._name == other._name and self._use == other._use
class PFSBasicElement(object):
def __init__(self, id):
self._id = id
self._tags = []
def addTag(self, name, use=""):
tag = PFSTags(name, use)
tag.removed.connect(self.deleteTag)
self._tags.append(tag)
def createTag(self, net):
name, use, ans = PFSDialogTag.getTag()
if ans:
x = PFSUndoAddTag(self, name, use)
net.undoStack.push(x)
def removeTag(self, name, use):
for tag in self._tags:
if tag._name == name and tag._use == use:
self._tags.remove(tag)
return
def generateXml(self, xml: QXmlStreamWriter):
if len(self._tags) == 0:
return
xml.writeStartElement("tags")
for tag in self._tags:
xml.writeStartElement("tag")
xml.writeAttribute("name", tag._name)
xml.writeAttribute("use", tag._use)
xml.writeEndElement()
xml.writeEndElement()
def createFromXml(node: QDomNode):
ans = []
if node.nodeName() != "tags":
return ans
childs = node.childNodes()
for i in range(childs.count()):
child = childs.at(i)
if child.nodeName() != "tag":
continue
attr = child.attributes()
if not (child.hasAttributes() and attr.contains("name")):
continue
name = attr.namedItem("name").nodeValue()
use = ""
if attr.contains("use"):
use = attr.namedItem("use").nodeValue()
ans.append(PFSTags(name, use))
return ans
class PFSElement(PFSBasicElement, QGraphicsItem):
SELECTED_PEN = Qt.red
SELECTED_PEN_ALT = Qt.blue
PEN_LIST = {"Solida": Qt.SolidLine, "Tracejada": Qt.DashLine, "Pontilhada": Qt.DotLine}
def __init__(self, id: str):
PFSBasicElement.__init__(self, id)
QGraphicsItem.__init__(self)
self.setFlag(QGraphicsItem.ItemIsSelectable)
def createTag(self):
PFSBasicElement.createTag(self, self.scene()._page._net)
def deleteTag(self, tag):
x = PFSUndoRemoveTag(self, tag)
self.scene()._page._net.undoStack.push(x)
def removeTag(self, name, use):
PFSBasicElement.removeTag(self, name, use)
self.scene()._page._net.fillProperties(self.propertiesTable())
def addTag(self, name, use="", update=True):
PFSBasicElement.addTag(self, name, use)
if update:
self.scene()._page._net.fillProperties(self.propertiesTable())
def selectSingle(self):
self.scene()._page._net.showPage(self.scene()._page)
self.scene().clearSelection()
self.setSelected(True)
self.scene()._page._net.fillProperties(self.propertiesTable())
def canDelete(self):
return True
class PFSNode(PFSElement):
def __init__(self, id: str, x: int, y: int):
PFSElement.__init__(self, id)
self.setPos(x/2, y/2)
self._width = 0
self._height = 0
self._pen = QPen(Qt.black)
self._brush = QBrush(Qt.white, Qt.SolidPattern)
self.emitter = PFSSenderSignal()
self.changed = self.emitter.changed
self.deleted = self.emitter.deleted
self.penEdited = self.emitter.penEdited
def move(self, x, y):
self.moveBy(x/2, y/2)
self.changed.emit()
for it in self.scene().items():
print(str(it.__class__) + " " + str(it.shape().boundingRect()))
def setPenColor(self, color: QColor):
self._pen.setColor(color)
self.scene().update()
def setPenStyle(self, style: Qt):
self._pen.setStyle(style)
self.scene().update()
self.penEdited.emit(self)
def setPenWidth(self, width: str):
self._pen.setWidth(float(width))
self.scene().update()
def setBrushColor(self, color: QColor):
self._brush.setColor(color)
self.scene().update()
def changeElementPosX(self, prop):
x = PFSUndoPropertyText(prop, self.moveX)
self.scene()._page._net.undoStack.push(x)
def changeElementPosY(self, prop):
x = PFSUndoPropertyText(prop, self.moveY)
self.scene()._page._net.undoStack.push(x)
def changeElementWidth(self, prop):
x = PFSUndoPropertyText(prop, self.resizeWidth)
self.scene()._page._net.undoStack.push(x)
def changeElementHeight(self, prop):
x = PFSUndoPropertyText(prop, self.resizeHeight)
self.scene()._page._net.undoStack.push(x)
def changeLineColor(self):
color = QColorDialog.getColor(self._pen.color(), self.scene()._page._net, "Escolha a cor do contorno")
if color.isValid() and color != self._pen.color():
x = PFSUndoPropertyButton(color, self._pen.color(), self.setPenColor)
self.scene()._page._net.undoStack.push(x)
def changeLineStyle(self, text):
if text in self.PEN_LIST:
x = PFSUndoPropertyCombo(self.PEN_LIST[text], self._pen.style(), self.setPenStyle)
self.scene()._page._net.undoStack.push(x)
def changeLineWidth(self, prop):
x = PFSUndoPropertyText(prop, self.setPenWidth)
self.scene()._page._net.undoStack.push(x)
def changeFillColor(self):
color = QColorDialog.getColor(self._brush.color(), self.scene()._page._net, "Escolha a cor do preenchimento")
if color.isValid() and color != self._brush.color():
x = PFSUndoPropertyButton(color, self._brush.color(), self.setBrushColor)
self.scene()._page._net.undoStack.push(x)
def moveX(self, txt, update=True):
self.moveBy(float(txt)/2, 0)
if update:
self.scene().update()
    def moveY(self, txt, update=True):
        """Translate the item vertically by half of *txt* pixels (see moveX for
        the /2 convention)."""
        self.moveBy(0, float(txt)/2)
        if update:
            self.scene().update()
    def resizeWidth(self, txt):
        """Set the item width from text, notify listeners via changed, repaint."""
        self._width = float(txt)
        self.changed.emit()
        self.scene().update()
    def resizeHeight(self, txt):
        """Set the item height from text, notify listeners via changed, repaint."""
        self._height = float(txt)
        self.changed.emit()
        self.scene().update()
class PFSActive(PFSNode):
    """Active node of the net; all behaviour is inherited from PFSNode."""

    def __init__(self, id, x, y):
        super().__init__(id, x, y)
class PFSPassive(PFSNode):
    """Passive node of the net; all behaviour is inherited from PFSNode."""

    def __init__(self, id, x, y):
        super().__init__(id, x, y)
pizza_cutter/des_pizza_cutter/tests/test_pizza_cutter_object_data.py | beckermr/pizza-cutter | 0 | 6618266 | import numpy as np
from .._pizza_cutter import _build_object_data
def test_pizza_cutter_build_object_data(coadd_image_data):
    """Check the metadata table produced by _build_object_data for a
    10000x10000 coadd sliced into 250-pixel central regions with
    125-pixel buffers (total cutout 500 pixels)."""
    d = _build_object_data(
        central_size=250,
        buffer_size=125,
        image_width=10000,
        wcs=coadd_image_data['eu_wcs'],
        psf_box_size=21,
        position_offset=coadd_image_data['position_offset']
    )
    # ids are sequential; static per-slice metadata is uniform
    assert np.array_equal(d['id'], np.arange(d.shape[0]))
    assert np.all(d['box_size'] == 500)  # central_size + 2 * buffer_size
    assert np.all(d['file_id'] == -1)
    assert np.all(d['psf_box_size'] == 21)
    assert np.all(d['psf_cutout_row'] == 10)  # (21 - 1) / 2, PSF stamp centre
    assert np.all(d['psf_cutout_col'] == 10)
    # number of slices per axis
    # NOTE(review): the 2*12 term looks unrelated to buffer_size=125 — confirm
    # it reproduces the rounding used inside _build_object_data.
    n = (10000 - 2*12) // 250
    half = (250 - 1) / 2  # offset of a slice centre from the region start
    for row_ind in range(n):
        for col_ind in range(n):
            # expected slice centre in original-image pixel coordinates
            row = row_ind * 250 + 125 + half
            col = col_ind * 250 + 125 + half
            index = row_ind * n + col_ind
            assert d['orig_row'][index, 0] == row
            assert d['orig_col'][index, 0] == col
            # sky position must equal the WCS evaluated at the slice centre
            # (position_offset converts zero-indexed pixels to WCS convention)
            ra, dec = coadd_image_data['eu_wcs'].image2sky(
                col + coadd_image_data['position_offset'],
                row + coadd_image_data['position_offset']
            )
            assert d['ra'][index] == ra
            assert d['dec'][index] == dec
            # cutout geometry: start corner and centre within the cutout
            assert d['orig_start_row'][index, 0] == row - 125 - half
            assert d['orig_start_col'][index, 0] == col - 125 - half
            assert d['cutout_row'][index, 0] == 125 + half
            assert d['cutout_col'][index, 0] == 125 + half
            # recomputing the sky position from cutout coordinates must agree
            ra, dec = coadd_image_data['eu_wcs'].image2sky(
                (d['orig_start_col'][index, 0] +
                 d['cutout_col'][index, 0] +
                 coadd_image_data['position_offset']),
                (d['orig_start_row'][index, 0] +
                 d['cutout_row'][index, 0] +
                 coadd_image_data['position_offset'])
            )
            assert d['ra'][index] == ra
            assert d['dec'][index] == dec
            # local WCS jacobian at the slice centre
            jacob = coadd_image_data['eu_wcs'].get_jacobian(
                col + coadd_image_data['position_offset'],
                row + coadd_image_data['position_offset']
            )
            assert d['dudcol'][index, 0] == jacob[0]
            assert d['dudrow'][index, 0] == jacob[1]
            assert d['dvdcol'][index, 0] == jacob[2]
            assert d['dvdrow'][index, 0] == jacob[3]
| import numpy as np
from .._pizza_cutter import _build_object_data
def test_pizza_cutter_build_object_data(coadd_image_data):
d = _build_object_data(
central_size=250,
buffer_size=125,
image_width=10000,
wcs=coadd_image_data['eu_wcs'],
psf_box_size=21,
position_offset=coadd_image_data['position_offset']
)
assert np.array_equal(d['id'], np.arange(d.shape[0]))
assert np.all(d['box_size'] == 500)
assert np.all(d['file_id'] == -1)
assert np.all(d['psf_box_size'] == 21)
assert np.all(d['psf_cutout_row'] == 10)
assert np.all(d['psf_cutout_col'] == 10)
n = (10000 - 2*12) // 250
half = (250 - 1) / 2
for row_ind in range(n):
for col_ind in range(n):
row = row_ind * 250 + 125 + half
col = col_ind * 250 + 125 + half
index = row_ind * n + col_ind
assert d['orig_row'][index, 0] == row
assert d['orig_col'][index, 0] == col
ra, dec = coadd_image_data['eu_wcs'].image2sky(
col + coadd_image_data['position_offset'],
row + coadd_image_data['position_offset']
)
assert d['ra'][index] == ra
assert d['dec'][index] == dec
assert d['orig_start_row'][index, 0] == row - 125 - half
assert d['orig_start_col'][index, 0] == col - 125 - half
assert d['cutout_row'][index, 0] == 125 + half
assert d['cutout_col'][index, 0] == 125 + half
ra, dec = coadd_image_data['eu_wcs'].image2sky(
(d['orig_start_col'][index, 0] +
d['cutout_col'][index, 0] +
coadd_image_data['position_offset']),
(d['orig_start_row'][index, 0] +
d['cutout_row'][index, 0] +
coadd_image_data['position_offset'])
)
assert d['ra'][index] == ra
assert d['dec'][index] == dec
jacob = coadd_image_data['eu_wcs'].get_jacobian(
col + coadd_image_data['position_offset'],
row + coadd_image_data['position_offset']
)
assert d['dudcol'][index, 0] == jacob[0]
assert d['dudrow'][index, 0] == jacob[1]
assert d['dvdcol'][index, 0] == jacob[2]
assert d['dvdrow'][index, 0] == jacob[3]
| none | 1 | 2.06856 | 2 | |
Aula 04/twoSum.py | diexmontana/LabADAGrupoC | 0 | 6618267 | <filename>Aula 04/twoSum.py
# 3. twoSum
# Analizar la complejidad del código:
def twoSum(array):  # O(n)
    """Return True if two values at different positions of *array* sum to 10.

    Single pass with a set of previously seen values: for each element we test
    whether its complement (10 - value) appeared earlier. Because the
    complement must have been seen at an *earlier* index, the two values are
    guaranteed to occupy different positions (so [5, 5] is True but [5] is
    False), replacing the original O(n^2) all-pairs scan.
    """
    seen = set()                 # values visited so far — O(1) membership tests
    for value in array:          # one pass: O(n) total
        if 10 - value in seen:
            return True
        seen.add(value)
    return False


arreglo = [1, 4, 8, 2]
twoSum(arreglo)
# 3. twoSum
# Analizar la complejidad del código:
def twoSum(array): # O(n^2)
# Retorana verdadero si los valores de dos posiciones diferentes suman 10
for i in range(len(array)): # O(n^2) <- n*n
for j in range(len(array)): # O(n)
if i != j and array[i] + array[j] == 10:
return True
return False
arreglo = [1,4,8,2]
twoSum(arreglo) | es | 0.780566 | # 3. twoSum # Analizar la complejidad del código: # O(n^2) # Retorana verdadero si los valores de dos posiciones diferentes suman 10 # O(n^2) <- n*n # O(n) | 3.479834 | 3 |
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_1/_pkg0_1_0_1_1/__init__.py | jnthn/intellij-community | 2 | 6618268 | from ._mod0_1_0_1_1_0 import *
from ._mod0_1_0_1_1_1 import *
from ._mod0_1_0_1_1_2 import *
from ._mod0_1_0_1_1_3 import *
from ._mod0_1_0_1_1_4 import * | from ._mod0_1_0_1_1_0 import *
from ._mod0_1_0_1_1_1 import *
from ._mod0_1_0_1_1_2 import *
from ._mod0_1_0_1_1_3 import *
from ._mod0_1_0_1_1_4 import * | none | 1 | 1.173858 | 1 | |
tensorpack/plot.py | meyer-lab/tensorpack | 4 | 6618269 | <gh_stars>1-10
"""
This file makes all standard plots for tensor analysis. Requires a Decomposition object after running relevant values.
"""
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import ScalarFormatter
from .decomposition import Decomposition
from tensorpack import perform_CP
import seaborn as sns
import time
def tfacr2x(ax, decomp:Decomposition):
    """
    Plot R2X of the tensor factorization for every component count up to
    decomp.max_rr.

    Parameters
    ----------
    ax : axis object
        Matplotlib axis to draw on.
    decomp : Decomposition
        A Decomposition object on which decomp.perform_tfac() has been run.
    """
    component_counts = decomp.rrs
    ax.scatter(component_counts, decomp.TR2X, s=10)
    ax.set_ylabel("Tensor Fac R2X")
    ax.set_xlabel("Number of Components")
    ax.set_title("Variance explained by tensor decomposition")
    tick_positions = list(component_counts)
    ax.set_xticks(tick_positions)
    ax.set_xticklabels(tick_positions)
    ax.set_ylim(0, 1)
    ax.set_xlim(0.5, np.amax(component_counts) + 0.5)
def reduction(ax, decomp):
    """
    Plot data-size reduction of the tensor factorization versus PCA for every
    component count up to decomp.max_rr.

    Parameters
    ----------
    ax : axis object
        Matplotlib axis to draw on.
    decomp : Decomposition
        A Decomposition object on which decomp.perform_tfac() and
        decomp.perform_PCA() have been run.
    """
    tfac_r2x = np.asarray(decomp.TR2X)
    pca_r2x = np.asarray(decomp.PCAR2X)
    tfac_size = decomp.sizeT
    pca_size = decomp.sizePCA
    ax.set_xscale("log", base=2)
    # plot unexplained variance (1 - R2X) against reduced-data size
    ax.plot(tfac_size, 1.0 - tfac_r2x, ".", label="TFac")
    ax.plot(pca_size, 1.0 - pca_r2x, ".", label="PCA")
    ax.set_ylabel("Normalized Unexplained Variance")
    ax.set_xlabel("Size of Reduced Data")
    ax.set_title("Data reduction, TFac vs. PCA")
    ax.set_ylim(bottom=0.0)
    ax.xaxis.set_major_formatter(ScalarFormatter())
    ax.legend()
def q2xchord(ax, decomp):
    """
    Plots Q2X for tensor factorization when removing chords from a single mode
    for all components up to decomp.max_rr. Requires multiple runs to generate
    error bars.

    Parameters
    ----------
    ax : axis object
        Matplotlib axis to draw on.
    decomp : Decomposition
        A Decomposition object on which decomp.Q2X_chord() has been run.
    """
    comps = decomp.rrs
    # one row per component count, one column per imputation run
    # (the original dead `chords_df = decomp.chordQ2X` assignment was removed)
    chords_df = pd.DataFrame(decomp.chordQ2X).T
    chords_df.index = comps
    # Compute both statistics from the raw runs BEFORE attaching either as a
    # column: assigning 'mean' first would include it in the sem computation.
    run_means = chords_df.mean(axis=1)
    run_sems = chords_df.sem(axis=1)
    chords_df['mean'] = run_means
    chords_df['sem'] = run_sems

    Q2Xchord = chords_df['mean']
    Q2Xerrors = chords_df['sem']
    ax.scatter(comps, Q2Xchord, s=10)
    ax.errorbar(comps, Q2Xchord, yerr=Q2Xerrors, fmt='none')
    ax.set_ylabel("Q2X of Chord Imputation")
    ax.set_xlabel("Number of Components")
    ax.set_xticks([x for x in comps])
    ax.set_xticklabels([x for x in comps])
    ax.set_ylim(bottom=0.0, top=1.0)
def q2xentry(ax, decomp, methodname = "CP"):
    """
    Plots Q2X for tensor factorization versus PCA when removing entries for all
    components up to decomp.max_rr. Requires multiple runs to generate error
    bars.

    Parameters
    ----------
    ax : axis object
        Matplotlib axis to draw on.
    decomp : Decomposition
        A Decomposition object on which decomp.entry() has been run.
    methodname : str
        Tensor-method name used in the plot legend.
    """
    entry_df = pd.DataFrame(decomp.entryQ2X).T
    entrypca_df = pd.DataFrame(decomp.entryQ2XPCA).T
    comps = decomp.rrs
    entry_df.index = comps
    entrypca_df.index = comps

    # Compute both statistics from the raw runs BEFORE attaching either as a
    # column: assigning 'mean' first would include it in the sem computation.
    entry_means = entry_df.mean(axis=1)
    entry_sems = entry_df.sem(axis=1)
    entry_df['mean'] = entry_means
    entry_df['sem'] = entry_sems
    pca_means = entrypca_df.mean(axis=1)
    pca_sems = entrypca_df.sem(axis=1)
    entrypca_df['mean'] = pca_means
    entrypca_df['sem'] = pca_sems

    TR2X = entry_df['mean']
    TErr = entry_df['sem']
    PCAR2X = entrypca_df['mean']
    PCAErr = entrypca_df['sem']
    # jitter the two series by +/-0.05 so the error bars do not overlap
    ax.plot(comps - 0.05, TR2X, ".", label=methodname)
    ax.plot(comps + 0.05, PCAR2X, ".", label="PCA")
    ax.errorbar(comps - 0.05, TR2X, yerr=TErr, fmt='none', ecolor='b')
    ax.errorbar(comps + 0.05, PCAR2X, yerr=PCAErr, fmt='none', ecolor='darkorange')
    ax.set_ylabel("Q2X of Entry Imputation")
    ax.set_xlabel("Number of Components")
    ax.set_xticks([x for x in comps])
    ax.set_xticklabels([x for x in comps])
    ax.set_ylim(0, 1)
    ax.legend(loc=4)
def tucker_reduced_Dsize(tensor, ranks:list):
    """ Size of the Tucker-reduced data for each rank combination; forms the
    x-axis of the error vs. data-size plot.

    Parameters
    ----------
    tensor : xarray or numpy.ndarray
        the multi-dimensional input data
    ranks : list
        the list of minimum-error Tucker fits for each component-combination.

    Returns
    -------
    sizes : list
        the reduced-data size for each rank combination (sum over modes of
        rank_i * dim_i, i.e. the total size of the factor matrices).
    """
    # convert xarray input to a plain numpy array
    if type(tensor) is not np.ndarray:
        tensor = tensor.to_numpy()
    shape = tensor.shape
    return [sum(rank[axis] * shape[axis] for axis in range(len(shape)))
            for rank in ranks]
def tucker_reduction(ax, decomp:Decomposition, cp_decomp:Decomposition):
    """ Error versus data size for the minimum-error rank combination from
    Tucker decomposition versus CP decomposition.

    Parameters
    ----------
    ax : axis object
        Matplotlib axis to draw on.
    decomp : Decomposition
        Takes a Decomposition object to run perform_tucker().
    cp_decomp : Decomposition
        Takes a Decomposition object to run perform_CP().

    Example
    -------
    from tensorpack.tucker import tucker_decomp
    from tensorpack.plot import tucker_reduced_Dsize, tucker_reduction
    from tensordata.zohar import data3D as zohar
    from tensorpack.decomposition import Decomposition
    b = Decomposition(zohar().tensor, method=tucker_decomp)
    c = Decomposition(zohar().tensor)
    import matplotlib.pyplot as plt
    f = plt.figure()
    ax = f.add_subplot()
    fig = tucker_reduction(ax, b, c)
    plt.savefig("tucker_cp.svg")
    """
    # Tucker decomposition: error curve against the factor-matrix size
    decomp.perform_tucker()
    sizes = tucker_reduced_Dsize(decomp.data, decomp.TuckRank)

    # CP decomposition: unexplained variance (1 - R2X) against reduced size
    cp_decomp.perform_tfac()
    CPR2X, sizeTfac = np.asarray(cp_decomp.TR2X), cp_decomp.sizeT

    ax.plot(sizes, decomp.TuckErr, label="Tucker", color='C0', lw=3)
    ax.plot(sizeTfac, 1.0 - CPR2X, ".", label="CP", color='C1', markersize=12)
    ax.set_ylim((0.0, 1.0))
    ax.set_xscale("log", base=2)
    ax.set_title('Data Reduction Comparison')
    ax.set_ylabel('Normalized Unexplained Variance')
    ax.set_xlabel('Size of Reduced Data')
    ax.legend()
def plot_weight_mode(ax, factor, labels=False, title = ""):
    """
    Plots a heatmap of the factor matrix for a single tensor mode.

    Parameters
    ----------
    ax : axis object
        Matplotlib axis to draw on.
    factor : numpy array
        Factor matrix for one mode, shape (elements, rank).
    labels : list of string or False
        Row labels for the elements; False falls back to integer indices.
    title : str
        Axis title.
    """
    rank = np.shape(factor)[1]
    components = [str(ii + 1) for ii in range(rank)]
    facs = pd.DataFrame(factor, columns=[f"Cmp. {i}" for i in np.arange(1, rank + 1)],
                        index=labels if labels is not False else list(range(np.shape(factor)[0])))
    # diverging colormap centred at 0; colour scale clipped to [-1, 1]
    sns.heatmap(facs, cmap="PiYG", center=0, xticklabels=components, yticklabels=labels, cbar=True, vmin=-1.0,
                vmax=1.0, ax=ax)
    ax.set_xlabel("Components")
    ax.set_title(title)
class tracker():
    """
    Tracks the unexplained-variance metric (1 - R2X) and, optionally,
    wall-clock runtime across factorization iterations, for later plotting.
    """

    def __init__(self, entry_type = 'R2X', track_runtime = False):
        # name of the tracked metric, used as the y-axis label
        self.metric = entry_type
        self.track_runtime = track_runtime

    def begin(self):
        """ Must run to track runtime """
        self.start = time.time()

    def first_entry(self, tFac):
        """Initialize the history from the first factorization result."""
        self.array = np.full((1, 1), 1 - tFac.R2X)
        if self.track_runtime:
            self.time_array = np.full((1, 1), time.time() - self.start)

    def update(self, tFac):
        """Append the current result; np.append flattens the history to 1-D."""
        self.array = np.append(self.array, 1 - tFac.R2X)
        if self.track_runtime:
            self.time_array = np.append(self.time_array, time.time() - self.start)

    def plot_iteration(self, ax):
        """Plot the tracked metric against the iteration number."""
        ax.plot(range(1, self.array.size + 1), self.array)
        ax.set_ylim((0.0, 1.0))
        ax.set_xlim((0, self.array.size))
        ax.set_xlabel('Iteration')
        ax.set_ylabel(self.metric)

    def plot_runtime(self, ax):
        """Plot the tracked metric against elapsed runtime (requires
        track_runtime=True; the stray no-op `self.time_array` statement from
        the original was removed)."""
        assert self.track_runtime
        ax.plot(self.time_array, self.array)
        ax.set_ylim((0.0, 1.0))
        ax.set_xlim((0, np.max(self.time_array) * 1.2))
        ax.set_xlabel('Runtime')
        ax.set_ylabel(self.metric)
| """
This file makes all standard plots for tensor analysis. Requires a Decomposition object after running relevant values.
"""
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import ScalarFormatter
from .decomposition import Decomposition
from tensorpack import perform_CP
import seaborn as sns
import time
def tfacr2x(ax, decomp:Decomposition):
"""
Plots R2X for tensor factorizations for all components up to decomp.max_rr.
Parameters
----------
ax : axis object
Plot information for a subplot of figure f. See getSetup() in tensorpack.test.common.py for more detail.
decomp : Decomposition
Takes a Decomposition object that has successfully run decomp.perform_tfac().
"""
comps = decomp.rrs
ax.scatter(comps, decomp.TR2X, s=10)
ax.set_ylabel("Tensor Fac R2X")
ax.set_xlabel("Number of Components")
ax.set_title("Variance explained by tensor decomposition")
ax.set_xticks([x for x in comps])
ax.set_xticklabels([x for x in comps])
ax.set_ylim(0, 1)
ax.set_xlim(0.5, np.amax(comps) + 0.5)
def reduction(ax, decomp):
"""
Plots size reduction for tensor factorization versus PCA for all components up to decomp.max_rr.
Parameters
----------
ax : axis object
Plot information for a subplot of figure f.
decomp : Decomposition
Takes a Decomposition object that has successfully run decomp.perform_tfac() and decomp.perform_PCA().
"""
CPR2X, PCAR2X, sizeTfac, sizePCA = np.asarray(decomp.TR2X), np.asarray(decomp.PCAR2X), decomp.sizeT, decomp.sizePCA
ax.set_xscale("log", base=2)
ax.plot(sizeTfac, 1.0 - CPR2X, ".", label="TFac")
ax.plot(sizePCA, 1.0 - PCAR2X, ".", label="PCA")
ax.set_ylabel("Normalized Unexplained Variance")
ax.set_xlabel("Size of Reduced Data")
ax.set_title("Data reduction, TFac vs. PCA")
ax.set_ylim(bottom=0.0)
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.legend()
def q2xchord(ax, decomp):
"""
Plots Q2X for tensor factorization when removing chords from a single mode for all components up to decomp.max_rr.
Requires multiple runs to generate error bars.
Parameters
----------
ax : axis object
Plot information for a subplot of figure f.
decomp : Decomposition
Takes a Decomposition object that has successfully run decomp.Q2X_chord().
"""
chords_df = decomp.chordQ2X
comps = decomp.rrs
chords_df = pd.DataFrame(decomp.chordQ2X).T
chords_df.index = comps
chords_df['mean'] = chords_df.mean(axis=1)
chords_df['sem'] = chords_df.sem(axis=1)
Q2Xchord = chords_df['mean']
Q2Xerrors = chords_df['sem']
ax.scatter(comps, Q2Xchord, s=10)
ax.errorbar(comps, Q2Xchord, yerr=Q2Xerrors, fmt='none')
ax.set_ylabel("Q2X of Chord Imputation")
ax.set_xlabel("Number of Components")
ax.set_xticks([x for x in comps])
ax.set_xticklabels([x for x in comps])
ax.set_ylim(bottom=0.0, top=1.0)
def q2xentry(ax, decomp, methodname = "CP"):
"""
Plots Q2X for tensor factorization versus PCA when removing entries for all components up to decomp.max_rr.
Requires multiple runs to generate error bars.
Parameters
----------
ax : axis object
Plot information for a subplot of figure f.
decomp : Decomposition
Takes a Decomposition object that has successfully run decomp.entry().
methodname : str
Allows for proper tensor method when naming graph axes.
"""
entry_df = pd.DataFrame(decomp.entryQ2X).T
entrypca_df = pd.DataFrame(decomp.entryQ2XPCA).T
comps = decomp.rrs
entry_df.index = comps
entry_df['mean'] = entry_df.mean(axis=1)
entry_df['sem'] = entry_df.sem(axis=1)
entrypca_df.index = comps
entrypca_df['mean'] = entrypca_df.mean(axis=1)
entrypca_df['sem'] = entrypca_df.sem(axis=1)
TR2X = entry_df['mean']
TErr = entry_df['sem']
PCAR2X = entrypca_df['mean']
PCAErr = entrypca_df['sem']
ax.plot(comps - 0.05, TR2X, ".", label=methodname)
ax.plot(comps + 0.05, PCAR2X, ".", label="PCA")
ax.errorbar(comps - 0.05, TR2X, yerr=TErr, fmt='none', ecolor='b')
ax.errorbar(comps + 0.05, PCAR2X, yerr=PCAErr, fmt='none', ecolor='darkorange')
ax.set_ylabel("Q2X of Entry Imputation")
ax.set_xlabel("Number of Components")
ax.set_xticks([x for x in comps])
ax.set_xticklabels([x for x in comps])
ax.set_ylim(0, 1)
ax.legend(loc=4)
def tucker_reduced_Dsize(tensor, ranks:list):
""" Output the error (1 - r2x) for each size of the data at each component # for tucker decomposition.
This forms the x-axis of the error vs. data size plot.
Parameters
----------
tensor : xarray or numpy.ndarray
the multi-dimensional input data
ranks : list
the list of minimum-error Tucker fits for each component-combinations.
Returns
-------
sizes : list
the size of reduced data by Tucker for each error.
"""
# if tensor is xarray...
if type(tensor) is not np.ndarray:
tensor = tensor.to_numpy()
sizes = []
for rank in ranks:
sum_comps = 0
for i in range(len(tensor.shape)):
sum_comps += rank[i] * tensor.shape[i]
sizes.append(sum_comps)
return sizes
def tucker_reduction(ax, decomp:Decomposition, cp_decomp:Decomposition):
""" Error versus data size for minimum error combination of rank from Tucker decomposition versus CP decomposition.
The error for those combinations that are the same dimensions, ie., for a 3-D tensor, [1, 1, 1], [2, 2, 2], etc
are shown by a different marker shape and color.
Parameters
----------
ax : axis object
Plot information for a subplot of figure f.
decomp : Decomposition
Takes a Decomposition object to run perform_tucker().
cp_decomp : Decomposition
Takes a Decomposition object to run perform_CP().
Example
-------
from tensorpack.tucker import tucker_decomp
from tensorpack.plot import tucker_reduced_Dsize, tucker_reduction
from tensordata.zohar import data3D as zohar
from tensorpack.decomposition import Decomposition
b = Decomposition(zohar().tensor, method=tucker_decomp)
c = Decomposition(zohar().tensor)
import matplotlib.pyplot as plt
f = plt.figure()
ax = f.add_subplot()
fig = tucker_reduction(ax, b, c)
plt.savefig("tucker_cp.svg")
"""
# tucker decomp
decomp.perform_tucker()
sizes = tucker_reduced_Dsize(decomp.data, decomp.TuckRank)
# CP decomp
cp_decomp.perform_tfac()
CPR2X, sizeTfac = np.asarray(cp_decomp.TR2X), cp_decomp.sizeT
ax.plot(sizes, decomp.TuckErr, label="Tucker", color='C0', lw=3)
ax.plot(sizeTfac, 1.0 - CPR2X, ".", label="CP", color='C1', markersize=12)
ax.set_ylim((0.0, 1.0))
ax.set_xscale("log", base=2)
ax.set_title('Data Reduction Comparison')
ax.set_ylabel('Normalized Unexplained Variance')
ax.set_xlabel('Size of Reduced Data')
ax.legend()
def plot_weight_mode(ax, factor, labels=False, title = ""):
"""
Plots heatmaps for a single mode factors.
Parameters
----------
ax : axis object
Plot information for a subplot of figure f.
factor: numpy array
Factorized mode
labels: list of string or False
Labels for each of the elements
title" String
Figure title
"""
rank = np.shape(factor)[1]
components = [str(ii + 1) for ii in range(rank)]
facs = pd.DataFrame(factor, columns=[f"Cmp. {i}" for i in np.arange(1, rank + 1)],
index=labels if labels is not False else list(range(np.shape(factor)[0])))
sns.heatmap(facs, cmap="PiYG", center=0, xticklabels=components, yticklabels=labels, cbar=True, vmin=-1.0,
vmax=1.0, ax=ax)
ax.set_xlabel("Components")
ax.set_title(title)
class tracker():
"""
Creates an array, tracks next unfilled entry & runtime, holds tracked name for plotting
"""
def __init__(self, entry_type = 'R2X', track_runtime = False) :
self.metric = entry_type
self.track_runtime = track_runtime
def begin(self):
""" Must run to track runtime """
self.start = time.time()
def first_entry(self, tFac):
self.array = np.full((1,1), 1 - tFac.R2X)
if self.track_runtime:
self.time_array = np.full((1,1), time.time() - self.start)
def update(self, tFac):
self.array = np.append(self.array, 1 - tFac.R2X)
if self.track_runtime:
self.time_array = np.append(self.time_array, time.time() - self.start)
def plot_iteration(self, ax):
ax.plot(range(1, self.array.size+1), self.array)
ax.set_ylim((0.0, 1.0))
ax.set_xlim((0, self.array.size))
ax.set_xlabel('Iteration')
ax.set_ylabel(self.metric)
def plot_runtime(self, ax):
assert self.track_runtime
self.time_array
ax.plot(self.time_array, self.array)
ax.set_ylim((0.0, 1.0))
ax.set_xlim((0, np.max(self.time_array)*1.2))
ax.set_xlabel('Runtime')
ax.set_ylabel(self.metric) | en | 0.669513 | This file makes all standard plots for tensor analysis. Requires a Decomposition object after running relevant values. Plots R2X for tensor factorizations for all components up to decomp.max_rr. Parameters ---------- ax : axis object Plot information for a subplot of figure f. See getSetup() in tensorpack.test.common.py for more detail. decomp : Decomposition Takes a Decomposition object that has successfully run decomp.perform_tfac(). Plots size reduction for tensor factorization versus PCA for all components up to decomp.max_rr. Parameters ---------- ax : axis object Plot information for a subplot of figure f. decomp : Decomposition Takes a Decomposition object that has successfully run decomp.perform_tfac() and decomp.perform_PCA(). Plots Q2X for tensor factorization when removing chords from a single mode for all components up to decomp.max_rr. Requires multiple runs to generate error bars. Parameters ---------- ax : axis object Plot information for a subplot of figure f. decomp : Decomposition Takes a Decomposition object that has successfully run decomp.Q2X_chord(). Plots Q2X for tensor factorization versus PCA when removing entries for all components up to decomp.max_rr. Requires multiple runs to generate error bars. Parameters ---------- ax : axis object Plot information for a subplot of figure f. decomp : Decomposition Takes a Decomposition object that has successfully run decomp.entry(). methodname : str Allows for proper tensor method when naming graph axes. Output the error (1 - r2x) for each size of the data at each component # for tucker decomposition. This forms the x-axis of the error vs. data size plot. Parameters ---------- tensor : xarray or numpy.ndarray the multi-dimensional input data ranks : list the list of minimum-error Tucker fits for each component-combinations. Returns ------- sizes : list the size of reduced data by Tucker for each error. # if tensor is xarray... 
Error versus data size for minimum error combination of rank from Tucker decomposition versus CP decomposition. The error for those combinations that are the same dimensions, ie., for a 3-D tensor, [1, 1, 1], [2, 2, 2], etc are shown by a different marker shape and color. Parameters ---------- ax : axis object Plot information for a subplot of figure f. decomp : Decomposition Takes a Decomposition object to run perform_tucker(). cp_decomp : Decomposition Takes a Decomposition object to run perform_CP(). Example ------- from tensorpack.tucker import tucker_decomp from tensorpack.plot import tucker_reduced_Dsize, tucker_reduction from tensordata.zohar import data3D as zohar from tensorpack.decomposition import Decomposition b = Decomposition(zohar().tensor, method=tucker_decomp) c = Decomposition(zohar().tensor) import matplotlib.pyplot as plt f = plt.figure() ax = f.add_subplot() fig = tucker_reduction(ax, b, c) plt.savefig("tucker_cp.svg") # tucker decomp # CP decomp Plots heatmaps for a single mode factors. Parameters ---------- ax : axis object Plot information for a subplot of figure f. factor: numpy array Factorized mode labels: list of string or False Labels for each of the elements title" String Figure title Creates an array, tracks next unfilled entry & runtime, holds tracked name for plotting Must run to track runtime | 3.022421 | 3 |
easy/Q141_LinkedListCycle.py | Kaciras/leetcode | 0 | 6618270 | <filename>easy/Q141_LinkedListCycle.py
from utils import ListNode, print_linked_list
class Solution:
    def hasCycle(self, head: ListNode) -> bool:
        """Detect a cycle with Floyd's tortoise-and-hare: the walker advances
        one node per step, the runner two; they can only meet inside a cycle."""
        walker = head
        runner = head
        while runner:
            walker = walker.next
            runner = runner.next
            if not runner:
                return False
            runner = runner.next
            if runner == walker:
                return True
        return False
if __name__ == '__main__':
    # Build a four-node list 0 -> 1 -> 2 -> 3 and close a cycle 3 -> 1.
    a = ListNode(0)
    b = ListNode(1)
    c = ListNode(2)
    d = ListNode(3)
    a.next = b
    b.next = c
    c.next = d
    d.next = b
    # NOTE(review): print_linked_list on a cyclic list may not terminate —
    # confirm the utils helper handles cycles.
    print_linked_list(a)
    print(Solution().hasCycle(a))  # expected True: list contains a cycle
    d.next = None  # break the cycle
    print(Solution().hasCycle(a))  # expected False: now a plain linear list
| <filename>easy/Q141_LinkedListCycle.py
from utils import ListNode, print_linked_list
class Solution:
def hasCycle(self, head: ListNode) -> bool:
slow = fast = head
while fast:
slow, fast = slow.next, fast.next
if not fast:
return False
fast = fast.next
if fast == slow:
return True
return False
if __name__ == '__main__':
a = ListNode(0)
b = ListNode(1)
c = ListNode(2)
d = ListNode(3)
a.next = b
b.next = c
c.next = d
d.next = b
print_linked_list(a)
print(Solution().hasCycle(a))
d.next = None
print(Solution().hasCycle(a))
| none | 1 | 3.905926 | 4 | |
archai/algos/nasbench101/nasbench101_model_desc_builder.py | cclauss/archai | 1 | 6618271 | <reponame>cclauss/archai
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Iterable, Sequence, Tuple, List, Set, Optional
import copy
from overrides import overrides
from archai.nas.model_desc import ConvMacroParams, CellDesc, CellType, OpDesc, \
EdgeDesc, TensorShape, TensorShapes, TensorShapesList, NodeDesc, AuxTowerDesc
from archai.common.config import Config
from archai.nas.model_desc_builder import ModelDescBuilder
from archai.nas.operations import MultiOp, Op
from . import model_matrix
from .nasbench101_op import NasBench101Op
class NasBench101CellBuilder(ModelDescBuilder):
    """Model-description builder that reproduces a NAS-Bench-101 cell from its
    adjacency matrix (`cell_matrix`) and per-vertex operation list."""

    @overrides
    def pre_build(self, conf_model_desc:Config)->None:
        """Register the custom op type so OpDesc('nasbench101_op') resolves."""
        Op.register_op('nasbench101_op',
                       lambda op_desc, arch_params, affine:
                           NasBench101Op(op_desc, arch_params, affine))

    @overrides
    def build_cells(self, in_shapes:TensorShapesList, conf_model_desc:Config)\
            ->Tuple[List[CellDesc], List[Optional[AuxTowerDesc]]]:
        """Prune the configured cell matrix/ops, cache them, then delegate to
        the base-class cell construction."""
        params = conf_model_desc['params'].to_dict()
        cell_matrix = params['cell_matrix']
        vertex_ops = params['vertex_ops']
        # prune vertices that are not on an input->output path
        self._cell_matrix, self._vertex_ops = model_matrix.prune(cell_matrix, vertex_ops)
        return super().build_cells(in_shapes, conf_model_desc)

    @overrides
    def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
                    cell_index:int, cell_type:CellType, node_count:int,
                    in_shape:TensorShape, out_shape:TensorShape) \
                        ->Tuple[TensorShapes, List[NodeDesc]]:
        """Create one NodeDesc per internal NAS-Bench-101 vertex, wiring edges
        according to the pruned adjacency matrix."""
        # channel counts must agree between cell input and output
        assert in_shape[0]==out_shape[0]

        nodes:List[NodeDesc] = []
        conv_params = ConvMacroParams(in_shape[0], out_shape[0])

        for i in range(node_count):
            edges = []
            input_ids = []
            first_proj = False # if input node is connected then it needs projection
            if self._cell_matrix[0, i+1]: # nasbench internal node starts at 1
                input_ids.append(0) # connect to s0
                first_proj = True
            for j in range(i): # look at all internal vertices before us
                if self._cell_matrix[j+1, i+1]: # if there is a connection
                    input_ids.append(j+2) # offset because of s0, s1
            op_desc = OpDesc('nasbench101_op',
                                params={
                                    'conv': conv_params,
                                    'stride': 1,
                                    'vertex_op': self._vertex_ops[i+1], # offset because of input node
                                    'first_proj': first_proj
                                }, in_len=len(input_ids), trainables=None, children=None) # TODO: should we pass children here?
            edge = EdgeDesc(op_desc, input_ids=input_ids)
            edges.append(edge)
            nodes.append(NodeDesc(edges=edges, conv_params=conv_params))

        out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
        return out_shapes, nodes
# Licensed under the MIT license.
from typing import Iterable, Sequence, Tuple, List, Set, Optional
import copy
from overrides import overrides
from archai.nas.model_desc import ConvMacroParams, CellDesc, CellType, OpDesc, \
EdgeDesc, TensorShape, TensorShapes, TensorShapesList, NodeDesc, AuxTowerDesc
from archai.common.config import Config
from archai.nas.model_desc_builder import ModelDescBuilder
from archai.nas.operations import MultiOp, Op
from . import model_matrix
from .nasbench101_op import NasBench101Op
class NasBench101CellBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('nasbench101_op',
lambda op_desc, arch_params, affine:
NasBench101Op(op_desc, arch_params, affine))
@overrides
def build_cells(self, in_shapes:TensorShapesList, conf_model_desc:Config)\
->Tuple[List[CellDesc], List[Optional[AuxTowerDesc]]]:
params = conf_model_desc['params'].to_dict()
cell_matrix = params['cell_matrix']
vertex_ops = params['vertex_ops']
self._cell_matrix, self._vertex_ops = model_matrix.prune(cell_matrix, vertex_ops)
return super().build_cells(in_shapes, conf_model_desc)
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
for i in range(node_count):
edges = []
input_ids = []
first_proj = False # if input node is connected then it needs projection
if self._cell_matrix[0, i+1]: # nadbench internal node starts at 1
input_ids.append(0) # connect to s0
first_proj = True
for j in range(i): # look at all internal vertex before us
if self._cell_matrix[j+1, i+1]: # if there is connection
input_ids.append(j+2) # offset because of s0, s1
op_desc = OpDesc('nasbench101_op',
params={
'conv': conv_params,
'stride': 1,
'vertex_op': self._vertex_ops[i+1], # offset because of input node
'first_proj': first_proj
}, in_len=len(input_ids), trainables=None, children=None) # TODO: should we pass children here?
edge = EdgeDesc(op_desc, input_ids=input_ids)
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes | en | 0.856042 | # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. # if input node is connected then it needs projection # nadbench internal node starts at 1 # connect to s0 # look at all internal vertex before us # if there is connection # offset because of s0, s1 # offset because of input node # TODO: should we pass children here? | 1.690768 | 2 |
libcity/data/dataset/traffic_state_od_dataset.py | moghadas76/test_bigcity | 221 | 6618272 | import os
import numpy as np
from libcity.data.dataset import TrafficStateDataset
class TrafficStateOdDataset(TrafficStateDataset):
    """Origin-destination traffic-state dataset: loads data as a 4-D array of
    shape (len_time, num_nodes, num_nodes, feature_dim)."""

    def __init__(self, config):
        super().__init__(config)
        self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/',
                                            'od_based_{}.npz'.format(self.parameters_str))
        self._load_rel()  # don't care whether there is a .rel file

    def _load_dyna(self, filename):
        """Load the dynamic OD file via the parent's 4-D OD loader."""
        return super(TrafficStateOdDataset, self)._load_od_4d(filename)

    def _load_geo(self):
        """
        Load the .geo file, format [geo_id, type, coordinates, properties (several columns)].
        """
        super()._load_geo()

    def _load_rel(self):
        """
        Load the .rel file, format [rel_id, type, origin_id, destination_id, properties (several columns)].

        Returns:
            np.ndarray: self.adj_mx, the N*N adjacency matrix
        """
        super()._load_rel()

    def _add_external_information(self, df, ext_data=None):
        """
        Append external information (day of week, time of day, external data).

        Args:
            df(np.ndarray): traffic-state array, (len_time, ..., feature_dim)
            ext_data(np.ndarray): external data

        Returns:
            np.ndarray: merged external + traffic-state data, (len_time, ..., feature_dim_plus)
        """
        return super()._add_external_information_4d(df, ext_data)

    def get_data_feature(self):
        """
        Return dataset features: scaler is the normalization method, adj_mx is
        the adjacency matrix, num_nodes is the number of grid nodes,
        feature_dim is the input dimension, output_dim the model output dimension.

        Returns:
            dict: dictionary of the dataset's features
        """
        return {"scaler": self.scaler, "adj_mx": self.adj_mx,
                "num_nodes": self.num_nodes, "feature_dim": self.feature_dim, "ext_dim": self.ext_dim,
                "output_dim": self.output_dim, "num_batches": self.num_batches}
| import os
import numpy as np
from libcity.data.dataset import TrafficStateDataset
class TrafficStateOdDataset(TrafficStateDataset):
    """Dataset for OD (origin-destination) based traffic-state data.

    Data is loaded as 4-D arrays of shape
    (len_time, num_nodes, num_nodes, feature_dim) and cached as .npz files
    under ./libcity/cache/dataset_cache/.
    """

    def __init__(self, config):
        super().__init__(config)
        self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/',
                                            'od_based_{}.npz'.format(self.parameters_str))
        self._load_rel()  # don't care whether there is a .rel file

    def _load_dyna(self, filename):
        # OD data is inherently 4-D: (time, origin, destination, feature).
        return super(TrafficStateOdDataset, self)._load_od_4d(filename)

    def _load_geo(self):
        """
        Load the .geo file, format [geo_id, type, coordinates, properties (several columns)].
        """
        super()._load_geo()

    def _load_rel(self):
        """
        Load the .rel file, format [rel_id, type, origin_id, destination_id, properties (several columns)].
        Returns:
            np.ndarray: self.adj_mx, an N*N adjacency matrix
        """
        super()._load_rel()

    def _add_external_information(self, df, ext_data=None):
        """
        Add external information (day of week, time of day, external data).
        Args:
            df(np.ndarray): traffic-state data, multi-dim array (len_time, ..., feature_dim)
            ext_data(np.ndarray): external data
        Returns:
            np.ndarray: traffic-state data fused with external data, (len_time, ..., feature_dim_plus)
        """
        return super()._add_external_information_4d(df, ext_data)

    def get_data_feature(self):
        """
        Return dataset features: scaler is the normalization method, adj_mx the
        adjacency matrix, num_nodes the number of nodes, feature_dim the input
        dimensionality and output_dim the model's output dimensionality.
        Returns:
            dict: dictionary of the dataset's relevant features
        """
        return {"scaler": self.scaler, "adj_mx": self.adj_mx,
                "num_nodes": self.num_nodes, "feature_dim": self.feature_dim, "ext_dim": self.ext_dim,
                "output_dim": self.output_dim, "num_batches": self.num_batches}
| zh | 0.431949 | # don't care whether there is a .rel file 加载.geo文件,格式[geo_id, type, coordinates, properties(若干列)] 加载.rel文件,格式[rel_id, type, origin_id, destination_id, properties(若干列)] Returns: np.ndarray: self.adj_mx, N*N的邻接矩阵 增加外部信息(一周中的星期几/day of week,一天中的某个时刻/time of day,外部数据), Args: df(np.ndarray): 交通状态数据多维数组, (len_time, ..., feature_dim) ext_data(np.ndarray): 外部数据 Returns: np.ndarray: 融合后的外部数据和交通状态数据, (len_time, ..., feature_dim_plus) 返回数据集特征,scaler是归一化方法,adj_mx是邻接矩阵,num_nodes是网格的个数, feature_dim是输入数据的维度,output_dim是模型输出的维度 Returns: dict: 包含数据集的相关特征的字典 | 2.417269 | 2 |
app/models/user.py | gpp0725/flask-yushu | 0 | 6618273 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/12 0012 19:29
# @Author : Gpp
# @File : user.py
from werkzeug.security import generate_password_hash, check_password_hash
from app.models.base import Base
from sqlalchemy import Column, Integer, String, Boolean, Float
from flask_login import UserMixin
class User(UserMixin, Base):
    """Account model; passwords are stored only as salted hashes."""
    id = Column(Integer, primary_key=True)
    nickname = Column(String(24), nullable=False)
    phone_number = Column(String(18), unique=True)
    # Stored hash, mapped to DB column 'password'; exposed via the property below.
    _password = Column('password', String(64), nullable=False)
    email = Column(String(50), unique=True, nullable=False)
    confirmed = Column(Boolean, default=False)  # e-mail confirmation flag
    beans = Column(Float, default=0)
    send_count = Column(Integer, default=0)
    receive_counter = Column(Integer, default=0)
    # WeChat identity fields.
    wx_open_id = Column(String(50))
    wx_name = Column(String(32))

    @property
    def password(self):
        # Reading back only ever yields the hash, never the plaintext.
        return self._password

    @password.setter
    def password(self, raw):
        # Hash on assignment so the plaintext is never persisted.
        self._password = generate_password_hash(raw)

    def check_password(self, raw):
        """Return True if `raw` matches the stored password hash."""
        return check_password_hash(self._password, raw)

    # get_id is a fixed name required by Flask-Login's UserMixin protocol.
    def get_id(self):
        return self.id
# @login_manager.user_loader
# def load_user(userid):
# return User.get(userid) | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/12 0012 19:29
# @Author : Gpp
# @File : user.py
from werkzeug.security import generate_password_hash, check_password_hash
from app.models.base import Base
from sqlalchemy import Column, Integer, String, Boolean, Float
from flask_login import UserMixin
class User(UserMixin, Base):
    """Account model; passwords are stored only as salted hashes."""
    id = Column(Integer, primary_key=True)
    nickname = Column(String(24), nullable=False)
    phone_number = Column(String(18), unique=True)
    # Stored hash, mapped to DB column 'password'; exposed via the property below.
    _password = Column('password', String(64), nullable=False)
    email = Column(String(50), unique=True, nullable=False)
    confirmed = Column(Boolean, default=False)  # e-mail confirmation flag
    beans = Column(Float, default=0)
    send_count = Column(Integer, default=0)
    receive_counter = Column(Integer, default=0)
    # WeChat identity fields.
    wx_open_id = Column(String(50))
    wx_name = Column(String(32))

    @property
    def password(self):
        # Reading back only ever yields the hash, never the plaintext.
        return self._password

    @password.setter
    def password(self, raw):
        # Hash on assignment so the plaintext is never persisted.
        self._password = generate_password_hash(raw)

    def check_password(self, raw):
        """Return True if `raw` matches the stored password hash."""
        return check_password_hash(self._password, raw)

    # get_id is a fixed name required by Flask-Login's UserMixin protocol.
    def get_id(self):
        return self.id
# @login_manager.user_loader
# def load_user(userid):
# return User.get(userid) | en | 0.12112 | # !/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2020/2/12 0012 19:29 # @Author : Gpp # @File : user.py # get_id 是固定的 # @login_manager.user_loader # def load_user(userid): # return User.get(userid) | 2.38926 | 2 |
test/e2e/replacement_values.py | RedbackThomson/ack-eks-controller | 10 | 6618274 | <gh_stars>1-10
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Stores the values used by each of the integration tests for replacing the
EKS-specific test variables.
"""
from e2e.bootstrap_resources import get_bootstrap_resources
# Substitution map used by the e2e tests to fill EKS-specific placeholders.
REPLACEMENT_VALUES = {
    # IAM role ARNs created by the bootstrap step.
    "CLUSTER_ROLE": get_bootstrap_resources().ClusterRole.arn,
    "FARGATE_POD_ROLE": get_bootstrap_resources().FargatePodRole.arn,
    "NODEGROUP_ROLE": get_bootstrap_resources().NodegroupRole.arn,
    # Subnet IDs from the bootstrap cluster VPC (two public, two private).
    "PUBLIC_SUBNET_1": get_bootstrap_resources().ClusterVPC.public_subnets.subnet_ids[0],
    "PUBLIC_SUBNET_2": get_bootstrap_resources().ClusterVPC.public_subnets.subnet_ids[1],
    "PRIVATE_SUBNET_1": get_bootstrap_resources().ClusterVPC.private_subnets.subnet_ids[0],
    "PRIVATE_SUBNET_2": get_bootstrap_resources().ClusterVPC.private_subnets.subnet_ids[1]
}
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Stores the values used by each of the integration tests for replacing the
EKS-specific test variables.
"""
from e2e.bootstrap_resources import get_bootstrap_resources
# Substitution map used by the e2e tests to fill EKS-specific placeholders.
REPLACEMENT_VALUES = {
    # IAM role ARNs created by the bootstrap step.
    "CLUSTER_ROLE": get_bootstrap_resources().ClusterRole.arn,
    "FARGATE_POD_ROLE": get_bootstrap_resources().FargatePodRole.arn,
    "NODEGROUP_ROLE": get_bootstrap_resources().NodegroupRole.arn,
    # Subnet IDs from the bootstrap cluster VPC (two public, two private).
    "PUBLIC_SUBNET_1": get_bootstrap_resources().ClusterVPC.public_subnets.subnet_ids[0],
    "PUBLIC_SUBNET_2": get_bootstrap_resources().ClusterVPC.public_subnets.subnet_ids[1],
    "PRIVATE_SUBNET_1": get_bootstrap_resources().ClusterVPC.private_subnets.subnet_ids[0],
    "PRIVATE_SUBNET_2": get_bootstrap_resources().ClusterVPC.private_subnets.subnet_ids[1]
}
glitter/pages/models.py | dhamaniasad/django-glitter | 3 | 6618275 | from django.contrib.contenttypes.models import ContentType
from django.db import models
from glitter.mixins import GlitterMixin
from glitter.models import Version
from mptt.managers import TreeManager
from mptt.models import MPTTModel, TreeForeignKey
from taggit.managers import TaggableManager
from .validators import validate_page_url
class PageManager(TreeManager):
    """Tree-aware manager adding publication-state querysets for pages."""

    def published(self):
        """Pages flagged published that also have a live current version."""
        flagged = self.filter(published=True)
        return flagged.exclude(current_version=None)

    def unpublished(self):
        """Pages flagged published but still lacking a current version."""
        return self.filter(current_version__isnull=True, published=True)
class Page(MPTTModel, GlitterMixin):
    """A CMS page arranged in an MPTT tree and addressed by its URL."""
    # Site-relative URL; unique, so it doubles as the page's natural key.
    url = models.CharField('URL', max_length=100, unique=True, validators=[validate_page_url])
    title = models.CharField(max_length=100)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
    login_required = models.BooleanField(default=False)
    show_in_navigation = models.BooleanField(default=True, db_index=True)
    # Denormalised counter of pending draft versions, maintained in save().
    unpublished_count = models.PositiveIntegerField(default=0, editable=False)
    tags = TaggableManager(blank=True)
    # When set, the page serves a glitter app instead of versioned content.
    glitter_app_name = models.CharField(
        verbose_name='Glitter App', max_length=255, db_index=True, blank=True
    )

    objects = PageManager()

    def __str__(self):
        return '%s -- %s' % (self.title, self.url)

    class Meta(GlitterMixin.Meta):
        verbose_name = 'page'
        ordering = ('url',)
        permissions = (
            ('view_protected_page', 'Can view protected page'),
        )

    def get_absolute_url(self):
        # Pages are addressed directly by their stored URL.
        return self.url

    def save(self, *args, **kwargs):
        """Recompute ``unpublished_count`` before persisting.

        Counts this page's saved versions (those with a version number) that
        are newer than the currently published version.
        """
        # Find the number of unpublished pages
        content_type = ContentType.objects.get_for_model(self)
        unpublished_pages = Version.objects.filter(
            content_type=content_type, object_id=self.id
        ).exclude(version_number__isnull=True)
        if self.current_version:
            # Only versions newer than the live one count as unpublished.
            unpublished_pages = unpublished_pages.filter(
                version_number__gt=self.current_version.version_number
            )
        self.unpublished_count = unpublished_pages.count()
        super().save(*args, **kwargs)

    @property
    def is_visible(self):
        """
        Return a boolean if the page is visible in navigation.
        Pages must have show in navigation set. Regular pages must be published (published and
        have a current version - checked with `is_published`), pages with a glitter app associated
        don't need any page versions.
        """
        if self.glitter_app_name:
            visible = self.show_in_navigation
        else:
            visible = self.show_in_navigation and self.is_published
        return visible
| from django.contrib.contenttypes.models import ContentType
from django.db import models
from glitter.mixins import GlitterMixin
from glitter.models import Version
from mptt.managers import TreeManager
from mptt.models import MPTTModel, TreeForeignKey
from taggit.managers import TaggableManager
from .validators import validate_page_url
class PageManager(TreeManager):
    """Tree-aware manager adding publication-state querysets for pages."""

    def published(self):
        """Pages flagged published that also have a live current version."""
        flagged = self.filter(published=True)
        return flagged.exclude(current_version=None)

    def unpublished(self):
        """Pages flagged published but still lacking a current version."""
        return self.filter(current_version__isnull=True, published=True)
class Page(MPTTModel, GlitterMixin):
    """A CMS page arranged in an MPTT tree and addressed by its URL."""
    # Site-relative URL; unique, so it doubles as the page's natural key.
    url = models.CharField('URL', max_length=100, unique=True, validators=[validate_page_url])
    title = models.CharField(max_length=100)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
    login_required = models.BooleanField(default=False)
    show_in_navigation = models.BooleanField(default=True, db_index=True)
    # Denormalised counter of pending draft versions, maintained in save().
    unpublished_count = models.PositiveIntegerField(default=0, editable=False)
    tags = TaggableManager(blank=True)
    # When set, the page serves a glitter app instead of versioned content.
    glitter_app_name = models.CharField(
        verbose_name='Glitter App', max_length=255, db_index=True, blank=True
    )

    objects = PageManager()

    def __str__(self):
        return '%s -- %s' % (self.title, self.url)

    class Meta(GlitterMixin.Meta):
        verbose_name = 'page'
        ordering = ('url',)
        permissions = (
            ('view_protected_page', 'Can view protected page'),
        )

    def get_absolute_url(self):
        # Pages are addressed directly by their stored URL.
        return self.url

    def save(self, *args, **kwargs):
        """Recompute ``unpublished_count`` before persisting.

        Counts this page's saved versions (those with a version number) that
        are newer than the currently published version.
        """
        # Find the number of unpublished pages
        content_type = ContentType.objects.get_for_model(self)
        unpublished_pages = Version.objects.filter(
            content_type=content_type, object_id=self.id
        ).exclude(version_number__isnull=True)
        if self.current_version:
            # Only versions newer than the live one count as unpublished.
            unpublished_pages = unpublished_pages.filter(
                version_number__gt=self.current_version.version_number
            )
        self.unpublished_count = unpublished_pages.count()
        super().save(*args, **kwargs)

    @property
    def is_visible(self):
        """
        Return a boolean if the page is visible in navigation.
        Pages must have show in navigation set. Regular pages must be published (published and
        have a current version - checked with `is_published`), pages with a glitter app associated
        don't need any page versions.
        """
        if self.glitter_app_name:
            visible = self.show_in_navigation
        else:
            visible = self.show_in_navigation and self.is_published
        return visible
| en | 0.915859 | # Find the number of unpublished pages Return a boolean if the page is visible in navigation. Pages must have show in navigation set. Regular pages must be published (published and have a current version - checked with `is_published`), pages with a glitter app associated don't need any page versions. | 1.941016 | 2 |
python/testData/inspections/PyMethodMayBeStaticInspection/attributeNamedSelf.py | jnthn/intellij-community | 2 | 6618276 | <filename>python/testData/inspections/PyMethodMayBeStaticInspection/attributeNamedSelf.py<gh_stars>1-10
x = object()
x.self = 42
class C:
def <weak_warning descr="Method 'method' may be 'static'">method</weak_warning>(self):
print(x.self)
| <filename>python/testData/inspections/PyMethodMayBeStaticInspection/attributeNamedSelf.py<gh_stars>1-10
x = object()
x.self = 42
class C:
def <weak_warning descr="Method 'method' may be 'static'">method</weak_warning>(self):
print(x.self)
| none | 1 | 1.966419 | 2 | |
managers/operatorsSimaudit/stimulator.py | HarshKhilawala/cerebmodels | 0 | 6618277 | <reponame>HarshKhilawala/cerebmodels
# ~/managers/operatorsSimaudit/stimulator.py
import os
import sys
from neuron import h
# import modules from other directories
# set to ~/cerebmodels
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
#
from utilities import UsefulUtils as uu
class Stimulator(object):
    """Factory for NEURON stimulation objects (current and voltage clamps).

    Current injections: :py:meth:`.inject_IClamp` (built-in ``h.IClamp``),
    :py:meth:`.inject_IRamp` (custom ``h.IRamp`` mod, ramps of the form "/",
    "\\" or "/\\") and :py:meth:`.inject_GrC_Sine` (custom ``h.GrC_Sine``
    sinusoid for the granule cell), dispatched by name via
    :py:meth:`inject_current_NEURON`.

    Voltage clamps: :py:meth:`.inject_SEClamp` and :py:meth:`.inject_VClamp`,
    dispatched via :py:meth:`inject_voltage_NEURON`.

    ``h.IClamp`` ships with NEURON; ``h.IRamp`` is compiled from
    ``~/cerebmodels/models/cells/DummyModel/mod_files/CurrentRamp.mod`` and
    ``h.GrC_Sine`` from the GrC2001DAngelo model's ``Grc_sine.mod``.
    """
    def __init__(self):
        #self.h = neuron_dot_h
        pass

    @staticmethod
    def inject_IClamp(parameters, injectsite):
        """Attach one ``h.IClamp`` per parameter dict to ``injectsite``.

        parameters: list of dicts, e.g.
            [ {"amp": 0.5, "dur": 100.0, "delay": 10.0},
              {"amp": 1.0, "dur": 50.0, "delay": 10.0+100.0} ]
        injectsite: a NEURON section, e.g. ``cell.soma``.

        Returns the list of ``h.IClamp`` hoc objects; raises AttributeError
        for any key that is not an ``h.IClamp`` attribute.
        """
        no_of_currents = len(parameters) # number of currents
        list_of_currents = []
        for i in range(no_of_currents):
            list_of_currents.append( h.IClamp(0.5, sec=injectsite) )
            for key, value in parameters[i].items():
                if key in list_of_currents[i].__dict__:
                    setattr(list_of_currents[i], key, value)
                else:
                    raise AttributeError( key + " is not an attribute in h.IClamp." )
        return list_of_currents

    @staticmethod
    def inject_IRamp(parameters, injectsite):
        """Attach one custom ``h.IRamp`` (ramp current) per parameter dict.

        parameters: list of dicts, e.g.
            [ {"amp_initial": 0.0, "amp_final": 1.0, "dur": 100.0, "delay": 10.0},
              {"amp_initial": 1.0, "amp_final": 0.0, "dur": 100.0, "delay": 10.0+100.0} ]
        injectsite: a NEURON section, e.g. ``cell.soma``.

        Returns the list of ``h.IRamp`` hoc objects; raises AttributeError
        for any key that is not an ``h.IRamp`` attribute.
        """
        no_of_currents = len(parameters) # number of currents
        list_of_currents = []
        for i in range(no_of_currents):
            list_of_currents.append( h.IRamp(0.5, sec=injectsite) )
            for key, value in parameters[i].items():
                if key not in list_of_currents[i].__dict__:
                    raise AttributeError( key + " is not an attribute in h.IRamp." )
                else:
                    if key=="amp_final":
                        # The mod file ramps by a delta, so store the target
                        # amplitude relative to the starting amplitude.
                        adjusted_value = value - parameters[i]["amp_initial"]
                        setattr(list_of_currents[i], key, adjusted_value)
                    else:
                        setattr(list_of_currents[i], key, value)
        return list_of_currents

    @staticmethod
    def inject_GrC_Sine(parameters, injecsite):
        """Attach one custom ``h.GrC_Sine`` (sinusoidal current) per dict.

        parameters: list of dicts, e.g.
            [ {"amp": 0.006, "dur": 800.0, "delay": 100.0,
               "freq": 4.0, "phase": 0.0} ]
        injecsite (sic): a NEURON section, e.g. ``cell.soma``.

        Returns the list of ``h.GrC_Sine`` hoc objects; raises AttributeError
        for any key that is not an ``h.GrC_Sine`` attribute.
        """
        no_of_sinucurrents = len(parameters)
        list_of_sinucurrents = []
        for i in range(no_of_sinucurrents):
            list_of_sinucurrents.append( h.GrC_Sine(0.5, sec=injecsite) )
            # BUGFIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; use items() as the rest of this
            # module does.
            for key, value in parameters[i].items():
                if key in list_of_sinucurrents[i].__dict__:
                    setattr(list_of_sinucurrents[i], key, value)
                else:
                    raise AttributeError( key + " is not an attribute in h.GrC_Sine" )
        return list_of_sinucurrents

    def inject_current_NEURON(self, currenttype=None, injparameters=None, neuronsection=None):
        """Dispatch to inject_IClamp / inject_IRamp / inject_GrC_Sine.

        currenttype: "IClamp", "IRamp" or "GrC_Sine".
        injparameters: list of parameter dicts for the chosen injector.
        neuronsection: a NEURON section, e.g. ``cell.soma``.

        Returns the injector's list of stimulus objects; raises ValueError
        for missing arguments or an unknown ``currenttype``.
        """
        if currenttype is None or injparameters is None or neuronsection is None:
            raise ValueError("currenttype must be either 'IClamp' or 'IRamp'. injparameters must be a list such that its elements are dictionaries [ {}, {}, ... ]. neuronsection must be for eg cell.soma where cell = CellTemplate().")
        else:
            # BUGFIX: compare string values with '=='/'in', not identity
            # ('is'), which only worked via CPython string interning.
            if currenttype in ("IClamp", "IRamp", "GrC_Sine"):
                desiredfunc = self.__getattribute__( "inject_"+currenttype )
                stimuli_list = desiredfunc( injparameters, neuronsection )
            else:
                raise ValueError("currenttype must be 'IClamp', 'IRamp','GrC_Sine'")
        return stimuli_list

    @staticmethod
    def inject_SEClamp(parameters, injectsite):
        """Attach a single ``h.SEClamp`` with up to three voltage steps.

        parameters: list of dicts covering amp1/dur1 .. amp3/dur3, e.g.
            [ {"amp1": 0.0, "dur1": 50.0, "rs": 1E-6},
              {"amp2": 10.0, "dur2": 100.0},
              {"amp3": 20.0, "dur3": 150.0} ]
        injectsite: a NEURON section, e.g. ``cell.soma``.

        The electrode series resistance ``rs`` defaults to 1E-6, i.e. a
        near-ideal electrode so the clamp tracks the commanded potential.
        The clamp is on from t=0 until dur1+dur2+dur3; set "amp1": 0.0 if the
        simulation should not start clamped.  Returns the ``h.SEClamp``.
        """
        # NOTE: Do not insert several instances of this model at the same location
        # to make level changes. That is equivalent to independent clamps and they
        # will have incompatible internal state values.
        no_of_voltages = len(parameters)
        clampingvoltages = h.SEClamp(0.5, sec=injectsite)
        clampingvoltages.rs = 1E-6
        for i in range(no_of_voltages):
            for key, value in parameters[i].items():
                if key in clampingvoltages.__dict__:
                    setattr(clampingvoltages, key, value)
                else:
                    raise AttributeError( key + " is not an attribute in h.SEClamp." )
        return clampingvoltages

    @staticmethod
    def inject_VClamp(parameters, injectsite):
        """Attach a single ``h.VClamp`` with up to three (amp, dur) stages.

        parameters: list of dicts, e.g.
            [ {"amp": 0.0, "dur": 50.0},
              {"amp": 10.0, "dur": 100.0},
              {"amp": 20.0, "dur": 150.0} ]
        injectsite: a NEURON section, e.g. ``cell.soma``.

        Clamp-circuit settings ("gain", "rstim", "tau1", "tau2") are scalar
        attributes set once; "amp"/"dur" are hoc arrays indexed per stage.
        ``rstim`` defaults to 1E-6 (near-ideal electrode).  Returns the
        ``h.VClamp`` hoc object.
        """
        # NOTE: Do not insert several instances of this model at the same location
        # to make level changes. That is equivalent to independent clamps and they
        # will have incompatible internal state values.
        no_of_voltages = len(parameters)
        clampingvoltages = h.VClamp(0.5, sec=injectsite)
        clampingvoltages.rstim = 1E-6
        for i in range(no_of_voltages):
            for key, value in parameters[i].items():
                if key in clampingvoltages.__dict__:
                    if key in ["gain", "rstim", "tau1", "tau2"]:
                        setattr(clampingvoltages, key, value)
                    else:
                        # amp/dur are indexed by stage number.
                        clampattr = getattr(clampingvoltages, key)
                        clampattr[i] = value
                else:
                    raise AttributeError( key + " is not an attribute in h.VClamp." )
        return clampingvoltages

    def inject_voltage_NEURON(self, voltagetype=None, injparameters=None, neuronsection=None):
        """Dispatch to inject_SEClamp or inject_VClamp.

        voltagetype: "SEClamp" or "VClamp".  SEClamp is recommended; VClamp
        does not work with variable-step integration (CVODE).
        injparameters: list of parameter dicts for the chosen clamp.
        neuronsection: a NEURON section, e.g. ``cell.soma``.

        Returns the clamp hoc object; raises ValueError for missing arguments
        or an unknown ``voltagetype``.
        """
        if voltagetype is None or injparameters is None or neuronsection is None:
            raise ValueError("voltagetype must be either 'SEClamp' or 'VClamp'. injparameters must be a list such that its elements are dictionaries [ {}, {}, ... ]. neuronsection must be for eg cell.soma where cell = CellTemplate().")
        else:
            # BUGFIX: string comparison with 'in'/'==' instead of 'is'.
            if voltagetype in ("SEClamp", "VClamp"):
                desiredfunc = self.__getattribute__( "inject_"+voltagetype )
                voltagestimuli = desiredfunc( injparameters, neuronsection )
            else:
                # BUGFIX: the message previously referred to 'currenttype'.
                raise ValueError("voltagetype must be 'SEClamp', 'VClamp'")
        return voltagestimuli
| # ~/managers/operatorsSimaudit/stimulator.py
import os
import sys
from neuron import h
# import modules from other directories
# set to ~/cerebmodels
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
#
from utilities import UsefulUtils as uu
class Stimulator(object):
    """Attaches current- and voltage-clamp stimuli to NEURON sections.

    **Available methods:**

    +----------------------------------+------------------+
    | Method name                      | Method type      |
    +==================================+==================+
    | :py:meth:`.inject_IClamp`        | static method    |
    +----------------------------------+------------------+
    | :py:meth:`.inject_IRamp`         | static method    |
    +----------------------------------+------------------+
    | :py:meth:`.inject_GrC_Sine`      | static method    |
    +----------------------------------+------------------+
    | :py:meth:`inject_current_NEURON` | instance method  |
    +----------------------------------+------------------+
    | :py:meth:`.inject_SEClamp`       | static method    |
    +----------------------------------+------------------+
    | :py:meth:`.inject_VClamp`        | static method    |
    +----------------------------------+------------------+
    | :py:meth:`inject_voltage_NEURON` | instance method  |
    +----------------------------------+------------------+

    *NOTE:*

    * ``h.IClamp``, ``h.SEClamp`` and ``h.VClamp`` ship with
      `NEURON <https://neuron.yale.edu/neuron/>`_ by default.
    * ``h.IRamp`` is a custom mechanism (ramping current of the form "/",
      "\\" or "/\\") compiled from
      ``~/cerebmodels/models/cells/DummyModel/mod_files/CurrentRamp.mod``.
    * ``h.GrC_Sine`` is a custom sinusoidal current injection for the
      Granule cell, compiled from the ``GrC2001DAngelo`` model's
      ``Grc_sine.mod``.
    """

    # Stimulus families accepted by the two dispatcher methods below.
    _CURRENT_TYPES = ("IClamp", "IRamp", "GrC_Sine")
    _VOLTAGE_TYPES = ("SEClamp", "VClamp")

    def __init__(self):
        pass

    @staticmethod
    def inject_IClamp(parameters, injectsite):
        """Inject one ``h.IClamp`` per parameter set into ``injectsite``.

        ``parameters`` is a list of dictionaries, one per current, e.g.
        ``[{"amp": 0.5, "dur": 100.0, "delay": 10.0},
        {"amp": 1.0, "dur": 50.0, "delay": 10.0+100.0}]``;
        ``injectsite`` is a ``neuron`` section, for e.g., ``cell.soma``.

        Returns the list of created ``h.IClamp`` hoc objects.
        Raises ``AttributeError`` for keys unknown to ``h.IClamp``.
        """
        list_of_currents = []
        for params in parameters:
            clamp = h.IClamp(0.5, sec=injectsite)  # 0.5 => mid-section
            for key, value in params.items():
                if key not in clamp.__dict__:
                    raise AttributeError( key + " is not an attribute in h.IClamp." )
                setattr(clamp, key, value)
            list_of_currents.append(clamp)
        return list_of_currents

    @staticmethod
    def inject_IRamp(parameters, injectsite):
        """Inject one ``h.IRamp`` (ramping current) per parameter set.

        ``parameters`` is a list of dictionaries, one per ramp, e.g.
        ``[{"amp_initial": 0.0, "amp_final": 1.0, "dur": 100.0, "delay": 10.0},
        {"amp_initial": 1.0, "amp_final": 0.0, "dur": 100.0, "delay": 10.0+100.0}]``;
        ``injectsite`` is a ``neuron`` section, for e.g., ``cell.soma``.

        Returns the list of created ``h.IRamp`` hoc objects.
        Raises ``AttributeError`` for keys unknown to ``h.IRamp``.
        """
        list_of_currents = []
        for params in parameters:
            ramp = h.IRamp(0.5, sec=injectsite)
            for key, value in params.items():
                if key not in ramp.__dict__:
                    raise AttributeError( key + " is not an attribute in h.IRamp." )
                if key == "amp_final":
                    # amp_final is stored as the delta above amp_initial —
                    # presumably the mod mechanism ramps by this amount on
                    # top of the initial level; TODO confirm in CurrentRamp.mod.
                    setattr(ramp, key, value - params["amp_initial"])
                else:
                    setattr(ramp, key, value)
            list_of_currents.append(ramp)
        return list_of_currents

    @staticmethod
    def inject_GrC_Sine(parameters, injecsite):
        """Inject one ``h.GrC_Sine`` (sinusoidal current) per parameter set.

        ``parameters`` is a list of dictionaries, one per current, e.g.
        ``[{"amp": 0.006, "dur": 800.0, "delay": 100.0, "freq": 4.0, "phase": 0.0},
        {"amp": 0.006, "dur": 400.0, "delay": 100.0+800.0, "freq": 8.0, "phase": 0.0}]``;
        ``injecsite`` is a ``neuron`` section, for e.g., ``cell.soma``.

        Returns the list of created ``h.GrC_Sine`` hoc objects.
        Raises ``AttributeError`` for keys unknown to ``h.GrC_Sine``.
        """
        list_of_sinucurrents = []
        for params in parameters:
            sine = h.GrC_Sine(0.5, sec=injecsite)
            # BUGFIX: the original called dict.iteritems(), which only
            # exists in Python 2; items() is the Python 3 equivalent.
            for key, value in params.items():
                if key not in sine.__dict__:
                    raise AttributeError( key + " is not an attribute in h.GrC_Sine" )
                setattr(sine, key, value)
            list_of_sinucurrents.append(sine)
        return list_of_sinucurrents

    def inject_current_NEURON(self, currenttype=None, injparameters=None, neuronsection=None):
        """Dispatch to :py:meth:`.inject_IClamp`, :py:meth:`.inject_IRamp`
        or :py:meth:`.inject_GrC_Sine` based on ``currenttype``.

        ``currenttype`` is one of ``"IClamp"``, ``"IRamp"`` or
        ``"GrC_Sine"``; ``injparameters`` is the parameter list expected
        by the corresponding ``inject_*`` method; ``neuronsection`` is a
        ``neuron`` section, for e.g., ``cell.soma``.

        Returns the stimuli list produced by the dispatched method.
        Raises ``ValueError`` for missing arguments or an unknown type.
        """
        if currenttype is None or injparameters is None or neuronsection is None:
            raise ValueError("currenttype must be either 'IClamp' or 'IRamp'. injparameters must be a list such that its elements are dictionaries [ {}, {}, ... ]. neuronsection must be for eg cell.soma where cell = CellTemplate().")
        # BUGFIX: membership/equality instead of identity ("is") when
        # comparing against string literals.
        if currenttype not in self._CURRENT_TYPES:
            raise ValueError("currenttype must be 'IClamp', 'IRamp','GrC_Sine'")
        desiredfunc = getattr(self, "inject_" + currenttype)
        return desiredfunc(injparameters, neuronsection)

    @staticmethod
    def inject_SEClamp(parameters, injectsite):
        """Attach a single ``h.SEClamp`` to ``injectsite``.

        ``parameters`` is a list of dictionaries whose keys are SEClamp
        attributes, e.g. ``[{"amp1": 0.0, "dur1": 50.0, "rs": 1E-6},
        {"amp2": 10.0, "dur2": 100.0}, {"amp3": 20.0, "dur3": 150.0}]``;
        there is no "amp>3" (therefore no "dur>3").  Give the pipette
        resistance ``rs`` at most once — it is the same setup for every
        level.  The clamp is on at time 0 and off at ``dur1+dur2+dur3``;
        set ``"amp1": 0.0`` to effectively start switched off.
        ``injectsite`` is a ``neuron`` section, for e.g., ``cell.soma``.

        Returns the ``h.SEClamp`` hoc object.
        Raises ``AttributeError`` for keys unknown to ``h.SEClamp``.

        NOTE: ``rs`` defaults to ``1E-6``: a near-zero electrode
        resistance makes the voltage divider Rin/(Rin+Re) of the
        measuring circuit behave as if Rin were infinite, so the clamp
        tracks the membrane voltage.  Do not insert several instances of
        this mechanism at the same location to make level changes; that
        creates independent clamps with incompatible internal state.
        """
        clampingvoltages = h.SEClamp(0.5, sec=injectsite)
        clampingvoltages.rs = 1E-6  # near-ideal electrode by default
        for params in parameters:
            for key, value in params.items():
                if key not in clampingvoltages.__dict__:
                    raise AttributeError( key + " is not an attribute in h.SEClamp." )
                setattr(clampingvoltages, key, value)
        return clampingvoltages

    @staticmethod
    def inject_VClamp(parameters, injectsite):
        """Attach a single ``h.VClamp`` to ``injectsite``.

        ``parameters`` is a list of up to three dictionaries, one per
        clamp level, e.g. ``[{"amp": 0.0, "dur": 50.0},
        {"amp": 10.0, "dur": 100.0}, {"amp": 20.0, "dur": 150.0}]``; the
        i-th dictionary fills ``amp[i]``/``dur[i]`` (no amp[>2]/dur[>2]).
        Scalar settings ("gain", "rstim", "tau1", "tau2") need appear
        only once.  The clamp is on at 0 and off at
        ``dur[0]+dur[1]+dur[2]``; set the first ``"amp": 0.0`` to start
        switched off.  ``injectsite`` is a ``neuron`` section, for e.g.,
        ``cell.soma``.

        Returns the ``h.VClamp`` hoc object.
        Raises ``AttributeError`` for keys unknown to ``h.VClamp``.

        NOTE: ``rstim`` defaults to ``1E-6`` (see :py:meth:`.inject_SEClamp`
        for the voltage-divider rationale).  Do not insert several
        instances of this mechanism at the same location to make level
        changes; that creates independent clamps with incompatible
        internal state.
        """
        clampingvoltages = h.VClamp(0.5, sec=injectsite)
        clampingvoltages.rstim = 1E-6  # near-ideal electrode by default
        for i, params in enumerate(parameters):
            for key, value in params.items():
                if key not in clampingvoltages.__dict__:
                    raise AttributeError( key + " is not an attribute in h.VClamp." )
                if key in ("gain", "rstim", "tau1", "tau2"):
                    setattr(clampingvoltages, key, value)
                else:
                    # "amp"/"dur" are indexed hoc attributes; level i
                    # goes into slot i.
                    getattr(clampingvoltages, key)[i] = value
        return clampingvoltages

    def inject_voltage_NEURON(self, voltagetype=None, injparameters=None, neuronsection=None):
        """Dispatch to :py:meth:`.inject_SEClamp` or :py:meth:`.inject_VClamp`
        based on ``voltagetype``.

        ``voltagetype`` is ``"SEClamp"`` or ``"VClamp"``;
        ``injparameters`` is the parameter list expected by the
        corresponding ``inject_*`` method; ``neuronsection`` is a
        ``neuron`` section, for e.g., ``cell.soma``.

        Returns the hoc clamp object produced by the dispatched method.
        Raises ``ValueError`` for missing arguments or an unknown type.

        NOTE: ``SEClamp`` is recommended over ``VClamp``, which is
        rarely necessary and does not work with variable-step
        integration (CVODE).
        """
        if voltagetype is None or injparameters is None or neuronsection is None:
            raise ValueError("voltagetype must be either 'SEClamp' or 'VClamp'. injparameters must be a list such that its elements are dictionaries [ {}, {}, ... ]. neuronsection must be for eg cell.soma where cell = CellTemplate().")
        # BUGFIX: membership test instead of "is" on string literals, and
        # the error message now names voltagetype (was "currenttype").
        if voltagetype not in self._VOLTAGE_TYPES:
            raise ValueError("voltagetype must be 'SEClamp', 'VClamp'")
        desiredfunc = getattr(self, "inject_" + voltagetype)
        return desiredfunc(injparameters, neuronsection)
#self.h = neuron_dot_h Injects IClamp for `NEURON <https://neuron.yale.edu/neuron/>`_ **Keyword Arguments:** +----------------+--------------------------------------------------------------+ | Keys | Value type | +================+==============================================================+ | ``parameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - Eg: [ {"amp": 0.5, "dur": 100.0, "delay": 10.0}, | | | {"amp": 1.0, "dur": 50.0, "delay": 10.0+100.0} ] | +----------------+--------------------------------------------------------------+ | ``injectsite`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +----------------+--------------------------------------------------------------+ **Returned values:** list of currents where each element is a ``hoc`` object ``h.IClamp``. **NOTE:** The ``h.IClamp`` function is available in `NEURON <https://neuron.yale.edu/neuron/>`_ by default. # number of currents Injects ``IRamp`` for `NEURON <https://neuron.yale.edu/neuron/>`_ **Keyword Arguments:** +----------------+--------------------------------------------------------------+ | Keys | Value type | +================+==============================================================+ | ``parameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - Eg: [ {"amp_initial": 0.0, "amp_final": 1.0, "dur": 100.0, | | | "delay": 10.0}, | | | {"amp_initial"": 1.0, "amp_final": 0.0, "dur": 100.0,| | | "delay": 10.0+100.0} ] | +----------------+--------------------------------------------------------------+ | ``injectsite`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +----------------+--------------------------------------------------------------+ **Returned values:** list of currents where each element is a ``hoc`` object ``h.IRamp``. *NOTE:* The ``h.IRamp`` function is available in ``~/cerebmodels/models/cells/DummyModel/mod_files/CurrentRamp.mod``. 
# number of currents Injects ``GrC_Sine`` for `NEURON <https://neuron.yale.edu/neuron/>`_ **Keyword Arguments:** +----------------+--------------------------------------------------------------+ | Keys | Value type | +================+==============================================================+ | ``parameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - Eg: [ {"amp": 0.006, "dur": 800.0, "delay": 100.0, | | | "freq": 4.0, "phase": 0.0}, | | | {"amp": 0.006, "dur": 400.0, "delay": 100.0+800.0, | | | "freq": 8.0, "phase": 0.0} ] | +----------------+--------------------------------------------------------------+ | ``injectsite`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +----------------+--------------------------------------------------------------+ **Returned values:** list of currents where each element is a ``hoc`` object ``h.GrC_Sine``. *NOTE:* This function is available in ``~/cerebmodels/models/cells/GrC2001Dangelo/mod_files/Grc_sine.mod``. Sets current injection parameters to either ``h.IClamp``, ``h.IRamp``, ``GrC_Sine`` **Keyword Arguments:** +-------------------+--------------------------------------------------------------+ | Keys | Value type | +===================+==============================================================+ | ``currenttype`` | string; ``"IClamp"``, ``"IRamp"``, or ``"GrC_Sine"``. | +-------------------+--------------------------------------------------------------+ | ``injparameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - for ``IClamp`` see :py:meth:`.inject_IClamp`. | | | - for ``IRamp`` see :py:meth:`.inject_IRamp`. | | | - for ``GrC_Sine`` see :py:meth:`.inject_GrC_Sine`. 
| +-------------------+--------------------------------------------------------------+ | ``neuronsection`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +-------------------+--------------------------------------------------------------+ **Returned values:**Stimuli list where each element is ``hoc`` object ``h.IClamp``, ``h.IRamp`` or ``h.GrC_Sine``, depending on the given ``currenttype`` parameter. *NOTE:* * depending on the currenttype choice :py:meth:`.inject_IClamp`, :py:meth:`.inject_IRamp` or :py:meth:`.inject_GrC_Sine` is called * ``h.IClamp`` is available in NEURON by default * ``h.IRamp`` is custom available in ``~/cerebmodels/models/cells/DummyModel/mod_files/CurrentRamp.mod`` * ``h.GrC_Sine`` is custom available in ``~/cerebmodels/models/cells/GrC2001DAngela/mod_files/Grc_sine.mod`` Injects SEClamp for `NEURON <https://neuron.yale.edu/neuron/>`_ **Keyword Arguments:** +----------------+--------------------------------------------------------------+ | Keys | Value type | +================+==============================================================+ | ``parameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - Eg: [ {"amp1": 0.0, "dur1": 50.0, "rs": 1E-6}, | | | {"amp2": 10.0, "dur2": 100.0}, | | | {"amp3": 20.0, "dur3": 150.0} ] | | |**NOTE** There is no "amp>3" (therefore no "dur>3") | | | - To add the electrode/pipette resistance do it just once | | | with key "rs". This should be the same for all because its | | | the same setup, just the amplitudes differ. 
| | | - Since "Clamp is on at time 0, off at time dur1+dur2+dur3" | | |if you don't want to start the simulation with it just set | | |"amp1": 0.0 | +----------------+--------------------------------------------------------------+ | ``injectsite`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +----------------+--------------------------------------------------------------+ **Returned values:** list of currents where each element is a ``hoc`` object ``h.SEClamp``. **NOTE:** - The ``h.SEClamp`` function is available in NEURON as `SEClamp <https://neuron.yale.edu/neuron/static/new_doc/modelspec/programmatic/mechanisms/mech.html#SEClamp>`_ by default. - By default the electrode resistance (Re but SEClamp attribute is ``rs``) is made very small ``1E-6``. This is because the input resistance (Rin) for the voltmeter (i.e, the measuring circuit) must be very large (i.e, infinite resistance) so that the voltage drop across the voltmeter (given by the voltage divider equation, the resistance is Rin/(Rin+Re)) is as close as possible to the membrane voltage it is supposed to be clamping. By making the electrode resistance very small it is the same as infinite Rin. # NOTE: Do not insert several instances of this model at the same location # to make level changes. That is equivalent to independent clamps and they # will have incompatible internal state values. Injects SEClamp for `NEURON <https://neuron.yale.edu/neuron/>`_ **Keyword Arguments:** +----------------+--------------------------------------------------------------+ | Keys | Value type | +================+==============================================================+ | ``parameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - Eg: [ {"amp": 0.0, "dur": 50.0}, | | | {"amp": 10.0, "dur": 100.0}, | | | {"amp": 20.0, "dur": 150.0} ] | | |**NOTE** There is no amp[>2] (therefore no dur[>2]) | | | - To change clamp parameters with keys "gain", "rstim", | } |"tau1" and "tau2", do it just once. 
They should be the same | | |for all because its the same setup, just the amplitude changes| | | - Since "Clamp is on at 0, off at time dur[0]+dur[1]+dur[2]" | | |if you don't want to start the simulation with it just set the| | | first "amp": 0.0 | +----------------+--------------------------------------------------------------+ | ``injectsite`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +----------------+--------------------------------------------------------------+ **Returned values:** a ``hoc`` object ``h.VClamp``. **NOTE:** - The ``h.VClamp`` function is available in NEURON as `VClamp <https://neuron.yale.edu/neuron/static/new_doc/modelspec/programmatic/mechanisms/mech.html#VClamp>`_ by default. - By default the electrode resistance (Re but VClamp attribute is ``rstim``) is made very small ``1E-6``. This is because the input resistance (Rin) for the voltmeter (i.e, the measuring circuit) must be very large (i.e, infinite resistance) so that the voltage drop across the voltmeter (given by the voltage divider equation, the resistance is Rin/(Rin+Re)) is as close as possible to the membrane voltage it is supposed to be clamping. By making the electrode resistance very small it is the same as infinite Rin. # NOTE: Do not insert several instances of this model at the same location # to make level changes. That is equivalent to independent clamps and they # will have incompatible internal state values. Sets voltage injection parameters to either ``h.SEClamp``, ``h.VClamp``, **Keyword Arguments:** +-------------------+--------------------------------------------------------------+ | Keys | Value type | +===================+==============================================================+ | ``voltagetype`` | string; ``"SEClamp"`` or ``"VClamp"``. 
| +-------------------+--------------------------------------------------------------+ | ``injparameters`` | - list such that each element is a dictionary [ {}, {}, {} ] | | | - for ``SEClamp`` see :py:meth:`.inject_SEClamp`. | | | - for ``VClamp`` see :py:meth:`.inject_VClamp`. | +-------------------+--------------------------------------------------------------+ | ``neuronsection`` | ``neuron`` ``section``, for e.g., ``cell.soma`` | +-------------------+--------------------------------------------------------------+ **Returned values:**Stimuli list where each element is ``hoc`` object ``h.SEClamp`` or ``h.VClamp``, depending on the given ``voltagetype`` parameter. *NOTE:* * depending on the currenttype choice :py:meth:`.inject_SEClamp` or :py:meth:`.inject_VClamp` is called * ``h.SEClamp`` and not ``h.VClamp`` is recommended as `it is almost never necessary to use VClamp <https://www.neuron.yale.edu/phpBB/viewtopic.php?t=505>`_ * also, `VClamp will not work with variable step integration (CVODE) <https://neurojustas.com/2018/03/27/important-neuron-gotchas-and-tips/>`_ | 2.025579 | 2 |
src/apps/distributed_efforts/viewsets/__init__.py | sanderland/katago-server | 27 | 6618278 | <filename>src/apps/distributed_efforts/viewsets/__init__.py
from .distributed_task import DistributedTaskViewSet
| <filename>src/apps/distributed_efforts/viewsets/__init__.py
from .distributed_task import DistributedTaskViewSet
| none | 1 | 1.143947 | 1 | |
example/server.py | v1c77/gogo | 0 | 6618279 | <filename>example/server.py<gh_stars>0
# -*- coding: utf-8 -*-
from concurrent import futures
import traceback
import inspect
import time
import logging
import sys
import grpc
from example import hello_bro_pb2
from example import hello_bro_pb2_grpc
# Configure the root logger to mirror all DEBUG-and-above records to stdout.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# Interval used by serve() to keep the main thread alive between interrupts.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def func_cost():
    """Simulate a CPU-bound task by busy-waiting for roughly ten seconds."""
    print('c is runing.!!!')
    deadline = time.time() + 10
    spins = 1
    while spins > 0:
        spins += 1
        if time.time() > deadline:
            break
    print('down')
class Bro(hello_bro_pb2_grpc.BroServicer):
    """gRPC servicer implementing the Bro service from hello_bro.proto."""

    def SayHello(self, request, context):
        """Handle a SayHello RPC.

        NOTE(review): this handler unconditionally raises ValueError —
        apparently to exercise server-side error propagation — so the
        HelloReply below is unreachable dead code.  Remove the raise to
        restore the normal reply path.
        """
        # pool = futures.ThreadPoolExecutor(max_workers=2)
        # pool.submit(func_cost)
        root.info('trace')
        traceback.print_stack()  # dump the current call stack for debugging
        # print(inspect.stack())
        raise ValueError('test error happend.')
        return hello_bro_pb2.HelloReply(
            message='Hello, %s!' % request.name,
            by=request.name)
def serve():
    """Start the gRPC Bro service on port 1994 and block until interrupted."""
    rpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=65))
    hello_bro_pb2_grpc.add_BroServicer_to_server(Bro(), rpc_server)
    rpc_server.add_insecure_port('[::]:1994')
    rpc_server.start()
    # grpc's start() returns immediately; keep the main thread parked
    # until Ctrl-C, then shut the server down without a grace period.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        rpc_server.stop(0)
if __name__ == '__main__':
    # Entry point: announce the port and run the blocking gRPC server.
    print("start at 1994")
    serve()
| <filename>example/server.py<gh_stars>0
# -*- coding: utf-8 -*-
from concurrent import futures
import traceback
import inspect
import time
import logging
import sys
import grpc
from example import hello_bro_pb2
from example import hello_bro_pb2_grpc
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def func_cost():
print('c is runing.!!!')
start = time.time()
end = start + 10
i = 1
while i > 0:
i += 1
if time.time() > end:
break
print('down')
class Bro(hello_bro_pb2_grpc.BroServicer):
def SayHello(self, request, context):
# pool = futures.ThreadPoolExecutor(max_workers=2)
# pool.submit(func_cost)
root.info('trace')
traceback.print_stack()
# print(inspect.stack())
raise ValueError('test error happend.')
return hello_bro_pb2.HelloReply(
message='Hello, %s!' % request.name,
by=request.name)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=65))
hello_bro_pb2_grpc.add_BroServicer_to_server(Bro(), server)
server.add_insecure_port('[::]:1994')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
print("start at 1994")
serve()
| en | 0.44433 | # -*- coding: utf-8 -*- # pool = futures.ThreadPoolExecutor(max_workers=2) # pool.submit(func_cost) # print(inspect.stack()) | 2.598024 | 3 |
igparser/output.py | HudaJr/TAGRAM | 0 | 6618280 | <gh_stars>0
import re, json
from . import dump
from . import action
class Post:
    """A single post parsed from a GraphQL-style "node" payload."""

    def __init__(self, ses, data):
        """Parse ``data`` — a dict, or a JSON string, with a "node" key.

        ``ses`` is the authenticated session object passed through to
        the ``action`` helpers.  On any malformed payload the post is
        left with an empty ``id`` (matching the original behavior).
        """
        try:
            if isinstance(data, str):
                data = json.loads(data)
            self.ses = ses
            self.data = data["node"]
            # Falsy/missing "id" collapses to "" (one lookup instead of two).
            self.id = self.data.get("id") or ""
        except Exception:  # was a bare except; keep the best-effort contract
            self.id = ""

    def __repr__(self):
        return self.id

    def like(self):
        """Like this post.

        BUGFIX: the original called ``self.action.like_post(ses, ...)``,
        but there is no ``action`` attribute on instances and ``ses`` is
        undefined in method scope — use the module-level ``action`` and
        ``self.ses``.
        """
        return action.like_post(self.ses, self.id)

    def unlike(self):
        """Unlike this post (same fix as :py:meth:`like`)."""
        return action.unlike_post(self.ses, self.id)
class People:
    """Wrapper around a user account's GraphQL "node" dictionary."""

    def __init__(self, ses, data):
        """Parse ``data`` (dict or JSON string with a "node" entry)."""
        if type(data) is str:
            data = json.loads(data)
        node = data["node"]
        self.ses = ses
        self.data = node
        self.id = node["id"]
        self.username = node["username"]
        self.name = node["full_name"]
        self.profile_picture = node["profile_pic_url"]

    def __repr__(self):
        return self.username

    def follow(self):
        """Follow this account via the session."""
        return action.follow_people(self.ses, self.username, idPeople=self.id)

    def unfollow(self):
        """Unfollow this account via the session."""
        return action.unfollow_people(self.ses, self.username, idPeople=self.id)

    def follower(self):
        """Dump the accounts that follow this one."""
        return dump.follower_people(self.ses, usernamePeople=self.username, idPeople=self.id)

    def following(self):
        """Dump the accounts this one follows."""
        return dump.following_people(self.ses, usernamePeople=self.username, idPeople=self.id)

    def post(self):
        """Dump this account's posts."""
        return dump.post_people(self.ses, usernamePeople=self.username)
class Output:
    """Holds one page of dumped items together with pagination state."""

    def __init__(self, items = None, data = None, idPeople = None, next = None):
        self.items = items
        self.data = data
        self.idPeople = idPeople
        if next:
            # Presumably URL-escapes the cursor; note each "==" pair is
            # mapped to a single "%3D" — confirm against the API caller.
            self.next = next.replace("==", "%3D")
        else:
            self.next = None
        self.isNext = bool(self.next)

    def __repr__(self):
        return "<total_items: {}, next: {}>".format(len(self.items), self.next)
| import re, json
from . import dump
from . import action
class Post:
def __init__(self, ses, data):
try:
if type(data) == str:
data = json.loads(data)
self.ses = ses
self.data = data["node"]
if self.data.get("id"):
self.id = self.data["id"]
else:
self.id = ""
except:
self.id = ""
def __repr__(self):
return self.id
def like(self):
self.action.like_post(ses, self.id)
def unlike(self):
self.action.unlike_post(ses, self.id)
class People:
def __init__(self, ses, data):
if type(data) == str:
data = json.loads(data)
self.ses = ses
self.data = data["node"]
self.username = self.data["username"]
self.id = self.data["id"]
self.name = self.data["full_name"]
self.profile_picture = self.data["profile_pic_url"]
def __repr__(self):
return self.username
def follow(self):
return action.follow_people(self.ses, self.username, idPeople = self.id)
def unfollow(self):
return action.unfollow_people(self.ses, self.username, idPeople = self.id)
def follower(self):
return dump.follower_people(self.ses, usernamePeople = self.username, idPeople = self.id)
def following(self):
return dump.following_people(self.ses, usernamePeople = self.username, idPeople = self.id)
def post(self):
return dump.post_people(self.ses, usernamePeople = self.username)
class Output:
    """One page of scraped results together with its pagination cursor."""

    def __init__(self, items=None, data=None, idPeople=None, next=None):
        self.items = items
        self.data = data
        self.idPeople = idPeople
        if next:
            # Escape the cursor's "==" Base64 padding for use in a URL.
            self.next = next.replace("==", "%3D")
        else:
            self.next = None
        # True when there is a further page to fetch.
        self.isNext = bool(self.next)

    def __repr__(self):
        return "<total_items: {}, next: {}>".format(len(self.items), self.next)
bcl/bcl/wrapper.py | wyatt-howe/bcl | 0 | 6618281 | """Basic cryptographic library.
Python library that provides a simple interface for symmetric (i.e.,
secret-key) and asymmetric (i.e., public-key) encryption/decryption
primitives.
"""
from __future__ import annotations
import doctest
import base64
if __name__ == "__main__":
import bcl # pylint: disable=cyclic-import
wrappers = bcl._lib.wrapper.wrappers # pylint: disable=protected-access
else:
import wrappers.utils
import wrappers.secret
import wrappers.public
class raw(bytes):
    """
    Bytes-like wrapper used for keys, plaintexts, and ciphertexts,
    providing Base64 conversion helpers that preserve the subclass.
    """

    @classmethod
    def from_base64(cls, s: str) -> raw:
        """Build an instance of *cls* from its Base64 UTF-8 representation."""
        decoded = base64.standard_b64decode(s)
        return bytes.__new__(cls, decoded)

    def to_base64(self: raw) -> str:
        """Return the Base64 UTF-8 string representation of this value."""
        encoded = base64.standard_b64encode(self)
        return encoded.decode('utf-8')
class key(raw):
    """
    Wrapper class for a bytes-like object that represents a key
    (common base class for the secret and public key wrappers below).
    """

class secret(key):
    """
    Wrapper class for a bytes-like object that represents a secret key.
    """

class public(key):
    """
    Wrapper class for a bytes-like object that represents a public key.
    """

class plain(raw):
    """
    Wrapper class for a bytes-like object that represents a plaintext.
    """

class cipher(raw):
    """
    Wrapper class for a bytes-like object that represents a ciphertext.
    """
class symmetric:
    """
    Symmetric (i.e., secret-key) encryption/decryption primitives.

    >>> x = plain(wrappers.utils.random(1024))
    >>> isinstance(x, raw)
    True
    >>> isinstance(x, plain)
    True
    >>> s = symmetric.secret()
    >>> isinstance(s, key)
    True
    >>> isinstance(s, secret)
    True
    >>> s == secret.from_base64(s.to_base64())
    True
    >>> c = symmetric.encrypt(s, x)
    >>> isinstance(c, raw)
    True
    >>> isinstance(c, cipher)
    True
    >>> c == cipher.from_base64(c.to_base64())
    True
    >>> symmetric.decrypt(s, c) == x
    True
    >>> isinstance(symmetric.decrypt(s, c), plain)
    True
    """

    @staticmethod
    def secret() -> secret:
        """
        Create a (random) secret key for use with encrypt/decrypt.
        """
        return secret(wrappers.utils.random())

    @staticmethod
    def encrypt(secret_key: secret, plaintext: plain) -> cipher:
        """
        Encrypt a plaintext (a bytes-like object) using the supplied secret key.
        """
        return cipher(wrappers.secret.SecretBox(secret_key).encrypt(plaintext))

    @staticmethod
    def decrypt(secret_key: secret, ciphertext: cipher) -> plain:
        """
        Decrypt a ciphertext (a bytes-like object) using the supplied secret key.
        """
        return plain(wrappers.secret.SecretBox(secret_key).decrypt(ciphertext))
class asymmetric:
    """
    Asymmetric (i.e., public-key) encryption/decryption primitives.

    >>> x = plain(wrappers.utils.random(1024))
    >>> x == plain.from_base64(x.to_base64())
    True
    >>> s = asymmetric.secret()
    >>> p = asymmetric.public(s)
    >>> isinstance(p, key)
    True
    >>> isinstance(p, public)
    True
    >>> p == public.from_base64(p.to_base64())
    True
    >>> c = asymmetric.encrypt(p, x)
    >>> asymmetric.decrypt(s, c) == x
    True
    """

    @staticmethod
    def secret() -> secret:
        """
        Create a (random) secret key.
        """
        # NOTE(review): presumably wrappers.utils.random() defaults to the
        # 32-byte length required by wrappers.public.PrivateKey — confirm.
        return secret(wrappers.utils.random())

    @staticmethod
    def public(secret_key: secret) -> public:
        """
        Create a public key using a secret key (a bytes-like object of length 32).
        """
        return public(wrappers.public.PrivateKey(secret_key).public_key)

    @staticmethod
    def encrypt(public_key: public, plaintext: plain) -> cipher:
        """
        Encrypt a plaintext (a bytes-like object) using the supplied public key.
        """
        return cipher(
            wrappers.public\
                .SealedBox(wrappers.public.PublicKey(public_key)).encrypt(plaintext)
        )

    @staticmethod
    def decrypt(secret_key: secret, ciphertext: cipher) -> plain:
        """
        Decrypt a ciphertext (a bytes-like object) using the supplied secret key.
        """
        return plain(
            wrappers.public\
                .SealedBox(wrappers.public.PrivateKey(secret_key)).decrypt(ciphertext)
        )
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    doctest.testmod()  # pragma: no cover
| """Basic cryptographic library.
Python library that provides a simple interface for symmetric (i.e.,
secret-key) and asymmetric (i.e., public-key) encryption/decryption
primitives.
"""
from __future__ import annotations
import doctest
import base64
if __name__ == "__main__":
import bcl # pylint: disable=cyclic-import
wrappers = bcl._lib.wrapper.wrappers # pylint: disable=protected-access
else:
import wrappers.utils
import wrappers.secret
import wrappers.public
class raw(bytes):
"""
Wrapper class for a raw bytes-like object that represents a key,
plaintext, or ciphertext.
"""
@classmethod
def from_base64(cls, s: str) -> raw:
"""Convert Base64 UTF-8 string representation of a raw value."""
return bytes.__new__(cls, base64.standard_b64decode(s))
def to_base64(self: raw) -> str:
"""Convert to equivalent Base64 UTF-8 string representation."""
return base64.standard_b64encode(self).decode('utf-8')
class key(raw):
"""
Wrapper class for a bytes-like object that represents a key.
"""
class secret(key):
"""
Wrapper class for a bytes-like object that represents a secret key.
"""
class public(key):
"""
Wrapper class for a bytes-like object that represents a public key.
"""
class plain(raw):
"""
Wrapper class for a bytes-like object that represents a plaintext.
"""
class cipher(raw):
"""
Wrapper class for a bytes-like object that represents a ciphertext.
"""
class symmetric:
"""
Symmetric (i.e., secret-key) encryption/decryption primitives.
>>> x = plain(wrappers.utils.random(1024))
>>> isinstance(x, raw)
True
>>> isinstance(x, plain)
True
>>> s = symmetric.secret()
>>> isinstance(s, key)
True
>>> isinstance(s, secret)
True
>>> s == secret.from_base64(s.to_base64())
True
>>> c = symmetric.encrypt(s, x)
>>> isinstance(c, raw)
True
>>> isinstance(c, cipher)
True
>>> c == secret.from_base64(c.to_base64())
True
>>> symmetric.decrypt(s, c) == x
True
>>> isinstance(symmetric.decrypt(s, c), plain)
True
"""
@staticmethod
def secret() -> secret:
"""
Create a secret key.
"""
return secret(wrappers.utils.random())
@staticmethod
def encrypt(secret_key: secret, plaintext: plain) -> cipher:
"""
Encrypt a plaintext (a bytes-like object) using the supplied secret key.
"""
return cipher(wrappers.secret.SecretBox(secret_key).encrypt(plaintext))
@staticmethod
def decrypt(secret_key: secret, ciphertext: cipher) -> plain:
"""
Decrypt a ciphertext (a bytes-like object) using the supplied secret key.
"""
return plain(wrappers.secret.SecretBox(secret_key).decrypt(ciphertext))
class asymmetric:
"""
Asymmetric (i.e., public-key) encryption/decryption primitives.
>>> x = plain(wrappers.utils.random(1024))
>>> x == plain.from_base64(x.to_base64())
True
>>> s = asymmetric.secret()
>>> p = asymmetric.public(s)
>>> isinstance(p, key)
True
>>> isinstance(p, public)
True
>>> p == public.from_base64(p.to_base64())
True
>>> c = asymmetric.encrypt(p, x)
>>> asymmetric.decrypt(s, c) == x
True
"""
@staticmethod
def secret() -> secret:
"""
Create a secret key.
"""
return secret(wrappers.utils.random())
@staticmethod
def public(secret_key: secret) -> public:
"""
Create a public key using a secret key (a bytes-like object of length 32).
"""
return public(wrappers.public.PrivateKey(secret_key).public_key)
@staticmethod
def encrypt(public_key: public, plaintext: plain) -> cipher:
"""
Encrypt a plaintext (a bytes-like object) using the supplied public key.
"""
return cipher(
wrappers.public\
.SealedBox(wrappers.public.PublicKey(public_key)).encrypt(plaintext)
)
@staticmethod
def decrypt(secret_key: secret, ciphertext: cipher) -> plain:
"""
Decrypt a ciphertext (a bytes-like object) using the supplied secret key.
"""
return plain(
wrappers.public\
.SealedBox(wrappers.public.PrivateKey(secret_key)).decrypt(ciphertext)
)
if __name__ == "__main__":
doctest.testmod() # pragma: no cover
| en | 0.606748 | Basic cryptographic library. Python library that provides a simple interface for symmetric (i.e., secret-key) and asymmetric (i.e., public-key) encryption/decryption primitives. # pylint: disable=cyclic-import # pylint: disable=protected-access Wrapper class for a raw bytes-like object that represents a key, plaintext, or ciphertext. Convert Base64 UTF-8 string representation of a raw value. Convert to equivalent Base64 UTF-8 string representation. Wrapper class for a bytes-like object that represents a key. Wrapper class for a bytes-like object that represents a secret key. Wrapper class for a bytes-like object that represents a public key. Wrapper class for a bytes-like object that represents a plaintext. Wrapper class for a bytes-like object that represents a ciphertext. Symmetric (i.e., secret-key) encryption/decryption primitives. >>> x = plain(wrappers.utils.random(1024)) >>> isinstance(x, raw) True >>> isinstance(x, plain) True >>> s = symmetric.secret() >>> isinstance(s, key) True >>> isinstance(s, secret) True >>> s == secret.from_base64(s.to_base64()) True >>> c = symmetric.encrypt(s, x) >>> isinstance(c, raw) True >>> isinstance(c, cipher) True >>> c == secret.from_base64(c.to_base64()) True >>> symmetric.decrypt(s, c) == x True >>> isinstance(symmetric.decrypt(s, c), plain) True Create a secret key. Encrypt a plaintext (a bytes-like object) using the supplied secret key. Decrypt a ciphertext (a bytes-like object) using the supplied secret key. Asymmetric (i.e., public-key) encryption/decryption primitives. >>> x = plain(wrappers.utils.random(1024)) >>> x == plain.from_base64(x.to_base64()) True >>> s = asymmetric.secret() >>> p = asymmetric.public(s) >>> isinstance(p, key) True >>> isinstance(p, public) True >>> p == public.from_base64(p.to_base64()) True >>> c = asymmetric.encrypt(p, x) >>> asymmetric.decrypt(s, c) == x True Create a secret key. Create a public key using a secret key (a bytes-like object of length 32). 
Encrypt a plaintext (a bytes-like object) using the supplied public key. Decrypt a ciphertext (a bytes-like object) using the supplied secret key. # pragma: no cover | 3.254302 | 3 |
mopidy/audio/mixers/__init__.py | rzr/mopidy | 2 | 6618282 | from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst
import gobject
from .auto import AutoAudioMixer
from .fake import FakeMixer
def register_mixer(mixer_class):
    """Register *mixer_class* with GObject and GStreamer.

    The GStreamer element name is the lowercased class name, and the
    element is registered at RANK_MARGINAL so it is only chosen when
    explicitly requested or when no higher-ranked element matches.
    """
    gobject.type_register(mixer_class)
    gst.element_register(
        mixer_class, mixer_class.__name__.lower(), gst.RANK_MARGINAL)
def register_mixers():
    """Register all bundled audio mixers (auto-detecting and fake)."""
    register_mixer(AutoAudioMixer)
    register_mixer(FakeMixer)
| from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst
import gobject
from .auto import AutoAudioMixer
from .fake import FakeMixer
def register_mixer(mixer_class):
gobject.type_register(mixer_class)
gst.element_register(
mixer_class, mixer_class.__name__.lower(), gst.RANK_MARGINAL)
def register_mixers():
register_mixer(AutoAudioMixer)
register_mixer(FakeMixer)
| none | 1 | 1.766007 | 2 | |
src/shorturls/tests/test_urlresolvers.py | tubaman/django-shorturls | 0 | 6618283 | <reponame>tubaman/django-shorturls
from django import template
from django.conf import settings
from django.test import TestCase
from django.core import urlresolvers
from shorturls.tests.models import Animal, Vegetable, Mineral
from shorturls.urlresolvers import get_shorturl
class UrlResolversTestCase(TestCase):
    """Tests for ``shorturls.urlresolvers.get_shorturl``."""

    urls = 'shorturls.urls'
    fixtures = ['shorturls-test-data.json']

    def setUp(self):
        # Save the current settings so tearDown can restore them.
        self.old_shorten = getattr(settings, 'SHORTEN_MODELS', None)
        self.old_base = getattr(settings, 'SHORT_BASE_URL', None)
        settings.SHORT_BASE_URL = None
        settings.SHORTEN_MODELS = {
            'A': 'shorturls.animal',
            'V': 'shorturls.vegetable',
        }

    def tearDown(self):
        if self.old_shorten is not None:
            settings.SHORTEN_MODELS = self.old_shorten
        if self.old_base is not None:
            settings.SHORT_BASE_URL = self.old_base

    def test_shorturl(self):
        r = get_shorturl(Animal.objects.get(id=12345))
        self.assertEqual(r, '/ADNH')

    def test_no_prefix(self):
        # BUG FIX: the original try/except/else called self.assertFalse()
        # with no argument, which raises TypeError rather than failing the
        # test when NoReverseMatch is NOT raised. assertRaises expresses
        # the intent correctly: Mineral has no configured prefix, so
        # building a short URL for it must fail.
        with self.assertRaises(urlresolvers.NoReverseMatch):
            get_shorturl(Mineral.objects.all()[0])

    def test_short_base_url(self):
        settings.SHORT_BASE_URL = 'http://example.com/'
        r = get_shorturl(Animal.objects.get(id=12345))
        self.assertEqual(r, 'http://example.com/ADNH')
| from django import template
from django.conf import settings
from django.test import TestCase
from django.core import urlresolvers
from shorturls.tests.models import Animal, Vegetable, Mineral
from shorturls.urlresolvers import get_shorturl
class UrlResolversTestCase(TestCase):
urls = 'shorturls.urls'
fixtures = ['shorturls-test-data.json']
def setUp(self):
self.old_shorten = getattr(settings, 'SHORTEN_MODELS', None)
self.old_base = getattr(settings, 'SHORT_BASE_URL', None)
settings.SHORT_BASE_URL = None
settings.SHORTEN_MODELS = {
'A': 'shorturls.animal',
'V': 'shorturls.vegetable',
}
def tearDown(self):
if self.old_shorten is not None:
settings.SHORTEN_MODELS = self.old_shorten
if self.old_base is not None:
settings.SHORT_BASE_URL = self.old_base
def test_shorturl(self):
r = get_shorturl(Animal.objects.get(id=12345))
self.assertEqual(r, '/ADNH')
def test_no_prefix(self):
try:
r = get_shorturl(Mineral.objects.all()[0])
except urlresolvers.NoReverseMatch:
pass
else:
self.assertFalse()
def test_short_base_url(self):
settings.SHORT_BASE_URL = 'http://example.com/'
r = get_shorturl(Animal.objects.get(id=12345))
self.assertEqual(r, 'http://example.com/ADNH') | none | 1 | 2.345234 | 2 | |
notebooks/pandas/import_export_parquet_excel_csv.py | blazzup/python-notes | 0 | 6618284 | <reponame>blazzup/python-notes
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import pandas as pd
import pandas.util.testing as tu
from uuid import uuid3, NAMESPACE_URL
from pathlib import Path
# %% [markdown]
# # Setup
# %%
def file_name(extension: str) -> str:
    """Derive a deterministic, UUID-based file name for *extension*.

    Hashing the extension with uuid3 yields a stable name across runs
    while avoiding collisions with a user's existing files.
    """
    stem = uuid3(NAMESPACE_URL, extension)
    return '{}.{}'.format(stem, extension)
# %%
# NOTE(review): pd.np is deprecated (removed in pandas 2.0); import numpy
# directly instead. Likewise pandas.util.testing is deprecated in favor of
# pandas.testing, and makeDataFrame/N were removed in pandas 2.x — confirm
# the pinned pandas version before upgrading.
pd.np.random.seed(0)
tu.N = 5
d = tu.makeDataFrame().set_index('A')
d
# %% [markdown]
# # Export and import
# %% [markdown]
# ## Parquet
#
# Available with pandas $\ge$ 0.21.0
# %%
parquet_file = file_name('parquet')
d.to_parquet(parquet_file)
# %%
pd.read_parquet(parquet_file)
# %% [markdown]
# ### Column names must be strings!
# %%
try:
    d.rename(columns={'B': 1, 'C': 2, 'D': 3}).to_parquet(parquet_file)
except ValueError as e:
    print(e)
# %%
# use .rename(columns=str) for a quick fix
d.rename(columns={'B': 1, 'C': 2, 'D': 3}).rename(columns=str).to_parquet(parquet_file)
# %% [markdown]
# ## Excel
#
# Note: index is stored as an ordinary column!
# %%
excel_file = file_name('xlsx')
d.to_excel(excel_file)
# %%
pd.read_excel(excel_file)
# %%
pd.read_excel(excel_file).set_index('A')  # restore index manually!
# %% [markdown]
# ## Tab-separated txt file with custom float format
#
# Note: index is stored as an ordinary column!
# %%
csv_file = file_name('txt')
d.to_csv(csv_file, sep='\t', float_format='%.2f')
# %%
pd.read_csv(csv_file, sep='\t')
# %%
pd.read_csv(csv_file, sep='\t').set_index('A')  # restore index manually!
# %% [markdown]
# # Benchmark
# %%
tu.N = 100000
d = tu.makeDataFrame()
d.shape
# %% [markdown]
# ## Write
# %%
# %%time
d.to_parquet(parquet_file)
# %%
# %%time
d.to_excel(excel_file)
# %%
# %%time
d.to_csv(csv_file)
# %% [markdown]
# ## Read
# %%
# %%time
__ = pd.read_parquet(parquet_file)
# %%
# %%time
__ = pd.read_excel(excel_file)
# %%
# %%time
__ = pd.read_csv(csv_file)
# %% [markdown]
# ## Size
# %%
# Compare on-disk sizes of the three export formats.
pd.DataFrame(
    [(Path(f).suffix, Path(f).stat().st_size) for f in [parquet_file, excel_file, csv_file]],
    columns=['type', 'size'])
# %% [markdown]
# # Remove exported files
# %%
Path(parquet_file).unlink()
Path(excel_file).unlink()
Path(csv_file).unlink()
| # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import pandas as pd
import pandas.util.testing as tu
from uuid import uuid3, NAMESPACE_URL
from pathlib import Path
# %% [markdown]
# # Setup
# %%
def file_name(extension: str) -> str:
"""Create UUID file name from file extension to avoid overwriting existing files."""
return f'{uuid3(NAMESPACE_URL, extension)}.{extension}'
# %%
pd.np.random.seed(0)
tu.N = 5
d = tu.makeDataFrame().set_index('A')
d
# %% [markdown]
# # Export and import
# %% [markdown]
# ## Parquet
#
# Available with pandas $\ge$ 0.21.0
# %%
parquet_file = file_name('parquet')
d.to_parquet(parquet_file)
# %%
pd.read_parquet(parquet_file)
# %% [markdown]
# ### Column names must be strings!
# %%
try:
d.rename(columns={'B': 1, 'C': 2, 'D': 3}).to_parquet(parquet_file)
except ValueError as e:
print(e)
# %%
# use .rename(columns=str) for a quick fix
d.rename(columns={'B': 1, 'C': 2, 'D': 3}).rename(columns=str).to_parquet(parquet_file)
# %% [markdown]
# ## Excel
#
# Note: index is stored as an ordinary column!
# %%
excel_file = file_name('xlsx')
d.to_excel(excel_file)
# %%
pd.read_excel(excel_file)
# %%
pd.read_excel(excel_file).set_index('A') # restore index manually!
# %% [markdown]
# ## Tab-separated txt file with custom float format
#
# Note: index is stored as an ordinary column!
# %%
csv_file = file_name('txt')
d.to_csv(csv_file, sep='\t', float_format='%.2f')
# %%
pd.read_csv(csv_file, sep='\t')
# %%
pd.read_csv(csv_file, sep='\t').set_index('A') # restore index manually!
# %% [markdown]
# # Benchmark
# %%
tu.N = 100000
d = tu.makeDataFrame()
d.shape
# %% [markdown]
# ## Write
# %%
# %%time
d.to_parquet(parquet_file)
# %%
# %%time
d.to_excel(excel_file)
# %%
# %%time
d.to_csv(csv_file)
# %% [markdown]
# ## Read
# %%
# %%time
__ = pd.read_parquet(parquet_file)
# %%
# %%time
__ = pd.read_excel(excel_file)
# %%
# %%time
__ = pd.read_csv(csv_file)
# %% [markdown]
# ## Size
# %%
pd.DataFrame(
[(Path(f).suffix, Path(f).stat().st_size) for f in [parquet_file, excel_file, csv_file]],
columns=['type', 'size'])
# %% [markdown]
# # Remove exported files
# %%
Path(parquet_file).unlink()
Path(excel_file).unlink()
Path(csv_file).unlink() | en | 0.44521 | # --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.4.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% # %% [markdown] # # Setup # %% Create UUID file name from file extension to avoid overwriting existing files. # %% # %% [markdown] # # Export and import # %% [markdown] # ## Parquet # # Available with pandas $\ge$ 0.21.0 # %% # %% # %% [markdown] # ### Column names must be strings! # %% # %% # use .rename(columns=str) for a quick fix # %% [markdown] # ## Excel # # Note: index is stored as an ordinary column! # %% # %% # %% # restore index manually! # %% [markdown] # ## Tab-separated txt file with custom float format # # Note: index is stored as an ordinary column! # %% # %% # %% # restore index manually! # %% [markdown] # # Benchmark # %% # %% [markdown] # ## Write # %% # %%time # %% # %%time # %% # %%time # %% [markdown] # ## Read # %% # %%time # %% # %%time # %% # %%time # %% [markdown] # ## Size # %% # %% [markdown] # # Remove exported files # %% | 2.50557 | 3 |
faqs/views.py | jjmartinr01/gauss3 | 0 | 6618285 | <filename>faqs/views.py
from django.http import JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from autenticar.control_acceso import permiso_required
from mensajes.models import Aviso
from faqs.models import FaqSection, FaqGauss, FaqEntidad, FaqSugerida
# Create your views here.
@permiso_required('acceso_configura_faqs')
def configura_faqs(request):
    """AJAX-driven management view for FAQ sections and their questions.

    Dispatches on ``request.POST['action']``; every mutating branch is
    gated by a per-action permission check and answers with a JsonResponse.
    Non-AJAX GET requests render the management page.

    BUG FIX: all bare ``except:`` clauses were narrowed to
    ``except Exception:`` so SystemExit/KeyboardInterrupt are no longer
    swallowed; observable behavior is otherwise unchanged.
    """
    g_e = request.session['gauser_extra']
    faqssections = FaqSection.objects.filter(entidad=g_e.ronda.entidad, borrada=False)
    if request.method == 'POST' and request.is_ajax():
        action = request.POST['action']
        if action == 'add_seccion' and g_e.has_permiso('crea_secciones_faqs'):
            try:
                fs = FaqSection.objects.create(entidad=g_e.ronda.entidad, nombre='Nueva sección')
                html = render_to_string('configura_faqs_secciones.html', {'s': fs})
                return JsonResponse({'ok': True, 'html': html})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No se ha podido crear la sección.'})
        elif action == 'open_accordion_fsection':
            try:
                fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
                html = render_to_string('configura_faqs_secciones_content.html', {'s': fs, 'g_e': g_e})
                return JsonResponse({'ok': True, 'html': html})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
        elif action == 'borrar_seccion' and g_e.has_permiso('crea_secciones_faqs'):
            try:
                fs = FaqSection.objects.get(id=request.POST['seccion'], entidad=g_e.ronda.entidad)
                # Only empty sections may be (soft-)deleted.
                if fs.num_preguntas == 0:
                    fs.borrada = True
                    fs.save()
                    return JsonResponse({'ok': True, 'fs_id': fs.id})
                else:
                    return JsonResponse(
                        {'ok': False, 'mensaje': 'No se puede borrar una sección si contiene preguntas.'})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'Se ha procido un error que ha impedido el borrado.'})
        elif action == 'edit_seccion' and g_e.has_permiso('crea_secciones_faqs'):
            try:
                fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
                fs.nombre = request.POST['nombre']
                fs.save()
                return JsonResponse({'ok': True, 'nombre': fs.nombre, 'fs_id': fs.id})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
        elif action == 'add_faq' and g_e.has_permiso('crea_faqs_entidad'):
            try:
                fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
                p = FaqEntidad.objects.create(faqsection=fs)
                html = render_to_string('configura_faqs_secciones_content_pregunta.html', {'g_e': g_e, 'p': p})
                return JsonResponse({'ok': True, 'html': html, 'fs': fs.id, 'num_preguntas': fs.num_preguntas})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
        elif action == 'del_faq' and g_e.has_permiso('crea_faqs_entidad'):
            try:
                p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad)
                fs = p.faqsection
                # Soft-delete: unpublish and flag as deleted.
                p.borrada = True
                p.publicada = False
                p.save()
                return JsonResponse({'ok': True, 'p_id': p.id, 'num_preguntas': fs.num_preguntas, 'fs': fs.id})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
        elif action == 'update_input_faq' and g_e.has_permiso('edita_faqs_entidad'):
            try:
                p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad, borrada=False)
                p.pregunta = request.POST['texto']
                p.save()
                return JsonResponse({'ok': True})
            except Exception:
                return JsonResponse({'ok': False})
        elif action == 'update_respuesta' and g_e.has_permiso('edita_faqs_entidad'):
            try:
                p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad, borrada=False)
                p.respuesta = request.POST['texto']
                p.save()
                return JsonResponse({'ok': True})
            except Exception:
                return JsonResponse({'ok': False})
        elif action == 'change_pub_faq' and g_e.has_permiso('publica_faqs_entidad'):
            try:
                p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad, borrada=False)
                p.publicada = not p.publicada
                p.save()
                fs = p.faqsection
                return JsonResponse({'ok': True, 'publicar': ['No', 'Sí'][p.publicada], 'p': p.id,
                                     'num_preguntas_pub': fs.num_preguntas_pub, 'fs': fs.id})
            except Exception:
                return JsonResponse({'ok': False})
        else:
            return JsonResponse({'ok': False, 'mensaje': 'No se ha podido llevar a cabo la operación solicitada.'})
    return render(request, "configura_faqs.html",
                  {
                      'formname': 'configura_faqs',
                      'g_e': g_e,
                      'faqssections': faqssections,
                      'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
                  })
@permiso_required('acceso_faqs_gauss')
def faqs_gauss(request):
    """Render the GAUSS-level FAQ page.

    The only recognized POST action, 'libro_registros', is currently a
    no-op placeholder.
    """
    g_e = request.session['gauser_extra']
    if request.method == 'POST':
        if request.POST['action'] == 'libro_registros':
            pass
    context = {
        'formname': 'faqs_gauss',
        'g_e': g_e,
        'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
    }
    return render(request, "faqs_gauss.html", context)
@permiso_required('acceso_faqs_entidad')
def faqs_entidad(request):
    """Entity-facing FAQ listing; section bodies are loaded via AJAX.

    BUG FIX: the bare ``except:`` was narrowed to ``except Exception:``
    so SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    g_e = request.session['gauser_extra']
    faqssections = FaqSection.objects.filter(entidad=g_e.ronda.entidad, borrada=False)
    if request.method == 'POST' and request.is_ajax():
        action = request.POST['action']
        if action == 'open_accordion_fsection':
            try:
                fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
                html = render_to_string('faqs_entidad_seccion_content.html', {'s': fs, 'g_e': g_e})
                return JsonResponse({'ok': True, 'html': html})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
    return render(request, "faqs_entidad.html",
                  {
                      'formname': 'faqs_entidad',
                      'faqssections': faqssections,
                      'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
                  })
@permiso_required('acceso_faqs_borradas')
def faqs_borradas(request):
    """Management view for soft-deleted FAQ sections/questions (restore).

    BUG FIX: all bare ``except:`` clauses were narrowed to
    ``except Exception:``; observable behavior is otherwise unchanged.
    """
    g_e = request.session['gauser_extra']
    faqssections = FaqSection.objects.filter(entidad=g_e.ronda.entidad)
    if request.method == 'POST' and request.is_ajax():
        action = request.POST['action']
        if action == 'open_accordion_fsection':
            try:
                fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
                html = render_to_string('configura_faqs_borradas_secciones_content.html', {'s': fs, 'g_e': g_e})
                return JsonResponse({'ok': True, 'html': html})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
        elif action == 'unborrar_seccion':
            try:
                fs = FaqSection.objects.get(id=request.POST['seccion'], entidad=g_e.ronda.entidad)
                fs.borrada = False
                fs.save()
                return JsonResponse({'ok': True, 'fs_id': fs.id})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'Se ha procido un error que ha impedido el borrado.'})
        elif action == 'undel_faq' and g_e.has_permiso('crea_faqs_entidad'):
            try:
                p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad)
                fs = p.faqsection
                # Restore the question but keep it unpublished.
                p.borrada = False
                p.publicada = False
                p.save()
                return JsonResponse({'ok': True, 'p': p.id, 'num_preguntas': fs.num_preguntas,
                                     'num_preguntas_pub': fs.num_preguntas_pub, 'fs': fs.id,
                                     'num_preguntas_borradas': fs.num_preguntas_borradas, })
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
        else:
            return JsonResponse({'ok': False, 'mensaje': 'No se ha podido llevar a cabo la operación solicitada.'})
    return render(request, "configura_faqs_borradas.html",
                  {
                      'formname': 'configura_faqs',
                      'g_e': g_e,
                      'faqssections': faqssections,
                      'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
                  })
# @permiso_required('acceso_faqs_sugeridas')
def faqs_sugeridas(request):
    """View for user-suggested FAQs: create, edit, reply to and accept them.

    NOTE(review): the permission decorator is commented out — confirm this
    view is intentionally open to any logged-in user.

    BUG FIXES: bare ``except:`` clauses narrowed to ``except Exception:``;
    the local variable ``id`` (which shadowed the builtin) renamed to
    ``fsug_id``. Observable behavior is otherwise unchanged.
    """
    g_e = request.session['gauser_extra']
    faqssugeridas = FaqSugerida.objects.filter(entidad=g_e.ronda.entidad, aceptada=False, parent__isnull=True)
    if request.method == 'POST' and request.is_ajax():
        action = request.POST['action']
        if action == 'add_sugerencia':
            try:
                fsug = FaqSugerida.objects.create(entidad=g_e.ronda.entidad, gauser=g_e.gauser)
                html = render_to_string('faqs_sugeridas_fsug.html', {'fsug': fsug, 'g_e': g_e})
                return JsonResponse({'ok': True, 'html': html})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'Upps! No se puede añadir una nueva sugerencia.'})
        elif action == 'update_input_fsug':
            try:
                fsug_id = request.POST['fsug']
                fsug = FaqSugerida.objects.get(entidad=g_e.ronda.entidad, gauser=g_e.gauser, id=fsug_id)
                fsug.texto = request.POST['texto']
                fsug.save()
                return JsonResponse({'ok': True})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'Se ha procido un error en la actualización del texto.'})
        elif action == 'respuesta_fsug':
            try:
                # A reply is a new suggestion linked to its parent.
                parent = FaqSugerida.objects.get(id=request.POST['id'], entidad=g_e.ronda.entidad)
                fsug = FaqSugerida.objects.create(entidad=g_e.ronda.entidad, gauser=g_e.gauser, parent=parent)
                html = render_to_string('faqs_sugeridas_fsug.html', {'fsug': fsug, 'g_e': g_e})
                return JsonResponse({'ok': True, 'html': html, 'fsug': parent.id})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
        elif action == 'acepta_fsug':
            try:
                fsug = FaqSugerida.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fsug'])
                fsug.aceptada = True
                fsug.save()
                return JsonResponse({'ok': True, 'fsug': fsug.id})
            except Exception:
                return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
        else:
            return JsonResponse({'ok': False, 'mensaje': 'No se ha podido llevar a cabo la operación solicitada.'})
    return render(request, "faqs_sugeridas.html",
                  {
                      'formname': 'configura_faqs',
                      'g_e': g_e,
                      'faqssugeridas': faqssugeridas,
                      'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
                  })
| <filename>faqs/views.py
from django.http import JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from autenticar.control_acceso import permiso_required
from mensajes.models import Aviso
from faqs.models import FaqSection, FaqGauss, FaqEntidad, FaqSugerida
# Create your views here.
@permiso_required('acceso_configura_faqs')
def configura_faqs(request):
g_e = request.session['gauser_extra']
faqssections = FaqSection.objects.filter(entidad=g_e.ronda.entidad, borrada=False)
if request.method == 'POST' and request.is_ajax():
action = request.POST['action']
if action == 'add_seccion' and g_e.has_permiso('crea_secciones_faqs'):
try:
fs = FaqSection.objects.create(entidad=g_e.ronda.entidad, nombre='Nueva sección')
html = render_to_string('configura_faqs_secciones.html', {'s': fs})
return JsonResponse({'ok': True, 'html': html})
except:
return JsonResponse({'ok': False, 'mensaje': 'No se ha podido crear la sección.'})
elif action == 'open_accordion_fsection':
try:
fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
html = render_to_string('configura_faqs_secciones_content.html', {'s': fs, 'g_e': g_e})
return JsonResponse({'ok': True, 'html': html})
except:
return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
elif action == 'borrar_seccion' and g_e.has_permiso('crea_secciones_faqs'):
try:
fs = FaqSection.objects.get(id=request.POST['seccion'], entidad=g_e.ronda.entidad)
if fs.num_preguntas == 0:
fs.borrada = True
fs.save()
return JsonResponse({'ok': True, 'fs_id': fs.id})
else:
return JsonResponse(
{'ok': False, 'mensaje': 'No se puede borrar una sección si contiene preguntas.'})
except:
return JsonResponse({'ok': False, 'mensaje': 'Se ha procido un error que ha impedido el borrado.'})
elif action == 'edit_seccion' and g_e.has_permiso('crea_secciones_faqs'):
try:
fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
fs.nombre = request.POST['nombre']
fs.save()
return JsonResponse({'ok': True, 'nombre': fs.nombre, 'fs_id': fs.id})
except:
return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
elif action == 'add_faq' and g_e.has_permiso('crea_faqs_entidad'):
try:
fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
p = FaqEntidad.objects.create(faqsection=fs)
html = render_to_string('configura_faqs_secciones_content_pregunta.html', {'g_e': g_e, 'p': p})
return JsonResponse({'ok': True, 'html': html, 'fs': fs.id, 'num_preguntas': fs.num_preguntas})
except:
return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
elif action == 'del_faq' and g_e.has_permiso('crea_faqs_entidad'):
try:
p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad)
fs = p.faqsection
p.borrada = True
p.publicada = False
p.save()
return JsonResponse({'ok': True, 'p_id': p.id, 'num_preguntas': fs.num_preguntas, 'fs': fs.id})
except:
return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
elif action == 'update_input_faq' and g_e.has_permiso('edita_faqs_entidad'):
try:
p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad, borrada=False)
p.pregunta = request.POST['texto']
p.save()
return JsonResponse({'ok': True})
except:
return JsonResponse({'ok': False})
elif action == 'update_respuesta' and g_e.has_permiso('edita_faqs_entidad'):
try:
p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad, borrada=False)
p.respuesta = request.POST['texto']
p.save()
return JsonResponse({'ok': True})
except:
return JsonResponse({'ok': False})
elif action == 'change_pub_faq' and g_e.has_permiso('publica_faqs_entidad'):
try:
p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad, borrada=False)
p.publicada = not p.publicada
p.save()
fs = p.faqsection
return JsonResponse({'ok': True, 'publicar': ['No', 'Sí'][p.publicada], 'p': p.id,
'num_preguntas_pub': fs.num_preguntas_pub, 'fs': fs.id})
except:
return JsonResponse({'ok': False})
else:
return JsonResponse({'ok': False, 'mensaje': 'No se ha podido llevar a cabo la operación solicitada.'})
return render(request, "configura_faqs.html",
{
'formname': 'configura_faqs',
'g_e': g_e,
'faqssections': faqssections,
'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
})
@permiso_required('acceso_faqs_gauss')
def faqs_gauss(request):
g_e = request.session['gauser_extra']
if request.method == 'POST':
action = request.POST['action']
if action == 'libro_registros':
pass
return render(request, "faqs_gauss.html",
{
'formname': 'faqs_gauss',
'g_e': g_e,
'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
})
@permiso_required('acceso_faqs_entidad')
def faqs_entidad(request):
g_e = request.session['gauser_extra']
faqssections = FaqSection.objects.filter(entidad=g_e.ronda.entidad, borrada=False)
if request.method == 'POST' and request.is_ajax():
action = request.POST['action']
if action == 'open_accordion_fsection':
try:
fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
html = render_to_string('faqs_entidad_seccion_content.html', {'s': fs, 'g_e': g_e})
return JsonResponse({'ok': True, 'html': html})
except:
return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
return render(request, "faqs_entidad.html",
{
'formname': 'faqs_entidad',
'faqssections': faqssections,
'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
})
@permiso_required('acceso_faqs_borradas')
def faqs_borradas(request):
g_e = request.session['gauser_extra']
faqssections = FaqSection.objects.filter(entidad=g_e.ronda.entidad)
if request.method == 'POST' and request.is_ajax():
action = request.POST['action']
if action == 'open_accordion_fsection':
try:
fs = FaqSection.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fs'], borrada=False)
html = render_to_string('configura_faqs_borradas_secciones_content.html', {'s': fs, 'g_e': g_e})
return JsonResponse({'ok': True, 'html': html})
except:
return JsonResponse({'ok': False, 'mensaje': 'No se puede abrir la sección solicitada.'})
elif action == 'unborrar_seccion':
try:
fs = FaqSection.objects.get(id=request.POST['seccion'], entidad=g_e.ronda.entidad)
fs.borrada = False
fs.save()
return JsonResponse({'ok': True, 'fs_id': fs.id})
except:
return JsonResponse({'ok': False, 'mensaje': 'Se ha procido un error que ha impedido el borrado.'})
elif action == 'undel_faq' and g_e.has_permiso('crea_faqs_entidad'):
try:
p = FaqEntidad.objects.get(id=request.POST['id'], faqsection__entidad=g_e.ronda.entidad)
fs = p.faqsection
p.borrada = False
p.publicada = False
p.save()
return JsonResponse({'ok': True, 'p': p.id, 'num_preguntas': fs.num_preguntas,
'num_preguntas_pub': fs.num_preguntas_pub, 'fs': fs.id,
'num_preguntas_borradas': fs.num_preguntas_borradas, })
except:
return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
else:
return JsonResponse({'ok': False, 'mensaje': 'No se ha podido llevar a cabo la operación solicitada.'})
return render(request, "configura_faqs_borradas.html",
{
'formname': 'configura_faqs',
'g_e': g_e,
'faqssections': faqssections,
'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
})
# @permiso_required('acceso_faqs_sugeridas')
def faqs_sugeridas(request):
g_e = request.session['gauser_extra']
faqssugeridas = FaqSugerida.objects.filter(entidad=g_e.ronda.entidad, aceptada=False, parent__isnull=True)
if request.method == 'POST' and request.is_ajax():
action = request.POST['action']
if action == 'add_sugerencia':
try:
fsug = FaqSugerida.objects.create(entidad=g_e.ronda.entidad, gauser=g_e.gauser)
html = render_to_string('faqs_sugeridas_fsug.html', {'fsug': fsug, 'g_e': g_e})
return JsonResponse({'ok': True, 'html': html})
except:
return JsonResponse({'ok': False, 'mensaje': 'Upps! No se puede añadir una nueva sugerencia.'})
elif action == 'update_input_fsug':
try:
id = request.POST['fsug']
fsug = FaqSugerida.objects.get(entidad=g_e.ronda.entidad, gauser=g_e.gauser, id=id)
fsug.texto = request.POST['texto']
fsug.save()
return JsonResponse({'ok': True})
except:
return JsonResponse({'ok': False, 'mensaje': 'Se ha procido un error en la actualización del texto.'})
elif action == 'respuesta_fsug':
try:
parent = FaqSugerida.objects.get(id=request.POST['id'], entidad=g_e.ronda.entidad)
fsug = FaqSugerida.objects.create(entidad=g_e.ronda.entidad, gauser=g_e.gauser, parent=parent)
html = render_to_string('faqs_sugeridas_fsug.html', {'fsug': fsug, 'g_e': g_e})
return JsonResponse({'ok': True, 'html': html, 'fsug': parent.id})
except:
return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
elif action == 'acepta_fsug':
try:
fsug = FaqSugerida.objects.get(entidad=g_e.ronda.entidad, id=request.POST['fsug'])
fsug.aceptada = True
fsug.save()
return JsonResponse({'ok': True, 'fsug': fsug.id})
except:
return JsonResponse({'ok': False, 'mensaje': 'No has hecho la petición correctamente.'})
else:
return JsonResponse({'ok': False, 'mensaje': 'No se ha podido llevar a cabo la operación solicitada.'})
return render(request, "faqs_sugeridas.html",
{
'formname': 'configura_faqs',
'g_e': g_e,
'faqssugeridas': faqssugeridas,
'avisos': Aviso.objects.filter(usuario=g_e, aceptado=False),
})
| es | 0.292784 | # Create your views here. # @permiso_required('acceso_faqs_sugeridas') | 2.189456 | 2 |
baselines/XGBoost/model_tuning.py | jatinarora2702/TaxoExpan | 55 | 6618286 | """
__author__: <NAME>
__description__: Tune XGBoost model to obtain a good combination of hyper-parameters
"""
import xgboost as xgb
import time
import argparse
def main(args):
dtrain = xgb.DMatrix(args.train)
dval = xgb.DMatrix(args.validation)
# default XGBoost hyperparameters, c.f.: https://xgboost.readthedocs.io/en/latest/parameter.html
default_param = {
'objective': 'binary:logistic', # keep this fixed
'max_depth': 6,
'min_child_weight': 5,
'gamma': 0.0,
"subsample": 0.5,
'colsample_bytree': 0.8,
'reg_alpha': 1.0,
'reg_lambda': 0.1,
'eta': 0.1,
'importance_type': 'weight',
'random_state': 0,
'nthread': 20,
'tree_method': "hist",
'eval_metric': ['error', 'auc']
}
# indicate the hyper-parameters to be tuned
# each row indicates a hyper-parameter name and its possible value range
tuned_param = {
'max_depth': [3, 4, 5, 6, 7, 8],
'min_child_weight': [0, 3, 5, 10, 15, 20, 50],
'gamma': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
'subsample': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'colsample_bytree': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'reg_alpha': [1e-5, 1e-2, 0.1, 1, 10],
'reg_lambda': [1e-5, 1e-2, 0.1, 1, 10],
'eta': [0.01, 0.03, 0.1, 0.3, 1.0, 3.0],
'random_state': [0, 7, 19, 47, 147, 940622],
'importance_type': ["gain", "weight", "cover", "total_gain", "total_cover"]
}
# start tuning by hill-climbing
evallist = [(dtrain, 'train'), (dval, 'eval')]
start = time.time()
cnt = 0
num_round = 100
early_stopping_rounds = 10
verbose_eval = 10 # larger this value, less verbose the model is
for param_name in tuned_param:
print(f"===================== Tunning param: {param_name} =====================")
param_range = tuned_param[param_name]
best_metric = -1e10 # assume we want to maximize a metric
best_param_value = None
for param_value in param_range:
cnt += 1
default_param[param_name] = param_value
bst = xgb.train(default_param, dtrain=dtrain, num_boost_round=num_round, evals=evallist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbose_eval)
cur_metric = bst.best_score
if cur_metric >= best_metric:
best_metric = cur_metric
best_param_value = param_value
default_param[param_name] = best_param_value
print(f"!!! Best param_value: {best_param_value}")
end = time.time()
print(f"!!! Finish tuning {cnt} parameter combinations, using {end-start} seconds")
print(f"Best score: {best_metric}")
print(f"Best parameters:")
for param_name in default_param:
print(f"\t{param_name}: {default_param[param_name]}")
print(f"Please save the above hyper-parameters and manually copy to model_training.py script for learning the final model")
if __name__ == "__main__":
# Example:
# --train: "/datadrive/structure_expan/data/MAG_FoS/mag_cs_train_1102.buffer"
# --validation: "/datadrive/structure_expan/data/MAG_FoS/mag_cs_validation_1102.buffer"
parser = argparse.ArgumentParser()
parser.add_argument('--train', required=True, type=str, help='training data file path')
parser.add_argument('--validation', required=True, type=str, help='validation data file path')
args = parser.parse_args()
main(args)
| """
__author__: <NAME>
__description__: Tune XGBoost model to obtain a good combination of hyper-parameters
"""
import xgboost as xgb
import time
import argparse
def main(args):
dtrain = xgb.DMatrix(args.train)
dval = xgb.DMatrix(args.validation)
# default XGBoost hyperparameters, c.f.: https://xgboost.readthedocs.io/en/latest/parameter.html
default_param = {
'objective': 'binary:logistic', # keep this fixed
'max_depth': 6,
'min_child_weight': 5,
'gamma': 0.0,
"subsample": 0.5,
'colsample_bytree': 0.8,
'reg_alpha': 1.0,
'reg_lambda': 0.1,
'eta': 0.1,
'importance_type': 'weight',
'random_state': 0,
'nthread': 20,
'tree_method': "hist",
'eval_metric': ['error', 'auc']
}
# indicate the hyper-parameters to be tuned
# each row indicates a hyper-parameter name and its possible value range
tuned_param = {
'max_depth': [3, 4, 5, 6, 7, 8],
'min_child_weight': [0, 3, 5, 10, 15, 20, 50],
'gamma': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
'subsample': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'colsample_bytree': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'reg_alpha': [1e-5, 1e-2, 0.1, 1, 10],
'reg_lambda': [1e-5, 1e-2, 0.1, 1, 10],
'eta': [0.01, 0.03, 0.1, 0.3, 1.0, 3.0],
'random_state': [0, 7, 19, 47, 147, 940622],
'importance_type': ["gain", "weight", "cover", "total_gain", "total_cover"]
}
# start tuning by hill-climbing
evallist = [(dtrain, 'train'), (dval, 'eval')]
start = time.time()
cnt = 0
num_round = 100
early_stopping_rounds = 10
verbose_eval = 10 # larger this value, less verbose the model is
for param_name in tuned_param:
print(f"===================== Tunning param: {param_name} =====================")
param_range = tuned_param[param_name]
best_metric = -1e10 # assume we want to maximize a metric
best_param_value = None
for param_value in param_range:
cnt += 1
default_param[param_name] = param_value
bst = xgb.train(default_param, dtrain=dtrain, num_boost_round=num_round, evals=evallist, early_stopping_rounds=early_stopping_rounds, verbose_eval=verbose_eval)
cur_metric = bst.best_score
if cur_metric >= best_metric:
best_metric = cur_metric
best_param_value = param_value
default_param[param_name] = best_param_value
print(f"!!! Best param_value: {best_param_value}")
end = time.time()
print(f"!!! Finish tuning {cnt} parameter combinations, using {end-start} seconds")
print(f"Best score: {best_metric}")
print(f"Best parameters:")
for param_name in default_param:
print(f"\t{param_name}: {default_param[param_name]}")
print(f"Please save the above hyper-parameters and manually copy to model_training.py script for learning the final model")
if __name__ == "__main__":
# Example:
# --train: "/datadrive/structure_expan/data/MAG_FoS/mag_cs_train_1102.buffer"
# --validation: "/datadrive/structure_expan/data/MAG_FoS/mag_cs_validation_1102.buffer"
parser = argparse.ArgumentParser()
parser.add_argument('--train', required=True, type=str, help='training data file path')
parser.add_argument('--validation', required=True, type=str, help='validation data file path')
args = parser.parse_args()
main(args)
| en | 0.57718 | __author__: <NAME> __description__: Tune XGBoost model to obtain a good combination of hyper-parameters # default XGBoost hyperparameters, c.f.: https://xgboost.readthedocs.io/en/latest/parameter.html # keep this fixed # indicate the hyper-parameters to be tuned # each row indicates a hyper-parameter name and its possible value range # start tuning by hill-climbing # larger this value, less verbose the model is # assume we want to maximize a metric # Example: # --train: "/datadrive/structure_expan/data/MAG_FoS/mag_cs_train_1102.buffer" # --validation: "/datadrive/structure_expan/data/MAG_FoS/mag_cs_validation_1102.buffer" | 2.878668 | 3 |
zhihu/oauth/zhihu_oauth.py | githubao/xiao-awesome-zhihu | 0 | 6618287 | #!/usr/bin/env python
# encoding: utf-8
"""
@description: //TODO
@version: 1.0
@author: BaoQiang
@license: Apache Licence
@contact: <EMAIL>
@site: http://www.github.com/githubao
@software: PyCharm
@file: zhihu_oauth.py
@time: 2016/10/5 21:59
"""
from .im_andriod import *
from .zhihu_token import *
class ZhihuOAuth(ImAndroidClient):
def __init__(self, token, api_version=None, app_version=None,
app_build=None, app_za=None):
"""
增加发送token的功能
"""
assert isinstance(token, ZhihuToken)
super(ZhihuOAuth, self).__init__(api_version, app_version, app_build, app_za)
self._token = token
def __call__(self, r):
r = super(ZhihuOAuth, self).__call__(r)
r.headers['Authorization'] = '{type} {token}'.format(
type=str(self._token.type.capitalize()),
token=str(self._token.token)
)
return r
| #!/usr/bin/env python
# encoding: utf-8
"""
@description: //TODO
@version: 1.0
@author: BaoQiang
@license: Apache Licence
@contact: <EMAIL>
@site: http://www.github.com/githubao
@software: PyCharm
@file: zhihu_oauth.py
@time: 2016/10/5 21:59
"""
from .im_andriod import *
from .zhihu_token import *
class ZhihuOAuth(ImAndroidClient):
def __init__(self, token, api_version=None, app_version=None,
app_build=None, app_za=None):
"""
增加发送token的功能
"""
assert isinstance(token, ZhihuToken)
super(ZhihuOAuth, self).__init__(api_version, app_version, app_build, app_za)
self._token = token
def __call__(self, r):
r = super(ZhihuOAuth, self).__call__(r)
r.headers['Authorization'] = '{type} {token}'.format(
type=str(self._token.type.capitalize()),
token=str(self._token.token)
)
return r
| en | 0.291883 | #!/usr/bin/env python # encoding: utf-8 @description: //TODO @version: 1.0 @author: BaoQiang @license: Apache Licence @contact: <EMAIL> @site: http://www.github.com/githubao @software: PyCharm @file: zhihu_oauth.py @time: 2016/10/5 21:59 增加发送token的功能 | 2.304518 | 2 |
Text Analyzer/index.py | Sudani-Coder/python | 0 | 6618288 | <reponame>Sudani-Coder/python
## project: 9
# text analyzer
vowels = ("a", "e", "i", "o", "u")
def count_char(text, char):
count = 0
for each in text:
if each == char:
count += 1
return count
def count_vowels(text):
count = 0
for each in text:
if each in vowels:
count += 1
return count
def perc_char(text):
for char in "abcdefghijklmnopqrstuvwxyz":
perc = 100 * count_char(text, char) / len(text)
print("{0} - {1}%".format(char, round(perc, 2)))
filename = input("Please Enter The File Name -->")
thechar = input("Please Enter The char -->").lower()
with open(filename, "rt") as myfile:
text = myfile.read()
print("the char {} repeted {} number of times".format(thechar ,count_char(text, thechar)))
print("the number of vowels letters is {}".format(count_vowels(text)))
perc_char(text) | ## project: 9
# text analyzer
vowels = ("a", "e", "i", "o", "u")
def count_char(text, char):
count = 0
for each in text:
if each == char:
count += 1
return count
def count_vowels(text):
count = 0
for each in text:
if each in vowels:
count += 1
return count
def perc_char(text):
for char in "abcdefghijklmnopqrstuvwxyz":
perc = 100 * count_char(text, char) / len(text)
print("{0} - {1}%".format(char, round(perc, 2)))
filename = input("Please Enter The File Name -->")
thechar = input("Please Enter The char -->").lower()
with open(filename, "rt") as myfile:
text = myfile.read()
print("the char {} repeted {} number of times".format(thechar ,count_char(text, thechar)))
print("the number of vowels letters is {}".format(count_vowels(text)))
perc_char(text) | en | 0.591139 | ## project: 9 # text analyzer | 3.990646 | 4 |
Week6/234.py | bobsingh149/LeetCode | 101 | 6618289 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
array=[]
stack=[]
while head:
array.append(head.val)
head=head.next
if len(array)%2!=0:
array.pop(len(array)//2)
while array:
if not stack:
stack.append(array[-1])
array.pop()
else:
if stack[-1]==array[-1]:
stack.pop()
array.pop()
else:
stack.append(array[-1])
array.pop()
return len(stack)==0 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
array=[]
stack=[]
while head:
array.append(head.val)
head=head.next
if len(array)%2!=0:
array.pop(len(array)//2)
while array:
if not stack:
stack.append(array[-1])
array.pop()
else:
if stack[-1]==array[-1]:
stack.pop()
array.pop()
else:
stack.append(array[-1])
array.pop()
return len(stack)==0 | en | 0.626952 | # Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next | 3.603727 | 4 |
src/3.DecisionTree/trees.py | lvcAI/MLInAction_learning | 0 | 6618290 | <filename>src/3.DecisionTree/trees.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from math import log
import operator
import treePlotter as dtPlot
from collections import Counter
import matplotlib
# ## 决策树的一般流程
#
# - 1.收集数据:可以使用任何方法
# - 2.准备数据:树构造算法只适用于标称数据类型,因此数值型数据比徐离散化。
# - 3.分析数据:可以使用任何方法,构造树完成后,我们应该检查图形是否符合预期。
# - 4.NativeBayes.训练算法:构造树的数据结构。
# - 5.测试算法:使用经验树计算错误率。
# - 6.使用算法:此步骤可以适用于任何监督学习算法,而使用决策树可以更好地理解数据的内在含义
# 程序清单3-1 计算给定数据集的香农熵
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] +=1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(float(prob), 2)
return shannonEnt
# - 1.收集数据:可以使用任何方法
# 构造数据
def createDataSet():
dataSet = [[1,1,'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
return dataSet,labels
# - 2.准备数据:树构造算法只适用于标称数据类型,因此数值型数据比徐离散化。
# 划分数据集
# 程序清单3-2 按照给定的特征划分数据集
def splitDataSet(dataSet,axis,value):
"""
Desc:
按照给定的特征划分数据集
Args:
dataSet -- 带划分的数据集
axis -- 划分数据集的特征
value -- 特征的返回值
Returns:
retDataSet -- 特征数据集
调用方式
import kNN
group, labels = kNN.createDataSet()
"""
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
# 程序清单3-3 选择最好饿数据集划分方式
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0
bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prop = len(subDataSet)/float(len(dataSet))
newEntropy += prop * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy
# print('infoGain=', infoGain, 'bestFeature=', i, baseEntropy, newEntropy)
if infoGain > bestInfoGain:
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter, reverse=True)
return sortedClassCount[0][0]
#程序清单3-4.NativeBayes 创建书的函数代码
def createTree(dataSet, labels):
classList = [example[-1] for example in dataSet]
print("classList = ", classList, "classListCounts = ", len(classList))
# `count()` 方法用于统计某个元素在列表中出现的次数。
# 递归结束条件 当获取的分类的 labels 都是一样的,递归结束
if classList.count(classList[0]) == len(classList):
return classList[0]
# dataSet 每一个数据集,只剩下 目标类(labels)时,返回labels 中出现次数多的 label
# 此时,对所有 feature(特征向量)已经划分完了,
if len(dataSet[0]) == 1:
return majorityCnt(classList)
# 最佳 特征向量
bestFeat = chooseBestFeatureToSplit(dataSet)
# 最佳特征向量的标签集
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
#print(myTree)
del(labels[bestFeat])
featValues = [data[bestFeat] for data in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
print(myTree)
return myTree
# 测试算法:使用决策树进行分类
# 程序清单3-8 使用
def classify(inputTree, featLabels,testVec):
#
#.TypeError: ‘dict_keys’ object does not support indexing
# 这个问题是python版本的问题
# 1
# #如果使用的是python2
# firstStr = myTree.keys()[0]
# #LZ使用的是python3
# firstSides = list(myTree.keys())
# firstStr = firstSides[0]
# ---------------------
# firstStr = inputTree.keys()[0] 该写法是错误的 在python3 下
firstStr = list(inputTree.keys())[0]
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
# 使用算法:决策树的存储
def storeTree(inputTree, fileName):
import pickle
fw = open(fileName, 'w')
pickle.dump(inputTree, fw)
fw.close()
def grabTree(fileName):
import pickle
fr = open(fileName)
return pickle.load(fr)
# 测试实例
def test():
dataSet, labels = createDataSet()
print(dataSet)
print(labels)
# print(chooseBestFeatureToSplit(dataSet))
print(createTree(dataSet, labels))
def ContactLensesTest():
"""
Desc:
预测隐形眼镜的测试代码,并将结果画出来
Args:
none
Returns:
none
"""
# 加载隐形眼镜相关的 文本文件 数据
fr = open('../db/3.DecisionTree/lenses.txt')
# 解析数据,获得 features 数据
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
# 得到数据的对应的 Labels
lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
# 使用上面的创建决策树的代码,构造预测隐形眼镜的决策树
lensesTree = createTree(lenses, lensesLabels)
print(lensesTree)
# 画图可视化展现
dtPlot.createPlot(lensesTree)
if __name__ == '__main__':
ContactLensesTest()
| <filename>src/3.DecisionTree/trees.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from math import log
import operator
import treePlotter as dtPlot
from collections import Counter
import matplotlib
# ## 决策树的一般流程
#
# - 1.收集数据:可以使用任何方法
# - 2.准备数据:树构造算法只适用于标称数据类型,因此数值型数据比徐离散化。
# - 3.分析数据:可以使用任何方法,构造树完成后,我们应该检查图形是否符合预期。
# - 4.NativeBayes.训练算法:构造树的数据结构。
# - 5.测试算法:使用经验树计算错误率。
# - 6.使用算法:此步骤可以适用于任何监督学习算法,而使用决策树可以更好地理解数据的内在含义
# 程序清单3-1 计算给定数据集的香农熵
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] +=1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(float(prob), 2)
return shannonEnt
# - 1.收集数据:可以使用任何方法
# 构造数据
def createDataSet():
dataSet = [[1,1,'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
return dataSet,labels
# - 2.准备数据:树构造算法只适用于标称数据类型,因此数值型数据比徐离散化。
# 划分数据集
# 程序清单3-2 按照给定的特征划分数据集
def splitDataSet(dataSet,axis,value):
"""
Desc:
按照给定的特征划分数据集
Args:
dataSet -- 带划分的数据集
axis -- 划分数据集的特征
value -- 特征的返回值
Returns:
retDataSet -- 特征数据集
调用方式
import kNN
group, labels = kNN.createDataSet()
"""
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
# 程序清单3-3 选择最好饿数据集划分方式
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0
bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prop = len(subDataSet)/float(len(dataSet))
newEntropy += prop * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy
# print('infoGain=', infoGain, 'bestFeature=', i, baseEntropy, newEntropy)
if infoGain > bestInfoGain:
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter, reverse=True)
return sortedClassCount[0][0]
#程序清单3-4.NativeBayes 创建书的函数代码
def createTree(dataSet, labels):
classList = [example[-1] for example in dataSet]
print("classList = ", classList, "classListCounts = ", len(classList))
# `count()` 方法用于统计某个元素在列表中出现的次数。
# 递归结束条件 当获取的分类的 labels 都是一样的,递归结束
if classList.count(classList[0]) == len(classList):
return classList[0]
# dataSet 每一个数据集,只剩下 目标类(labels)时,返回labels 中出现次数多的 label
# 此时,对所有 feature(特征向量)已经划分完了,
if len(dataSet[0]) == 1:
return majorityCnt(classList)
# 最佳 特征向量
bestFeat = chooseBestFeatureToSplit(dataSet)
# 最佳特征向量的标签集
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
#print(myTree)
del(labels[bestFeat])
featValues = [data[bestFeat] for data in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
print(myTree)
return myTree
# 测试算法:使用决策树进行分类
# 程序清单3-8 使用
def classify(inputTree, featLabels,testVec):
#
#.TypeError: ‘dict_keys’ object does not support indexing
# 这个问题是python版本的问题
# 1
# #如果使用的是python2
# firstStr = myTree.keys()[0]
# #LZ使用的是python3
# firstSides = list(myTree.keys())
# firstStr = firstSides[0]
# ---------------------
# firstStr = inputTree.keys()[0] 该写法是错误的 在python3 下
firstStr = list(inputTree.keys())[0]
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
# 使用算法:决策树的存储
def storeTree(inputTree, fileName):
import pickle
fw = open(fileName, 'w')
pickle.dump(inputTree, fw)
fw.close()
def grabTree(fileName):
import pickle
fr = open(fileName)
return pickle.load(fr)
# 测试实例
def test():
dataSet, labels = createDataSet()
print(dataSet)
print(labels)
# print(chooseBestFeatureToSplit(dataSet))
print(createTree(dataSet, labels))
def ContactLensesTest():
"""
Desc:
预测隐形眼镜的测试代码,并将结果画出来
Args:
none
Returns:
none
"""
# 加载隐形眼镜相关的 文本文件 数据
fr = open('../db/3.DecisionTree/lenses.txt')
# 解析数据,获得 features 数据
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
# 得到数据的对应的 Labels
lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
# 使用上面的创建决策树的代码,构造预测隐形眼镜的决策树
lensesTree = createTree(lenses, lensesLabels)
print(lensesTree)
# 画图可视化展现
dtPlot.createPlot(lensesTree)
if __name__ == '__main__':
ContactLensesTest()
| zh | 0.953464 | #!/usr/bin/env python # -*- coding: UTF-8 -*- # ## 决策树的一般流程 # # - 1.收集数据:可以使用任何方法 # - 2.准备数据:树构造算法只适用于标称数据类型,因此数值型数据比徐离散化。 # - 3.分析数据:可以使用任何方法,构造树完成后,我们应该检查图形是否符合预期。 # - 4.NativeBayes.训练算法:构造树的数据结构。 # - 5.测试算法:使用经验树计算错误率。 # - 6.使用算法:此步骤可以适用于任何监督学习算法,而使用决策树可以更好地理解数据的内在含义 # 程序清单3-1 计算给定数据集的香农熵 # - 1.收集数据:可以使用任何方法 # 构造数据 # - 2.准备数据:树构造算法只适用于标称数据类型,因此数值型数据比徐离散化。 # 划分数据集 # 程序清单3-2 按照给定的特征划分数据集 Desc: 按照给定的特征划分数据集 Args: dataSet -- 带划分的数据集 axis -- 划分数据集的特征 value -- 特征的返回值 Returns: retDataSet -- 特征数据集 调用方式 import kNN group, labels = kNN.createDataSet() # 程序清单3-3 选择最好饿数据集划分方式 # print('infoGain=', infoGain, 'bestFeature=', i, baseEntropy, newEntropy) #程序清单3-4.NativeBayes 创建书的函数代码 # `count()` 方法用于统计某个元素在列表中出现的次数。 # 递归结束条件 当获取的分类的 labels 都是一样的,递归结束 # dataSet 每一个数据集,只剩下 目标类(labels)时,返回labels 中出现次数多的 label # 此时,对所有 feature(特征向量)已经划分完了, # 最佳 特征向量 # 最佳特征向量的标签集 #print(myTree) # 测试算法:使用决策树进行分类 # 程序清单3-8 使用 # #.TypeError: ‘dict_keys’ object does not support indexing # 这个问题是python版本的问题 # 1 # #如果使用的是python2 # firstStr = myTree.keys()[0] # #LZ使用的是python3 # firstSides = list(myTree.keys()) # firstStr = firstSides[0] # --------------------- # firstStr = inputTree.keys()[0] 该写法是错误的 在python3 下 # 使用算法:决策树的存储 # 测试实例 # print(chooseBestFeatureToSplit(dataSet)) Desc: 预测隐形眼镜的测试代码,并将结果画出来 Args: none Returns: none # 加载隐形眼镜相关的 文本文件 数据 # 解析数据,获得 features 数据 # 得到数据的对应的 Labels # 使用上面的创建决策树的代码,构造预测隐形眼镜的决策树 # 画图可视化展现 | 2.744022 | 3 |
MxOnline/apps/assets/models.py | zhangsheng166/demo | 0 | 6618291 | <reponame>zhangsheng166/demo
# _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Equipment(models.Model):
hostname = models.CharField(max_length=50, verbose_name=u"主机名", default="")
ip = models.CharField(max_length=50, verbose_name=u"IP地址", default="")
information = models.CharField(max_length=300, verbose_name=u"硬件配置", default="")
OSname = models.CharField(max_length=300, verbose_name=u"系统镜像", default="")
role = models.CharField(max_length=50, choices=(("Product", u"生产环境"), ("Test", u"测试环境")), default="Test",verbose_name=u"所属环境")
buildtime = models.DateField(verbose_name=u"创建时间", null=True, blank=True)
service = models.CharField(max_length=600, default="", verbose_name=u"部署服务")
manager = models.CharField(max_length=300, default="", verbose_name=u"负责人")
area = models.CharField(max_length=50, default="", verbose_name=u"地区")
beizhu = models.CharField(max_length=300, default="", verbose_name=u"备注")
class Meta:
verbose_name = "资产信息"
verbose_name_plural = verbose_name
def __str__(self):
return '{0}'.format(self.hostname)
| # _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Equipment(models.Model):
hostname = models.CharField(max_length=50, verbose_name=u"主机名", default="")
ip = models.CharField(max_length=50, verbose_name=u"IP地址", default="")
information = models.CharField(max_length=300, verbose_name=u"硬件配置", default="")
OSname = models.CharField(max_length=300, verbose_name=u"系统镜像", default="")
role = models.CharField(max_length=50, choices=(("Product", u"生产环境"), ("Test", u"测试环境")), default="Test",verbose_name=u"所属环境")
buildtime = models.DateField(verbose_name=u"创建时间", null=True, blank=True)
service = models.CharField(max_length=600, default="", verbose_name=u"部署服务")
manager = models.CharField(max_length=300, default="", verbose_name=u"负责人")
area = models.CharField(max_length=50, default="", verbose_name=u"地区")
beizhu = models.CharField(max_length=300, default="", verbose_name=u"备注")
class Meta:
verbose_name = "资产信息"
verbose_name_plural = verbose_name
def __str__(self):
return '{0}'.format(self.hostname) | en | 0.896383 | # _*_ encoding:utf-8 _*_ # Create your models here. | 2.107189 | 2 |
scriptlets/game.py | GabeKnuth/STTNG | 0 | 6618292 | # Game mode Scriptlet for Big Shot
from mpf.system.scriptlet import Scriptlet
class Game(Scriptlet):
def on_load(self):
self.machine.events.add_handler('machineflow_Game_start', self.start)
self.machine.events.add_handler('machineflow_Game_stop', self.stop)
# self.machine.events.add_handler('timer_tick', self.tick)
def start(self):
# turn on the GI
for light in self.machine.lights.items_tagged('GI'):
light.on()
def tick(self):
pass
# self.machine.platform.verify_switches()
def enable_classic_mode(self):
pass
def enable_modern_mode(self):
pass
def stop(self):
self.machine.events.remove_handler(self.player_added)
self.machine.events.remove_handler(self.ball_started)
def player_added(self, **kwargs):
pass
def ball_started(self, **kwargs):
self.log.debug("Game Scriplet ball_started()")
# Need this since Big Shot's plunger lane is not a ball device,
# so we need to automatically launch a "live" ball when the ball
# starts
if not self.machine.ball_controller.num_balls_live:
self.machine.ball_controller.add_live()
# Gabe put this in because we need to make sure the 8 ball lights
# are turned off when a ball starts. They seem to have a mind of
# their own since there's no device attached to them
| # Game mode Scriptlet for Big Shot
from mpf.system.scriptlet import Scriptlet
class Game(Scriptlet):
    """Game-mode scriptlet for Big Shot (duplicate copy; behavior unchanged)."""

    def on_load(self):
        # Register the Game machine-flow callbacks in the original order.
        registrations = (
            ('machineflow_Game_start', self.start),
            ('machineflow_Game_stop', self.stop),
        )
        for event_name, callback in registrations:
            self.machine.events.add_handler(event_name, callback)
        # self.machine.events.add_handler('timer_tick', self.tick)

    def start(self):
        # Bring up the general illumination.
        gi_lights = self.machine.lights.items_tagged('GI')
        for gi_light in gi_lights:
            gi_light.on()

    def tick(self):
        # self.machine.platform.verify_switches()
        pass

    def enable_classic_mode(self):
        pass

    def enable_modern_mode(self):
        pass

    def stop(self):
        for handler in (self.player_added, self.ball_started):
            self.machine.events.remove_handler(handler)

    def player_added(self, **kwargs):
        pass

    def ball_started(self, **kwargs):
        self.log.debug("Game Scriplet ball_started()")
        # Big Shot's plunger lane is not a ball device, so manually put a
        # "live" ball into play when the ball starts.
        if not self.machine.ball_controller.num_balls_live:
            self.machine.ball_controller.add_live()
        # The 8-ball lights have no device attached, so ensure they are off
        # when a ball starts.
| en | 0.890781 | # Game mode Scriptlet for Big Shot # self.machine.events.add_handler('timer_tick', self.tick) # turn on the GI # self.machine.platform.verify_switches() # Need this since Big Shot's plunger lane is not a ball device, # so we need to automatically launch a "live" ball when the ball # starts # Gabe put this in because we need to make sure the 8 ball lights # are turned off when a ball starts. They seem to have a mind of # their own since there's no device attached to them | 2.709495 | 3 |
loops/findTargetIndex.py | Awes35/python-functions | 0 | 6618293 | <reponame>Awes35/python-functions
#author <NAME>
#function to search list of numbers L for target T, returning
#index of T occurrence, -1 otherwise
def findTargetIndex(L, T):
    """Return the index of the last occurrence of T in L, or -1 if absent.

    The original always walked the entire list, updating a counter on every
    match. Scanning from the end gives the same result (the last occurrence)
    while returning as soon as a match is found.
    """
    for i in range(len(L) - 1, -1, -1):  # last index down to 0
        if L[i] == T:
            return i
    return -1
| #author <NAME>
#function to search list of numbers L for target T, returning
#index of T occurrence, -1 otherwise
def findTargetIndex(L, T):
    """Return the index of the last occurrence of T in L, -1 otherwise."""
    found_at = -1
    for position, value in enumerate(L):
        if value == T:
            found_at = position
    return found_at
build/lib/ezyt/imageEditor/tmp.py | AlexBacho/ezyt | 1 | 6618294 | <reponame>AlexBacho/ezyt
from image_utils import ImageText
def add_colored_text(text, output, image=None):
    """Render *text* word-by-word onto an 800x600 image, saving to *output*.

    Words in ``words_to_highlight`` are drawn in red, everything else white.
    The *image* parameter is currently unused -- presumably a planned
    background image; TODO confirm before removing.
    """
    main_color = (255,255,255)    # white for ordinary words
    second_color = (255,0,0)      # red for highlighted words
    padding_x = 10                # horizontal gap between words
    padding_y = 10                # vertical gap between lines
    margin_x = 0
    margin_y = 0
    words_to_highlight = ["reddit", "day"]
    words = text.split(" ")
    xy = (margin_x, margin_y)     # drawing cursor (x, y)
    size = (800, 600)
    img = ImageText(size)
    font = "resources/font_tumbnail_abeezee.otf"
    font_size = 31
    for word in words:
        if word.lower() in words_to_highlight:
            color = second_color
        else:
            color = main_color
        # write_text returns an (x, y) offset/size tuple used to advance the cursor.
        offset = img.write_text(xy, word, font, font_size=font_size, color=color)
        xy = (xy[0] + offset[0] + padding_x, xy[1])
        if xy[0] > size[0] / 2:
            # Wrap to a new line once past the horizontal midpoint.
            xy = (margin_x, offset[1] + padding_y)
    img.save(output)

if __name__ == "__main__":
    # Demo: render a sample sentence with "reddit" and "day" highlighted.
    text = "hello reddit, how has your day been going so far?"
    out = "tmp.png"
    add_colored_text(text, out)
| from image_utils import ImageText
def add_colored_text(text, output, image=None):
    """Render *text* onto an 800x600 image, highlighting certain words in red."""
    normal_rgb = (255, 255, 255)
    highlight_rgb = (255, 0, 0)
    word_gap, line_gap = 10, 10
    left_margin, top_margin = 0, 0
    highlight_words = {"reddit", "day"}
    canvas_size = (800, 600)
    canvas = ImageText(canvas_size)
    font_path = "resources/font_tumbnail_abeezee.otf"
    text_size = 31
    cursor = (left_margin, top_margin)
    for token in text.split(" "):
        chosen = highlight_rgb if token.lower() in highlight_words else normal_rgb
        drawn = canvas.write_text(cursor, token, font_path,
                                  font_size=text_size, color=chosen)
        cursor = (cursor[0] + drawn[0] + word_gap, cursor[1])
        if cursor[0] > canvas_size[0] / 2:
            # Wrap to the next line once past the horizontal midpoint.
            cursor = (left_margin, drawn[1] + line_gap)
    canvas.save(output)

if __name__ == "__main__":
    demo_text = "hello reddit, how has your day been going so far?"
    demo_out = "tmp.png"
    add_colored_text(demo_text, demo_out)
server/apps/api/models.py | htmercury/GLselector | 0 | 6618295 | <filename>server/apps/api/models.py
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class Face(models.Model):
    """A detected face's geometry measurements, linked to the owning user."""
    chin_angle = models.FloatField()
    mofa_ratio = models.FloatField()
    hlmo_angle = models.FloatField()
    # Classified face-shape label (free text, e.g. "oval").
    shape = models.CharField(max_length=200)
    # BUG FIX: on_delete must be a callable such as models.CASCADE;
    # the string 'CASCADE' fails when Django runs the deletion collector.
    user = models.ForeignKey(User, related_name="faces", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __repr__(self):
        # BUG FIX: the old format string also interpolated self.image,
        # which is not a field on this model and raised AttributeError.
        return "<Face object: {} {} {} {} {}>".format(
            self.chin_angle, self.mofa_ratio, self.hlmo_angle, self.shape, self.user)
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class Face(models.Model):
    """A detected face's geometry measurements, linked to the owning user."""
    chin_angle = models.FloatField()
    mofa_ratio = models.FloatField()
    hlmo_angle = models.FloatField()
    # Classified face-shape label (free text, e.g. "oval").
    shape = models.CharField(max_length=200)
    # BUG FIX: on_delete requires a callable (models.CASCADE), not the
    # string 'CASCADE', which breaks Django's deletion collector.
    user = models.ForeignKey(User, related_name="faces", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __repr__(self):
        # BUG FIX: dropped self.image from the format call -- no such field
        # exists on this model, so repr() raised AttributeError.
        return "<Face object: {} {} {} {} {}>".format(
            self.chin_angle, self.mofa_ratio, self.hlmo_angle, self.shape, self.user)
gripper_representation/gripper_feature_extraction.py | stanford-iprl-lab/UniGrasp | 39 | 6618296 | <reponame>stanford-iprl-lab/UniGrasp
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import time
import sys
import argparse
from tf_models.gripper_auto_encoding import pc_encoder as pc_encoder
from tf_models.gripper_auto_encoding import pc_decoder
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(BASE_DIR,'../')

# Basic model parameters
parser = argparse.ArgumentParser()
parser.add_argument('--saver_dir',default='./saved_models/',help='Directory to save the trained model')
parser.add_argument('--learning_rate',type=float,default=0.0005,help='Initial learning rate')
parser.add_argument('--num_epochs',type=int,default=1000 * 1000,help='NUmber of epochs to run trainer')
parser.add_argument('--batch_size',type=int,default=2,help='Number of examples within a batch')
parser.add_argument('--max_model_to_keep',type=int,default=400,help='max saved models')
parser.add_argument('--log_dir',default='./logging/',help='folder to save logging info')
FLAGS = parser.parse_args()

# Make the shared 3D point-cloud viewer importable, then import it.
sys.path.insert(0,"../vis_3d")
from show3d_balls import showpoints

# Ensure the output directories exist.
if not os.path.exists(FLAGS.saver_dir):
    os.mkdir(FLAGS.saver_dir)
if not os.path.exists(FLAGS.log_dir):
    os.mkdir(FLAGS.log_dir)

# Fix RNG seeds for reproducibility.
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)

# Build the TF1 auto-encoder graph: batches of 2048-point (x, y, z)
# gripper clouds in; latent feature and reconstructed cloud out.
in_gripper_tf = tf.placeholder(tf.float32,[None,2048,3],'gripper_in')
gt_gripper_tf = tf.placeholder(tf.float32,[None,2048,3],'gripper_gt')
with tf.variable_scope('gripper_encoder'):
    gripper_feat_tf = pc_encoder(in_gripper_tf)
with tf.variable_scope('gripper_decoder'):
    out_gripper_tf = pc_decoder(gripper_feat_tf)

# Module-level session shared by restore()/test() below.
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
sess = tf.Session(config=config)
sess.run(init_op)
SAVER = tf.train.Saver(max_to_keep=1000)
def restore(epoch):
    """Load the auto-encoder checkpoint for *epoch* into the global session.

    Checkpoints are named "<epoch>model.ckpt" with no separator between the
    epoch number and the suffix -- intentional, matching how they were saved.
    """
    save_top_dir = os.path.join("../saved_models/gripper_representation")
    ckpt_path = os.path.join(save_top_dir,str(epoch)+'model.ckpt')
    print("restoring from %s" % ckpt_path)
    SAVER.restore(sess, ckpt_path)
def test(gripper_dir=None,gripper_name="robotiq_3f"):
    """Encode all point clouds for one gripper and save aggregate features.

    Loads every file in *gripper_dir* whose name starts with *gripper_name*,
    runs them through the module-level auto-encoder graph, writes per-dimension
    mean/max/min latent-feature files next to the inputs, and visualizes the
    reconstruction against the ground truth.
    Assumes each .npy file is a (2048, 3) cloud to match the placeholder
    shape -- TODO confirm for new gripper datasets.
    """
    in_gripper_file_list = [line for line in os.listdir(gripper_dir) if line.startswith(gripper_name)]
    in_gripper_list = []
    for idx, env_i in enumerate(in_gripper_file_list):
        env_dir = os.path.join(gripper_dir, env_i)
        obj_pcs = np.load(env_dir)
        in_gripper_list.append(obj_pcs)
    in_gripper = np.array(in_gripper_list)
    gt_gripper = in_gripper  # auto-encoding: the target is the input itself
    out_gripper, gripper_feat = sess.run([out_gripper_tf, gripper_feat_tf],feed_dict={in_gripper_tf: in_gripper, gt_gripper_tf:gt_gripper})
    print(gripper_feat.shape)
    # Aggregate the latent features across all configurations of this gripper.
    gripper_mean = np.mean(gripper_feat,axis=0)
    gripper_max = np.max(gripper_feat,axis=0)
    gripper_min = np.min(gripper_feat,axis=0)
    print(gripper_mean.shape)
    # Write the aggregated features alongside the input clouds.
    recon_dir = gripper_dir
    mean_feat_file = os.path.join(recon_dir,'mean.npy')
    max_feat_file = os.path.join(recon_dir,'max.npy')
    min_feat_file = os.path.join(recon_dir,'min.npy')
    print(mean_feat_file)
    print(max_feat_file)
    print(min_feat_file)
    np.save(mean_feat_file,gripper_mean)
    np.save(max_feat_file,gripper_max)
    np.save(min_feat_file,gripper_min)
    if 1:  # visualization toggle, left permanently on
        for gj in range(len(in_gripper)):
            # First 2048 rows: reconstruction (color channel 0 = 255);
            # last 2048 rows: ground truth (channel 1 = 255).
            # Channel order follows the viewer's convention (see GRB note).
            green = np.zeros((4096,3))
            green[:2048,0] = 255.0
            green[2048:,1] = 255.0
            pred_gripper = np.copy(out_gripper[gj])
            gt__gripper = np.copy(gt_gripper[gj])
            gripper_two = np.zeros((4096,3))
            gripper_two[:2048,:] = pred_gripper
            gripper_two[2048:,:] = gt__gripper
            showpoints(gripper_two,c_gt=green,waittime=50,freezerot=False) ### GRB
            #input("gripper")
if __name__ == "__main__":
    #### Restore the auto-encoder checkpoint saved at epoch 2248.
    restore(2248)
    #### Specify the folder holding this gripper's point clouds.
    gripper_dir = "../data/grippers/robotiq_3f"
    gripper_name = "robotiq_3f"
    test(gripper_dir,gripper_name)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import time
import sys
import argparse
from tf_models.gripper_auto_encoding import pc_encoder as pc_encoder
from tf_models.gripper_auto_encoding import pc_decoder
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(BASE_DIR,'../')

# Basic model parameters
parser = argparse.ArgumentParser()
parser.add_argument('--saver_dir',default='./saved_models/',help='Directory to save the trained model')
parser.add_argument('--learning_rate',type=float,default=0.0005,help='Initial learning rate')
parser.add_argument('--num_epochs',type=int,default=1000 * 1000,help='NUmber of epochs to run trainer')
parser.add_argument('--batch_size',type=int,default=2,help='Number of examples within a batch')
parser.add_argument('--max_model_to_keep',type=int,default=400,help='max saved models')
parser.add_argument('--log_dir',default='./logging/',help='folder to save logging info')
FLAGS = parser.parse_args()

# Make the shared 3D point-cloud viewer importable, then import it.
sys.path.insert(0,"../vis_3d")
from show3d_balls import showpoints

# Ensure the output directories exist.
if not os.path.exists(FLAGS.saver_dir):
    os.mkdir(FLAGS.saver_dir)
if not os.path.exists(FLAGS.log_dir):
    os.mkdir(FLAGS.log_dir)

# Fix RNG seeds for reproducibility.
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)

# Build the TF1 auto-encoder graph: batches of 2048-point (x, y, z)
# gripper clouds in; latent feature and reconstructed cloud out.
in_gripper_tf = tf.placeholder(tf.float32,[None,2048,3],'gripper_in')
gt_gripper_tf = tf.placeholder(tf.float32,[None,2048,3],'gripper_gt')
with tf.variable_scope('gripper_encoder'):
    gripper_feat_tf = pc_encoder(in_gripper_tf)
with tf.variable_scope('gripper_decoder'):
    out_gripper_tf = pc_decoder(gripper_feat_tf)

# Module-level session shared by restore()/test() below.
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
sess = tf.Session(config=config)
sess.run(init_op)
SAVER = tf.train.Saver(max_to_keep=1000)
def restore(epoch):
    """Load the gripper auto-encoder checkpoint for *epoch* into the session."""
    checkpoint_root = os.path.join("../saved_models/gripper_representation")
    checkpoint_file = "%smodel.ckpt" % str(epoch)
    ckpt_path = os.path.join(checkpoint_root, checkpoint_file)
    print("restoring from %s" % ckpt_path)
    SAVER.restore(sess, ckpt_path)
def test(gripper_dir=None,gripper_name="robotiq_3f"):
    """Encode all point clouds for one gripper and save aggregate features.

    Loads every file in *gripper_dir* whose name starts with *gripper_name*,
    runs them through the module-level auto-encoder graph, writes per-dimension
    mean/max/min latent-feature files next to the inputs, and visualizes the
    reconstruction against the ground truth.
    Assumes each .npy file is a (2048, 3) cloud to match the placeholder
    shape -- TODO confirm for new gripper datasets.
    """
    in_gripper_file_list = [line for line in os.listdir(gripper_dir) if line.startswith(gripper_name)]
    in_gripper_list = []
    for idx, env_i in enumerate(in_gripper_file_list):
        env_dir = os.path.join(gripper_dir, env_i)
        obj_pcs = np.load(env_dir)
        in_gripper_list.append(obj_pcs)
    in_gripper = np.array(in_gripper_list)
    gt_gripper = in_gripper  # auto-encoding: the target is the input itself
    out_gripper, gripper_feat = sess.run([out_gripper_tf, gripper_feat_tf],feed_dict={in_gripper_tf: in_gripper, gt_gripper_tf:gt_gripper})
    print(gripper_feat.shape)
    # Aggregate the latent features across all configurations of this gripper.
    gripper_mean = np.mean(gripper_feat,axis=0)
    gripper_max = np.max(gripper_feat,axis=0)
    gripper_min = np.min(gripper_feat,axis=0)
    print(gripper_mean.shape)
    # Write the aggregated features alongside the input clouds.
    recon_dir = gripper_dir
    mean_feat_file = os.path.join(recon_dir,'mean.npy')
    max_feat_file = os.path.join(recon_dir,'max.npy')
    min_feat_file = os.path.join(recon_dir,'min.npy')
    print(mean_feat_file)
    print(max_feat_file)
    print(min_feat_file)
    np.save(mean_feat_file,gripper_mean)
    np.save(max_feat_file,gripper_max)
    np.save(min_feat_file,gripper_min)
    if 1:  # visualization toggle, left permanently on
        for gj in range(len(in_gripper)):
            # First 2048 rows: reconstruction (color channel 0 = 255);
            # last 2048 rows: ground truth (channel 1 = 255).
            # Channel order follows the viewer's convention (see GRB note).
            green = np.zeros((4096,3))
            green[:2048,0] = 255.0
            green[2048:,1] = 255.0
            pred_gripper = np.copy(out_gripper[gj])
            gt__gripper = np.copy(gt_gripper[gj])
            gripper_two = np.zeros((4096,3))
            gripper_two[:2048,:] = pred_gripper
            gripper_two[2048:,:] = gt__gripper
            showpoints(gripper_two,c_gt=green,waittime=50,freezerot=False) ### GRB
            #input("gripper")
if __name__ == "__main__":
    #### Restore the auto-encoder checkpoint saved at epoch 2248.
    restore(2248)
    #### Specify the folder holding this gripper's point clouds.
    gripper_dir = "../data/grippers/robotiq_3f"
    gripper_name = "robotiq_3f"
    test(gripper_dir,gripper_name)
brand/views.py | ohahlev/ahlev_django_brand | 0 | 6618297 | <filename>brand/views.py
from django.shortcuts import render
from django.http import HttpResponse
from .models import Brand
def index(request):
    """Render the brand listing page with every Brand record."""
    return render(
        request,
        'brand/index.html',
        context={'brands': Brand.objects.all()},
    )
| <filename>brand/views.py
from django.shortcuts import render
from django.http import HttpResponse
from .models import Brand
def index(request):
    """Render the brand listing page with every Brand record."""
    brands = Brand.objects.all()
    context = {
        'brands': brands
    }
    return render(request, 'brand/index.html', context=context)
| none | 1 | 1.735621 | 2 | |
app/models/comments.py | EvanMHargett/PoE-Tracker | 1 | 6618298 | from .db import db
class Comment(db.Model):
    """A user's comment on a flip (SQLAlchemy model)."""
    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key = True)
    # Owning flip and authoring user (camelCase matches the project's API style).
    flipId = db.Column(db.Integer, db.ForeignKey("flips.id"), nullable = False)
    userId = db.Column(db.Integer, db.ForeignKey("users.id"), nullable = False)
    content = db.Column(db.String, nullable = False)

    def to_dict(self):
        """Serialize this comment to a plain dict for JSON responses."""
        return {
            "id": self.id,
            "flipId": self.flipId,
            "userId": self.userId,
            "content": self.content,
        }
| from .db import db
class Comment(db.Model):
    """A user's comment on a flip (SQLAlchemy model)."""
    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key = True)
    flipId = db.Column(db.Integer, db.ForeignKey("flips.id"), nullable = False)
    userId = db.Column(db.Integer, db.ForeignKey("users.id"), nullable = False)
    content = db.Column(db.String, nullable = False)

    def to_dict(self):
        """Serialize this comment to a plain dict for JSON responses."""
        return {name: getattr(self, name)
                for name in ("id", "flipId", "userId", "content")}
| none | 1 | 2.666889 | 3 | |
tests/test_suspend_suppliers_without_agreements.py | alphagov-mirror/digitalmarketplace-scripts | 1 | 6618299 | import mock
from dmscripts.suspend_suppliers_without_agreements import (
suspend_supplier_services, get_all_email_addresses_for_supplier
)
class TestSuspendSupplierServices:
    """Tests for suspend_supplier_services using a mocked API client/logger."""

    def setup(self):
        # Fresh mocks per test; the API client reports two published services.
        self.data_api_client = mock.Mock()
        self.data_api_client.find_services.return_value = {
            'services': [
                {'id': 1},
                {'id': 2}
            ],
            "meta": {
                "total": 2
            }
        }
        self.logger = mock.Mock()

    def test_suspend_supplier_services_updates_services_and_returns_count(self):
        """Happy path: both services are disabled and the count is returned."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 2
        assert self.data_api_client.find_services.call_args_list == [
            mock.call(supplier_id=12345, framework='g-cloud-11', status='published')
        ]
        assert self.data_api_client.update_service_status.call_args_list == [
            mock.call(1, 'disabled', "Suspend services script"),
            mock.call(2, 'disabled', "Suspend services script"),
        ]
        assert self.logger.info.call_args_list == [
            mock.call("Setting 2 services to 'disabled' for supplier 12345.")
        ]

    def test_suspend_supplier_services_skips_if_no_services(self):
        """No published services: nothing is updated and an error is logged."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        self.data_api_client.find_services.return_value = {'services': []}
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == [
            mock.call(supplier_id=12345, framework='g-cloud-11', status='published')
        ]
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345 has no published services on the framework.")
        ]

    def test_suspend_supplier_services_skips_if_not_on_framework(self):
        """Suppliers not on the framework are skipped with an error log."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": False,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == []
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345 is not on the framework.")
        ]

    def test_suspend_supplier_services_skips_if_agreement_returned(self):
        """Suppliers who already returned their agreement are skipped."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": True,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == []
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345 has returned their framework agreement.")
        ]

    def test_suspend_supplier_services_skips_if_agreement_on_hold(self):
        """Suppliers whose agreement is on hold are skipped."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": "on-hold"
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == []
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345's framework agreement is on hold.")
        ]

    def test_suspend_supplier_services_logs_instead_of_suspending_for_dry_run(self):
        """Dry run: the count is computed and logged, but no service is updated."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, True) == 2
        assert self.data_api_client.find_services.call_args_list == [
            mock.call(supplier_id=12345, framework='g-cloud-11', status='published')
        ]
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.info.call_args_list == [
            mock.call("Setting 2 services to 'disabled' for supplier 12345."),
            mock.call("[DRY RUN] Would suspend service 1 for supplier 12345"),
            mock.call("[DRY RUN] Would suspend service 2 for supplier 12345")
        ]
class TestGetAllEmailAddressesForSupplier:
    """Tests for get_all_email_addresses_for_supplier."""

    def test_get_all_email_addresses_for_supplier(self):
        """Combines active users' emails with the declaration's primary contact.

        NOTE(review): the literal addresses were anonymized to "<EMAIL>" in
        this dump, so the expected set collapses to a single element.
        """
        data_api_client = mock.Mock()
        data_api_client.find_users_iter.return_value = [
            {"emailAddress": "<EMAIL>", "active": True},
            {"emailAddress": "<EMAIL>", "active": True},
            {"emailAddress": "<EMAIL>", "active": False},
        ]
        framework_interest = {
            "frameworkInterest": {
                "supplierId": 12345,
                "declaration": {
                    "primaryContactEmail": "<EMAIL>"
                }
            }
        }
        assert get_all_email_addresses_for_supplier(data_api_client, framework_interest) == {
            "<EMAIL>",
            "<EMAIL>",
            "<EMAIL>"
        }
        assert data_api_client.find_users_iter.call_args_list == [
            mock.call(supplier_id=12345)
        ]
| import mock
from dmscripts.suspend_suppliers_without_agreements import (
suspend_supplier_services, get_all_email_addresses_for_supplier
)
class TestSuspendSupplierServices:
    """Tests for suspend_supplier_services using a mocked API client/logger."""

    def setup(self):
        # Fresh mocks per test; the API client reports two published services.
        self.data_api_client = mock.Mock()
        self.data_api_client.find_services.return_value = {
            'services': [
                {'id': 1},
                {'id': 2}
            ],
            "meta": {
                "total": 2
            }
        }
        self.logger = mock.Mock()

    def test_suspend_supplier_services_updates_services_and_returns_count(self):
        """Happy path: both services are disabled and the count is returned."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 2
        assert self.data_api_client.find_services.call_args_list == [
            mock.call(supplier_id=12345, framework='g-cloud-11', status='published')
        ]
        assert self.data_api_client.update_service_status.call_args_list == [
            mock.call(1, 'disabled', "Suspend services script"),
            mock.call(2, 'disabled', "Suspend services script"),
        ]
        assert self.logger.info.call_args_list == [
            mock.call("Setting 2 services to 'disabled' for supplier 12345.")
        ]

    def test_suspend_supplier_services_skips_if_no_services(self):
        """No published services: nothing is updated and an error is logged."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        self.data_api_client.find_services.return_value = {'services': []}
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == [
            mock.call(supplier_id=12345, framework='g-cloud-11', status='published')
        ]
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345 has no published services on the framework.")
        ]

    def test_suspend_supplier_services_skips_if_not_on_framework(self):
        """Suppliers not on the framework are skipped with an error log."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": False,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == []
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345 is not on the framework.")
        ]

    def test_suspend_supplier_services_skips_if_agreement_returned(self):
        """Suppliers who already returned their agreement are skipped."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": True,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == []
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345 has returned their framework agreement.")
        ]

    def test_suspend_supplier_services_skips_if_agreement_on_hold(self):
        """Suppliers whose agreement is on hold are skipped."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": "on-hold"
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, None) == 0
        assert self.data_api_client.find_services.call_args_list == []
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.error.call_args_list == [
            mock.call("Supplier 12345's framework agreement is on hold.")
        ]

    def test_suspend_supplier_services_logs_instead_of_suspending_for_dry_run(self):
        """Dry run: the count is computed and logged, but no service is updated."""
        framework_interest = {
            "frameworkInterest": {
                "onFramework": True,
                "agreementReturned": False,
                "agreementStatus": None
            }
        }
        assert suspend_supplier_services(
            self.data_api_client, self.logger, 'g-cloud-11', 12345, framework_interest, True) == 2
        assert self.data_api_client.find_services.call_args_list == [
            mock.call(supplier_id=12345, framework='g-cloud-11', status='published')
        ]
        assert self.data_api_client.update_service_status.call_args_list == []
        assert self.logger.info.call_args_list == [
            mock.call("Setting 2 services to 'disabled' for supplier 12345."),
            mock.call("[DRY RUN] Would suspend service 1 for supplier 12345"),
            mock.call("[DRY RUN] Would suspend service 2 for supplier 12345")
        ]
class TestGetAllEmailAddressesForSupplier:
    """Tests for get_all_email_addresses_for_supplier."""

    def test_get_all_email_addresses_for_supplier(self):
        """Combines active users' emails with the declaration's primary contact.

        NOTE(review): the literal addresses were anonymized to "<EMAIL>" in
        this dump, so the expected set collapses to a single element.
        """
        data_api_client = mock.Mock()
        data_api_client.find_users_iter.return_value = [
            {"emailAddress": "<EMAIL>", "active": True},
            {"emailAddress": "<EMAIL>", "active": True},
            {"emailAddress": "<EMAIL>", "active": False},
        ]
        framework_interest = {
            "frameworkInterest": {
                "supplierId": 12345,
                "declaration": {
                    "primaryContactEmail": "<EMAIL>"
                }
            }
        }
        assert get_all_email_addresses_for_supplier(data_api_client, framework_interest) == {
            "<EMAIL>",
            "<EMAIL>",
            "<EMAIL>"
        }
        assert data_api_client.find_users_iter.call_args_list == [
            mock.call(supplier_id=12345)
        ]
| none | 1 | 2.325565 | 2 | |
blog/blog_signals.py | Emiliemorais/ido | 0 | 6618300 | from notifications.signals import notify
from models import Message
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import Group
def message_notify(sender, instance, created, **kwargs):
    """Signal receiver: notify the "admin" group about a Message.

    NOTE(review): `created` is ignored, so this fires on updates too --
    confirm whether only newly created messages should notify.
    """
    group = Group.objects.get(name="admin")
    notify.send(instance, recipient=group, verb=_('send a new message'), description='new message')
def budget_notify(sender, instance, created, **kwargs):
    """Signal receiver: notify the "admin" group about a budget request.

    NOTE(review): `created` is ignored, so this fires on updates too --
    confirm whether only newly created budgets should notify.
    """
    group = Group.objects.get(name="admin")
    notify.send(instance, recipient=group, verb=_('solicited a budget'), description='new solicited budget')
| from notifications.signals import notify
from models import Message
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import Group
def message_notify(sender, instance, created, **kwargs):
    """Signal receiver: notify the "admin" group about a Message.

    NOTE(review): `created` is ignored, so this fires on updates too --
    confirm whether only newly created messages should notify.
    """
    group = Group.objects.get(name="admin")
    notify.send(instance, recipient=group, verb=_('send a new message'), description='new message')
def budget_notify(sender, instance, created, **kwargs):
    """Signal receiver: notify the "admin" group about a budget request.

    NOTE(review): `created` is ignored, so this fires on updates too --
    confirm whether only newly created budgets should notify.
    """
    group = Group.objects.get(name="admin")
    notify.send(instance, recipient=group, verb=_('solicited a budget'), description='new solicited budget')
| none | 1 | 1.98362 | 2 | |
login.py | python20160618/201606 | 0 | 6618301 | def login():
return 'login info'
def profile():
return 'profile info'
def password():
return '<PASSWORD>'
# TODO: fill in the concrete implementations (translated from Chinese: "please complete the specific content")
def login():
    """Return the login info placeholder string (stub)."""
    return 'login info'

def profile():
    """Return the profile info placeholder string (stub)."""
    return 'profile info'

def password():
    """Return the password placeholder (value anonymized in this dump)."""
    return '<PASSWORD>'
# TODO: fill in the concrete implementations (translated from Chinese: "please complete the specific content")
| zh | 0.994561 | # 请完善具体的内容 | 1.715413 | 2 |
kgtk/cli/count.py | vishalbelsare/kgtk | 222 | 6618302 | """Count records or non-empty values per column.
This is a simple command that illustrates several aspects of building
a KGTK command. The following features are illustrated:
* Reading a KGTK input file.
* Writing a KGTK output file.
* Writing non-KGTK output to stdout.
* Writing progress feedback to etderr.
* A command alias with a different default than the base command.
* An expert option.
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
# Define the name of the command and its alias.
COUNT_COMMAND: str = "count"
WC_COMMAND: str = "wc"

# Default option values (the `wc` alias defaults to counting records):
DEFAULT_COUNT_RECORDS: bool = False
DEFAULT_COUNT_RECORDS_WC: bool = True
DEFAULT_COUNT_PROPERTY: str = "count"
def parser():
    """Return the registration metadata (aliases, help, description) for this command."""
    description = (
        'Count the number of records in a KGTK file, excluding the header record, '
        'or count the number of non-empty values per column. Note: not non-empty unique values, '
        'that is what `kgtk unique` does.'
        '\n\nAdditional options are shown in expert help.\nkgtk --expert lift --help'
    )
    return {
        'aliases': [WC_COMMAND],
        'help': 'Count records or non-empty values per column.',
        'description': description,
    }
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
    """Register this command's CLI options on *parser*.

    Args:
        parser: the KGTK argument parser to extend.
        parsed_shared_args: shared flags already parsed by the kgtk driver
            (provides `_expert` and `_command`).
    """
    from kgtk.exceptions import KGTKException
    from kgtk.lift.kgtklift import KgtkLift
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
    from kgtk.utils.argparsehelpers import optional_bool
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions

    _expert: bool = parsed_shared_args._expert
    _command: str = parsed_shared_args._command

    # This helper function makes it easy to suppress options from
    # the help message. The options are still there, and initialize
    # what they need to initialize.
    def h(msg: str)->str:
        if _expert:
            return msg
        else:
            return SUPPRESS

    parser.add_input_file()
    parser.add_output_file()

    # The default value for this option depends upon the command used:
    # the `wc` alias defaults to counting records.
    parser.add_argument('-l', '--lines', dest="count_records", metavar="True/False",
                        help="If true, count records and print a single number to stdout. " +
                        "If false, count non-empty values per column and produce a simple KGTK output file. (default=%(default)s).",
                        type=optional_bool, nargs='?', const=True,
                        default=DEFAULT_COUNT_RECORDS_WC if _command == WC_COMMAND else DEFAULT_COUNT_RECORDS)

    # This is an expert option. It will not show up on `--help` without `--expert`:
    parser.add_argument("--count-property", dest="count_property",
                        help=h("The property used for column count output edges. (default=%(default)s)."),
                        default=DEFAULT_COUNT_PROPERTY)

    KgtkReader.add_debug_arguments(parser, expert=_expert)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)
    KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
        output_file: KGTKFiles,
        count_records: bool = DEFAULT_COUNT_RECORDS,
        count_property: str = DEFAULT_COUNT_PROPERTY,
        errors_to_stdout: bool = False,
        errors_to_stderr: bool = True,
        show_options: bool = False,
        verbose: bool = False,
        very_verbose: bool = False,
        **kwargs # Whatever KgtkReaderOptions and KgtkValueOptions want.
        )->int:
    """Count records, or non-empty values per column, of a KGTK file.

    When count_records is true, prints a single record count to stdout.
    Otherwise writes a simple (node1, label, node2) KGTK file mapping each
    column name to its non-empty value count via *count_property*.
    Returns 0 on success; raises KGTKException on any failure.
    """
    import sys
    from pathlib import Path  # BUG FIX: Path was referenced below but never imported.

    from kgtk.exceptions import KGTKException
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
    from kgtk.io.kgtkwriter import KgtkWriter
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions

    input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
    output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)

    # Select where to send error messages, defaulting to stderr.
    error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr

    # Build the option structures.
    input_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="input", fallback=True)
    reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
    value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)

    # Show the final option structures for debugging and documentation.
    if show_options:
        print("--input-file=%s" % str(input_kgtk_file), file=error_file, flush=True)
        print("--output-file=%s" % str(output_kgtk_file), file=error_file, flush=True)
        print("--lines=%s" % repr(count_records), file=error_file, flush=True)
        print("--count-property=%s" % repr(count_property), file=error_file, flush=True)
        # BUG FIX: this block referenced the undefined name
        # `label_reader_options`, raising NameError whenever --show-options
        # was used. Show the option structures that actually exist instead.
        input_reader_options.show(out=error_file, who="input")
        reader_options.show(out=error_file)
        value_options.show(out=error_file)
        print("=======", file=error_file, flush=True)

    try:
        if verbose:
            print("Opening the input file %s" % str(input_kgtk_file), file=error_file, flush=True)
        kr: KgtkReader = KgtkReader.open(input_kgtk_file,
                                         options=reader_options,
                                         value_options=value_options,
                                         error_file=error_file,
                                         verbose=verbose,
                                         very_verbose=very_verbose,
                                         )

        row: typing.List[str]
        if count_records:
            # Count data records; the header is not delivered by the reader.
            record_count: int = 0
            for row in kr:
                record_count += 1
            print("%d" % record_count, file=sys.stdout, flush=True)
        else:
            if verbose:
                print("Opening the output file %s" % str(output_kgtk_file), file=error_file, flush=True)
            kw: KgtkWriter = KgtkWriter.open(["node1", "label", "node2"],
                                             output_kgtk_file,
                                             verbose=verbose,
                                             very_verbose=very_verbose,
                                             )
            # One counter per input column, tallying non-empty values.
            record_counts: typing.List[int] = [0 for idx in range(kr.column_count)]
            idx: int
            for row in kr:
                item: str
                for idx, item in enumerate(row):
                    if len(item) > 0:
                        record_counts[idx] += 1
            count: int
            for idx, count in enumerate(record_counts):
                kw.write([kr.column_names[idx], count_property, str(count)])
            kw.close()
        # Close the reader on both paths (previously only after the column-count branch).
        kr.close()
        return 0

    except SystemExit as e:
        raise KGTKException("Exit requested")
    except Exception as e:
        raise KGTKException(str(e)) from e
| """Count records or non-empty values per column.
This is a simple command that illustrates several aspects of building
a KGTK command. The following features are illustrated:
* Reading a KGTK input file.
* Writing a KGTK output file.
* Writing non-KGTK output to stdout.
* Writing progress feedback to etderr.
* A command alias with a different default than the base command.
* An expert option.
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
# Decine the name of the command and its alias.
COUNT_COMMAND: str = "count"
WC_COMMAND: str = "wc"
# Default option values:
DEFAULT_COUNT_RECORDS: bool = False
DEFAULT_COUNT_RECORDS_WC: bool = True
DEFAULT_COUNT_PROPERTY: str = "count"
def parser():
return {
'aliases': [ WC_COMMAND ],
'help': 'Count records or non-empty values per column.',
'description': 'Count the number of records in a KGTK file, excluding the header record, ' +
'or count the number of non-empty values per column. Note: not non-empty unique values, ' +
'that is what `kgtk unique` does.' +
'\n\nAdditional options are shown in expert help.\nkgtk --expert lift --help'
}
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.exceptions import KGTKException
from kgtk.lift.kgtklift import KgtkLift
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
_expert: bool = parsed_shared_args._expert
_command: str = parsed_shared_args._command
# This helper function makes it easy to suppress options from
# The help message. The options are still there, and initialize
# what they need to initialize.
def h(msg: str)->str:
if _expert:
return msg
else:
return SUPPRESS
parser.add_input_file()
parser.add_output_file()
# The default value for this option depends upon the command used.
parser.add_argument('-l', '--lines', dest="count_records", metavar="True/False",
help="If true, count records and print a single number to stdout. " +
"If false, count non-empty values per column and produce a simple KGTK output file. (default=%(default)s).",
type=optional_bool, nargs='?', const=True,
default=DEFAULT_COUNT_RECORDS_WC if _command == WC_COMMAND else DEFAULT_COUNT_RECORDS)
# This is an expert option. It will not show up on `--help` without `--expert`:
parser.add_argument( "--count-property", dest="count_property",
help=h("The property used for column count output edges. (default=%(default)s)."),
default=DEFAULT_COUNT_PROPERTY)
KgtkReader.add_debug_arguments(parser, expert=_expert)
KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)
KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
output_file: KGTKFiles,
count_records: bool = DEFAULT_COUNT_RECORDS,
count_property: str = DEFAULT_COUNT_PROPERTY,
errors_to_stdout: bool = False,
errors_to_stderr: bool = True,
show_options: bool = False,
verbose: bool = False,
very_verbose: bool = False,
**kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.
)->int:
import sys
from kgtk.exceptions import KGTKException
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)
# Select where to send error messages, defaulting to stderr.
error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
# Build the option structures.
input_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="input", fallback=True)
reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
# Show the final option structures for debugging and documentation.
if show_options:
print("--input-file=%s" % str(input_kgtk_file), file=error_file, flush=True)
print("--output-file=%s" % str(output_kgtk_file), file=error_file, flush=True)
print("--lines=%s" % repr(count_records), file=error_file, flush=True)
print("--count-property=%s" % repr(count_property), file=error_file, flush=True)
input_reader_options.show(out=error_file, who="input")
label_reader_options.show(out=error_file, who="label")
value_options.show(out=error_file)
print("=======", file=error_file, flush=True)
try:
if verbose:
print("Opening the input file %s" % str(input_kgtk_file), file=error_file, flush=True)
kr: KgtkReader = KgtkReader.open(input_kgtk_file,
options=reader_options,
value_options = value_options,
error_file=error_file,
verbose=verbose,
very_verbose=very_verbose,
)
row: typing.List[str]
if count_records:
record_count: int = 0
for row in kr:
record_count += 1
print("%d" % record_count, file=sys.stdout, flush=True)
else:
if verbose:
print("Opening the output file %s" % str(output_kgtk_file), file=error_file, flush=True)
kw: KgtkWriter = KgtkWriter.open(["node1", "label", "node2" ],
output_kgtk_file,
verbose=verbose,
very_verbose=very_verbose,
)
record_counts: typing.List[int] = [ 0 for idx in range(kr.column_count) ]
idx: int
for row in kr:
item: str
for idx, item in enumerate(row):
if len(item) > 0:
record_counts[idx] += 1
count: int
for idx, count in enumerate(record_counts):
kw.write([ kr.column_names[idx], count_property, str(record_counts[idx]) ])
kw.close()
kr.close()
return 0
except SystemExit as e:
raise KGTKException("Exit requested")
except Exception as e:
raise KGTKException(str(e))
| en | 0.713567 | Count records or non-empty values per column. This is a simple command that illustrates several aspects of building a KGTK command. The following features are illustrated: * Reading a KGTK input file. * Writing a KGTK output file. * Writing non-KGTK output to stdout. * Writing progress feedback to etderr. * A command alias with a different default than the base command. * An expert option. # Decine the name of the command and its alias. # Default option values: Parse arguments Args: parser (argparse.ArgumentParser) # This helper function makes it easy to suppress options from # The help message. The options are still there, and initialize # what they need to initialize. # The default value for this option depends upon the command used. # This is an expert option. It will not show up on `--help` without `--expert`: # Whatever KgtkFileOptions and KgtkValueOptions want. # Select where to send error messages, defaulting to stderr. # Build the option structures. # Show the final option structures for debugging and documentation. | 3.500104 | 4 |
mak/libs/pyxx/cxx/grammar/statement/selection.py | motor-dev/Motor | 0 | 6618303 | <gh_stars>0
"""
selection-statement:
if constexpr? ( init-statement? condition ) statement
if constexpr? ( init-statement? condition ) statement else statement
if !? consteval compound-statement
if !? consteval compound-statement else statement
switch ( init-statement? condition ) statement
"""
import glrp
from ...parser import cxx98, deprecated_cxx17, cxx17, cxx23
from motor_typing import TYPE_CHECKING
@glrp.rule('selection-statement : "switch" "(" condition ")" statement')
@glrp.rule('selection-statement : "switch" "(" init-statement condition ")" statement')
@cxx98
def selection_statement(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('selection-statement : "if" "(" condition ")" statement')
@glrp.rule('selection-statement : "if" "(" condition ")" statement [prec:left,1]"else" statement')
@cxx98
@deprecated_cxx17
def selection_statement_if(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('selection-statement : "if" "constexpr"? "(" condition ")" statement')
@glrp.rule('selection-statement : "if" "constexpr"? "(" condition ")" statement [prec:left,1]"else" statement')
@glrp.rule('selection-statement : "if" "constexpr"? "(" init-statement condition ")" statement')
@glrp.rule(
'selection-statement : "if" "constexpr"? "(" init-statement condition ")" statement [prec:left,1]"else" statement'
)
@cxx17
def selection_statement_cxx17(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('constexpr? : "constexpr"')
@glrp.rule('constexpr? : ')
@cxx17
def constexpr_opt_cxx17(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('selection-statement : "if" "!"? "consteval" compound-statement')
@glrp.rule('selection-statement : "if" "!"? "consteval" compound-statement [prec:left,1]"else" statement')
@cxx23
def selection_statement_cxx23(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('"!"? : "!"')
@glrp.rule('"!"? :')
@cxx23
def not_opt_cxx23(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from ...parser import CxxParser | """
selection-statement:
if constexpr? ( init-statement? condition ) statement
if constexpr? ( init-statement? condition ) statement else statement
if !? consteval compound-statement
if !? consteval compound-statement else statement
switch ( init-statement? condition ) statement
"""
import glrp
from ...parser import cxx98, deprecated_cxx17, cxx17, cxx23
from motor_typing import TYPE_CHECKING
@glrp.rule('selection-statement : "switch" "(" condition ")" statement')
@glrp.rule('selection-statement : "switch" "(" init-statement condition ")" statement')
@cxx98
def selection_statement(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('selection-statement : "if" "(" condition ")" statement')
@glrp.rule('selection-statement : "if" "(" condition ")" statement [prec:left,1]"else" statement')
@cxx98
@deprecated_cxx17
def selection_statement_if(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('selection-statement : "if" "constexpr"? "(" condition ")" statement')
@glrp.rule('selection-statement : "if" "constexpr"? "(" condition ")" statement [prec:left,1]"else" statement')
@glrp.rule('selection-statement : "if" "constexpr"? "(" init-statement condition ")" statement')
@glrp.rule(
'selection-statement : "if" "constexpr"? "(" init-statement condition ")" statement [prec:left,1]"else" statement'
)
@cxx17
def selection_statement_cxx17(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('constexpr? : "constexpr"')
@glrp.rule('constexpr? : ')
@cxx17
def constexpr_opt_cxx17(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('selection-statement : "if" "!"? "consteval" compound-statement')
@glrp.rule('selection-statement : "if" "!"? "consteval" compound-statement [prec:left,1]"else" statement')
@cxx23
def selection_statement_cxx23(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('"!"? : "!"')
@glrp.rule('"!"? :')
@cxx23
def not_opt_cxx23(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from ...parser import CxxParser | en | 0.449919 | selection-statement: if constexpr? ( init-statement? condition ) statement if constexpr? ( init-statement? condition ) statement else statement if !? consteval compound-statement if !? consteval compound-statement else statement switch ( init-statement? condition ) statement # type: (CxxParser, glrp.Production) -> None # type: (CxxParser, glrp.Production) -> None # type: (CxxParser, glrp.Production) -> None # type: (CxxParser, glrp.Production) -> None # type: (CxxParser, glrp.Production) -> None # type: (CxxParser, glrp.Production) -> None | 2.501818 | 3 |
trac/jira/jira.py | glensc/trac-plugin-jira | 1 | 6618304 | <filename>trac/jira/jira.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2015 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# Author: <NAME> <<EMAIL>>
from trac.core import implements, Component
from trac.wiki import IWikiSyntaxProvider
import trac
if [int(x) for x in trac.__version__.split('.')] >= [0, 11]:
# trac 0.11
from genshi.builder import tag
else:
# trac 0.10
from trac.util.html import html as tag
class TracJiraLink(Component):
implements(IWikiSyntaxProvider)
ticket_regexp = r"\b[A-Z]+?-(?P<id>\d+)\b"
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
jira_url = self.env.config.get('jira', 'url')
def ticket(formatter, match, fullmatch):
return tag.a(tag.span(u'\u200b', class_="icon"), match, class_="ext-link", href=jira_url % int(fullmatch.group('id')))
if jira_url:
yield (self.ticket_regexp, ticket)
else:
self.log.warn('url not set in configuration. Jira links disabled')
def get_link_resolvers(self):
return []
| <filename>trac/jira/jira.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2015 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# Author: <NAME> <<EMAIL>>
from trac.core import implements, Component
from trac.wiki import IWikiSyntaxProvider
import trac
if [int(x) for x in trac.__version__.split('.')] >= [0, 11]:
# trac 0.11
from genshi.builder import tag
else:
# trac 0.10
from trac.util.html import html as tag
class TracJiraLink(Component):
implements(IWikiSyntaxProvider)
ticket_regexp = r"\b[A-Z]+?-(?P<id>\d+)\b"
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
jira_url = self.env.config.get('jira', 'url')
def ticket(formatter, match, fullmatch):
return tag.a(tag.span(u'\u200b', class_="icon"), match, class_="ext-link", href=jira_url % int(fullmatch.group('id')))
if jira_url:
yield (self.ticket_regexp, ticket)
else:
self.log.warn('url not set in configuration. Jira links disabled')
def get_link_resolvers(self):
return []
| en | 0.92522 | # -*- coding: utf-8 -*- # # Copyright (C) 2008-2015 <NAME> <<EMAIL>> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.com/license.html. # # Author: <NAME> <<EMAIL>> # trac 0.11 # trac 0.10 # IWikiSyntaxProvider methods | 2.125382 | 2 |
netsuitesdk/api/accountingPeriod.py | dokka-ai/netsuite-sdk-py2.7 | 0 | 6618305 | from __future__ import absolute_import
from .base import ApiBase
import logging
from ..internal.utils import PaginatedSearch
logger = logging.getLogger(__name__)
class AccountingPeriod(ApiBase):
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name=u'AccountingPeriod')
@staticmethod
def compare(item1, item2):
q1, y1 = item1[0].split(" ")
q2, y2 = item2[0].split(" ")
if y1 > y2:
return -1
elif y1 < y2:
return 1
if y1 == y2:
if q1 > q2:
return -1
else:
return 1
def get_periods(self):
_p = {}
_periods = self.get_all()
for period in _periods:
if not period['isQuarter'] and not period['isYear'] and not period['closed']:
if period['parent']['name'] not in _p:
_p.setdefault(period['parent']['name'], [])
_p[period['parent']['name']].append((period['internalId'], period['periodName']))
dictionary_items = _p.items()
sorted_items = sorted(dictionary_items, cmp=self.compare)
periods = []
for q in sorted_items:
periods.append({'internalId': 0, 'name': q[0]})
for i in reversed(q[1]):
periods.append({'internalId': i[0], 'name': ' ' + i[1]})
return periods
def get_all(self):
_false = self.ns_client.SearchBooleanField(searchValue=False)
basic_search = self.ns_client.basic_search_factory(
u'AccountingPeriod',
isInactive=_false,
isQuarter=_false,
isYear=_false,
apLocked=_false,
allLocked=_false,
closed=_false,
)
paginated_search = PaginatedSearch(client=self.ns_client,
type_name='AccountingPeriod',
basic_search=basic_search,
pageSize=50)
return list(self._paginated_search_to_generator(paginated_search=paginated_search))
| from __future__ import absolute_import
from .base import ApiBase
import logging
from ..internal.utils import PaginatedSearch
logger = logging.getLogger(__name__)
class AccountingPeriod(ApiBase):
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name=u'AccountingPeriod')
@staticmethod
def compare(item1, item2):
q1, y1 = item1[0].split(" ")
q2, y2 = item2[0].split(" ")
if y1 > y2:
return -1
elif y1 < y2:
return 1
if y1 == y2:
if q1 > q2:
return -1
else:
return 1
def get_periods(self):
_p = {}
_periods = self.get_all()
for period in _periods:
if not period['isQuarter'] and not period['isYear'] and not period['closed']:
if period['parent']['name'] not in _p:
_p.setdefault(period['parent']['name'], [])
_p[period['parent']['name']].append((period['internalId'], period['periodName']))
dictionary_items = _p.items()
sorted_items = sorted(dictionary_items, cmp=self.compare)
periods = []
for q in sorted_items:
periods.append({'internalId': 0, 'name': q[0]})
for i in reversed(q[1]):
periods.append({'internalId': i[0], 'name': ' ' + i[1]})
return periods
def get_all(self):
_false = self.ns_client.SearchBooleanField(searchValue=False)
basic_search = self.ns_client.basic_search_factory(
u'AccountingPeriod',
isInactive=_false,
isQuarter=_false,
isYear=_false,
apLocked=_false,
allLocked=_false,
closed=_false,
)
paginated_search = PaginatedSearch(client=self.ns_client,
type_name='AccountingPeriod',
basic_search=basic_search,
pageSize=50)
return list(self._paginated_search_to_generator(paginated_search=paginated_search))
| none | 1 | 2.453673 | 2 | |
memory.py | KRLGroup/memory-wrap | 2 | 6618306 | <filename>memory.py
import torch
import torch.nn as nn
from entmax import sparsemax
_EPSILON = 1e-6
def _vector_norms(v:torch.Tensor)->torch.Tensor:
""" Computes the vector norms
Args:
v: The vector from which there must be calculated the norms
Returns:
A tensor containing the norms of input vector v
"""
squared_norms = torch.sum(v * v, dim=1, keepdim=True)
return torch.sqrt(squared_norms + _EPSILON)
class MLP(nn.Module):
'''
Multi-layer perceptron class
'''
def __init__(self, input_size:int, hidden_size:int, output_size:int, activation:torch.nn.modules.activation=torch.nn.ReLU()):
""" Init function to initialize a multi-layer perceptron
Args:
input_size (int): Input's dimension
hidden_size (int): Number of units in the hidden layer
output_size (int): Number of output units
activation (torch.nn.modules.activation, optional): Activation function of the hidden layer. Defaults to torch.nn.ReLU().
"""
super(MLP, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.activation = activation
self.fc2 = torch.nn.Linear(hidden_size, output_size)
def forward(self, x:torch.Tensor)->torch.Tensor:
""" Forward call of multi-layer perceptron
Args:
x (torch.Tensor): Input tensor
Returns:
torch.Tensor: the output of multi-layer perceptron.
"""
hidden = self.fc1(x)
relu = self.activation(hidden)
output = self.fc2(relu)
return output
class MemoryWrapLayer(nn.Module):
def __init__(self, encoder_output_dim:int, output_dim:int, mlp_activation:torch.nn.modules.activation=torch.nn.ReLU()):
""" Initialize a Memory Wrap layer
Args:
encoder_output_dim (int): Dimensions of the last layer of the encoder
output_dim (int): Number of desired output units.
mlp_activation (torch.nn.modules.activation, optional): Activation function of the hidden layer in the multi-layer perceptron. Defaults to torch.nn.ReLU().
"""
super(MemoryWrapLayer, self).__init__()
final_input_dim = encoder_output_dim*2
self.fc = MLP(final_input_dim,final_input_dim*2,output_dim,mlp_activation)
def forward(self, encoder_output:torch.Tensor, memory_set:torch.Tensor, return_weights:bool=False)->torch.Tensor:
"""Forward call of MemoryWrap.
Args:
input: A tensor of dimensions [b,dim] where dim is the dimension required by the encoder
memory_set: Memory set. A tensor of dimension [m,dim] where m is the number of examples in memory
parsed_memory: a flag to indicate if the memory set is already parsed by the encoder. It is useful
to reduce the testing time if you fix the memory or if you parse the whole training set.
Returns:
A tuple `(output, content-weight)` where `output`
is the output tensor, `content_weights` is a tensor containing the
read weights for sample in memory. If return_weights is False, then
only `output` is returned.
"""
encoder_norm = encoder_output / _vector_norms(encoder_output)
memory_norm = memory_set / _vector_norms(memory_set)
sim = torch.mm(encoder_norm,memory_norm.transpose(0,1))
content_weights = sparsemax(sim,dim=1)
memory_vector = torch.matmul(content_weights,memory_set)
final_input = torch.cat([encoder_output,memory_vector],1)
output = self.fc(final_input)
if return_weights:
return output, content_weights
else:
return output
class BaselineMemory(nn.Module):
def __init__(self, encoder_output_dim:int, output_dim:int,mlp_activation:torch.nn.modules.activation=torch.nn.ReLU()):
""" Initialize the layer opf the baseline that uses only the memory set to compute the output
Args:
encoder_output_dim (int): Dimensions of the last layer of the encoder
output_dim (int): Number of desired output units.
mlp_activation (torch.nn.modules.activation, optional): Activation function of the hidden layer in the multi-layer perceptron. Defaults to torch.nn.ReLU().
"""
super(BaselineMemory, self).__init__()
final_input_dim = encoder_output_dim
self.fc = MLP(final_input_dim,final_input_dim*2,output_dim,mlp_activation)
def forward(self, encoder_output:torch.Tensor, memory_set:torch.Tensor, return_weights:bool=False)->torch.Tensor:
"""Forward call of MemoryWrap.
Args:
input: A tensor of dimensions [b,dim] where dim is the dimension required by the encoder
memory_set: Memory set. A tensor of dimension [m,dim] where m is the number of examples in memory
parsed_memory: a flag to indicate if the memory set is already parsed by the encoder
Returns:
A tuple `(output, content-weight)` where `output`
is the output tensor, `content_weights` is a tensor containing the
read weights for sample in memory. If return_weights is False, then
only `output` is returned.
"""
encoder_norm = encoder_output / _vector_norms(encoder_output)
memory_norm = memory_set / _vector_norms(memory_set)
sim = torch.mm(encoder_norm,memory_norm.transpose(0,1))
content_weights = sparsemax(sim,dim=1)
memory_vector = torch.matmul(content_weights,memory_set)
output = self.fc(memory_vector)
if return_weights:
return output, content_weights
else:
return output | <filename>memory.py
import torch
import torch.nn as nn
from entmax import sparsemax
_EPSILON = 1e-6
def _vector_norms(v:torch.Tensor)->torch.Tensor:
""" Computes the vector norms
Args:
v: The vector from which there must be calculated the norms
Returns:
A tensor containing the norms of input vector v
"""
squared_norms = torch.sum(v * v, dim=1, keepdim=True)
return torch.sqrt(squared_norms + _EPSILON)
class MLP(nn.Module):
'''
Multi-layer perceptron class
'''
def __init__(self, input_size:int, hidden_size:int, output_size:int, activation:torch.nn.modules.activation=torch.nn.ReLU()):
""" Init function to initialize a multi-layer perceptron
Args:
input_size (int): Input's dimension
hidden_size (int): Number of units in the hidden layer
output_size (int): Number of output units
activation (torch.nn.modules.activation, optional): Activation function of the hidden layer. Defaults to torch.nn.ReLU().
"""
super(MLP, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.activation = activation
self.fc2 = torch.nn.Linear(hidden_size, output_size)
def forward(self, x:torch.Tensor)->torch.Tensor:
""" Forward call of multi-layer perceptron
Args:
x (torch.Tensor): Input tensor
Returns:
torch.Tensor: the output of multi-layer perceptron.
"""
hidden = self.fc1(x)
relu = self.activation(hidden)
output = self.fc2(relu)
return output
class MemoryWrapLayer(nn.Module):
def __init__(self, encoder_output_dim:int, output_dim:int, mlp_activation:torch.nn.modules.activation=torch.nn.ReLU()):
""" Initialize a Memory Wrap layer
Args:
encoder_output_dim (int): Dimensions of the last layer of the encoder
output_dim (int): Number of desired output units.
mlp_activation (torch.nn.modules.activation, optional): Activation function of the hidden layer in the multi-layer perceptron. Defaults to torch.nn.ReLU().
"""
super(MemoryWrapLayer, self).__init__()
final_input_dim = encoder_output_dim*2
self.fc = MLP(final_input_dim,final_input_dim*2,output_dim,mlp_activation)
def forward(self, encoder_output:torch.Tensor, memory_set:torch.Tensor, return_weights:bool=False)->torch.Tensor:
"""Forward call of MemoryWrap.
Args:
input: A tensor of dimensions [b,dim] where dim is the dimension required by the encoder
memory_set: Memory set. A tensor of dimension [m,dim] where m is the number of examples in memory
parsed_memory: a flag to indicate if the memory set is already parsed by the encoder. It is useful
to reduce the testing time if you fix the memory or if you parse the whole training set.
Returns:
A tuple `(output, content-weight)` where `output`
is the output tensor, `content_weights` is a tensor containing the
read weights for sample in memory. If return_weights is False, then
only `output` is returned.
"""
encoder_norm = encoder_output / _vector_norms(encoder_output)
memory_norm = memory_set / _vector_norms(memory_set)
sim = torch.mm(encoder_norm,memory_norm.transpose(0,1))
content_weights = sparsemax(sim,dim=1)
memory_vector = torch.matmul(content_weights,memory_set)
final_input = torch.cat([encoder_output,memory_vector],1)
output = self.fc(final_input)
if return_weights:
return output, content_weights
else:
return output
class BaselineMemory(nn.Module):
def __init__(self, encoder_output_dim:int, output_dim:int,mlp_activation:torch.nn.modules.activation=torch.nn.ReLU()):
""" Initialize the layer opf the baseline that uses only the memory set to compute the output
Args:
encoder_output_dim (int): Dimensions of the last layer of the encoder
output_dim (int): Number of desired output units.
mlp_activation (torch.nn.modules.activation, optional): Activation function of the hidden layer in the multi-layer perceptron. Defaults to torch.nn.ReLU().
"""
super(BaselineMemory, self).__init__()
final_input_dim = encoder_output_dim
self.fc = MLP(final_input_dim,final_input_dim*2,output_dim,mlp_activation)
def forward(self, encoder_output:torch.Tensor, memory_set:torch.Tensor, return_weights:bool=False)->torch.Tensor:
"""Forward call of MemoryWrap.
Args:
input: A tensor of dimensions [b,dim] where dim is the dimension required by the encoder
memory_set: Memory set. A tensor of dimension [m,dim] where m is the number of examples in memory
parsed_memory: a flag to indicate if the memory set is already parsed by the encoder
Returns:
A tuple `(output, content-weight)` where `output`
is the output tensor, `content_weights` is a tensor containing the
read weights for sample in memory. If return_weights is False, then
only `output` is returned.
"""
encoder_norm = encoder_output / _vector_norms(encoder_output)
memory_norm = memory_set / _vector_norms(memory_set)
sim = torch.mm(encoder_norm,memory_norm.transpose(0,1))
content_weights = sparsemax(sim,dim=1)
memory_vector = torch.matmul(content_weights,memory_set)
output = self.fc(memory_vector)
if return_weights:
return output, content_weights
else:
return output | en | 0.66983 | Computes the vector norms Args: v: The vector from which there must be calculated the norms Returns: A tensor containing the norms of input vector v Multi-layer perceptron class Init function to initialize a multi-layer perceptron Args: input_size (int): Input's dimension hidden_size (int): Number of units in the hidden layer output_size (int): Number of output units activation (torch.nn.modules.activation, optional): Activation function of the hidden layer. Defaults to torch.nn.ReLU(). Forward call of multi-layer perceptron Args: x (torch.Tensor): Input tensor Returns: torch.Tensor: the output of multi-layer perceptron. Initialize a Memory Wrap layer Args: encoder_output_dim (int): Dimensions of the last layer of the encoder output_dim (int): Number of desired output units. mlp_activation (torch.nn.modules.activation, optional): Activation function of the hidden layer in the multi-layer perceptron. Defaults to torch.nn.ReLU(). Forward call of MemoryWrap. Args: input: A tensor of dimensions [b,dim] where dim is the dimension required by the encoder memory_set: Memory set. A tensor of dimension [m,dim] where m is the number of examples in memory parsed_memory: a flag to indicate if the memory set is already parsed by the encoder. It is useful to reduce the testing time if you fix the memory or if you parse the whole training set. Returns: A tuple `(output, content-weight)` where `output` is the output tensor, `content_weights` is a tensor containing the read weights for sample in memory. If return_weights is False, then only `output` is returned. Initialize the layer opf the baseline that uses only the memory set to compute the output Args: encoder_output_dim (int): Dimensions of the last layer of the encoder output_dim (int): Number of desired output units. mlp_activation (torch.nn.modules.activation, optional): Activation function of the hidden layer in the multi-layer perceptron. Defaults to torch.nn.ReLU(). 
Forward call of MemoryWrap. Args: input: A tensor of dimensions [b,dim] where dim is the dimension required by the encoder memory_set: Memory set. A tensor of dimension [m,dim] where m is the number of examples in memory parsed_memory: a flag to indicate if the memory set is already parsed by the encoder Returns: A tuple `(output, content-weight)` where `output` is the output tensor, `content_weights` is a tensor containing the read weights for sample in memory. If return_weights is False, then only `output` is returned. | 3.514915 | 4 |
authnapp/migrations/0003_default_age.py | RashidRysaev/geekshop | 0 | 6618307 | # Generated by Django 2.2.18 on 2021-03-07 18:18
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
("authnapp", "0002_user_model_extend"),
]
operations = [
migrations.AlterField(
model_name="shopuser",
name="activation_key_expires",
field=models.DateTimeField(
default=datetime.datetime(2021, 3, 9, 18, 18, 33, 204462, tzinfo=utc), verbose_name="актуальность ключа"
),
),
migrations.AlterField(
model_name="shopuser",
name="age",
field=models.PositiveIntegerField(default=18, verbose_name="возраст"),
),
]
| # Generated by Django 2.2.18 on 2021-03-07 18:18
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
("authnapp", "0002_user_model_extend"),
]
operations = [
migrations.AlterField(
model_name="shopuser",
name="activation_key_expires",
field=models.DateTimeField(
default=datetime.datetime(2021, 3, 9, 18, 18, 33, 204462, tzinfo=utc), verbose_name="актуальность ключа"
),
),
migrations.AlterField(
model_name="shopuser",
name="age",
field=models.PositiveIntegerField(default=18, verbose_name="возраст"),
),
]
| en | 0.85618 | # Generated by Django 2.2.18 on 2021-03-07 18:18 | 1.778659 | 2 |
diffrawconfig.py | andycranston/raritan-scp | 1 | 6618308 | #! /usr/bin/python3
#
# @(!--#) @(#) compareconfigs.py, version 001, 08-june-2019
#
# compare two raw_config.txt files extracted from a Raritan PDU
#
#################################################################
#
# imports
#
import sys
import os
import argparse
#################################################################
def readrawconfig(configfilename):
    """Read a Raritan raw_config.txt file into a dict.

    Blank lines and lines whose first character is '#' are skipped.
    Every other line must have the form key=value (the value may itself
    contain further '=' characters - only the first one splits the line).
    Malformed lines and duplicate keys are reported as warnings on
    stderr and ignored.

    Args:
        configfilename: path of the raw_config.txt file to read.

    Returns:
        dict mapping each config key to its (string) value.

    Exits:
        Terminates the program with a non-zero status if the file
        cannot be opened.
    """
    global progname

    try:
        configfile = open(configfilename, 'r', encoding='utf-8')
    except IOError:
        # Bug fix: the original format string had two '{}' placeholders
        # but was passed only configfilename, so this error path raised
        # IndexError instead of printing; progname is now supplied too.
        print('{}: unable to open raw config file "{}" for reading'.format(progname, configfilename), file=sys.stderr)
        # Bug fix: exit with a failure status (the original exited 0,
        # which signalled success to the caller despite the fatal error).
        sys.exit(2)

    config = {}

    linenumber = 0

    for line in configfile:
        linenumber += 1

        line = line.strip()

        if len(line) == 0:
            continue

        if line[0] == '#':
            continue

        equalsposition = line.find('=')

        if equalsposition == -1:
            print('{}: warning: line {} in config file "{}" does not contain an equals sign - ignoring'.format(progname, linenumber, configfilename), file=sys.stderr)
            continue

        key = line[0:equalsposition]
        value = line[equalsposition+1:]

        if key in config:
            # Bug fix: the duplicate-key warning previously interpolated
            # equalsposition (an integer offset) where the key name belongs.
            print('{}: warning: the key "{}" in line {} in config file "{}" is a duplicate - ignoring'.format(progname, key, linenumber, configfilename), file=sys.stderr)
        else:
            config[key] = value

    configfile.close()

    if len(config) == 0:
        print('{}: warning: config file "{}" does not contain any keys - continuing'.format(progname, configfilename), file=sys.stderr)

    return config
#################################################################
def compareconfigs(first, second):
    """Compare two config dicts and return an ordered sequence of diff strings.

    Order: additions (keys only in *second*), then changes (keys in both
    with different values), then deletions (keys only in *first*).  The
    result is a list; it remains compatible with the original int-keyed
    dict contract (indexable 0..len-1, len() works the same).
    """
    diffs = []
    # report keys in second config that are not in the first
    for key in second:
        if key not in first:
            diffs.append('Key "{}" added\n    New value = "{}"'.format(key, second[key]))
    # report keys in both that have a different value
    for key in first:
        if key in second and first[key] != second[key]:
            diffs.append('Key "{}" changed\n    Old value = "{}"\n    New value = "{}"'.format(key, first[key], second[key]))
    # report keys in first config that are not in the second
    for key in first:
        if key not in second:
            diffs.append('Key "{}" deleted\n    Old value = "{}"'.format(key, first[key]))
    return diffs
#################################################################
def main():
    """Parse the two file-name arguments, diff the configs and report.

    Prints each difference to stdout.  Returns 0 when the configs are
    identical and 1 otherwise (used as the process exit status).
    """
    # bug fix: removed the original "global progame" - the name was
    # misspelled and never used
    parser = argparse.ArgumentParser()
    parser.add_argument('first', help='name of first raw_config.txt file')
    parser.add_argument('second', help='name of second raw_config.txt file')
    args = parser.parse_args()
    firstconfig = readrawconfig(args.first)
    secondconfig = readrawconfig(args.second)
    diffs = compareconfigs(firstconfig, secondconfig)
    numdiffs = len(diffs)
    if numdiffs == 0:
        return 0
    # indexing works for both the list and the original int-keyed dict form
    for i in range(numdiffs):
        print(diffs[i])
    return 1
#################################################################
# derive the program name once; readrawconfig() uses it in warning messages
progname = os.path.basename(sys.argv[0])
sys.exit(main())
# end of file
| #! /usr/bin/python3
#
# @(!--#) @(#) compareconfigs.py, version 001, 08-june-2019
#
# compare two raw_config.txt files extracted from a Raritan PDU
#
#################################################################
#
# imports
#
import sys
import os
import argparse
#################################################################
def readrawconfig(configfilename):
global progname
try:
configfile = open(configfilename, 'r', encoding='utf-8')
except IOError:
print('{}: unable to open raw config file "{}" for reading'.format(configfilename), file=sys.stderr)
sys.exit(0)
config = {}
linenumber = 0
for line in configfile:
linenumber += 1
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
equalsposition = line.find('=')
if equalsposition == -1:
print('{}: warning: line {} in config file "{}" does not contain an equals sign - ignoring'.format(progname, linenumber, configfilename), file=sys.stderr)
continue
key = line[0:equalsposition]
value = line[equalsposition+1:]
### print('+++ {} +++ {} +++'.format(key, value))
if key in config:
print('{}: warning: the key "{}" in line {} in config file "{}" is a duplicate - ignoring'.format(progname, equalsposition, linenumber, configfilename), file=sys.stderr)
else:
config[key] = value
configfile.close()
if len(config) == 0:
print('{}: warning: config file "{}" does not contain any keys - continuing'.format(progname, configfilename), file=sys.stderr)
return config
#################################################################
def compareconfigs(first, second):
diffs = {}
index = 0
# report keys in second config that are not in the first
for key in second:
if key not in first:
diffs[index] = 'Key "{}" added\n New value = "{}"'.format(key, second[key])
index += 1
# report keys in both that have a different value
for key in first:
if key in second:
if first[key] != second[key]:
diffs[index] = 'Key "{}" changed\n Old value = "{}"\n New value = "{}"'.format(key, first[key], second[key])
index += 1
# report keys in first config that are not on the second
for key in first:
if key not in second:
diffs[index] = 'Key "{}" deleted\n Old value = "{}"'.format(key, first[key])
index += 1
return diffs
#################################################################
def main():
global progame
parser = argparse.ArgumentParser()
parser.add_argument('first', help='name of first raw_config.txt file')
parser.add_argument('second', help='name of second raw_config.txt file')
args = parser.parse_args()
firstfilename = args.first
secondfilename = args.second
firstconfig = readrawconfig(firstfilename)
secondconfig = readrawconfig(secondfilename)
diffs = compareconfigs(firstconfig, secondconfig)
numdiffs = len(diffs)
if numdiffs == 0:
retcode = 0
else:
for i in range(0, numdiffs):
print(diffs[i])
retcode = 1
return retcode
#################################################################
progname = os.path.basename(sys.argv[0])
sys.exit(main())
# end of file
| de | 0.493827 | #! /usr/bin/python3 # # @(!--#) @(#) compareconfigs.py, version 001, 08-june-2019 # # compare two raw_config.txt files extracted from a Raritan PDU # ################################################################# # # imports # ################################################################# ### print('+++ {} +++ {} +++'.format(key, value)) ################################################################# # report keys in second config that are not in the first # report keys in both that have a different value # report keys in first config that are not on the second ################################################################# ################################################################# # end of file | 2.530851 | 3 |
python/fe3lmaker/export_css.py | flarebyte/wonderful-bazar | 0 | 6618309 | <filename>python/fe3lmaker/export_css.py
#!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by <NAME> on 2010-03-22.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys
import os
import S3
import time
import csv, uuid
import simplejson as json
import mondriancss
import datautils, devutils, datasource
from collections import defaultdict
from export import MediaExporter
NAME_ID=">>>[selector-name]"
class Stylesheet(MediaExporter):
    """Renders the mondriancss stylesheet as CSS text and uploads it."""
    def __init__(self):
        self.initialize()
    def upload(self):
        """Connect if needed, render the stylesheet and upload it as "base".

        Returns False when no connection could be established.
        """
        if self.conn is None:  # identity test for None instead of ==
            self.connect()
        if self.conn is None:
            return False
        stylesheet = mondriancss.get_stylesheet()
        css = self.create_css(stylesheet)
        self.upload_css("base", css)
    def create_css(self, stylesheet):
        """Render a list of selector dicts into CSS text.

        Each dict holds the selector name under NAME_ID and its
        declarations as the remaining key/value pairs.  Bug fix: the
        original popped NAME_ID out of each dict, destructively mutating
        the caller's data; this version leaves the input untouched while
        producing identical output.
        """
        r = ""
        for selector in stylesheet:
            r += "%s{" % selector[NAME_ID]
            for k, v in selector.items():
                if k == NAME_ID:
                    continue
                r += "%s:%s;" % (k, v)
            r += "}\n"
        return r
# module-import side effect: immediately render and upload the stylesheet
stylesheet=Stylesheet()
stylesheet.upload()
| <filename>python/fe3lmaker/export_css.py
#!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by <NAME> on 2010-03-22.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys
import os
import S3
import time
import csv, uuid
import simplejson as json
import mondriancss
import datautils, devutils, datasource
from collections import defaultdict
from export import MediaExporter
NAME_ID=">>>[selector-name]"
class Stylesheet(MediaExporter):
def __init__(self):
self.initialize()
def upload(self):
if (self.conn==None):
self.connect()
if (self.conn==None):
return False
stylesheet=mondriancss.get_stylesheet()
css=self.create_css(stylesheet)
self.upload_css("base",css)
def create_css(self,stylesheet):
r = ""
for selector in stylesheet:
name = selector.pop(NAME_ID)
r+="%s{" % name
for k in selector:
r+="%s:%s;" % (k,selector[k])
r+="}\n"
return r
stylesheet=Stylesheet()
stylesheet.upload()
| en | 0.664657 | #!/usr/bin/env python # encoding: utf-8 untitled.py Created by <NAME> on 2010-03-22. Copyright (c) 2010 Flarebyte.com Limited. All rights reserved. | 2.155796 | 2 |
loggers_control/scripts/agents/qnet_test.py | IRASatUC/two_loggers | 0 | 6618310 | from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from dqn import QNet
# synthetic data: 10000 feature rows of width 7 with integer labels 0-3
x = np.random.randn(10000,7)
y = np.random.randint(4, size=10000)
# NOTE(review): the head has 10 softmax units although y only uses labels
# 0-3; sparse_categorical_crossentropy tolerates this, but 4 units would
# match the data - confirm intent
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(128, input_shape=(7, ), activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
# untrained twin network: never compiled or fitted, so its predictions
# below come from the random weight initialisation
model_hat = tf.keras.models.Sequential([
    tf.keras.layers.Dense(128, input_shape=(7, ), activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x, y, epochs=100)
# predicted class = index of the largest softmax output for each row
x_pred = np.random.randn(16,7)
preds = np.argmax(model.predict(x_pred), axis=1)
preds_hat = np.argmax(model_hat.predict(x_pred), axis=1)
| from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from dqn import QNet
x = np.random.randn(10000,7)
y = np.random.randint(4, size=10000)
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(128, input_shape=(7, ), activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model_hat = tf.keras.models.Sequential([
tf.keras.layers.Dense(128, input_shape=(7, ), activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x, y, epochs=100)
x_pred = np.random.randn(16,7)
preds = np.argmax(model.predict(x_pred), axis=1)
preds_hat = np.argmax(model_hat.predict(x_pred), axis=1)
| none | 1 | 2.408907 | 2 | |
tape_tracker/tape_tracker.py | atharvjoshi/Unit | 0 | 6618311 | from tape_tracker.utils import detector_utils as detector_utils
import cv2
import tensorflow as tf
import threading
class TapeTrackerThread(threading.Thread):
    """Worker thread that detects tape in frames pulled from a queue.

    Frames are read from input_q, run through the frozen detection graph,
    annotated with bounding boxes, and pushed to output_q together with
    the bounding-box coordinates.
    """
    def __init__(self, input_q, output_q, score_thresh=0.9, max_det=1):
        self.threshold = score_thresh
        # bug fix: the original hard-coded 1 here, silently ignoring the
        # caller's max_det argument
        self.max_det = max_det
        self.input_q = input_q
        self.output_q = output_q
        self.cap_params = self._init_cap_prop()
        # Other variables
        self.num_frames = 0
        self.fps = 0
        self.index = 0
        threading.Thread.__init__(self)
        print(">> loading frozen model for worker")
        self.detection_graph, self.sess = detector_utils.load_inference_graph()
        # NOTE(review): this overwrites the session returned by
        # load_inference_graph with a fresh one on the same graph - confirm
        # that is intentional
        self.sess = tf.Session(graph=self.detection_graph)
    def _init_cap_prop(self):
        """Build the capture-parameter dict used by the detector helpers."""
        cap_params = {}
        # fixed working resolution of the frames fed to the detector
        cap_params['im_width'], cap_params['im_height'] = 576, 432
        cap_params['score_thresh'] = self.threshold
        cap_params['num_tapes_detect'] = self.max_det
        print(cap_params)
        return cap_params
    def run(self):
        """Consume frames forever; annotate detections and forward them."""
        while True:
            frame = self.input_q.get()
            bb_coordinates = None
            if frame is not None:
                # Actual detection: boxes holds the bounding-box
                # coordinates, scores the confidence for each box.
                boxes, scores = detector_utils.detect_objects(
                    frame, self.detection_graph, self.sess)
                # draw bounding boxes in place and collect their coordinates
                bb_coordinates = detector_utils.draw_box_on_image(
                    self.cap_params['num_tapes_detect'], self.cap_params["score_thresh"],
                    scores, boxes, self.cap_params['im_width'], self.cap_params['im_height'],
                    frame)
            # forward the (possibly un-annotated) frame either way; the
            # original duplicated this put() in both branches
            self.output_q.put((frame, bb_coordinates))
    def close(self):
        """Release the TensorFlow session."""
        self.sess.close()
| from tape_tracker.utils import detector_utils as detector_utils
import cv2
import tensorflow as tf
import threading
class TapeTrackerThread(threading.Thread):
def __init__(self, input_q, output_q, score_thresh=0.9, max_det=1):
self.threshold = score_thresh
self.max_det = 1
self.input_q = input_q
self.output_q = output_q
self.cap_params = self._init_cap_prop()
# Other variables
self.num_frames = 0
self.fps = 0
self.index = 0
threading.Thread.__init__(self)
print(">> loading frozen model for worker")
self.detection_graph, self.sess = detector_utils.load_inference_graph()
self.sess = tf.Session(graph=self.detection_graph)
def _init_cap_prop(self):
cap_params = {}
cap_params['im_width'], cap_params['im_height'] = 576, 432
cap_params['score_thresh'] = self.threshold
cap_params['num_tapes_detect'] = self.max_det
print(cap_params)
return cap_params
def run (self):
while True:
frame = self.input_q.get()
bb_coordinates = None
if (frame is not None):
# Actual detection. Variable boxes contains the bounding box cordinates for hands detected,
# while scores contains the confidence for each of these boxes.
# Hint: If len(boxes) > 1 , you may assume you have found atleast one hand (within your score threshold)
boxes, scores = detector_utils.detect_objects(
frame, self.detection_graph, self.sess)
# draw bounding boxes
bb_coordinates = detector_utils.draw_box_on_image(
self.cap_params['num_tapes_detect'], self.cap_params["score_thresh"],
scores, boxes, self.cap_params['im_width'], self.cap_params['im_height'],
frame)
# add frame annotated with bounding box to queue
self.output_q.put((frame, bb_coordinates))
else:
self.output_q.put((frame, bb_coordinates))
def close (self):
self.sess.close()
| en | 0.82579 | # Other variables # Actual detection. Variable boxes contains the bounding box cordinates for hands detected, # while scores contains the confidence for each of these boxes. # Hint: If len(boxes) > 1 , you may assume you have found atleast one hand (within your score threshold) # draw bounding boxes # add frame annotated with bounding box to queue | 2.33435 | 2 |
app.py | sjg10/flask-google-mqtt-connector | 1 | 6618312 | <reponame>sjg10/flask-google-mqtt-connector
import os
# force Authlib to accept plain-HTTP OAuth transport - development only,
# must never be set in production
os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
from website.app import create_app
if __name__ == "__main__":
    print("Insecure mode ACTIVE")
    # development configuration: hard-coded secret key and a local sqlite file
    app = create_app({
        'SECRET_KEY': 'secret',
        'OAUTH2_REFRESH_TOKEN_GENERATOR': True,
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
        'SQLALCHEMY_DATABASE_URI': 'sqlite:///data/db.sqlite',
    })
    # bind on all interfaces with the auto-reloading debug server
    app.run(host="0.0.0.0", debug=True)
| import os
os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
from website.app import create_app
if __name__ == "__main__":
print("Insecure mode ACTIVE")
app = create_app({
'SECRET_KEY': 'secret',
'OAUTH2_REFRESH_TOKEN_GENERATOR': True,
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
'SQLALCHEMY_DATABASE_URI': 'sqlite:///data/db.sqlite',
})
app.run(host="0.0.0.0", debug=True) | none | 1 | 2.11654 | 2 | |
base/chapter3/shopping_ticket.py | liuweiccy/pyexample | 0 | 6618313 | <reponame>liuweiccy/pyexample<gh_stars>0
# Print a well-formatted shopping receipt at a user-specified total width.
width = int(input('Please enter width:'))
price_width = 10
item_width = width - price_width
# header row: left-aligned item column, right-aligned price column; item
# rows additionally format the price with two decimals
header_fmt = '{{:{}}}{{:>{}}}'.format(item_width, price_width)
fmt = '{{:{}}}{{:>{}.2f}}'.format(item_width, price_width)
print('=' * width)
print(header_fmt.format("Item", "Price"))
print('-' * width)
print(fmt.format("Apple", 0.4))
print(fmt.format("Pear", 0.5))
print(fmt.format("Apple Phone", 999))
print(fmt.format("Beer (4L)", 40))
print(fmt.format("Meat (5kg * 2)", 29.99))
print('=' * width)
# bug fix: removed the stray trailing '"".translate()' - str.translate()
# requires a table argument, so that line raised TypeError at runtime
| # 根据指定的宽度打印格式良好的购物小票
width = int(input('Please enter width:'))
price_width = 10
item_width = width - price_width
header_fmt = '{{:{}}}{{:>{}}}'.format(item_width, price_width)
fmt = '{{:{}}}{{:>{}.2f}}'.format(item_width, price_width)
print('=' * width)
print(header_fmt.format("Item", "Price"))
print('-' * width)
print(fmt.format("Apple", 0.4))
print(fmt.format("Pear", 0.5))
print(fmt.format("Apple Phone", 999))
print(fmt.format("Beer (4L)", 40))
print(fmt.format("Meat (5kg * 2)", 29.99))
print('=' * width)
"".translate() | zh | 0.961151 | # 根据指定的宽度打印格式良好的购物小票 | 4.136909 | 4 |
cond_glow/conv1x1.py | scey26/srdualglow | 1 | 6618314 | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from scipy import linalg as la
from .cond_net import WCondNet
logabs = lambda x: torch.log(torch.abs(x))
# non-LU unconditional
class InvConv1x1Unconditional(nn.Module):
    """Invertible 1x1 convolution whose full weight matrix is learned freely.

    The kernel is initialised as a random orthogonal matrix (QR of a
    Gaussian), i.e. as a rotation with zero initial log-determinant.
    """
    def __init__(self, in_channel):
        super().__init__()
        rotation, _ = torch.qr(torch.randn(in_channel, in_channel))
        # a (C, C) matrix reshaped to (C, C, 1, 1) acts as a 1x1 convolution
        self.weight = nn.Parameter(rotation.unsqueeze(2).unsqueeze(3))
    def forward(self, inp):
        """Apply the 1x1 convolution; return (output, log-determinant)."""
        height, width = inp.shape[2], inp.shape[3]
        out = F.conv2d(inp, self.weight)
        # slogdet computed in double precision, then cast back to float
        log_abs_det = torch.slogdet(self.weight.squeeze().double())[1].float()
        return out, height * width * log_abs_det
    def reverse(self, output):
        """Invert the transform by convolving with the inverse matrix."""
        inverse_kernel = self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
        return F.conv2d(output, inverse_kernel)
# non-LU conditional
class InvConv1x1Conditional(nn.Module):
    """Invertible 1x1 convolution whose (C, C) weight matrix is produced
    per batch item by a conditioning network (WCondNet, QR-initialised).
    """
    def __init__(self, cond_shape, inp_shape):
        super().__init__()
        self.cond_net = WCondNet(cond_shape, inp_shape)  # initialized with QR decomposition
        print_params = False
        if print_params:  # debug aid: report the cond-net parameter count
            total_params = sum(p.numel() for p in self.cond_net.parameters())
            print('ActNormConditional CondNet params:', total_params)
    def forward(self, inp, condition):
        """Convolve each batch item with its own conditioned weight matrix.

        Returns the stacked outputs and height*width times the
        batch-averaged log|det W|.
        """
        _, _, height, width = inp.shape
        weights = self.cond_net(condition)  # shape (B, C, C)
        batch_size = inp.shape[0]
        log_w_sum = 0
        per_item_outputs = []
        # one conv per batch item, each with its own (C, C, 1, 1) kernel
        for sample, w in zip(inp, weights):
            kernel = w.unsqueeze(2).unsqueeze(3)
            convolved = F.conv2d(sample.unsqueeze(0), kernel)
            per_item_outputs.append(convolved.squeeze(0))
            log_w_sum = log_w_sum + torch.slogdet(kernel.squeeze().double())[1].float()
        stacked = torch.stack(per_item_outputs, dim=0)
        mean_log_w = log_w_sum / batch_size  # average over the batch
        return stacked, height * width * mean_log_w
    def reverse(self, output, condition):
        """Undo forward() by convolving each item with its inverse matrix."""
        weights = self.cond_net(condition)  # shape (B, C, C)
        reconstructed = []
        for sample, w in zip(output, weights):
            inverse_kernel = w.inverse().unsqueeze(2).unsqueeze(3)
            restored = F.conv2d(sample.unsqueeze(0), inverse_kernel)
            reconstructed.append(restored.squeeze(0))
        return torch.stack(reconstructed, dim=0)
# LU for both conditional and unconditional
class InvConv1x1LU(nn.Module):
    """Invertible 1x1 convolution parameterised through an LU decomposition:
    W = P @ (L + I) @ (U + diag(sign(s) * exp(log|s|))).

    P is a fixed permutation buffer; the strictly-lower L, strictly-upper U
    and log|s| are learnable parameters (unconditional mode) or emitted by
    a conditioning network (conditional mode).
    """
    def __init__(self, in_channel, mode='unconditional', cond_shape=None, inp_shape=None):
        super().__init__()
        self.mode = mode
        # initialize with the LU decomposition of a random rotation matrix
        q = la.qr(np.random.randn(in_channel, in_channel))[0].astype(np.float32)
        w_p, w_l, w_u = la.lu(q)
        w_s = np.diag(w_u)  # extract diagonal elements of U into vector w_s
        w_u = np.triu(w_u, 1)  # set diagonal elements of U to 0
        u_mask = np.triu(np.ones_like(w_u), 1)
        l_mask = u_mask.T
        w_p = torch.from_numpy(w_p)
        w_l = torch.from_numpy(w_l)
        w_u = torch.from_numpy(w_u)
        w_s = torch.from_numpy(w_s)
        # non-trainable parameters (permutation, triangular masks, signs)
        self.register_buffer('w_p', w_p)
        self.register_buffer('u_mask', torch.from_numpy(u_mask))
        self.register_buffer('l_mask', torch.from_numpy(l_mask))
        self.register_buffer('s_sign', torch.sign(w_s))
        self.register_buffer('l_eye', torch.eye(l_mask.shape[0]))
        # the module-level logabs lambda was inlined as torch.log(torch.abs(.))
        # to keep this class self-contained
        if self.mode == 'conditional':
            matrices_flattened = torch.cat([torch.flatten(w_l), torch.flatten(w_u), torch.log(torch.abs(w_s))])
            self.cond_net = WCondNet(cond_shape, inp_shape, do_lu=True, initial_bias=matrices_flattened)
        else:
            # learnable parameters
            self.w_l = nn.Parameter(w_l)
            self.w_u = nn.Parameter(w_u)
            self.w_s = nn.Parameter(torch.log(torch.abs(w_s)))
    def forward(self, inp, condition=None):
        """Apply the 1x1 convolution; return (output, log-determinant)."""
        _, _, height, width = inp.shape
        weight, s_vector = self.calc_weight(condition)
        out = F.conv2d(inp, weight)
        logdet = height * width * torch.sum(s_vector)
        return out, logdet
    def calc_weight(self, condition=None):
        """Reassemble W from its (possibly conditioned) LU factors.

        Returns the (C, C, 1, 1) kernel and the log|s| vector.
        """
        if self.mode == 'conditional':
            l_matrix, u_matrix, s_vector = self.cond_net(condition)
        else:
            l_matrix, u_matrix, s_vector = self.w_l, self.w_u, self.w_s
        weight = (
            self.w_p
            @ (l_matrix * self.l_mask + self.l_eye)  # lower-triangular with unit diagonal
            @ ((u_matrix * self.u_mask) + torch.diag(self.s_sign * torch.exp(s_vector)))
        )
        return weight.unsqueeze(2).unsqueeze(3), s_vector
    def reverse_single(self, output, condition=None):
        """Invert the convolution for a single batch item."""
        weight, _ = self.calc_weight(condition)
        return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
    def reverse(self, output, condition=None):
        """Invert the convolution for a whole batch.

        Bug fix: the original indexed condition[i_batch] unconditionally for
        batches larger than one, which raised TypeError in unconditional
        mode where condition is None.
        """
        batch_size = output.shape[0]
        if batch_size == 1:
            return self.reverse_single(output, condition)
        # reverse item by item: in conditional mode each item keeps its own
        # condition slice; in unconditional mode the weight is shared
        batch_reversed = []
        for i_batch in range(batch_size):
            cond_i = None if condition is None else condition[i_batch].unsqueeze(0)
            batch_reversed.append(self.reverse_single(output[i_batch].unsqueeze(0), cond_i))
        return torch.cat(batch_reversed)
| import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from scipy import linalg as la
from .cond_net import WCondNet
logabs = lambda x: torch.log(torch.abs(x))
# non-LU unconditional
class InvConv1x1Unconditional(nn.Module):
def __init__(self, in_channel):
super().__init__()
q, _ = torch.qr(torch.randn(in_channel, in_channel))
# making it 1x1 conv: conv2d(in_channels=in_channel, out_channels=in_channel, kernel_size=1, stride=1)
w = q.unsqueeze(2).unsqueeze(3)
self.weight = nn.Parameter(w) # the weight matrix
def forward(self, inp):
_, _, height, width = inp.shape
out = F.conv2d(inp, self.weight)
log_w = torch.slogdet(self.weight.squeeze().double())[1].float()
log_det = height * width * log_w
return out, log_det
def reverse(self, output):
return F.conv2d(
output, self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
)
# non-LU conditional
class InvConv1x1Conditional(nn.Module):
def __init__(self, cond_shape, inp_shape):
super().__init__()
self.cond_net = WCondNet(cond_shape, inp_shape) # initialized with QR decomposition
print_params = False
if print_params:
total_params = sum(p.numel() for p in self.cond_net.parameters())
print('ActNormConditional CondNet params:', total_params)
def forward(self, inp, condition):
"""
F.conv2d doc: https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.conv2d
:param inp:
:param condition:
:return:
"""
_, _, height, width = inp.shape
cond_net_out = self.cond_net(condition) # shape (B, C, C)
batch_size = inp.shape[0]
log_w = 0
output = []
# convolve every batch item with its corresponding W
for i in range(batch_size):
corresponding_inp = inp[i].unsqueeze(0) # re-adding batch dim - shape (1, C, H, W)
corresponding_w = cond_net_out[i].unsqueeze(2).unsqueeze(3) # shape: (C, C) --> (C, C, 1, 1)
corresponding_out = F.conv2d(corresponding_inp, corresponding_w)
output.append(corresponding_out.squeeze(0)) # removing batch dimension - will be added with torch.stack
corresponding_log_w = torch.slogdet(corresponding_w.squeeze().double())[1].float()
log_w += corresponding_log_w
output = torch.stack(output, dim=0) # convert list to tensor
log_w = log_w / batch_size # taking average
log_det = height * width * log_w
return output, log_det
def reverse(self, output, condition):
cond_net_out = self.cond_net(condition) # shape (B, C, C)
batch_size = output.shape[0]
inp = []
# convolve every batch item with its corresponding W inverse
for i in range(batch_size):
corresponding_out = output[i].unsqueeze(0) # shape (1, C, H, W)
corresponding_w_inv = cond_net_out[i].inverse().unsqueeze(2).unsqueeze(3) # shape: (C, C) --> (C, C, 1, 1)
corresponding_inp = F.conv2d(corresponding_out, corresponding_w_inv)
inp.append(corresponding_inp.squeeze(0))
inp = torch.stack(inp, dim=0)
return inp
# LU for both conditional and unconditional
class InvConv1x1LU(nn.Module):
def __init__(self, in_channel, mode='unconditional', cond_shape=None, inp_shape=None):
super().__init__()
self.mode = mode
# initialize with LU decomposition
q = la.qr(np.random.randn(in_channel, in_channel))[0].astype(np.float32)
w_p, w_l, w_u = la.lu(q)
w_s = np.diag(w_u) # extract diagonal elements of U into vector w_s
w_u = np.triu(w_u, 1) # set diagonal elements of U to 0
u_mask = np.triu(np.ones_like(w_u), 1)
l_mask = u_mask.T
w_p = torch.from_numpy(w_p)
w_l = torch.from_numpy(w_l)
w_u = torch.from_numpy(w_u)
w_s = torch.from_numpy(w_s)
# non-trainable parameters
self.register_buffer('w_p', w_p)
self.register_buffer('u_mask', torch.from_numpy(u_mask))
self.register_buffer('l_mask', torch.from_numpy(l_mask))
self.register_buffer('s_sign', torch.sign(w_s))
self.register_buffer('l_eye', torch.eye(l_mask.shape[0]))
if self.mode == 'conditional':
matrices_flattened = torch.cat([torch.flatten(w_l), torch.flatten(w_u), logabs(w_s)])
self.cond_net = WCondNet(cond_shape, inp_shape, do_lu=True, initial_bias=matrices_flattened)
else:
# learnable parameters
self.w_l = nn.Parameter(w_l)
self.w_u = nn.Parameter(w_u)
self.w_s = nn.Parameter(logabs(w_s))
def forward(self, inp, condition=None):
_, _, height, width = inp.shape
weight, s_vector = self.calc_weight(condition)
out = F.conv2d(inp, weight)
logdet = height * width * torch.sum(s_vector)
return out, logdet
def calc_weight(self, condition=None):
if self.mode == 'conditional':
l_matrix, u_matrix, s_vector = self.cond_net(condition)
else:
l_matrix, u_matrix, s_vector = self.w_l, self.w_u, self.w_s
weight = (
self.w_p
@ (l_matrix * self.l_mask + self.l_eye) # explicitly make it lower-triangular with 1's on diagonal
@ ((u_matrix * self.u_mask) + torch.diag(self.s_sign * torch.exp(s_vector)))
)
return weight.unsqueeze(2).unsqueeze(3), s_vector
def reverse_single(self, output, condition=None):
weight, _ = self.calc_weight(condition)
return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
def reverse(self, output, condition=None):
batch_size = output.shape[0]
if batch_size == 1:
return self.reverse_single(output, condition)
# reverse one by one for batch size greater than 1. Improving this is not a priority since batch size is usually 1.
batch_reversed = []
for i_batch, batch_item in enumerate(output):
batch_reversed.append(self.reverse(output[i_batch].unsqueeze(0), condition[i_batch].unsqueeze(0)))
return torch.cat(batch_reversed)
| en | 0.812058 | # non-LU unconditional # making it 1x1 conv: conv2d(in_channels=in_channel, out_channels=in_channel, kernel_size=1, stride=1) # the weight matrix # non-LU conditional # initialized with QR decomposition F.conv2d doc: https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.conv2d :param inp: :param condition: :return: # shape (B, C, C) # convolve every batch item with its corresponding W # re-adding batch dim - shape (1, C, H, W) # shape: (C, C) --> (C, C, 1, 1) # removing batch dimension - will be added with torch.stack # convert list to tensor # taking average # shape (B, C, C) # convolve every batch item with its corresponding W inverse # shape (1, C, H, W) # shape: (C, C) --> (C, C, 1, 1) # LU for both conditional and unconditional # initialize with LU decomposition # extract diagonal elements of U into vector w_s # set diagonal elements of U to 0 # non-trainable parameters # learnable parameters # explicitly make it lower-triangular with 1's on diagonal # reverse one by one for batch size greater than 1. Improving this is not a priority since batch size is usually 1. | 2.320492 | 2 |
Chapter_BestPractices/Holdout_Method.py | ML-PSE/Machine_Learning_for_PSE | 2 | 6618315 | <gh_stars>1-10
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Split dataset into training and test sets
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% read data
import numpy as np
data = np.loadtxt('quadratic_raw_data.csv', delimiter=',')
x = data[:,0,None]; y = data[:,1,None]
#%% create pipeline for quadratic fit via linear model
# import relevant classes
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
# add transformers and estimators sequentially as list of tuples
# the names ‘poly’, ‘scaler’, ‘model’ can be used to access the individual elements of pipeline later
pipe = Pipeline([('poly', PolynomialFeatures(degree=2, include_bias=False)),
('scaler', StandardScaler()),
('model', LinearRegression())])
#%% separate training data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
print('Number of samples in training set: ', x_train.shape[0])
print('Number of samples in test set: ', x_test.shape[0])
#%% fit pipeline and predict
pipe.fit(x_train, y_train)
y_predicted_train = pipe.predict(x_train)
y_predicted_test = pipe.predict(x_test)
#%% performance metrics
from sklearn.metrics import mean_squared_error as mse
print('Training metric (mse) = ', mse(y_train, y_predicted_train))
print('Test metric (mse) = ', mse(y_test, y_predicted_test))
#%% plot predictions
y_predicted = pipe.predict(x)
from matplotlib import pyplot as plt
plt.figure()
plt.plot(x_train,y_train, 'bo', label='raw training data')
plt.plot(x_test,y_test, 'ro', label='raw test data')
plt.plot(x,y_predicted, color='orange', label='quadratic fit')
plt.legend()
plt.xlabel('x'), plt.ylabel('y')
| ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Split dataset into training and test sets
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% read data
import numpy as np
data = np.loadtxt('quadratic_raw_data.csv', delimiter=',')
x = data[:,0,None]; y = data[:,1,None]
#%% create pipeline for quadratic fit via linear model
# import relevant classes
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
# add transformers and estimators sequentially as list of tuples
# the names ‘poly’, ‘scaler’, ‘model’ can be used to access the individual elements of pipeline later
pipe = Pipeline([('poly', PolynomialFeatures(degree=2, include_bias=False)),
('scaler', StandardScaler()),
('model', LinearRegression())])
#%% separate training data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
print('Number of samples in training set: ', x_train.shape[0])
print('Number of samples in test set: ', x_test.shape[0])
#%% fit pipeline and predict
pipe.fit(x_train, y_train)
y_predicted_train = pipe.predict(x_train)
y_predicted_test = pipe.predict(x_test)
#%% performance metrics
from sklearn.metrics import mean_squared_error as mse
print('Training metric (mse) = ', mse(y_train, y_predicted_train))
print('Test metric (mse) = ', mse(y_test, y_predicted_test))
#%% plot predictions
y_predicted = pipe.predict(x)
from matplotlib import pyplot as plt
plt.figure()
plt.plot(x_train,y_train, 'bo', label='raw training data')
plt.plot(x_test,y_test, 'ro', label='raw test data')
plt.plot(x,y_predicted, color='orange', label='quadratic fit')
plt.legend()
plt.xlabel('x'), plt.ylabel('y') | en | 0.920826 | ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ## Split dataset into training and test sets ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #%% read data #%% create pipeline for quadratic fit via linear model # import relevant classes # add transformers and estimators sequentially as list of tuples # the names ‘poly’, ‘scaler’, ‘model’ can be used to access the individual elements of pipeline later #%% separate training data #%% fit pipeline and predict #%% performance metrics #%% plot predictions | 3.299937 | 3 |
modules/utils.py | madwayz/Communication-Channel | 0 | 6618316 | from matplotlib.pyplot import *
from config import img_path, path
import os
import re
import ast
"""
На вход подаются данные для создания цикла, обёрнутые в np.arrange()
@arg1:
"""
def createTimeLine(*args):
    """Return np.arange(*args) materialised as a plain Python list."""
    return list(np.arange(*args))
def tolist(array):
    """Convert a numpy ndarray to a (possibly nested) plain Python list."""
    return array.tolist()
def writeInFile(data, path, text):
    """Write *data* to the file at *path* (created/truncated) and log it."""
    with open(path, 'w+') as out:
        out.write(data)
    print('{} был(-о/и) записан(-о/ы) в {}'.format(text, path))
def savePlot(plt, name, xCoords=None, yCoords=None, label=None, xlabel=None, ylabel=None):
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if xCoords is not None and yCoords is not None:
if label is not None:
plt.plot(xCoords, yCoords, label=label)
else:
plt.plot(xCoords, yCoords)
else:
plt.plot(xCoords, yCoords)
if label is not None:
plt.legend(bbox_to_anchor=(1.01, 0.15), loc='right')
plt.savefig(img_path + name)
show()
print('Сохраняю график в {}'.format(img_path + name))
def parseErrorChances(plt, timeline):
plt.xlabel(r'$\frac{i+10}{10}$')
plt.ylabel('Вероятность ошибок O')
file_names = list(file_name for file_name in os.listdir(path + '\data') if re.findall(r'error_chance_q=\d+.txt', file_name))
for file in file_names:
with open(path + '\data\\' + file, 'r+') as f:
file_out = ast.literal_eval(f.read())
plt.plot(timeline, np.log(file_out), label='q={}'.format(*re.findall(r'\d+', file)))
plt.legend(bbox_to_anchor=(1.01, 0.14), loc='right')
plt.savefig(img_path + 'p4.png')
show()
print('Сохраняю график в {}'.format(img_path + 'p4.png')) | from matplotlib.pyplot import *
from config import img_path, path
import os
import re
import ast
"""
На вход подаются данные для создания цикла, обёрнутые в np.arrange()
@arg1:
"""
def createTimeLine(*args):
return [dig for dig in np.arange(*args)]
def tolist(array):
return np.ndarray.tolist(array)
def writeInFile(data, path, text):
with open(path, 'w+') as f:
f.write(data)
print('{} был(-о/и) записан(-о/ы) в {}'.format(text, path))
def savePlot(plt, name, xCoords=None, yCoords=None, label=None, xlabel=None, ylabel=None):
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if xCoords is not None and yCoords is not None:
if label is not None:
plt.plot(xCoords, yCoords, label=label)
else:
plt.plot(xCoords, yCoords)
else:
plt.plot(xCoords, yCoords)
if label is not None:
plt.legend(bbox_to_anchor=(1.01, 0.15), loc='right')
plt.savefig(img_path + name)
show()
print('Сохраняю график в {}'.format(img_path + name))
def parseErrorChances(plt, timeline):
plt.xlabel(r'$\frac{i+10}{10}$')
plt.ylabel('Вероятность ошибок O')
file_names = list(file_name for file_name in os.listdir(path + '\data') if re.findall(r'error_chance_q=\d+.txt', file_name))
for file in file_names:
with open(path + '\data\\' + file, 'r+') as f:
file_out = ast.literal_eval(f.read())
plt.plot(timeline, np.log(file_out), label='q={}'.format(*re.findall(r'\d+', file)))
plt.legend(bbox_to_anchor=(1.01, 0.14), loc='right')
plt.savefig(img_path + 'p4.png')
show()
print('Сохраняю график в {}'.format(img_path + 'p4.png')) | ru | 0.998713 | На вход подаются данные для создания цикла, обёрнутые в np.arrange() @arg1: | 2.643328 | 3 |
pypykatz/__init__.py | retr0-13/pypykatz | 1 | 6618317 | <reponame>retr0-13/pypykatz<gh_stars>1-10
name = "pypykatz" | name = "pypykatz" | none | 1 | 1.131367 | 1 | |
tests/utils.py | herrmannlab/highdicom | 2 | 6618318 | <reponame>herrmannlab/highdicom
from io import BytesIO
from pydicom.dataset import Dataset, FileMetaDataset
from pydicom.filereader import dcmread
def write_and_read_dataset(dataset: Dataset):
"""Write DICOM dataset to buffer and read it back from buffer."""
clone = Dataset(dataset)
clone.is_little_endian = True
if hasattr(dataset, 'file_meta'):
clone.file_meta = FileMetaDataset(dataset.file_meta)
if dataset.file_meta.TransferSyntaxUID == '1.2.840.10008.1.2':
clone.is_implicit_VR = True
else:
clone.is_implicit_VR = False
else:
clone.is_implicit_VR = False
with BytesIO() as fp:
clone.save_as(fp)
return dcmread(fp, force=True)
| from io import BytesIO
from pydicom.dataset import Dataset, FileMetaDataset
from pydicom.filereader import dcmread
def write_and_read_dataset(dataset: Dataset):
"""Write DICOM dataset to buffer and read it back from buffer."""
clone = Dataset(dataset)
clone.is_little_endian = True
if hasattr(dataset, 'file_meta'):
clone.file_meta = FileMetaDataset(dataset.file_meta)
if dataset.file_meta.TransferSyntaxUID == '1.2.840.10008.1.2':
clone.is_implicit_VR = True
else:
clone.is_implicit_VR = False
else:
clone.is_implicit_VR = False
with BytesIO() as fp:
clone.save_as(fp)
return dcmread(fp, force=True) | en | 0.90719 | Write DICOM dataset to buffer and read it back from buffer. | 2.537271 | 3 |
src/btrccts/exchange_backend.py | brandsimon/btrccts | 88 | 6618319 | import pandas
from ccxt.base.exchange import Exchange
from ccxt.base.errors import BadRequest, BadSymbol
from btrccts.check_dataframe import _check_dataframe
from btrccts.exchange_account import ExchangeAccount
class ExchangeBackend:
def __init__(self, timeframe, balances={}, ohlcvs={}):
self._account = ExchangeAccount(timeframe=timeframe,
balances=balances,
ohlcvs=ohlcvs)
self._ohlcvs = {}
self._timeframe = timeframe
for key in ohlcvs:
self._ohlcvs[key] = _check_dataframe(
ohlcvs[key],
timeframe,
['open', 'low', 'high', 'close', 'volume'])
def fetch_order(self, id, symbol=None):
return self._account.fetch_order(id=id, symbol=symbol)
def fetch_balance(self):
return self._account.fetch_balance()
def create_order(self, market, type, price, side, amount):
return self._account.create_order(market=market, type=type, side=side,
price=price, amount=amount)
def cancel_order(self, id, symbol=None):
return self._account.cancel_order(id=id, symbol=symbol)
def fetch_open_orders(self, symbol=None, since=None, limit=None):
return self._account.fetch_open_orders(symbol=symbol, since=since,
limit=limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None):
return self._account.fetch_closed_orders(symbol=symbol, since=since,
limit=limit)
def fetch_ticker(self, symbol):
ohlcv = self._ohlcvs.get(symbol)
if ohlcv is None:
raise BadSymbol('ExchangeBackend: no prices for {}'.format(symbol))
current_date = self._timeframe.date().floor('1T')
row = ohlcv.loc[current_date]
timestamp = int(current_date.value / 10**6)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': Exchange.iso8601(timestamp),
'high': row['high'],
'low': row['low'],
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': row['open'],
'close': row['close'],
'last': None,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': {},
}
def fetch_ohlcv_dataframe(self, symbol, timeframe='1m', since=None,
limit=None, params={}):
# Exchanges in the real world have different behaviour, when there is
# no since parameter provided. (some use data from the beginning,
# some from the end)
# We return data from the beginning, because this is most likely not
# what the user wants, so this will force the user to provide the
# parameters, which will work with every exchange. This is a bug
# prevention mechanism.
ohlcv = self._ohlcvs.get(symbol)
if ohlcv is None:
raise BadSymbol('ExchangeBackend: no prices for {}'.format(symbol))
pd_current_date = self._timeframe.date().floor('1T')
if limit is None:
limit = 5
timeframe_sec = Exchange.parse_timeframe(timeframe)
pd_timeframe = pandas.Timedelta(timeframe_sec, unit='s')
ohlcv_start_date = ohlcv.index[0]
if since is None:
pd_since = ohlcv_start_date
else:
pd_since = pandas.Timestamp(since, unit='ms', tz='UTC')
pd_since = pd_since.ceil(pd_timeframe)
if pd_since < ohlcv_start_date:
raise BadRequest('ExchangeBackend: fetch_ohlcv: no date availabe '
'at since')
pd_until = pd_since + limit * pd_timeframe - pandas.Timedelta('1m')
if pd_until >= pd_current_date + pd_timeframe:
raise BadRequest(
'ExchangeBackend: fetch_ohlcv:'
' since.ceil(timeframe) + limit * timeframe'
' needs to be in the past')
pd_until = min(pd_until, pd_current_date)
data = ohlcv[pd_since:pd_until]
return data.resample(pd_timeframe).agg({
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'})
| import pandas
from ccxt.base.exchange import Exchange
from ccxt.base.errors import BadRequest, BadSymbol
from btrccts.check_dataframe import _check_dataframe
from btrccts.exchange_account import ExchangeAccount
class ExchangeBackend:
def __init__(self, timeframe, balances={}, ohlcvs={}):
self._account = ExchangeAccount(timeframe=timeframe,
balances=balances,
ohlcvs=ohlcvs)
self._ohlcvs = {}
self._timeframe = timeframe
for key in ohlcvs:
self._ohlcvs[key] = _check_dataframe(
ohlcvs[key],
timeframe,
['open', 'low', 'high', 'close', 'volume'])
def fetch_order(self, id, symbol=None):
return self._account.fetch_order(id=id, symbol=symbol)
def fetch_balance(self):
return self._account.fetch_balance()
def create_order(self, market, type, price, side, amount):
return self._account.create_order(market=market, type=type, side=side,
price=price, amount=amount)
def cancel_order(self, id, symbol=None):
return self._account.cancel_order(id=id, symbol=symbol)
def fetch_open_orders(self, symbol=None, since=None, limit=None):
return self._account.fetch_open_orders(symbol=symbol, since=since,
limit=limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None):
return self._account.fetch_closed_orders(symbol=symbol, since=since,
limit=limit)
def fetch_ticker(self, symbol):
ohlcv = self._ohlcvs.get(symbol)
if ohlcv is None:
raise BadSymbol('ExchangeBackend: no prices for {}'.format(symbol))
current_date = self._timeframe.date().floor('1T')
row = ohlcv.loc[current_date]
timestamp = int(current_date.value / 10**6)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': Exchange.iso8601(timestamp),
'high': row['high'],
'low': row['low'],
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': row['open'],
'close': row['close'],
'last': None,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': {},
}
def fetch_ohlcv_dataframe(self, symbol, timeframe='1m', since=None,
limit=None, params={}):
# Exchanges in the real world have different behaviour, when there is
# no since parameter provided. (some use data from the beginning,
# some from the end)
# We return data from the beginning, because this is most likely not
# what the user wants, so this will force the user to provide the
# parameters, which will work with every exchange. This is a bug
# prevention mechanism.
ohlcv = self._ohlcvs.get(symbol)
if ohlcv is None:
raise BadSymbol('ExchangeBackend: no prices for {}'.format(symbol))
pd_current_date = self._timeframe.date().floor('1T')
if limit is None:
limit = 5
timeframe_sec = Exchange.parse_timeframe(timeframe)
pd_timeframe = pandas.Timedelta(timeframe_sec, unit='s')
ohlcv_start_date = ohlcv.index[0]
if since is None:
pd_since = ohlcv_start_date
else:
pd_since = pandas.Timestamp(since, unit='ms', tz='UTC')
pd_since = pd_since.ceil(pd_timeframe)
if pd_since < ohlcv_start_date:
raise BadRequest('ExchangeBackend: fetch_ohlcv: no date availabe '
'at since')
pd_until = pd_since + limit * pd_timeframe - pandas.Timedelta('1m')
if pd_until >= pd_current_date + pd_timeframe:
raise BadRequest(
'ExchangeBackend: fetch_ohlcv:'
' since.ceil(timeframe) + limit * timeframe'
' needs to be in the past')
pd_until = min(pd_until, pd_current_date)
data = ohlcv[pd_since:pd_until]
return data.resample(pd_timeframe).agg({
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'})
| en | 0.921125 | # Exchanges in the real world have different behaviour, when there is # no since parameter provided. (some use data from the beginning, # some from the end) # We return data from the beginning, because this is most likely not # what the user wants, so this will force the user to provide the # parameters, which will work with every exchange. This is a bug # prevention mechanism. | 2.416365 | 2 |
shop/migrations/0013_auto_20170325_2150.py | IlyaDjurin/django-shop | 0 | 6618320 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-25 18:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0012_tovar_img_tovar_info5'),
]
operations = [
migrations.RemoveField(
model_name='tovar_img',
name='tovar_created',
),
migrations.RemoveField(
model_name='tovar_img',
name='tovar_updated',
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image11',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара11'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_video',
field=models.CharField(default='youtube', max_length=200),
preserve_default=False,
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-25 18:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0012_tovar_img_tovar_info5'),
]
operations = [
migrations.RemoveField(
model_name='tovar_img',
name='tovar_created',
),
migrations.RemoveField(
model_name='tovar_img',
name='tovar_updated',
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image11',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара11'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_video',
field=models.CharField(default='youtube', max_length=200),
preserve_default=False,
),
]
| en | 0.765612 | # -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-25 18:50 | 1.613889 | 2 |
npin/Counter.py | fasteddy516/npin | 0 | 6618321 | <reponame>fasteddy516/npin
"""Counter.py: NeoPixel Indicator Module."""
class Counter:
"""This is a docstring."""
def __init__(self, count: int) -> None:
"""Make this a docstring."""
self._count = 1
self._total = count
@property
def count(self) -> int:
"""Make this a docstring."""
return self._count
@property
def total(self) -> int:
"""Make this a docstring."""
return self._total
def __str__(self) -> str:
"""Make this a docstring."""
return f"{self._count}/{self._total}"
def update(self) -> None:
"""Make this a docstring."""
if self._count == self._total:
self._count = 1
else:
self._count += 1
class SharedCounter:
"""Make this a docstring."""
counters = dict()
@staticmethod
def add(name: str, count: int) -> None:
"""Make this a docstring."""
if name not in SharedCounter.counters:
SharedCounter.counters[name] = Counter(count)
else:
raise KeyError(f"Requested counter name {name} already exists.")
@staticmethod
def remove(name: str) -> None:
"""Make this a docstring."""
del SharedCounter.counters[name]
@staticmethod
def update_all() -> None:
"""Make this a docstring."""
for c in SharedCounter.counters:
SharedCounter.counters[c].update()
| """Counter.py: NeoPixel Indicator Module."""
class Counter:
"""This is a docstring."""
def __init__(self, count: int) -> None:
"""Make this a docstring."""
self._count = 1
self._total = count
@property
def count(self) -> int:
"""Make this a docstring."""
return self._count
@property
def total(self) -> int:
"""Make this a docstring."""
return self._total
def __str__(self) -> str:
"""Make this a docstring."""
return f"{self._count}/{self._total}"
def update(self) -> None:
"""Make this a docstring."""
if self._count == self._total:
self._count = 1
else:
self._count += 1
class SharedCounter:
"""Make this a docstring."""
counters = dict()
@staticmethod
def add(name: str, count: int) -> None:
"""Make this a docstring."""
if name not in SharedCounter.counters:
SharedCounter.counters[name] = Counter(count)
else:
raise KeyError(f"Requested counter name {name} already exists.")
@staticmethod
def remove(name: str) -> None:
"""Make this a docstring."""
del SharedCounter.counters[name]
@staticmethod
def update_all() -> None:
"""Make this a docstring."""
for c in SharedCounter.counters:
SharedCounter.counters[c].update() | en | 0.771714 | Counter.py: NeoPixel Indicator Module. This is a docstring. Make this a docstring. Make this a docstring. Make this a docstring. Make this a docstring. Make this a docstring. Make this a docstring. Make this a docstring. Make this a docstring. Make this a docstring. | 2.756102 | 3 |
src/functools/1.functools.py | sudeep0901/python | 0 | 6618322 | from functools import partial
def multiplier(x, y):
"""Multiplier doc string."""
print(x, y)
return x * y
double = partial(multiplier, 2)
triple = partial(multiplier, 3)
quadruple = partial(multiplier, 4)
print(double(5))
print(triple(5))
print(triple(quadruple(5)))
print(triple.func)
print(triple.keywords, multiplier.__doc__)
| from functools import partial
def multiplier(x, y):
"""Multiplier doc string."""
print(x, y)
return x * y
double = partial(multiplier, 2)
triple = partial(multiplier, 3)
quadruple = partial(multiplier, 4)
print(double(5))
print(triple(5))
print(triple(quadruple(5)))
print(triple.func)
print(triple.keywords, multiplier.__doc__)
| en | 0.623548 | Multiplier doc string. | 3.490102 | 3 |
forumsApp/migrations/0002_remove_replies_user.py | glen-s-abraham/Elearning-platform | 0 | 6618323 | <filename>forumsApp/migrations/0002_remove_replies_user.py<gh_stars>0
# Generated by Django 3.1.7 on 2021-03-07 04:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forumsApp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='replies',
name='user',
),
]
| <filename>forumsApp/migrations/0002_remove_replies_user.py<gh_stars>0
# Generated by Django 3.1.7 on 2021-03-07 04:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forumsApp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='replies',
name='user',
),
]
| en | 0.850323 | # Generated by Django 3.1.7 on 2021-03-07 04:49 | 1.46142 | 1 |
lldb/packages/Python/lldbsuite/test/commands/breakpoint/command/list/TestBreakpointCommandList.py | medismailben/llvm-project | 2,338 | 6618324 | """
Test 'breakpoint command list'.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_list_commands(self):
src_dir = self.getSourceDir()
yaml_path = os.path.join(src_dir, "a.yaml")
yaml_base, ext = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact("main.o")
self.yaml2obj(yaml_path, obj_path)
# Create a target with the object file we just created from YAML
target = self.dbg.CreateTarget(obj_path)
self.assertTrue(target, VALID_TARGET)
# Test without any breakpoints.
self.expect("breakpoint command list 1", error=True, substrs=["error: No breakpoints exist for which to list commands"])
# Set a breakpoint
self.runCmd("b foo")
# Check list breakpoint commands for breakpoints that have no commands.
self.expect("breakpoint command list 1", startstr="Breakpoint 1 does not have an associated command.")
# Add a breakpoint command.
self.runCmd("breakpoint command add -o 'source list' 1")
# List breakpoint command that we just created.
self.expect("breakpoint command list 1", startstr="""Breakpoint 1:
Breakpoint commands:
source list
""")
# List breakpoint command with invalid breakpoint ID.
self.expect("breakpoint command list 2", error=True, startstr="error: '2' is not a currently valid breakpoint ID.")
| """
Test 'breakpoint command list'.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_list_commands(self):
src_dir = self.getSourceDir()
yaml_path = os.path.join(src_dir, "a.yaml")
yaml_base, ext = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact("main.o")
self.yaml2obj(yaml_path, obj_path)
# Create a target with the object file we just created from YAML
target = self.dbg.CreateTarget(obj_path)
self.assertTrue(target, VALID_TARGET)
# Test without any breakpoints.
self.expect("breakpoint command list 1", error=True, substrs=["error: No breakpoints exist for which to list commands"])
# Set a breakpoint
self.runCmd("b foo")
# Check list breakpoint commands for breakpoints that have no commands.
self.expect("breakpoint command list 1", startstr="Breakpoint 1 does not have an associated command.")
# Add a breakpoint command.
self.runCmd("breakpoint command add -o 'source list' 1")
# List breakpoint command that we just created.
self.expect("breakpoint command list 1", startstr="""Breakpoint 1:
Breakpoint commands:
source list
""")
# List breakpoint command with invalid breakpoint ID.
self.expect("breakpoint command list 2", error=True, startstr="error: '2' is not a currently valid breakpoint ID.")
| en | 0.884924 | Test 'breakpoint command list'. # Create a target with the object file we just created from YAML # Test without any breakpoints. # Set a breakpoint # Check list breakpoint commands for breakpoints that have no commands. # Add a breakpoint command. # List breakpoint command that we just created. Breakpoint 1: Breakpoint commands: source list # List breakpoint command with invalid breakpoint ID. | 2.499244 | 2 |
oslo/db/sqlalchemy/provision.py | redhat-openstack/oslo.db | 0 | 6618325 | # Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import argparse
import copy
import logging
import os
import random
import string
from six import moves
import sqlalchemy
from oslo.db import exception as exc
LOG = logging.getLogger(__name__)
def get_engine(uri):
"""Engine creation
Call the function without arguments to get admin connection. Admin
connection required to create temporary database for each
particular test. Otherwise use existing connection to recreate
connection to the temporary database.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
LOG.exception(msg, engine.url)
raise exc.DBConnectionError(msg % engine.url)
def create_database(engine):
"""Provide temporary database for each particular test."""
driver = engine.name
database = ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10))
if driver == 'sqlite':
database = '/tmp/%s' % database
elif driver in ['mysql', 'postgresql']:
sql = 'create database %s;' % database
_execute_sql(engine, [sql], driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)
# Both shallow and deep copies may lead to surprising behaviour
# without knowing the implementation of sqlalchemy.engine.url.
# Use a shallow copy here, since we're only overriding a single
# property, invoking __str__ and then discarding our copy. This
# is currently safe and _should_ remain safe into the future.
new_url = copy.copy(engine.url)
new_url.database = database
return str(new_url)
def drop_database(admin_engine, current_uri):
"""Drop temporary database after each particular test."""
engine = get_engine(current_uri)
driver = engine.name
if driver == 'sqlite':
try:
os.remove(engine.url.database)
except OSError:
pass
elif driver in ['mysql', 'postgresql']:
sql = 'drop database %s;' % engine.url.database
_execute_sql(admin_engine, [sql], driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)
def main():
"""Controller to handle commands
::create: Create test database with random names.
::drop: Drop database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Under normal circumstances is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test databases.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')
args = parser.parse_args()
connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
'sqlite://')
engine = get_engine(connection_string)
which = args.which
if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)
if __name__ == "__main__":
main()
| # Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import argparse
import copy
import logging
import os
import random
import string
from six import moves
import sqlalchemy
from oslo.db import exception as exc
LOG = logging.getLogger(__name__)
def get_engine(uri):
"""Engine creation
Call the function without arguments to get admin connection. Admin
connection required to create temporary database for each
particular test. Otherwise use existing connection to recreate
connection to the temporary database.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
LOG.exception(msg, engine.url)
raise exc.DBConnectionError(msg % engine.url)
def create_database(engine):
"""Provide temporary database for each particular test."""
driver = engine.name
database = ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10))
if driver == 'sqlite':
database = '/tmp/%s' % database
elif driver in ['mysql', 'postgresql']:
sql = 'create database %s;' % database
_execute_sql(engine, [sql], driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)
# Both shallow and deep copies may lead to surprising behaviour
# without knowing the implementation of sqlalchemy.engine.url.
# Use a shallow copy here, since we're only overriding a single
# property, invoking __str__ and then discarding our copy. This
# is currently safe and _should_ remain safe into the future.
new_url = copy.copy(engine.url)
new_url.database = database
return str(new_url)
def drop_database(admin_engine, current_uri):
"""Drop temporary database after each particular test."""
engine = get_engine(current_uri)
driver = engine.name
if driver == 'sqlite':
try:
os.remove(engine.url.database)
except OSError:
pass
elif driver in ['mysql', 'postgresql']:
sql = 'drop database %s;' % engine.url.database
_execute_sql(admin_engine, [sql], driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)
def main():
"""Controller to handle commands
::create: Create test database with random names.
::drop: Drop database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Under normal circumstances is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test databases.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')
args = parser.parse_args()
connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
'sqlite://')
engine = get_engine(connection_string)
which = args.which
if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)
if __name__ == "__main__":
main()
| en | 0.839105 | # Copyright 2013 Mirantis.inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Provision test environment for specific DB backends Engine creation Call the function without arguments to get admin connection. Admin connection required to create temporary database for each particular test. Otherwise use existing connection to recreate connection to the temporary database. Initialize connection, execute sql query and close it. Provide temporary database for each particular test. # Both shallow and deep copies may lead to surprising behaviour # without knowing the implementation of sqlalchemy.engine.url. # Use a shallow copy here, since we're only overriding a single # property, invoking __str__ and then discarding our copy. This # is currently safe and _should_ remain safe into the future. Drop temporary database after each particular test. Controller to handle commands ::create: Create test database with random names. ::drop: Drop database created by previous command. | 2.090288 | 2 |
sosia/processing/utils.py | sosia-dev/sosia | 14 | 6618326 | import functools
from time import sleep
from sosia.processing.constants import QUERY_MAX_TRIES
class AttemptFailed(Exception):
pass
def build_dict(results, chunk):
"""Create dictionary assigning publication information to authors we
are looking for.
"""
from math import inf
from collections import defaultdict
chunk = [int(au) for au in chunk]
d = defaultdict(
lambda: {"first_year": inf, "pubs": set(), "coauth": set(),
"n_coauth": inf, "n_pubs": inf})
for pub in results:
if not pub.author_ids:
continue
authors = set([int(au) for au in pub.author_ids.split(";")])
for focal in authors.intersection(chunk):
d[focal]["coauth"].update(authors)
d[focal]["coauth"].remove(focal)
d[focal]["pubs"].add(pub.eid)
d[focal]["n_pubs"] = len(d[focal]["pubs"])
d[focal]["n_coauth"] = len(d[focal]["coauth"])
if not pub.coverDate:
continue
first_year = min(d[focal]["first_year"], int(pub.coverDate[:4]))
d[focal]["first_year"] = first_year
return d
def expand_affiliation(df):
"""Auxiliary function to expand the information about the affiliation
in publications from ScopusSearch.
"""
from pandas import Series
res = df[["source_id", "author_ids", "afid"]].copy()
res['afid'] = res["afid"].str.split(';')
res = (res["afid"].apply(Series)
.merge(res, right_index=True, left_index=True)
.drop(["afid"], axis=1)
.melt(id_vars=['source_id', 'author_ids'], value_name="afid")
.drop("variable", axis=1)
.dropna())
res['afid'] = res['afid'].astype(float)
return res
def flat_set_from_df(df, col, condition=None):
"""Flatten Series from DataFrame which contains lists and
return as set, optionally after filtering the DataFrame.
"""
if condition is not None:
df = df[condition]
lists = df[col].tolist()
return set([item for sublist in lists for item in sublist])
def handle_scopus_connectivity(func):
"""A decorator to handle errors returned by scopus.
Causes a querying function to attempt to access the server
`QUERY_MAX_TRIES` more times. Circumvents the problem of unreplicable
errors, such as missing cursor or unresponsive server.
"""
from urllib.error import HTTPError
from pybliometrics.scopus.exception import Scopus500Error
@functools.wraps(func)
def try_query(*args, **kwargs):
tries = 1
while tries <= QUERY_MAX_TRIES:
try:
return func(*args, **kwargs)
except (Scopus500Error, KeyError, HTTPError):
sleep(2.0)
tries += 1
continue
text = f"Max number of query attempts reached: {QUERY_MAX_TRIES}.\n"\
"Verify your connection and settings or wait for the Scopus"\
"server to return responsive."
raise AttemptFailed(text)
return try_query
def robust_join(s, sep=','):
"""Join an iterable converting each element to str first."""
return sep.join([str(e) for e in s])
def margin_range(base, val):
"""Create a range of margins around a base value.
Parameters
----------
base : int
The value around which a margin should be created.
val : int or float
The margin size. If float, val will be interpreted as percentage.
Returns
-------
r : range
A range object representing the margin range.
"""
from math import ceil
if isinstance(val, float):
margin = ceil(val * base)
r = range(base - margin, base + margin + 1)
elif isinstance(val, int):
r = range(base - val, base + val + 1)
else:
raise Exception("Value must be either float or int.")
return r
| import functools
from time import sleep
from sosia.processing.constants import QUERY_MAX_TRIES
class AttemptFailed(Exception):
pass
def build_dict(results, chunk):
"""Create dictionary assigning publication information to authors we
are looking for.
"""
from math import inf
from collections import defaultdict
chunk = [int(au) for au in chunk]
d = defaultdict(
lambda: {"first_year": inf, "pubs": set(), "coauth": set(),
"n_coauth": inf, "n_pubs": inf})
for pub in results:
if not pub.author_ids:
continue
authors = set([int(au) for au in pub.author_ids.split(";")])
for focal in authors.intersection(chunk):
d[focal]["coauth"].update(authors)
d[focal]["coauth"].remove(focal)
d[focal]["pubs"].add(pub.eid)
d[focal]["n_pubs"] = len(d[focal]["pubs"])
d[focal]["n_coauth"] = len(d[focal]["coauth"])
if not pub.coverDate:
continue
first_year = min(d[focal]["first_year"], int(pub.coverDate[:4]))
d[focal]["first_year"] = first_year
return d
def expand_affiliation(df):
"""Auxiliary function to expand the information about the affiliation
in publications from ScopusSearch.
"""
from pandas import Series
res = df[["source_id", "author_ids", "afid"]].copy()
res['afid'] = res["afid"].str.split(';')
res = (res["afid"].apply(Series)
.merge(res, right_index=True, left_index=True)
.drop(["afid"], axis=1)
.melt(id_vars=['source_id', 'author_ids'], value_name="afid")
.drop("variable", axis=1)
.dropna())
res['afid'] = res['afid'].astype(float)
return res
def flat_set_from_df(df, col, condition=None):
"""Flatten Series from DataFrame which contains lists and
return as set, optionally after filtering the DataFrame.
"""
if condition is not None:
df = df[condition]
lists = df[col].tolist()
return set([item for sublist in lists for item in sublist])
def handle_scopus_connectivity(func):
"""A decorator to handle errors returned by scopus.
Causes a querying function to attempt to access the server
`QUERY_MAX_TRIES` more times. Circumvents the problem of unreplicable
errors, such as missing cursor or unresponsive server.
"""
from urllib.error import HTTPError
from pybliometrics.scopus.exception import Scopus500Error
@functools.wraps(func)
def try_query(*args, **kwargs):
tries = 1
while tries <= QUERY_MAX_TRIES:
try:
return func(*args, **kwargs)
except (Scopus500Error, KeyError, HTTPError):
sleep(2.0)
tries += 1
continue
text = f"Max number of query attempts reached: {QUERY_MAX_TRIES}.\n"\
"Verify your connection and settings or wait for the Scopus"\
"server to return responsive."
raise AttemptFailed(text)
return try_query
def robust_join(s, sep=','):
"""Join an iterable converting each element to str first."""
return sep.join([str(e) for e in s])
def margin_range(base, val):
"""Create a range of margins around a base value.
Parameters
----------
base : int
The value around which a margin should be created.
val : int or float
The margin size. If float, val will be interpreted as percentage.
Returns
-------
r : range
A range object representing the margin range.
"""
from math import ceil
if isinstance(val, float):
margin = ceil(val * base)
r = range(base - margin, base + margin + 1)
elif isinstance(val, int):
r = range(base - val, base + val + 1)
else:
raise Exception("Value must be either float or int.")
return r
| en | 0.792797 | Create dictionary assigning publication information to authors we are looking for. Auxiliary function to expand the information about the affiliation in publications from ScopusSearch. Flatten Series from DataFrame which contains lists and return as set, optionally after filtering the DataFrame. A decorator to handle errors returned by scopus. Causes a querying function to attempt to access the server `QUERY_MAX_TRIES` more times. Circumvents the problem of unreplicable errors, such as missing cursor or unresponsive server. Join an iterable converting each element to str first. Create a range of margins around a base value. Parameters ---------- base : int The value around which a margin should be created. val : int or float The margin size. If float, val will be interpreted as percentage. Returns ------- r : range A range object representing the margin range. | 2.370114 | 2 |
example/controller/tests/model/exception/occure.py | donghak-shin/dp-tornado | 18 | 6618327 | # -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class OccureController(Controller):
def get(self):
raise Exception('Intened Exception')
| # -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class OccureController(Controller):
def get(self):
raise Exception('Intened Exception')
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.743441 | 2 |
example-bots/python/game/logic/random_diamond.py | sandbergsimon42/diamonds2 | 0 | 6618328 | import random
from ..util import get_direction, position_equals
class RandomDiamondLogic(object):
def __init__(self):
self.goal_position = None
def next_move(self, board_bot, board):
props = board_bot["properties"]
current_position = board_bot["position"]
print (type(board))
for thingy in board.gameObjects:
if (thingy["type"]=="DiamondButtonGameObject"):
self.goal_position = thingy.get("position")
for thingy1 in board.gameObjects:
if (thingy1["type"]=="BotGameObject"):
if thingy1["properties"]["name"]=="robot":
#print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n WOOOOOOOOOOO \n\n\n\n\n\n\n\n\n")
if thingy1["properties"]["diamonds"] != 5:
print ("Ej 5 diamanter")
if self.goal_position["x"] != 0:
self.goal_position["x"] -= 1
else:
self.goal_position["x"] += 1
if self.goal_position:
# Calculate move according to goal position
delta_x, delta_y = get_direction(
current_position["x"],
current_position["y"],
self.goal_position["x"],
self.goal_position["y"],
)
("<NAME>")
if (delta_x == 0 and delta_y == 0 ):
delta_y += 1
return delta_x, delta_y
print ("BORDE <NAME>")
return 0, 0
"""
{"id":794523,"position":{"x":5,"y":1},"type":"DiamondButtonGameObject","properties":null}
,{"id":793402,"position":{"x":8,"y":11},"type":"BotGameObject","properties":{"diamonds":2,"score":37,"name":"robot","inventorySize":5,"millisecondsLeft":5563,"timeJoined":"2020-05-10T23:34:10.331Z","base":{"x":4,"y":10}}}
"""
| import random
from ..util import get_direction, position_equals
class RandomDiamondLogic(object):
def __init__(self):
self.goal_position = None
def next_move(self, board_bot, board):
props = board_bot["properties"]
current_position = board_bot["position"]
print (type(board))
for thingy in board.gameObjects:
if (thingy["type"]=="DiamondButtonGameObject"):
self.goal_position = thingy.get("position")
for thingy1 in board.gameObjects:
if (thingy1["type"]=="BotGameObject"):
if thingy1["properties"]["name"]=="robot":
#print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n WOOOOOOOOOOO \n\n\n\n\n\n\n\n\n")
if thingy1["properties"]["diamonds"] != 5:
print ("Ej 5 diamanter")
if self.goal_position["x"] != 0:
self.goal_position["x"] -= 1
else:
self.goal_position["x"] += 1
if self.goal_position:
# Calculate move according to goal position
delta_x, delta_y = get_direction(
current_position["x"],
current_position["y"],
self.goal_position["x"],
self.goal_position["y"],
)
("<NAME>")
if (delta_x == 0 and delta_y == 0 ):
delta_y += 1
return delta_x, delta_y
print ("BORDE <NAME>")
return 0, 0
"""
{"id":794523,"position":{"x":5,"y":1},"type":"DiamondButtonGameObject","properties":null}
,{"id":793402,"position":{"x":8,"y":11},"type":"BotGameObject","properties":{"diamonds":2,"score":37,"name":"robot","inventorySize":5,"millisecondsLeft":5563,"timeJoined":"2020-05-10T23:34:10.331Z","base":{"x":4,"y":10}}}
"""
| en | 0.612347 | #print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n WOOOOOOOOOOO \n\n\n\n\n\n\n\n\n") # Calculate move according to goal position {"id":794523,"position":{"x":5,"y":1},"type":"DiamondButtonGameObject","properties":null} ,{"id":793402,"position":{"x":8,"y":11},"type":"BotGameObject","properties":{"diamonds":2,"score":37,"name":"robot","inventorySize":5,"millisecondsLeft":5563,"timeJoined":"2020-05-10T23:34:10.331Z","base":{"x":4,"y":10}}} | 3.111704 | 3 |
invoice/forms.py | NumanIbnMazid/invoice_management | 0 | 6618329 | <reponame>NumanIbnMazid/invoice_management
from django import forms
from utils.mixins import CustomModelForm
from .models import Invoice
from deals.models import Coupon, Vat
from company.models import Company
from service.models import Service
from django_select2.forms import ModelSelect2MultipleWidget, ModelSelect2Widget
class InvoiceManageForm(CustomModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
self.object = kwargs.pop('object', None)
super(InvoiceManageForm, self).__init__(*args, **kwargs)
if self.object and len(self.object.service.all()) > 0:
self.initial['company'] = self.object.service.first().company
try:
company = forms.ModelChoiceField(
queryset=Company.objects.filter(is_active=True),
label=u"Company",
empty_label="Select Company...",
widget=ModelSelect2Widget(
model=Company,
search_fields=['name__icontains'],
max_results=500,
attrs={'data-minimum-input-length': '0'}
)
)
service = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
label=u"Service",
widget=ModelSelect2MultipleWidget(
model=Service,
search_fields=['name__icontains', 'company__name__icontains'],
dependent_fields={'company': 'company'},
max_results=500,
attrs={'data-minimum-input-length': '0'}
)
)
coupon = forms.ModelChoiceField(
queryset=Coupon.objects.filter(is_active=True),
required=False,
label=u"Coupon",
empty_label="Select Coupon...",
widget=ModelSelect2Widget(
model=Coupon,
search_fields=['code__icontains', 'discount_amount__icontains'],
max_results=500,
attrs={'data-minimum-input-length': '0'}
)
)
vat = forms.ModelChoiceField(
queryset=Vat.objects.filter(is_active=True),
required=False,
initial=Vat.objects.filter(is_active=True).last(),
label=u"Vat",
empty_label="Select Vat..."
)
except Exception as e:
print("*********** Exception: Invoice->forms.py: ", e, "***********")
class Meta:
model = Invoice
fields = ("company", "service", "coupon", "vat", "additional_charge", "card_number", "status")
exclude = ('slug', 'total_cost', 'created_at', 'created_at')
def clean_company(self):
company = self.cleaned_data.get('company')
if not company:
raise forms.ValidationError("Company is required!!")
return company
def clean_service(self):
service = self.cleaned_data.get('service')
if not service == None:
company_ids = service.all().values_list('company', flat=True)
if len(set(company_ids)) > 1:
raise forms.ValidationError("Services should belong to same company!")
return service
def clean_coupon(self):
coupon = self.cleaned_data.get('coupon')
if not coupon == None:
qs = Coupon.objects.filter(code__iexact=coupon.code)
if not qs:
raise forms.ValidationError("Invalid Coupon!")
if qs and not qs.last().is_active:
raise forms.ValidationError("Coupon Expired!")
return coupon
def clean_vat(self):
vat = self.cleaned_data.get('vat')
if not vat == None:
qs = Vat.objects.filter(slug__iexact=vat.slug)
if not qs:
raise forms.ValidationError("Vat not exists!")
if qs and not qs.last().is_active:
raise forms.ValidationError("Vat is not active!")
return vat | from django import forms
from utils.mixins import CustomModelForm
from .models import Invoice
from deals.models import Coupon, Vat
from company.models import Company
from service.models import Service
from django_select2.forms import ModelSelect2MultipleWidget, ModelSelect2Widget
class InvoiceManageForm(CustomModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
self.object = kwargs.pop('object', None)
super(InvoiceManageForm, self).__init__(*args, **kwargs)
if self.object and len(self.object.service.all()) > 0:
self.initial['company'] = self.object.service.first().company
try:
company = forms.ModelChoiceField(
queryset=Company.objects.filter(is_active=True),
label=u"Company",
empty_label="Select Company...",
widget=ModelSelect2Widget(
model=Company,
search_fields=['name__icontains'],
max_results=500,
attrs={'data-minimum-input-length': '0'}
)
)
service = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
label=u"Service",
widget=ModelSelect2MultipleWidget(
model=Service,
search_fields=['name__icontains', 'company__name__icontains'],
dependent_fields={'company': 'company'},
max_results=500,
attrs={'data-minimum-input-length': '0'}
)
)
coupon = forms.ModelChoiceField(
queryset=Coupon.objects.filter(is_active=True),
required=False,
label=u"Coupon",
empty_label="Select Coupon...",
widget=ModelSelect2Widget(
model=Coupon,
search_fields=['code__icontains', 'discount_amount__icontains'],
max_results=500,
attrs={'data-minimum-input-length': '0'}
)
)
vat = forms.ModelChoiceField(
queryset=Vat.objects.filter(is_active=True),
required=False,
initial=Vat.objects.filter(is_active=True).last(),
label=u"Vat",
empty_label="Select Vat..."
)
except Exception as e:
print("*********** Exception: Invoice->forms.py: ", e, "***********")
class Meta:
model = Invoice
fields = ("company", "service", "coupon", "vat", "additional_charge", "card_number", "status")
exclude = ('slug', 'total_cost', 'created_at', 'created_at')
def clean_company(self):
company = self.cleaned_data.get('company')
if not company:
raise forms.ValidationError("Company is required!!")
return company
def clean_service(self):
service = self.cleaned_data.get('service')
if not service == None:
company_ids = service.all().values_list('company', flat=True)
if len(set(company_ids)) > 1:
raise forms.ValidationError("Services should belong to same company!")
return service
def clean_coupon(self):
coupon = self.cleaned_data.get('coupon')
if not coupon == None:
qs = Coupon.objects.filter(code__iexact=coupon.code)
if not qs:
raise forms.ValidationError("Invalid Coupon!")
if qs and not qs.last().is_active:
raise forms.ValidationError("Coupon Expired!")
return coupon
def clean_vat(self):
vat = self.cleaned_data.get('vat')
if not vat == None:
qs = Vat.objects.filter(slug__iexact=vat.slug)
if not qs:
raise forms.ValidationError("Vat not exists!")
if qs and not qs.last().is_active:
raise forms.ValidationError("Vat is not active!")
return vat | none | 1 | 2.127677 | 2 | |
06. Binary Search/BinarySearch_Lab.py | emilia98/Python-Algorithms | 0 | 6618330 | from random import randint
# Returns index of x in array. If not found, returns -1
def binarySearch(arr, x):
print(' l r mid arr[mid] x =', x)
print('======================================')
# left border of interval
l = 0
# right border of interval
r = len(arr) - 1
# while left border of interval is before the right border of interval
while l <= r:
# find the index of middle element in the subarray
mid = (l + r) // 2
print(f'{l:2} {r:2} {mid:2} {arr[mid]:2} ', end='')
# if x is smaller than the middle element in the array
# we move the right border with one position
# on the left of the middle element
if x < arr[mid]:
print(x, '<', arr[mid])
r = mid - 1
# if x is bigger than the middle element in the array
# we move the left border with one position
# on the right of the middle element
elif x > arr[mid]:
print(x, '>', arr[mid])
l = mid + 1
# if the element x is equal of the middle element
# in the array, we return the index
# of the middle element
else:
print(x, '=', arr[mid])
return mid
print(f'{l:2} {r:2}')
# if the searched element x had not found in the given array
# the returned result is -1
return -1
n = int(input('n = '))
arr = [randint(10, 99) for i in range(n)]
arr.sort()
print(arr)
x = int(input('x = '))
print()
for i in range(n):
print(f'{i:2} ', end='')
print()
for i in range(n):
print(f'{arr[i]:2} ', end='')
print()
print()
# calling the function, which finds if the
# searched element is found
mid = binarySearch(arr, x)
if mid >= 0:
print('Found at position', mid)
else:
print('Not found') | from random import randint
# Returns index of x in array. If not found, returns -1
def binarySearch(arr, x):
print(' l r mid arr[mid] x =', x)
print('======================================')
# left border of interval
l = 0
# right border of interval
r = len(arr) - 1
# while left border of interval is before the right border of interval
while l <= r:
# find the index of middle element in the subarray
mid = (l + r) // 2
print(f'{l:2} {r:2} {mid:2} {arr[mid]:2} ', end='')
# if x is smaller than the middle element in the array
# we move the right border with one position
# on the left of the middle element
if x < arr[mid]:
print(x, '<', arr[mid])
r = mid - 1
# if x is bigger than the middle element in the array
# we move the left border with one position
# on the right of the middle element
elif x > arr[mid]:
print(x, '>', arr[mid])
l = mid + 1
# if the element x is equal of the middle element
# in the array, we return the index
# of the middle element
else:
print(x, '=', arr[mid])
return mid
print(f'{l:2} {r:2}')
# if the searched element x had not found in the given array
# the returned result is -1
return -1
n = int(input('n = '))
arr = [randint(10, 99) for i in range(n)]
arr.sort()
print(arr)
x = int(input('x = '))
print()
for i in range(n):
print(f'{i:2} ', end='')
print()
for i in range(n):
print(f'{arr[i]:2} ', end='')
print()
print()
# calling the function, which finds if the
# searched element is found
mid = binarySearch(arr, x)
if mid >= 0:
print('Found at position', mid)
else:
print('Not found') | en | 0.756387 | # Returns index of x in array. If not found, returns -1 # left border of interval # right border of interval # while left border of interval is before the right border of interval # find the index of middle element in the subarray # if x is smaller than the middle element in the array # we move the right border with one position # on the left of the middle element # if x is bigger than the middle element in the array # we move the left border with one position # on the right of the middle element # if the element x is equal of the middle element # in the array, we return the index # of the middle element # if the searched element x had not found in the given array # the returned result is -1 # calling the function, which finds if the # searched element is found | 4.024046 | 4 |
evkit/utils/misc.py | jozhang97/Side-tuning | 56 | 6618331 | <gh_stars>10-100
import collections
import torch
import pprint
import string
from evkit.preprocess.transforms import rescale_centercrop_resize, rescale, grayscale_rescale, cross_modal_transform, \
identity_transform, rescale_centercrop_resize_collated, map_pool_collated, map_pool, taskonomy_features_transform, \
image_to_input_collated, taskonomy_multi_features_transform
from evkit.models.alexnet import alexnet_transform, alexnet_features_transform
from evkit.preprocess.baseline_transforms import blind, pixels_as_state
from evkit.models.srl_architectures import srl_features_transform
import warnings
remove_whitespace = str.maketrans('', '', string.whitespace)
def cfg_to_md(cfg, uuid):
''' Because tensorboard uses markdown'''
return uuid + "\n\n " + pprint.pformat((cfg)).replace("\n", " \n").replace("\n \'", "\n \'") + ""
def count_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_total_parameters(model):
return sum(p.numel() for p in model.parameters())
def is_interactive():
try:
ip = get_ipython()
return ip.has_trait('kernel')
except:
return False
def is_cuda(model):
return next(model.parameters()).is_cuda
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
self._keys, self._vals = zip(*adict.items())
self._keys, self._vals = list(self._keys), list(self._vals)
def keys(self):
return self._keys
def vals(self):
return self._vals
def compute_weight_norm(parameters):
''' no grads! '''
total = 0.0
count = 0
for p in parameters:
total += torch.sum(p.data**2)
# total += p.numel()
count += p.numel()
return (total / count)
def get_number(name):
"""
use regex to get the first integer in the name
if none exists, return -1
"""
try:
num = int(re.findall("[0-9]+", name)[0])
except:
num = -1
return num
def append_dict(d, u, stop_recurse_keys=[]):
for k, v in u.items():
if isinstance(v, collections.Mapping) and k not in stop_recurse_keys:
d[k] = append_dict(d.get(k, {}), v, stop_recurse_keys=stop_recurse_keys)
else:
if k not in d:
d[k] = []
d[k].append(v)
return d
def update_dict_deepcopy(d, u): # we need a deep dictionary update
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = update_dict_deepcopy(d.get(k, {}), v)
else:
d[k] = v
return d
def eval_dict_values(d):
for k in d.keys():
if isinstance(d[k], collections.Mapping):
d[k] = eval_dict_values(d[k])
elif isinstance(d[k], str):
d[k] = eval(d[k].replace("---", "'"))
return d
def search_and_replace_dict(model_kwargs, task_initial):
for k, v in model_kwargs.items():
if isinstance(v, collections.Mapping):
search_and_replace_dict(v, task_initial)
else:
if isinstance(v, str) and 'encoder' in v and task_initial not in v:
new_pth = v.replace('curvature', task_initial) # TODO make this the string between / and encoder
warnings.warn(f'BE CAREFUL - CHANGING ENCODER PATH: {v} is being replaced for {new_pth}')
model_kwargs[k] = new_pth
return
| import collections
import torch
import pprint
import string
from evkit.preprocess.transforms import rescale_centercrop_resize, rescale, grayscale_rescale, cross_modal_transform, \
identity_transform, rescale_centercrop_resize_collated, map_pool_collated, map_pool, taskonomy_features_transform, \
image_to_input_collated, taskonomy_multi_features_transform
from evkit.models.alexnet import alexnet_transform, alexnet_features_transform
from evkit.preprocess.baseline_transforms import blind, pixels_as_state
from evkit.models.srl_architectures import srl_features_transform
import warnings
remove_whitespace = str.maketrans('', '', string.whitespace)
def cfg_to_md(cfg, uuid):
''' Because tensorboard uses markdown'''
return uuid + "\n\n " + pprint.pformat((cfg)).replace("\n", " \n").replace("\n \'", "\n \'") + ""
def count_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_total_parameters(model):
return sum(p.numel() for p in model.parameters())
def is_interactive():
try:
ip = get_ipython()
return ip.has_trait('kernel')
except:
return False
def is_cuda(model):
return next(model.parameters()).is_cuda
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
self._keys, self._vals = zip(*adict.items())
self._keys, self._vals = list(self._keys), list(self._vals)
def keys(self):
return self._keys
def vals(self):
return self._vals
def compute_weight_norm(parameters):
''' no grads! '''
total = 0.0
count = 0
for p in parameters:
total += torch.sum(p.data**2)
# total += p.numel()
count += p.numel()
return (total / count)
def get_number(name):
"""
use regex to get the first integer in the name
if none exists, return -1
"""
try:
num = int(re.findall("[0-9]+", name)[0])
except:
num = -1
return num
def append_dict(d, u, stop_recurse_keys=[]):
for k, v in u.items():
if isinstance(v, collections.Mapping) and k not in stop_recurse_keys:
d[k] = append_dict(d.get(k, {}), v, stop_recurse_keys=stop_recurse_keys)
else:
if k not in d:
d[k] = []
d[k].append(v)
return d
def update_dict_deepcopy(d, u): # we need a deep dictionary update
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = update_dict_deepcopy(d.get(k, {}), v)
else:
d[k] = v
return d
def eval_dict_values(d):
for k in d.keys():
if isinstance(d[k], collections.Mapping):
d[k] = eval_dict_values(d[k])
elif isinstance(d[k], str):
d[k] = eval(d[k].replace("---", "'"))
return d
def search_and_replace_dict(model_kwargs, task_initial):
for k, v in model_kwargs.items():
if isinstance(v, collections.Mapping):
search_and_replace_dict(v, task_initial)
else:
if isinstance(v, str) and 'encoder' in v and task_initial not in v:
new_pth = v.replace('curvature', task_initial) # TODO make this the string between / and encoder
warnings.warn(f'BE CAREFUL - CHANGING ENCODER PATH: {v} is being replaced for {new_pth}')
model_kwargs[k] = new_pth
return | en | 0.551793 | Because tensorboard uses markdown no grads! # total += p.numel() use regex to get the first integer in the name if none exists, return -1 # we need a deep dictionary update # TODO make this the string between / and encoder | 2.052978 | 2 |
shs/gui/sshutils.py | ansobolev/shs | 1 | 6618332 | <reponame>ansobolev/shs<filename>shs/gui/sshutils.py
'''
#
# This file is a part of Siesta Help Scripts GUI
#
# (c) <NAME>, 2013
#
Created on 03.04.2013
@author: andrey
'''
import os, paramiko
def getMount(path):
path = os.path.realpath(os.path.abspath(path))
while path != os.path.sep:
if os.path.ismount(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
return path
def getDevice(path):
"Get the device mounted at path"
# uses "/proc/mounts"
pathname = os.path.normcase(path) # might be unnecessary here
try:
with open("/proc/mounts", "r") as ifp:
for line in ifp:
fields= line.rstrip('\n').split()
# note that line above assumes that
# no mount points contain whitespace
if fields[1] == pathname:
return fields[0], fields[2]
except EnvironmentError:
pass
return None # explicit
def getSSHClient(host, user):
'Returns paramiko ssh client'
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user)
return ssh
def runCommand(ssh, cmd):
_, stdout, stderr = ssh.exec_command(cmd)
return stdout, stderr
def findExecutable(ssh, filename):
stdout, _ = runCommand(ssh, 'which ' + filename)
return len(stdout.readlines()) != 0
def getQueue(ssh):
'Returns queue system implemented on a remote cluster'
if findExecutable(ssh, 'qstat'):
return 'pbs'
elif findExecutable(ssh, 'sinfo'):
return 'slurm'
else:
return None
def copyFile(ssh, filename, localdir, remotedir):
'Copies a file filename from localdir to remotedir'
sftp = ssh.open_sftp()
localpath = os.path.join(localdir, filename)
remotepath = os.path.join(remotedir, filename)
sftp.put(localpath, remotepath)
return sftp
def removeFile(sftp, remotefile):
'Removes a file at given remotepath'
sftp.remove(remotefile)
def getRemoteDir(localdir, localmpath, remotempath):
''' Gets remote path of a directory mounted on local machine
Input:
-> localdir : a directory mounted on local machine
-> localmpath : a mountpoint of a directory on a local machine
-> remotempath : a directory on a remote machine which is mounted at localmpath
'''
return localdir.replace(localmpath, remotempath)
if __name__ == '__main__':
host = 'tornado.susu.ac.ru'
user = 'physics'
ssh = getSSHClient(host, user)
| '''
#
# This file is a part of Siesta Help Scripts GUI
#
# (c) <NAME>, 2013
#
Created on 03.04.2013
@author: andrey
'''
import os, paramiko
def getMount(path):
path = os.path.realpath(os.path.abspath(path))
while path != os.path.sep:
if os.path.ismount(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
return path
def getDevice(path):
"Get the device mounted at path"
# uses "/proc/mounts"
pathname = os.path.normcase(path) # might be unnecessary here
try:
with open("/proc/mounts", "r") as ifp:
for line in ifp:
fields= line.rstrip('\n').split()
# note that line above assumes that
# no mount points contain whitespace
if fields[1] == pathname:
return fields[0], fields[2]
except EnvironmentError:
pass
return None # explicit
def getSSHClient(host, user):
'Returns paramiko ssh client'
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user)
return ssh
def runCommand(ssh, cmd):
_, stdout, stderr = ssh.exec_command(cmd)
return stdout, stderr
def findExecutable(ssh, filename):
stdout, _ = runCommand(ssh, 'which ' + filename)
return len(stdout.readlines()) != 0
def getQueue(ssh):
'Returns queue system implemented on a remote cluster'
if findExecutable(ssh, 'qstat'):
return 'pbs'
elif findExecutable(ssh, 'sinfo'):
return 'slurm'
else:
return None
def copyFile(ssh, filename, localdir, remotedir):
'Copies a file filename from localdir to remotedir'
sftp = ssh.open_sftp()
localpath = os.path.join(localdir, filename)
remotepath = os.path.join(remotedir, filename)
sftp.put(localpath, remotepath)
return sftp
def removeFile(sftp, remotefile):
'Removes a file at given remotepath'
sftp.remove(remotefile)
def getRemoteDir(localdir, localmpath, remotempath):
''' Gets remote path of a directory mounted on local machine
Input:
-> localdir : a directory mounted on local machine
-> localmpath : a mountpoint of a directory on a local machine
-> remotempath : a directory on a remote machine which is mounted at localmpath
'''
return localdir.replace(localmpath, remotempath)
if __name__ == '__main__':
host = 'tornado.susu.ac.ru'
user = 'physics'
ssh = getSSHClient(host, user) | en | 0.747718 | # # This file is a part of Siesta Help Scripts GUI # # (c) <NAME>, 2013 # Created on 03.04.2013 @author: andrey # uses "/proc/mounts" # might be unnecessary here # note that line above assumes that # no mount points contain whitespace # explicit Gets remote path of a directory mounted on local machine Input: -> localdir : a directory mounted on local machine -> localmpath : a mountpoint of a directory on a local machine -> remotempath : a directory on a remote machine which is mounted at localmpath | 2.111623 | 2 |
src/RBF_warp.py | michaelStettler/FacialRetargeting | 21 | 6618333 | <gh_stars>10-100
import numpy as np
np.set_printoptions(precision=2, linewidth=200)
def rbf_kernel(k, k_prime):
    """
    Compute the pairwise Euclidean distance matrix between two point sets,
    K[i, j] = ||k[i] - k_prime[j]||, i.e. the RBF kernel U(r) = |r|.

    Uses the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.T @ y.
    np.abs guards against tiny negative values produced by floating-point
    round-off before the square root.

    :param k: (n x m) array of n points in m dimensions
    :param k_prime: (p x m) array of p points in m dimensions
    :return: K, (n x p) matrix of pairwise distances
    """
    k_norm = np.sum(k ** 2, axis=-1)
    k_prime_norm = np.sum(k_prime ** 2, axis=-1)
    # Bug fix: the cross term must be k @ k_prime.T. The previous
    # np.dot(k, k.T) was only correct when k_prime was k itself and broke
    # broadcasting for point sets of different sizes.
    K = np.sqrt(np.abs(k_norm[:, None] + k_prime_norm[None, :] - 2 * np.dot(k, k_prime.T)))
    return K
def rbf_warp(p, q):
"""
RBF warping function to initialize the Actor Blendshape for the implementation of the paper:
"Facial Retargeting with Automatic Range of Motion Alignment" (Ribera et al. 2017)
The warping function follows the implementation from:
"Transferring the Rig and Animations from a Character to Different Face Models" (Orvalho et al. 2008)
by solving a linear function:
ax = b
with a = [K P; P.T 0] -> ((n+4)x(n+4))
x = [W A].T
b = [Q 0].T
K is the RBF kernel U(x-p) = |x - p|
:param p: n landmarks positions matrix (xyz) -> (nx3)
:param q: n target positions matrix (xyz) -> (nx3)
:return: W, A, solved matrix
"""
# get number of lmks
n = np.shape(p)[0]
# declare matrices
P = np.ones((n, 4))
a_zero = np.zeros((4, 4))
Q = q
b_zero = np.zeros((4, 3))
# build rbf kernel
K = rbf_kernel(p, p)
# build P
P[:, 1:] = p
# build final matrices
a = np.concatenate((K, P), axis=1)
a = np.concatenate((a, np.concatenate((P.T, a_zero), axis=1)), axis=0)
b = np.concatenate((Q, b_zero), axis=0)
# solve for ax = b with x = [W A].T
x = np.linalg.solve(a, b)
W = x[:n, :]
A = x[n:, :]
return W, A
def get_initial_actor_blendshapes(s0, a0, delta_sk):
"""
Compute the initial guess actor blendshapes in delta space as explained in 4.4 Geometric Constraint of the paper
k:= num_of_blendshapes
m:= num_of_markers
:param s0: neutral character expression
:param a0: neutral actor expression
:param delta_sk: character blendshapes in delta space
:return: initial guess of actor blendshapes
"""
# compute initial transform of neutral pose
W, A = rbf_warp(s0, a0)
# compute initial guess by transforming each character blendshapes delta_sk
delta_gk = np.zeros(np.shape(delta_sk))
for k in range(np.shape(delta_sk)[0]):
delta_gk[k] = delta_sk[k] + np.multiply(delta_sk[k], W)
return delta_gk
if __name__ == '__main__':
"""
test the following two functions:
- rbf_warp
- get_initial_actor_blendshapes
run: python -m src.RBF_warp
"""
# declare variables
m = 5 # num_markers
np.random.seed(0)
print("---------- test RBF Warp ----------")
# test RBF_warp function
s0 = np.random.rand(m, 3) # random landmarks population
a0 = np.random.rand(m, 3) # random target coordinates
print("s0", np.shape(s0))
print(s0)
print("a0", np.shape(a0))
print(a0)
W, A = rbf_warp(s0, a0)
print("shape W, A", np.shape(W), np.shape(A))
print(W)
print(A)
print()
print("---------- test RBF Warp ----------")
# test get_initial_actor_blendshapes
K = 4
delta_sk = np.random.rand(K, m, 3)
delta_gk = get_initial_actor_blendshapes(s0, a0, delta_sk)
print("shape gk", np.shape(delta_gk)) | import numpy as np
np.set_printoptions(precision=2, linewidth=200)
def rbf_kernel(k, k_prime):
"""
compute the L2 norm between k and k_prime ||k - k_prime||
using the fact that ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.T*y
:param k: (n x m) vector
:param k_prime: (n x m) vector
:return: K (n x n) matrix
"""
k_norm = np.sum(k ** 2, axis=-1)
k_prime_norm = np.sum(k_prime ** 2, axis=-1)
K = np.sqrt(np.abs((k_norm[:, None] + k_prime_norm[None, :] - 2 * np.dot(k, k.T))))
return K
def rbf_warp(p, q):
"""
RBF warping function to initialize the Actor Blendshape for the implementation of the paper:
"Facial Retargeting with Automatic Range of Motion Alignment" (Ribera et al. 2017)
The warping function follows the implementation from:
"Transferring the Rig and Animations from a Character to Different Face Models" (Orvalho et al. 2008)
by solving a linear function:
ax = b
with a = [K P; P.T 0] -> ((n+4)x(n+4))
x = [W A].T
b = [Q 0].T
K is the RBF kernel U(x-p) = |x - p|
:param p: n landmarks positions matrix (xyz) -> (nx3)
:param q: n target positions matrix (xyz) -> (nx3)
:return: W, A, solved matrix
"""
# get number of lmks
n = np.shape(p)[0]
# declare matrices
P = np.ones((n, 4))
a_zero = np.zeros((4, 4))
Q = q
b_zero = np.zeros((4, 3))
# build rbf kernel
K = rbf_kernel(p, p)
# build P
P[:, 1:] = p
# build final matrices
a = np.concatenate((K, P), axis=1)
a = np.concatenate((a, np.concatenate((P.T, a_zero), axis=1)), axis=0)
b = np.concatenate((Q, b_zero), axis=0)
# solve for ax = b with x = [W A].T
x = np.linalg.solve(a, b)
W = x[:n, :]
A = x[n:, :]
return W, A
def get_initial_actor_blendshapes(s0, a0, delta_sk):
"""
Compute the initial guess actor blendshapes in delta space as explained in 4.4 Geometric Constraint of the paper
k:= num_of_blendshapes
m:= num_of_markers
:param s0: neutral character expression
:param a0: neutral actor expression
:param delta_sk: character blendshapes in delta space
:return: initial guess of actor blendshapes
"""
# compute initial transform of neutral pose
W, A = rbf_warp(s0, a0)
# compute initial guess by transforming each character blendshapes delta_sk
delta_gk = np.zeros(np.shape(delta_sk))
for k in range(np.shape(delta_sk)[0]):
delta_gk[k] = delta_sk[k] + np.multiply(delta_sk[k], W)
return delta_gk
if __name__ == '__main__':
"""
test the following two functions:
- rbf_warp
- get_initial_actor_blendshapes
run: python -m src.RBF_warp
"""
# declare variables
m = 5 # num_markers
np.random.seed(0)
print("---------- test RBF Warp ----------")
# test RBF_warp function
s0 = np.random.rand(m, 3) # random landmarks population
a0 = np.random.rand(m, 3) # random target coordinates
print("s0", np.shape(s0))
print(s0)
print("a0", np.shape(a0))
print(a0)
W, A = rbf_warp(s0, a0)
print("shape W, A", np.shape(W), np.shape(A))
print(W)
print(A)
print()
print("---------- test RBF Warp ----------")
# test get_initial_actor_blendshapes
K = 4
delta_sk = np.random.rand(K, m, 3)
delta_gk = get_initial_actor_blendshapes(s0, a0, delta_sk)
print("shape gk", np.shape(delta_gk)) | en | 0.746074 | compute the L2 norm between k and k_prime ||k - k_prime|| using the fact that ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.T*y :param k: (n x m) vector :param k_prime: (n x m) vector :return: K (n x n) matrix RBF warping function to initialize the Actor Blendshape for the implementation of the paper: "Facial Retargeting with Automatic Range of Motion Alignment" (Ribera et al. 2017) The warping function follows the implementation from: "Transferring the Rig and Animations from a Character to Different Face Models" (Orvalho et al. 2008) by solving a linear function: ax = b with a = [K P; P.T 0] -> ((n+4)x(n+4)) x = [W A].T b = [Q 0].T K is the RBF kernel U(x-p) = |x - p| :param p: n landmarks positions matrix (xyz) -> (nx3) :param q: n target positions matrix (xyz) -> (nx3) :return: W, A, solved matrix # get number of lmks # declare matrices # build rbf kernel # build P # build final matrices # solve for ax = b with x = [W A].T Compute the initial guess actor blendshapes in delta space as explained in 4.4 Geometric Constraint of the paper k:= num_of_blendshapes m:= num_of_markers :param s0: neutral character expression :param a0: neutral actor expression :param delta_sk: character blendshapes in delta space :return: initial guess of actor blendshapes # compute initial transform of neutral pose # compute initial guess by transforming each character blendshapes delta_sk test the following two functions: - rbf_warp - get_initial_actor_blendshapes run: python -m src.RBF_warp # declare variables # num_markers # test RBF_warp function # random landmarks population # random target coordinates # test get_initial_actor_blendshapes | 3.205947 | 3 |
src/config/api-server/tests/test_subnet_ip_count_with_ui.py | biswajit-mandal/contrail-controller | 5 | 6618334 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from vnc_api.vnc_api import *
import uuid
def all(ip='10.84.9.45', port=8082, domain_name='my-domain',
proj_name='admin', subnet1='192.168.1.0', subnet2='10.10.1.0',
prefix1=30, prefix2=29, vn_name='vn1',
compute_node='a3s45.contrail.juniper.net'):
vnc_lib = VncApi(username='admin', password='<PASSWORD>',
tenant_name='admin', api_server_host=ip,
api_server_port=port)
# This test needs VN to be creeated using Horizon and then create
# instances to get ip address from various IP Block in this VN
# then copy vn's uunid in the next call. This uuid is for example
print 'Read Virtual Network object '
net_obj = vnc_lib.virtual_network_read(
id='58398587-5747-475e-b394-583187eeb930')
print 'Read no of instance ip for each subnet'
print '["192.168.1.0/30", "10.10.1.0/29"]'
subnet_list = ["192.168.1.0/30", "10.10.1.0/29"]
result = vnc_lib.virtual_network_subnet_ip_count(net_obj, subnet_list)
print result
# compare result with created instances
if __name__ == '__main__':
all()
| #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from vnc_api.vnc_api import *
import uuid
def all(ip='10.84.9.45', port=8082, domain_name='my-domain',
proj_name='admin', subnet1='192.168.1.0', subnet2='10.10.1.0',
prefix1=30, prefix2=29, vn_name='vn1',
compute_node='a3s45.contrail.juniper.net'):
vnc_lib = VncApi(username='admin', password='<PASSWORD>',
tenant_name='admin', api_server_host=ip,
api_server_port=port)
# This test needs VN to be creeated using Horizon and then create
# instances to get ip address from various IP Block in this VN
# then copy vn's uunid in the next call. This uuid is for example
print 'Read Virtual Network object '
net_obj = vnc_lib.virtual_network_read(
id='58398587-5747-475e-b394-583187eeb930')
print 'Read no of instance ip for each subnet'
print '["192.168.1.0/30", "10.10.1.0/29"]'
subnet_list = ["192.168.1.0/30", "10.10.1.0/29"]
result = vnc_lib.virtual_network_subnet_ip_count(net_obj, subnet_list)
print result
# compare result with created instances
if __name__ == '__main__':
all()
| en | 0.872938 | # # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # # This test needs VN to be creeated using Horizon and then create # instances to get ip address from various IP Block in this VN # then copy vn's uunid in the next call. This uuid is for example # compare result with created instances | 2.541668 | 3 |
stopwatch.py | Pergamon256/Stopwatch | 0 | 6618335 | import time
x = int(input("Enter stopwatch minutes: "))
y = int(input("Enter stopwatch seconds: "))
print('Stopwatch started') #You can edit this to display whatever text you want, even include strings using {} and .format()
def stopwatch(m,s):
i=m
j=s
k=0
try:
while True:
if(j == 60):
j = 0
i += 1
if((i > 9) and (j > 9)):
print(str(i)+":"+str(j), end='\r') #Carriage return only works with python 3; end='\r' will not work with python 2.
elif(i > 9):
print(str(i)+":"+str(k)+str(j), end='\r')
elif(j > 9):
print(str(k)+str(i)+":"+str(j), end='\r')
else:
print(str(k)+str(i)+":"+str(k)+str(j), end='\r')
time.sleep(1)
j += 1
except KeyboardInterrupt:
pass
print("", end='\r')
result = stopwatch(x,y)
stopwatch(result) | import time
x = int(input("Enter stopwatch minutes: "))
y = int(input("Enter stopwatch seconds: "))
print('Stopwatch started') #You can edit this to display whatever text you want, even include strings using {} and .format()
def stopwatch(m,s):
i=m
j=s
k=0
try:
while True:
if(j == 60):
j = 0
i += 1
if((i > 9) and (j > 9)):
print(str(i)+":"+str(j), end='\r') #Carriage return only works with python 3; end='\r' will not work with python 2.
elif(i > 9):
print(str(i)+":"+str(k)+str(j), end='\r')
elif(j > 9):
print(str(k)+str(i)+":"+str(j), end='\r')
else:
print(str(k)+str(i)+":"+str(k)+str(j), end='\r')
time.sleep(1)
j += 1
except KeyboardInterrupt:
pass
print("", end='\r')
result = stopwatch(x,y)
stopwatch(result) | en | 0.830199 | #You can edit this to display whatever text you want, even include strings using {} and .format() #Carriage return only works with python 3; end='\r' will not work with python 2. | 3.999826 | 4 |
lib/matching_model.py | JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching | 1 | 6618336 | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, batch_norm=False):
if batch_norm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.1, inplace=True))
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.LeakyReLU(0.1))
def predict_flow(in_planes):
return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True)
def NormMap2D_to_unNormMap2D(NormMap2D):
B, C, H, W = NormMap2D.size()
mapping = torch.zeros_like(NormMap2D)
# mesh grid
mapping[:,0,:,:] = (NormMap2D[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (NormMap2D[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
idx = mapping[:, 0, :, :] + mapping[:,1,:,:] * W
idx = idx.type(torch.cuda.LongTensor)
return idx
#from normalized mapping to unnormalised flow
def unnormalise_and_convert_mapping_to_flow(map):
# here map is normalised to -1;1
# we put it back to 0,W-1, then convert it to flow
B, C, H, W = map.size()
mapping = torch.zeros_like(map)
# mesh grid
mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
# print("map(normalized)")
# print(map[:, 0, 3, 5])
# print("mapping(unnormalized)")
# print(mapping[:, 0, 3, 5])
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if mapping.is_cuda:
grid = grid.cuda()
flow = mapping - grid
return flow
def unnormalise_and_convert_mapping_to_flow_and_grid(map):
# here map is normalised to -1;1
# we put it back to 0,W-1, then convert it to flow
B, C, H, W = map.size()
mapping = torch.zeros_like(map)
# mesh grid
mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
# print("map(normalized)")
# print(map[:, 0, 3, 5])
# print("mapping(unnormalized)")
# print(mapping[:, 0, 3, 5])
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if mapping.is_cuda:
grid = grid.cuda()
flow = mapping - grid
return flow, grid
class CorrelationVolume(nn.Module):
"""
Implementation by <NAME>
paper: https://arxiv.org/abs/1703.05593
project: https://github.com/ignacio-rocco/cnngeometric_pytorch
"""
def __init__(self):
super(CorrelationVolume, self).__init__()
def forward(self, feature_A, feature_B):
b, c, h, w = feature_A.size()
# reshape features for matrix multiplication
feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) # shape (b,c,h*w)
# feature_A = feature_A.view(b, c, h*w).transpose(1,2)
feature_B = feature_B.view(b, c, h * w).transpose(1, 2) # shape (b,h*w,c)
feature_mul = torch.bmm(feature_B, feature_A) # shape (b,h*w,h*w)
correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
# correlation_numpy = correlation_tensor.detach().cpu().numpy()
return correlation_tensor # shape (b,h*w,h,w)
class FeatureL2Norm(nn.Module):
"""
Implementation by <NAME>
paper: https://arxiv.org/abs/1703.05593
project: https://github.com/ignacio-rocco/cnngeometric_pytorch
"""
def __init__(self):
super(FeatureL2Norm, self).__init__()
def forward(self, feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class OpticalFlowEstimator(nn.Module):
def __init__(self, in_channels, batch_norm):
super(OpticalFlowEstimator, self).__init__()
dd = np.cumsum([128,128,96,64,32])
self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_1 = conv(in_channels + dd[0], 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_2 = conv(in_channels + dd[1], 96, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_3 = conv(in_channels + dd[2], 64, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_4 = conv(in_channels + dd[3], 32, kernel_size=3, stride=1, batch_norm=batch_norm)
self.predict_flow = predict_flow(in_channels + dd[4])
def forward(self, x):
# dense net connection
x = torch.cat((self.conv_0(x), x),1)
x = torch.cat((self.conv_1(x), x),1)
x = torch.cat((self.conv_2(x), x),1)
x = torch.cat((self.conv_3(x), x),1)
x = torch.cat((self.conv_4(x), x),1)
flow = self.predict_flow(x)
return x, flow
class OpticalFlowEstimatorNoDenseConnection(nn.Module):
def __init__(self, in_channels, batch_norm):
super(OpticalFlowEstimatorNoDenseConnection, self).__init__()
self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_1 = conv(128, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_2 = conv(128, 96, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_3 = conv(96, 64, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_4 = conv(64, 32, kernel_size=3, stride=1, batch_norm=batch_norm)
self.predict_flow = predict_flow(32)
def forward(self, x):
x = self.conv_4(self.conv_3(self.conv_2(self.conv_1(self.conv_0(x)))))
flow = self.predict_flow(x)
return x, flow
# extracted from DGCNet
def conv_blck(in_channels, out_channels, kernel_size=3,
stride=1, padding=1, dilation=1, bn=False):
if bn:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
else:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation),
nn.ReLU(inplace=True))
def conv_head(in_channels):
return nn.Conv2d(in_channels, 2, kernel_size=3, padding=1)
class CorrespondenceMapBase(nn.Module):
def __init__(self, in_channels, bn=False):
super().__init__()
def forward(self, x1, x2=None, x3=None):
x = x1
# concatenating dimensions
if (x2 is not None) and (x3 is None):
x = torch.cat((x1, x2), 1)
elif (x2 is None) and (x3 is not None):
x = torch.cat((x1, x3), 1)
elif (x2 is not None) and (x3 is not None):
x = torch.cat((x1, x2, x3), 1)
return x
class CMDTop(CorrespondenceMapBase):
def __init__(self, in_channels, bn=False, use_cuda=False):
super().__init__(in_channels, bn)
chan = [128, 128, 96, 64, 32]
self.conv0 = conv_blck(in_channels, chan[0], bn=bn)
self.conv1 = conv_blck(chan[0], chan[1], bn=bn)
self.conv2 = conv_blck(chan[1], chan[2], bn=bn)
self.conv3 = conv_blck(chan[2], chan[3], bn=bn)
self.conv4 = conv_blck(chan[3], chan[4], bn=bn)
self.final = conv_head(chan[-1])
if use_cuda:
self.conv0.cuda()
self.conv1.cuda()
self.conv2.cuda()
self.conv3.cuda()
self.conv4.cuda()
self.final.cuda()
def forward(self, x1, x2=None, x3=None):
x = super().forward(x1, x2, x3)
x = self.conv4(self.conv3(self.conv2(self.conv1(self.conv0(x)))))
return self.final(x)
def warp(x, flo):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output * mask
# return output
def unNormMap1D_to_NormMap2D(idx_B_Avec, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False,
invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if idx_B_Avec.is_cuda else x
batch_size, sz = idx_B_Avec.shape
w = sz // 25
h = w
# fs2: width, fs1: height
if scale == 'centered':
XA, YA = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
# XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
elif scale == 'positive':
XA, YA = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
JA, IA = np.meshgrid(range(w), range(h))
# JB, IB = np.meshgrid(range(w), range(h))
XA, YA = Variable(to_cuda(torch.FloatTensor(XA))), Variable(to_cuda(torch.FloatTensor(YA)))
# XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB)))
JA, IA = Variable(to_cuda(torch.LongTensor(JA).contiguous().view(1, -1))), Variable(to_cuda(torch.LongTensor(IA).contiguous().view(1, -1)))
# JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1)))
iA = IA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
jA = JA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
# iB = IB.expand_as(iA)
# jB = JB.expand_as(jA)
xA=XA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
yA=YA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
# xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
# yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
xA_WTA = xA.contiguous().view(batch_size, 1, h, w)
yA_WTA = yA.contiguous().view(batch_size, 1, h, w)
Map2D_WTA = torch.cat((xA_WTA, yA_WTA), 1).float()
return Map2D_WTA
def unNormMap1D_to_NormMap2D_inLoc(idx_B_Avec,h,w, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False,
invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if idx_B_Avec.is_cuda else x
batch_size, sz = idx_B_Avec.shape
# fs2: width, fs1: height
if scale == 'centered':
XA, YA = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
# XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
elif scale == 'positive':
XA, YA = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
JA, IA = np.meshgrid(range(w), range(h))
# JB, IB = np.meshgrid(range(w), range(h))
XA, YA = Variable(to_cuda(torch.FloatTensor(XA))), Variable(to_cuda(torch.FloatTensor(YA)))
# XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB)))
JA, IA = Variable(to_cuda(torch.LongTensor(JA).contiguous().view(1, -1))), Variable(to_cuda(torch.LongTensor(IA).contiguous().view(1, -1)))
# JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1)))
iA = IA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
jA = JA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
# iB = IB.expand_as(iA)
# jB = JB.expand_as(jA)
xA=XA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
yA=YA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
# xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
# yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
xA_WTA = xA.contiguous().view(batch_size, 1, h, w)
yA_WTA = yA.contiguous().view(batch_size, 1, h, w)
Map2D_WTA = torch.cat((xA_WTA, yA_WTA), 1).float()
return Map2D_WTA
def warp_from_NormMap2D(x, NormMap2D):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
vgrid = NormMap2D.permute(0, 2, 3, 1).contiguous()
output = nn.functional.grid_sample(x, vgrid, align_corners=True) #N,C,H,W
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
#
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output*mask
# return output
def L1_loss(input_flow, target_flow):
L1 = torch.abs(input_flow-target_flow)
L1 = torch.sum(L1, 1)
return L1
def L1_charbonnier_loss(input_flow, target_flow, sparse=False, mean=True, sum=False):
batch_size = input_flow.size(0)
epsilon = 0.01
alpha = 0.4
L1 = L1_loss(input_flow, target_flow)
norm = torch.pow(L1 + epsilon, alpha)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
norm = norm[~mask]
if mean:
return norm.mean()
elif sum:
return norm.sum()
else:
return norm.sum()/batch_size
def EPE(input_flow, target_flow, sparse=False, mean=True, sum=False):
EPE_map = torch.norm(target_flow - input_flow, 2, 1)
# input_flow_np = input_flow.detach().cpu().numpy()
batch_size = EPE_map.size(0)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
EPE_map = EPE_map[~mask]
if mean:
return EPE_map.mean()
elif sum:
return EPE_map.sum()
else:
return EPE_map.sum()/batch_size
def EPE_mask(input_flow, target_flow, mask_num, sparse=False, mean=False, sum=False):
EPE_map = torch.norm(target_flow - input_flow, 2, 1)
# input_flow_np = input_flow.detach().cpu().numpy()
batch_size = EPE_map.size(0)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:, 0] == 0) & (target_flow[:, 1] == 0)
EPE_map = EPE_map[~mask]
if mean:
return EPE_map.mean()
elif sum:
return EPE_map.sum()
else:
return (EPE_map/ mask_num).sum() /batch_size
def multiscaleEPE(Map2D_WTA, Map2D_NET, mask, sparse=False, robust_L1_loss=False, mean=True, sum=False):
# b, _, h, w = output.size()
# if sparse:
# target_scaled = sparse_max_pool(target, (h, w))
#
# if mask is not None:
# mask = sparse_max_pool(mask.float().unsqueeze(1), (h, w))
# else:
# target_scaled = F.interpolate(target, (h, w), mode='bilinear')
if mask is not None:
mask = mask.cuda().detach().byte()
if robust_L1_loss:
if mask is not None:
return L1_charbonnier_loss(output * mask.float(), target_scaled * mask.float(), sparse, mean=mean, sum=False)
else:
return L1_charbonnier_loss(output, target_scaled, sparse, mean=mean, sum=False)
else:
if mask is not None:
eps = 1
src_num_fgnd = mask.sum(dim=3, keepdim=True).sum(dim=2, keepdim=True) + eps
return EPE_mask(Map2D_WTA * mask.float(), Map2D_NET * mask.float(), src_num_fgnd, sparse, mean=mean, sum=sum)
else:
return EPE(Map2D_WTA, Map2D_NET, sparse, mean=mean, sum=False)
def generate_NormMap2D_corr4d_WTA(corr4d):
batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
nc_B_Avec = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) # [batch_idx,k_A,i_B,j_B]
nc_B_Avec = torch.nn.functional.softmax(nc_B_Avec, 1)
scores_B, index_B = torch.max(nc_B_Avec, dim=1)
index1D_B = index_B.view(batch_size, -1)
Map2D = unNormMap1D_to_NormMap2D(index1D_B) # (B,2,S,S)
return Map2D
def generate_mask(flow, flow_bw, occ_thresh):
output_sum = flow + flow_bw
output_sum = torch.sum(torch.pow(output_sum.permute(0, 2, 3, 1), 2), 3)
occ_bw = (output_sum > occ_thresh).float()
mask_bw = 1. - occ_bw
return mask_bw
def warp_with_mask(x, flo, masked_flow):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
mask: [B, C, H, W]
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
# output_img = output * mask
output_masked = output * masked_flow
return output_masked | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, batch_norm=False):
if batch_norm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.1, inplace=True))
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.LeakyReLU(0.1))
def predict_flow(in_planes):
return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True)
def NormMap2D_to_unNormMap2D(NormMap2D):
B, C, H, W = NormMap2D.size()
mapping = torch.zeros_like(NormMap2D)
# mesh grid
mapping[:,0,:,:] = (NormMap2D[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (NormMap2D[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
idx = mapping[:, 0, :, :] + mapping[:,1,:,:] * W
idx = idx.type(torch.cuda.LongTensor)
return idx
#from normalized mapping to unnormalised flow
def unnormalise_and_convert_mapping_to_flow(map):
# here map is normalised to -1;1
# we put it back to 0,W-1, then convert it to flow
B, C, H, W = map.size()
mapping = torch.zeros_like(map)
# mesh grid
mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
# print("map(normalized)")
# print(map[:, 0, 3, 5])
# print("mapping(unnormalized)")
# print(mapping[:, 0, 3, 5])
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if mapping.is_cuda:
grid = grid.cuda()
flow = mapping - grid
return flow
def unnormalise_and_convert_mapping_to_flow_and_grid(map):
# here map is normalised to -1;1
# we put it back to 0,W-1, then convert it to flow
B, C, H, W = map.size()
mapping = torch.zeros_like(map)
# mesh grid
mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
# print("map(normalized)")
# print(map[:, 0, 3, 5])
# print("mapping(unnormalized)")
# print(mapping[:, 0, 3, 5])
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if mapping.is_cuda:
grid = grid.cuda()
flow = mapping - grid
return flow, grid
class CorrelationVolume(nn.Module):
"""
Implementation by <NAME>
paper: https://arxiv.org/abs/1703.05593
project: https://github.com/ignacio-rocco/cnngeometric_pytorch
"""
def __init__(self):
super(CorrelationVolume, self).__init__()
def forward(self, feature_A, feature_B):
b, c, h, w = feature_A.size()
# reshape features for matrix multiplication
feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) # shape (b,c,h*w)
# feature_A = feature_A.view(b, c, h*w).transpose(1,2)
feature_B = feature_B.view(b, c, h * w).transpose(1, 2) # shape (b,h*w,c)
feature_mul = torch.bmm(feature_B, feature_A) # shape (b,h*w,h*w)
correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
# correlation_numpy = correlation_tensor.detach().cpu().numpy()
return correlation_tensor # shape (b,h*w,h,w)
class FeatureL2Norm(nn.Module):
"""
Implementation by <NAME>
paper: https://arxiv.org/abs/1703.05593
project: https://github.com/ignacio-rocco/cnngeometric_pytorch
"""
def __init__(self):
super(FeatureL2Norm, self).__init__()
def forward(self, feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class OpticalFlowEstimator(nn.Module):
def __init__(self, in_channels, batch_norm):
super(OpticalFlowEstimator, self).__init__()
dd = np.cumsum([128,128,96,64,32])
self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_1 = conv(in_channels + dd[0], 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_2 = conv(in_channels + dd[1], 96, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_3 = conv(in_channels + dd[2], 64, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_4 = conv(in_channels + dd[3], 32, kernel_size=3, stride=1, batch_norm=batch_norm)
self.predict_flow = predict_flow(in_channels + dd[4])
def forward(self, x):
# dense net connection
x = torch.cat((self.conv_0(x), x),1)
x = torch.cat((self.conv_1(x), x),1)
x = torch.cat((self.conv_2(x), x),1)
x = torch.cat((self.conv_3(x), x),1)
x = torch.cat((self.conv_4(x), x),1)
flow = self.predict_flow(x)
return x, flow
class OpticalFlowEstimatorNoDenseConnection(nn.Module):
def __init__(self, in_channels, batch_norm):
super(OpticalFlowEstimatorNoDenseConnection, self).__init__()
self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_1 = conv(128, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_2 = conv(128, 96, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_3 = conv(96, 64, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_4 = conv(64, 32, kernel_size=3, stride=1, batch_norm=batch_norm)
self.predict_flow = predict_flow(32)
def forward(self, x):
x = self.conv_4(self.conv_3(self.conv_2(self.conv_1(self.conv_0(x)))))
flow = self.predict_flow(x)
return x, flow
# extracted from DGCNet
def conv_blck(in_channels, out_channels, kernel_size=3,
stride=1, padding=1, dilation=1, bn=False):
if bn:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
else:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation),
nn.ReLU(inplace=True))
def conv_head(in_channels):
return nn.Conv2d(in_channels, 2, kernel_size=3, padding=1)
class CorrespondenceMapBase(nn.Module):
def __init__(self, in_channels, bn=False):
super().__init__()
def forward(self, x1, x2=None, x3=None):
x = x1
# concatenating dimensions
if (x2 is not None) and (x3 is None):
x = torch.cat((x1, x2), 1)
elif (x2 is None) and (x3 is not None):
x = torch.cat((x1, x3), 1)
elif (x2 is not None) and (x3 is not None):
x = torch.cat((x1, x2, x3), 1)
return x
class CMDTop(CorrespondenceMapBase):
def __init__(self, in_channels, bn=False, use_cuda=False):
super().__init__(in_channels, bn)
chan = [128, 128, 96, 64, 32]
self.conv0 = conv_blck(in_channels, chan[0], bn=bn)
self.conv1 = conv_blck(chan[0], chan[1], bn=bn)
self.conv2 = conv_blck(chan[1], chan[2], bn=bn)
self.conv3 = conv_blck(chan[2], chan[3], bn=bn)
self.conv4 = conv_blck(chan[3], chan[4], bn=bn)
self.final = conv_head(chan[-1])
if use_cuda:
self.conv0.cuda()
self.conv1.cuda()
self.conv2.cuda()
self.conv3.cuda()
self.conv4.cuda()
self.final.cuda()
def forward(self, x1, x2=None, x3=None):
x = super().forward(x1, x2, x3)
x = self.conv4(self.conv3(self.conv2(self.conv1(self.conv0(x)))))
return self.final(x)
def warp(x, flo):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output * mask
# return output
def unNormMap1D_to_NormMap2D(idx_B_Avec, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False,
invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if idx_B_Avec.is_cuda else x
batch_size, sz = idx_B_Avec.shape
w = sz // 25
h = w
# fs2: width, fs1: height
if scale == 'centered':
XA, YA = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
# XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
elif scale == 'positive':
XA, YA = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
JA, IA = np.meshgrid(range(w), range(h))
# JB, IB = np.meshgrid(range(w), range(h))
XA, YA = Variable(to_cuda(torch.FloatTensor(XA))), Variable(to_cuda(torch.FloatTensor(YA)))
# XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB)))
JA, IA = Variable(to_cuda(torch.LongTensor(JA).contiguous().view(1, -1))), Variable(to_cuda(torch.LongTensor(IA).contiguous().view(1, -1)))
# JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1)))
iA = IA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
jA = JA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
# iB = IB.expand_as(iA)
# jB = JB.expand_as(jA)
xA=XA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
yA=YA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
# xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
# yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
xA_WTA = xA.contiguous().view(batch_size, 1, h, w)
yA_WTA = yA.contiguous().view(batch_size, 1, h, w)
Map2D_WTA = torch.cat((xA_WTA, yA_WTA), 1).float()
return Map2D_WTA
def unNormMap1D_to_NormMap2D_inLoc(idx_B_Avec,h,w, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False,
invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if idx_B_Avec.is_cuda else x
batch_size, sz = idx_B_Avec.shape
# fs2: width, fs1: height
if scale == 'centered':
XA, YA = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
# XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
elif scale == 'positive':
XA, YA = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
JA, IA = np.meshgrid(range(w), range(h))
# JB, IB = np.meshgrid(range(w), range(h))
XA, YA = Variable(to_cuda(torch.FloatTensor(XA))), Variable(to_cuda(torch.FloatTensor(YA)))
# XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB)))
JA, IA = Variable(to_cuda(torch.LongTensor(JA).contiguous().view(1, -1))), Variable(to_cuda(torch.LongTensor(IA).contiguous().view(1, -1)))
# JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1)))
iA = IA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
jA = JA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
# iB = IB.expand_as(iA)
# jB = JB.expand_as(jA)
xA=XA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
yA=YA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
# xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
# yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
xA_WTA = xA.contiguous().view(batch_size, 1, h, w)
yA_WTA = yA.contiguous().view(batch_size, 1, h, w)
Map2D_WTA = torch.cat((xA_WTA, yA_WTA), 1).float()
return Map2D_WTA
def warp_from_NormMap2D(x, NormMap2D):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
vgrid = NormMap2D.permute(0, 2, 3, 1).contiguous()
output = nn.functional.grid_sample(x, vgrid, align_corners=True) #N,C,H,W
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
#
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output*mask
# return output
def L1_loss(input_flow, target_flow):
L1 = torch.abs(input_flow-target_flow)
L1 = torch.sum(L1, 1)
return L1
def L1_charbonnier_loss(input_flow, target_flow, sparse=False, mean=True, sum=False):
batch_size = input_flow.size(0)
epsilon = 0.01
alpha = 0.4
L1 = L1_loss(input_flow, target_flow)
norm = torch.pow(L1 + epsilon, alpha)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
norm = norm[~mask]
if mean:
return norm.mean()
elif sum:
return norm.sum()
else:
return norm.sum()/batch_size
def EPE(input_flow, target_flow, sparse=False, mean=True, sum=False):
EPE_map = torch.norm(target_flow - input_flow, 2, 1)
# input_flow_np = input_flow.detach().cpu().numpy()
batch_size = EPE_map.size(0)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
EPE_map = EPE_map[~mask]
if mean:
return EPE_map.mean()
elif sum:
return EPE_map.sum()
else:
return EPE_map.sum()/batch_size
def EPE_mask(input_flow, target_flow, mask_num, sparse=False, mean=False, sum=False):
EPE_map = torch.norm(target_flow - input_flow, 2, 1)
# input_flow_np = input_flow.detach().cpu().numpy()
batch_size = EPE_map.size(0)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:, 0] == 0) & (target_flow[:, 1] == 0)
EPE_map = EPE_map[~mask]
if mean:
return EPE_map.mean()
elif sum:
return EPE_map.sum()
else:
return (EPE_map/ mask_num).sum() /batch_size
def multiscaleEPE(Map2D_WTA, Map2D_NET, mask, sparse=False, robust_L1_loss=False, mean=True, sum=False):
# b, _, h, w = output.size()
# if sparse:
# target_scaled = sparse_max_pool(target, (h, w))
#
# if mask is not None:
# mask = sparse_max_pool(mask.float().unsqueeze(1), (h, w))
# else:
# target_scaled = F.interpolate(target, (h, w), mode='bilinear')
if mask is not None:
mask = mask.cuda().detach().byte()
if robust_L1_loss:
if mask is not None:
return L1_charbonnier_loss(output * mask.float(), target_scaled * mask.float(), sparse, mean=mean, sum=False)
else:
return L1_charbonnier_loss(output, target_scaled, sparse, mean=mean, sum=False)
else:
if mask is not None:
eps = 1
src_num_fgnd = mask.sum(dim=3, keepdim=True).sum(dim=2, keepdim=True) + eps
return EPE_mask(Map2D_WTA * mask.float(), Map2D_NET * mask.float(), src_num_fgnd, sparse, mean=mean, sum=sum)
else:
return EPE(Map2D_WTA, Map2D_NET, sparse, mean=mean, sum=False)
def generate_NormMap2D_corr4d_WTA(corr4d):
batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
nc_B_Avec = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) # [batch_idx,k_A,i_B,j_B]
nc_B_Avec = torch.nn.functional.softmax(nc_B_Avec, 1)
scores_B, index_B = torch.max(nc_B_Avec, dim=1)
index1D_B = index_B.view(batch_size, -1)
Map2D = unNormMap1D_to_NormMap2D(index1D_B) # (B,2,S,S)
return Map2D
def generate_mask(flow, flow_bw, occ_thresh):
output_sum = flow + flow_bw
output_sum = torch.sum(torch.pow(output_sum.permute(0, 2, 3, 1), 2), 3)
occ_bw = (output_sum > occ_thresh).float()
mask_bw = 1. - occ_bw
return mask_bw
def warp_with_mask(x, flo, masked_flow):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
mask: [B, C, H, W]
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
# output_img = output * mask
output_masked = output * masked_flow
return output_masked | en | 0.559422 | # mesh grid # unormalise # unormalise #from normalized mapping to unnormalised flow # here map is normalised to -1;1 # we put it back to 0,W-1, then convert it to flow # mesh grid # unormalise # unormalise # print("map(normalized)") # print(map[:, 0, 3, 5]) # print("mapping(unnormalized)") # print(mapping[:, 0, 3, 5]) # here map is normalised to -1;1 # we put it back to 0,W-1, then convert it to flow # mesh grid # unormalise # unormalise # print("map(normalized)") # print(map[:, 0, 3, 5]) # print("mapping(unnormalized)") # print(mapping[:, 0, 3, 5]) Implementation by <NAME> paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch # reshape features for matrix multiplication # shape (b,c,h*w) # feature_A = feature_A.view(b, c, h*w).transpose(1,2) # shape (b,h*w,c) # shape (b,h*w,h*w) # correlation_numpy = correlation_tensor.detach().cpu().numpy() # shape (b,h*w,h,w) Implementation by <NAME> paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch # dense net connection # extracted from DGCNet # concatenating dimensions warp an image/tensor (im2) back to im1, according to the optical flow x: [B, C, H, W] (im2) flo: [B, 2, H, W] flow # mesh grid # makes a mapping out of the flow # scale grid to [-1,1] # return output # fs2: width, fs1: height # XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h)) # XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h)) # JB, IB = np.meshgrid(range(w), range(h)) # XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB))) # JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1))) # iB = IB.expand_as(iA) # jB = JB.expand_as(jA) # xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1) # yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1) # fs2: width, fs1: height # XB, YB = np.meshgrid(np.linspace(-1, 
1, w), np.linspace(-1, 1, h)) # XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h)) # JB, IB = np.meshgrid(range(w), range(h)) # XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB))) # JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1))) # iB = IB.expand_as(iA) # jB = JB.expand_as(jA) # xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1) # yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1) warp an image/tensor (im2) back to im1, according to the optical flow x: [B, C, H, W] (im2) flo: [B, 2, H, W] flow # mesh grid #N,C,H,W # # return output # invalid flow is defined with both flow coordinates to be exactly 0 # input_flow_np = input_flow.detach().cpu().numpy() # invalid flow is defined with both flow coordinates to be exactly 0 # input_flow_np = input_flow.detach().cpu().numpy() # invalid flow is defined with both flow coordinates to be exactly 0 # b, _, h, w = output.size() # if sparse: # target_scaled = sparse_max_pool(target, (h, w)) # # if mask is not None: # mask = sparse_max_pool(mask.float().unsqueeze(1), (h, w)) # else: # target_scaled = F.interpolate(target, (h, w), mode='bilinear') # [batch_idx,k_A,i_B,j_B] # (B,2,S,S) warp an image/tensor (im2) back to im1, according to the optical flow x: [B, C, H, W] (im2) flo: [B, 2, H, W] flow mask: [B, C, H, W] # mesh grid # makes a mapping out of the flow # scale grid to [-1,1] # output_img = output * mask | 2.255944 | 2 |
distinct_occure_helpers.py | martynaut/mirnaome_somatic_mutations | 1 | 6618337 | <gh_stars>1-10
import numpy as np
def set_balance(row, ratio):
if row['ratio 3/(3+5)'] >= ratio:
return '3p'
elif row['ratio 5/(3+5)'] >= ratio:
return '5p'
elif np.isnan(row['reads_3p']) and np.isnan(row['reads_5p']):
return 'unknown'
elif np.isnan(row['reads_3p']):
return '5p'
elif np.isnan(row['reads_5p']):
return '3p'
else:
return 'both'
def find_in_mirna(row, df_loc):
if df_loc[
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['orientation'] == row['orient_loc']) &
(df_loc['stop'] >= row['pos'])].shape[0] != 0:
temp = df_loc[
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['orientation'] == row['orient_loc']) &
(df_loc['stop'] >= row['pos'])].values[0]
if row['orient_loc'] == '+':
start = row['pos'] - temp[2] + 1
stop = row['pos'] - temp[3] - 1
else:
start = -(row['pos'] - temp[3] - 1)
stop = -(row['pos'] - temp[2] + 1)
localiz = [start, stop]
else:
localiz = [np.nan,
np.nan]
return localiz
def find_arm(row):
if row['-/+'] == '+':
if row['start'] - row['start_pre'] < row['stop_pre'] - row['stop']:
return '5p'
else:
return '3p'
if row['-/+'] == '-':
if row['start'] - row['start_pre'] < row['stop_pre'] - row['stop']:
return '3p'
else:
return '5p'
def from_start(row, column_start, column_stop):
if row['orient_loc'] == '+':
return row['pos'] - row[column_start] + 1
else:
return row[column_stop] - row['pos'] + 1
def from_end(row, column_stop, column_start):
if row['orient_loc'] == '+':
return row['pos'] - row[column_stop] - 1
else:
return row[column_start] - row['pos'] - 1
def find_localization(row, df_loc):
# fix values that were not in reference
if row['name'].lower() == 'hsa-mir-4477b' and \
row['start'] == 63819560 and \
row['stop'] == 63819669:
row['Strand'] = '+'
elif row['name'].lower() == 'hsa-mir-6723':
row['Strand'] = '-'
elif row['name'].lower() == 'hsa-mir-3656':
row['Strand'] = '+'
if (type(row['Strand']) != str and
df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos'])].shape[0] != 0):
localiz = df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos'])].values[0]
elif df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos']) &
(df_loc['orientation'] == row['Strand'])].shape[0] != 0:
localiz = df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos']) &
(df_loc['orientation'] == row['Strand'])].values[0]
else:
localiz = [np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan]
return localiz
def if_complex(row, complex_df):
if complex_df[(complex_df['chrom'] == row['chrom']) &
(complex_df['start'] == row['start']) &
(complex_df['stop'] == row['stop']) &
(complex_df['gene'] == row['gene']) &
(complex_df['seq_type'] == row['seq_type'])].shape[0] != 0:
values = complex_df[(complex_df['chrom'] == row['chrom']) &
(complex_df['start'] == row['start']) &
(complex_df['stop'] == row['stop']) &
(complex_df['gene'] == row['gene']) &
(complex_df['seq_type'] == row['seq_type'])]['complex'].unique()
if 1 in values:
return 1
else:
return 0
else:
return 0
def concat_ints(col):
row = list(col.values)
new_row = []
for x in row:
new_row.append(str(x))
return '"' + ':'.join(new_row) + '"'
def concat_alg(col):
row = list(col.values)
new_row = []
for x in row:
new_row.append(str(x))
new_row = sorted(set(new_row))
return '"' + ':'.join(new_row) + '"'
def type_of_mutation(row):
if len(row['ref']) > len(row['alt']):
return 'del'
elif len(row['ref']) == len(row['alt']):
return 'subst'
elif ',' in row['alt']:
return 'subst'
else:
return 'ins'
def take_from_coord(coordinates, column_name, row):
return coordinates[(coordinates['chr'] == row['chrom']) &
(coordinates['start'] < int(row['pos'])) &
(coordinates['stop'] > int(row['pos']))][column_name].values[0]
def seq_type(value, list_df):
if 'hsa-' in value:
return 'mirna'
elif value in list_df:
return 'cancer_exome'
else:
return 'not_defined'
def subst_type(row):
if row['mutation_type'] == 'subst':
if (((row['ref'] in ['A', 'G']) and (row['alt'] in ['A', 'G'])) or
((row['ref'] in ['C', 'T']) and (row['alt'] in ['C', 'T']))):
return 'transition'
else:
return 'transversion'
else:
return 'n.a.'
| import numpy as np
def set_balance(row, ratio):
if row['ratio 3/(3+5)'] >= ratio:
return '3p'
elif row['ratio 5/(3+5)'] >= ratio:
return '5p'
elif np.isnan(row['reads_3p']) and np.isnan(row['reads_5p']):
return 'unknown'
elif np.isnan(row['reads_3p']):
return '5p'
elif np.isnan(row['reads_5p']):
return '3p'
else:
return 'both'
def find_in_mirna(row, df_loc):
if df_loc[
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['orientation'] == row['orient_loc']) &
(df_loc['stop'] >= row['pos'])].shape[0] != 0:
temp = df_loc[
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['orientation'] == row['orient_loc']) &
(df_loc['stop'] >= row['pos'])].values[0]
if row['orient_loc'] == '+':
start = row['pos'] - temp[2] + 1
stop = row['pos'] - temp[3] - 1
else:
start = -(row['pos'] - temp[3] - 1)
stop = -(row['pos'] - temp[2] + 1)
localiz = [start, stop]
else:
localiz = [np.nan,
np.nan]
return localiz
def find_arm(row):
if row['-/+'] == '+':
if row['start'] - row['start_pre'] < row['stop_pre'] - row['stop']:
return '5p'
else:
return '3p'
if row['-/+'] == '-':
if row['start'] - row['start_pre'] < row['stop_pre'] - row['stop']:
return '3p'
else:
return '5p'
def from_start(row, column_start, column_stop):
if row['orient_loc'] == '+':
return row['pos'] - row[column_start] + 1
else:
return row[column_stop] - row['pos'] + 1
def from_end(row, column_stop, column_start):
if row['orient_loc'] == '+':
return row['pos'] - row[column_stop] - 1
else:
return row[column_start] - row['pos'] - 1
def find_localization(row, df_loc):
# fix values that were not in reference
if row['name'].lower() == 'hsa-mir-4477b' and \
row['start'] == 63819560 and \
row['stop'] == 63819669:
row['Strand'] = '+'
elif row['name'].lower() == 'hsa-mir-6723':
row['Strand'] = '-'
elif row['name'].lower() == 'hsa-mir-3656':
row['Strand'] = '+'
if (type(row['Strand']) != str and
df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos'])].shape[0] != 0):
localiz = df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos'])].values[0]
elif df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos']) &
(df_loc['orientation'] == row['Strand'])].shape[0] != 0:
localiz = df_loc[(df_loc['name'].str.contains(row['name'].lower())) &
(df_loc['chrom'] == row['chrom']) &
(df_loc['start'] <= row['pos']) &
(df_loc['stop'] >= row['pos']) &
(df_loc['orientation'] == row['Strand'])].values[0]
else:
localiz = [np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan]
return localiz
def if_complex(row, complex_df):
if complex_df[(complex_df['chrom'] == row['chrom']) &
(complex_df['start'] == row['start']) &
(complex_df['stop'] == row['stop']) &
(complex_df['gene'] == row['gene']) &
(complex_df['seq_type'] == row['seq_type'])].shape[0] != 0:
values = complex_df[(complex_df['chrom'] == row['chrom']) &
(complex_df['start'] == row['start']) &
(complex_df['stop'] == row['stop']) &
(complex_df['gene'] == row['gene']) &
(complex_df['seq_type'] == row['seq_type'])]['complex'].unique()
if 1 in values:
return 1
else:
return 0
else:
return 0
def concat_ints(col):
row = list(col.values)
new_row = []
for x in row:
new_row.append(str(x))
return '"' + ':'.join(new_row) + '"'
def concat_alg(col):
row = list(col.values)
new_row = []
for x in row:
new_row.append(str(x))
new_row = sorted(set(new_row))
return '"' + ':'.join(new_row) + '"'
def type_of_mutation(row):
if len(row['ref']) > len(row['alt']):
return 'del'
elif len(row['ref']) == len(row['alt']):
return 'subst'
elif ',' in row['alt']:
return 'subst'
else:
return 'ins'
def take_from_coord(coordinates, column_name, row):
return coordinates[(coordinates['chr'] == row['chrom']) &
(coordinates['start'] < int(row['pos'])) &
(coordinates['stop'] > int(row['pos']))][column_name].values[0]
def seq_type(value, list_df):
if 'hsa-' in value:
return 'mirna'
elif value in list_df:
return 'cancer_exome'
else:
return 'not_defined'
def subst_type(row):
if row['mutation_type'] == 'subst':
if (((row['ref'] in ['A', 'G']) and (row['alt'] in ['A', 'G'])) or
((row['ref'] in ['C', 'T']) and (row['alt'] in ['C', 'T']))):
return 'transition'
else:
return 'transversion'
else:
return 'n.a.' | en | 0.991993 | # fix values that were not in reference | 2.906219 | 3 |
Data Structures/Recursion/solutions/call_stack_solution.py | michal0janczyk/udacity_data_structures_and_algorithms_nanodegree | 1 | 6618338 | <gh_stars>1-10
def print_integers(n):
if n <= 0:
return
print(n)
print_integers(n - 1) | def print_integers(n):
if n <= 0:
return
print(n)
print_integers(n - 1) | none | 1 | 3.422103 | 3 | |
tests/unit/searchsupport_tests.py | Jardo72/Python-Sudoku-Sandbox | 0 | 6618339 | #
# Copyright 2018 <NAME>
#
# This file is part of Python Sudoku Sandbox.
#
# Python Sudoku Sandbox is free software developed for educational and
# experimental purposes. It is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module is a collection of unit tests covering the functionality provided by the searchsupport module.
"""
from logging import basicConfig, INFO
from unittest import TestCase
from unittest.mock import Mock
from searchsupport import CandidateList, CandidateQueryMode, _CandidateCellExclusionLogic, _CandidateValueExclusionLogic, _ExclusionLogic, _ExclusionOutcome, _RegionCandidateCells, UnambiguousCandidate
class UnambiguousCandidateTest(TestCase):
    """
    Unit tests verifying the equality semantics and the cell-address property
    of the UnambiguousCandidate value object.
    """

    def _assert_mutually_equal(self, first, second):
        # Equality must hold regardless of operand order.
        self.assertEqual(first, second)
        self.assertEqual(second, first)

    def _assert_mutually_unequal(self, first, second):
        # Inequality must hold regardless of operand order.
        self.assertNotEqual(first, second)
        self.assertNotEqual(second, first)

    def test_unambiguous_candidate_is_equal_to_itself(self):
        candidate = UnambiguousCandidate(row=3, column=7, value=5)
        self.assertEqual(candidate, candidate)

    def test_two_unambiguous_candidate_instances_are_equal_if_they_have_equal_row_and_column_and_value(self):
        self._assert_mutually_equal(
            UnambiguousCandidate(row=7, column=4, value=1),
            UnambiguousCandidate(row=7, column=4, value=1))

    def test_unambiguous_candidate_is_not_equal_to_none(self):
        self.assertNotEqual(UnambiguousCandidate(row=4, column=1, value=3), None)

    def test_unambiguous_candidate_is_not_equal_to_instance_of_other_class(self):
        self.assertNotEqual(UnambiguousCandidate(row=9, column=2, value=6), "dummy")

    def test_two_unambiguous_candidate_instances_are_not_equal_if_they_have_identical_value_and_row_but_distinct_column(self):
        self._assert_mutually_unequal(
            UnambiguousCandidate(row=3, column=9, value=4),
            UnambiguousCandidate(row=3, column=8, value=4))

    def test_two_unambiguous_candidate_instances_are_not_equal_if_they_have_identical_value_and_column_but_distinct_row(self):
        self._assert_mutually_unequal(
            UnambiguousCandidate(row=1, column=5, value=2),
            UnambiguousCandidate(row=3, column=5, value=2))

    def test_two_unambiguous_candidate_instances_are_not_equal_if_they_have_identical_row_and_column_but_distinct_value(self):
        self._assert_mutually_unequal(
            UnambiguousCandidate(row=4, column=7, value=2),
            UnambiguousCandidate(row=4, column=7, value=3))

    def test_unambiguous_candidate_provides_proper_cell_address(self):
        # cell_address is expected to be the (row, column) pair, ignoring the value.
        self.assertEqual((6, 4), UnambiguousCandidate(row=6, column=4, value=7).cell_address)
        self.assertEqual((0, 5), UnambiguousCandidate(row=0, column=5, value=3).cell_address)
class CandidateListTest(TestCase):
"""
Test fixture aimed at the CandidateList class.
"""
def test_candidate_list_is_equal_to_itself(self):
candidate_list = CandidateList(row = 3, column = 2, values = [1, 7, 9])
self.assertEqual(candidate_list, candidate_list)
def test_two_candidate_list_instances_are_equal_if_they_have_identical_cell_address_and_values(self):
candidate_list_one = CandidateList(row = 3, column = 2, values = [1, 7, 9])
candidate_list_two = CandidateList(row = 3, column = 2, values = [1, 7, 9])
self.assertEqual(candidate_list_one, candidate_list_two)
self.assertEqual(candidate_list_two, candidate_list_one)
def test_two_candidate_list_instances_are_equal_if_they_have_identical_cell_address_and_values_even_if_the_order_of_values_is_distinct(self):
candidate_list_one = CandidateList(row = 8, column = 5, values = [1, 7, 9])
candidate_list_two = CandidateList(row = 8, column = 5, values = [9, 7, 1])
self.assertEqual(candidate_list_one, candidate_list_two)
self.assertEqual(candidate_list_two, candidate_list_one)
def test_candidate_list_is_not_equal_to_none(self):
candidate_list = CandidateList(row = 3, column = 2, values = [1, 7, 9])
self.assertNotEqual(candidate_list, None)
def test_candidate_list_is_not_equal_to_instance_of_other_class(self):
candidate_list = CandidateList(row = 3, column = 2, values = [1, 7, 9])
self.assertNotEqual(candidate_list, "dummy")
def test_two_candidate_list_instances_are_not_equal_if_they_have_identical_cell_address_but_distinct_values(self):
candidate_list_one = CandidateList(row = 3, column = 2, values = [1, 7])
candidate_list_two = CandidateList(row = 3, column = 2, values = [1, 4, 9])
self.assertNotEqual(candidate_list_one, candidate_list_two)
self.assertNotEqual(candidate_list_two, candidate_list_one)
def test_two_candidate_list_instances_are_not_equal_if_they_have_identical_column_and_values_but_distinct_row(self):
candidate_list_one = CandidateList(row = 3, column = 2, values = [1, 7, 9])
candidate_list_two = CandidateList(row = 4, column = 2, values = [1, 7, 9])
self.assertNotEqual(candidate_list_one, candidate_list_two)
self.assertNotEqual(candidate_list_two, candidate_list_one)
def test_two_candidate_list_instances_are_not_equal_if_they_have_identical_row_and_values_but_distinct_column(self):
candidate_list_one = CandidateList(row = 4, column = 9, values = [3, 6, 7, 9])
candidate_list_two = CandidateList(row = 4, column = 2, values = [3, 6, 7, 9])
self.assertNotEqual(candidate_list_one, candidate_list_two)
self.assertNotEqual(candidate_list_two, candidate_list_one)
def test_length_of_candidate_list_reflects_the_number_of_candidate_values(self):
    """len() of a candidate list equals the number of candidate values it carries."""
    self.assertEqual(3, len(CandidateList(row = 3, column = 2, values = [1, 7, 9])))
    self.assertEqual(5, len(CandidateList(row = 3, column = 2, values = [2, 3, 7, 8, 9])))
def test_candidate_list_provides_proper_cell_address(self):
    """The cell_address property is the (row, column) tuple of the concerned cell."""
    self.assertEqual((3, 2), CandidateList(row = 3, column = 2, values = [1, 7, 9]).cell_address)
    self.assertEqual((8, 0), CandidateList(row = 8, column = 0, values = [2, 3, 7, 8, 9]).cell_address)
class CandidateValueExclusionLogicTest(TestCase):
    """
    Test fixture aimed at the CandidateValueExclusionLogic class. When designing the
    test cases, I wanted to ensure complete coverage of various aspects:
    * Various kinds of exclusion (pure row exclusion, pure column exclusion, pure region
      exclusion, various combinations like row and column exclusion).
    * Equivalence classes and (implicit) boundary values (i.e. top/bottom row,
      leftmost/rightmost column, regions).
    * All valid cell values.
    """

    def setUp(self):
        # Every test case starts with a fresh exclusion logic representing an empty grid.
        self._exclusion_logic = _CandidateValueExclusionLogic()

    def test_pure_row_exclusion_in_topmost_row_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        | 9 6 5 | 8 7 4 | 1   3 |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 2 has to be identified as unambiguous candidate
        for the cell [0; 6].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 2, value = 5)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 0, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 7, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 4, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 1, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 8, value = 3)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 3, value = 8)
        # The eighth (and last) value of the row triggers the unambiguous candidate.
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 5, value = 4)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 0, column = 6, value = 2) in candidate_list)

    def test_pure_row_exclusion_in_bottom_row_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        | 7 6   | 2 4 8 | 1 3 9 |
        +-------+-------+-------+
        For the grid above, the value 5 has to be identified as unambiguous candidate
        for the cell [8; 2].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 7, value = 3)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 0, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 3, value = 2)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 8, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 1, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 6, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 4, value = 4)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 5, value = 8)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 8, column = 2, value = 5) in candidate_list)

    def test_pure_column_exclusion_in_leftmost_column_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        | 3     |       |       |
        | 7     |       |       |
        | 1     |       |       |
        +-------+-------+-------+
        | 9     |       |       |
        | 2     |       |       |
        | 6     |       |       |
        +-------+-------+-------+
        |       |       |       |
        | 5     |       |       |
        | 8     |       |       |
        +-------+-------+-------+
        For the grid above, the value 4 has to be identified as unambiguous candidate
        for the cell [6; 0].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 0, value = 3)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 0, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 0, value = 2)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 0, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 0, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 0, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 0, value = 5)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 0, value = 8)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 6, column = 0, value = 4) in candidate_list)

    def test_pure_column_exclusion_in_rightmost_column_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       |     2 |
        |       |       |     7 |
        |       |       |     5 |
        +-------+-------+-------+
        |       |       |     9 |
        |       |       |     4 |
        |       |       |     3 |
        +-------+-------+-------+
        |       |       |     6 |
        |       |       |     8 |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 1 has to be identified as unambiguous candidate
        for the cell [8; 8].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 8, value = 3)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 8, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 8, value = 2)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 8, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 8, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 8, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 8, value = 5)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 8, value = 8)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 8, column = 8, value = 1) in candidate_list)

    def test_pure_region_exclusion_in_upper_left_region_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        | 3 1 6 |       |       |
        | 9 2 4 |       |       |
        | 8   5 |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 7 has to be identified as unambiguous candidate
        for the cell [2; 1].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 0, value = 3)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 2, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 1, value = 2)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 0, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 2, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 1, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 2, value = 5)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 0, value = 8)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 2, column = 1, value = 7) in candidate_list)

    def test_pure_region_exclusion_in_upper_right_region_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       | 9 1   |
        |       |       | 2 7 3 |
        |       |       | 4 5 8 |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 6 has to be identified as unambiguous candidate
        for the cell [0; 8].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 7, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 8, value = 3)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 6, value = 2)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 6, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 6, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 7, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 7, value = 5)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 8, value = 8)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 0, column = 8, value = 6) in candidate_list)

    def test_pure_region_exclusion_in_bottom_left_region_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        | 9 1 5 |       |       |
        | 6   2 |       |       |
        | 3 4 7 |       |       |
        +-------+-------+-------+
        For the grid above, the value 8 has to be identified as unambiguous candidate
        for the cell [7; 1].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 1, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 1, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 0, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 2, value = 2)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 0, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 2, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 2, value = 5)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 0, value = 3)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 7, column = 1, value = 8) in candidate_list)

    def test_pure_region_exclusion_in_bottom_right_region_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       | 5 7 1 |
        |       |       | 8 2 9 |
        |       |       | 6 4   |
        +-------+-------+-------+
        For the grid above, the value 3 has to be identified as unambiguous candidate
        for the cell [8; 8].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 6, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 6, value = 8)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 8, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 8, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 7, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 7, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 6, value = 5)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 7, value = 2)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 8, column = 8, value = 3) in candidate_list)

    def test_combination_of_row_and_column_exclusion_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       | 9     |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       | 2     |       |
        | 3     | 5     | 1   8 |
        |       |       |       |
        +-------+-------+-------+
        |       | 4     |       |
        |       | 7     |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 6 has to be identified as unambiguous candidate
        for the cell [4; 3].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 5, value = 5)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 8, value = 8)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 6, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 3, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 3, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 3, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 1, value = 3)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 3, value = 2)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 4, column = 3, value = 6) in candidate_list)

    def test_combination_of_row_and_region_exclusion_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        | 7   3 |     2 |   8 5 |
        |       |       |   1   |
        |       |       | 6   4 |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 9 has to be identified as unambiguous candidate
        for the cell [3; 6].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 8, value = 5)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 7, value = 8)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 7, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 6, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 8, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 0, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 2, value = 3)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 3, column = 6, value = 9) in candidate_list)

    def test_combination_of_column_and_region_exclusion_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       | 3     |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       | 7   9 |       |
        |       |   5   |       |
        |       | 4 8   |       |
        +-------+-------+-------+
        |       | 6     |       |
        |       | 2     |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the value 1 has to be identified as unambiguous candidate
        for the cell [4; 3].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 5, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 4, value = 8)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 4, value = 5)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 3, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 3, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 3, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 3, value = 3)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 3, value = 2)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 4, column = 3, value = 1) in candidate_list)

    def test_combination_of_row_and_column_and_region_exlusion_finds_proper_unambiguous_candidate(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |     4 |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |     1 |       |       |
        |       |       |       |
        +-------+-------+-------+
        |   2   |     7 |     6 |
        | 5     |       |       |
        | 9   3 |       |       |
        +-------+-------+-------+
        For the grid above, the value 8 has to be identified as unambiguous candidate
        for the cell [6; 2].
        """
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 1, value = 9)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 2, value = 1)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 0, value = 5)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 8, value = 6)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 2, value = 4)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 4, value = 7)
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 2, value = 3)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 6, column = 1, value = 2)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 6, column = 2, value = 8) in candidate_list)

    def test_candidates_for_first_undefined_cell_reflect_exclusion(self):
        """
        +-------+-------+-------+
        |       | 7     |       |
        |   9   |       |       |
        |   4   |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        | 2     |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        For the grid above, the values 1, 3, 5, 6 and 8 have to be identified as candidates
        for the cell [0; 0], which should be identified as the first undefined cell.
        """
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 3, value = 7)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 5, column = 0, value = 2)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 1, value = 9)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 1, value = 4)
        actual_candidate_list = self._exclusion_logic.get_undefined_cell_candidates(CandidateQueryMode.FIRST_UNDEFINED_CELL)
        expected_candidate_list = CandidateList(row = 0, column = 0, values = [1, 3, 5, 6, 8])
        self.assertEqual(actual_candidate_list, expected_candidate_list)

    def test_candidates_for_undefined_cell_with_least_candidates_reflect_exclusion(self):
        """
        +-------+-------+-------+
        |       | 7     |       |
        |   9   |       |     3 |
        |   4   |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |     2 |
        | 2     |       |       |
        +-------+-------+-------+
        |       |       | 7     |
        |       |       |     1 |
        |     5 |     9 |       |
        +-------+-------+-------+
        For the grid above, the values 4, 6 and 8 have to be identified as candidates
        for the cell [8; 8], which should be identified as the undefined cells with
        least candidate.
        """
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 3, value = 7)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 1, value = 9)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 8, value = 3)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 1, value = 4)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 4, column = 8, value = 2)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 5, column = 0, value = 2)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 6, column = 6, value = 7)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 8, value = 1)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 2, value = 5)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 5, value = 9)
        actual_candidate_list = self._exclusion_logic.get_undefined_cell_candidates(CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES)
        expected_candidate_list = CandidateList(row = 8, column = 8, values = [4, 6, 8])
        self.assertEqual(actual_candidate_list, expected_candidate_list)

    def test_no_value_is_applicable_to_cell_whose_value_has_been_already_set(self):
        """Once a cell value has been applied, none of the nine values is applicable to that cell anymore."""
        self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 2, value = 5)
        for value in range(1, 10):
            candidate = UnambiguousCandidate(row = 3, column = 2, value = value)
            self.assertFalse(self._exclusion_logic.is_applicable(candidate))

    def test_applicability_of_value_reflects_former_exclusions(self):
        """A value already present in the same row, column, or region is not applicable; others remain applicable."""
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 5, value = 9)
        self.assertTrue(self._exclusion_logic.is_applicable(UnambiguousCandidate(row = 0, column = 4, value = 8)))
        self.assertFalse(self._exclusion_logic.is_applicable(UnambiguousCandidate(row = 0, column = 4, value = 9)))
        self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 5, value = 6)
        self.assertTrue(self._exclusion_logic.is_applicable(UnambiguousCandidate(row = 1, column = 5, value = 3)))
        self.assertFalse(self._exclusion_logic.is_applicable(UnambiguousCandidate(row = 1, column = 5, value = 6)))
        self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 0, value = 5)
        self.assertTrue(self._exclusion_logic.is_applicable(UnambiguousCandidate(row = 5, column = 1, value = 3)))
        self.assertFalse(self._exclusion_logic.is_applicable(UnambiguousCandidate(row = 5, column = 1, value = 5)))

    def test_number_of_applicable_values_reflects_exclusion(self):
        """The applicable-value count drops to zero for a defined cell and decreases for its peers after each exclusion."""
        self.assertEqual(9, self._exclusion_logic.get_applicable_value_count(row = 0, column = 0))
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 0, value = 1)
        self.assertEqual(0, self._exclusion_logic.get_applicable_value_count(row = 0, column = 0))
        self.assertEqual(8, self._exclusion_logic.get_applicable_value_count(row = 0, column = 1))
        self.assertEqual(8, self._exclusion_logic.get_applicable_value_count(row = 1, column = 0))
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 1, value = 3)
        self.assertEqual(0, self._exclusion_logic.get_applicable_value_count(row = 1, column = 1))
        self.assertEqual(7, self._exclusion_logic.get_applicable_value_count(row = 0, column = 1))
        self.assertEqual(7, self._exclusion_logic.get_applicable_value_count(row = 1, column = 0))
        self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 2, value = 9)
        self.assertEqual(0, self._exclusion_logic.get_applicable_value_count(row = 2, column = 2))
        self.assertEqual(6, self._exclusion_logic.get_applicable_value_count(row = 0, column = 1))
        self.assertEqual(6, self._exclusion_logic.get_applicable_value_count(row = 1, column = 0))

    def test_clone_reflects_the_state_of_the_original_when_candidates_are_requested(self):
        """
        +-------+-------+-------+
        | 2     | 7     |     1 |
        |     4 |       |     3 |
        |   8   |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |     2 |       |       |
        +-------+-------+-------+
        |       |       | 7     |
        |   3 5 |     9 |   2 4 |
        |       |       |       |
        +-------+-------+-------+
        For the grid above:
        * The cell [0; 1] is to be identified as the first undefined cell. The applicable
          candidates for that cell should be 5, 6, and 9.
        * The cell [7; 6] is to be identified as undefined cell with least candidates. The
          applicable candidates for that cell should be 6 and 8.
        A clone of the corresponding exclusion logic has to identify the same candidate
        values.
        """
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 0, value = 2)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 3, value = 7)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 6, value = 1)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 2, value = 4)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 8, value = 3)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 1, value = 8)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 6, column = 2, value = 2)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 6, column = 6, value = 7)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 1, value = 3)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 2, value = 5)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 5, value = 9)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 7, value = 2)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 8, value = 4)
        # All queries below go through the clone; it must mirror the original's state.
        clone = self._exclusion_logic.copy()
        actual_candidate_list = clone.get_undefined_cell_candidates(CandidateQueryMode.FIRST_UNDEFINED_CELL)
        expected_candidate_list = CandidateList(row = 0, column = 1, values = [5, 6, 9])
        self.assertEqual(actual_candidate_list, expected_candidate_list)
        actual_candidate_list = clone.get_undefined_cell_candidates(CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES)
        expected_candidate_list = CandidateList(row = 7, column = 6, values = [6, 8])
        self.assertEqual(actual_candidate_list, expected_candidate_list)

    def test_clone_reflects_the_state_of_the_original_when_further_exclusion_is_performed(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |       |   5   |       |
        |       |       |       |
        +-------+-------+-------+
        |       | 1     |       |
        | 6     |     3 |   2   |
        |       | 7     |       |
        +-------+-------+-------+
        |       |   8   |       |
        |       |       |       |
        |       |   9   |       |
        +-------+-------+-------+
        For the grid above, value 4 is to be identified as unambiguous candidate for the
        cell [4; 4], even if half of the exclusion is performed with one instance of
        exclusion logic, and the other half is performed with a clone of the above mentioned
        instance. The unambiguous candidate is identified by the clone. The original instance
        cannot identify any unambiguous candidate.
        """
        self._exclusion_logic.apply_and_exclude_cell_value(row = 1, column = 4, value = 5)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 3, value = 1)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 4, column = 0, value = 6)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 4, column = 7, value = 2)
        clone = self._exclusion_logic.copy()
        clone.apply_and_exclude_cell_value(row = 4, column = 5, value = 3)
        clone.apply_and_exclude_cell_value(row = 5, column = 3, value = 7)
        clone.apply_and_exclude_cell_value(row = 6, column = 4, value = 8)
        candidate_list = clone.apply_and_exclude_cell_value(row = 8, column = 4, value = 9)
        self.assertEqual(len(candidate_list), 1)
        self.assertTrue(UnambiguousCandidate(row = 4, column = 4, value = 4) in candidate_list)

    def test_exclusion_in_clone_does_not_affect_the_original(self):
        """
        +-------+-------+-------+
        |       |       |       |
        |       |       |       |
        |       |       |       |
        +-------+-------+-------+
        |       |       |       |
        |       |     3 |       |
        |       | 7     |       |
        +-------+-------+-------+
        |       |   8   |       |
        |       |       |       |
        |       |   9   |       |
        +-------+-------+-------+
        For the grid above, the values 1, 2, 4, 5 and 6 have to be identified as candidates
        for the cell [3; 4]. The above mentioned cell should be identified as undefined cell
        with least candidates for the original exclusion logic instance, despite of further
        exclusions performed with a clone of the exclusion logic.
        """
        self._exclusion_logic.apply_and_exclude_cell_value(row = 4, column = 5, value = 3)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 5, column = 3, value = 7)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 6, column = 4, value = 8)
        self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 4, value = 9)
        clone = self._exclusion_logic.copy()
        # Exclusions applied to the clone must leave the original untouched.
        clone.apply_and_exclude_cell_value(row = 1, column = 4, value = 5)
        clone.apply_and_exclude_cell_value(row = 3, column = 3, value = 1)
        clone.apply_and_exclude_cell_value(row = 4, column = 7, value = 2)
        actual_candidate_list = self._exclusion_logic.get_undefined_cell_candidates(CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES)
        expected_candidate_list = CandidateList(row = 3, column = 4, values = [1, 2, 4, 5, 6])
        self.assertEqual(actual_candidate_list, expected_candidate_list)

    def __test_that_exclusion_does_not_lead_to_unambiguous_candidate(self, row, column, value):
        """Apply the given cell value and verify that the exclusion yields no unambiguous candidate (i.e. None)."""
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row, column, value)
        self.assertIs(candidate_list, None)
class RegionCandidateCellsTest(TestCase):
    """
    Test fixture aimed at the class _RegionCandidateCells.
    """

    def test_combination_of_row_and_column_exclusion_proper_candidate_is_found(self):
        """
        Tracks the value 7 in the middle right region (upper left cell [3, 6]).
        After 7 has been placed at [0, 0], [3, 2], [8, 7] and [4, 0], the cell
        [5, 6] remains the only cell of the region where 7 is still applicable,
        and the final exclusion (7 placed at [2, 8]) must report that.
        """
        cells = _RegionCandidateCells(topmost_row = 3, leftmost_column = 6, value = 7)
        # None of these placements narrows the region down to a single cell yet.
        for row, column in ((0, 0), (3, 2), (8, 7), (4, 0)):
            outcome = cells.apply_and_exclude_cell_value(row = row, column = column, value = 7)
            self.assertEqual(outcome, _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND)
        # The placement in row 2 eliminates the last competing cell.
        outcome = cells.apply_and_exclude_cell_value(row = 2, column = 8, value = 7)
        self.assertEqual(outcome, _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND)
        self.assertEqual(
            UnambiguousCandidate(row = 5, column = 6, value = 7),
            cells.get_single_remaining_applicable_cell())

    def test_combination_of_column_and_cell_exclusion_proper_candidate_is_found(self):
        """
        Tracks the value 2 in the bottom left region (upper left cell [6, 0]).
        After 1 is assigned to [6, 0], 2 is placed at [1, 1] and [5, 2], the cell
        [7, 0] remains the only cell of the region where 2 is still applicable,
        and the final exclusion (4 assigned to [8, 0]) must report that.
        """
        cells = _RegionCandidateCells(topmost_row = 6, leftmost_column = 0, value = 2)
        # Neither occupying region cells nor excluding columns pins the value down yet.
        for row, column, placed_value in ((6, 0, 1), (1, 1, 2), (5, 2, 2)):
            outcome = cells.apply_and_exclude_cell_value(row = row, column = column, value = placed_value)
            self.assertEqual(outcome, _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND)
        # Occupying [8, 0] leaves [7, 0] as the single applicable cell.
        outcome = cells.apply_and_exclude_cell_value(row = 8, column = 0, value = 4)
        self.assertEqual(outcome, _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND)
        self.assertEqual(
            UnambiguousCandidate(row = 7, column = 0, value = 2),
            cells.get_single_remaining_applicable_cell())

    def test_combination_of_row_and_cell_exclusion_proper_candidate_is_found(self):
        """
        Tracks the value 8 in the upper left region (upper left cell [0, 0]).
        After 1 is assigned to [0, 0], 8 is placed at [1, 7] and [2, 4], the cell
        [0, 1] remains the only cell of the region where 8 is still applicable,
        and the final exclusion (4 assigned to [0, 2]) must report that.
        """
        cells = _RegionCandidateCells(topmost_row = 0, leftmost_column = 0, value = 8)
        # Neither occupying region cells nor excluding rows pins the value down yet.
        for row, column, placed_value in ((0, 0, 1), (1, 7, 8), (2, 4, 8)):
            outcome = cells.apply_and_exclude_cell_value(row = row, column = column, value = placed_value)
            self.assertEqual(outcome, _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND)
        # Occupying [0, 2] leaves [0, 1] as the single applicable cell.
        outcome = cells.apply_and_exclude_cell_value(row = 0, column = 2, value = 4)
        self.assertEqual(outcome, _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND)
        self.assertEqual(
            UnambiguousCandidate(row = 0, column = 1, value = 8),
            cells.get_single_remaining_applicable_cell())
class CandidateCellExclusionLogicTest(TestCase):
    """
    Test fixture aimed at the CandidateCellExclusionLogic class. When designing the
    test cases, I wanted to ensure complete coverage of various aspects:
    * Exclusion of candidate cells in each of the nine regions.
    * All valid cell values.
    * Various kinds of exclusion (e.g. row and column, row and cells, column and cells).
    """

    def setUp(self):
        # Every test case starts from a fresh exclusion logic with no exclusions applied.
        self._exclusion_logic = _CandidateCellExclusionLogic()

    def __verify_scenario(self, inconclusive_steps, final_step, expected_candidate):
        # Applies the given (row, column, value) exclusion steps one by one; none of
        # them may identify an unambiguous candidate. The final step must then yield
        # a candidate list containing exactly the single expected candidate.
        for row, column, value in inconclusive_steps:
            self.assertIsNone(self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value))
        final_row, final_column, final_value = final_step
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = final_row, column = final_column, value = final_value)
        self.assertEqual(1, len(candidate_list))
        self.assertIn(expected_candidate, candidate_list)

    def test_row_and_column_exclusion_with_cell_exclusion_in_upper_left_region_finds_proper_unambiguous_candidate(self):
        """
        The value 9 present in the cells [0; 8], [2; 3] and [3; 1], combined with the
        value 1 occupying the cell [1; 2], has to lead to the value 9 being identified
        as unambiguous candidate for the cell [1; 0] in the upper left region.
        """
        self.__verify_scenario(
            inconclusive_steps = [(0, 8, 9), (2, 3, 9), (3, 1, 9)],
            final_step = (1, 2, 1),
            expected_candidate = UnambiguousCandidate(row = 1, column = 0, value = 9))

    def test_row_and_column_exclusion_in_upper_middle_region_finds_proper_unambiguous_candidate(self):
        """
        The value 3 present in the cells [2; 1], [1; 8], [4; 3] and [6; 5] has to lead
        to the value 3 being identified as unambiguous candidate for the cell [0; 4]
        in the upper middle region.
        """
        self.__verify_scenario(
            inconclusive_steps = [(2, 1, 3), (1, 8, 3), (4, 3, 3)],
            final_step = (6, 5, 3),
            expected_candidate = UnambiguousCandidate(row = 0, column = 4, value = 3))

    def test_row_exclusion_with_cell_exclusion_in_upper_right_region_finds_proper_unambiguous_candidate(self):
        """
        The value 2 present in the cells [1; 0] and [2; 3], combined with the values
        9 and 4 occupying the cells [0; 6] and [0; 7], has to lead to the value 2
        being identified as unambiguous candidate for the cell [0; 8].
        """
        self.__verify_scenario(
            inconclusive_steps = [(1, 0, 2), (2, 3, 2), (0, 6, 9)],
            final_step = (0, 7, 4),
            expected_candidate = UnambiguousCandidate(row = 0, column = 8, value = 2))

    def test_column_exclusion_with_cell_exclusion_in_middle_left_region_finds_proper_unambiguous_candidate(self):
        """
        The value 4 present in the cells [0; 2] and [7; 0], combined with the values
        5 and 9 occupying the cells [3; 1] and [5; 1], has to lead to the value 4
        being identified as unambiguous candidate for the cell [4; 1].
        """
        self.__verify_scenario(
            inconclusive_steps = [(0, 2, 4), (7, 0, 4), (3, 1, 5)],
            final_step = (5, 1, 9),
            expected_candidate = UnambiguousCandidate(row = 4, column = 1, value = 4))

    def test_row_exclusion_with_cell_exclusion_in_middle_middle_region_finds_proper_unambiguous_candidate(self):
        """
        The value 5 present in the cells [3; 1] and [5; 8], combined with the values
        2 and 7 occupying the cells [4; 3] and [4; 5], has to lead to the value 5
        being identified as unambiguous candidate for the cell [4; 4].
        """
        self.__verify_scenario(
            inconclusive_steps = [(3, 1, 5), (5, 8, 5), (4, 3, 2)],
            final_step = (4, 5, 7),
            expected_candidate = UnambiguousCandidate(row = 4, column = 4, value = 5))

    def test_row_and_column_exclusion_with_cell_exclusion_in_middle_right_region_finds_proper_unambiguous_candidate(self):
        """
        The value 8 present in the cells [0; 7], [5; 2] and [8; 8], combined with the
        value 5 occupying the cell [3; 6], has to lead to the value 8 being identified
        as unambiguous candidate for the cell [4; 6].
        """
        self.__verify_scenario(
            inconclusive_steps = [(0, 7, 8), (5, 2, 8), (8, 8, 8)],
            final_step = (3, 6, 5),
            expected_candidate = UnambiguousCandidate(row = 4, column = 6, value = 8))

    def test_row_and_column_exclusion_in_bottom_left_region_finds_proper_unambiguous_candidate(self):
        """
        The value 7 present in the cells [1; 0], [4; 2], [6; 7] and [8; 3] has to lead
        to the value 7 being identified as unambiguous candidate for the cell [7; 1]
        in the bottom left region.
        """
        self.__verify_scenario(
            inconclusive_steps = [(1, 0, 7), (4, 2, 7), (6, 7, 7)],
            final_step = (8, 3, 7),
            expected_candidate = UnambiguousCandidate(row = 7, column = 1, value = 7))

    def test_column_exclusion_with_cell_exclusion_in_bottom_middle_region_finds_proper_unambiguous_candidate(self):
        """
        The value 6 present in the cells [1; 3] and [3; 4], combined with the values
        3 and 8 occupying the cells [7; 5] and [8; 5], has to lead to the value 6
        being identified as unambiguous candidate for the cell [6; 5].
        """
        self.__verify_scenario(
            inconclusive_steps = [(1, 3, 6), (3, 4, 6), (7, 5, 3)],
            final_step = (8, 5, 8),
            expected_candidate = UnambiguousCandidate(row = 6, column = 5, value = 6))

    def test_row_and_column_exclusion_with_cell_exclusion_in_bottom_right_region_finds_proper_unambiguous_candidate(self):
        """
        The value 1 present in the cells [0; 8], [7; 1] and [8; 3], combined with the
        value 8 occupying the cell [6; 7], has to lead to the value 1 being identified
        as unambiguous candidate for the cell [6; 6].
        """
        self.__verify_scenario(
            inconclusive_steps = [(0, 8, 1), (7, 1, 1), (8, 3, 1)],
            final_step = (6, 7, 8),
            expected_candidate = UnambiguousCandidate(row = 6, column = 6, value = 1))

    def test_clone_reflects_the_state_of_the_original_when_further_exclusion_is_performed(self):
        """
        The value 4 has to be identified as unambiguous candidate for the cell [4; 4],
        even if half of the exclusion is performed with one instance of exclusion
        logic, and the other half is performed with a clone of the above mentioned
        instance. The unambiguous candidate is identified by the clone; the original
        instance cannot identify any unambiguous candidate.
        """
        for row, column, value in [(1, 3, 4), (4, 5, 1), (5, 4, 7)]:
            self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        clone = self._exclusion_logic.copy()
        clone.apply_and_exclude_cell_value(row = 3, column = 1, value = 4)
        candidate_list = clone.apply_and_exclude_cell_value(row = 5, column = 5, value = 2)
        self.assertEqual(1, len(candidate_list))
        self.assertIn(UnambiguousCandidate(row = 4, column = 4, value = 4), candidate_list)

    def test_exclusion_in_clone_does_not_affect_the_original(self):
        """
        If a clone of exclusion logic is created after several exclusions, further
        exclusions performed upon the clone will not affect the original exclusion logic.
        """
        for row, column, value in [(1, 3, 4), (4, 5, 1), (5, 4, 7)]:
            self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        clone = self._exclusion_logic.copy()
        clone.apply_and_exclude_cell_value(row = 3, column = 1, value = 4)
        # The exclusion performed upon the clone must not leak into the original,
        # so the original still cannot identify any unambiguous candidate.
        self.assertIsNone(self._exclusion_logic.apply_and_exclude_cell_value(row = 5, column = 5, value = 2))
class ExclusionLogicTest(TestCase):
    """
    Test fixture aimed at the class ExclusionLogic.
    """

    @staticmethod
    def __create_exclusion_logic(value_exclusion_result, cell_exclusion_result):
        # Builds an _ExclusionLogic whose two underlying exclusion logics are stubs
        # returning the given canned candidate lists (or None) from
        # apply_and_exclude_cell_value.
        value_exclusion_stub = Mock(_CandidateValueExclusionLogic)
        value_exclusion_stub.apply_and_exclude_cell_value.return_value = value_exclusion_result
        cell_exclusion_stub = Mock(_CandidateCellExclusionLogic)
        cell_exclusion_stub.apply_and_exclude_cell_value.return_value = cell_exclusion_result
        return _ExclusionLogic(value_exclusion_stub, cell_exclusion_stub)

    def test_none_of_the_exclusions_finds_a_candidate_none_is_returned(self):
        exclusion_logic = self.__create_exclusion_logic(None, None)
        self.assertIsNone(exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2))

    def test_value_exclusion_returns_non_empty_list_cell_exclusion_returns_none_value_exclusion_list_is_returned(self):
        value_exclusion_candidates = [
            UnambiguousCandidate(row = 8, column = 0, value = 4),
            UnambiguousCandidate(row = 8, column = 5, value = 7),
        ]
        exclusion_logic = self.__create_exclusion_logic(value_exclusion_candidates, None)
        outcome = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
        self.assertListEqual(value_exclusion_candidates, outcome)

    def test_cell_exclusion_returns_non_empty_list_value_exclusion_returns_none_cell_exclusion_list_is_returned(self):
        cell_exclusion_candidates = [
            UnambiguousCandidate(row = 8, column = 0, value = 4),
            UnambiguousCandidate(row = 8, column = 5, value = 7),
        ]
        exclusion_logic = self.__create_exclusion_logic(None, cell_exclusion_candidates)
        outcome = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
        self.assertListEqual(cell_exclusion_candidates, outcome)

    def test_both_exclusions_return_non_empty_list_union_of_both_lists_is_returned(self):
        value_exclusion_candidates = [
            UnambiguousCandidate(row = 2, column = 3, value = 9),
            UnambiguousCandidate(row = 5, column = 1, value = 2),
            UnambiguousCandidate(row = 8, column = 7, value = 6),
        ]
        cell_exclusion_candidates = [
            UnambiguousCandidate(row = 8, column = 0, value = 4),
            UnambiguousCandidate(row = 7, column = 5, value = 1),
        ]
        exclusion_logic = self.__create_exclusion_logic(value_exclusion_candidates, cell_exclusion_candidates)
        outcome = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
        self.assertListEqual(value_exclusion_candidates + cell_exclusion_candidates, outcome)

    def test_both_exclusions_return_the_same_candidate_list_containing_duplicate_is_returned(self):
        # Deliberately overlapping results: the combined list is expected to retain
        # the duplicate rather than deduplicate it.
        value_exclusion_candidates = [
            UnambiguousCandidate(row = 8, column = 0, value = 4),
            UnambiguousCandidate(row = 8, column = 5, value = 7),
        ]
        cell_exclusion_candidates = [
            UnambiguousCandidate(row = 8, column = 0, value = 4),
        ]
        exclusion_logic = self.__create_exclusion_logic(value_exclusion_candidates, cell_exclusion_candidates)
        outcome = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
        self.assertListEqual(value_exclusion_candidates + cell_exclusion_candidates, outcome)
# Route the test run's log output into a dedicated file, overwritten on each run.
basicConfig(filename = "exclusiontest.log",
            filemode = "w",
            level = INFO,
            format = "%(asctime)s %(levelname)-8s %(module)-18s line %(lineno)-4d %(message)s",
            datefmt = "%d-%b-%Y %H:%M:%S")
#
# Copyright 2018 <NAME>
#
# This file is part of Python Sudoku Sandbox.
#
# Python Sudoku Sandbox is free software developed for educational and
# experimental purposes. It is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module is a collection of unit tests covering the functionality provided by the searchsupport module.
"""
from logging import basicConfig, INFO
from unittest import TestCase
from unittest.mock import Mock
from searchsupport import CandidateList, CandidateQueryMode, _CandidateCellExclusionLogic, _CandidateValueExclusionLogic, _ExclusionLogic, _ExclusionOutcome, _RegionCandidateCells, UnambiguousCandidate
class UnambiguousCandidateTest(TestCase):
    """
    Test fixture aimed at the UnambiguousCandidate class.
    """

    def __assert_mutually_equal(self, first, second):
        # Equality is expected to be symmetric.
        self.assertEqual(first, second)
        self.assertEqual(second, first)

    def __assert_mutually_not_equal(self, first, second):
        # Inequality is expected to be symmetric.
        self.assertNotEqual(first, second)
        self.assertNotEqual(second, first)

    def test_unambiguous_candidate_is_equal_to_itself(self):
        candidate = UnambiguousCandidate(row = 3, column = 7, value = 5)
        self.assertEqual(candidate, candidate)

    def test_two_unambiguous_candidate_instances_are_equal_if_they_have_equal_row_and_column_and_value(self):
        self.__assert_mutually_equal(
            UnambiguousCandidate(row = 7, column = 4, value = 1),
            UnambiguousCandidate(row = 7, column = 4, value = 1))

    def test_unambiguous_candidate_is_not_equal_to_none(self):
        self.assertNotEqual(UnambiguousCandidate(row = 4, column = 1, value = 3), None)

    def test_unambiguous_candidate_is_not_equal_to_instance_of_other_class(self):
        self.assertNotEqual(UnambiguousCandidate(row = 9, column = 2, value = 6), "dummy")

    def test_two_unambiguous_candidate_instances_are_not_equal_if_they_have_identical_value_and_row_but_distinct_column(self):
        self.__assert_mutually_not_equal(
            UnambiguousCandidate(row = 3, column = 9, value = 4),
            UnambiguousCandidate(row = 3, column = 8, value = 4))

    def test_two_unambiguous_candidate_instances_are_not_equal_if_they_have_identical_value_and_column_but_distinct_row(self):
        self.__assert_mutually_not_equal(
            UnambiguousCandidate(row = 1, column = 5, value = 2),
            UnambiguousCandidate(row = 3, column = 5, value = 2))

    def test_two_unambiguous_candidate_instances_are_not_equal_if_they_have_identical_row_and_column_but_distinct_value(self):
        self.__assert_mutually_not_equal(
            UnambiguousCandidate(row = 4, column = 7, value = 2),
            UnambiguousCandidate(row = 4, column = 7, value = 3))

    def test_unambiguous_candidate_provides_proper_cell_address(self):
        # The cell address is expected to be the (row, column) pair, ignoring the value.
        for row, column, value in [(6, 4, 7), (0, 5, 3)]:
            candidate = UnambiguousCandidate(row = row, column = column, value = value)
            self.assertEqual((row, column), candidate.cell_address)
class CandidateListTest(TestCase):
    """
    Test fixture aimed at the CandidateList class.
    """

    def __assert_mutually_equal(self, first, second):
        # Equality is expected to be symmetric.
        self.assertEqual(first, second)
        self.assertEqual(second, first)

    def __assert_mutually_not_equal(self, first, second):
        # Inequality is expected to be symmetric.
        self.assertNotEqual(first, second)
        self.assertNotEqual(second, first)

    def test_candidate_list_is_equal_to_itself(self):
        candidate_list = CandidateList(row = 3, column = 2, values = [1, 7, 9])
        self.assertEqual(candidate_list, candidate_list)

    def test_two_candidate_list_instances_are_equal_if_they_have_identical_cell_address_and_values(self):
        self.__assert_mutually_equal(
            CandidateList(row = 3, column = 2, values = [1, 7, 9]),
            CandidateList(row = 3, column = 2, values = [1, 7, 9]))

    def test_two_candidate_list_instances_are_equal_if_they_have_identical_cell_address_and_values_even_if_the_order_of_values_is_distinct(self):
        # Candidate values are a set in spirit, so their ordering must not matter.
        self.__assert_mutually_equal(
            CandidateList(row = 8, column = 5, values = [1, 7, 9]),
            CandidateList(row = 8, column = 5, values = [9, 7, 1]))

    def test_candidate_list_is_not_equal_to_none(self):
        self.assertNotEqual(CandidateList(row = 3, column = 2, values = [1, 7, 9]), None)

    def test_candidate_list_is_not_equal_to_instance_of_other_class(self):
        self.assertNotEqual(CandidateList(row = 3, column = 2, values = [1, 7, 9]), "dummy")

    def test_two_candidate_list_instances_are_not_equal_if_they_have_identical_cell_address_but_distinct_values(self):
        self.__assert_mutually_not_equal(
            CandidateList(row = 3, column = 2, values = [1, 7]),
            CandidateList(row = 3, column = 2, values = [1, 4, 9]))

    def test_two_candidate_list_instances_are_not_equal_if_they_have_identical_column_and_values_but_distinct_row(self):
        self.__assert_mutually_not_equal(
            CandidateList(row = 3, column = 2, values = [1, 7, 9]),
            CandidateList(row = 4, column = 2, values = [1, 7, 9]))

    def test_two_candidate_list_instances_are_not_equal_if_they_have_identical_row_and_values_but_distinct_column(self):
        self.__assert_mutually_not_equal(
            CandidateList(row = 4, column = 9, values = [3, 6, 7, 9]),
            CandidateList(row = 4, column = 2, values = [3, 6, 7, 9]))

    def test_length_of_candidate_list_reflects_the_number_of_candidate_values(self):
        for row, column, values in [(3, 2, [1, 7, 9]), (3, 2, [2, 3, 7, 8, 9])]:
            self.assertEqual(len(values), len(CandidateList(row = row, column = column, values = values)))

    def test_candidate_list_provides_proper_cell_address(self):
        # The cell address is expected to be the (row, column) pair, ignoring the values.
        for row, column, values in [(3, 2, [1, 7, 9]), (8, 0, [2, 3, 7, 8, 9])]:
            self.assertEqual((row, column), CandidateList(row = row, column = column, values = values).cell_address)
class CandidateValueExclusionLogicTest(TestCase):
"""
Test fixture aimed at the CandidateValueExclusionLogic class. When designing the
test cases, I wanted to ensure complete coverage of various aspects:
* Various kinds of exclusion (pure row exclusion, pure column exclusion, pure region
exclusion, various combinations like row and column exclusion).
* Equivalence classes and (implicit) boundary values (i.e. top/bottom row,
leftmost/rightmost column, regions).
* All valid cell values.
"""
    def setUp(self):
        # Each test case operates on a fresh exclusion logic instance, i.e. one
        # for which no cell values have been excluded yet.
        self._exclusion_logic = _CandidateValueExclusionLogic()
def test_pure_row_exclusion_in_topmost_row_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| 9 6 5 | 8 7 4 | 1 3 |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
For the grid above, the value 2 has to be identified as unambiguous candidate
for the cell [0; 6].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 2, value = 5)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 0, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 7, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 4, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 1, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 8, value = 3)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 3, value = 8)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 0, column = 5, value = 4)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 0, column = 6, value = 2) in candidate_list)
def test_pure_row_exclusion_in_bottom_row_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| 7 6 | 2 4 8 | 1 3 9 |
+-------+-------+-------+
For the grid above, the value 5 has to be identified as unambiguous candidate
for the cell [8; 2].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 7, value = 3)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 0, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 3, value = 2)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 8, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 1, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 6, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 4, value = 4)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 5, value = 8)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 8, column = 2, value = 5) in candidate_list)
def test_pure_column_exclusion_in_leftmost_column_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| 3 | | |
| 7 | | |
| 1 | | |
+-------+-------+-------+
| 9 | | |
| 2 | | |
| 6 | | |
+-------+-------+-------+
| | | |
| 5 | | |
| 8 | | |
+-------+-------+-------+
For the grid above, the value 4 has to be identified as unambiguous candidate
for the cell [6; 0].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 0, value = 3)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 0, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 0, value = 2)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 0, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 0, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 0, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 0, value = 5)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 0, value = 8)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 6, column = 0, value = 4) in candidate_list)
def test_pure_column_exclusion_in_rightmost_column_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| | | 2 |
| | | 7 |
| | | 5 |
+-------+-------+-------+
| | | 9 |
| | | 4 |
| | | 3 |
+-------+-------+-------+
| | | 6 |
| | | 8 |
| | | |
+-------+-------+-------+
For the grid above, the value 1 has to be identified as unambiguous candidate
for the cell [8; 8].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 5, column = 8, value = 3)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 8, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 8, value = 2)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 3, column = 8, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 8, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 4, column = 8, value = 4)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 8, value = 5)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 8, value = 8)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 8, column = 8, value = 1) in candidate_list)
def test_pure_region_exclusion_in_upper_left_region_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| 3 1 6 | | |
| 9 2 4 | | |
| 8 5 | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
For the grid above, the value 7 has to be identified as unambiguous candidate
for the cell [2; 1].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 0, value = 3)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 2, value = 4)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 1, value = 2)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 0, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 2, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 1, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 2, value = 5)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 0, value = 8)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 2, column = 1, value = 7) in candidate_list)
def test_pure_region_exclusion_in_upper_right_region_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| | | 9 1 |
| | | 2 7 3 |
| | | 4 5 8 |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
For the grid above, the value 6 has to be identified as unambiguous candidate
for the cell [0; 8].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 7, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 8, value = 3)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 1, column = 6, value = 2)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 6, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 6, value = 4)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 0, column = 7, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 2, column = 7, value = 5)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 2, column = 8, value = 8)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 0, column = 8, value = 6) in candidate_list)
def test_pure_region_exclusion_in_bottom_left_region_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| 9 1 5 | | |
| 6 2 | | |
| 3 4 7 | | |
+-------+-------+-------+
For the grid above, the value 8 has to be identified as unambiguous candidate
for the cell [7; 1].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 1, value = 4)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 1, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 0, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 2, value = 2)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 0, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 2, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 2, value = 5)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 8, column = 0, value = 3)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 7, column = 1, value = 8) in candidate_list)
def test_pure_region_exclusion_in_bottom_right_region_finds_proper_unambiguous_candidate(self):
"""
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | |
| | | |
| | | |
+-------+-------+-------+
| | | 5 7 1 |
| | | 8 2 9 |
| | | 6 4 |
+-------+-------+-------+
For the grid above, the value 3 has to be identified as unambiguous candidate
for the cell [8; 8].
"""
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 6, value = 6)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 6, value = 8)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 8, value = 1)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 7, column = 8, value = 9)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 8, column = 7, value = 4)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 7, value = 7)
self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row = 6, column = 6, value = 5)
candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 7, value = 2)
self.assertEqual(len(candidate_list), 1)
self.assertTrue(UnambiguousCandidate(row = 8, column = 8, value = 3) in candidate_list)
def test_combination_of_row_and_column_exclusion_finds_proper_unambiguous_candidate(self):
    """
    Row plus column exclusion: row 4 contains 3, 5, 1 and 8, while column 3
    contains 9, 4 and 7. After 2 is applied to [3; 3], the value 6 must be
    reported as the unambiguous candidate for cell [4; 3].
    """
    # None of the preparatory moves may yield an unambiguous candidate.
    preparatory_moves = (
        (4, 5, 5),
        (4, 8, 8),
        (4, 6, 1),
        (0, 3, 9),
        (6, 3, 4),
        (7, 3, 7),
        (4, 1, 3),
    )
    for row, column, value in preparatory_moves:
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row, column, value)
    candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 3, value = 2)
    self.assertEqual(1, len(candidate_list))
    # assertIn instead of assertTrue(x in y) for a useful failure message.
    self.assertIn(UnambiguousCandidate(row = 4, column = 3, value = 6), candidate_list)
def test_combination_of_row_and_region_exclusion_finds_proper_unambiguous_candidate(self):
    """
    Row plus region exclusion: row 3 contains 7, 3, 8 and 5, and the
    middle-right region additionally contains 1, 6 and 4. After 2 is applied
    to [3; 5], the value 9 must be reported as the unambiguous candidate for
    cell [3; 6].
    """
    # None of the preparatory moves may yield an unambiguous candidate.
    preparatory_moves = (
        (3, 8, 5),
        (3, 7, 8),
        (4, 7, 1),
        (5, 6, 6),
        (5, 8, 4),
        (3, 0, 7),
        (3, 2, 3),
    )
    for row, column, value in preparatory_moves:
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row, column, value)
    candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
    self.assertEqual(1, len(candidate_list))
    # assertIn instead of assertTrue(x in y) for a useful failure message.
    self.assertIn(UnambiguousCandidate(row = 3, column = 6, value = 9), candidate_list)
def test_combination_of_column_and_region_exclusion_finds_proper_unambiguous_candidate(self):
    """
    Column plus region exclusion: column 3 contains 3, 7, 4 and 6, and the
    middle-middle region additionally contains 9, 5 and 8. After 2 is applied
    to [7; 3], the value 1 must be reported as the unambiguous candidate for
    cell [4; 3].
    """
    # None of the preparatory moves may yield an unambiguous candidate.
    preparatory_moves = (
        (3, 5, 9),
        (5, 4, 8),
        (4, 4, 5),
        (6, 3, 6),
        (5, 3, 4),
        (3, 3, 7),
        (0, 3, 3),
    )
    for row, column, value in preparatory_moves:
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row, column, value)
    candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 7, column = 3, value = 2)
    self.assertEqual(1, len(candidate_list))
    # assertIn instead of assertTrue(x in y) for a useful failure message.
    self.assertIn(UnambiguousCandidate(row = 4, column = 3, value = 1), candidate_list)
# NOTE(review): "exlusion" typo in the name kept intentionally — unittest
# discovers tests by name and renaming is out of scope for this change.
def test_combination_of_row_and_column_and_region_exlusion_finds_proper_unambiguous_candidate(self):
    """
    Row, column and region exclusion combined: column 2 contains 4, 1 and 3,
    row 6 contains 7 and 6, and the bottom-left region additionally contains
    9 and 5. After 2 is applied to [6; 1], the value 8 must be reported as
    the unambiguous candidate for cell [6; 2].
    """
    # None of the preparatory moves may yield an unambiguous candidate.
    preparatory_moves = (
        (8, 1, 9),
        (4, 2, 1),
        (7, 0, 5),
        (6, 8, 6),
        (1, 2, 4),
        (6, 4, 7),
        (8, 2, 3),
    )
    for row, column, value in preparatory_moves:
        self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row, column, value)
    candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 6, column = 1, value = 2)
    self.assertEqual(1, len(candidate_list))
    # assertIn instead of assertTrue(x in y) for a useful failure message.
    self.assertIn(UnambiguousCandidate(row = 6, column = 2, value = 8), candidate_list)
def test_candidates_for_first_undefined_cell_reflect_exclusion(self):
    """
    After applying 7 to [0; 3], 2 to [5; 0], 9 to [1; 1] and 4 to [2; 1],
    cell [0; 0] must be reported as the first undefined cell, with the
    values 1, 3, 5, 6 and 8 as its candidates.
    """
    applied_values = ((0, 3, 7), (5, 0, 2), (1, 1, 9), (2, 1, 4))
    for row, column, value in applied_values:
        self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
    actual_candidate_list = self._exclusion_logic.get_undefined_cell_candidates(CandidateQueryMode.FIRST_UNDEFINED_CELL)
    self.assertEqual(actual_candidate_list, CandidateList(row = 0, column = 0, values = [1, 3, 5, 6, 8]))
def test_candidates_for_undefined_cell_with_least_candidates_reflect_exclusion(self):
    """
    With 7@[0;3], 9@[1;1], 3@[1;8], 4@[2;1], 2@[4;8], 2@[5;0], 7@[6;6],
    1@[7;8], 5@[8;2] and 9@[8;5] applied, cell [8; 8] must be reported as
    the undefined cell with the least candidates, the candidates being
    4, 6 and 8.
    """
    applied_values = (
        (0, 3, 7),
        (1, 1, 9),
        (1, 8, 3),
        (2, 1, 4),
        (4, 8, 2),
        (5, 0, 2),
        (6, 6, 7),
        (7, 8, 1),
        (8, 2, 5),
        (8, 5, 9),
    )
    for row, column, value in applied_values:
        self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
    actual_candidate_list = self._exclusion_logic.get_undefined_cell_candidates(CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES)
    self.assertEqual(actual_candidate_list, CandidateList(row = 8, column = 8, values = [4, 6, 8]))
def test_no_value_is_applicable_to_cell_whose_value_has_been_already_set(self):
    """Once a cell's value has been set, none of the values 1..9 is applicable to it."""
    self._exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 2, value = 5)
    for candidate_value in range(1, 10):
        self.assertFalse(
            self._exclusion_logic.is_applicable(
                UnambiguousCandidate(row = 3, column = 2, value = candidate_value)))
def test_applicability_of_value_reflects_former_exclusions(self):
    """
    After a value has been applied, a peer cell (same row/column/region)
    still accepts other values but no longer accepts the applied one.
    """
    # Each case: move applied to the grid, a candidate that must remain
    # applicable, and a candidate that must have become inapplicable.
    cases = (
        ((0, 5, 9), (0, 4, 8), (0, 4, 9)),
        ((2, 5, 6), (1, 5, 3), (1, 5, 6)),
        ((3, 0, 5), (5, 1, 3), (5, 1, 5)),
    )
    for applied, still_applicable, now_excluded in cases:
        row, column, value = applied
        self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        a_row, a_column, a_value = still_applicable
        self.assertTrue(self._exclusion_logic.is_applicable(
            UnambiguousCandidate(row = a_row, column = a_column, value = a_value)))
        e_row, e_column, e_value = now_excluded
        self.assertFalse(self._exclusion_logic.is_applicable(
            UnambiguousCandidate(row = e_row, column = e_column, value = e_value)))
def test_number_of_applicable_values_reflects_exclusion(self):
    """
    Every value set in the upper-left region zeroes the count for its own
    cell and decrements the applicable-value count of the peer cells
    [0; 1] and [1; 0] by one.
    """
    logic = self._exclusion_logic
    self.assertEqual(9, logic.get_applicable_value_count(row = 0, column = 0))
    # Each step: move to apply, then the expected peer-cell count afterwards.
    steps = (
        ((0, 0, 1), 8),
        ((1, 1, 3), 7),
        ((2, 2, 9), 6),
    )
    for (row, column, value), expected_peer_count in steps:
        logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        self.assertEqual(0, logic.get_applicable_value_count(row = row, column = column))
        self.assertEqual(expected_peer_count, logic.get_applicable_value_count(row = 0, column = 1))
        self.assertEqual(expected_peer_count, logic.get_applicable_value_count(row = 1, column = 0))
def test_clone_reflects_the_state_of_the_original_when_candidates_are_requested(self):
    """
    After thirteen values have been applied to the original exclusion logic,
    a clone must report the very same candidate queries as the original:
    * [0; 1] as the first undefined cell, with candidates 5, 6 and 9;
    * [7; 6] as the undefined cell with least candidates, with candidates
      6 and 8.
    """
    applied_values = (
        (0, 0, 2),
        (0, 3, 7),
        (0, 6, 1),
        (1, 2, 4),
        (1, 8, 3),
        (2, 1, 8),
        (6, 2, 2),
        (6, 6, 7),
        (7, 1, 3),
        (7, 2, 5),
        (7, 5, 9),
        (7, 7, 2),
        (7, 8, 4),
    )
    for row, column, value in applied_values:
        self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
    clone = self._exclusion_logic.copy()
    self.assertEqual(
        clone.get_undefined_cell_candidates(CandidateQueryMode.FIRST_UNDEFINED_CELL),
        CandidateList(row = 0, column = 1, values = [5, 6, 9]))
    self.assertEqual(
        clone.get_undefined_cell_candidates(CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES),
        CandidateList(row = 7, column = 6, values = [6, 8]))
def test_clone_reflects_the_state_of_the_original_when_further_exclusion_is_performed(self):
    """
    Value 4 must be identified as unambiguous candidate for the cell [4; 4]
    even if half of the exclusion is performed with one instance of exclusion
    logic and the other half with a clone of that instance. The unambiguous
    candidate is identified by the clone.
    """
    # First half of the exclusions goes to the original instance.
    for row, column, value in ((1, 4, 5), (3, 3, 1), (4, 0, 6), (4, 7, 2)):
        self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
    clone = self._exclusion_logic.copy()
    # Second half goes to the clone, which must still see the original state.
    for row, column, value in ((4, 5, 3), (5, 3, 7), (6, 4, 8)):
        clone.apply_and_exclude_cell_value(row = row, column = column, value = value)
    candidate_list = clone.apply_and_exclude_cell_value(row = 8, column = 4, value = 9)
    self.assertEqual(1, len(candidate_list))
    # assertIn instead of assertTrue(x in y) for a useful failure message.
    self.assertIn(UnambiguousCandidate(row = 4, column = 4, value = 4), candidate_list)
def test_exclusion_in_clone_does_not_affect_the_original(self):
    """
    With 3@[4;5], 7@[5;3], 8@[6;4] and 9@[8;4] applied to the original,
    the values 1, 2, 4, 5 and 6 must remain the candidates for cell [3; 4],
    and that cell must be reported as the undefined cell with least
    candidates by the original instance, despite further exclusions
    performed on a clone.
    """
    for row, column, value in ((4, 5, 3), (5, 3, 7), (6, 4, 8), (8, 4, 9)):
        self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
    clone = self._exclusion_logic.copy()
    # These exclusions touch only the clone; the original must not see them.
    for row, column, value in ((1, 4, 5), (3, 3, 1), (4, 7, 2)):
        clone.apply_and_exclude_cell_value(row = row, column = column, value = value)
    actual_candidate_list = self._exclusion_logic.get_undefined_cell_candidates(CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES)
    self.assertEqual(actual_candidate_list, CandidateList(row = 3, column = 4, values = [1, 2, 4, 5, 6]))
def __test_that_exclusion_does_not_lead_to_unambiguous_candidate(self, row, column, value):
    """Apply the given value to [row; column] and verify no unambiguous candidate emerges."""
    candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row, column, value)
    # assertIsNone gives a clearer failure message than assertIs(x, None).
    self.assertIsNone(candidate_list)
class RegionCandidateCellsTest(TestCase):
    """
    Test fixture aimed at the class _RegionCandidateCells.
    """

    def __run_scenario(self, candidate_cells, inconclusive_moves, decisive_move, expected_candidate):
        # Shared scenario driver: the inconclusive moves must not identify an
        # unambiguous candidate; the decisive move must, and the remaining
        # applicable cell must match the expectation.
        for row, column, value in inconclusive_moves:
            outcome = candidate_cells.apply_and_exclude_cell_value(row = row, column = column, value = value)
            self.assertEqual(_ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND, outcome)
        row, column, value = decisive_move
        outcome = candidate_cells.apply_and_exclude_cell_value(row = row, column = column, value = value)
        self.assertEqual(_ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND, outcome)
        self.assertEqual(expected_candidate, candidate_cells.get_single_remaining_applicable_cell())

    def test_combination_of_row_and_column_exclusion_proper_candidate_is_found(self):
        """
        The cell [5; 6] is the only cell in the middle-right region (upper
        left cell [3; 6]) where the value 7 remains applicable after 7 has
        been placed at [0; 0], [3; 2], [8; 7], [4; 0] and [2; 8].
        """
        candidate_cells = _RegionCandidateCells(topmost_row = 3, leftmost_column = 6, value = 7)
        self.__run_scenario(
            candidate_cells,
            inconclusive_moves = ((0, 0, 7), (3, 2, 7), (8, 7, 7), (4, 0, 7)),
            decisive_move = (2, 8, 7),
            expected_candidate = UnambiguousCandidate(row = 5, column = 6, value = 7))

    def test_combination_of_column_and_cell_exclusion_proper_candidate_is_found(self):
        """
        The cell [7; 0] is the only cell in the bottom-left region (upper
        left cell [6; 0]) where the value 2 remains applicable after
        1@[6; 0], 2@[1; 1], 2@[5; 2] and 4@[8; 0].
        """
        candidate_cells = _RegionCandidateCells(topmost_row = 6, leftmost_column = 0, value = 2)
        self.__run_scenario(
            candidate_cells,
            inconclusive_moves = ((6, 0, 1), (1, 1, 2), (5, 2, 2)),
            decisive_move = (8, 0, 4),
            expected_candidate = UnambiguousCandidate(row = 7, column = 0, value = 2))

    def test_combination_of_row_and_cell_exclusion_proper_candidate_is_found(self):
        """
        The cell [0; 1] is the only cell in the upper-left region (upper
        left cell [0; 0]) where the value 8 remains applicable after
        1@[0; 0], 8@[1; 7], 8@[2; 4] and 4@[0; 2].
        """
        candidate_cells = _RegionCandidateCells(topmost_row = 0, leftmost_column = 0, value = 8)
        self.__run_scenario(
            candidate_cells,
            inconclusive_moves = ((0, 0, 1), (1, 7, 8), (2, 4, 8)),
            decisive_move = (0, 2, 4),
            expected_candidate = UnambiguousCandidate(row = 0, column = 1, value = 8))
class CandidateCellExclusionLogicTest(TestCase):
    """
    Test fixture aimed at the CandidateCellExclusionLogic class. When designing the
    test cases, I wanted to ensure complete coverage of various aspects:
    * Exclusion of candidate cells in each of the nine regions.
    * All valid cell values.
    * Various kinds of exclusion (e.g. row and column, row and cells, column and cells).
    """

    def setUp(self):
        # Fresh exclusion logic for every test case.
        self._exclusion_logic = _CandidateCellExclusionLogic()

    def __run_region_scenario(self, inconclusive_moves, decisive_move, expected_candidate):
        # Shared scenario driver: the inconclusive moves must not identify an
        # unambiguous candidate; the decisive move must identify exactly the
        # expected one.
        for row, column, value in inconclusive_moves:
            self.__test_that_exclusion_does_not_lead_to_unambiguous_candidate(row, column, value)
        row, column, value = decisive_move
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        self.assertEqual(1, len(candidate_list))
        # assertIn instead of assertTrue(x in y) for a useful failure message.
        self.assertIn(expected_candidate, candidate_list)

    def test_row_and_column_exclusion_with_cell_exclusion_in_upper_left_region_finds_proper_unambiguous_candidate(self):
        """
        With 9 placed at [0; 8], [2; 3] and [3; 1], applying 1 to [1; 2] must
        identify 9 as unambiguous candidate for the cell [1; 0].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((0, 8, 9), (2, 3, 9), (3, 1, 9)),
            decisive_move = (1, 2, 1),
            expected_candidate = UnambiguousCandidate(row = 1, column = 0, value = 9))

    def test_row_and_column_exclusion_in_upper_middle_region_finds_proper_unambiguous_candidate(self):
        """
        With 3 placed at [2; 1], [1; 8] and [4; 3], applying 3 to [6; 5] must
        identify 3 as unambiguous candidate for the cell [0; 4].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((2, 1, 3), (1, 8, 3), (4, 3, 3)),
            decisive_move = (6, 5, 3),
            expected_candidate = UnambiguousCandidate(row = 0, column = 4, value = 3))

    def test_row_exclusion_with_cell_exclusion_in_upper_right_region_finds_proper_unambiguous_candidate(self):
        """
        With 2 placed at [1; 0] and [2; 3] and 9 placed at [0; 6], applying 4
        to [0; 7] must identify 2 as unambiguous candidate for the cell [0; 8].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((1, 0, 2), (2, 3, 2), (0, 6, 9)),
            decisive_move = (0, 7, 4),
            expected_candidate = UnambiguousCandidate(row = 0, column = 8, value = 2))

    def test_column_exclusion_with_cell_exclusion_in_middle_left_region_finds_proper_unambiguous_candidate(self):
        """
        With 4 placed at [0; 2] and [7; 0] and 5 placed at [3; 1], applying 9
        to [5; 1] must identify 4 as unambiguous candidate for the cell [4; 1].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((0, 2, 4), (7, 0, 4), (3, 1, 5)),
            decisive_move = (5, 1, 9),
            expected_candidate = UnambiguousCandidate(row = 4, column = 1, value = 4))

    def test_row_exclusion_with_cell_exclusion_in_middle_middle_region_finds_proper_unambiguous_candidate(self):
        """
        With 5 placed at [3; 1] and [5; 8] and 2 placed at [4; 3], applying 7
        to [4; 5] must identify 5 as unambiguous candidate for the cell [4; 4].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((3, 1, 5), (5, 8, 5), (4, 3, 2)),
            decisive_move = (4, 5, 7),
            expected_candidate = UnambiguousCandidate(row = 4, column = 4, value = 5))

    def test_row_and_column_exclusion_with_cell_exclusion_in_middle_right_region_finds_proper_unambiguous_candidate(self):
        """
        With 8 placed at [0; 7], [5; 2] and [8; 8], applying 5 to [3; 6] must
        identify 8 as unambiguous candidate for the cell [4; 6].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((0, 7, 8), (5, 2, 8), (8, 8, 8)),
            decisive_move = (3, 6, 5),
            expected_candidate = UnambiguousCandidate(row = 4, column = 6, value = 8))

    def test_row_and_column_exclusion_in_bottom_left_region_finds_proper_unambiguous_candidate(self):
        """
        With 7 placed at [1; 0], [4; 2] and [6; 7], applying 7 to [8; 3] must
        identify 7 as unambiguous candidate for the cell [7; 1].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((1, 0, 7), (4, 2, 7), (6, 7, 7)),
            decisive_move = (8, 3, 7),
            expected_candidate = UnambiguousCandidate(row = 7, column = 1, value = 7))

    def test_column_exclusion_with_cell_exclusion_in_bottom_middle_region_finds_proper_unambiguous_candidate(self):
        """
        With 6 placed at [1; 3] and [3; 4] and 3 placed at [7; 5], applying 8
        to [8; 5] must identify 6 as unambiguous candidate for the cell [6; 5].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((1, 3, 6), (3, 4, 6), (7, 5, 3)),
            decisive_move = (8, 5, 8),
            expected_candidate = UnambiguousCandidate(row = 6, column = 5, value = 6))

    def test_row_and_column_exclusion_with_cell_exclusion_in_bottom_right_region_finds_proper_unambiguous_candidate(self):
        """
        With 1 placed at [0; 8], [7; 1] and [8; 3], applying 8 to [6; 7] must
        identify 1 as unambiguous candidate for the cell [6; 6].
        """
        self.__run_region_scenario(
            inconclusive_moves = ((0, 8, 1), (7, 1, 1), (8, 3, 1)),
            decisive_move = (6, 7, 8),
            expected_candidate = UnambiguousCandidate(row = 6, column = 6, value = 1))

    def test_clone_reflects_the_state_of_the_original_when_further_exclusion_is_performed(self):
        """
        Value 4 must be identified as unambiguous candidate for the cell
        [4; 4] even if half of the exclusion is performed with one instance
        of exclusion logic and the other half with a clone of that instance.
        The unambiguous candidate is identified by the clone.
        """
        for row, column, value in ((1, 3, 4), (4, 5, 1), (5, 4, 7)):
            self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        clone = self._exclusion_logic.copy()
        clone.apply_and_exclude_cell_value(row = 3, column = 1, value = 4)
        candidate_list = clone.apply_and_exclude_cell_value(row = 5, column = 5, value = 2)
        self.assertEqual(1, len(candidate_list))
        self.assertIn(UnambiguousCandidate(row = 4, column = 4, value = 4), candidate_list)

    def test_exclusion_in_clone_does_not_affect_the_original(self):
        """
        If a clone of exclusion logic is created after several exclusions, further exclusions
        performed upon the clone will not affect the original exclusion logic.
        """
        for row, column, value in ((1, 3, 4), (4, 5, 1), (5, 4, 7)):
            self._exclusion_logic.apply_and_exclude_cell_value(row = row, column = column, value = value)
        clone = self._exclusion_logic.copy()
        # This exclusion touches only the clone; the original must not see it.
        clone.apply_and_exclude_cell_value(row = 3, column = 1, value = 4)
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row = 5, column = 5, value = 2)
        self.assertIsNone(candidate_list)

    def __test_that_exclusion_does_not_lead_to_unambiguous_candidate(self, row, column, value):
        """Apply the given value to [row; column] and verify no unambiguous candidate emerges."""
        candidate_list = self._exclusion_logic.apply_and_exclude_cell_value(row, column, value)
        # assertIsNone gives a clearer failure message than assertIs(x, None).
        self.assertIsNone(candidate_list)
class ExclusionLogicTest(TestCase):
"""
Test fixture aimed at the class ExclusionLogic.
"""
def test_none_of_the_exclusions_finds_a_candidate_none_is_returned(self):
    """When neither delegate reports a candidate, the composite returns None."""
    value_exclusion_stub = Mock(_CandidateValueExclusionLogic)
    cell_exclusion_stub = Mock(_CandidateCellExclusionLogic)
    value_exclusion_stub.apply_and_exclude_cell_value.return_value = None
    cell_exclusion_stub.apply_and_exclude_cell_value.return_value = None
    exclusion_logic = _ExclusionLogic(value_exclusion_stub, cell_exclusion_stub)
    self.assertIsNone(exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2))
def test_value_exclusion_returns_non_empty_list_cell_exclusion_returns_none_value_exclusion_list_is_returned(self):
value_exclusion_candidate_list = [
UnambiguousCandidate(row = 8, column = 0, value = 4),
UnambiguousCandidate(row = 8, column = 5, value = 7),
]
value_exclusion_stub = Mock(_CandidateValueExclusionLogic)
value_exclusion_stub.apply_and_exclude_cell_value.return_value = value_exclusion_candidate_list
cell_exclusion_stub = Mock(_CandidateCellExclusionLogic)
cell_exclusion_stub.apply_and_exclude_cell_value.return_value = None
exclusion_logic = _ExclusionLogic(value_exclusion_stub, cell_exclusion_stub)
final_candidate_list = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
self.assertListEqual(value_exclusion_candidate_list, final_candidate_list)
def test_cell_exclusion_returns_non_empty_list_value_exclusion_returns_none_cell_exclusion_list_is_returned(self):
value_exclusion_stub = Mock(_CandidateValueExclusionLogic)
value_exclusion_stub.apply_and_exclude_cell_value.return_value = None
cell_exclusion_candidate_list = [
UnambiguousCandidate(row = 8, column = 0, value = 4),
UnambiguousCandidate(row = 8, column = 5, value = 7),
]
cell_exclusion_stub = Mock(_CandidateCellExclusionLogic)
cell_exclusion_stub.apply_and_exclude_cell_value.return_value = cell_exclusion_candidate_list
exclusion_logic = _ExclusionLogic(value_exclusion_stub, cell_exclusion_stub)
final_candidate_list = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
self.assertListEqual(cell_exclusion_candidate_list, final_candidate_list)
def test_both_exclusions_return_non_empty_list_union_of_both_lists_is_returned(self):
value_exclusion_candidate_list = [
UnambiguousCandidate(row = 2, column = 3, value = 9),
UnambiguousCandidate(row = 5, column = 1, value = 2),
UnambiguousCandidate(row = 8, column = 7, value = 6),
]
value_exclusion_stub = Mock(_CandidateValueExclusionLogic)
value_exclusion_stub.apply_and_exclude_cell_value.return_value = value_exclusion_candidate_list
cell_exclusion_candidate_list = [
UnambiguousCandidate(row = 8, column = 0, value = 4),
UnambiguousCandidate(row = 7, column = 5, value = 1),
]
cell_exclusion_stub = Mock(_CandidateCellExclusionLogic)
cell_exclusion_stub.apply_and_exclude_cell_value.return_value = cell_exclusion_candidate_list
exclusion_logic = _ExclusionLogic(value_exclusion_stub, cell_exclusion_stub)
final_candidate_list = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
self.assertListEqual(value_exclusion_candidate_list + cell_exclusion_candidate_list, final_candidate_list)
def test_both_exclusions_return_the_same_candidate_list_containing_duplicate_is_returned(self):
value_exclusion_candidate_list = [
UnambiguousCandidate(row = 8, column = 0, value = 4),
UnambiguousCandidate(row = 8, column = 5, value = 7),
]
value_exclusion_stub = Mock(_CandidateValueExclusionLogic)
value_exclusion_stub.apply_and_exclude_cell_value.return_value = value_exclusion_candidate_list
cell_exclusion_candidate_list = [
UnambiguousCandidate(row = 8, column = 0, value = 4),
]
cell_exclusion_stub = Mock(_CandidateCellExclusionLogic)
cell_exclusion_stub.apply_and_exclude_cell_value.return_value = cell_exclusion_candidate_list
exclusion_logic = _ExclusionLogic(value_exclusion_stub, cell_exclusion_stub)
final_candidate_list = exclusion_logic.apply_and_exclude_cell_value(row = 3, column = 5, value = 2)
self.assertListEqual(value_exclusion_candidate_list + cell_exclusion_candidate_list, final_candidate_list)
basicConfig(level = INFO,
format = "%(asctime)s %(levelname)-8s %(module)-18s line %(lineno)-4d %(message)s",
datefmt = "%d-%b-%Y %H:%M:%S",
filename = "exclusiontest.log",
filemode = "w")
| en | 0.799304 | # # Copyright 2018 <NAME> # # This file is part of Python Sudoku Sandbox. # # Python Sudoku Sandbox is free software developed for educational and # experimental purposes. It is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is a collection of unit tests covering the functionality provided by the searchsupport module. Test fixture aimed at the UnambiguousCandidate class. Test fixture aimed at the CandidateList class. Test fixture aimed at the CandidateValueExclusionLogic class. When designing the test cases, I wanted to ensure complete coverage of various aspects: * Various kinds of exclusion (pure row exclusion, pure column exclusion, pure region exclusion, various combinations like row and column exclusion). * Equivalence classes and (implicit) boundary values (i.e. top/bottom row, leftmost/rightmost column, regions). * All valid cell values. +-------+-------+-------+ | 9 6 5 | 8 7 4 | 1 3 | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 2 has to be identified as unambiguous candidate for the cell [0; 6]. +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | 7 6 | 2 4 8 | 1 3 9 | +-------+-------+-------+ For the grid above, the value 5 has to be identified as unambiguous candidate for the cell [8; 2]. 
+-------+-------+-------+ | 3 | | | | 7 | | | | 1 | | | +-------+-------+-------+ | 9 | | | | 2 | | | | 6 | | | +-------+-------+-------+ | | | | | 5 | | | | 8 | | | +-------+-------+-------+ For the grid above, the value 4 has to be identified as unambiguous candidate for the cell [6; 0]. +-------+-------+-------+ | | | 2 | | | | 7 | | | | 5 | +-------+-------+-------+ | | | 9 | | | | 4 | | | | 3 | +-------+-------+-------+ | | | 6 | | | | 8 | | | | | +-------+-------+-------+ For the grid above, the value 1 has to be identified as unambiguous candidate for the cell [8; 8]. +-------+-------+-------+ | 3 1 6 | | | | 9 2 4 | | | | 8 5 | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 7 has to be identified as unambiguous candidate for the cell [2; 1]. +-------+-------+-------+ | | | 9 1 | | | | 2 7 3 | | | | 4 5 8 | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 6 has to be identified as unambiguous candidate for the cell [0; 8]. +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | 9 1 5 | | | | 6 2 | | | | 3 4 7 | | | +-------+-------+-------+ For the grid above, the value 8 has to be identified as unambiguous candidate for the cell [7; 1]. +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | 5 7 1 | | | | 8 2 9 | | | | 6 4 | +-------+-------+-------+ For the grid above, the value 3 has to be identified as unambiguous candidate for the cell [8; 8]. 
+-------+-------+-------+ | | 9 | | | | | | | | | | +-------+-------+-------+ | | 2 | | | 3 | 5 | 1 8 | | | | | +-------+-------+-------+ | | 4 | | | | 7 | | | | | | +-------+-------+-------+ For the grid above, the value 6 has to be identified as unambiguous candidate for the cell [4; 3]. +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | 7 3 | 2 | 8 5 | | | | 1 | | | | 6 4 | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 9 has to be identified as unambiguous candidate for the cell [3; 6]. +-------+-------+-------+ | | 3 | | | | | | | | | | +-------+-------+-------+ | | 7 9 | | | | 5 | | | | 4 8 | | +-------+-------+-------+ | | 6 | | | | 2 | | | | | | +-------+-------+-------+ For the grid above, the value 1 has to be identified as unambiguous candidate for the cell [4; 3]. +-------+-------+-------+ | | | | | 4 | | | | | | | +-------+-------+-------+ | | | | | 1 | | | | | | | +-------+-------+-------+ | 2 | 7 | 6 | | 5 | | | | 9 3 | | | +-------+-------+-------+ For the grid above, the value 8 has to be identified as unambiguous candidate for the cell [6; 2]. +-------+-------+-------+ | | 7 | | | 9 | | | | 4 | | | +-------+-------+-------+ | | | | | | | | | 2 | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the values 1, 3, 5, 6 and 8 have to be identified as candidates for the cell [0; 0], which should be identified as the first undefined cell. +-------+-------+-------+ | | 7 | | | 9 | | 3 | | 4 | | | +-------+-------+-------+ | | | | | | | 2 | | 2 | | | +-------+-------+-------+ | | | 7 | | | | 1 | | 5 | 9 | | +-------+-------+-------+ For the grid above, the values 4, 6 and 8 have to be identified as candidates for the cell [8; 8], which should be identified as the undefined cells with least candidate. 
+-------+-------+-------+ | 2 | 7 | 1 | | 4 | | 3 | | 8 | | | +-------+-------+-------+ | | | | | | | | | 2 | | | +-------+-------+-------+ | | | 7 | | 3 5 | 9 | 2 4 | | | | | +-------+-------+-------+ For the grid above: * The cell [0; 1] is to be identified as the first undefined cell. The applicable candidates for that cell should be 5, 6, and 9. * The cell [7; 6] is to be identified as undefined cell with least candidates. The applicable candidates for that cell should be 6 and 8. A clone of the corresponding exclusion logic has to identify the same candidate values. +-------+-------+-------+ | | | | | | 5 | | | | | | +-------+-------+-------+ | | 1 | | | 6 | 3 | 2 | | | 7 | | +-------+-------+-------+ | | 8 | | | | | | | | 9 | | +-------+-------+-------+ For the grid above, value 4 is to be identified as unambiguous candidate for the cell [4; 4], even if half of the exclusion is performed with one instance of exclusion logic, and the other half is performed with a clone of the above mentioned instance. The unambiguous candidate is identified by the clone. The original instance cannot identify any unambiguous candidate. +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | 3 | | | | 7 | | +-------+-------+-------+ | | 8 | | | | | | | | 9 | | +-------+-------+-------+ For the grid above, the values 1, 2, 4, 5 and 6 have to be identified as candidates for the cell [3; 4]. The above mentioned cell should be identified as undefined cell with least candidates for the original exclusion logic instance, depsite of further exclusions performed with a clone of the exclusion logic. Test fixture aimed at the class _RegionCandidateCells. +-------+-------+-------+ | 7 | | | | | | | | | | 7 | +-------+-------+-------+ | 7 | | | | 7 | | | | | | C | +-------+-------+-------+ | | | | | | | | | | | 7 | +-------+-------+-------+ For the grid above, the cell [5, 6] is the only cell in the middle right region (i.e. 
region with upper left cell [3, 6]) where the value 7 is applicable. +-------+-------+-------+ | | | | | 2 | | | | | | | +-------+-------+-------+ | | | | | | | | | 2 | | | +-------+-------+-------+ | 1 | | | | C | | | | 4 | | | +-------+-------+-------+ For the grid above, the cell [7, 0] is the only cell in the bottom left region (i.e. region with upper left cell [6, 0]) where the value 2 is applicable. +-------+-------+-------+ | 1 C 4 | | | | | | 8 | | | 8 | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the cell [0, 1] is the only cell in the upper left region (i.e. region with upper left cell [0, 0]) where the value 8 is applicable. Test fixture aimed at the CandidateCellExclusionLogic class. When designing the test cases, I wanted to ensure complete coverage of various aspects: * Exclusion of candidate cells in each of the nine regions. * All valid cell values. * Various kinds of exclusion (e.g. row and column, row and cells, column and cells). +-------+-------+-------+ | | | 9 | | 1 | | | | | 9 | | +-------+-------+-------+ | 9 | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 9 has to be identified as unambiguous candidate for the cell [1; 0]. +-------+-------+-------+ | | | | | | | 3 | | 3 | | | +-------+-------+-------+ | | | | | | 3 | | | | | | +-------+-------+-------+ | | 3 | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 3 has to be identified as unambiguous candidate for the cell [0; 4]. +-------+-------+-------+ | | | 9 4 | | 2 | | | | | 2 | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 2 has to be identified as unambiguous candidate for the cell [0; 8]. 
+-------+-------+-------+ | 4 | | | | | | | | | | | +-------+-------+-------+ | 5 | | | | | | | | 9 | | | +-------+-------+-------+ | | | | | 4 | | | | | | | +-------+-------+-------+ For the grid above, the value 4 has to be identified as unambiguous candidate for the cell [4; 1]. +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | 5 | | | | | 2 7 | | | | | 5 | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 5 has to be identified as unambiguous candidate for the cell [4; 4]. +-------+-------+-------+ | | | 8 | | | | | | | | | +-------+-------+-------+ | | | 5 | | | | | | 8 | | | +-------+-------+-------+ | | | | | | | | | | | 8 | +-------+-------+-------+ For the grid above, the value 8 has to be identified as unambiguous candidate for the cell [4; 6]. +-------+-------+-------+ | | | | | 7 | | | | | | | +-------+-------+-------+ | | | | | 7 | | | | | | | +-------+-------+-------+ | | | 7 | | | | | | | 7 | | +-------+-------+-------+ For the grid above, the value 7 has to be identified as unambiguous candidate for the cell [7; 1]. +-------+-------+-------+ | | | | | | 6 | | | | | | +-------+-------+-------+ | | 6 | | | | | | | | | | +-------+-------+-------+ | | | | | | 3 | | | | 8 | | +-------+-------+-------+ For the grid above, the value 6 has to be identified as unambiguous candidate for the cell [6; 5]. +-------+-------+-------+ | | | 1 | | | | | | | | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ | | | 8 | | 1 | | | | | 1 | | +-------+-------+-------+ For the grid above, the value 1 has to be identified as unambiguous candidate for the cell [6; 6]. 
+-------+-------+-------+ | | | | | | 4 | | | | | | +-------+-------+-------+ | 4 | | | | | 1 | | | | 7 2 | | +-------+-------+-------+ | | | | | | | | | | | | +-------+-------+-------+ For the grid above, the value 4 has to be identified as unambiguous candidate for the cell [4; 4], even if half of the exclusion is performed with one instance of exclusion logic, and the other half is performed with a clone of the above mentioned instance. The unambiguous candidate is identified by the clone. The original instance cannot identify any unambiguous candidate. If a clone of exclusion logic is created after several exclusions, further exclusions performed upon the clone will not affect the original exclusion logic. Test fixture aimed at the class ExclusionLogic. | 2.566831 | 3 |
01 - Basics/39-datatypes-numerics.py | python-demo-codes/basics | 2 | 6618340 | # HEAD
# Python Basics - Numeric Data Type
# DESCRIPTION
# Describes
# - how numerics are assigned to variables
# - how string like numerics are converted to numerics (type conversion)
#
# RESOURCES
#
# CORE PYTHON DATA TYPES
# # Integer
# # INTEGER
# Integer like Numeric
var = 1
# Convert a Integer like string into Integer/Numeric
var = int("1")
# # Following fails to convert to Integer or Numeric Type
# var = int("1x")
| # HEAD
# Python Basics - Numeric Data Type
# DESCRIPTION
# Describes
# - how numerics are assigned to variables
# - how string like numerics are converted to numerics (type conversion)
#
# RESOURCES
#
# CORE PYTHON DATA TYPES
# # Integer
# # INTEGER
# Integer like Numeric
var = 1
# Convert a Integer like string into Integer/Numeric
var = int("1")
# # Following fails to convert to Integer or Numeric Type
# var = int("1x")
| en | 0.547178 | # HEAD # Python Basics - Numeric Data Type # DESCRIPTION # Describes # - how numerics are assigned to variables # - how string like numerics are converted to numerics (type conversion) # # RESOURCES # # CORE PYTHON DATA TYPES # # Integer # # INTEGER # Integer like Numeric # Convert a Integer like string into Integer/Numeric # # Following fails to convert to Integer or Numeric Type # var = int("1x") | 3.713446 | 4 |
core/pay.py | hyouv/Darknights-server | 112 | 6618341 | <reponame>hyouv/Darknights-server
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description: Payment
"""
No, we will not do any payment.
"""
from bottle import *
import json
from utils import logger
@route('/pay/getUnconfirmedOrderIdList', method='POST')
def pay_getUnconfirmedOrderIdList():
logger.info('Hit /pay/getUnconfirmedOrderIdList', request.environ.get('HTTP_X_FORWARDED_FOR'))
resp = """
{
"orderIdList": [],
"playerDataDelta": {
"deleted": {},
"modified": {}
}
}
"""
return json.loads(resp)
@route('/pay/createOrder', method='POST')
def pay_createOrder():
logger.info('Hit /pay/createOrder', request.environ.get('HTTP_X_FORWARDED_FOR'))
resp = """
{
"alertMinor": 0,
"extension": "",
"orderId": "20770230191981000063369555114514",
"playerDataDelta": {
"deleted": {},
"modified": {}
},
"result": 0
}
"""
return json.loads(resp)
@route('/u8/pay/confirmOrderState', method='POST')
def u8_pay_confirmOrderState():
resp = """
{
"payState": 3
}
"""
return json.loads(resp)
@route('/pay/confirmOrderAlipay', method='POST')
def pay_confirmOrderAlipay():
resp = """
{
"status": 0
}
"""
return json.loads(resp)
@route('/pay/confirmOrder', method='POST')
def pay_confirmOrder():
resp = """
{
"goodId": "GP_Once_1",
"playerDataDelta": {
"deleted": {},
"modified": {
"status": {
}
}
},
"receiveItems": {
"items": []
},
"result": 0
}
"""
return json.loads(resp)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description: Payment
"""
No, we will not do any payment.
"""
from bottle import *
import json
from utils import logger
@route('/pay/getUnconfirmedOrderIdList', method='POST')
def pay_getUnconfirmedOrderIdList():
logger.info('Hit /pay/getUnconfirmedOrderIdList', request.environ.get('HTTP_X_FORWARDED_FOR'))
resp = """
{
"orderIdList": [],
"playerDataDelta": {
"deleted": {},
"modified": {}
}
}
"""
return json.loads(resp)
@route('/pay/createOrder', method='POST')
def pay_createOrder():
logger.info('Hit /pay/createOrder', request.environ.get('HTTP_X_FORWARDED_FOR'))
resp = """
{
"alertMinor": 0,
"extension": "",
"orderId": "20770230191981000063369555114514",
"playerDataDelta": {
"deleted": {},
"modified": {}
},
"result": 0
}
"""
return json.loads(resp)
@route('/u8/pay/confirmOrderState', method='POST')
def u8_pay_confirmOrderState():
resp = """
{
"payState": 3
}
"""
return json.loads(resp)
@route('/pay/confirmOrderAlipay', method='POST')
def pay_confirmOrderAlipay():
resp = """
{
"status": 0
}
"""
return json.loads(resp)
@route('/pay/confirmOrder', method='POST')
def pay_confirmOrder():
resp = """
{
"goodId": "GP_Once_1",
"playerDataDelta": {
"deleted": {},
"modified": {
"status": {
}
}
},
"receiveItems": {
"items": []
},
"result": 0
}
"""
return json.loads(resp) | en | 0.350066 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Description: Payment No, we will not do any payment. {
"orderIdList": [],
"playerDataDelta": {
"deleted": {},
"modified": {}
}
} {
"alertMinor": 0,
"extension": "",
"orderId": "20770230191981000063369555114514",
"playerDataDelta": {
"deleted": {},
"modified": {}
},
"result": 0
} {
"payState": 3
} {
"status": 0
} {
"goodId": "GP_Once_1",
"playerDataDelta": {
"deleted": {},
"modified": {
"status": {
}
}
},
"receiveItems": {
"items": []
},
"result": 0
} | 2.225813 | 2 |
benchmark.py | wjones127/pyarrow-s3-parquet-benchmarks | 0 | 6618342 | <filename>benchmark.py
from enum import Enum
from itertools import product
import time
from typing import Any, Dict, List, NamedTuple, Optional
import awswrangler as wr
import pyarrow
from pyarrow.parquet import ParquetDataset
from pyarrow import fs
import s3fs
from tqdm import tqdm
class Method(Enum):
local = 'Local Filesystem'
arrow_s3fs = 'PyArrow s3fs'
s3fs = 'Dask s3fs'
aws_data_wrangler = 'AWS Data Wrangler'
class BenchmarkParams(NamedTuple):
num_files: int
num_columns: Optional[int]
method: Method
def get_files(num_files: int) -> List[str]:
return [f'2018/{month:02}/data.parquet' for month in range(1, num_files + 1)]
def get_columns(num_columns: Optional[int]) -> Optional[List[str]]:
file = './data/2018/01/data.parquet'
all_columns = ParquetDataset(file).schema.names
if num_columns is None:
return None
else:
return all_columns[:num_columns]
def local_read(files: List[str], columns: Optional[List[str]]) -> pyarrow.Table:
files = [f'data/{path}' for path in files]
return ParquetDataset(files).read(columns=columns)
def arrow_s3fs_read(files: List[str], columns: Optional[List[str]], bucket: str) -> pyarrow.Table:
files = [f'{bucket}/{path}' for path in files]
s3 = fs.S3FileSystem()
return ParquetDataset(files, filesystem=s3).read(columns=columns)
def s3fs_read(files: List[str], columns: Optional[List[str]], bucket: str) -> pyarrow.Table:
files = [f's3://{bucket}/{path}' for path in files]
s3 = s3fs.S3FileSystem()
return ParquetDataset(files, filesystem=s3).read(columns=columns)
def datawrangler_read(files: List[str], columns: Optional[List[str]], bucket: str) -> pyarrow.Table:
files = [f's3://{bucket}/{path}' for path in files]
df = wr.s3.read_parquet(files, columns=columns)
return pyarrow.Table.from_pandas(df)
def run_one(params: BenchmarkParams, bucket: str) -> pyarrow.Table:
files = get_files(params.num_files)
columns = get_columns(params.num_columns)
if params.method == Method.local:
return local_read(files, columns)
elif params.method == Method.arrow_s3fs:
return arrow_s3fs_read(files, columns, bucket)
elif params.method == Method.s3fs:
return s3fs_read(files, columns, bucket)
elif params.method == Method.aws_data_wrangler:
return datawrangler_read(files, columns, bucket)
def run_benchmark(files: List[int], columns: List[Optional[int]], bucket: str) -> pyarrow.Table:
out = []
params = product(files, columns, [Method.local, Method.arrow_s3fs, Method.aws_data_wrangler])
for num_files, num_columns, method in tqdm(list(params)):
params = BenchmarkParams(num_files, num_columns, method)
start = time.monotonic()
result = run_one(params, bucket)
end = time.monotonic()
out.append({'runtime': end - start, 'out_size': result.nbytes, **params._asdict()})
out_table = pyarrow.table(
{
'num_files': [r['num_files'] for r in out],
'num_columns': [r['num_columns'] for r in out],
'method': [r['method'].value for r in out],
'runtime': [r['runtime'] for r in out],
'out_size': [r['out_size'] for r in out],
}
)
return out_table
| <filename>benchmark.py
from enum import Enum
from itertools import product
import time
from typing import Any, Dict, List, NamedTuple, Optional
import awswrangler as wr
import pyarrow
from pyarrow.parquet import ParquetDataset
from pyarrow import fs
import s3fs
from tqdm import tqdm
class Method(Enum):
local = 'Local Filesystem'
arrow_s3fs = 'PyArrow s3fs'
s3fs = 'Dask s3fs'
aws_data_wrangler = 'AWS Data Wrangler'
class BenchmarkParams(NamedTuple):
num_files: int
num_columns: Optional[int]
method: Method
def get_files(num_files: int) -> List[str]:
return [f'2018/{month:02}/data.parquet' for month in range(1, num_files + 1)]
def get_columns(num_columns: Optional[int]) -> Optional[List[str]]:
file = './data/2018/01/data.parquet'
all_columns = ParquetDataset(file).schema.names
if num_columns is None:
return None
else:
return all_columns[:num_columns]
def local_read(files: List[str], columns: Optional[List[str]]) -> pyarrow.Table:
files = [f'data/{path}' for path in files]
return ParquetDataset(files).read(columns=columns)
def arrow_s3fs_read(files: List[str], columns: Optional[List[str]], bucket: str) -> pyarrow.Table:
files = [f'{bucket}/{path}' for path in files]
s3 = fs.S3FileSystem()
return ParquetDataset(files, filesystem=s3).read(columns=columns)
def s3fs_read(files: List[str], columns: Optional[List[str]], bucket: str) -> pyarrow.Table:
files = [f's3://{bucket}/{path}' for path in files]
s3 = s3fs.S3FileSystem()
return ParquetDataset(files, filesystem=s3).read(columns=columns)
def datawrangler_read(files: List[str], columns: Optional[List[str]], bucket: str) -> pyarrow.Table:
files = [f's3://{bucket}/{path}' for path in files]
df = wr.s3.read_parquet(files, columns=columns)
return pyarrow.Table.from_pandas(df)
def run_one(params: BenchmarkParams, bucket: str) -> pyarrow.Table:
files = get_files(params.num_files)
columns = get_columns(params.num_columns)
if params.method == Method.local:
return local_read(files, columns)
elif params.method == Method.arrow_s3fs:
return arrow_s3fs_read(files, columns, bucket)
elif params.method == Method.s3fs:
return s3fs_read(files, columns, bucket)
elif params.method == Method.aws_data_wrangler:
return datawrangler_read(files, columns, bucket)
def run_benchmark(files: List[int], columns: List[Optional[int]], bucket: str) -> pyarrow.Table:
out = []
params = product(files, columns, [Method.local, Method.arrow_s3fs, Method.aws_data_wrangler])
for num_files, num_columns, method in tqdm(list(params)):
params = BenchmarkParams(num_files, num_columns, method)
start = time.monotonic()
result = run_one(params, bucket)
end = time.monotonic()
out.append({'runtime': end - start, 'out_size': result.nbytes, **params._asdict()})
out_table = pyarrow.table(
{
'num_files': [r['num_files'] for r in out],
'num_columns': [r['num_columns'] for r in out],
'method': [r['method'].value for r in out],
'runtime': [r['runtime'] for r in out],
'out_size': [r['out_size'] for r in out],
}
)
return out_table
| none | 1 | 2.503719 | 3 | |
django_simplenote/templatetags/simplenote_version.py | gregschmit/django-simplenote | 0 | 6618343 | <filename>django_simplenote/templatetags/simplenote_version.py
from django import template
from django_simplenote import version
register = template.Library()
@register.simple_tag
def get_version():
print(version.get_version())
return version.get_version()
| <filename>django_simplenote/templatetags/simplenote_version.py
from django import template
from django_simplenote import version
register = template.Library()
@register.simple_tag
def get_version():
print(version.get_version())
return version.get_version()
| none | 1 | 1.727655 | 2 | |
write_grib.py | ahijevyc/NSC_objects | 0 | 6618344 | import pdb
import numpy as np
import ncepgrib2
import time, os, sys
nx, ny = 93, 65
ifile = "/glade/p/mmm/parc/ahijevyc/NSC/2015043000_NCARENS_mem1_upscaled.npz"
data = np.load(ifile, allow_pickle=True)
upscaled_fields = data['a'].item()
fhrs = range(len(upscaled_fields['MUCAPE']))
year = os.path.basename(ifile)[0:4]
month = os.path.basename(ifile)[4:6]
day = os.path.basename(ifile)[6:8]
hour = os.path.basename(ifile)[8:10]
minute = 0
second = 0
# Section 1 - Identification Section
discipline = 0 # 0 for meteorological
centreid = 7 # 7=US National Weather Service, National Centres for Environmental Prediction (NCEP); 60=United States National Centre for Atmospheric Research (NCAR)
subcentreid = 0
grbMasterTablesV = 2
grbLocalTablesV = 1
sig_reftime = 1 # 1 for start of forecast
status = 2 # production status of data: 2 = research
datatype = 1 # type of data: 1 = forecast products
idsect = [centreid, subcentreid, grbMasterTablesV, grbLocalTablesV, sig_reftime, year, month, day, hour, minute, second, status, datatype]
# Section 3 - Grid Definition Section
src_griddef = 0 #Tried specifying 211 but got seg fault
npts = nx * ny
noct = 0 # =0 for regular grid
opt = 0 # there is no appended list
gdtn = 30 # 30=Lambert Conformal
gdsinfo = [src_griddef, npts, noct, opt, gdtn]
e_shape = 6
e_sf = 0
e_sv = 0
ose_sf_major = 0
earthRmajor = 0
ose_sf_minor = 0
earthRminor = 0
la1=12190000
lo1=226541000
rcflag = 8
LaD = 25000000 # latitude where dx and dy are specified
LoV = 265000000
dx, dy = 81270500, 81270500
proj_centre_flag = 0
scanning_mode = 64 # 01000000
Latin1 = 25000000
Latin2 = 25000000
LatSP = 0
LonSP = 0
gdtmpl = [e_shape, e_sf, e_sv, ose_sf_major, earthRmajor, ose_sf_minor, earthRminor, nx, ny, la1, lo1, rcflag,
LaD, LoV, dx, dy, proj_centre_flag, scanning_mode, Latin1, Latin2, LatSP, LonSP]
# Section 4 - Product Definition Section
pdtnum = 0 # 0=Analysis or forecast at a horizontal level or in a horizontal layer at a point in time. 9=Probability forecasts at a horizontal level or in a horizontal layer in a continuous or non-continuous time interval
parameter_category = 7 # thermo stability=7
parameter_num = 6 # CAPE=6
generating_process = 2 # 2=Forecast, 5=Probability Forecast
backgrd_generating_process = 0
analys_generating_process = 116 # 116=WRF-EM model, generic resolution (Used in various runs) EM - Eulerian Mass-core (NCAR - aka Advanced Research WRF)
obs_data_cutoff_hours = 0
obs_data_cutoff_minutes = 0
time_range_unit_indicator = 1 # 1=Hour
fhr = 0
fixed_sfc_type1 = 108 # 108=Level at Specified Pressure Difference from Ground to Level
fixed_sfc_scale_factor1 = 0
fixed_sfc_scaled_value1 = 9000
fixed_sfc_type2 = 108 # 108=Level at Specified Pressure Difference from Ground to Level
fixed_sfc_scale_factor2 = 0
fixed_sfc_scaled_value2 = 0
pdtmpl = [parameter_category,parameter_num,generating_process,backgrd_generating_process,
analys_generating_process,obs_data_cutoff_hours,obs_data_cutoff_minutes,time_range_unit_indicator,fhr,
fixed_sfc_type1,fixed_sfc_scale_factor1,fixed_sfc_scaled_value1,
fixed_sfc_type2,fixed_sfc_scale_factor2,fixed_sfc_scaled_value2]
# Section 5 - Data Representation Section
drtnum=0 # 0=simple packing (4=ieee floating point and 40=JPEG2000 compression aren't implemented)
field = np.array(upscaled_fields['MUCAPE'], dtype=np.float64)
precision = 1.
nvalues = 1 + np.ceil( (field.max()-field.min()) / (2 * precision))
DRTnbits = np.ceil(np.log2(nvalues))
DRTref = 1
#https://www.nws.noaa.gov/mdl/synop/gmos/binaryscaling.php
DRTbinary_scale_factor = 0 # -1 = twice the precision, 0 = unchanged, 1 = half the precision, 2=1/4 the precision
DRTdecimal_scale_factor = 0 # -1 = tenth the precision, 0=unchanged, 1 = 10x the precision
DRTorigType = 0 # floating point=0
drtmpl = [DRTref,DRTbinary_scale_factor,DRTdecimal_scale_factor,DRTnbits,DRTorigType]
ofile = "test.grb"
fh = open(ofile, "wb")
# Tried writing global GRB section once (skipping with subsequent messages) but got error 'addgrid must be called before addfield'
for fhr in fhrs:
field1 = field[fhr]
grbo = ncepgrib2.Grib2Encode(discipline, idsect)
grbo.addgrid(gdsinfo, gdtmpl)
pdtmpl[8] = fhr
grbo.addfield(pdtnum, pdtmpl, drtnum, drtmpl, field1)
grbo.end()
fh.write(grbo.msg)
fh.close()
| import pdb
import numpy as np
import ncepgrib2
import time, os, sys
# ---------------------------------------------------------------------------
# One-off encoder: read upscaled NCAR-ensemble fields from a .npz archive and
# write the MUCAPE field to a GRIB2 file ("test.grb"), one message per
# forecast hour.  Depends on the external `ncepgrib2` module and on read
# access to the hard-coded input path below.
# ---------------------------------------------------------------------------
nx, ny = 93, 65
ifile = "/glade/p/mmm/parc/ahijevyc/NSC/2015043000_NCARENS_mem1_upscaled.npz"
data = np.load(ifile, allow_pickle=True)
upscaled_fields = data['a'].item()
fhrs = range(len(upscaled_fields['MUCAPE']))
# Reference time is parsed from the YYYYMMDDHH prefix of the file name.
# NOTE(review): these slices are *strings*; GRIB2 identification-section
# values are normally integers -- confirm ncepgrib2 coerces them.
year = os.path.basename(ifile)[0:4]
month = os.path.basename(ifile)[4:6]
day = os.path.basename(ifile)[6:8]
hour = os.path.basename(ifile)[8:10]
minute = 0
second = 0
# Section 1 - Identification Section
discipline = 0 # 0 for meteorological
centreid = 7 # 7=US National Weather Service, National Centres for Environmental Prediction (NCEP); 60=United States National Centre for Atmospheric Research (NCAR)
subcentreid = 0
grbMasterTablesV = 2
grbLocalTablesV = 1
sig_reftime = 1 # 1 for start of forecast
status = 2 # production status of data: 2 = research
datatype = 1 # type of data: 1 = forecast products
idsect = [centreid, subcentreid, grbMasterTablesV, grbLocalTablesV, sig_reftime, year, month, day, hour, minute, second, status, datatype]
# Section 3 - Grid Definition Section
src_griddef = 0 #Tried specifying 211 but got seg fault
npts = nx * ny
noct = 0 # =0 for regular grid
opt = 0 # there is no appended list
gdtn = 30 # 30=Lambert Conformal
gdsinfo = [src_griddef, npts, noct, opt, gdtn]
# Lambert-conformal template values.  The latitude/longitude magnitudes look
# like microdegrees (la1=12190000 -> 12.19N) -- TODO confirm against the
# GRIB2 template 3.30 definition before changing any of them.
e_shape = 6
e_sf = 0
e_sv = 0
ose_sf_major = 0
earthRmajor = 0
ose_sf_minor = 0
earthRminor = 0
la1=12190000
lo1=226541000
rcflag = 8
LaD = 25000000 # latitude where dx and dy are specified
LoV = 265000000
dx, dy = 81270500, 81270500
proj_centre_flag = 0
scanning_mode = 64 # 01000000
Latin1 = 25000000
Latin2 = 25000000
LatSP = 0
LonSP = 0
gdtmpl = [e_shape, e_sf, e_sv, ose_sf_major, earthRmajor, ose_sf_minor, earthRminor, nx, ny, la1, lo1, rcflag,
    LaD, LoV, dx, dy, proj_centre_flag, scanning_mode, Latin1, Latin2, LatSP, LonSP]
# Section 4 - Product Definition Section
pdtnum = 0 # 0=Analysis or forecast at a horizontal level or in a horizontal layer at a point in time. 9=Probability forecasts at a horizontal level or in a horizontal layer in a continuous or non-continuous time interval
parameter_category = 7 # thermo stability=7
parameter_num = 6 # CAPE=6
generating_process = 2 # 2=Forecast, 5=Probability Forecast
backgrd_generating_process = 0
analys_generating_process = 116 # 116=WRF-EM model, generic resolution (Used in various runs) EM - Eulerian Mass-core (NCAR - aka Advanced Research WRF)
obs_data_cutoff_hours = 0
obs_data_cutoff_minutes = 0
time_range_unit_indicator = 1 # 1=Hour
fhr = 0 # placeholder; overwritten per message via pdtmpl[8] in the loop below
fixed_sfc_type1 = 108 # 108=Level at Specified Pressure Difference from Ground to Level
fixed_sfc_scale_factor1 = 0
fixed_sfc_scaled_value1 = 9000
fixed_sfc_type2 = 108 # 108=Level at Specified Pressure Difference from Ground to Level
fixed_sfc_scale_factor2 = 0
fixed_sfc_scaled_value2 = 0
pdtmpl = [parameter_category,parameter_num,generating_process,backgrd_generating_process,
    analys_generating_process,obs_data_cutoff_hours,obs_data_cutoff_minutes,time_range_unit_indicator,fhr,
    fixed_sfc_type1,fixed_sfc_scale_factor1,fixed_sfc_scaled_value1,
    fixed_sfc_type2,fixed_sfc_scale_factor2,fixed_sfc_scaled_value2]
# Section 5 - Data Representation Section
drtnum=0 # 0=simple packing (4=ieee floating point and 40=JPEG2000 compression aren't implemented)
field = np.array(upscaled_fields['MUCAPE'], dtype=np.float64)
precision = 1.
# Number of representable packed values at the requested precision, and the
# bit width needed to hold them.
nvalues = 1 + np.ceil( (field.max()-field.min()) / (2 * precision))
DRTnbits = np.ceil(np.log2(nvalues))
# NOTE(review): np.ceil returns a numpy float; confirm ncepgrib2 accepts a
# non-integer bit count in the DRT template.
DRTref = 1
#https://www.nws.noaa.gov/mdl/synop/gmos/binaryscaling.php
DRTbinary_scale_factor = 0 # -1 = twice the precision, 0 = unchanged, 1 = half the precision, 2=1/4 the precision
DRTdecimal_scale_factor = 0 # -1 = tenth the precision, 0=unchanged, 1 = 10x the precision
DRTorigType = 0 # floating point=0
drtmpl = [DRTref,DRTbinary_scale_factor,DRTdecimal_scale_factor,DRTnbits,DRTorigType]
ofile = "test.grb"
fh = open(ofile, "wb")
# Tried writing global GRB section once (skipping with subsequent messages) but got error 'addgrid must be called before addfield'
# One complete GRIB2 message is encoded and appended per forecast hour.
for fhr in fhrs:
    field1 = field[fhr]
    grbo = ncepgrib2.Grib2Encode(discipline, idsect)
    grbo.addgrid(gdsinfo, gdtmpl)
    pdtmpl[8] = fhr # slot 8 = forecast time, in time_range_unit_indicator units
    grbo.addfield(pdtnum, pdtmpl, drtnum, drtmpl, field1)
    grbo.end()
    fh.write(grbo.msg)
fh.close()
# ---- end of GRIB2 encoder script (dataset metadata row removed) ----
# experiment1.py (FeliMe/residual-score-pitfalls)
"""
Experiment 1:
- Get an image (mid-slice of a brain)
- Create an anomaly by putting a circle of specific intensity inside the image
- Subtract the original image from the anomaly (assuming an Autoencoder that has learned the distribution perfectly)
- Do this for all intensities from 0 to 1 and report the average precision
- In a second dimension, add gaussian blur to the image (simulates imperfect reconstruction of the Autoencoder)
"""
import argparse
import random
import numpy as np
from tqdm import tqdm
from artificial_anomalies import disk_anomaly, sample_position
from utils import (
average_precision,
blur_img,
load_mood_test_data,
plot_landscape,
plot_heatmap,
show,
)
if __name__ == "__main__":
    # Fix all RNG seeds so anomaly positions are reproducible across runs.
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    parser = argparse.ArgumentParser()
    # Optional path to a precomputed AP grid (.npy); skips the slow experiment loop.
    parser.add_argument('--results_path', type=str, default=None)
    args = parser.parse_args()
    intensities = np.linspace(0., 1., num=100) # First dimension
    blurrings = np.linspace(0., 5., num=100) # Second dimension
    if args.results_path is None:
        # Load data
        print("Loading data...")
        imgs = load_mood_test_data()
        # Select ball size (radius of the synthetic disk anomaly, in pixels)
        radius = 20
        ap_results = [] # Gather ap results here
        rec_results = [] # Gather reconstruction error results here
        # Perform experiment: sweep the (intensity x blur) grid
        for intensity in tqdm(intensities):
            ap_result_row = []
            rec_result_row = []
            for blur in blurrings:
                aps = []
                rec_errs = []
                # Reset the random seed so for every intensity and blurring we get the same positions
                random.seed(seed)
                np.random.seed(seed)
                for img in imgs:
                    # Blur the normal image (simulates imperfect reconstruction)
                    img_blur = blur_img(img, blur)
                    # Create an anomaly at a random position
                    position = sample_position(img)
                    img_anomal, label = disk_anomaly(img, position, radius, intensity)
                    # Compute the reconstruction error
                    pred = np.abs(img_blur - img_anomal)
                    # Compute the average precision
                    ap = average_precision(label, pred)
                    aps.append(ap)
                    rec_errs.append(pred.mean())
                ap_result_row.append(np.mean(aps))
                rec_result_row.append(np.mean(rec_errs))
            ap_results.append(ap_result_row)
            rec_results.append(rec_result_row)
        ap_results = np.array(ap_results)
        rec_results = np.array(rec_results)
        # NOTE(review): assumes ./results/experiment1/ already exists -- np.save
        # does not create directories.
        np.save("./results/experiment1/experiment1_full_aps.npy", ap_results)
        np.save("./results/experiment1/experiment1_full_rec_errs.npy", rec_results)
    else:
        ap_results = np.load(args.results_path)
    # Plots are written to disk first, then rendered again interactively.
    plot_landscape(blurrings, intensities, ap_results, ("σ", "intensity", "ap"),
                   path="./results/experiment1/experiment1_full_landscape.png")
    plot_heatmap(blurrings, intensities, ap_results, ("σ", "intensity"),
                 path="./results/experiment1/experiment1_full_heatmap.png")
    plot_landscape(blurrings, intensities, ap_results, ("σ", "intensity", "ap"))
    plot_heatmap(blurrings, intensities, ap_results, ("σ", "intensity"))
# experiment1.py (duplicate copy)
"""
Experiment 1:
- Get an image (mid-slice of a brain)
- Create an anomaly by putting a circle of specific intensity inside the image
- Subtract the original image from the anomaly (assuming an Autoencoder that has learned the distribution perfectly)
- Do this for all intensities from 0 to 1 and report the average precision
- In a second dimension, add gaussian blur to the image (simulates imperfect reconstruction of the Autoencoder)
"""
import argparse
import random
import numpy as np
from tqdm import tqdm
from artificial_anomalies import disk_anomaly, sample_position
from utils import (
average_precision,
blur_img,
load_mood_test_data,
plot_landscape,
plot_heatmap,
show,
)
if __name__ == "__main__":
    # Fix all RNG seeds so anomaly positions are reproducible across runs.
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    parser = argparse.ArgumentParser()
    # Optional path to a precomputed AP grid (.npy); skips the slow experiment loop.
    parser.add_argument('--results_path', type=str, default=None)
    args = parser.parse_args()
    intensities = np.linspace(0., 1., num=100) # First dimension
    blurrings = np.linspace(0., 5., num=100) # Second dimension
    if args.results_path is None:
        # Load data
        print("Loading data...")
        imgs = load_mood_test_data()
        # Select ball size (radius of the synthetic disk anomaly, in pixels)
        radius = 20
        ap_results = [] # Gather ap results here
        rec_results = [] # Gather reconstruction error results here
        # Perform experiment: sweep the (intensity x blur) grid
        for intensity in tqdm(intensities):
            ap_result_row = []
            rec_result_row = []
            for blur in blurrings:
                aps = []
                rec_errs = []
                # Reset the random seed so for every intensity and blurring we get the same positions
                random.seed(seed)
                np.random.seed(seed)
                for img in imgs:
                    # Blur the normal image (simulates imperfect reconstruction)
                    img_blur = blur_img(img, blur)
                    # Create an anomaly at a random position
                    position = sample_position(img)
                    img_anomal, label = disk_anomaly(img, position, radius, intensity)
                    # Compute the reconstruction error
                    pred = np.abs(img_blur - img_anomal)
                    # Compute the average precision
                    ap = average_precision(label, pred)
                    aps.append(ap)
                    rec_errs.append(pred.mean())
                ap_result_row.append(np.mean(aps))
                rec_result_row.append(np.mean(rec_errs))
            ap_results.append(ap_result_row)
            rec_results.append(rec_result_row)
        ap_results = np.array(ap_results)
        rec_results = np.array(rec_results)
        # NOTE(review): assumes ./results/experiment1/ already exists -- np.save
        # does not create directories.
        np.save("./results/experiment1/experiment1_full_aps.npy", ap_results)
        np.save("./results/experiment1/experiment1_full_rec_errs.npy", rec_results)
    else:
        ap_results = np.load(args.results_path)
    # Plots are written to disk first, then rendered again interactively.
    plot_landscape(blurrings, intensities, ap_results, ("σ", "intensity", "ap"),
                   path="./results/experiment1/experiment1_full_landscape.png")
    plot_heatmap(blurrings, intensities, ap_results, ("σ", "intensity"),
                 path="./results/experiment1/experiment1_full_heatmap.png")
    plot_landscape(blurrings, intensities, ap_results, ("σ", "intensity", "ap"))
    plot_heatmap(blurrings, intensities, ap_results, ("σ", "intensity"))
# ---- end of experiment1.py (dataset metadata row removed) ----
# jupyterhub/notebooks/zz_under_construction/tensorflow/linear/model.py (just4jc/pipeline)
import dill as pickle
class Predictor(object):
    """Minimal pickle-able model stub.

    ``setup`` is a no-op hook and every call to ``predict`` returns the
    fixed string ``'response'`` regardless of its inputs.
    """

    def __init__(self):
        pass

    def setup(self):
        pass

    def predict(self, inputs):
        return 'response'
if __name__ == '__main__':
    # CLI: single positional argument naming the pickle file to write.
    from argparse import ArgumentParser
    cli = ArgumentParser()
    cli.add_argument('model_pkl_filename')
    target = cli.parse_args().model_pkl_filename
    # "Training" is a placeholder here -- the stub model needs no fitting.
    print("Training model...")
    print("...Done!")
    print("Pickling model to '%s'..." % target)
    predictor = Predictor()
    with open(target, 'wb') as out:
        pickle.dump(predictor, out)
    print("...Done!")
import dill as pickle
class Predictor(object):
    """Minimal pickle-able model stub.

    ``setup`` is a no-op hook and every call to ``predict`` returns the
    fixed string ``'response'`` regardless of its inputs.
    """

    def __init__(self):
        pass

    def setup(self):
        pass

    def predict(self, inputs):
        return 'response'
if __name__ == '__main__':
    # CLI: single positional argument naming the pickle file to write.
    from argparse import ArgumentParser
    cli = ArgumentParser()
    cli.add_argument('model_pkl_filename')
    target = cli.parse_args().model_pkl_filename
    # "Training" is a placeholder here -- the stub model needs no fitting.
    print("Training model...")
    print("...Done!")
    print("Pickling model to '%s'..." % target)
    predictor = Predictor()
    with open(target, 'wb') as out:
        pickle.dump(predictor, out)
    print("...Done!")
# ---- end of model.py (dataset metadata row removed) ----
# tests/test_firestore_cache.py (christippett/django-firebase-cache)
import os
import pytest
from mockfirestore import MockFirestore
from google.cloud import firestore
from django.core.cache import cache
from django.conf import settings
# Point Django at the test settings module before any settings access below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "firestore_settings")
# Firestore collection name used by the "default" cache alias.
DEFAULT_CACHE_LOCATION = settings.CACHES["default"]["LOCATION"]
# Shared in-memory Firestore double; cleared after each test by the `db` fixture.
mock_firestore = MockFirestore()
@pytest.fixture(autouse=True)
def db(monkeypatch):
    """Route the cache backend at the shared in-memory Firestore double.

    autouse=True: every test in this module gets the patched client without
    requesting the fixture explicitly.
    """
    with monkeypatch.context() as m:
        # Any code constructing firestore.Client() now receives the mock.
        m.setattr(firestore, "Client", lambda: mock_firestore)
        yield mock_firestore
    # Teardown: wipe the mock's documents and drop the cache backend's
    # memoized client so the next test re-resolves it.
    mock_firestore.reset()
    cache._db = None  # reset cached Firestore client
def test_can_write_to_cache(db):
    """A single cache.add() stores the value and creates exactly one document."""
    cache.add(key="key1", value="value1", timeout=None)
    doc_refs = db.collection(DEFAULT_CACHE_LOCATION).list_documents()
    assert cache.get("key1") == "value1"
    assert len(doc_refs) == 1
def test_can_write_multiple_keys_to_cache(db):
    """Each distinct key becomes its own document in the cache collection."""
    cache.add(key="key1", value="value1", timeout=None)
    cache.add(key="key2", value="value2", timeout=None)
    doc_refs = db.collection(DEFAULT_CACHE_LOCATION).list_documents()
    assert len(doc_refs) == 2
import os
import pytest
from mockfirestore import MockFirestore
from google.cloud import firestore
from django.core.cache import cache
from django.conf import settings
# Point Django at the test settings module before any settings access below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "firestore_settings")
# Firestore collection name used by the "default" cache alias.
DEFAULT_CACHE_LOCATION = settings.CACHES["default"]["LOCATION"]
# Shared in-memory Firestore double; cleared after each test by the `db` fixture.
mock_firestore = MockFirestore()
@pytest.fixture(autouse=True)
def db(monkeypatch):
    """Route the cache backend at the shared in-memory Firestore double.

    autouse=True: every test in this module gets the patched client without
    requesting the fixture explicitly.
    """
    with monkeypatch.context() as m:
        # Any code constructing firestore.Client() now receives the mock.
        m.setattr(firestore, "Client", lambda: mock_firestore)
        yield mock_firestore
    # Teardown: wipe the mock's documents and drop the cache backend's
    # memoized client so the next test re-resolves it.
    mock_firestore.reset()
    cache._db = None  # reset cached Firestore client
def test_can_write_to_cache(db):
    """A single cache.add() stores the value and creates exactly one document."""
    cache.add(key="key1", value="value1", timeout=None)
    doc_refs = db.collection(DEFAULT_CACHE_LOCATION).list_documents()
    assert cache.get("key1") == "value1"
    assert len(doc_refs) == 1
def test_can_write_multiple_keys_to_cache(db):
    """Each distinct key becomes its own document in the cache collection."""
    cache.add(key="key1", value="value1", timeout=None)
    cache.add(key="key2", value="value2", timeout=None)
    doc_refs = db.collection(DEFAULT_CACHE_LOCATION).list_documents()
    # Fix: the original final line had dataset metadata fused onto it
    # ("... == 2 | en | 0.640763 ..."), which evaluated undefined names at
    # runtime instead of the intended comparison.
    assert len(doc_refs) == 2
#!/usr/bin/env python3
# bin/manage-dashboards.py (dmuth/grafana-playground)
#
# This script lets us export and import all dashboards from our Grafana instance.
#
import argparse
import json
import sys
import requests
#
# Parse our arguments.
#
def getArgs():
    """Build the CLI parser and return the parsed arguments namespace.

    Exactly one of --export / --import is required (mutually exclusive).
    """
    parser = argparse.ArgumentParser(description="Export and import Grafana dashboards")
    parser.add_argument("--api-key", required=True,
        help="The Grafana API key. Can be Generated in Grafana at: Configuration -> API Keys")
    parser.add_argument("--url", default="http://grafana:3000/",
        help="Base URL for a Grafana sever. Defaults to http://grafana:3000/")
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument("--export", action="store_true", help="Export dashboards as JSON to stdout.")
    mode.add_argument("--import", action="store_true", help="Read JSON from stdin to create dashboards.")
    return parser.parse_args()
#
# Get our dashboard IDs and return them as an array.
#
def getDashboardIds(url, headers):
    """Query the Grafana search endpoint at ``url`` and return all dashboard uids."""
    resp = requests.get(url=url, headers=headers)
    if resp.status_code != 200:
        raise Exception(f"Status code {resp.status_code} != 200 for URL '{url}'!")
    return [item["uid"] for item in resp.json()]
#
# Get the data for each of our dashboards and return it in an array.
#
def getDashboards(url, headers, ids):
    """Fetch the full body of every dashboard uid in ``ids``, in order."""
    return [getDashboard(url, headers, uid) for uid in ids]
#
# Get the data for a specific dashboard and return it in a dict.
#
def getDashboard(url, headers, uid):
    """Return the 'dashboard' object for ``uid``; the 'meta' wrapper is dropped."""
    endpoint = f"{url}/api/dashboards/uid/{uid}"
    resp = requests.get(url=endpoint, headers=headers)
    if resp.status_code != 200:
        raise Exception(f"Status code {resp.status_code} != 200 for URL '{endpoint}'!")
    # Only the dashboard body is useful to callers, not the metadata.
    return resp.json()["dashboard"]
#
# Get all of our dashboards and return them as a list.
#
def export(url, api_key):
    """Return the full bodies of every dashboard reachable with ``api_key``."""
    headers = {"Authorization": f"Bearer {api_key}"}
    # '%' matches everything, so the search returns all dashboards.
    search_url = f"{url}/api/search?query=%"
    return getDashboards(url, headers, getDashboardIds(search_url, headers))
#
# Wrapper to print a string to stderr.
#
def stderr(string):
    """Write ``string`` plus a trailing newline to standard error."""
    sys.stderr.write(f"{string}\n")
#
# Read JSON from stdin and parse it.
#
def import_json():
    """Read an export file (JSON) from stdin and return its dashboards list.

    Raises Exception if the metadata is missing or the file was not produced
    by this script's own export process.
    """
    # Fix/idiom: read stdin in one call instead of the original quadratic
    # line-by-line string concatenation.
    data = json.loads(sys.stdin.read())
    #
    # Sanity-check the metadata.  We only accept data that was created by
    # this script's export process, not arbitrary JSON fed in by accident.
    # Guard clauses replace the original three-deep if/else nesting; the
    # raised messages are unchanged.
    #
    if "meta" not in data:
        raise Exception("Metadata not found in export file.")
    if "app" not in data["meta"]:
        raise Exception("Invalid metadata in export file.")
    if data["meta"]["app"] != "Grafana Playground":
        raise Exception(f"Invalid app type '{data['meta']['app']}' in metadata, expecting 'Grafana Playground'!")
    return data["dashboards"]
#
# Go through our dashboards and import them.
#
def import_dashboards(url, api_key, dashboards):
    """POST each dashboard dict to Grafana's /api/dashboards/db endpoint.

    First tries an overwrite of the existing uid; on a 404 the dashboard is
    re-submitted as a brand-new one (id cleared, overwrite off).
    Progress is logged to stderr so stdout stays clean for piped JSON.
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type":"application/json",
        "Accept": "application/json"
        }
    url_import = f"{url}/api/dashboards/db"
    for dashboard in dashboards:
        new_dashboard = {"dashboard": dashboard}
        # NOTE(review): `id` shadows the builtin of the same name here.
        id = new_dashboard["dashboard"]["id"]
        uid = new_dashboard["dashboard"]["uid"]
        title = new_dashboard["dashboard"]["title"]
        new_dashboard["overwrite"] = True
        r = requests.post(url = url_import, headers = headers,
            data = json.dumps(new_dashboard))
        stderr(f"# Importing dashboard uid={uid} id={id} title={title}...")
        if r.status_code == 404:
            # Unknown uid: retry as a create instead of an overwrite.
            stderr(f"Dashboard uid {uid} not found, let's create it instead!")
            new_dashboard["overwrite"] = False
            # NOTE(review): this mutates the caller's dashboard dict in place,
            # and the retry's status code is never checked for success.
            new_dashboard["dashboard"]["id"] = None
            r = requests.post(url = url_import, headers = headers,
                data = json.dumps(new_dashboard))
        elif r.status_code != 200:
            raise Exception(f"! Status code {r.status_code} != 200 for URL '{url_import}'! Message returned: {r.text}")
        stderr(f"# Imported dashboard '{new_dashboard['dashboard']['title']}' " +
            f"(uid {uid}), "
            + f"results: {r.text}")
#
# Our main entrypoint.
#
def main():
    """Parse arguments and run either the export or the import workflow."""
    args = getArgs()
    # Double-slashes don't play nice with Grafana.
    url = args.url.rstrip("/")
    if args.export:
        output = {
            "dashboards": export(url, args.api_key),
            # Stamp the file so import_json() can recognize our own exports.
            "meta": {"app": "Grafana Playground"},
        }
        print(json.dumps(output))
    elif getattr(args, "import"):
        # "import" is a reserved word, so the attribute must be read dynamically.
        stderr("Now reading JSON of an export file from stdin...")
        import_dashboards(url, args.api_key, import_json())
    else:
        # Unreachable in practice (argparse enforces the required mutually
        # exclusive group), but fail loudly if that ever changes.
        # BUG FIX: the original did `raise("...")`, which raises
        # "TypeError: exceptions must derive from BaseException" instead of
        # surfacing the intended message.
        raise ValueError("Invalid arguments!")
main()
#!/usr/bin/env python3
#
# This script lets us export and import all dashboards from our Grafana instance.
#
import argparse
import json
import sys
import requests
#
# Parse our arguments.
#
def getArgs():
    """Build the CLI parser and return the parsed arguments namespace.

    Exactly one of --export / --import is required (mutually exclusive).
    """
    parser = argparse.ArgumentParser(description="Export and import Grafana dashboards")
    parser.add_argument("--api-key", required=True,
        help="The Grafana API key. Can be Generated in Grafana at: Configuration -> API Keys")
    parser.add_argument("--url", default="http://grafana:3000/",
        help="Base URL for a Grafana sever. Defaults to http://grafana:3000/")
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument("--export", action="store_true", help="Export dashboards as JSON to stdout.")
    mode.add_argument("--import", action="store_true", help="Read JSON from stdin to create dashboards.")
    return parser.parse_args()
#
# Get our dashboard IDs and return them as an array.
#
def getDashboardIds(url, headers):
    """Query the Grafana search endpoint at ``url`` and return all dashboard uids."""
    resp = requests.get(url=url, headers=headers)
    if resp.status_code != 200:
        raise Exception(f"Status code {resp.status_code} != 200 for URL '{url}'!")
    return [item["uid"] for item in resp.json()]
#
# Get the data for each of our dashboards and return it in an array.
#
def getDashboards(url, headers, ids):
    """Fetch the full body of every dashboard uid in ``ids``, in order."""
    return [getDashboard(url, headers, uid) for uid in ids]
#
# Get the data for a specific dashboard and return it in a dict.
#
def getDashboard(url, headers, uid):
    """Return the 'dashboard' object for ``uid``; the 'meta' wrapper is dropped."""
    endpoint = f"{url}/api/dashboards/uid/{uid}"
    resp = requests.get(url=endpoint, headers=headers)
    if resp.status_code != 200:
        raise Exception(f"Status code {resp.status_code} != 200 for URL '{endpoint}'!")
    # Only the dashboard body is useful to callers, not the metadata.
    return resp.json()["dashboard"]
#
# Get all of our dashboards and return them as a list.
#
def export(url, api_key):
    """Return the full bodies of every dashboard reachable with ``api_key``."""
    headers = {"Authorization": f"Bearer {api_key}"}
    # '%' matches everything, so the search returns all dashboards.
    search_url = f"{url}/api/search?query=%"
    return getDashboards(url, headers, getDashboardIds(search_url, headers))
#
# Wrapper to print a string to stderr.
#
def stderr(string):
    """Write ``string`` plus a trailing newline to standard error."""
    sys.stderr.write(f"{string}\n")
#
# Read JSON from stdin and parse it.
#
def import_json():
    """Read an export file (JSON) from stdin and return its dashboards list.

    Raises Exception if the metadata is missing or the file was not produced
    by this script's own export process.
    """
    # Fix/idiom: read stdin in one call instead of the original quadratic
    # line-by-line string concatenation.
    data = json.loads(sys.stdin.read())
    #
    # Sanity-check the metadata.  We only accept data that was created by
    # this script's export process, not arbitrary JSON fed in by accident.
    # Guard clauses replace the original three-deep if/else nesting; the
    # raised messages are unchanged.
    #
    if "meta" not in data:
        raise Exception("Metadata not found in export file.")
    if "app" not in data["meta"]:
        raise Exception("Invalid metadata in export file.")
    if data["meta"]["app"] != "Grafana Playground":
        raise Exception(f"Invalid app type '{data['meta']['app']}' in metadata, expecting 'Grafana Playground'!")
    return data["dashboards"]
#
# Go through our dashboards and import them.
#
def import_dashboards(url, api_key, dashboards):
    """POST each dashboard dict to Grafana's /api/dashboards/db endpoint.

    First tries an overwrite of the existing uid; on a 404 the dashboard is
    re-submitted as a brand-new one (id cleared, overwrite off).
    Progress is logged to stderr so stdout stays clean for piped JSON.
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type":"application/json",
        "Accept": "application/json"
        }
    url_import = f"{url}/api/dashboards/db"
    for dashboard in dashboards:
        new_dashboard = {"dashboard": dashboard}
        # NOTE(review): `id` shadows the builtin of the same name here.
        id = new_dashboard["dashboard"]["id"]
        uid = new_dashboard["dashboard"]["uid"]
        title = new_dashboard["dashboard"]["title"]
        new_dashboard["overwrite"] = True
        r = requests.post(url = url_import, headers = headers,
            data = json.dumps(new_dashboard))
        stderr(f"# Importing dashboard uid={uid} id={id} title={title}...")
        if r.status_code == 404:
            # Unknown uid: retry as a create instead of an overwrite.
            stderr(f"Dashboard uid {uid} not found, let's create it instead!")
            new_dashboard["overwrite"] = False
            # NOTE(review): this mutates the caller's dashboard dict in place,
            # and the retry's status code is never checked for success.
            new_dashboard["dashboard"]["id"] = None
            r = requests.post(url = url_import, headers = headers,
                data = json.dumps(new_dashboard))
        elif r.status_code != 200:
            raise Exception(f"! Status code {r.status_code} != 200 for URL '{url_import}'! Message returned: {r.text}")
        stderr(f"# Imported dashboard '{new_dashboard['dashboard']['title']}' " +
            f"(uid {uid}), "
            + f"results: {r.text}")
#
# Our main entrypoint.
#
def main():
    """Parse arguments and run either the export or the import workflow."""
    args = getArgs()
    # Double-slashes don't play nice with Grafana.
    url = args.url.rstrip("/")
    if args.export:
        output = {
            "dashboards": export(url, args.api_key),
            # Stamp the file so import_json() can recognize our own exports.
            "meta": {"app": "Grafana Playground"},
        }
        print(json.dumps(output))
    elif getattr(args, "import"):
        # "import" is a reserved word, so the attribute must be read dynamically.
        stderr("Now reading JSON of an export file from stdin...")
        import_dashboards(url, args.api_key, import_json())
    else:
        # Unreachable in practice (argparse enforces the required mutually
        # exclusive group), but fail loudly if that ever changes.
        # BUG FIX: the original did `raise("...")`, which raises
        # "TypeError: exceptions must derive from BaseException" instead of
        # surfacing the intended message.
        raise ValueError("Invalid arguments!")
main()
# ---- end of manage-dashboards.py (dataset metadata row removed) ----
# analysis/ExperimentRecorder.py (cuguilke/psykedelic)
"""
Title :ExperimentRecorder.py
Description :JSON exporter class to store experimental results over time
Author :<NAME>
Date Created :15-01-2020
Date Modified :16-06-2020
version :1.1.4
python_version :3.6.6
"""
import os
import json
class ExperimentRecorder:
"""
Custom JSON exporter class to store information on MicroResNet experiments
# Arguments
:param model_name: (string) Keras model name
:param optimizer: (string) name of the selected Keras optimizer
:param loss: (string) name of the selected Keras loss function
:param base_lr: (float) learning rate
:param batch_size: (int) # of inputs in a mini-batch
:param epochs: (int) # of full training passes
:param init_mode: (string) {random_init | static_init}
:param dataset: (string) selected dataset
:param l1_penalty: (float) regularization coefficient
:param history_interval: (int) for history callbacks, defines the interval of logs
:param threshold: (float) {significance_threshold | contribution_threshold}
:param compression_mode: (string) selected compression mode
:param verbose: (int) Keras verbose argument
:param path: (string) absolute path to 'experiment.json' file if already exists
"""
def __init__(self,
model_name,
optimizer,
loss,
base_lr,
batch_size,
epochs,
init_mode,
dataset,
l1_penalty,
history_interval=1,
threshold=None,
compression_mode=None,
verbose=0,
path="experiment.json"):
self.model_name = model_name
self.optimizer = optimizer
self.loss = loss
self.base_lr = "{:.0e}".format(base_lr)
self.batch_size = batch_size
self.epochs = str(epochs)
self.init_mode = init_mode
self.dataset = dataset
self.history_interval = str(history_interval)
self.l1_penalty = "{:.0e}".format(l1_penalty)
self.threshold = "{:.0e}".format(threshold)
self.compression_mode = compression_mode
self.verbose = verbose
self.path = path
self.hist_cache = {}
if os.path.isfile(self.path):
self.load_data()
self.model = "%s:%s:%s:%s" % (self.model_name, self.optimizer, self.loss, self.base_lr)
self.base_key = ":".join([self.model, str(self.epochs), str(self.batch_size), self.init_mode, self.dataset, str(self.l1_penalty)])
def load_data(self):
"""
Loads the current experiment history to append new results
"""
with open(self.path, "r") as hist_file:
self.hist_cache = json.load(hist_file)
def save_data(self):
"""
Saves the accumulated data by updating the existing file located at self.path
"""
with open(self.path, "w+") as hist_file:
json.dump(self.hist_cache, hist_file)
def record(self, data, mode, compression_mode=None):
if mode == "learning_curve":
self.record_learning_curve(data["hist"])
elif mode == "pruning_per_layer":
self.record_pruning_per_layer(data["layer_names"], data["active_params"], data["total_params"], compression_mode)
elif mode == "pruning_per_layer_history":
self.record_pruning_per_layer_history(data["pruning_per_layer_history"])
elif mode == "eig_analysis":
self.record_eig_analysis(data["layer_names"], data["active_real"], data["pruned_real"], data["active_complex"], data["pruned_complex"])
elif mode == "eig_stats":
self.record_eig_stats(data["layer_names"], data["total_complex_list"], data["total_real_list"], data["target_complex_list"], data["pruned_complex_list"], data["target_real_list"], data["pruned_real_list"])
elif mode == "set_analysis":
self.record_set_analysis(data["group_sizes"], data["groups"], data["codes"])
elif mode == "performance":
self.record_performance(data["performance"])
elif mode == "performance_history":
self.record_performance_history(data["performance_history"])
elif mode == "pruning_per_threshold":
self.record_pruning_per_threshold(data["final_vals"], data["thresholds"])
def record_learning_curve(self, hist):
if "learning_curve" in self.hist_cache:
if self.base_key in self.hist_cache["learning_curve"]:
if self.threshold in self.hist_cache["learning_curve"][self.base_key]:
if self.compression_mode in self.hist_cache["learning_curve"][self.base_key][self.threshold]:
self.hist_cache["learning_curve"][self.base_key][self.threshold][self.compression_mode].append(hist)
else:
self.hist_cache["learning_curve"][self.base_key][self.threshold][self.compression_mode] = [hist]
else:
self.hist_cache["learning_curve"][self.base_key][self.threshold] = {self.compression_mode: [hist]}
else:
self.hist_cache["learning_curve"][self.base_key] = {self.threshold: {self.compression_mode: [hist]}}
else:
self.hist_cache["learning_curve"] ={self.base_key: {self.threshold: {self.compression_mode: [hist]}}}
def record_pruning_per_layer(self, layer_names, active_params, total_params, compression_mode=None):
"""
# Arguments
:param layer_names:
:param active_params:
:param total_params:
:param compression_mode: (string) Optional. ComperativeTestingCallback set this to record data for multiple modes
"""
compression_mode = self.compression_mode if compression_mode is None else compression_mode
entry = {"layer_names": layer_names, "active_params": active_params, "total_params": total_params}
if "pruning_per_layer" in self.hist_cache:
if self.base_key in self.hist_cache["pruning_per_layer"]:
if self.threshold in self.hist_cache["pruning_per_layer"][self.base_key]:
if compression_mode in self.hist_cache["pruning_per_layer"][self.base_key][self.threshold]:
self.hist_cache["pruning_per_layer"][self.base_key][self.threshold][compression_mode].append(entry)
else:
self.hist_cache["pruning_per_layer"][self.base_key][self.threshold][compression_mode] = [entry]
else:
self.hist_cache["pruning_per_layer"][self.base_key][self.threshold] = {compression_mode: [entry]}
else:
self.hist_cache["pruning_per_layer"][self.base_key] = {self.threshold: {compression_mode: [entry]}}
else:
self.hist_cache["pruning_per_layer"] ={self.base_key: {self.threshold: {compression_mode: [entry]}}}
def record_pruning_per_layer_history(self, pruning_per_layer_history):
entry = pruning_per_layer_history
if "pruning_per_layer_history" in self.hist_cache:
if self.base_key in self.hist_cache["pruning_per_layer_history"]:
if self.threshold in self.hist_cache["pruning_per_layer_history"][self.base_key]:
if self.history_interval in self.hist_cache["pruning_per_layer_history"][self.base_key][self.threshold]:
self.hist_cache["pruning_per_layer_history"][self.base_key][self.threshold][self.history_interval].append(entry)
else:
self.hist_cache["pruning_per_layer_history"][self.base_key][self.threshold][self.history_interval] = [entry]
else:
self.hist_cache["pruning_per_layer_history"][self.base_key][self.threshold] = {self.history_interval: [entry]}
else:
self.hist_cache["pruning_per_layer_history"][self.base_key] = {self.threshold: {self.history_interval: [entry]}}
else:
self.hist_cache["pruning_per_layer_history"] ={self.base_key: {self.threshold: {self.history_interval: [entry]}}}
def record_eig_analysis(self, layer_names, active_real, pruned_real, active_complex, pruned_complex):
entry = {"layer_names": layer_names, "active_real": active_real, "pruned_real": pruned_real, "active_complex": active_complex, "pruned_complex": pruned_complex}
if "eig_analysis" in self.hist_cache:
if self.base_key in self.hist_cache["eig_analysis"]:
if self.threshold in self.hist_cache["eig_analysis"][self.base_key]:
if self.compression_mode in self.hist_cache["eig_analysis"][self.base_key][self.threshold]:
self.hist_cache["eig_analysis"][self.base_key][self.threshold][self.compression_mode].append(entry)
else:
self.hist_cache["eig_analysis"][self.base_key][self.threshold][self.compression_mode] = [entry]
else:
self.hist_cache["eig_analysis"][self.base_key][self.threshold] = {self.compression_mode: [entry]}
else:
self.hist_cache["eig_analysis"][self.base_key] = {self.threshold: {self.compression_mode: [entry]}}
else:
self.hist_cache["eig_analysis"] ={self.base_key: {self.threshold: {self.compression_mode: [entry]}}}
def record_eig_stats(self, layer_names, total_complex_list, total_real_list, target_complex_list, pruned_complex_list, target_real_list, pruned_real_list):
entry = {
"layer_names": layer_names,
"total_complex_list": total_complex_list,
"total_real_list": total_real_list,
"target_complex_list": target_complex_list,
"pruned_complex_list": pruned_complex_list,
"target_real_list": target_real_list,
"pruned_real_list": pruned_real_list
}
if "eig_stats" in self.hist_cache:
if self.base_key in self.hist_cache["eig_stats"]:
if self.threshold in self.hist_cache["eig_stats"][self.base_key]:
self.hist_cache["eig_stats"][self.base_key][self.threshold].append(entry)
else:
self.hist_cache["eig_stats"][self.base_key][self.threshold] = [entry]
else:
self.hist_cache["eig_stats"][self.base_key] = {self.threshold: [entry]}
else:
self.hist_cache["eig_stats"] = {self.base_key: {self.threshold: [entry]}}
def record_set_analysis(self, group_sizes, groups, codes):
entry = {"group_sizes": group_sizes, "groups": groups, "codes": codes}
if "set_analysis" in self.hist_cache:
if self.base_key in self.hist_cache["set_analysis"]:
if self.threshold in self.hist_cache["set_analysis"][self.base_key]:
self.hist_cache["set_analysis"][self.base_key][self.threshold].append(entry)
else:
self.hist_cache["set_analysis"][self.base_key][self.threshold] = [entry]
else:
self.hist_cache["set_analysis"][self.base_key] = {self.threshold: [entry]}
else:
self.hist_cache["set_analysis"] = {self.base_key: {self.threshold: [entry]}}
def record_performance(self, performance):
entry = performance
if "performance" in self.hist_cache:
if self.base_key in self.hist_cache["performance"]:
if self.threshold in self.hist_cache["performance"][self.base_key]:
self.hist_cache["performance"][self.base_key][self.threshold].append(entry)
else:
self.hist_cache["performance"][self.base_key][self.threshold] = [entry]
else:
self.hist_cache["performance"][self.base_key] = {self.threshold: [entry]}
else:
self.hist_cache["performance"] = {self.base_key: {self.threshold: [entry]}}
def record_performance_history(self, performance_history):
entry = performance_history
if "performance_history" in self.hist_cache:
if self.base_key in self.hist_cache["performance_history"]:
if self.threshold in self.hist_cache["performance_history"][self.base_key]:
if self.history_interval in self.hist_cache["performance_history"][self.base_key][self.threshold]:
self.hist_cache["performance_history"][self.base_key][self.threshold][self.history_interval].append(entry)
else:
self.hist_cache["performance_history"][self.base_key][self.threshold][self.history_interval] = entry
else:
self.hist_cache["performance_history"][self.base_key][self.threshold] = {self.history_interval: [entry]}
else:
self.hist_cache["performance_history"][self.base_key] = {self.threshold: {self.history_interval: [entry]}}
else:
self.hist_cache["performance_history"] = {self.base_key: {self.threshold: {self.history_interval: [entry]}}}
    def record_pruning_per_threshold(self, final_vals, thresholds):
        """
        Record final pruning values measured across a sweep of thresholds.

        Unlike the other recorders this nests by base_key only, since each
        entry already spans all thresholds.
        """
        entry = {"final_vals": final_vals, "thresholds": thresholds}
        if "pruning_per_threshold" in self.hist_cache:
            if self.base_key in self.hist_cache["pruning_per_threshold"]:
                # category and model key both exist: append to the running list
                self.hist_cache["pruning_per_threshold"][self.base_key].append(entry)
            else:
                self.hist_cache["pruning_per_threshold"][self.base_key] = [entry]
        else:
            # first record for this category
            # NOTE(review): the trailing "| <filename>..." text below is dataset-export residue fused onto this line, not code
            self.hist_cache["pruning_per_threshold"] = {self.base_key: [entry]} | <filename>analysis/ExperimentRecorder.py
"""
Title :ExperimentRecorder.py
Description :JSON exporter class to store experimental results over time
Author :<NAME>
Date Created :15-01-2020
Date Modified :16-06-2020
version :1.1.4
python_version :3.6.6
"""
import os
import json
class ExperimentRecorder:
    """
    Custom JSON exporter class to store information on MicroResNet experiments

    Results accumulate in ``self.hist_cache`` as nested dictionaries keyed by a
    composite ``base_key`` (model/optimizer/hyper-parameter string) and, per
    record type, by threshold, compression mode or history interval; the cache
    is persisted as JSON via :meth:`save_data` and reloaded via :meth:`load_data`.

    # Arguments
    :param model_name: (string) Keras model name
    :param optimizer: (string) name of the selected Keras optimizer
    :param loss: (string) name of the selected Keras loss function
    :param base_lr: (float) learning rate
    :param batch_size: (int) # of inputs in a mini-batch
    :param epochs: (int) # of full training passes
    :param init_mode: (string) {random_init | static_init}
    :param dataset: (string) selected dataset
    :param l1_penalty: (float) regularization coefficient
    :param history_interval: (int) for history callbacks, defines the interval of logs
    :param threshold: (float) {significance_threshold | contribution_threshold}
    :param compression_mode: (string) selected compression mode
    :param verbose: (int) Keras verbose argument
    :param path: (string) absolute path to 'experiment.json' file if already exists
    """
    def __init__(self,
                 model_name,
                 optimizer,
                 loss,
                 base_lr,
                 batch_size,
                 epochs,
                 init_mode,
                 dataset,
                 l1_penalty,
                 history_interval=1,
                 threshold=None,
                 compression_mode=None,
                 verbose=0,
                 path="experiment.json"):
        self.model_name = model_name
        self.optimizer = optimizer
        self.loss = loss
        self.base_lr = "{:.0e}".format(base_lr)
        self.batch_size = batch_size
        self.epochs = str(epochs)
        self.init_mode = init_mode
        self.dataset = dataset
        self.history_interval = str(history_interval)
        self.l1_penalty = "{:.0e}".format(l1_penalty)
        # Bug fix: the declared default threshold=None used to crash in
        # "{:.0e}".format(None); keep None unformatted instead.
        self.threshold = "{:.0e}".format(threshold) if threshold is not None else None
        self.compression_mode = compression_mode
        self.verbose = verbose
        self.path = path
        self.hist_cache = {}
        if os.path.isfile(self.path):
            # Resume on top of a previous run's results if the file exists.
            self.load_data()
        self.model = "%s:%s:%s:%s" % (self.model_name, self.optimizer, self.loss, self.base_lr)
        self.base_key = ":".join([self.model, str(self.epochs), str(self.batch_size), self.init_mode, self.dataset, str(self.l1_penalty)])

    def load_data(self):
        """
        Loads the current experiment history to append new results
        """
        with open(self.path, "r") as hist_file:
            self.hist_cache = json.load(hist_file)

    def save_data(self):
        """
        Saves the accumulated data by updating the existing file located at self.path
        """
        with open(self.path, "w+") as hist_file:
            json.dump(self.hist_cache, hist_file)

    def _append_nested(self, entry, *keys):
        """
        Append *entry* to the list at ``hist_cache[k0][k1]...[kn]``, creating
        any missing intermediate dicts (and the final list) on demand.

        Replaces the nine copy-pasted if/else cascades of the original
        recorders; the resulting structure is identical.
        """
        node = self.hist_cache
        for key in keys[:-1]:
            node = node.setdefault(key, {})
        node.setdefault(keys[-1], []).append(entry)

    def record(self, data, mode, compression_mode=None):
        """
        Dispatch *data* (a dict of keyword payloads) to the recorder selected
        by *mode*; unknown modes are silently ignored.
        """
        if mode == "learning_curve":
            self.record_learning_curve(data["hist"])
        elif mode == "pruning_per_layer":
            self.record_pruning_per_layer(data["layer_names"], data["active_params"], data["total_params"], compression_mode)
        elif mode == "pruning_per_layer_history":
            self.record_pruning_per_layer_history(data["pruning_per_layer_history"])
        elif mode == "eig_analysis":
            self.record_eig_analysis(data["layer_names"], data["active_real"], data["pruned_real"], data["active_complex"], data["pruned_complex"])
        elif mode == "eig_stats":
            self.record_eig_stats(data["layer_names"], data["total_complex_list"], data["total_real_list"], data["target_complex_list"], data["pruned_complex_list"], data["target_real_list"], data["pruned_real_list"])
        elif mode == "set_analysis":
            self.record_set_analysis(data["group_sizes"], data["groups"], data["codes"])
        elif mode == "performance":
            self.record_performance(data["performance"])
        elif mode == "performance_history":
            self.record_performance_history(data["performance_history"])
        elif mode == "pruning_per_threshold":
            self.record_pruning_per_threshold(data["final_vals"], data["thresholds"])

    def record_learning_curve(self, hist):
        """Append a Keras-style history dict under base_key/threshold/compression_mode."""
        self._append_nested(hist, "learning_curve", self.base_key, self.threshold, self.compression_mode)

    def record_pruning_per_layer(self, layer_names, active_params, total_params, compression_mode=None):
        """
        Store one per-layer pruning snapshot under base_key/threshold/compression_mode.

        # Arguments
        :param layer_names: layer identifiers, parallel to the parameter-count lists
        :param active_params: per-layer counts of parameters still active after pruning
        :param total_params: per-layer total parameter counts
        :param compression_mode: (string) Optional. ComperativeTestingCallback set this to record data for multiple modes
        """
        compression_mode = self.compression_mode if compression_mode is None else compression_mode
        entry = {"layer_names": layer_names, "active_params": active_params, "total_params": total_params}
        self._append_nested(entry, "pruning_per_layer", self.base_key, self.threshold, compression_mode)

    def record_pruning_per_layer_history(self, pruning_per_layer_history):
        """Append one per-layer pruning history log under base_key/threshold/history_interval."""
        self._append_nested(pruning_per_layer_history, "pruning_per_layer_history", self.base_key, self.threshold, self.history_interval)

    def record_eig_analysis(self, layer_names, active_real, pruned_real, active_complex, pruned_complex):
        """Store one eigenvalue-analysis snapshot under base_key/threshold/compression_mode."""
        entry = {"layer_names": layer_names, "active_real": active_real, "pruned_real": pruned_real, "active_complex": active_complex, "pruned_complex": pruned_complex}
        self._append_nested(entry, "eig_analysis", self.base_key, self.threshold, self.compression_mode)

    def record_eig_stats(self, layer_names, total_complex_list, total_real_list, target_complex_list, pruned_complex_list, target_real_list, pruned_real_list):
        """Store aggregate eigenvalue statistics under base_key/threshold."""
        entry = {
            "layer_names": layer_names,
            "total_complex_list": total_complex_list,
            "total_real_list": total_real_list,
            "target_complex_list": target_complex_list,
            "pruned_complex_list": pruned_complex_list,
            "target_real_list": target_real_list,
            "pruned_real_list": pruned_real_list
        }
        self._append_nested(entry, "eig_stats", self.base_key, self.threshold)

    def record_set_analysis(self, group_sizes, groups, codes):
        """Store one set-analysis result under base_key/threshold."""
        entry = {"group_sizes": group_sizes, "groups": groups, "codes": codes}
        self._append_nested(entry, "set_analysis", self.base_key, self.threshold)

    def record_performance(self, performance):
        """Append one performance measurement under base_key/threshold."""
        self._append_nested(performance, "performance", self.base_key, self.threshold)

    def record_performance_history(self, performance_history):
        """
        Append one performance-history log under base_key/threshold/history_interval.

        Bug fix: the original assigned the raw entry (not ``[entry]``) when only
        the history_interval level was missing; the helper always creates a list.
        """
        self._append_nested(performance_history, "performance_history", self.base_key, self.threshold, self.history_interval)

    def record_pruning_per_threshold(self, final_vals, thresholds):
        """Record final pruning values across a threshold sweep, keyed by base_key only."""
        entry = {"final_vals": final_vals, "thresholds": thresholds}
        self._append_nested(entry, "pruning_per_threshold", self.base_key)
Description :JSON exporter class to store experimental results over time
Author :<NAME>
Date Created :15-01-2020
Date Modified :16-06-2020
version :1.1.4
python_version :3.6.6 Custom JSON exporter class to store information on MicroResNet experiments
# Arguments
:param model_name: (string) Keras model name
:param optimizer: (string) name of the selected Keras optimizer
:param loss: (string) name of the selected Keras loss function
:param base_lr: (float) learning rate
:param batch_size: (int) # of inputs in a mini-batch
:param epochs: (int) # of full training passes
:param init_mode: (string) {random_init | static_init}
:param dataset: (string) selected dataset
:param l1_penalty: (float) regularization coefficient
:param history_interval: (int) for history callbacks, defines the interval of logs
:param threshold: (float) {significance_threshold | contribution_threshold}
:param compression_mode: (string) selected compression mode
:param verbose: (int) Keras verbose argument
:param path: (string) absolute path to 'experiment.json' file if already exists Loads the current experiment history to append new results Saves the accumulated data by updating the existing file located at self.path # Arguments
:param layer_names:
:param active_params:
:param total_params:
:param compression_mode: (string) Optional. ComperativeTestingCallback set this to record data for multiple modes | 2.997482 | 3 |
Eigen_Face_Recognizer.py | Liuzy0908/Fatigue-Driving-Detection-Based-on-Dlib | 5 | 6618350 | # -*- coding: utf-8 -*-
####################################################
# 作者: 刘朝阳
# 时间: 2020.05.01
# 更新时间: 2021.11.25
# 功能: 利用采集好的驾驶人的人脸图像, 进行特征脸模型的训练.
# 使用说明: 自动调用, 无需操作.
####################################################
import os
import cv2
import numpy as np
face_path = './face_path'  # root folder: one sub-directory of face images per driver
def LoadImages(data):
    """
    Load per-driver face images from *data*.

    Each sub-directory of *data* holds the photos of one driver; the folder
    name is used as the driver's name and all of its images share one integer
    label. Images are expected to have identical dimensions so the result can
    be stacked into a single array.

    :param data: root directory containing one sub-folder per driver
    :return: (images, labels, names) — images [m, height, width] grayscale
             array, labels per-image integer labels, names per-driver names
    :raises Exception: if an entry under *data* is not a directory (i.e. some
                       driver's data was not collected properly)
    """
    images = []
    names = []
    labels = []
    label = 0
    # Walk every driver sub-folder under the data root.
    for subdir in os.listdir(data):
        subpath = os.path.join(data, subdir)
        if os.path.isdir(subpath):
            names.append(subdir)  # the folder name doubles as the driver's name
            for filename in os.listdir(subpath):
                imgpath = os.path.join(subpath, filename)
                img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
                if img is None:
                    # Bug fix: imread returns None for unreadable/non-image
                    # files; skip them instead of crashing inside cvtColor.
                    continue
                gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                images.append(gray_img)
                labels.append(label)  # every image of this driver gets the same label
            label += 1  # next driver gets the next integer label
        else:
            raise Exception("还未正确采集某个驾驶人图像数据")
    images = np.asarray(images)  # [m, height, width]; requires uniform image sizes
    names = np.asarray(names)
    labels = np.asarray(labels)
    return images, labels, names
# Train the Eigenfaces recognizer on every collected driver's face images.
print('/*/*/*/*/*/*/* 特征脸识别器正在训练 /*/*/*/*/*/*/*')
X, y, names = LoadImages(face_path)  # load grayscale images, integer labels and driver names
Eigen_Face_Model = cv2.face.EigenFaceRecognizer_create()
Eigen_Face_Model.train(X, y)  # fit the model on images X with labels y
print('-*-*-*-*-*-*-* 特征脸识别器训练完成 -*-*-*-*-*-*-*')
####################################################
# 作者: 刘朝阳
# 时间: 2020.05.01
# 更新时间: 2021.11.25
# 功能: 利用采集好的驾驶人的人脸图像, 进行特征脸模型的训练.
# 使用说明: 自动调用, 无需操作.
####################################################
import os
import cv2
import numpy as np
face_path = './face_path'  # root folder: one sub-directory of face images per driver
def LoadImages(data):  # data: training-data root; images must share one size; caller creates and supplies it
    """
    Load per-driver face images from *data*.

    Each sub-directory of *data* holds the photos of one driver; the folder
    name is used as the driver's name and all of its images share one integer
    label.

    :param data: root directory containing one sub-folder per driver
    :return: (images, labels, names) — images [m, height, width] grayscale
             array, labels per-image integer labels, names per-driver names
    """
    images = []
    names = []
    labels = []
    label = 0
    # Walk every driver sub-folder.
    for subdir in os.listdir(data):  # os.listdir() yields every entry name under the directory
        subpath = os.path.join(data, subdir)  # join paths to locate the sub-folder
        if os.path.isdir(subpath):  # only real sub-folders are treated as driver albums
            # each folder stores many photos of a single person
            names.append(subdir)  # the folder name doubles as the driver's name
            # iterate over the image files inside the folder
            for filename in os.listdir(subpath):  # os.listdir() yields every file name in the folder
                imgpath = os.path.join(subpath, filename)  # join paths to locate the image file
                # NOTE(review): cv2.imread returns None for unreadable files, which would
                # make cvtColor raise — confirm the folder only ever contains valid images
                img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
                gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                images.append(gray_img)  # collect all grayscale images of this person
                labels.append(label)  # tag every image of this person with the same label
            label += 1  # next driver gets the next integer label
        else:
            raise Exception("还未正确采集某个驾驶人图像数据")
    images = np.asarray(images)  # stack into an array of images; requires uniform sizes
    names = np.asarray(names)
    labels = np.asarray(labels)  # convert label list to array
    # returns: images [m, height, width] (m samples); names collection; labels collection
    return images, labels, names
# Train the Eigenfaces recognizer on every collected driver's face images.
print('/*/*/*/*/*/*/* 特征脸识别器正在训练 /*/*/*/*/*/*/*')
X, y, names = LoadImages(face_path)  # load grayscale images, integer labels and driver names
Eigen_Face_Model = cv2.face.EigenFaceRecognizer_create()
Eigen_Face_Model.train(X, y)  # fit the model on images X with labels y
print('-*-*-*-*-*-*-* 特征脸识别器训练完成 -*-*-*-*-*-*-*')