index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
28,032
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/setup.py
|
# Minimal setuptools packaging script for the adaptation-imputation project.
# Most metadata fields (version, url, license, author) are intentionally left
# blank; the package list is maintained by hand rather than via find_packages().
from setuptools import setup
setup(
    name='adaptation-imputation',
    version='',
    packages=['src', 'src.eval', 'src.dataset', 'src.plotting', 'data', 'experiments', 'experiments.launcher',
              'orchestration'],
    url='',
    license='',
    author='',
    author_email='',
    description=''
)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,033
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/plotting/utils_plotting.py
|
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from pandas import DataFrame
from sklearn.manifold import TSNE
import numpy as np
import torch
import os
from src.utils.utils_network import get_data_classifier, get_feature_extractor
def colored_scattered_plot(X_train_s, X_train_t, y_sparse_train_s, y_sparse_train_t, name=None, data_type=None):
    """Scatter-plot 2-D source and target points, colored by binary class label.

    Source points are drawn as circles (red=class 0, blue=class 1), target
    points as crosses (magenta=class 0, cyan=class 1). The figure is saved to
    ../../figures/<data_type>/<name>/data_<data_type> only when both
    ``data_type`` and ``name`` are provided.

    Args:
        X_train_s, X_train_t: arrays of shape (n, >=2); only columns 0 and 1 are plotted.
        y_sparse_train_s, y_sparse_train_t: per-point labels in {0, 1}.
        name: experiment name used in the output path (optional).
        data_type: data description used in the title and output path (optional).
    """
    df_s = DataFrame(dict(x=X_train_s[:, 0], y=X_train_s[:, 1], label=y_sparse_train_s))
    df_t = DataFrame(dict(x=X_train_t[:, 0], y=X_train_t[:, 1], label=y_sparse_train_t))
    colors_s = {0: 'red', 1: 'blue'}
    colors_t = {0: 'magenta', 1: 'cyan'}
    marker_s = {0: 'o', 1: 'o'}
    marker_t = {0: 'x', 1: 'x'}
    fig, ax = plt.subplots()
    grouped_s = df_s.groupby('label')
    grouped_t = df_t.groupby('label')
    for key, group in grouped_s:
        group.plot(ax=ax, kind='scatter', x='x', y='y', label=str(key) + "_source", color=colors_s[key],
                   marker=marker_s[key])
    for key, group in grouped_t:
        group.plot(ax=ax, kind='scatter', x='x', y='y', label=str(key) + "_target", color=colors_t[key],
                   marker=marker_t[key])
    if data_type:
        plt.title(f"Source and target {data_type} data distribution on dimension 1 and 2")
    # Fix: only save when name is also provided; previously a truthy data_type
    # with name=None produced a path containing the literal string "None"
    # (plot_data_frontier relies on calling this helper with both left unset).
    if data_type and name:
        plt.savefig(f"../../figures/{data_type}/{name}/data_{data_type}")
def plot_data_frontier(X_train, X_test, y_train, y_test, net, data_type=None, name=None):
    """Visualize the model's decision frontier and a PCA view of its embeddings.

    For 2-D inputs, draws the raw source/target scatter plus the classifier's
    decision boundary over a fixed grid. Then (for any dimension) embeds the
    first 100 source and target samples with the network's feature extractor,
    projects them to 2-D with PCA, scatter-plots them, and optionally saves
    the figure.

    Args:
        X_train, X_test: source / target inputs as numpy arrays.
        y_train, y_test: corresponding labels.
        net: model from which get_feature_extractor / get_data_classifier
            pull the sub-networks (see src.utils.utils_network).
        data_type, name: when both given, figure is saved under
            ../../figures/<data_type>/<name>/frontier_<data_type>.
    """
    dim = X_train.shape[1]
    feat_extract = get_feature_extractor(net)
    data_class = get_data_classifier(net)
    if dim == 2:
        colored_scattered_plot(X_train, X_test, y_train, y_test)
        # Evaluate the classifier on a fixed [-4, 4]^2 grid with step 0.1.
        x_min, x_max = -4, 4
        y_min, y_max = -4, 4
        h = 0.1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = np.c_[xx.ravel(), yy.ravel()]
        Z_feat = feat_extract((torch.from_numpy(np.atleast_2d(Z)).float()))
        Z_class = data_class(Z_feat)
        # Predicted class per grid point; contour at level 0 traces the frontier.
        classe = Z_class.data.max(1)[1].numpy()
        classe = classe.reshape(xx.shape)
        plt.contour(xx, yy, classe, levels=[0], colors="r")
    # Get embeddings for a small subset, then project them to 2-D with PCA.
    subset = 100
    x = torch.from_numpy(X_train[:subset, :]).float()
    X_train_map = feat_extract(x).data.numpy()
    x = torch.from_numpy(X_test[:subset, :]).float()
    X_test_map = feat_extract(x).data.numpy()
    # Fit PCA on source and target embeddings jointly so both share axes.
    emb_all = np.vstack([X_train_map, X_test_map])
    pca = PCA(n_components=2)
    pca_emb = pca.fit_transform(emb_all)
    num = X_train[:subset, :].shape[0]
    colored_scattered_plot(pca_emb[:num, :], pca_emb[num:, :], y_train[:subset], y_test[:subset])
    plt.show()
    if data_type is not None and name is not None:
        plt.savefig(f"../../figures/{data_type}/{name}/frontier_{data_type}")
def plot_data_frontier_digits(net, data_loader_s, data_loader_t, epoch=None, is_imput=False, is_pca=False):
    """Embed one source and one target batch and save 2-D scatter plots.

    Two figures are written under ./figures/<mode>/<source>_<target>/:
    ``frontier_<epoch>`` (colored by domain) and ``frontier_label_<epoch>``
    (colored by class label).

    Args:
        net: trained model. Must expose ``model_config`` and ``cuda``, plus
            ``feat_extractor`` when is_imput is False, or ``feat_extractor1``,
            ``reconstructor`` and ``mask_1`` when is_imput is True.
        data_loader_s, data_loader_t: source / target data loaders; one batch
            is drawn from each.
        epoch: tag appended to the output file names.
        is_imput: use the imputation branch (masked input -> extracted features
            concatenated with reconstructed features).
        is_pca: project embeddings with PCA instead of t-SNE.
    """
    if not is_imput:
        feat_extract = net.feat_extractor
    else:
        feat_extract1 = net.feat_extractor1.eval()
        recons = net.reconstructor.eval()
    model_config = net.model_config
    S_batches = iter(data_loader_s)
    T_batches = iter(data_loader_t)
    X_train, y_train = next(S_batches)
    X_test, y_test = next(T_batches)
    if net.cuda:
        X_train = X_train.cuda()
        X_test = X_test.cuda()
    if not is_imput:
        X_train_map = feat_extract(X_train).cpu().data.numpy()
        X_test_map = feat_extract(X_test).cpu().data.numpy()
    else:
        # Imputation path: extract features from the visible part of the input
        # (selected by mask_1), reconstruct the missing part's features, and
        # concatenate both along the feature dimension.
        data1_train = torch.mul(X_train, net.mask_1)
        output_feat1_train = feat_extract1(data1_train)
        output_feat2_train = recons(output_feat1_train)
        X_train_map = torch.cat((output_feat1_train, output_feat2_train), 1).cpu().data.numpy()
        data1_test = torch.mul(X_test, net.mask_1)
        output_feat1_test = feat_extract1(data1_test)
        output_feat2_test = recons(output_feat1_test)
        X_test_map = torch.cat((output_feat1_test, output_feat2_test), 1).cpu().data.numpy()
    # Fit the projection on source and target embeddings jointly so that both
    # domains live in the same 2-D coordinate system.
    emb_all = np.vstack([X_train_map, X_test_map])
    if is_pca:
        pca = PCA(n_components=2)
    else:
        pca = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3000)
    pca_emb = pca.fit_transform(emb_all)
    num = X_train.shape[0]
    fig_dir = f"./figures/{model_config.mode}/{model_config.source}_{model_config.target}"
    # os.makedirs with exist_ok replaces the previous chain of three
    # mkdir/try/except-FileExistsError blocks; the "created" message is kept.
    newly_created = not os.path.isdir(fig_dir)
    os.makedirs(fig_dir, exist_ok=True)
    if newly_created:
        print(f"Directory {fig_dir} created ")
    colored_scattered_plot_digits(pca_emb[:num, :], pca_emb[num:, :], y_train.data.numpy(), y_test.data.numpy(),
                                  is_pca=is_pca)
    plt.savefig(f"{fig_dir}/frontier_{epoch}")
    colored_scattered_plot_digits(pca_emb[:num, :], pca_emb[num:, :], y_train.data.numpy(), y_test.data.numpy(), mode=2,
                                  is_pca=is_pca)
    plt.savefig(f"{fig_dir}/frontier_label_{epoch}")
def colored_scattered_plot_digits(X_train_s, X_train_t, y_sparse_train_s, y_sparse_train_t, mode=1, is_pca=False):
    """Scatter-plot source (x markers) and target (o markers) 2-D embeddings.

    mode=1 colors by domain (source red, target blue); mode=2 colors by class
    label using a 10-color rainbow palette. When is_pca, axes are clamped to
    [-3.5, 3.5].
    """
    source_df = DataFrame(dict(x=X_train_s[:, 0], y=X_train_s[:, 1], label=y_sparse_train_s))
    target_df = DataFrame(dict(x=X_train_t[:, 0], y=X_train_t[:, 1], label=y_sparse_train_t))
    fig, axis = plt.subplots()
    by_label_s = source_df.groupby('label')
    by_label_t = target_df.groupby('label')
    palette = plt.cm.rainbow(np.linspace(0, 1, 10))
    if mode == 1:
        # One color per domain, regardless of class label.
        palette = {0: 'red', 1: 'blue'}
        for _, grp in by_label_s:
            grp.plot(ax=axis, kind='scatter', x='x', y='y', color=palette[0], marker='x')
        for _, grp in by_label_t:
            grp.plot(ax=axis, kind='scatter', x='x', y='y', color=palette[1], marker='o')
    elif mode == 2:
        # One color per class label, shared across domains.
        for lbl, grp in by_label_s:
            grp.plot(ax=axis, kind='scatter', x='x', y='y', color=palette[lbl], marker='x')
        for lbl, grp in by_label_t:
            grp.plot(ax=axis, kind='scatter', x='x', y='y', color=palette[lbl], marker='o')
    if is_pca:
        axis.set_xlim(-3.5, 3.5)
        axis.set_ylim(-3.5, 3.5)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,034
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/experiments/__init__.py
|
from experiments.launcher.experiments_criteo import DannImputCriteo, SourceIgnoreCriteo, \
DannIgnoreCriteo, SourceZeroImputCriteo, DannZeroImputCriteo
from experiments.launcher.experiments_mnist_mnistm import DannMNISTMNISTM, DannZeroImputMNISTMNISTM, \
DannImputMNISTMNISTM, DjdotMNISTMNISTM, DjdotZeroImputMNISTMNISTM, DjdotImputMNISTMNISTM, DannIgnoreMNISTMNISTM, \
DjdotIgnoreMNISTMNISTM
from experiments.launcher.experiments_svhn_mnist import DannSVHNMNIST, DannZeroImputSVHNMNIST, DannImputSVHNMNIST, \
DjdotImputSVHNMNIST, DjdotSVHNMNIST, DjdotZeroImputSVHNMNIST, DjdotIgnoreSVHNMNIST, DannIgnoreSVHNMNIST
from experiments.launcher.experiments_mnist_usps import DannMNISTUSPS, DannZeroImputMNISTUSPS, DannImputMNISTUSPS, \
DjdotImputMNISTUSPS, DjdotZeroImputMNISTUSPS, DjdotMNISTUSPS, DannIgnoreMNISTUSPS, DjdotIgnoreMNISTUSPS
from experiments.launcher.experiments_usps_mnist import DannUSPSMNIST, DannZeroImputUSPSMNIST, DannImputUSPSMNIST, \
DjdotUSPSMNIST, DjdotZeroImputUSPSMNIST, DjdotImputUSPSMNIST, DjdotIgnoreUSPSMNIST, DannIgnoreUSPSMNIST
# Registry mapping CLI experiment names to experiment instances.
# Naming scheme: <method>_<variant>_<source>_<target>, where method is
# dann/djdot/source and variant (optional) is ignore/zeroimput/imput.
all_experiments = {
    # MNIST -> USPS
    "dann_mnist_usps": DannMNISTUSPS(),
    "dann_ignore_mnist_usps": DannIgnoreMNISTUSPS(),
    "dann_zeroimput_mnist_usps": DannZeroImputMNISTUSPS(),
    "dann_imput_mnist_usps": DannImputMNISTUSPS(),
    "djdot_mnist_usps": DjdotMNISTUSPS(),
    "djdot_ignore_mnist_usps": DjdotIgnoreMNISTUSPS(),
    "djdot_zeroimput_mnist_usps": DjdotZeroImputMNISTUSPS(),
    "djdot_imput_mnist_usps": DjdotImputMNISTUSPS(),
    # USPS -> MNIST
    "dann_usps_mnist": DannUSPSMNIST(),
    "dann_ignore_usps_mnist": DannIgnoreUSPSMNIST(),
    "dann_zeroimput_usps_mnist": DannZeroImputUSPSMNIST(),
    "dann_imput_usps_mnist": DannImputUSPSMNIST(),
    "djdot_usps_mnist": DjdotUSPSMNIST(),
    "djdot_ignore_usps_mnist": DjdotIgnoreUSPSMNIST(),
    "djdot_zeroimput_usps_mnist": DjdotZeroImputUSPSMNIST(),
    "djdot_imput_usps_mnist": DjdotImputUSPSMNIST(),
    # SVHN -> MNIST
    "dann_svhn_mnist": DannSVHNMNIST(),
    "dann_ignore_svhn_mnist": DannIgnoreSVHNMNIST(),
    "dann_zeroimput_svhn_mnist": DannZeroImputSVHNMNIST(),
    "dann_imput_svhn_mnist": DannImputSVHNMNIST(),
    "djdot_svhn_mnist": DjdotSVHNMNIST(),
    "djdot_ignore_svhn_mnist": DjdotIgnoreSVHNMNIST(),
    "djdot_zeroimput_svhn_mnist": DjdotZeroImputSVHNMNIST(),
    "djdot_imput_svhn_mnist": DjdotImputSVHNMNIST(),
    # MNIST -> MNIST-M
    "dann_mnist_mnistm": DannMNISTMNISTM(),
    "dann_ignore_mnist_mnistm": DannIgnoreMNISTMNISTM(),
    "dann_zeroimput_mnist_mnistm": DannZeroImputMNISTMNISTM(),
    "dann_imput_mnist_mnistm": DannImputMNISTMNISTM(),
    "djdot_mnist_mnistm": DjdotMNISTMNISTM(),
    "djdot_ignore_mnist_mnistm": DjdotIgnoreMNISTMNISTM(),
    "djdot_zeroimput_mnist_mnistm": DjdotZeroImputMNISTMNISTM(),
    "djdot_imput_mnist_mnistm": DjdotImputMNISTMNISTM(),
    # Criteo
    "source_zeroimput_criteo": SourceZeroImputCriteo(),
    "source_ignore_criteo": SourceIgnoreCriteo(),
    "dann_zeroimput_criteo": DannZeroImputCriteo(),
    "dann_ignore_criteo": DannIgnoreCriteo(),
    "dann_imput_criteo": DannImputCriteo()
}
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,035
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/experiments/launcher/config.py
|
import typing
import json
import argparse
class RunConfig(typing.NamedTuple):
    """Immutable run-level settings (experiment identity and scheduling)."""
    experiment_name: str = "test"  # key into all_experiments
    metarun_id: int = 0            # id of the enclosing sweep
    run_id: int = 0                # id of this run within the sweep
    max_nb_processes: int = 2      # parallelism cap for the launcher
    gpu_id: int = 0                # CUDA device index
class ModelConfig(typing.NamedTuple):
    """Immutable model/adaptation hyper-parameters.

    Several flags use int 0/1 instead of bool so they round-trip cleanly
    through argparse and JSON (see Config.get_config_from_args / json_decode).
    """
    mode: str                            # adaptation method, e.g. "dann" / "djdot"
    upper_bound: int = 1
    adaptive_lr: int = 0
    djdot_alpha: float = 1.0             # DeepJDOT alignment weight
    source: str = "SVHN"                 # source domain name
    target: str = "MNIST"                # target domain name
    epoch_to_start_align: int = 11       # epochs of source-only training before alignment
    stop_grad: int = 0
    bigger_reconstructor: int = 0
    bigger_discrim: int = 1
    is_balanced: int = 0                 # class-balanced sampling on the source
    initialize_model: int = 0
    random_seed: int = 1985
    init_lr: float = 0.002
    output_fig: int = 0                  # save diagnostic figures
    use_categorical: int = 1
    crop_ratio: float = 0.5              # fraction of the input treated as missing
    adapt_only_first: int = 0
    adaptive_grad_scale: int = 1
    smaller_lr_imput: int = 0
    weight_d2: float = 1.0               # loss weights
    weight_mse: float = 1.0
    weight_classif: float = 1.0
    activate_adaptation_d1: int = 1      # component on/off switches
    activate_mse: int = 1
    activate_adaptation_imp: int = 1
    refinement: int = 0                  # pseudo-label refinement stage
    n_epochs_refinement: int = 10
    lambda_regul: float = 1.0
    lambda_regul_s: float = 1.0
    threshold_value: float = 0.95        # pseudo-label confidence threshold
class TrainingConfig(typing.NamedTuple):
    """Immutable optimization-loop settings."""
    n_epochs: int = 20
    batch_size: int = 128
    test_batch_size: int = 1000
    init_batch_size: int = 128  # batch size for the initialization phase
class DatasetConfig(typing.NamedTuple):
    """Image dataset geometry: channel count and square image side length."""
    channel: int = 1
    im_size: int = 28
class Config(typing.NamedTuple):
    """Top-level configuration bundle: run + model + training sub-configs.

    Serializable to/from JSON and constructible from the command line.
    """
    run: RunConfig
    model: ModelConfig
    training: TrainingConfig

    def json_encode(self):
        """Serialize the three sub-configs to a JSON string."""
        return json.dumps({
            "run": self.run._asdict(),
            "model": self.model._asdict(),
            "training": self.training._asdict()
        })

    @staticmethod
    def json_decode(json_string: str):
        """Rebuild a Config from a string produced by json_encode."""
        data = json.loads(json_string)
        return Config(
            run=RunConfig(**data["run"]),
            model=ModelConfig(**data["model"]),
            training=TrainingConfig(**data["training"])
        )

    @staticmethod
    def get_config_from_args() -> "Config":
        """Build a Config from CLI arguments (unknown arguments are ignored).

        NOTE(review): arguments declared without a ``default=`` (e.g.
        --djdot_alpha, --stop_grad, --source, --n_epochs) resolve to None
        when not passed, and that None is forwarded to the NamedTuple,
        overriding its declared default. Callers appear to rely on always
        passing these — confirm before depending on the NamedTuple defaults.
        """
        parser = argparse.ArgumentParser(description='Learn classifier for domain adaptation')
        parser.add_argument('--experiment_name', type=str)
        parser.add_argument('--metarun_id', type=int)
        parser.add_argument('--run_id', type=int)
        parser.add_argument('--max_nb_processes', type=int)
        parser.add_argument('--gpu_id', type=int, default=0)
        parser.add_argument('--mode', type=str)
        parser.add_argument('--upper_bound', type=int, default=1)
        parser.add_argument('--djdot_alpha', type=float)
        parser.add_argument('--adaptive_lr', type=int)
        parser.add_argument('--adaptive_grad_scale', type=int, default=1)
        parser.add_argument('--epoch_to_start_align', type=int, default=11)
        parser.add_argument('--stop_grad', type=int)
        parser.add_argument('--source', type=str)
        parser.add_argument('--target', type=str)
        parser.add_argument('--n_epochs', type=int)
        parser.add_argument('--batch_size', type=int)
        parser.add_argument('--bigger_reconstructor', type=int, default=0)
        parser.add_argument('--bigger_discrim', type=int, default=1)
        parser.add_argument('--is_balanced', type=int, default=0)
        parser.add_argument('--test_batch_size', type=int, default=1000)
        parser.add_argument('--initialize_model', type=int, default=0)
        parser.add_argument('--init_batch_size', type=int, default=128)
        parser.add_argument('--random_seed', type=int, default=1985)
        parser.add_argument('--init_lr', type=float, default=0.002)
        parser.add_argument('--output_fig', type=int, default=0)
        parser.add_argument('--use_categorical', type=int, default=1)
        parser.add_argument('--crop_ratio', type=float, default=0.5)
        parser.add_argument('--adapt_only_first', type=int, default=0)
        parser.add_argument('--smaller_lr_imput', type=int, default=0)
        parser.add_argument('--weight_mse', type=float, default=1.0)
        parser.add_argument('--weight_d2', type=float, default=1.0)
        parser.add_argument('--activate_adaptation_imp', type=int, default=1)
        parser.add_argument('--activate_adaptation_d1', type=int, default=1)
        parser.add_argument('--activate_mse', type=int, default=1)
        parser.add_argument('--weight_classif', type=float, default=1.0)
        parser.add_argument('--refinement', type=int, default=0)
        parser.add_argument('--n_epochs_refinement', type=int, default=10)
        parser.add_argument('--lambda_regul', type=float, default=1.0)
        parser.add_argument('--threshold_value', type=float, default=0.95)
        parser.add_argument('--lambda_regul_s', type=float, default=1.0)
        # parse_known_args so extra args from orchestration wrappers don't crash.
        args, _ = parser.parse_known_args()
        run_config = RunConfig(
            experiment_name=args.experiment_name,
            metarun_id=args.metarun_id,
            run_id=args.run_id,
            max_nb_processes=args.max_nb_processes,
            gpu_id=args.gpu_id
        )
        model_config = ModelConfig(
            mode=args.mode,
            upper_bound=args.upper_bound,
            djdot_alpha=args.djdot_alpha,
            adaptive_lr=args.adaptive_lr,
            epoch_to_start_align=args.epoch_to_start_align,
            source=args.source,
            target=args.target,
            stop_grad=args.stop_grad,
            bigger_reconstructor=args.bigger_reconstructor,
            bigger_discrim=args.bigger_discrim,
            is_balanced=args.is_balanced,
            initialize_model=args.initialize_model,
            random_seed=args.random_seed,
            init_lr=args.init_lr,
            output_fig=args.output_fig,
            use_categorical=args.use_categorical,
            crop_ratio=args.crop_ratio,
            adapt_only_first=args.adapt_only_first,
            adaptive_grad_scale=args.adaptive_grad_scale,
            smaller_lr_imput=args.smaller_lr_imput,
            activate_adaptation_imp=args.activate_adaptation_imp,
            activate_adaptation_d1=args.activate_adaptation_d1,
            activate_mse=args.activate_mse,
            weight_mse=args.weight_mse,
            weight_d2=args.weight_d2,
            weight_classif=args.weight_classif,
            refinement=args.refinement,
            n_epochs_refinement=args.n_epochs_refinement,
            lambda_regul=args.lambda_regul,
            threshold_value=args.threshold_value,
            lambda_regul_s=args.lambda_regul_s
        )
        training_config = TrainingConfig(
            n_epochs=args.n_epochs,
            batch_size=args.batch_size,
            test_batch_size=args.test_batch_size,
            init_batch_size=args.init_batch_size
        )
        my_config = Config(
            run=run_config,
            model=model_config,
            training=training_config
        )
        return my_config
# Hand-built configuration for quick local tests (n_epochs=0 skips training).
dummy_model_config = Config(
    run=RunConfig(gpu_id=0, run_id=0, metarun_id=5),
    model=ModelConfig(mode="dann", upper_bound=1, source="USPS", target="MNIST", epoch_to_start_align=0,
                      is_balanced=0, stop_grad=1, djdot_alpha=0.1, initialize_model=0, refinement=1,
                      n_epochs_refinement=10),
    training=TrainingConfig(n_epochs=0, batch_size=500, test_batch_size=500, init_batch_size=32)
)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,036
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/dataset/utils_dataset.py
|
import numpy as np
import torch
from torch import nn
from src.dataset.dataset_criteo import get_criteo
from experiments.launcher.config import DatasetConfig
from src.dataset.sampler import BalancedBatchSampler
from torchvision import datasets
from src.dataset.dataset_mnistm import get_mnistm
from src.dataset.dataset_usps import get_usps
import torch.utils.data as data_utils
from torchvision.transforms import transforms
# Input pipelines, all normalizing to [-1, 1] (mean 0.5, std 0.5 per channel).
# USPS at native size, grayscale.
transform_usps = transforms.Compose([transforms.ToTensor(),
                                     transforms.Normalize([0.5], [0.5])])
# MNIST resized to 32x32, grayscale.
transform_mnist32 = transforms.Compose([transforms.Resize(32),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.5], [0.5])])
# USPS resized to 32x32; ToPILImage first because USPS samples arrive as arrays.
transform_usps32 = transforms.Compose([transforms.ToPILImage(),
                                       transforms.Resize(32),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.5], [0.5])])
# SVHN resized to 32x32, RGB.
transform_svhn = transforms.Compose([transforms.Resize(32),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# MNIST resized to 32x32 and replicated to 3 channels (to pair with RGB domains).
transform_mnist32rgb = transforms.Compose([transforms.Resize(32),
                                           transforms.Grayscale(3),
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# MNIST at native size, replicated to 3 channels.
transform_mnistrgb = transforms.Compose([transforms.Grayscale(3),
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# MNIST-M at native size, RGB.
transform_mnistm = transforms.Compose([transforms.ToTensor(),
                                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_mnist(train, transform, path, image_size=28, batch_size=32, in_memory=True, num_channel=1, is_balanced=False,
              drop_last=True, download=True):
    """Get MNIST dataset loader.

    When ``in_memory``, the (already transformed) dataset is iterated once and
    cached into a single TensorDataset so subsequent epochs skip per-item
    transform cost. When ``is_balanced``, a BalancedBatchSampler replaces the
    default shuffling (DataLoader forbids sampler together with shuffle=True).

    Args:
        train: load the train split (else the test split).
        transform: torchvision transform pipeline applied per item.
        path: root directory; data is stored under <path>/data/.
        image_size, num_channel: geometry of the cached tensor (in_memory only).
        batch_size, drop_last, download: forwarded to DataLoader / datasets.MNIST.

    Returns:
        torch.utils.data.DataLoader over MNIST.
    """
    mnist_dataset = datasets.MNIST(root=f"{path}/data/",
                                   train=train,
                                   transform=transform,
                                   download=download)
    if in_memory:
        # One pass with batch_size=1 to materialize transformed samples.
        cache_loader = torch.utils.data.DataLoader(
            dataset=mnist_dataset,
            batch_size=1,
            shuffle=True,
            drop_last=False)
        data = torch.zeros((len(cache_loader), num_channel, image_size, image_size))
        label = torch.zeros(len(cache_loader))
        for i, (data_, target) in enumerate(cache_loader):
            data[i] = data_
            label[i] = target
        dataset = torch.utils.data.TensorDataset(data, label.long())
        sampler = BalancedBatchSampler(dataset, in_memory=True) if is_balanced else None
    else:
        dataset = mnist_dataset
        sampler = BalancedBatchSampler(dataset) if is_balanced else None
    # Single construction replaces four near-identical branches; shuffle only
    # when no sampler is used, matching the original behavior exactly.
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=sampler is None,
        sampler=sampler,
        drop_last=drop_last)
def get_svhn(train, transform, path, image_size=28, batch_size=32, in_memory=True, num_channel=1, is_balanced=False,
             drop_last=True, download=True):
    """Get SVHN dataset loader.

    Mirrors get_mnist: optional in-memory caching of the transformed dataset
    into a TensorDataset, and optional class-balanced sampling (a sampler
    replaces shuffling, since DataLoader forbids using both).

    Args:
        train: load the "train" split (else "test") — SVHN uses split names.
        transform: torchvision transform pipeline applied per item.
        path: root directory; data is stored under <path>/data/.
        image_size, num_channel: geometry of the cached tensor (in_memory only).
        batch_size, drop_last, download: forwarded to DataLoader / datasets.SVHN.

    Returns:
        torch.utils.data.DataLoader over SVHN.
    """
    split = "train" if train else "test"
    svhn_dataset = datasets.SVHN(root=f"{path}/data/", split=split, transform=transform, download=download)
    if in_memory:
        # One pass with batch_size=1 to materialize transformed samples.
        cache_loader = torch.utils.data.DataLoader(
            dataset=svhn_dataset,
            batch_size=1,
            shuffle=True,
            drop_last=False)
        data = torch.zeros((len(cache_loader), num_channel, image_size, image_size))
        label = torch.zeros(len(cache_loader))
        for i, (data_, target) in enumerate(cache_loader):
            data[i] = data_
            label[i] = target
        dataset = torch.utils.data.TensorDataset(data, label.long())
        sampler = BalancedBatchSampler(dataset, in_memory=True) if is_balanced else None
    else:
        dataset = svhn_dataset
        sampler = BalancedBatchSampler(dataset) if is_balanced else None
    # Single construction replaces four near-identical branches; shuffle only
    # when no sampler is used, matching the original behavior exactly.
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=sampler is None,
        sampler=sampler,
        drop_last=drop_last)
def create_dataset(config, path, in_memory=True, is_balanced=False):
    """Build train/test loaders for the source/target pair named in config.

    Supported pairs: SVHN<->MNIST, USPS<->MNIST, MNIST->USPS, MNIST->MNISTM.
    The source train loader can be class-balanced; target loaders never are.

    Returns:
        (dataset_config, train_s, test_s, train_t, test_t, train_s_init)
        where train_s_init uses config.training.init_batch_size.

    Raises:
        Exception: if the (source, target) pair is not one of the above.
    """
    init_batch_size = config.training.init_batch_size
    if config.model.source == "SVHN" and config.model.target == "MNIST":
        # RGB 32x32; MNIST is replicated to 3 channels to match SVHN.
        dataset = DatasetConfig(channel=3, im_size=32)
        data_loader_train_s = get_svhn(train=True, transform=transform_svhn, path=path, image_size=dataset.im_size,
                                       batch_size=config.training.batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, is_balanced=is_balanced)
        data_loader_train_s_init = get_svhn(train=True, transform=transform_svhn, path=path,
                                            image_size=dataset.im_size,
                                            batch_size=init_batch_size, num_channel=dataset.channel,
                                            in_memory=in_memory, is_balanced=is_balanced)
        data_loader_test_s = get_svhn(train=False, transform=transform_svhn, path=path, image_size=dataset.im_size,
                                      batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                      in_memory=in_memory, drop_last=False)
        data_loader_train_t = get_mnist(train=True, transform=transform_mnist32rgb, path=path,
                                        image_size=dataset.im_size,
                                        batch_size=config.training.batch_size, num_channel=dataset.channel,
                                        in_memory=in_memory)
        data_loader_test_t = get_mnist(train=False, transform=transform_mnist32rgb, path=path,
                                       image_size=dataset.im_size,
                                       batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, drop_last=False)
    elif config.model.source == "MNIST" and config.model.target == "SVHN":
        dataset = DatasetConfig(channel=3, im_size=32)
        data_loader_train_s = get_mnist(train=True, transform=transform_mnist32rgb, path=path,
                                        image_size=dataset.im_size,
                                        batch_size=config.training.batch_size, num_channel=dataset.channel,
                                        in_memory=in_memory)
        data_loader_test_s = get_mnist(train=False, transform=transform_mnist32rgb, path=path,
                                       image_size=dataset.im_size,
                                       batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, drop_last=False)
        data_loader_train_s_init = get_mnist(train=True, transform=transform_mnist32rgb, path=path,
                                             image_size=dataset.im_size,
                                             batch_size=init_batch_size, num_channel=dataset.channel,
                                             in_memory=in_memory, is_balanced=is_balanced)
        # NOTE(review): unlike other branches, train_s here does not pass
        # is_balanced — confirm whether that is intentional.
        data_loader_train_t = get_svhn(train=True, transform=transform_svhn, path=path, image_size=dataset.im_size,
                                       batch_size=config.training.batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, is_balanced=is_balanced)
        data_loader_test_t = get_svhn(train=False, transform=transform_svhn, path=path, image_size=dataset.im_size,
                                      batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                      in_memory=in_memory, drop_last=False)
    elif config.model.source == "USPS" and config.model.target == "MNIST":
        # Grayscale 32x32.
        dataset = DatasetConfig(channel=1, im_size=32)
        data_loader_train_s = get_usps(train=True, transform=transform_usps32, path=path, image_size=dataset.im_size,
                                       batch_size=config.training.batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, is_balanced=is_balanced)
        data_loader_train_s_init = get_usps(train=True, transform=transform_usps32, path=path,
                                            image_size=dataset.im_size,
                                            batch_size=init_batch_size, num_channel=dataset.channel,
                                            in_memory=in_memory, is_balanced=is_balanced)
        data_loader_test_s = get_usps(train=False, transform=transform_usps32, path=path, image_size=dataset.im_size,
                                      batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                      in_memory=in_memory, drop_last=False)
        data_loader_train_t = get_mnist(train=True, transform=transform_mnist32, path=path, image_size=dataset.im_size,
                                        batch_size=config.training.batch_size, num_channel=dataset.channel,
                                        in_memory=in_memory)
        data_loader_test_t = get_mnist(train=False, transform=transform_mnist32, path=path, image_size=dataset.im_size,
                                       batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, drop_last=False)
    elif config.model.source == "MNIST" and config.model.target == "USPS":
        dataset = DatasetConfig(channel=1, im_size=32)
        data_loader_train_s = get_mnist(train=True, transform=transform_mnist32, path=path, image_size=dataset.im_size,
                                        batch_size=config.training.batch_size, num_channel=dataset.channel,
                                        in_memory=in_memory, is_balanced=is_balanced)
        data_loader_train_s_init = get_mnist(train=True, transform=transform_mnist32, path=path,
                                             image_size=dataset.im_size,
                                             batch_size=init_batch_size, num_channel=dataset.channel,
                                             in_memory=in_memory, is_balanced=is_balanced)
        data_loader_test_s = get_mnist(train=False, transform=transform_mnist32, path=path, image_size=dataset.im_size,
                                       batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, drop_last=False)
        data_loader_train_t = get_usps(train=True, transform=transform_usps32, path=path, image_size=dataset.im_size,
                                       batch_size=config.training.batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory)
        data_loader_test_t = get_usps(train=False, transform=transform_usps32, path=path, image_size=dataset.im_size,
                                      batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                      in_memory=in_memory, drop_last=False)
    elif config.model.source == "MNIST" and config.model.target == "MNISTM":
        dataset = DatasetConfig(channel=3, im_size=32)
        data_loader_train_s = get_mnist(train=True, transform=transform_mnist32rgb, path=path,
                                        image_size=dataset.im_size,
                                        batch_size=config.training.batch_size, num_channel=dataset.channel,
                                        in_memory=in_memory, is_balanced=is_balanced)
        data_loader_train_s_init = get_mnist(train=True, transform=transform_mnist32rgb, path=path,
                                             image_size=dataset.im_size,
                                             batch_size=init_batch_size, num_channel=dataset.channel,
                                             in_memory=in_memory, is_balanced=is_balanced)
        data_loader_test_s = get_mnist(train=False, transform=transform_mnist32rgb, path=path,
                                       image_size=dataset.im_size,
                                       batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                       in_memory=in_memory, drop_last=False)
        # NOTE(review): MNIST-M uses transform_svhn (resize 32 + RGB normalize)
        # rather than transform_mnistm — presumably for the 32x32 resize;
        # confirm this is intentional.
        data_loader_train_t = get_mnistm(train=True, transform=transform_svhn, path=path, image_size=dataset.im_size,
                                         batch_size=config.training.batch_size, num_channel=dataset.channel,
                                         in_memory=in_memory)
        data_loader_test_t = get_mnistm(train=False, transform=transform_svhn, path=path, image_size=dataset.im_size,
                                        batch_size=config.training.test_batch_size, num_channel=dataset.channel,
                                        in_memory=in_memory, drop_last=False)
    else:
        raise Exception("Source and Target do not exist")
    return dataset, data_loader_train_s, data_loader_test_s, data_loader_train_t, data_loader_test_t, \
        data_loader_train_s_init
def create_dataset_criteo(config, path, in_memory=True, is_balanced=False, indexes=np.array([])):
    """Build the five Criteo data loaders used for training and evaluation.

    Returns (train_source, test_source, train_target, test_target,
    train_source_init); the init loader is a distinct loader only when
    ``init_batch_size`` differs from ``batch_size``, otherwise it aliases the
    regular source training loader.
    """
    # Keyword arguments shared by every get_criteo call below.
    shared = dict(path=path, config=config, in_memory=in_memory, indexes=indexes)
    data_loader_train_s = get_criteo(is_train=True, is_source=True, batch_size=config.training.batch_size,
                                     drop_last=True, is_balanced=is_balanced, **shared)
    if config.training.init_batch_size != config.training.batch_size:
        data_loader_train_s_init = get_criteo(is_train=True, is_source=True,
                                              batch_size=config.training.init_batch_size,
                                              drop_last=True, is_balanced=is_balanced, **shared)
    else:
        data_loader_train_s_init = data_loader_train_s
    data_loader_test_s = get_criteo(is_train=False, is_source=True, batch_size=config.training.test_batch_size,
                                    drop_last=False, **shared)
    data_loader_train_t = get_criteo(is_train=True, is_source=False, batch_size=config.training.batch_size,
                                     drop_last=True, **shared)
    # NOTE(review): the target test loader uses batch_size, not
    # test_batch_size, unlike the source test loader — confirm intentional.
    data_loader_test_t = get_criteo(is_train=False, is_source=False, batch_size=config.training.batch_size,
                                    drop_last=False, **shared)
    return data_loader_train_s, data_loader_test_s, data_loader_train_t, data_loader_test_t, data_loader_train_s_init
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,037
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/models/criteo/dann_criteo.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import cycle
from time import clock as tick
import numpy as np
from src.eval.utils_eval import evaluate_data_classifier, evaluate_domain_classifier
from src.utils.network import weight_init_glorot_uniform
from src.utils.utils_network import set_lr, build_label_domain, get_optimizer, get_models_criteo, entropy_loss
class DANN(object):
    """Domain-Adversarial Neural Network (DANN) trainer for Criteo.

    A feature extractor and a data classifier are trained on labelled source
    batches while a domain classifier, fed through a gradient-reversal layer
    (GRL), pushes source and target features to be indistinguishable.
    Optionally ends with a pseudo-label refinement phase on the target domain.

    NOTE(review): ``self.nb_epochs`` is read throughout ``fit`` but never set
    in ``__init__`` — it appears to be assigned externally (see
    ``set_nbepoch`` in src/utils/utils_network.py); confirm before use.
    """
    def __init__(self, data_loader_train_s, data_loader_train_t, model_config, cuda=False, logger_file=None,
                 data_loader_test_s=None, data_loader_test_t=None, data_loader_train_s_init=None, feature_sizes=None,
                 n_class=2):
        self.cuda = cuda
        self.data_loader_train_s = data_loader_train_s
        self.data_loader_train_t = data_loader_train_t
        self.data_loader_test_t = data_loader_test_t
        self.data_loader_test_s = data_loader_test_s
        self.data_loader_train_s_init = data_loader_train_s_init
        # Labels fed to the domain classifier: source batches are 1, target 0.
        self.domain_label_s = 1
        self.domain_label_t = 0
        self.refinement = model_config.refinement
        self.n_epochs_refinement = model_config.n_epochs_refinement
        self.lambda_regul = model_config.lambda_regul
        self.lambda_regul_s = model_config.lambda_regul_s
        self.threshold_value = model_config.threshold_value
        self.logger = logger_file
        self.crop_dim = 0
        self.epoch_to_start_align = model_config.epoch_to_start_align
        self.output_fig = model_config.output_fig
        self.stop_grad = model_config.stop_grad
        self.adaptive_lr = model_config.adaptive_lr
        self.use_categorical = model_config.use_categorical
        self.lr_decay_epoch = model_config.epoch_to_start_align
        self.lr_decay_factor = 0.5
        self.model_config = model_config
        self.initialize_model = model_config.initialize_model
        self.init_lr = model_config.init_lr
        # Bookkeeping for best-so-far target AUC / target loss across epochs.
        self.best_score_auc = 0
        self.n_epochs_no_change_auc = 0
        self.best_score_loss = 1000
        self.n_epochs_no_change_loss = 0
        # Multiplier for the reversed gradient; annealed in fit() via p.
        self.grad_scale = 1.0
        self.adapt_only_first = model_config.adapt_only_first
        # Indices of the numerical columns kept when the rest are treated as
        # missing (adapt_only_first mode); 13 numerical features in total.
        self.non_missing_features = [1, 2, 3, 7, 8, 9, 12]
        self.n_missing = 0
        if model_config.adapt_only_first:
            self.n_missing = 13 - len(self.non_missing_features)
        feat_extractor, data_classifier, domain_classifier = get_models_criteo(model_config, n_class, feature_sizes,
                                                                               self.n_missing)
        feat_extractor.apply(weight_init_glorot_uniform)
        data_classifier.apply(weight_init_glorot_uniform)
        domain_classifier.apply(weight_init_glorot_uniform)
        _parent_class = self
        # Identity in the forward pass; negates and scales the gradient in the
        # backward pass. NOTE(review): the first staticmethod argument is the
        # autograd context (conventionally named ctx), not an instance.
        class GradReverse(torch.autograd.Function):
            @staticmethod
            def forward(self, x):
                return x.clone()
            @staticmethod
            def backward(self, grad_output):
                return grad_output.neg() * _parent_class.grad_scale
        # Domain classifier behind the GRL; with stop_grad the feature
        # extractor receives no adversarial gradient at all.
        class GRLDomainClassifier(nn.Module):
            def __init__(self, domain_classifier, stop_grad):
                super(GRLDomainClassifier, self).__init__()
                self.domain_classifier = domain_classifier
                self.stop_grad = stop_grad
            def forward(self, input):
                if self.stop_grad:
                    x = GradReverse.apply(input.detach())
                else:
                    x = GradReverse.apply(input)
                x = self.domain_classifier.forward(x)
                return x
        self.feat_extractor = feat_extractor
        self.data_classifier = data_classifier
        self.grl_domain_classifier = GRLDomainClassifier(domain_classifier, self.stop_grad)
        if self.cuda:
            self.feat_extractor.cuda()
            self.data_classifier.cuda()
            self.grl_domain_classifier.cuda()
        self.optimizer_feat_extractor, self.optimizer_data_classifier, self.optimizer_domain_classifier = \
            get_optimizer(model_config, self)
    def construct_input(self, X_batch_s_I, X_batch_s_C):
        """Assemble the network input from numerical (I) and categorical (C)
        batch tensors.

        Keeps only the non-missing numerical columns in adapt_only_first mode
        and, when use_categorical is set, appends the embedded categorical
        features (the embeddings are owned by the feature extractor).
        """
        if self.adapt_only_first:
            for idx, id in enumerate(self.non_missing_features):
                if idx == 0:
                    X_batch_s_I_filtered = X_batch_s_I[:, id, :]
                else:
                    X_batch_s_I_filtered = torch.cat((X_batch_s_I_filtered, X_batch_s_I[:, id, :]), 1)
        else:
            X_batch_s_I_filtered = X_batch_s_I[:, :, 0]
        if self.use_categorical:
            # Embedding layers require int64 indices.
            X_batch_s_C = X_batch_s_C.to(torch.int64)
            for i, emb in enumerate(self.feat_extractor.categorical_embeddings):
                if i == 0:
                    X_batch_s_C_embedded = emb(X_batch_s_C[:, 0, :])
                else:
                    X_batch_s_C_embedded = torch.cat((X_batch_s_C_embedded, emb(X_batch_s_C[:, i, :])), 2)
            if self.n_missing == 13:
                # Every numerical feature is missing: categorical only.
                return X_batch_s_C_embedded[:, 0, :]
            return torch.cat((X_batch_s_I_filtered, X_batch_s_C_embedded[:, 0, :]), 1)
        return X_batch_s_I_filtered
    def fit(self):
        """Run the full training procedure.

        Phases: (1) optional source-only warm-up for epoch_to_start_align
        epochs, (2) adversarial alignment until nb_epochs, (3) optional
        pseudo-label refinement of the data classifier. Final test metrics
        are stored on self (loss/acc/w_acc/auc per domain).
        """
        self.loss_history = []
        self.error_history = []
        if self.initialize_model:
            # ---- Phase 1: source-only warm-up (no adversarial term) ----
            self.logger.info("Initialize DANN")
            for epoch in range(self.epoch_to_start_align):
                self.feat_extractor.train()
                self.data_classifier.train()
                tic = tick()
                for batch_idx, (X_batch_s_I, X_batch_s_C, y_batch_s) in enumerate(self.data_loader_train_s_init):
                    y_batch_s = y_batch_s.view(-1)
                    self.feat_extractor.zero_grad()
                    self.data_classifier.zero_grad()
                    if self.cuda:
                        X_batch_s_I, X_batch_s_C, y_batch_s = X_batch_s_I.cuda(), X_batch_s_C.cuda(), y_batch_s.cuda()
                    X_batch_s = self.construct_input(X_batch_s_I, X_batch_s_C)
                    output_feat_s = self.feat_extractor(X_batch_s)
                    output_class_s = self.data_classifier(output_feat_s)
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    loss.backward()
                    self.optimizer_feat_extractor.step()
                    self.optimizer_data_classifier.step()
                toc = tick() - tic
                self.logger.info(
                    "\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss:{:.6f}".format(
                        epoch, self.nb_epochs, toc, loss.item(), 0))
                # Periodic evaluation and best-score tracking on the target.
                if epoch % 5 == 0:
                    evaluate_data_classifier(self, is_test=True, is_target=False, is_criteo=True)
                    loss_t, acc_t, w_acc_t, auc_t = evaluate_data_classifier(self, is_test=True, is_target=True,
                                                                             is_criteo=True)
                    evaluate_domain_classifier(self, self.data_loader_test_s, self.data_loader_test_t,
                                               comments="Domain test", is_criteo=True)
                    if auc_t > self.best_score_auc:
                        self.best_score_auc = auc_t
                        self.logger.info(f"Best AUC score: Loss {loss_t}, AUC {auc_t}, WAcc {w_acc_t}")
                        self.n_epochs_no_change_auc = 0
                    else:
                        self.n_epochs_no_change_auc += 1
                    if loss_t < self.best_score_loss:
                        self.best_score_loss = loss_t
                        self.logger.info(f"Best loss score: Loss {loss_t}, AUC {auc_t}, WAcc {w_acc_t}")
                        self.n_epochs_no_change_loss = 0
                    else:
                        self.n_epochs_no_change_loss += 1
                    self.logger.info(f"n_epochs_no_change_loss: {self.n_epochs_no_change_loss} / "
                                     f"n_epochs_no_change_auc: {self.n_epochs_no_change_auc}")
                # During warm-up the total error equals the source loss.
                self.loss_history.append(loss.item())
                self.error_history.append(loss.item())
            start_epoch = self.epoch_to_start_align
            self.logger.info(f"Finished initializing with init batch size")
        else:
            start_epoch = 0
            self.logger.info("Start aligning")
        # ---- Phase 2: adversarial alignment (classification + domain loss) ----
        for epoch in range(start_epoch, self.nb_epochs):
            self.feat_extractor.train()
            self.data_classifier.train()
            self.grl_domain_classifier.train()
            tic = tick()
            # Cycle the (usually shorter) target loader alongside the source one.
            self.T_batches = cycle(iter(self.data_loader_train_t))
            for batch_idx, (X_batch_s_I, X_batch_s_C, y_batch_s) in enumerate(self.data_loader_train_s):
                size_s = X_batch_s_I.size(0)
                y_batch_s = y_batch_s.view(-1)
                # p in [0, 1]: overall training progress; drives LR decay and
                # the GRL gradient scale schedule (as in the DANN paper).
                p = (batch_idx + (epoch - start_epoch) * len(self.data_loader_train_s)) / (
                        len(self.data_loader_train_s) * (self.nb_epochs - start_epoch))
                if self.adaptive_lr:
                    lr = self.init_lr / (1. + 10 * p) ** 0.75
                    set_lr(self.optimizer_feat_extractor, lr)
                    set_lr(self.optimizer_data_classifier, lr)
                    set_lr(self.optimizer_domain_classifier, lr)
                self.feat_extractor.zero_grad()
                self.data_classifier.zero_grad()
                self.grl_domain_classifier.zero_grad()
                X_batch_t_I, X_batch_t_C, _ = next(self.T_batches)
                if self.cuda:
                    X_batch_t_I, X_batch_t_C, X_batch_s_I, X_batch_s_C, y_batch_s = \
                        X_batch_t_I.cuda(), X_batch_t_C.cuda(), X_batch_s_I.cuda(), X_batch_s_C.cuda(), y_batch_s.cuda()
                X_batch_t = self.construct_input(X_batch_t_I, X_batch_t_C)
                size_t = X_batch_t.size(0)
                X_batch_s = self.construct_input(X_batch_s_I, X_batch_s_C)
                output_feat_s = self.feat_extractor(X_batch_s)
                output_class_s = self.data_classifier(output_feat_s)
                loss = F.cross_entropy(output_class_s, y_batch_s)
                # -----------------------------------------------------------------
                # domain classification
                # -----------------------------------------------------------------
                self.grad_scale = (2. / (1. + np.exp(-10 * p)) - 1)
                output_domain_s = self.grl_domain_classifier(output_feat_s)
                label_domain_s = build_label_domain(self, size_s, self.domain_label_s)
                error_s = F.cross_entropy(output_domain_s, label_domain_s)
                output_feat_t = self.feat_extractor(X_batch_t)
                output_domain_t = self.grl_domain_classifier(output_feat_t)
                label_domain_t = build_label_domain(self, size_t, self.domain_label_t)
                error_t = F.cross_entropy(output_domain_t, label_domain_t)
                dist_loss = (error_s + error_t)
                error = loss + dist_loss
                error.backward()
                self.optimizer_feat_extractor.step()
                self.optimizer_data_classifier.step()
                self.optimizer_domain_classifier.step()
            toc = tick() - tic
            self.logger.info(
                "\nTrain epoch: {}/{} {:.1f}% {:2.2f}s \tTotalLoss: {:.6f} LossS: {:.6f} Dist_loss:{:.6f}".format(
                    epoch, self.nb_epochs, p * 100, toc, error.item(), loss.item(), dist_loss.item()))
            self.loss_history.append(loss.item())
            self.error_history.append(error.item())
            if epoch % 5 == 0:
                evaluate_data_classifier(self, is_test=True, is_target=False, is_criteo=True)
                loss_t, acc_t, w_acc_t, auc_t = evaluate_data_classifier(self, is_test=True, is_target=True, is_criteo=True)
                evaluate_domain_classifier(self, self.data_loader_test_s, self.data_loader_test_t,
                                           comments="Domain test", is_criteo=True)
                if auc_t > self.best_score_auc:
                    self.best_score_auc = auc_t
                    self.logger.info(f"Best AUC score: Loss {loss_t}, AUC {auc_t}, WAcc {w_acc_t}")
                    self.n_epochs_no_change_auc = 0
                else:
                    self.n_epochs_no_change_auc += 1
                if loss_t < self.best_score_loss:
                    self.best_score_loss = loss_t
                    self.logger.info(f"Best loss score: Loss {loss_t}, AUC {auc_t}, WAcc {w_acc_t}")
                    self.n_epochs_no_change_loss = 0
                else:
                    self.n_epochs_no_change_loss += 1
                self.logger.info(f"n_epochs_no_change_loss: {self.n_epochs_no_change_loss} / "
                                 f"n_epochs_no_change_auc: {self.n_epochs_no_change_auc}")
        self.logger.info(f"Best Loss {self.best_score_loss}, best AUC {self.best_score_auc}")
        if self.refinement:
            # ---- Phase 3: refine only the data classifier with target
            # pseudo-labels (confident predictions) + entropy regularization ----
            self.logger.info("Refinement")
            n_epochs_refinement = self.n_epochs_refinement
            lambda_regul = self.lambda_regul
            lambda_regul_s = self.lambda_regul_s
            threshold_value = self.threshold_value
            for epoch in range(self.nb_epochs, self.nb_epochs + n_epochs_refinement):
                self.data_classifier.train()
                self.T_batches = cycle(iter(self.data_loader_train_t))
                evaluate_data_classifier(self, is_test=True, is_target=False, is_criteo=True)
                evaluate_data_classifier(self, is_test=True, is_target=True, is_criteo=True)
                for batch_idx, (X_batch_s_I, X_batch_s_C, y_batch_s) in enumerate(self.data_loader_train_s):
                    y_batch_s = y_batch_s.view(-1)
                    self.data_classifier.zero_grad()
                    X_batch_t_I, X_batch_t_C, y_batch_t = next(self.T_batches)
                    if self.cuda:
                        X_batch_t_I, X_batch_t_C, X_batch_s_I, X_batch_s_C, y_batch_s, y_batch_t = \
                            X_batch_t_I.cuda(), X_batch_t_C.cuda(), X_batch_s_I.cuda(), X_batch_s_C.cuda(), \
                            y_batch_s.cuda(), y_batch_t.cuda()
                    X_batch_t = self.construct_input(X_batch_t_I, X_batch_t_C)
                    X_batch_s = self.construct_input(X_batch_s_I, X_batch_s_C)
                    # Source Domain Data : forward feature extraction + data classifier
                    output_feat_s = self.feat_extractor(X_batch_s)
                    output_class_s = self.data_classifier(output_feat_s)
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    # Target Domain Data
                    output_feat_t = self.feat_extractor(X_batch_t)
                    output_class_t = self.data_classifier(output_feat_t)
                    # Confident target samples get pseudo-labels; the rest are
                    # pushed towards low entropy.
                    # NOTE(review): log_softmax without an explicit dim relies
                    # on deprecated implicit-dim behavior.
                    threshold_index = F.log_softmax(output_class_t).data.max(1)[0] > np.log(threshold_value)
                    loss_t_ent = entropy_loss(output_class_t[~threshold_index])
                    # pseudo-labels
                    y_batch_pseudo_t = output_class_t.data.max(1)[1][threshold_index]
                    if torch.sum(threshold_index) > 0:
                        loss_t = F.cross_entropy(output_class_t[threshold_index], y_batch_pseudo_t)
                    else:
                        loss_t = torch.zeros(1).cuda() if self.cuda else torch.zeros(1)
                    n_pseudo_labelled = torch.sum(threshold_index).item()
                    error = lambda_regul_s * loss + loss_t + lambda_regul * loss_t_ent
                    error.backward()
                    self.optimizer_data_classifier.step()
                self.logger.info(
                    "\nTrain epoch: {}/{} \tTotalLoss: {:.6f} LossS: {:.6f} LossT: {:.6f} "
                    "EntropyT: {:.6f}".format(epoch, self.nb_epochs + n_epochs_refinement, error.item(),
                                              lambda_regul_s * loss.item(), loss_t.item(), lambda_regul * loss_t_ent.item()))
                self.logger.info("N_Pseudo: {:.1f}".format(n_pseudo_labelled))
        # Final evaluation: metrics stored on self for downstream reporting.
        self.loss_test_s, self.acc_test_s, self.w_acc_test_s, self.auc_test_s = \
            evaluate_data_classifier(self, is_test=True, is_target=False, is_criteo=True)
        self.loss_test_t, self.acc_test_t, self.w_acc_test_t, self.auc_test_t = \
            evaluate_data_classifier(self, is_test=True, is_target=True, is_criteo=True)
        self.loss_d_test, self.acc_d_test = \
            evaluate_domain_classifier(self, self.data_loader_test_s, self.data_loader_test_t, comments="Domain test",
                                       is_criteo=True)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,038
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/utils/network.py
|
import numpy as np
import torch
import torch.nn as nn
def num_flat_features(x):
    """Return the number of elements per sample in a batched tensor
    (the product of all dimensions except the batch dimension)."""
    count = 1
    for dim in x.size()[1:]:
        count *= dim
    return count
def weight_init_glorot_uniform(m):
    """Layer-type-aware initializer for use with nn.Module.apply().

    Linear: Xavier-uniform weights, bias 0.01. Conv: weights ~ N(0, 0.02).
    BatchNorm: weights ~ N(1, 0.02), bias 0.
    """
    layer_name = type(m).__name__
    if "Linear" in layer_name:
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
    if "Conv" in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif "BatchNorm" in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
##########
# DIGITS #
##########
class FeatureExtractorDigits(nn.Module):
    """Three-block convolutional feature extractor for 32x32 digit images.

    Produces a flat, sigmoid-activated (batch, 128) feature vector.
    """
    def __init__(self, dataset, kernel_size=5):
        super(FeatureExtractorDigits, self).__init__()
        # Block 1: conv -> BN -> pool -> ReLU.
        self.conv1 = nn.Conv2d(dataset.channel, 64, kernel_size=kernel_size)
        self.bn1 = nn.BatchNorm2d(64)
        self.pool1 = nn.MaxPool2d(2)
        self.relu1 = nn.ReLU()
        # Block 2: same shape, 64 channels.
        self.conv2 = nn.Conv2d(64, 64, kernel_size=kernel_size)
        self.bn2 = nn.BatchNorm2d(64)
        self.pool2 = nn.MaxPool2d(2)
        self.relu2 = nn.ReLU()
        # Block 3: widen to 128 channels and squash into [0, 1].
        self.conv3 = nn.Conv2d(64, 64 * 2, kernel_size=kernel_size)
        self.bn3 = nn.BatchNorm2d(64 * 2)
        self.sigmoid = nn.Sigmoid()
    def forward(self, input):
        out = self.relu1(self.pool1(self.bn1(self.conv1(input))))
        out = self.relu2(self.pool2(self.bn2(self.conv2(out))))
        out = self.sigmoid(self.bn3(self.conv3(out)))
        return out.view(out.size(0), -1)
class DataClassifierDigits(nn.Module):
    """MLP label classifier over digit features, returning raw logits.

    Imputation models concatenate two feature vectors, doubling the input
    width (is_imput=True -> 256, otherwise 128).
    """
    def __init__(self, n_class, is_imput=False):
        super(DataClassifierDigits, self).__init__()
        in_width = 64 * 2 * (2 if is_imput else 1)
        self.fc1 = nn.Linear(in_width, 100)
        self.bn1 = nn.BatchNorm1d(100)
        self.relu1 = nn.ReLU()
        self.dp1 = nn.Dropout2d()
        self.fc2 = nn.Linear(100, 100)
        self.bn2 = nn.BatchNorm1d(100)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(100, n_class)
    def forward(self, input):
        hidden = self.dp1(self.relu1(self.bn1(self.fc1(input))))
        hidden = self.relu2(self.bn2(self.fc2(hidden)))
        return self.fc3(hidden)
class DomainClassifierDigits(nn.Module):
    """Binary source/target discriminator over digit features.

    bigger_discrim selects a three-layer head (500-100-2) instead of a
    two-layer one (100-2); is_d1 doubles the input width for concatenated
    feature vectors.
    """
    def __init__(self, is_d1=False, bigger_discrim=True):
        super(DomainClassifierDigits, self).__init__()
        self.domain_classifier = nn.Sequential()  # unused; kept to preserve the module layout
        in_width = 64 * 2 * (2 if is_d1 else 1)
        hidden = 500 if bigger_discrim else 100
        self.bigger_discrim = bigger_discrim
        self.fc1 = nn.Linear(in_width, hidden)
        self.bn1 = nn.BatchNorm1d(hidden)
        self.relu1 = nn.ReLU()
        # The small head maps straight to the two domain logits from fc2.
        self.fc2 = nn.Linear(hidden, 100) if bigger_discrim else nn.Linear(hidden, 2)
        self.bn2 = nn.BatchNorm1d(100)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(100, 2)
    def forward(self, input):
        out = self.relu1(self.bn1(self.fc1(input)))
        if self.bigger_discrim:
            return self.fc3(self.relu2(self.bn2(self.fc2(out))))
        return self.fc2(out)
class ReconstructorDigits(nn.Module):
    """Maps a 128-d digit feature vector to a reconstructed 128-d vector in
    [0, 1]; bigger_reconstructor enables an extra hidden layer."""
    def __init__(self, bigger_reconstructor=False):
        super(ReconstructorDigits, self).__init__()
        self.domain_classifier = nn.Sequential()  # unused; kept to preserve the module layout
        feat_dim, hidden_dim = 64 * 2, 512
        self.bigger_reconstructor = bigger_reconstructor
        self.fc1 = nn.Linear(feat_dim, hidden_dim)
        self.bn1 = nn.BatchNorm1d(hidden_dim)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_dim, feat_dim)
        self.bn3 = nn.BatchNorm1d(feat_dim)
        self.sigmoid = nn.Sigmoid()
    def forward(self, input):
        out = self.relu1(self.bn1(self.fc1(input)))
        if self.bigger_reconstructor:
            out = self.relu2(self.bn2(self.fc2(out)))
        return self.sigmoid(self.bn3(self.fc3(out)))
##########
# Criteo #
##########
class FeatureExtractorCriteo(nn.Module):
    """Three-layer MLP feature extractor for Criteo rows.

    When feature_sizes is given, it also owns one embedding per categorical
    feature so they are trained with the extractor — the embeddings are
    applied by the caller (construct_input), not inside forward().
    """
    def __init__(self, feature_sizes=None, input_size=13, output_size=128):
        super(FeatureExtractorCriteo, self).__init__()
        self.nn1 = nn.Linear(input_size, output_size)
        self.relu1 = nn.ReLU()
        self.nn2 = nn.Linear(output_size, output_size)
        self.relu2 = nn.ReLU()
        self.nn3 = nn.Linear(output_size, output_size)
        self.sigmoid = nn.Sigmoid()
        self.feature_sizes = feature_sizes
        if feature_sizes is not None:
            # Embedding width rule of thumb: 6 * cardinality**0.25, truncated.
            self.categorical_embeddings = nn.ModuleList(
                [nn.Embedding(size, int(6 * np.power(size, 1 / 4)))
                 for size in self.feature_sizes])
    def forward(self, input):
        flat = input.view(input.size(0), -1)
        flat = self.relu1(self.nn1(flat))
        flat = self.relu2(self.nn2(flat))
        out = self.sigmoid(self.nn3(flat))
        return out.view(out.size(0), -1)
class DataClassifierCriteo(nn.Module):
    """Two-layer label-classification head for Criteo features (raw logits).

    The imputation variant receives two concatenated feature vectors, so the
    input width doubles when is_imput is set.
    """
    def __init__(self, n_class, input_size, is_imput=False):
        super(DataClassifierCriteo, self).__init__()
        in_width = input_size * (2 if is_imput else 1)
        self.fc1 = nn.Linear(in_width, input_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(input_size, n_class)
    def forward(self, input):
        return self.fc2(self.relu1(self.fc1(input)))
class DomainClassifierCriteo(nn.Module):
    """Binary source/target discriminator for Criteo features.

    bigger_discrim inserts an extra hidden layer (fc2) before the two-logit
    output; is_d1 doubles the input width for concatenated feature vectors.
    """
    def __init__(self, input_size=128, is_d1=False, bigger_discrim=True):
        super(DomainClassifierCriteo, self).__init__()
        self.domain_classifier = nn.Sequential()  # unused; kept to preserve the module layout
        in_width = input_size * (2 if is_d1 else 1)
        self.bigger_discrim = bigger_discrim
        self.fc1 = nn.Linear(in_width, input_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(input_size, input_size)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(input_size, 2)
    def forward(self, input):
        out = self.relu1(self.fc1(input))
        if self.bigger_discrim:
            out = self.relu2(self.fc2(out))
        return self.fc3(out)
class ReconstructorCriteo(nn.Module):
    """Maps Criteo features to reconstructed features of the same width, in
    [0, 1]; bigger_reconstructor enables an extra hidden layer."""
    def __init__(self, input_size=128, bigger_reconstructor=False):
        super(ReconstructorCriteo, self).__init__()
        self.domain_classifier = nn.Sequential()  # unused; kept to preserve the module layout
        self.bigger_reconstructor = bigger_reconstructor
        hidden = input_size * 2
        self.fc1 = nn.Linear(input_size, hidden)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden, hidden)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden, input_size)
        self.sigmoid = nn.Sigmoid()
    def forward(self, input):
        out = self.relu1(self.fc1(input))
        if self.bigger_reconstructor:
            out = self.relu2(self.fc2(out))
        return self.sigmoid(self.fc3(out))
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,039
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/experiments/launcher/experiments_criteo.py
|
# Shared hyper-parameter values reused by every Criteo experiment grid below.
use_categorical = 0  # 1 -> also feed embedded categorical features
n_epochs = 50
activate_adaptation_imp = 1
activate_mse = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 0.005
refinement = 1  # run the pseudo-label refinement phase after training
n_epochs_refinement = 10
lambda_regul = [1.0]
lambda_regul_s = [1.0]
is_balanced = 0
threshold_value = [0.95]
compute_variance = False
# One seed for a single run; five seeds when estimating run-to-run variance.
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class SourceZeroImputCriteo(object):
    """Launcher grid: source-only DANN baseline (epoch_to_start_align equals
    n_epochs, so the adversarial alignment loop never runs) with
    upper_bound disabled."""
    MAX_NB_PROCESSES = 1
    DEBUG = False
    BINARY = "experiments/launcher/criteo_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [0],
        "-n_epochs": [n_epochs],
        "-epoch_to_start_align": [n_epochs],
        "-adaptive_lr": [1],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [500],
        "-init_lr": [10 ** -6],
        "-use_categorical": [use_categorical],
        "-random_seed": random_seed
    }
class SourceIgnoreCriteo(object):
    """Launcher grid: source-only DANN baseline restricted to the non-missing
    features (adapt_only_first); alignment never runs since
    epoch_to_start_align equals n_epochs."""
    MAX_NB_PROCESSES = 1
    DEBUG = False
    BINARY = "experiments/launcher/criteo_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-n_epochs": [n_epochs],
        "-epoch_to_start_align": [n_epochs],
        "-adaptive_lr": [1],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [500],
        "-init_lr": [10 ** -6],
        "-use_categorical": [use_categorical],
        "-adapt_only_first": [1],
        "-random_seed": random_seed
    }
class DannZeroImputCriteo(object):
    """Launcher grid: full DANN run (alignment starts at epoch 6) with
    upper_bound disabled and optional pseudo-label refinement."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/criteo_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-n_epochs": [n_epochs],
        "-epoch_to_start_align": [6],
        "-batch_size": [500],
        "-is_balanced": [is_balanced],
        "-initialize_model": [1],
        "-init_batch_size": [500],
        "-init_lr": [10 ** -6],
        "-use_categorical": [use_categorical],
        "-bigger_discrim": [1],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannIgnoreCriteo(object):
    """Launcher grid: full DANN run on the non-missing features only
    (adapt_only_first), with alignment from epoch 6 and optional
    pseudo-label refinement."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/criteo_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-adaptive_lr": [1],
        "-n_epochs": [n_epochs],
        "-epoch_to_start_align": [6],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-is_balanced": [is_balanced],
        "-init_batch_size": [500],
        "-init_lr": [10 ** -6],
        "-use_categorical": [use_categorical],
        "-bigger_discrim": [1],
        "-adapt_only_first": [1],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannImputCriteo(object):
    """Launcher grid: imputation DANN variant (mode dann_imput) with the
    adversarial-imputation, MSE and d1-adaptation losses enabled plus
    optional pseudo-label refinement."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/criteo_binary.py"
    GRID = {
        "-mode": ["dann_imput"],
        "-upper_bound": [0],
        "-n_epochs": [n_epochs],
        "-epoch_to_start_align": [6],
        "-adaptive_lr": [1],
        "-adaptive_grad_scale": [1],
        "-stop_grad": [0],
        "-is_balanced": [is_balanced],
        "-batch_size": [500],
        "-init_lr": [10 ** -6],
        "-initialize_model": [1],
        "-init_batch_size": [500],
        "-bigger_reconstructor": [1],
        "-bigger_discrim": [1],
        "-use_categorical": [use_categorical],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-smaller_lr_imput": [1],
        "-weight_d2": [weight_d2],
        "-weight_mse": [weight_mse],
        "-weight_classif": [1.0],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,040
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/utils/utils_network.py
|
import numpy as np
import torch
import logging
import os
import torch.optim as optim
import torch.nn.functional as F
from logging.handlers import RotatingFileHandler
from src.utils.network import FeatureExtractorDigits, DataClassifierDigits, DomainClassifierDigits, \
ReconstructorDigits, FeatureExtractorCriteo, DataClassifierCriteo, DomainClassifierCriteo, \
ReconstructorCriteo
def exp_lr_scheduler(optimizer, epoch, lr_decay_step=100, factor=0.5, name=None, decay_once=True, logger=None):
    """Multiply the optimizer's learning rate by *factor* on decay epochs.

    decay_once=True decays only when epoch == lr_decay_step; otherwise the
    decay fires at every positive multiple of lr_decay_step. Returns the
    (possibly updated) optimizer.
    """
    init_lr = optimizer.param_groups[0]["lr"]
    # Same decay action in both modes; only the trigger condition differs.
    should_decay = (epoch == lr_decay_step) if decay_once else (epoch % lr_decay_step == 0)
    if epoch > 0 and should_decay:
        lr = init_lr * factor
        if logger:
            logger.info(f"Changing {name} LR to {lr} at step {lr_decay_step}")
        set_lr(optimizer, lr)
    return optimizer
def set_lr(optimizer, lr):
    """Overwrite the learning rate of every param group; returns the optimizer."""
    for group in optimizer.param_groups:
        group["lr"] = lr
    return optimizer
def get_feature_extractor(model):
    """Return the feature-extractor network attached to *model*."""
    return model.feat_extractor
def get_data_classifier(model):
    """Return the data-classifier network attached to *model*."""
    return model.data_classifier
def set_logger(model, logger):
    """Attach *logger* to *model* (used by the training loops for reporting)."""
    model.logger = logger
def set_nbepoch(model, nb_epoch):
    """Set the total number of training epochs on *model* (read by fit())."""
    model.nb_epochs = nb_epoch
def build_label_domain(model, size, label):
    """Return a 1-D LongTensor of length *size* filled with the domain *label*.

    Used as the cross-entropy target for the domain classifier (source=1,
    target=0). The tensor is moved to GPU when model.cuda is set.

    Replaces the previous deprecated pattern (uninitialized
    ``torch.LongTensor(size)`` followed by ``.data.resize_().fill_()``) with
    the equivalent ``torch.full`` construction.
    """
    label_domain = torch.full((size,), label, dtype=torch.long)
    if model.cuda:
        label_domain = label_domain.cuda()
    return label_domain
def create_log_name(name, config):
    """Compose a unique log basename from the mode, the domain pair and run ids."""
    parts = [name, config.model.mode, config.model.source, config.model.target,
             config.run.run_id, config.run.metarun_id]
    return "_".join(str(part) for part in parts)
def get_models(model_config, n_class, dataset):
    """Instantiate the (feature extractor, data classifier, domain classifier)
    triple for the digits setting."""
    return (
        FeatureExtractorDigits(dataset),
        DataClassifierDigits(n_class),
        DomainClassifierDigits(bigger_discrim=model_config.bigger_discrim),
    )
def get_models_imput(model_config, n_class, dataset):
    """Instantiate the imputation variant for digits: one extractor per view,
    a classifier, two domain discriminators and a reconstructor."""
    feat_extract_1 = FeatureExtractorDigits(dataset)
    feat_extract_2 = FeatureExtractorDigits(dataset)
    data_class = DataClassifierDigits(n_class, is_imput=True)
    domain_class_1 = DomainClassifierDigits(is_d1=True, bigger_discrim=model_config.bigger_discrim)
    domain_class_2 = DomainClassifierDigits(bigger_discrim=model_config.bigger_discrim)
    reconstructor = ReconstructorDigits(bigger_reconstructor=model_config.bigger_reconstructor)
    return feat_extract_1, feat_extract_2, data_class, domain_class_1, domain_class_2, reconstructor
def get_models_criteo(model_config, n_class, feature_sizes, n_missing=0):
    """Instantiate extractor / classifier / discriminator for Criteo.

    Input width is the 13 continuous columns minus the n_missing removed ones,
    plus (with use_categorical) one embedding of dim int(6 * size**0.25) per
    categorical feature; the hidden width grows accordingly.
    """
    if model_config.use_categorical:
        embedding_dims = [int(6 * np.power(size, 1 / 4)) for size in feature_sizes]
        input_size = np.sum(embedding_dims) + 13 - n_missing
        output_size = 1024
    else:
        input_size = 13 - n_missing
        output_size = 128
    feat_extract = FeatureExtractorCriteo(feature_sizes=feature_sizes, input_size=input_size,
                                          output_size=output_size)
    data_class = DataClassifierCriteo(n_class=n_class, input_size=output_size)
    domain_class = DomainClassifierCriteo(input_size=output_size, bigger_discrim=model_config.bigger_discrim)
    return feat_extract, data_class, domain_class
def get_models_imput_criteo(model_config, n_class, feature_sizes, n_missing=3):
    """Instantiate the imputation variant for Criteo: one extractor per view,
    a classifier, two domain discriminators and a reconstructor.

    The second extractor consumes only the n_missing imputed columns.
    """
    if model_config.use_categorical:
        embedding_dims = [int(6 * np.power(size, 1 / 4)) for size in feature_sizes]
        input_size = np.sum(embedding_dims) + 13 - n_missing
        output_size = 1024
    else:
        input_size = 13 - n_missing
        output_size = 128
    feat_extract_1 = FeatureExtractorCriteo(feature_sizes=feature_sizes, input_size=input_size,
                                            output_size=output_size)
    feat_extract_2 = FeatureExtractorCriteo(input_size=n_missing, output_size=output_size)
    data_class = DataClassifierCriteo(n_class=n_class, is_imput=True, input_size=output_size)
    domain_class_1 = DomainClassifierCriteo(input_size=output_size, is_d1=True,
                                            bigger_discrim=model_config.bigger_discrim)
    domain_class_2 = DomainClassifierCriteo(input_size=output_size,
                                            bigger_discrim=model_config.bigger_discrim)
    reconstructor = ReconstructorCriteo(input_size=output_size,
                                        bigger_reconstructor=model_config.bigger_reconstructor)
    return feat_extract_1, feat_extract_2, data_class, domain_class_1, domain_class_2, reconstructor
def get_optimizer(model_config, model):
    """Create Adam optimizers for the classifier (f), the extractor (g) and,
    in DANN modes, the domain discriminator (d).

    Non-DANN modes return the extractor optimizer in the third slot so callers
    can always unpack three values.
    """
    adam_betas = (0.8, 0.999)
    lr = model_config.init_lr
    optimizer_g = optim.Adam(model.feat_extractor.parameters(), lr=lr, betas=adam_betas)
    optimizer_f = optim.Adam(model.data_classifier.parameters(), lr=lr, betas=adam_betas)
    if "dann" in model_config.mode:
        optimizer_d = optim.Adam(model.grl_domain_classifier.parameters(), lr=lr, betas=adam_betas)
        return optimizer_f, optimizer_g, optimizer_d
    return optimizer_f, optimizer_g, optimizer_g
def get_optimizer_imput(model_config, model):
    """Create Adam optimizers for the imputation model's six sub-networks.

    Returns (data classifier, extractor 1, extractor 2, reconstructor, d1, d2);
    in non-DANN modes the two discriminator slots are filled with the
    reconstructor optimizer so the arity stays constant for callers.
    """
    adam_betas = (0.8, 0.999)
    lr = model_config.init_lr
    optimizer_g1 = optim.Adam(model.feat_extractor1.parameters(), lr=lr, betas=adam_betas)
    optimizer_g2 = optim.Adam(model.feat_extractor2.parameters(), lr=lr, betas=adam_betas)
    optimizer_h = optim.Adam(model.reconstructor.parameters(), lr=lr, betas=adam_betas)
    optimizer_data_classifier = optim.Adam(model.data_classifier.parameters(), lr=lr, betas=adam_betas)
    if "dann" in model_config.mode:
        optimizer_d1 = optim.Adam(model.grl_domain_classifier1.parameters(), lr=lr, betas=adam_betas)
        optimizer_d2 = optim.Adam(model.grl_domain_classifier2.parameters(), lr=lr, betas=adam_betas)
        return optimizer_data_classifier, optimizer_g1, optimizer_g2, optimizer_h, optimizer_d1, optimizer_d2
    return optimizer_data_classifier, optimizer_g1, optimizer_g2, optimizer_h, optimizer_h, optimizer_h
def create_logger(outfile):
    """Configure the root logger with DEBUG handlers on `outfile` and stderr.

    Also creates the ./results/ directory on first use. Note: this configures
    the *root* logger, so repeated calls keep appending handlers.
    """
    try:
        os.mkdir("./results/")
        print(f"Directory ./results/ created")
    except FileExistsError:
        print(f"Directory ./results/ already exists replacing files in this notebook")
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    # File handler first, then console handler — same order as before.
    for handler in (RotatingFileHandler(outfile, "w"), logging.StreamHandler()):
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
    return logger
def entropy_loss(v):
    """
    Entropy loss for probabilistic prediction vectors.

    v: (batch, n_class) tensor of raw logits.
    Returns -sum(softmax(v) * log_softmax(v)) / batch_size, i.e. the mean
    Shannon entropy of the predicted class distributions.
    """
    assert v.dim() == 2
    n = v.size(0)
    # dim=1 made explicit: the implicit-dim softmax form is deprecated, and this
    # loss is defined over the class dimension.
    b = F.softmax(v, dim=1) * F.log_softmax(v, dim=1)
    return -1.0 * b.sum() / n
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,041
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/orchestration/launcher.py
|
import argparse
import sys
import os
from sklearn.model_selection import ParameterGrid
from concurrent.futures import ThreadPoolExecutor
import subprocess
from datetime import datetime
import time
from experiments import all_experiments
# Repository root (one level above this orchestration/ directory).
root_dir = "{}/..".format(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser(description='Experiment Launcher')
# Name of an entry in experiments.all_experiments.
parser.add_argument('--experiment', type=str, default="dann_imput_usps_mnist")
# NOTE(review): --debug is parsed but never read below; the per-experiment
# DEBUG class attribute is used instead — confirm whether this flag is dead.
parser.add_argument('--debug', type=str, default="false")
parser.add_argument('--gpu_id', type=int, default=0)
args = parser.parse_args()
def launcher_with_environment(env, debug):
    """Return a closure that executes a shell-style command string with `env`.

    The command is split on whitespace (arguments must not contain spaces).
    When `debug` is truthy the command is only printed, not executed; the
    child's stderr is streamed to stdout line by line.
    """
    def launch_command_line(command):
        tab = command.split()
        print("Executing {}".format(command))
        if not debug:
            print(tab)
            try:
                # stdout is discarded: the previous version piped it without ever
                # reading it, which can deadlock once the pipe buffer fills.
                myPopen = subprocess.Popen(
                    tab,
                    env=env,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.PIPE)
                for l in myPopen.stderr:
                    print(l)
                # Reap the child so long-running grids don't accumulate zombies.
                myPopen.wait()
            except (OSError, subprocess.CalledProcessError) as e:
                # Popen itself raises OSError (e.g. missing binary);
                # CalledProcessError is kept for callers using check_* wrappers.
                print(getattr(e, "output", e))
    return launch_command_line
# Retrieving experiment
experiment_name = args.experiment
experiment = all_experiments[experiment_name]
gpu_id = args.gpu_id
# Creating env for the runs
env = os.environ.copy()
print("Using env {}".format(env))
# Checking how many tests we want to launch
all_configs = list(ParameterGrid(experiment.GRID))
nb_tests = len(all_configs)
print("%d experiments to launch..." % nb_tests)
# Creating executors with max nb processes from the config
executor = ThreadPoolExecutor(max_workers=experiment.MAX_NB_PROCESSES)
# Running the tests
with open("params_to_remember.txt", "a") as f_params:
    f_params.write("------------\n")
    now = datetime.now()
    # The launch timestamp doubles as the metarun id shared by all runs below.
    ts = int(time.mktime(now.timetuple()))
    f_params.write("Starting run at {} (metarunid {})\n".format(str(now), str(ts)))
    for runid, parameter_set in enumerate(all_configs):
        print(parameter_set)
        f_params.write("{} => {}\n".format(runid, parameter_set))
        f_params.flush()
        # The python binary is available in sys.executable
        # NOTE(review): this rebinds `args`, shadowing the argparse namespace
        # parsed above (harmless here since args.* is no longer read, but fragile).
        args = ["{} {}".format(sys.executable, "{}/{}".format(root_dir, experiment.BINARY))]
        for a in parameter_set:
            args.append("-" + a + " " + str(parameter_set[a]))
        args.append("--run_id {}".format(str(runid)))
        # The experiment should be aware of the number of running processes so that it does not
        # ask for too much memory on the GPU
        args.append("--max_nb_processes {}".format(min([experiment.MAX_NB_PROCESSES, nb_tests])))
        args.append("--experiment_name {}".format(experiment_name))
        args.append("--metarun_id {}".format(str(ts)))
        args.append("--gpu_id {}".format(gpu_id))
        command = " ".join(args)
        # Fire-and-forget: the executor is never shut down explicitly; the
        # interpreter joins its worker threads at exit.
        executor.submit(launcher_with_environment(env, experiment.DEBUG), command)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,042
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/dataset/dataset_criteo.py
|
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
import numpy as np
import os
import pandas as pd
from src.dataset.sampler import BalancedBatchSampler
class CriteoDataset(Dataset):
    """
    Custom dataset class for Criteo dataset in order to use efficient
    dataloader tool provided by PyTorch.
    """
    def __init__(self, path, config, is_train, is_source, indexes):
        """
        Initialize file path and train/test mode.

        path: directory containing data/total_source_data.txt and
            data/total_target_data.txt (13 continuous columns first, then
            the categorical columns, label last).
        config: reads config.model.upper_bound and config.model.use_categorical.
        is_train: serve the 80% train split when True, else the 20% test split.
        is_source: serve the source-domain file when True, else the target one.
        indexes: positions of the "missing" columns returned as X2 when
            use_categorical is False.
        """
        # Both domain files are loaded regardless of is_source, so the
        # fixed-seed splits below stay identical across instances.
        data_source = pd.read_csv(os.path.join(path, "data/total_source_data.txt"))
        data_target = pd.read_csv(os.path.join(path, "data/total_target_data.txt"))
        # Prepare dataset for log normalization
        # Shift values so np.log below is defined: +1 on all 13 continuous
        # columns plus an extra +2 on column 1 (presumably because its raw
        # values can be more negative than -1 — TODO confirm against the data).
        data_source.iloc[:, :13] = data_source.iloc[:, :13] + 1
        data_target.iloc[:, :13] = data_target.iloc[:, :13] + 1
        data_source.iloc[:, 1] = data_source.iloc[:, 1] + 2
        data_target.iloc[:, 1] = data_target.iloc[:, 1] + 2
        data_source.iloc[:, :13] = np.floor(np.log(data_source.iloc[:, :13]))
        data_target.iloc[:, :13] = np.floor(np.log(data_target.iloc[:, :13]))
        if not config.model.upper_bound:
            # Simulate missing features: zero a fixed subset of the target's
            # continuous columns (left intact in the upper_bound/oracle setting).
            data_target.iloc[:, 0] = 0
            data_target.iloc[:, 4] = 0
            data_target.iloc[:, 5] = 0
            data_target.iloc[:, 6] = 0
            data_target.iloc[:, 10] = 0
            data_target.iloc[:, 11] = 0
        # Last column is the click label; everything before it is a feature.
        X_source = data_source.iloc[:, :-1].values
        y_source = data_source.iloc[:, -1].values
        X_target = data_target.iloc[:, :-1].values
        y_target = data_target.iloc[:, -1].values
        # Deterministic 80/20 split (fixed random_state) per domain.
        X_train_source, X_test_source, y_train_source, y_test_source = \
            train_test_split(X_source, y_source, test_size=0.2, random_state=12)
        X_train_target, X_test_target, y_train_target, y_test_target = \
            train_test_split(X_target, y_target, test_size=0.2, random_state=12)
        # X1 = the 13 continuous columns; X2 = the selected "missing" columns,
        # or all categorical columns when use_categorical is set.
        X1_train_source = X_train_source[:, :13]
        X1_test_source = X_test_source[:, :13]
        X1_train_target = X_train_target[:, :13]
        X1_test_target = X_test_target[:, :13]
        use_categorical = config.model.use_categorical
        if is_source:
            self.X1 = X1_train_source
            self.X2 = X_train_source[:, indexes] if not use_categorical else X_train_source[:, 13:]
            self.y = y_train_source
            self.X1_test = X1_test_source
            self.X2_test = X_test_source[:, indexes] if not use_categorical else X_test_source[:, 13:]
            self.y_test = y_test_source
        else:
            self.X1 = X1_train_target
            self.X2 = X_train_target[:, indexes] if not use_categorical else X_train_target[:, 13:]
            self.y = y_train_target
            self.X1_test = X1_test_target
            self.X2_test = X_test_target[:, indexes] if not use_categorical else X_test_target[:, 13:]
            self.y_test = y_test_target
        self.is_train = is_train
    def __getitem__(self, idx):
        """Return (X1, X2, label) for sample `idx` of the selected split.

        X1: (13, 1) float tensor of log-binned continuous features.
        X2: (k, 1) float tensor of the missing/categorical view.
        """
        dataI = self.X1[idx, :] if self.is_train else self.X1_test[idx, :]
        dataC = self.X2[idx, :] if self.is_train else self.X2_test[idx, :]
        target = self.y[idx] if self.is_train else self.y_test[idx]
        # int32 cast truncates (values were already floored in __init__) before
        # converting to float tensors with a trailing channel dimension.
        Xi = torch.from_numpy(dataI.astype(np.int32)).unsqueeze(-1).float()
        Xc = torch.from_numpy(dataC.astype(np.int32)).unsqueeze(-1).float()
        return Xi, Xc, target
    def __len__(self):
        # Length follows the split selected at construction time.
        if self.is_train:
            return len(self.X1)
        else:
            return len(self.X1_test)
def get_criteo(is_train, is_source, path, config, indexes=np.array([]), batch_size=32, in_memory=True,
               is_balanced=False, drop_last=True):
    """
    Build a DataLoader over the Criteo dataset.

    is_train / is_source: select the split and the domain (see CriteoDataset).
    indexes: positions of the "missing" continuous columns (X2) when
        config.model.use_categorical is False.
    in_memory: materialize the whole split into tensors once so later epochs
        iterate a plain TensorDataset instead of re-indexing the dataset.
    is_balanced: use BalancedBatchSampler to oversample minority labels.
    Returns a DataLoader yielding (X1, X2, label) batches.
    """
    criteo_dataset = CriteoDataset(path, config, is_train=is_train, is_source=is_source, indexes=indexes)

    def _make_loader(dataset, sampler_in_memory):
        # Single place for the balanced-vs-shuffled DataLoader construction.
        if is_balanced:
            return torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=batch_size,
                sampler=BalancedBatchSampler(dataset, in_memory=sampler_in_memory, is_criteo=True),
                drop_last=drop_last)
        return torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=True,
            drop_last=drop_last)

    if not in_memory:
        return _make_loader(criteo_dataset, False)
    # Materialize the whole split once. Collecting per-sample tensors and
    # concatenating fixes the previous pre-allocation, which sized the X2
    # buffer as len(indexes) and was wrong when use_categorical=True (X2 then
    # holds all categorical columns, not the `indexes` subset).
    loader = torch.utils.data.DataLoader(
        dataset=criteo_dataset,
        batch_size=1,
        shuffle=True,
        drop_last=False)
    parts_x1, parts_x2, parts_y = [], [], []
    for x1, x2, target in loader:
        parts_x1.append(x1)
        parts_x2.append(x2)
        parts_y.append(target)
    full_data = torch.utils.data.TensorDataset(
        torch.cat(parts_x1, dim=0),
        torch.cat(parts_x2, dim=0),
        torch.cat(parts_y, dim=0).long())
    return _make_loader(full_data, True)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,043
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/data/data_preprocessing.py
|
"""
Preprocess Criteo dataset. This dataset was used for the Display Advertising
Challenge (https://www.kaggle.com/c/criteo-display-ad-challenge).
Derived from https://github.com/chenxijun1029/DeepFM_with_PyTorch
"""
import os
import random
import collections
# There are 13 integer features and 26 categorical features
# (raw column 0 is the click label, hence the 1-based feature ranges).
continuous_features = range(1, 14)
categorical_features = range(14, 40)
# Clip integer features. The clip point for each integer feature
# is derived from the 95% quantile of the total values in each feature
# NOTE(review): clipping is currently disabled in ContinuousFeatureGenerator.gen
# (the clip lines are commented out), so this table is presently unused.
continuous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
class CategoryDictGenerator:
    """
    Generate dictionary for each of the categorical features.

    After build(), self.dicts[i] maps a raw category string of feature i to an
    integer id: ids 1..V are assigned by decreasing frequency (ties broken by
    the string itself) and id 0 is reserved for '<unk>'.
    """
    def __init__(self, num_feature):
        # One frequency counter per categorical feature.
        self.dicts = []
        self.num_feature = num_feature
        for i in range(0, num_feature):
            self.dicts.append(collections.defaultdict(int))
    def build(self, datafile, categorial_features, cutoff=0):
        """Count category occurrences in tab-separated `datafile`, then freeze
        each counter into an id mapping, dropping categories seen < cutoff times.

        categorial_features: column positions of the categorical fields.
        """
        with open(datafile, 'r') as f:
            for line in f:
                features = line.rstrip('\n').split('\t')
                for i in range(0, self.num_feature):
                    if features[categorial_features[i]] != '':
                        self.dicts[i][features[categorial_features[i]]] += 1
        for i in range(0, self.num_feature):
            kept = [item for item in self.dicts[i].items() if item[1] >= cutoff]
            kept.sort(key=lambda item: (-item[1], item[0]))
            # FIX: the previous zip(*kept) unpacking raised ValueError when every
            # category of a feature fell below the cutoff; an empty vocabulary
            # now simply yields the '<unk>'-only mapping.
            vocab = [word for word, _ in kept]
            self.dicts[i] = dict(zip(vocab, range(1, len(vocab) + 1)))
            self.dicts[i]['<unk>'] = 0
    def gen(self, idx, key):
        """Return the id of `key` for feature `idx` (0 for unseen categories)."""
        if key not in self.dicts[idx]:
            res = self.dicts[idx]['<unk>']
        else:
            res = self.dicts[idx][key]
        return res
    def dicts_sizes(self):
        """Vocabulary size (including the '<unk>' entry) per feature."""
        return [len(self.dicts[idx]) for idx in range(0, self.num_feature)]
class ContinuousFeatureGenerator:
    """
    Clip continuous features.
    """
    def __init__(self, num_feature):
        self.num_feature = num_feature
    def gen(self, idx, val):
        """Parse raw field `val` of continuous feature `idx`; '' maps to 0.0."""
        if val == '':
            return 0.0
        # Clipping to continuous_clip[idx] (the 95% quantile) is intentionally
        # left disabled here.
        return float(val)
def preprocess(datadir, outdir):
    """
    All the 13 integer features are normalized to continuous values and these
    continuous features are combined into one vector with dimension 13.
    Each of the 26 categorical features are one-hot encoded and all the one-hot
    vectors are combined into one sparse binary vector.

    Reads `datadir`/train.txt (raw Criteo DAC: label, 13 integers, 26
    categorical hashes, tab-separated) and writes to `outdir`:
    - feature_sizes.txt: per-feature vocabulary sizes,
    - total_source_data.txt / total_target_data.txt: CSV rows
      "continuous,categorical,label", split by domain.
    """
    continuous_dict = ContinuousFeatureGenerator(len(continuous_features))
    categorical_dict = CategoryDictGenerator(len(categorical_features))
    # Categories seen fewer than 200 times collapse to '<unk>'.
    categorical_dict.build(
        os.path.join(datadir, 'train.txt'), categorical_features, cutoff=200)
    dict_sizes = categorical_dict.dicts_sizes()
    with open(os.path.join(outdir, 'feature_sizes.txt'), 'w') as feature_sizes:
        sizes = [1] * len(continuous_features) + dict_sizes
        sizes = [str(i) for i in sizes]
        feature_sizes.write(','.join(sizes))
    random.seed(0)
    # Saving the data used for training
    # FIX: the source file was written as 'total_source.txt' while the loader
    # (CriteoDataset) reads 'total_source_data.txt'; the name now mirrors the
    # target file's naming.
    with open(os.path.join(outdir, 'total_source_data.txt'), 'w') as out_source, \
            open(os.path.join(outdir, 'total_target_data.txt'), 'w') as out_target, \
            open(os.path.join(datadir, 'train.txt'), 'r') as f:
        for line in f:
            features = line.rstrip('\n').split('\t')
            continuous_vals = []
            for i in range(0, len(continuous_features)):
                val = continuous_dict.gen(i, features[continuous_features[i]])
                # Trim trailing zeros/dot so integer values serialize compactly.
                continuous_vals.append("{0:.6f}".format(val).rstrip('0').rstrip('.'))
            categorical_vals = []
            for i in range(0, len(categorical_features)):
                val = categorical_dict.gen(i, features[categorical_features[i]])
                categorical_vals.append(str(val))
            continuous_vals = ','.join(continuous_vals)
            categorical_vals = ','.join(categorical_vals)
            label = features[0]
            # Rows whose raw column 30 (categorical feature 17) equals this hash
            # form the target domain; everything else is source.
            # NOTE(review): presumably a publisher/site id — confirm the choice.
            if features[30] == "2005abd1":
                out_target.write(','.join([continuous_vals, categorical_vals, label]) + '\n')
            else:
                out_source.write(','.join([continuous_vals, categorical_vals, label]) + '\n')
if __name__ == "__main__":
    # Expects the raw Kaggle DAC dump under ../data/dac; outputs go to ../data.
    preprocess('../data/dac', '../data')
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,044
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/experiments/launcher/experiments_mnist_mnistm.py
|
# Shared grid defaults for all MNIST -> MNIST-M experiment classes below.
activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 1.0
refinement = 1
n_epochs_refinement = 10
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
# Run five seeds when estimating run-to-run variance, one fixed seed otherwise.
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class DannMNISTMNISTM(object):
    """DANN on MNIST -> MNIST-M with all features kept (upper_bound=1): the
    full-information baseline. Each GRID value is a list; the launcher expands
    their cartesian product with sklearn's ParameterGrid."""
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-epoch_to_start_align": [11],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }
class DannIgnoreMNISTMNISTM(object):
    """DANN "ignore" variant: adapt_only_first=1 restricts adaptation to the
    first (non-missing) view.

    NOTE(review): unlike DannMNISTMNISTM, this grid omits "-lambda_regul_s" —
    confirm that is intentional rather than an oversight."""
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-epoch_to_start_align": [11],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-adapt_only_first": [1],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannZeroImputMNISTMNISTM(object):
    """DANN zero-imputation baseline: upper_bound=0 zeroes the missing part
    instead of imputing it. Uses a smaller learning rate (10**-2.5) than the
    other DANN grids."""
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-epoch_to_start_align": [11],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2.5],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DannImputMNISTMNISTM(object):
    """Full DANN + imputation model (mode dann_imput): reconstructor, two
    discriminators, and the MSE / adaptation toggles from the shared constants
    above. stop_grad=0 lets gradients flow through the imputation path."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann_imput"],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-epoch_to_start_align": [11],
        "-is_balanced": [1],
        "-stop_grad": [0],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-bigger_reconstructor": [1],
        "-weight_d2": [weight_d2],
        "-weight_mse": [weight_mse],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-bigger_discrim": [0],
        "-init_lr": [10 ** -2],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }
class DjdotMNISTMNISTM(object):
    """DeepJDOT baseline with all features (upper_bound=1). Uses the larger
    batch size (500) typical of the OT-based alignment runs."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-epoch_to_start_align": [11],
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }
class DjdotIgnoreMNISTMNISTM(object):
    """DeepJDOT "ignore" variant: same grid as DjdotMNISTMNISTM plus
    adapt_only_first=1 (adapt only the non-missing view)."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-epoch_to_start_align": [11],
        "-init_lr": [10 ** -2],
        "-adapt_only_first": [1],
        "-random_seed": random_seed
    }
class DjdotZeroImputMNISTMNISTM(object):
    """DeepJDOT zero-imputation baseline: upper_bound=0 zeroes the missing
    part; otherwise identical to DjdotMNISTMNISTM."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-epoch_to_start_align": [11],
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }
class DjdotImputMNISTMNISTM(object):
    """DeepJDOT + imputation model (mode djdot_imput) with the bigger
    reconstructor. stop_grad=1 here (unlike the dann_imput grid, which uses
    stop_grad=0)."""
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot_imput"],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["MNISTM"],
        "-is_balanced": [1],
        "-epoch_to_start_align": [11],
        "-stop_grad": [1],
        "-djdot_alpha": [0.1],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-bigger_reconstructor": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-random_seed": random_seed
    }
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,045
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/dataset/sampler.py
|
from copy import deepcopy
import torchvision
import torch.utils.data
import random
import numpy as np
"""
Adapted from https://github.com/galatolofederico/pytorch-balanced-batch
"""
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
    """Sampler yielding a label-balanced permutation of dataset indices.

    Minority classes are oversampled with replacement up to the size of the
    largest class, then indices are emitted round-robin over the labels so
    every consecutive window of len(keys) indices is class-balanced.
    """
    def __init__(self, dataset, in_memory=False, is_criteo=False):
        # label -> list of dataset indices (padded to the majority-class size)
        self.dataset = {}
        self.dataset_backup = {}
        self.balanced_max = 0
        self.in_memory = in_memory
        self.is_criteo = is_criteo
        # Save all the indices for all the classes
        for idx in range(0, len(dataset)):
            label = self._get_label(dataset, idx)
            if label not in self.dataset:
                self.dataset[label] = []
            self.dataset[label].append(idx)
            self.balanced_max = len(self.dataset[label]) \
                if len(self.dataset[label]) > self.balanced_max else self.balanced_max
        # Oversample the classes with fewer elements than the max
        for label in self.dataset:
            while len(self.dataset[label]) < self.balanced_max:
                self.dataset[label].append(random.choice(self.dataset[label]))
        self.keys = list(self.dataset.keys())
        self.currentkey = 0
        # Pristine copy used to refill self.dataset after each full pass.
        self.dataset_backup = deepcopy(self.dataset)
    def __iter__(self):
        # All per-label lists have equal length after the padding in __init__,
        # so they empty together; the loop ends after one full balanced pass.
        while len(self.dataset[self.keys[self.currentkey]]) > 0:
            yield self.dataset[self.keys[self.currentkey]].pop()
            self.currentkey = (self.currentkey + 1) % len(self.keys)
        # Reshuffle within each label and restore the pools for the next epoch.
        for label in self.dataset_backup:
            np.random.shuffle(self.dataset_backup[label])
        self.dataset = deepcopy(self.dataset_backup)
    def _get_label(self, dataset, idx):
        # Dispatch on the dataset flavour. The CriteoDataset check compares the
        # type's string form to avoid importing it here (dataset_criteo imports
        # this module, so a direct import would be circular).
        dataset_type = type(dataset)
        if dataset_type is torchvision.datasets.SVHN:
            return dataset.labels[idx].item()
        elif str(dataset_type) == "<class 'src.dataset.dataset_criteo.CriteoDataset'>":
            return dataset.y[idx].item()
        elif self.in_memory:
            # In-memory TensorDataset: the label is field 1 (digits tuples)
            # or field 2 (criteo (X1, X2, label) tuples).
            if not self.is_criteo:
                return dataset[idx][1].item()
            else:
                return dataset[idx][2].item()
        elif dataset_type is torchvision.datasets.ImageFolder:
            return dataset.imgs[idx][1]
        else:
            # NOTE(review): train_labels is deprecated/removed in recent
            # torchvision MNIST-style datasets — `targets` may be needed there.
            return dataset.train_labels[idx].item()
    def __len__(self):
        # One balanced pass: balanced_max samples from each of the label pools.
        return self.balanced_max * len(self.keys)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,046
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/models/digits/dann_imput_digits.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import cycle
from time import clock as tick
from experiments.launcher.config import DatasetConfig
import numpy as np
from src.eval.utils_eval import evaluate_data_imput_classifier, evaluate_domain_imput_classifier
from src.plotting.utils_plotting import plot_data_frontier_digits
from src.utils.network import weight_init_glorot_uniform
from src.utils.utils_network import build_label_domain, set_lr, get_models_imput, get_optimizer_imput, entropy_loss
class DANNImput(object):
    """Domain-adversarial network (DANN) with feature imputation for digits.

    Two feature extractors process complementary crops of each image
    (top / bottom halves selected by ``crop_ratio``); the target domain only
    provides crop 1, and a ``reconstructor`` imputes the missing crop-2
    features.  Two gradient-reversal domain classifiers align (d1) the joint
    features across domains and (d2) the imputed vs. true crop-2 features.
    An optional entropy/pseudo-label refinement phase follows training.
    """

    def __init__(self, data_loader_train_s, data_loader_train_t, model_config, cuda=False,
                 logger_file=None, data_loader_test_s=None, data_loader_test_t=None,
                 dataset=DatasetConfig(), data_loader_train_s_init=None, n_class=10):
        """Build the networks, gradient-reversal wrappers and optimizers.

        :param data_loader_train_s: labelled source-domain training loader
        :param data_loader_train_t: target-domain training loader
        :param model_config: hyper-parameter container (lr, weights, flags...)
        :param cuda: move all modules/tensors to GPU when True
        :param logger_file: logger used for all progress output
        :param data_loader_test_s / data_loader_test_t: evaluation loaders
        :param dataset: DatasetConfig with image size / channel count
        :param data_loader_train_s_init: loader for the source-only warm-up phase
        :param n_class: number of output classes
        """
        self.data_loader_train_s = data_loader_train_s
        self.data_loader_train_t = data_loader_train_t
        self.data_loader_test_t = data_loader_test_t
        self.data_loader_test_s = data_loader_test_s
        self.data_loader_train_s_init = data_loader_train_s_init
        # Refinement (pseudo-labelling) phase settings
        self.refinement = model_config.refinement
        self.n_epochs_refinement = model_config.n_epochs_refinement
        self.lambda_regul = model_config.lambda_regul
        self.lambda_regul_s = model_config.lambda_regul_s
        self.threshold_value = model_config.threshold_value
        # Domain labels: source/"true crop-2" = 1, target/"imputed crop-2" = 0
        self.domain_label_s = self.domain_label_true2 = 1
        self.domain_label_t = self.domain_label_fake2 = 0
        self.cuda = cuda
        self.logger = logger_file
        # Row index where the image is split into the two crops
        self.crop_dim = int(dataset.im_size * model_config.crop_ratio)
        self.adaptive_lr = model_config.adaptive_lr
        self.dataset = dataset
        # Ablation switches for the three adaptation losses
        self.activate_adaptation_imp = model_config.activate_adaptation_imp
        self.activate_mse = model_config.activate_mse
        self.activate_adaptation_d1 = model_config.activate_adaptation_d1
        self.epoch_to_start_align = model_config.epoch_to_start_align
        self.lr_decay_epoch = model_config.epoch_to_start_align
        self.lr_decay_factor = 0.5
        self.output_fig = model_config.output_fig
        self.initialize_model = model_config.initialize_model
        self.model_config = model_config
        # Scales applied inside the gradient-reversal layers (ramped up in fit)
        self.grad_scale1 = self.grad_scale2 = 1.0
        self.weight_d2 = model_config.weight_d2
        self.weight_mse = model_config.weight_mse
        # NOTE(review): self.nb_epochs is read in fit() but never assigned here —
        # presumably set externally before calling fit(); confirm against callers.
        feat_extractor1, feat_extractor2, data_classifier, domain_classifier1, domain_classifier2, reconstructor = \
            get_models_imput(model_config, n_class, dataset)
        feat_extractor1.apply(weight_init_glorot_uniform)
        feat_extractor2.apply(weight_init_glorot_uniform)
        data_classifier.apply(weight_init_glorot_uniform)
        domain_classifier1.apply(weight_init_glorot_uniform)
        domain_classifier2.apply(weight_init_glorot_uniform)
        reconstructor.apply(weight_init_glorot_uniform)
        _parent_class = self

        # adding gradient reversal layer transparently to the user
        class GradReverse1(torch.autograd.Function):
            # Identity forward; backward negates and scales by grad_scale1
            @staticmethod
            def forward(self, x):
                return x.clone()

            @staticmethod
            def backward(self, grad_output):
                return grad_output.neg() * _parent_class.grad_scale1

        class GradReverse2(torch.autograd.Function):
            # Identity forward; backward negates and scales by grad_scale2
            @staticmethod
            def forward(self, x):
                return x.clone()

            @staticmethod
            def backward(self, grad_output):
                return grad_output.neg() * _parent_class.grad_scale2

        class GRLDomainClassifier1(nn.Module):
            # Domain classifier 1 behind its gradient-reversal layer
            def __init__(self, domain_classifier):
                super(GRLDomainClassifier1, self).__init__()
                self.domain_classifier1 = domain_classifier

            def forward(self, input):
                x = GradReverse1.apply(input)
                x = self.domain_classifier1.forward(x)
                return x

        class GRLDomainClassifier2(nn.Module):
            # Domain classifier 2 behind its gradient-reversal layer
            def __init__(self, domain_classifier):
                super(GRLDomainClassifier2, self).__init__()
                self.domain_classifier2 = domain_classifier

            def forward(self, input):
                x = GradReverse2.apply(input)
                x = self.domain_classifier2.forward(x)
                return x

        self.feat_extractor1 = feat_extractor1
        self.feat_extractor2 = feat_extractor2
        self.data_classifier = data_classifier
        self.reconstructor = reconstructor
        self.grl_domain_classifier1 = GRLDomainClassifier1(domain_classifier1)
        self.grl_domain_classifier2 = GRLDomainClassifier2(domain_classifier2)
        if self.cuda:
            self.feat_extractor1.cuda()
            self.feat_extractor2.cuda()
            self.data_classifier.cuda()
            self.grl_domain_classifier1.cuda()
            self.grl_domain_classifier2.cuda()
            self.reconstructor.cuda()
        # One optimizer per sub-network (g=extractors, h=reconstructor, d=domain)
        self.optimizer_data_classifier, self.optimizer_g1, self.optimizer_g2, self.optimizer_h, self.optimizer_d1, \
            self.optimizer_d2 = get_optimizer_imput(model_config, self)
        self.init_lr = model_config.init_lr

    def fit(self):
        """Train: optional source-only warm-up, adversarial alignment,
        then optional pseudo-label refinement, and final evaluation."""
        self.loss_history = []
        self.error_history = []
        # Complementary binary masks selecting the two image crops
        self.mask_1 = torch.ones(size=(self.dataset.channel, self.dataset.im_size, self.dataset.im_size))
        self.mask_2 = torch.ones(size=(self.dataset.channel, self.dataset.im_size, self.dataset.im_size))
        if self.cuda:
            self.mask_1 = self.mask_1.cuda()
            self.mask_2 = self.mask_2.cuda()
        # mask_1 keeps rows >= crop_dim, mask_2 keeps rows < crop_dim
        self.mask_1[:, :self.crop_dim, :] = 0.0
        self.mask_2[:, self.crop_dim:, :] = 0.0
        if self.initialize_model:
            # Phase 1: supervised warm-up on source data only (no adaptation)
            self.logger.info("Initialize model")
            for epoch in range(self.epoch_to_start_align):
                self.feat_extractor1.train()
                self.feat_extractor2.train()
                self.data_classifier.train()
                tic = tick()
                for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s_init):
                    y_batch_s = y_batch_s.view(-1)
                    self.feat_extractor1.zero_grad()
                    self.feat_extractor2.zero_grad()
                    self.data_classifier.zero_grad()
                    if self.cuda:
                        X_batch_s = X_batch_s.cuda()
                        y_batch_s = y_batch_s.cuda()
                    X_batch_s1 = torch.mul(X_batch_s, self.mask_1)
                    X_batch_s2 = torch.mul(X_batch_s, self.mask_2)
                    size = X_batch_s.size()
                    output_feat_s1 = self.feat_extractor1(X_batch_s1)
                    output_feat_s2 = self.feat_extractor2(X_batch_s2)
                    # Classify on concatenated crop-1 + crop-2 features
                    output_class_s = self.data_classifier(torch.cat((output_feat_s1, output_feat_s2), 1))
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    loss.backward()
                    self.optimizer_g1.step()
                    self.optimizer_g2.step()
                    self.optimizer_data_classifier.step()
                toc = tick() - tic
                self.logger.info(
                    "\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss:{:.6f}".format(
                        epoch, self.nb_epochs, toc, loss.item(), 0))
                if epoch % 5 == 0 and epoch != 0:
                    # evaluate_data_imput_classifier(self, is_test=False, is_target=False)
                    evaluate_data_imput_classifier(self, is_test=True, is_target=True)
                self.loss_history.append(loss.item())
                self.error_history.append(loss.item())
            start_epoch = self.epoch_to_start_align
            # NOTE(review): `size` is the last warm-up batch size; undefined if
            # the loader is empty.
            self.logger.info(f"Finished initializing with batch size: {size}")
        else:
            start_epoch = 0
        # Phase 2: adversarial alignment with imputation
        self.logger.info("Start aligning")
        for epoch in range(start_epoch, self.nb_epochs):
            self.feat_extractor1.train()
            self.feat_extractor2.train()
            self.data_classifier.train()
            self.grl_domain_classifier1.train()
            self.grl_domain_classifier2.train()
            self.reconstructor.train()
            tic = tick()
            # Cycle the (possibly shorter) target loader alongside the source loader
            self.T_batches = cycle(iter(self.data_loader_train_t))
            for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s):
                y_batch_s = y_batch_s.view(-1)
                # p: training progress in [0, 1], drives lr decay and GRL ramp-up
                p = (batch_idx + (epoch - start_epoch) * len(self.data_loader_train_s)) / (
                        len(self.data_loader_train_s) * (self.nb_epochs - start_epoch))
                if self.adaptive_lr:
                    # DANN learning-rate annealing schedule
                    lr = self.init_lr / (1. + 10 * p) ** 0.75
                    set_lr(self.optimizer_d1, lr)
                    set_lr(self.optimizer_d2, lr)
                    set_lr(self.optimizer_g1, lr)
                    set_lr(self.optimizer_g2, lr)
                    set_lr(self.optimizer_h, lr)
                    set_lr(self.optimizer_data_classifier, lr)
                self.feat_extractor1.zero_grad()
                self.feat_extractor2.zero_grad()
                self.data_classifier.zero_grad()
                self.grl_domain_classifier1.zero_grad()
                self.grl_domain_classifier2.zero_grad()
                self.reconstructor.zero_grad()
                X_batch_t, y_batch_t = next(self.T_batches)
                if self.cuda:
                    X_batch_t = X_batch_t.cuda()
                    X_batch_s = X_batch_s.cuda()
                    y_batch_s = y_batch_s.cuda()
                X_batch_s1 = torch.mul(X_batch_s, self.mask_1)
                X_batch_s2 = torch.mul(X_batch_s, self.mask_2)
                # Target domain only exposes crop 1; crop 2 will be imputed
                X_batch_t1 = torch.mul(X_batch_t, self.mask_1)
                size_t1 = X_batch_t1.size(0)
                size_s1 = X_batch_s1.size(0)
                size_s2 = X_batch_s2.size(0)
                output_feat_s1 = self.feat_extractor1(X_batch_s1)
                output_feat_s2 = self.feat_extractor2(X_batch_s2)
                # Impute crop-2 features from crop-1 features (both domains)
                output_feat_s2_imputed = self.reconstructor(output_feat_s1)
                output_feat_t1 = self.feat_extractor1(X_batch_t1)
                output_feat_t2 = self.reconstructor(output_feat_t1)
                # -----------------------------------------------------------------
                # source classification
                # -----------------------------------------------------------------
                output_class_s = self.data_classifier(torch.cat((output_feat_s1, output_feat_s2), 1))
                loss = F.cross_entropy(output_class_s, y_batch_s)
                # -----------------------------------------------------------------
                # domain classification
                # -----------------------------------------------------------------
                if self.activate_adaptation_d1:
                    # GRL scale ramps from 0 to 1 as training progresses
                    self.grad_scale1 = 2. / (1. + np.exp(-10 * p)) - 1
                    input_domain_s1_1 = torch.cat((output_feat_s1, output_feat_s2_imputed), 1)
                    output_domain_s1_1 = self.grl_domain_classifier1(input_domain_s1_1)
                    label_domain_s1 = build_label_domain(self, size_s1, self.domain_label_s)
                    error_s1 = F.cross_entropy(output_domain_s1_1, label_domain_s1)
                    input_domain_t1_1 = torch.cat((output_feat_t1, output_feat_t2), 1)
                    output_domain_t1_1 = self.grl_domain_classifier1(input_domain_t1_1)
                    label_domain_t1 = build_label_domain(self, size_t1, self.domain_label_t)
                    error_t1 = F.cross_entropy(output_domain_t1_1, label_domain_t1)
                    dist_loss1 = (error_s1 + error_t1)
                else:
                    dist_loss1 = torch.zeros(1).cuda() if self.cuda else torch.zeros(1)
                # -----------------------------------------------------------------
                # imputation
                # -----------------------------------------------------------------
                if self.activate_adaptation_imp:
                    self.grad_scale2 = 2. / (1. + np.exp(-10 * p)) - 1
                    # Adversarially match imputed (fake) vs extracted (true) crop-2 features
                    output_domain_s2_imputed = self.grl_domain_classifier2(output_feat_s2_imputed)
                    label_domain_fake2 = build_label_domain(self, size_s2, self.domain_label_fake2)
                    error_s2_imputed = F.cross_entropy(output_domain_s2_imputed, label_domain_fake2)
                    output_domain_s2_true = self.grl_domain_classifier2(output_feat_s2)
                    label_domain_true2 = build_label_domain(self, size_s2, self.domain_label_true2)
                    error_s2 = F.cross_entropy(output_domain_s2_true, label_domain_true2)
                    dist_loss2 = self.weight_d2 * (error_s2 + error_s2_imputed)
                else:
                    dist_loss2 = torch.zeros(1).cuda() if self.cuda else torch.zeros(1)
                # MSE
                if self.activate_mse:
                    # L2 distance between true and imputed crop-2 features (source only)
                    dist_loss_mse = self.weight_mse * torch.dist(output_feat_s2, output_feat_s2_imputed, 2)
                else:
                    dist_loss_mse = torch.zeros(1).cuda() if self.cuda else torch.zeros(1)
                error = loss + dist_loss1 + dist_loss2 + dist_loss_mse
                error.backward()
                self.optimizer_data_classifier.step()
                self.optimizer_d1.step()
                self.optimizer_d2.step()
                self.optimizer_h.step()
                self.optimizer_g1.step()
                self.optimizer_g2.step()
            toc = tick() - tic
            self.logger.info("\nTrain epoch: {}/{} {:2.2f}s \tTotalLoss: {:.6f} LossS: {:.6f} Dist_loss1:{:.6f} "
                             "Dist_loss2:{:.6f} Dist_lossMSE:{:.6f}".format(epoch, self.nb_epochs, toc, error.item(), loss.item(),
                                                                            dist_loss1.item(), dist_loss2.item(), dist_loss_mse.item()))
            self.loss_history.append(loss.item())
            self.error_history.append(error.item())
            if epoch % 5 == 0 and epoch != 0:
                evaluate_data_imput_classifier(self, is_test=True, is_target=False)
                evaluate_data_imput_classifier(self, is_test=True, is_target=True)
                evaluate_domain_imput_classifier(self, self.data_loader_test_s, self.data_loader_test_t,
                                                 is_imputation=False, comments="Domain1 test")
                evaluate_domain_imput_classifier(self, self.data_loader_test_s, self.data_loader_test_t,
                                                 is_imputation=True, comments="Domain2 test")
        if self.refinement:
            # Phase 3: fine-tune with confident pseudo-labels + entropy loss on target
            self.logger.info("Refinement")
            n_epochs_refinement = self.n_epochs_refinement
            lambda_regul = self.lambda_regul
            lambda_regul_s = self.lambda_regul_s
            threshold_value = self.threshold_value
            # Refine at a tenth of the initial learning rate
            set_lr(self.optimizer_data_classifier, self.init_lr / 10)
            set_lr(self.optimizer_g1, self.init_lr / 10)
            set_lr(self.optimizer_g2, self.init_lr / 10)
            set_lr(self.optimizer_h, self.init_lr / 10)
            for epoch in range(self.nb_epochs, self.nb_epochs + n_epochs_refinement):
                evaluate_data_imput_classifier(self, is_test=True, is_target=False)
                evaluate_data_imput_classifier(self, is_test=True, is_target=True)
                self.data_classifier.train()
                self.feat_extractor1.train()
                self.feat_extractor2.train()
                self.reconstructor.train()
                self.T_batches = cycle(iter(self.data_loader_train_t))
                for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s):
                    self.data_classifier.zero_grad()
                    self.feat_extractor1.zero_grad()
                    self.feat_extractor2.zero_grad()
                    self.reconstructor.zero_grad()
                    y_batch_s = y_batch_s.view(-1)
                    X_batch_t, y_batch_t = next(self.T_batches)
                    if self.cuda:
                        X_batch_t = X_batch_t.cuda()
                        X_batch_s = X_batch_s.cuda()
                        y_batch_s = y_batch_s.cuda()
                    X_batch_s1 = torch.mul(X_batch_s, self.mask_1)
                    X_batch_s2 = torch.mul(X_batch_s, self.mask_2)
                    X_batch_t1 = torch.mul(X_batch_t, self.mask_1)
                    output_feat_s1 = self.feat_extractor1(X_batch_s1)
                    output_feat_s2 = self.feat_extractor2(X_batch_s2)
                    output_feat_t1 = self.feat_extractor1(X_batch_t1)
                    output_feat_t2 = self.reconstructor(output_feat_t1)
                    # Source Domain Data
                    output_class_s = self.data_classifier(torch.cat((output_feat_s1, output_feat_s2), 1))
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    # Target Domain Data
                    output_class_t = self.data_classifier(torch.cat((output_feat_t1, output_feat_t2), 1))
                    # Confident predictions (max prob above threshold) become pseudo-labels;
                    # the rest only contribute an entropy penalty
                    threshold_index = F.log_softmax(output_class_t).data.max(1)[0] > np.log(threshold_value)
                    loss_t_ent = entropy_loss(output_class_t[~threshold_index])
                    y_batch_pseudo_t = output_class_t.data.max(1)[1][threshold_index]
                    if torch.sum(threshold_index) > 0:
                        loss_t = F.cross_entropy(output_class_t[threshold_index], y_batch_pseudo_t)
                    else:
                        loss_t = torch.zeros(1).cuda() if self.cuda else torch.zeros(1)
                    n_pseudo_labelled = torch.sum(threshold_index).item()
                    error = lambda_regul_s * loss + loss_t + lambda_regul * loss_t_ent
                    error.backward()
                    self.optimizer_data_classifier.step()
                    self.optimizer_g1.step()
                    self.optimizer_g2.step()
                    self.optimizer_h.step()
                self.logger.info(
                    "\nTrain epoch: {}/{} \tTotalLoss: {:.6f} LossS: {:.6f} LossT: {:.6f} EntropyT: {:.6f}".format(
                        epoch, self.nb_epochs + n_epochs_refinement, error.item(), lambda_regul_s * loss.item(),
                        loss_t.item(), lambda_regul * loss_t_ent.item()))
                self.logger.info("N_Pseudo: {:.1f}".format(n_pseudo_labelled))
        # Final evaluation on both domains and both domain classifiers
        self.loss_test_s, self.acc_test_s, _, _ = \
            evaluate_data_imput_classifier(self, is_test=True, is_target=False)
        self.loss_test_t, self.acc_test_t, _, _ = \
            evaluate_data_imput_classifier(self, is_test=True, is_target=True)
        self.loss_d1_test, self.acc_d1_test = evaluate_domain_imput_classifier(
            self, self.data_loader_test_s, self.data_loader_test_t, is_imputation=False, comments="Domain1 test")
        self.loss_d2_test, self.acc_d2_test = evaluate_domain_imput_classifier(
            self, self.data_loader_test_s, self.data_loader_test_t, is_imputation=True, comments="Domain2 test")
        if self.output_fig:
            plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "dann_imput_100", is_imput=True)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,060
|
freddi-kit/deeplearning_workshop
|
refs/heads/master
|
/network.py
|
import chainer
import chainer.links as L
import chainer.functions as F
import math
class Network(chainer.Chain):
    """Simple CNN: a configurable stack of 3x3 convolutions followed by two
    fully-connected layers (hidden 1000 units, then `output` logits).

    ReLU is applied after EVERY layer, including the final linear one, in both
    training and prediction (kept as-is to match existing trained weights).
    """

    def __init__(self, sizes, output):
        """
        :param sizes: output channel count of each convolution layer
        :param output: number of classes (size of the final linear layer)
        """
        super(Network, self).__init__()
        w = chainer.initializers.HeNormal()
        links = []
        c_k, c_s, c_p = 3, 3, 1  # conv kernel / stride / padding
        self.m_k, self.m_s, self.m_p = 3, 1, 1  # pooling params (currently unused)
        for i in range(len(sizes)):
            # First conv takes the 3 RGB input channels
            in_channels = sizes[i - 1] if i > 0 else 3
            links += [('conv{}'.format(i),
                       L.Convolution2D(in_channels, sizes[i], c_k, c_s, c_p, initialW=w))]
        # in_size=None lets chainer infer the flattened conv output size
        links += [('linear0', L.Linear(in_size=None, out_size=1000))]
        links += [('linear2', L.Linear(1000, output))]
        for link in links:
            self.add_link(*link)
        # Ordered (name, link) list defining the forward pass
        self.forward = links

    def __call__(self, x, t):
        """Forward pass + softmax cross-entropy loss against labels `t`."""
        h = x
        for name, f in self.forward:
            h = F.relu(f(h))
        loss = F.softmax_cross_entropy(h, t)
        return h, loss

    def predict(self, x):
        """Forward pass returning class probabilities (softmax).

        Fix: the original had an if/else on the layer name whose two branches
        were byte-identical (dead code); the loop below is the same behavior.
        """
        h = x
        for name, f in self.forward:
            h = F.relu(f(h))
        return F.softmax(h)
|
{"/pict_test.py": ["/network.py"], "/pict_train.py": ["/network.py"]}
|
28,061
|
freddi-kit/deeplearning_workshop
|
refs/heads/master
|
/pict_test.py
|
# Evaluation script: classify every image under a directory tree with a saved
# model and print overall accuracy.
# Usage: python pict_test.py <input_size> <conv_channels_csv> <model_npz> <data_dir>
import sys,os
from PIL import Image
from network import Network
from chainer import Variable,serializers,cuda
from chainer import functions as F
import numpy as np
import random
from chainer.datasets import mnist
# GPU id (-1 = CPU); `batch` is unused in this script
GPU = -1
batch = 10
argv = sys.argv
# Side length images are resized to before inference
input_size = int(argv[1])
network_sizes = []
for i in argv[2].split(','):
    network_sizes.append(int(i))
# Class labels are the sorted sub-directory names of the data directory
dir_train = argv[4]+'/'
dir_lists = sorted(os.listdir(dir_train))
net = Network(network_sizes,len(dir_lists))
serializers.load_npz(argv[3],net)
if GPU >= 0:
    gpu_device = 0
    cuda.get_device(GPU).use()
    net.to_gpu(GPU)
    xp = cuda.cupy
else:
    xp = np
train_data = []
train_label = []
test_data = []
test_label = []
for i in dir_lists:
    sub_dirs = os.listdir(argv[4]+'/'+i+'/')
    for j in xp.random.permutation(range(len(sub_dirs))):
        # Load, resize and normalise each image to CHW float32 in [0, 1]
        img = Image.open(argv[4]+'/'+i+'/'+sub_dirs[j])
        img = img.resize((input_size,input_size)).convert('RGB')
        img = xp.asarray(img,dtype=xp.float32).transpose((2,0,1))/255.
        test_data += [img]
        test_label += [dir_lists.index(i)]
acc=0
# Accumulate accuracy over single-image batches
for i in range(0,len(test_data)):
    x = Variable(xp.asarray([test_data[i]],dtype=xp.float32))
    t = Variable(xp.asarray([test_label[i]]))
    y = net.predict(x)
    accuracy = F.accuracy(y, t)
    accuracy.to_cpu()
    acc+=accuracy.data
print((acc/len(test_data))*100,'%')
|
{"/pict_test.py": ["/network.py"], "/pict_train.py": ["/network.py"]}
|
28,062
|
freddi-kit/deeplearning_workshop
|
refs/heads/master
|
/pict_train.py
|
# Training script: fit the CNN on images organised as <data_dir>/<class>/<file>.
# Usage: python pict_train.py <input_size> <conv_channels_csv> <data_dir>
import sys,os
from PIL import Image
from network import Network
from chainer import Variable,optimizers,serializers,cuda
from chainer import functions as F
import numpy as np
import random
from chainer.datasets import mnist
# GPU id (-1 = CPU); mini-batch size
GPU = -1
batch = 5
argv = sys.argv
input_size = int(argv[1])
network_sizes = []
for i in argv[2].split(','):
    network_sizes.append(int(i))
# Class labels are the sorted sub-directory names of the data directory
dir_train = argv[3]+'/'
dir_lists = sorted(os.listdir(dir_train))
epoch = 500
net = Network(network_sizes,len(dir_lists))
optimizer = optimizers.SGD()
optimizer.setup(net)
if GPU >= 0:
    cuda.get_device(GPU).use()
    net.to_gpu(GPU)
    xp = cuda.cupy
else:
    xp = np
train_data = []
train_label = []
test_data = []
test_label = []
for i in dir_lists:
    sub_dirs = os.listdir(argv[3]+'/'+i+'/')
    for j in xp.random.permutation(range(len(sub_dirs))):
        # Load, resize and normalise each image to CHW float32 in [0, 1]
        img = Image.open(argv[3]+'/'+i+'/'+sub_dirs[j])
        img = img.resize((input_size,input_size)).convert('RGB')
        img = xp.asarray(img,dtype=xp.float32).transpose((2,0,1))/255.
        train_data += [img]
        train_label += [dir_lists.index(i)]
for e in range(epoch):
    # Reshuffle the training set each epoch
    train_data_sub = []
    train_label_sub = []
    for i in xp.random.permutation(range(len(train_data))):
        train_data_sub += [train_data[i]]
        train_label_sub += [train_label[i]]
    print('epoch',e)
    for i in range(0,len(train_data_sub),batch):
        x = Variable(xp.asarray(train_data_sub[i:i+batch],dtype=xp.float32))
        t = Variable(xp.asarray(train_label_sub[i:i+batch]))
        y,loss = net(x,t)
        net.cleargrads()
        loss.backward()
        optimizer.update()
    print(loss.data)
    # Checkpoint every 10 epochs (directory `model/` must already exist)
    if e % 10 == 0:
        serializers.save_npz('model/model.npz',net)
|
{"/pict_test.py": ["/network.py"], "/pict_train.py": ["/network.py"]}
|
28,063
|
AlperenKanik/basiczoo
|
refs/heads/master
|
/hayvanlar.py
|
import sqlite3
class hayvanlar:
    """Thin sqlite3 wrapper that inserts animals into a per-type table."""

    def __init__(self):
        # Opens (or creates) the zoo database; connection lives as long as the object
        self.con = sqlite3.connect("hayvanat_bahcesi.db")
        self.cursor = self.con.cursor()

    def ekle(self, Zooz, TasmaNo, HayvanAdi, HayvanTuru):
        # NOTE(review): the table name is user-supplied and interpolated into the
        # SQL string — SQL-injection risk; row values themselves are bound safely.
        # NOTE(review): no commit here — confirm writes are flushed elsewhere.
        self.cursor.execute("INSERT INTO {Zoo_z} VALUES(?,?,?)".format(Zoo_z = Zooz),
                            (TasmaNo, HayvanAdi, HayvanTuru))


# Module-level singleton (intentionally shadows the class name; callers use
# hayvanlar.hayvanlar.ekle(...))
hayvanlar = hayvanlar()
|
{"/main.py": ["/tur.py", "/hayvanlar.py"]}
|
28,064
|
AlperenKanik/basiczoo
|
refs/heads/master
|
/kutuphane.py
|
import sqlite3
def login():
    """Prompt for a student number and password, look the account up in
    kutuphane.db, greet the user on success, and offer to retry on failure."""
    import time  # local import: only needed for the goodbye pause

    while True:
        ogrencino = input("lütfen öğrenci numaranızı girin:")
        sifre = input("lütfen şifrenizi girin:")
        with sqlite3.connect("kutuphane.db") as db:
            cursor = db.cursor()
            findUser = ("SELECT * FROM user WHERE ogrencino = ? And sifre = ?")
            cursor.execute(findUser, [(ogrencino), (sifre)])
            results = cursor.fetchall()
        if results:
            for i in results:
                # Fix: i(2) tried to CALL the row tuple (TypeError);
                # i[2] indexes the first-name column.
                print("MERHABALAR" + i[2])
            # so the program does not close right away
            break
        else:
            print("yanlış kullanıcı adı veya şifre")
            again = input("tekrar denemek ister misiniz ?E/H")
            if again.lower() == "h":
                print("iyi günler")
                # Fix: `time` was used without being imported anywhere
                time.sleep(1)
                break
def newUser():
    """Interactively register a new account in kutuphane.db, refusing
    duplicate student numbers."""
    found = 0
    while found == 0:
        ogrencino = input(" lütfen ögrenci numaranızı girin")
        with sqlite3.connect("kutuphane.db") as db:
            cursor = db.cursor()
            findUser = ("SELECT * FROM user WHERE ogrencino = ?")
            # Fix: ((ogrencino)) is just the string itself, not a 1-tuple —
            # sqlite3 then treats each CHARACTER as a binding and errors out
            # for any student number longer than one character.
            cursor.execute(findUser, (ogrencino,))
            if cursor.fetchall():
                print("zaten bir üyeliğiniz mevcut")
            else:
                found = 1
                ad = input("adınız")
                soyad = input("soyadınız")
                sifre = input("sifre")
                insertData = '''INSERT INTO user(ogrencino,ad,soyad,sifre)
                VALUES(?,?,?,?)'''
                cursor.execute(insertData, [(ogrencino), (ad), (soyad), (sifre)])
                db.commit()


# NOTE(review): runs interactively at import time — confirm intended.
newUser()
def menu():
    """Top-level text menu: register, log in, or exit the application."""
    import sys  # local import: `sys` was used without being imported anywhere

    while True:
        print("BASKENT KUTUPHANESİ SİSTEMİNE HOSGELDİNİZ")
        menu = ('''
        1 - ÜYE OL
        2- GİRİŞ YAP
        3- UYGULAMAYI KAPAT /n''')
        userChoice = input(menu)
        if userChoice == "1":
            newUser()
        elif userChoice == "2":
            login()
        elif userChoice == "3":
            # Fix: this branch tested "2" again, so option 3 (exit) was unreachable
            sys.exit()
        else:
            print("yanlış giriş yaptınız")


menu()
|
{"/main.py": ["/tur.py", "/hayvanlar.py"]}
|
28,065
|
AlperenKanik/basiczoo
|
refs/heads/master
|
/tur.py
|
import sqlite3  # Fix: was "mport sqlite3" — a SyntaxError on import


class tur:
    """Creates one sqlite table per animal type in the zoo database."""

    def __init__(self):
        # Opens (or creates) the zoo database; connection lives as long as the object
        self.con = sqlite3.connect("hayvanat_bahcesi.db")
        self.cursor = self.con.cursor()

    def ekle(self, Zooz):
        # NOTE(review): the table name is user-supplied and interpolated into
        # the SQL string — SQL-injection risk.
        self.cursor.execute("CREATE TABLE IF NOT EXISTS {Zoo_z}"
                            "(TasmaNo INT, "
                            "HayvanAdi TEXT, "
                            "HayvanTuru TEXT)".format(Zoo_z=Zooz))


# Fix: was `Zoozz = Zoozz()` — a NameError; the intended class is `tur`.
Zoozz = tur()
|
{"/main.py": ["/tur.py", "/hayvanlar.py"]}
|
28,066
|
AlperenKanik/basiczoo
|
refs/heads/master
|
/main.py
|
# Interactive zoo console: A adds an animal-type table, B adds an animal, C quits.
import tur, hayvanlar
import random

while True:
    menu = input("$$$-Hayvanat Bahçesine HOŞGELDİNİZ!-$$$\n "
                 "tür eklemek için A'ye tıklayın. \n "
                 "Öğrenci eklemek için B'ye tıklayın: \n"
                 "Çıkış için C'ya tıklayınız: \n")
    if menu == "C" or menu == "c":
        print("##-ßß-CYA-###")
        break
    else:
        Sınıfİsmi = input("Hayvanın türünü giriniz: \n")
        if menu == "A" or menu == "a":
            # Fix: `zoo`/`zooz` were undefined names; the singleton lives in tur
            tur.Zoozz.ekle(Sınıfİsmi)
        elif menu == "B" or menu == "b":
            # Fix: this branch tested "C"/"c" again, which is unreachable after
            # the break above — "B" is the add-animal option per the menu text.
            # Fix: TasmaNO/TasmaNo name mismatch caused a NameError.
            TasmaNo = random.randint(5001, 9998)
            HayvanAdi = input("Hayvanın adını giriniz: \n")
            HayvanTuru = input("Hayvanın türünü giriniz: \n")
            # Fix: `zoo.Zoo` was undefined; the insert helper is hayvanlar's singleton
            hayvanlar.hayvanlar.ekle(Sınıfİsmi, TasmaNo, HayvanAdi, HayvanTuru)
|
{"/main.py": ["/tur.py", "/hayvanlar.py"]}
|
28,067
|
AlperenKanik/basiczoo
|
refs/heads/master
|
/main2.py
|
# One-off setup script: create the `user` table in kutuphane.db, seed it with
# a single account, then dump the table contents.
import sqlite3
with sqlite3.connect("kutuphane.db") as db:
    cursor = db.cursor()
    cursor.execute('''
    CREATE TABLE IF NOT EXISTS user(
    userID INTEGER PRIMARY KEY,
    ogrencino VARCHAR(20) NOT NULL,
    ad VARCHAR(20) NOT NULL,
    soyad VARCHAR(20) NOT NULL,
    sifre VARCHAR(20) NOT NULL);
    ''')
    # NOTE(review): no uniqueness constraint on ogrencino, so re-running this
    # script inserts a duplicate seed row each time — confirm intended.
    cursor.execute("""
    INSERT INTO user(ogrencino,ad,soyad,sifre)
    VALUES("21794986","Alperen","Kanık","123")
    """)
    db.commit()
    cursor.execute("SELECT * FROM user")
    print(cursor.fetchall())
|
{"/main.py": ["/tur.py", "/hayvanlar.py"]}
|
28,069
|
delebasyq/algorithmicTrader
|
refs/heads/master
|
/get_data.py
|
import datetime
import pandas as pd
import yfinance as yf
from finta import TA
class GetData:
    """
    Class to retrieve the training data for a given stock
    """
    NUM_DAYS = 365  # history window (days) for training downloads
    INDICATORS = ['EMA', 'RSI', 'MACD']  # finta.TA method names to compute
    NOTUSED_STATE = ['high', 'low', 'open', 'Adj Close', 'volume']  # columns excluded from the state

    def __init__(self, stock, train):
        """
        Download daily OHLCV history for a stock via yfinance and normalise
        the column names to lowercase.

        :param stock: symbol of the stock
        :param train: True -> past NUM_DAYS days of data; False -> today only
        """
        # Consolidated: the two branches differed only in the date range and
        # the debug print, so download/rename is done once.
        if train:
            start = (datetime.date.today() - datetime.timedelta(self.NUM_DAYS))
            end = datetime.datetime.today()
        else:
            start = datetime.date.today()
            end = datetime.datetime.today() + datetime.timedelta(1)
        self.data = yf.download(stock, start=start, end=end, interval='1d')
        self.data.rename(columns={"Close": 'close', "High": 'high', "Low": 'low',
                                  'Volume': 'volume', 'Open': 'open'}, inplace=True)
        if train:
            print(self.data)

    def get_indicator_data(self):
        """
        Add each indicator in INDICATORS as new column(s) of self.data.

        Fix: indicator dispatch used eval('TA.' + name + '(self.data)'), which
        executes arbitrary strings; getattr does the same lookup safely.
        """
        for indicator in self.INDICATORS:
            ind_data = getattr(TA, indicator)(self.data)
            if not isinstance(ind_data, pd.DataFrame):
                # Some finta indicators return a Series; merge needs a DataFrame
                ind_data = ind_data.to_frame()
            self.data = self.data.merge(ind_data, left_index=True, right_index=True)

    def update_data(self, symbol):
        """Refresh self.data with today's 1-minute bars and return the new state."""
        start = datetime.date.today()
        end = datetime.datetime.today() + datetime.timedelta(1)
        self.data = yf.download(symbol, start=start, end=end, interval='1m')
        self.data.rename(columns={"Close": 'close', "High": 'high', "Low": 'low',
                                  'Volume': 'volume', 'Open': 'open'}, inplace=True)
        return self.format_data()

    def format_data(self):
        """
        Return the data in a form that can be passed into the neural net
        (flattened numpy array of the non-excluded columns); also cached on self.vec.
        """
        state = self.data.drop(self.NOTUSED_STATE, axis=1)
        self.vec = state.values.flatten()
        return self.vec
|
{"/train_reinforce.py": ["/agent.py", "/env.py"], "/env.py": ["/get_data.py", "/trader.py"], "/eval.py": ["/trader.py", "/env.py", "/agent.py"]}
|
28,070
|
delebasyq/algorithmicTrader
|
refs/heads/master
|
/train_reinforce.py
|
from agent import Agent
from env import Environment
import sys
# Training hyper-parameters: state window length, number of episodes, replay batch
WINDOW_SIZE = 10
EPOCHS = 200
BATCH_SIZE = 30
# Ticker symbol from the command line: python train_reinforce.py SYMBOL
stock_symbol = sys.argv[1]
def train_stock_model(agent, stockenv):
    """Run EPOCHS+1 episodes of DQN training over the stock's history,
    checkpointing the model and printing profit statistics after each one."""
    for e in range(EPOCHS + 1):
        print("Episode " + str(e) + "/" + str(EPOCHS))
        state = stockenv.get_state(t=WINDOW_SIZE + 1)
        agent.inventory = []
        stockenv.reset_params()
        for t in range(WINDOW_SIZE, stockenv.data_len-1):
            action = agent.act(state)
            next_state = stockenv.get_state(t = t + 1)
            reward = stockenv.step(agent, action, t)
            # NOTE(review): the loop stops at data_len-2, so done is never True here
            done = True if t == stockenv.data_len - 1 else False
            agent.memorize(state, action, reward, next_state, done)
            state = next_state
            # Fit the model once enough transitions have been banked
            if len(agent.memory) > BATCH_SIZE:
                agent.replay(BATCH_SIZE)
        # Checkpoint the model after every episode
        dir = stock_symbol +'_models'
        agent.model.save(dir+ "/model_ep" + str(e))
        price = stockenv.stock.vec[len(stockenv.stock.vec) - 1]
        book, net = stockenv.p.get_net_worth(stockenv.symbol, price)
        print(' Net Profit : ' + str(net - 100000))
        print('History is : ', stockenv.history)
        print('Buys : ' + str(stockenv.buy_count) +' Sells: ' + str(stockenv.sell_count) + '\n')
env = Environment(WINDOW_SIZE, EPOCHS, BATCH_SIZE, stock_symbol)
# NOTE(review): Agent.__init__ is (state_size, is_eval=False, model_name="");
# passing stock_symbol positionally lands in is_eval — confirm against agent.py.
agent = Agent(WINDOW_SIZE, stock_symbol)
train_stock_model(agent, env)
|
{"/train_reinforce.py": ["/agent.py", "/env.py"], "/env.py": ["/get_data.py", "/trader.py"], "/eval.py": ["/trader.py", "/env.py", "/agent.py"]}
|
28,071
|
delebasyq/algorithmicTrader
|
refs/heads/master
|
/agent.py
|
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense
from keras.optimizers import Adam
import numpy as np
import random
from collections import deque
class Agent:
    """DQN trading agent with an epsilon-greedy policy over {hold, buy, sell}."""

    def __init__(self, state_size, is_eval=False, model_name=""):
        """
        Initialization of the Agent
        :param state_size: WINDOW_LENGTH, used for the input dimension of the model
        :param is_eval: bool variable to determine if we are using a saved model or not
        :param model_name: name of the model in the directory models/
        """
        self.state_size = state_size  # normalized previous days
        self.action_size = 3  # sit, buy, sell
        self.memory = deque(maxlen=1000)  # replay buffer of (s, a, r, s', done)
        self.inventory = []
        self.model_name = model_name
        self.is_eval = is_eval
        # RL hyper-parameters: discount and epsilon-greedy exploration schedule
        self.gamma = 0.95
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        # Fix: the network builder was itself named `model`, so assigning the
        # built network to self.model shadowed the method on the instance;
        # renamed to the private _build_model.
        self.model = load_model("amd_models/" + model_name) if is_eval else self._build_model()

    def _build_model(self):
        """Create and compile the Q-network mapping a state to per-action values."""
        model = Sequential()
        model.add(Dense(units=64, input_dim=self.state_size, activation="relu"))
        model.add(Dense(units=32, activation="relu"))
        model.add(Dense(units=8, activation="relu"))
        model.add(Dense(self.action_size, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=0.001))
        return model

    def act(self, state):
        """
        Epsilon-greedy action selection.
        :param state: numpy array representation of the current state
        :return: 0 : Hold 1 : Buy 2 : Sell
        """
        if not self.is_eval and np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        options = self.model.predict(state)
        return np.argmax(options[0])

    def replay(self, batch_size):
        """
        Experience-replay training pass over the most recent `batch_size`
        transitions in memory; decays epsilon afterwards.
        :param batch_size: number of memorized transitions to fit on
        """
        mini_batch = []
        l = len(self.memory)
        for i in range(l - batch_size + 1, l):
            mini_batch.append(self.memory[i])
        for state, action, reward, next_state, done in mini_batch:
            target = reward
            if not done:
                # Bellman update: bootstrap from the best next-state Q-value
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def memorize(self, state, action, reward, next_state, done):
        """Append a (state, action, reward, next_state, done) transition to memory."""
        self.memory.append((state, action, reward, next_state, done))
|
{"/train_reinforce.py": ["/agent.py", "/env.py"], "/env.py": ["/get_data.py", "/trader.py"], "/eval.py": ["/trader.py", "/env.py", "/agent.py"]}
|
28,072
|
delebasyq/algorithmicTrader
|
refs/heads/master
|
/env.py
|
import math
import numpy as np
from get_data import GetData
from trader import Portfolio
class Environment:
    """Trading environment: serves sigmoid-transformed windowed price states
    and shapes rewards for the agent's buy/sell/hold actions."""

    def __init__(self, WINDOW_LENGTH, EPOCHS, BATCH_SIZE, symbol, train=True):
        """
        Constants used for things like
        - WINDOW_LENGTH : how many elements of data are in a state
        - EPOCHS : number of episodes the model is trained for
        - BATCH_SIZE : how many cycles we go until we fit the agent's model
        """
        self.WINDOW_LENGTH = WINDOW_LENGTH
        self.EPOCHS = EPOCHS
        self.BATCH_SIZE = BATCH_SIZE
        self.AMPLIFIER = 1000  # scales the raw price-difference rewards
        """
        Variables for the environment data (prices of the stock)
        Train variable to determine if were being run in training mode or trading mode
        """
        self.stock = GetData(symbol, train)
        self.data = self.stock.format_data()
        self.symbol = symbol
        # NOTE(review): captured once; the live branch of get_state refreshes
        # self.data but never this length - confirm the slice end is intended.
        self.data_len = len(self.data)
        self.train = train
        self.p = Portfolio('lucas')
        """
        Parameters that are subject to reset after every training episode
        """
        self.buy_count = 0
        self.sell_count = 0
        self.active_positions = 0
        self.history = []  # per-step action log: 'H' / 'B' / 'S'

    def get_state(self, t=0):
        """
        Function to break the data up into window sized chunks
        Returns an n sized array up until t
        If we are in train mode, we already have all of the data, so we use the t iterator to determine where we want the state to end
        Otherwise, we need to pull the next minute of data, and retrieve the last WINDOW_LENGTH elements
        :return: a numpy array of length WINDOW_SIZE with the sigmoid transformed data
        """
        def sigmoid(x):
            # Squash a price delta into (0, 1).
            return 1 / (1 + math.exp(-x))
        n = self.WINDOW_LENGTH + 1
        if self.train:  # A length check for when we run the live trader
            d = t - n
            block = self.data[d:t + 1] if d >= 0 else np.append(-d * [self.data[0]], self.data[0:t + 1])  # pad with t0
        else:  # If we are not training, we just need to grab the last WINDOW_SIZE + 1 # of elements
            self.data = self.stock.update_data(self.symbol)  # Get the updated minute-by-minute data
            block = self.data[self.data_len - n:self.data_len + 1]
        res = []
        for i in range(n - 1):
            # Consecutive deltas, squashed to (0, 1).
            res.append(sigmoid(block[i + 1] - block[i]))
        return np.array([res])

    def step(self, agent, action, t):
        """
        Function that will determine the reward for the agent depending on the action
        *** Only used during training ***
        -For a buy, we check how many better options there were (where the price was cheaper)
        -For a sell, we check if there was a better time to sell (where the price was more expensive)
        :param agent:
        :param action:
        :param t: index of data
        :return: reward value, the more positive -> better
        """
        if action == 0:  # Hold
            self.history.append('H')
            return 0
        if action == 1:  # Buy
            self.buy_count += 1
            self.history.append('B')
            buy_price = self.stock.vec[t]
            self.p.place_buy_order(self.symbol, buy_price)
            diff = self.p.get_avg_price(self.symbol) - buy_price
            # If we are buying at a lower price than our avg price, give it a reward
            # (note: the +1 makes every buy worth at least AMPLIFIER * 3).
            return (max(diff, 0) + 1) * self.AMPLIFIER * 3
        if action == 2:  # Sell
            if not self.p.have_stock(self.symbol):
                return 0  # nothing held: selling is a no-op
            self.sell_count += 1
            self.history.append('S')
            sell_price = self.stock.vec[t]
            self.p.place_sell_order(self.symbol, sell_price)
            diff = sell_price - self.p.get_avg_price(self.symbol)
            # If we are selling at a gain, give it a reward
            return max(diff, 0) * self.AMPLIFIER

    def reset_params(self):
        """
        Function to reset some parameters at the beginning of every episode
        :return:
        """
        self.buy_count = 0
        self.sell_count = 0
        self.history = []
        self.p.reset_info()  # also restores the portfolio's cash and holdings
|
{"/train_reinforce.py": ["/agent.py", "/env.py"], "/env.py": ["/get_data.py", "/trader.py"], "/eval.py": ["/trader.py", "/env.py", "/agent.py"]}
|
28,073
|
delebasyq/algorithmicTrader
|
refs/heads/master
|
/eval.py
|
import trader
from env import Environment
from agent import Agent
import datetime
import sys
WINDOW_SIZE = 10            # price deltas per state fed to the agent
EPOCHS = 200                # passed through to Environment's constructor
BATCH_SIZE = 95             # passed through to Environment's constructor
MODEL_NAME = 'model_ep199'  # saved Keras model loaded from amd_models/
stock_name = sys.argv[1]    # ticker symbol supplied on the command line
def run_trader():
    """Run the live trading loop.

    Once per day, in the 15:30 minute (near the close), feed the latest
    state to the trained agent and place a buy or sell order for AMD
    through the portfolio.
    """
    import time  # local import: polling delays for the wait loop

    p = trader.Portfolio('lucas')
    a = Agent(WINDOW_SIZE, is_eval=True, model_name=MODEL_NAME)
    e = Environment(WINDOW_SIZE, EPOCHS, BATCH_SIZE, stock_name, train=False)
    t_330pm = datetime.time(hour=15, minute=30)
    while True:
        now = datetime.datetime.now().time()
        # Compare at minute granularity: exact equality against a time object
        # with zero seconds/microseconds would essentially never be True.
        in_close_window = (now.hour, now.minute) == (t_330pm.hour, t_330pm.minute)
        if in_close_window and trader.is_market_open():  # Perform the action near the end of the day
            state = e.get_state()
            action = a.act(state)
            if action == 1:  # BUY
                p.place_buy_order('AMD', e.stock.vec[-1])
            elif action == 2:  # SELL
                p.place_sell_order('AMD', e.stock.vec[-1])
            time.sleep(60)  # don't trade again within the same minute
        time.sleep(1)  # avoid a hot busy-wait loop
# Main program that will use the trained model to trade
if __name__ == "__main__":
    run_trader()
|
{"/train_reinforce.py": ["/agent.py", "/env.py"], "/env.py": ["/get_data.py", "/trader.py"], "/eval.py": ["/trader.py", "/env.py", "/agent.py"]}
|
28,074
|
delebasyq/algorithmicTrader
|
refs/heads/master
|
/trader.py
|
# This file is the main point of control for the trader.
# It calls on the models built in trading.py to generate predictions
# and place buy and sell orders.
import datetime
import json
import pandas as pd
import pytz, holidays
import yfinance as yf
import time
import os
# Global data and DS, dict to keep stocks and their current bullish or bearish status
# True to indicate a bullish status, False to indicate a bearish one
# NOTE(review): not referenced elsewhere in this module's visible code - confirm external use.
_stock_list = {'AMD': True, 'ATZ.TO': True, 'DOO.TO': True, 'QSR.TO': True}
def is_market_open(now=None):
    """Return True when the US market is trading at *now* (default: current
    US/Eastern time): a non-holiday weekday between 09:30 and 16:00."""
    eastern = pytz.timezone('US/Eastern')
    us_holidays = holidays.US()
    if not now:
        now = datetime.datetime.now(eastern)
    # Closed on US public holidays.
    if now.strftime('%Y-%m-%d') in us_holidays:
        return False
    # Closed outside the 09:30 - 16:00 session.
    session_open = datetime.time(hour=9, minute=30, second=0)
    session_close = datetime.time(hour=16, minute=0, second=0)
    current = now.time()
    if current < session_open or current > session_close:
        return False
    # Closed on Saturday (5) and Sunday (6).
    return now.date().weekday() <= 4
class Portfolio:
    """
    The class that represents the users trading portfolio
    Contains information about their balance and functions to process trades
    To retain information about the portfolio when the program stops, it saves all info to a json
    """

    def __init__(self, username):
        """
        Load the user's saved portfolio if a json file exists for them,
        otherwise create a new user with a default balance of $10,000.
        :param username: the username you would like your json file to be called
        """
        self.filename = username + "_trades.json"
        self.PCT_OF_MAX = 0.14  # fraction of the max affordable/held shares traded per order
        self.STOP_LOSS = 0.92   # stop-loss trigger as a fraction of the average buy price
        if os.path.exists(self.filename):
            print("File exists for " + username)
            with open(self.filename, 'r') as f:
                json_obj = json.load(f)
            self.username = username
            self.balance = json_obj['balance']
            self.stocks = json_obj['stocks']
        else:
            print("User doesn't exist, creating a new portfolio")
            self.balance = 10_000  # Initial balance to trade with
            self.username = username
            self.stocks = {}
            self.write_to_json()

    def place_buy_order(self, symbol, price):
        """
        Process a buy: deduct quantity * price from the balance and record the
        shares and their book value in self.stocks, then persist to json.
        TODO : Add in support that indicates stop loss point
        :param symbol: symbol of the stock being bought
        :param price: the price the stock was bought at
        :return: no return value, modifies the data and rewrites the json
        """
        # Only a fraction of the maximum affordable shares may be bought per order.
        max_possible = int(self.balance / price)
        quantity = int(self.PCT_OF_MAX * max_possible)
        amt = price * quantity
        if self.balance >= amt:
            self.balance -= amt
            if self.have_stock(symbol):
                # Already own some: accumulate shares and book value.
                self.stocks[symbol]['num_shares'] += quantity
                self.stocks[symbol]['book_value'] += amt
            else:
                # First purchase of this symbol.
                self.stocks[symbol] = {'num_shares': quantity, 'book_value': amt}
            self.write_to_json()
        # else: insufficient funds - silently skip (best-effort trading loop)

    def place_sell_order(self, symbol, price):
        """
        Sell a fraction of the held shares of *symbol* at *price*, returning
        the proceeds to the balance and persisting to json.
        """
        # First make sure we actually hold the stock.
        if self.have_stock(symbol):
            # Only a fraction of the held shares may be sold per order.
            quantity = int(self.PCT_OF_MAX * self.stocks[symbol]['num_shares'])
            curr_avg = self.stocks[symbol]['book_value'] / self.stocks[symbol]['num_shares']
            if self.stocks[symbol]['num_shares'] >= quantity:
                # Remove the sold shares from the books at their average cost.
                self.stocks[symbol]['book_value'] -= curr_avg * quantity
                self.stocks[symbol]['num_shares'] -= quantity
                self.balance += quantity * price  # proceeds returned to the account
                self.write_to_json()
        # else: we don't own the stock (or asked for more than held) - skip

    def write_to_json(self):
        """Persist username, balance and holdings (called after every trade)."""
        user_info = {'username': self.username,
                     'balance': self.balance,
                     'stocks': self.stocks}
        # Context manager guarantees the handle is closed even on error.
        with open(self.filename, "w") as f:
            f.write(json.dumps(user_info, indent=4))

    def get_avg_price(self, symbol):
        """Average cost per held share of *symbol*, or 0 when not held."""
        if self.have_stock(symbol):
            return self.stocks[symbol]['book_value'] / self.stocks[symbol]['num_shares']
        return 0

    def get_stop_loss_point(self, symbol):
        """Price below which the position should be closed (None when not held)."""
        if self.have_stock(symbol):
            return self.get_avg_price(symbol) * self.STOP_LOSS

    def have_stock(self, symbol):
        """Return True when *symbol* is currently held in the portfolio."""
        return symbol in self.stocks

    def get_net_worth(self, symbol, price):
        """
        Return (balance + book value, balance + market value) for *symbol*,
        where market value is held shares times the current *price*.
        Returns None when the stock is not held.
        """
        if self.have_stock(symbol):
            total_bookval = self.stocks[symbol]['book_value']
            total_worth = self.stocks[symbol]['num_shares'] * price
            return self.balance + total_bookval, self.balance + total_worth

    def get_balance(self):
        """Current cash balance."""
        return self.balance

    def reset_info(self):
        """Reset the portfolio between training episodes and persist it.

        NOTE(review): resets the balance to 100,000 while __init__ starts new
        users at 10,000 - confirm which figure is intended.
        """
        self.balance = 100000
        self.stocks = {}
        self.write_to_json()
def filter_for_day_trading(path):
    """
    Filter out any stocks that may not be a good fit for day trading, such as low volume,
    low price fluctuation, and high price per share
    Parameters
    ----------
    path : `str`
        The path to the csv that contains stock symbol data
    Returns
    -------
    None
        Generates a new csv (filtered_symbols.csv) with the kept symbols,
        does not return a value
    """
    symbols = pd.read_csv(path)
    # Get the string values for today's date and one week ago.
    today = datetime.datetime.today()
    last_week = today - datetime.timedelta(days=7)
    today = today.strftime('%Y-%m-%d')
    last_week = last_week.strftime('%Y-%m-%d')
    symbol_lst = []
    for _, row in symbols.iterrows():  # `_` avoids shadowing the row index name
        symbol = row['Symbol']
        data = yf.download(symbol, start=last_week, end=today, interval='1m')
        # Need enough minute bars: a fully-documented trading week is ~1900 rows.
        if len(data.index) > 1400:
            # Renamed from min/max so the builtins aren't shadowed.
            price_min = data['Close'].min()
            price_max = data['Close'].max()
            avg = (price_min + price_max) / 2
            percent_diff = (price_max - price_min) / avg * 100
            vol_mean = data['Volume'].mean()
            # Candidate for day trading: liquid, volatile, and affordable.
            if vol_mean > 15000 and percent_diff > 5.0 and price_max < 80:
                symbol_lst.append(symbol)
                print(percent_diff, vol_mean, price_max, symbol)
    df = pd.DataFrame(symbol_lst, columns=['Symbol'])
    df.to_csv('filtered_symbols.csv', index=False, header=True)
|
{"/train_reinforce.py": ["/agent.py", "/env.py"], "/env.py": ["/get_data.py", "/trader.py"], "/eval.py": ["/trader.py", "/env.py", "/agent.py"]}
|
28,085
|
xyahh/xBlade
|
refs/heads/master
|
/Characters/char_class.py
|
from pico2d import *
from General.bounding_box import BoundingBox
from Sound import sound_manager as sound
from Logo import logo
file_name = "CharacterClass"
def clamp(minimum, value, maximum):
    """Return *value* limited to the inclusive range [minimum, maximum]."""
    if value < minimum:
        return minimum
    if value > maximum:
        return maximum
    return value
class Character:
    """A playable fighter: sprite animation, simple jump/gravity physics,
    HP/lives management, attack collision and keyboard input handling."""
    # CONSTANTS
    PIXEL_PER_METER, GRAVITY_M2PS, GRAVITY_P2PS = None, None, None
    RUN_SPEED_KMPH, RUN_SPEED_MPS, RUN_SPEED_PPS = None, None, None
    JUMP_SPEED_KMPH, JUMP_SPEED_MPS, JUMP_SPEED_PPS = None, None, None
    TIME_PER_ACTION, ACTION_PER_TIME = None, None
    LIVES_PER_CHAR = None
    DEPTH_SIZE = None
    STATES = {}
    ACTIONS = {}
    CHAR_SCALE = None
    # END CONSTANTS
    heart = None  # life-icon media shared by all characters (loaded once)

    @staticmethod
    def init_const():
        """Load physics/animation constants from Characters/constants.txt.

        NOTE(review): the `global` statements bind MODULE-level names, so the
        class attributes declared above remain None; the rest of this class
        reads the bare module-global names - confirm the indirection is intended.
        NOTE(review): `json` is not imported explicitly here; presumably it is
        brought into scope by `from pico2d import *` - confirm.
        """
        # all constants named global
        global PIXEL_PER_METER, GRAVITY_M2PS, GRAVITY_P2PS, RUN_SPEED_KMPH, RUN_SPEED_MPS, RUN_SPEED_PPS
        global JUMP_SPEED_KMPH, JUMP_SPEED_MPS, JUMP_SPEED_PPS, TIME_PER_ACTION, ACTION_PER_TIME
        global CHAR_SCALE, STATES, ACTIONS, LIVES_PER_CHAR, DEPTH_SIZE
        constants_file = open('Characters/constants.txt', 'r')
        constants = json.load(constants_file)
        constants_file.close()
        PIXEL_PER_METER = constants['Physics']['PIXEL_PER_METER']
        RUN_SPEED_KMPH = constants['Physics']['RUN_SPEED_KMPH']
        RUN_SPEED_MPS = RUN_SPEED_KMPH * 1000.0 / 3600.0  # convert km/h to m/s
        RUN_SPEED_PPS = RUN_SPEED_MPS * PIXEL_PER_METER
        GRAVITY_M2PS = constants['Physics']['GRAVITY_M2PS']
        GRAVITY_P2PS = GRAVITY_M2PS * PIXEL_PER_METER * PIXEL_PER_METER
        JUMP_SPEED_KMPH = constants['Physics']['JUMP_SPEED_KMPH']
        JUMP_SPEED_MPS = JUMP_SPEED_KMPH * 1000.0 / 3600.0
        JUMP_SPEED_PPS = JUMP_SPEED_MPS * PIXEL_PER_METER
        DEPTH_SIZE = constants['Physics']['DEPTH_SIZE']
        TIME_PER_ACTION = constants['Events']['TIME_PER_ACTION']
        ACTION_PER_TIME = 1 / TIME_PER_ACTION
        STATES = {}
        # Each entry maps both name -> state value and state value -> the
        # corresponding StateMap entry.
        for i in constants['States']:
            STATES.update({i: constants['States'][i], constants['States'][i]: constants['StateMap'][i]})
        ACTIONS = {}
        # Each entry maps action key -> name, and name -> {'is_attack': bool}.
        for i in constants['Actions']:
            ACTIONS.update({i: constants['Actions'][i]['name'],
                            constants['Actions'][i]['name']: {'is_attack': constants['Actions'][i]['is_attack']}
                            })
        CHAR_SCALE = constants['Char_Scale']
        LIVES_PER_CHAR = constants['Char_Lives']

    def init_vars(self, player_id, spawn_x, spawn_y, spawn_state, spawn_action):
        """Reset all per-life mutable state (keys, timers, position, lives)."""
        self.jump_key_down = self.left_key_down = self.right_key_down = False
        self.frame = self.total_frames = self.last_key = self.accel = 0
        self.last_x, self.last_y = (spawn_x, spawn_y)
        self.spawn_x, self.spawn_y = (spawn_x, spawn_y)
        self.x, self.y = (spawn_x, spawn_y)
        self.curr_time = self.start_time = 0
        self.i_hit, self.im_hit, self.extra_jump = False, False, False
        self.action = spawn_action
        self.player_id = player_id
        self.lives = LIVES_PER_CHAR
        self.state = STATES[spawn_state]

    def init_chars(self, char_name):
        """Load the chosen character's sprite sheets and HP-bar media from
        Characters/characters.txt and the sprite file it references."""
        sprite = {}
        chars_file = open('Characters/characters.txt', 'r')
        char_info = json.load(chars_file)
        chars_file.close()
        for name in char_info:
            if name == char_name:
                sprite_file = open(char_info[name]['sprite'], 'r')
                sprite_info = json.load(sprite_file)
                sprite_file.close()
                for action in sprite_info:
                    sprite[action] = sprite_info[action]
                    sprite[action].update({"img": load_image(sprite[action]['path'])})
                self.char = char_info[name]
                self.char.update({"name": name, "sprite": sprite})
                # Replace the HP-bar paths with loaded images.
                self.char['hp'].update({"bar": load_image(self.char['hp']['bar']),
                                        "red": load_image(self.char['hp']['red'])})
                self.max_hp = self.hp = self.char['hp']['hp']

    def init_other_media(self):
        """Load fonts, per-player name colors and the shared heart icon."""
        font_path = open('General/font.txt', 'r')
        font_info = json.load(font_path)
        font_path.close()
        self.font = load_font(font_info['font']['path'], font_info['font']['size'])
        self.font2 = load_font(font_info['font']['path'], int(font_info['font']['size'] * 0.5))
        self.player_colors = {}
        for id in font_info['player_colors']:
            self.player_colors[int(id)] = (font_info['player_colors'][id]['R'],
                                           font_info['player_colors'][id]['G'],
                                           font_info['player_colors'][id]['B'])
        media_path = open('Characters/media.txt', 'r')
        media_info = json.load(media_path)
        media_path.close()
        if Character.heart is None:  # load the shared heart icon only once
            Character.heart = media_info['heart']
            Character.heart.update({'img': load_image(Character.heart['img'])})

    def init_bounding_boxes(self):
        """Load per-action, per-state boxes (left, top, right, bottom,
        damage, heal) from the character's bounding-box file."""
        self.bounding_box = {}
        bboxes_file = open(self.char['bounding_boxes'], 'r')
        bboxes_info = json.load(bboxes_file)
        bboxes_file.close()
        for action in bboxes_info:
            bb_states = {}
            for state in bboxes_info[action]:
                bb_states.update({STATES[state]: (bboxes_info[action][state]['left'],
                                                  bboxes_info[action][state]['top'],
                                                  bboxes_info[action][state]['right'],
                                                  bboxes_info[action][state]['bottom'],
                                                  bboxes_info[action][state]['damage'],
                                                  bboxes_info[action][state]['heal'])})
            self.bounding_box.update({action: bb_states})

    def __init__(self, char_name, player_id: int, spawn_x: int, spawn_y: int, spawn_state: int, spawn_action):
        """Build a character: constants, per-life vars, media and hit boxes."""
        self.init_const()
        self.init_vars(player_id, spawn_x, spawn_y, spawn_state, spawn_action)
        self.init_chars(char_name)
        self.init_other_media()
        self.init_bounding_boxes()

    def draw(self, override_x=None, override_y=None):
        """Draw the current sprite frame, HP bar, player tag and hearts.
        Optional overrides draw at a position other than (self.x, self.y)."""
        coord_x = self.x
        coord_y = self.y
        if override_x is not None:
            coord_x = override_x
        if override_y is not None:
            coord_y = override_y
        h = self.char['sprite'][self.action]['h']
        w = self.char['sprite'][self.action]['w']
        # Clip one frame out of the sheet: column = frame, row = state.
        self.char['sprite'][self.action]['img'].clip_draw(self.frame * w, self.state * h,
                                                          w, h, coord_x, coord_y, w * CHAR_SCALE,
                                                          h * CHAR_SCALE)
        x = self.char['hp']['dx'] + coord_x
        y = self.char['hp']['dy'] + coord_y
        self.char['hp']['bar'].draw(x, y)
        h_ = self.char['hp']['bar'].h
        w_ = self.char['hp']['bar'].w
        dw = w_ * (self.hp / self.max_hp)  # red fill width proportional to HP left
        self.char['hp']['red'].draw(x - 0.5 * w_ + 0.5 * dw, y, dw, h_)
        if self.player_id > 0:
            self.font.draw(x - w_, y + h_, str(self.player_id), color=self.player_colors[self.player_id])
        else:
            # Non-player ids (<= 0) get a gray 'Test' label instead.
            self.font2.draw(x - w_ * 1.3, y + h_ * 0.8, 'Test', color=(125, 125, 125))
        heart = Character.heart
        for i in range(self.lives):
            heart['img'].draw(x - w_ * 0.5 + i * heart['w'] * 2, y + h_ + heart['h'] * 0.5, heart['w'], heart['h'])

    def check_hp(self):
        """Handle death (HP below 0, or fell below the map by more than
        DEPTH_SIZE): consume a life and respawn, or return True when the
        character is out of lives."""
        if self.hp < 0 or self.y < -self.char['sprite'][self.action]['h'] * CHAR_SCALE - DEPTH_SIZE:
            if self.lives == 0:
                return True  # game over for this character
            self.lives -= 1
            self.hp = self.max_hp
            self.x, self.y = (self.spawn_x, self.spawn_y)
            self.action = ACTIONS['MOVE']
            if self.im_hit:
                self.im_hit = False
            return False
        return False

    def update(self, frame_time, boxes):
        """One tick: death check, animation, attack resolution, physics, state.
        Returns True when this character has lost its final life."""
        game_end = self.check_hp()
        self.update_frames(frame_time)
        self.update_attack(frame_time, boxes)
        self.move(frame_time, boxes)
        self.update_state()
        return game_end

    def update_attack(self, frame_time, boxes):
        """While an attack action is active, damage any other character whose
        box collides with ours (at most once per attack) and apply self-heal."""
        if ACTIONS[self.action]['is_attack']:
            # NOTE(review): the class itself is passed as `self` to collide - confirm intended.
            sd = BoundingBox
            for i in range(len(boxes.char_box)):
                is_there_collision = sd.collide(sd, boxes.char_box[self.player_id - 1], boxes.char_box[i])
                if is_there_collision and boxes.char_id[i] != self.player_id \
                        and not self.i_hit and not boxes.char[i].im_hit:
                    dmg = self.bounding_box[self.action][self.state][sd.DAMAGE]
                    if dmg > 0:
                        boxes.char[i].hp -= dmg
                        boxes.char[i].im_hit = True
                        boxes.char[i].action = ACTIONS['IM_HIT']
                        if boxes.char[i].char['sprite'][boxes.char[i].action]['has_sound']:
                            sound.play(boxes.char[i].char['sprite'][boxes.char[i].action]['sound'])
                        boxes.char[i].total_frames = 0.0  # restart the victim's hit animation
                    # Attacks may also heal the attacker, capped at max HP.
                    self.hp += self.bounding_box[self.action][self.state][sd.HEAL]
                    if self.hp > self.max_hp:
                        self.hp = self.max_hp
                    self.i_hit = True  # the attack only lands once

    def update_frames(self, frame_time):
        """Advance the animation clock (scaled by ACTION_PER_TIME and the
        action's own action_time_ratio); non-looping actions fall back to
        MOVE once their last frame has played."""
        state_frames = STATES[self.state]
        self.total_frames += self.char['sprite'][self.action][state_frames]['frames'] * ACTION_PER_TIME * \
            frame_time * self.char['sprite'][self.action][state_frames]['action_time_ratio']
        self.frame = int(self.total_frames) % self.char['sprite'][self.action][state_frames]['frames']
        if self.action != ACTIONS['MOVE'] \
                and self.total_frames > self.char['sprite'][self.action][state_frames]['frames'] \
                and not self.char['sprite'][self.action][state_frames]['loop']:
            self.action = ACTIONS['MOVE']
            if self.im_hit:
                self.im_hit = False

    def move(self, frame_time, boxes):
        """Apply gravity/jump physics, resolve landings on map platforms, and
        run left/right, clamped to the window width."""
        self.curr_time += frame_time
        dt = self.curr_time - self.start_time
        temp_y = self.y  # previous y, used to detect crossing a platform top
        self.y = self.last_y - GRAVITY_P2PS * dt * dt  # there's always a force!
        if self.jump_key_down:
            self.y += JUMP_SPEED_PPS * dt  # only had initial speed if it has jumped
            self.accel = JUMP_SPEED_PPS - 2 * GRAVITY_P2PS * dt  # derivative of speed*t - at^2
        sd = BoundingBox
        for i in range(len(boxes.map_box)):
            # Landing only counts while moving downward (accel <= 0).
            is_there_collision = sd.collide(sd, boxes.char_box[self.player_id - 1], boxes.map_box[i]) and self.accel <= 0
            is_above_level = self.y + self.bounding_box[self.action][self.state][sd.BOTTOM] >= \
                boxes.map_box[i][sd.BOTTOM] and self.y >= boxes.map_box[i][sd.TOP]
            # Catch tunnelling: above the platform top last frame, below it now.
            time_delay_condition = boxes.map_box[i][sd.LEFT] <= self.x <= boxes.map_box[i][sd.RIGHT] and \
                self.y < boxes.map_box[i][sd.TOP] and temp_y >= boxes.map_box[i][sd.TOP]
            if (is_there_collision and is_above_level) or time_delay_condition:
                # update based on feet
                self.y = boxes.map_box[i][sd.TOP] - self.bounding_box[self.action][self.state][sd.BOTTOM]
                self.start_time = self.curr_time  # update the starting time for the next jump / fall
                self.last_y = self.y  # update position for the next jump / fall
                self.x += boxes.map.map['objects'][boxes.map_object_id[i]]['dir_x'] * frame_time  # update direction
                self.accel = 0
                self.jump_key_down, self.extra_jump = False, False
                break
        distance = RUN_SPEED_PPS * frame_time
        if self.left_key_down and not self.right_key_down or self.last_key == STATES['RUN_L']:
            self.x -= distance
        if self.right_key_down and not self.left_key_down or self.last_key == STATES['RUN_R']:
            self.x += distance
        self.x = clamp(0, self.x, logo.win_width)  # keep the character on screen

    def get_name(self):
        """Name of the loaded character."""
        return self.char['name']

    def process_action(self, ACTION_NAME):
        """Trigger an ability: switch action, play its sound, restart the
        animation, and grant the once-per-airtime extra jump if allowed."""
        self.action = ACTIONS[ACTION_NAME]
        if self.char['sprite'][self.action]['has_sound']:
            sound.play(self.char['sprite'][self.action]['sound'])
        self.i_hit = False  # the new attack hasn't landed yet
        self.total_frames = 0.0
        if self.char['sprite'][self.action][STATES[self.state]]['extra_jump'] and not self.extra_jump:
            self.extra_jump, self.jump_key_down = True, True
            self.start_time = self.curr_time = 0.0
            self.last_y = self.y

    def handle_actions(self, frame_time, event, ability1_key, ability2_key):
        """Start ability 1/2 on key press unless an ability is already active."""
        ability_list = (ACTIONS['ABILITY1'], ACTIONS['ABILITY2'])
        if event.key == ability1_key and event.type == SDL_KEYDOWN and self.action not in ability_list:
            self.process_action('ABILITY1')
        if event.key == ability2_key and event.type == SDL_KEYDOWN and self.action not in ability_list:
            self.process_action('ABILITY2')

    def handle_moves(self, frame_time, event, left_key, right_key, jump_key):
        """Track left/right/jump key state; movement is blocked while hit."""
        if event.key == left_key:
            self.left_key_down = event.type == SDL_KEYDOWN and not self.im_hit
            if self.left_key_down:
                self.last_key = STATES['RUN_L']
            else:
                self.last_key = STATES['STAND_L']
        if event.key == right_key:
            self.right_key_down = event.type == SDL_KEYDOWN and not self.im_hit
            if self.right_key_down:
                self.last_key = STATES['RUN_R']
            else:
                self.last_key = STATES['STAND_R']
        if event.key == jump_key and event.type == SDL_KEYDOWN and not self.jump_key_down and not self.im_hit:
            self.start_time = self.curr_time = self.total_frames = 0
            self.jump_key_down = True
            self.last_y = self.y

    def update_state(self):
        """Derive the sprite state (run/stand/jump, left/right) from the
        current key flags, preserving the facing direction."""
        if self.jump_key_down:
            if self.state in (STATES['RUN_L'], STATES['STAND_L']) or (self.left_key_down and not self.right_key_down):
                self.state = STATES['JUMP_L']
            elif self.state in (STATES['RUN_R'], STATES['STAND_R']) or (self.right_key_down and not self.left_key_down):
                self.state = STATES['JUMP_R']
        else:
            if self.left_key_down and not self.right_key_down:
                self.state = STATES['RUN_L']
            elif self.right_key_down and not self.left_key_down:
                self.state = STATES['RUN_R']
            elif self.right_key_down and self.left_key_down:
                self.state = self.last_key  # both held: keep the last direction
            else:
                if self.state in (STATES['RUN_L'], STATES['STAND_L'], STATES['JUMP_L']):
                    self.state = STATES['STAND_L']
                elif self.state in (STATES['RUN_R'], STATES['STAND_R'], STATES['JUMP_R']):
                    self.state = STATES['STAND_R']

    def handle_events(self, frame_time, event, player_id,
                      left_key, right_key, jump_key, down_key, ability1_key, ability2_key):
        """Dispatch one input event to this character when the id matches."""
        if player_id == self.player_id:
            self.handle_moves(frame_time, event, left_key, right_key, jump_key)
            self.handle_actions(frame_time, event, ability1_key, ability2_key)
class CharacterSelect:
    """Character-selection grid: portraits plus one cursor index per player."""

    def init_chars(self):
        """Load every selectable character's name and portrait image."""
        self.chars = []
        chars_file = open('Characters/characters.txt', 'r')
        char_info = json.load(chars_file)
        chars_file.close()
        for name in char_info:
            self.chars.append({"name": name, "img": load_image(char_info[name]['img'])})

    def init_selection_format(self):
        """Read grid-layout settings and attach each one as an attribute
        (e.g. start_x, start_y, chars_per_row, col/row distances)."""
        selection_file = open('Characters/selection_format.txt', 'r')
        selection_info = json.load(selection_file)
        selection_file.close()
        self.__dict__.update(selection_info)  # every setting becomes an attribute

    def init_players(self, player_num):
        """Create one cursor (initially 0) per player.

        NOTE(review): the `del` branch is dead code - player_choice was just
        rebound to an empty list on the previous line.
        """
        self.player_choice = []
        if len(self.player_choice) > 0: del self.player_choice
        for i in range(player_num):
            self.player_choice.append(0)

    def __init__(self, player_num):
        """Load portraits, the grid layout, and per-player cursors."""
        self.init_chars()
        self.init_selection_format()
        self.init_players(player_num)

    def size(self):
        """Number of selectable characters."""
        return len(self.chars)

    def draw(self):
        """Draw the portrait grid, chars_per_row portraits per row."""
        for i in range(self.size()):
            self.chars[i]['img'].draw(self.start_x + (i % self.chars_per_row) * self.col_dist_diff,
                                      self.start_y + int(i / self.chars_per_row) * self.row_dist_diff)

    def handle_events(self, frame_time, event, player_id, left_key, right_key, up_key, down_key):
        """Move player_id's cursor around the grid, clamped to its edges."""
        if player_id <= len(self.player_choice):
            i = player_id - 1  # cursor index for this player
            if event.key == left_key:
                if self.player_choice[i] > 0:
                    sound.play("change")
                    self.player_choice[i] -= 1
            if event.key == right_key:
                if self.player_choice[i] < self.size() - 1:
                    sound.play("change")
                    self.player_choice[i] += 1
            if event.key == down_key:
                if self.player_choice[i] < self.size() - self.chars_per_row:
                    sound.play("change")
                    self.player_choice[i] += self.chars_per_row
            if event.key == up_key:
                if self.player_choice[i] > self.chars_per_row - 1:
                    sound.play("change")
                    self.player_choice[i] -= self.chars_per_row
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,086
|
xyahh/xBlade
|
refs/heads/master
|
/General/key_mapping.py
|
from pico2d import *
# If a Key is missing, feel free to add it here.
# Lookup from human-readable key names (as written in General/controls.txt)
# to pico2d/SDL keycodes. If a Key is missing, feel free to add it here.
KEY_MAP = \
    {
        # alphabet
        "A": SDLK_a, "B": SDLK_b, "C": SDLK_c, "D": SDLK_d, "E": SDLK_e, "F": SDLK_f,
        "G": SDLK_g, "H": SDLK_h, "I": SDLK_i, "J": SDLK_j, "K": SDLK_k, "L": SDLK_l,
        "M": SDLK_m, "N": SDLK_n, "O": SDLK_o, "P": SDLK_p, "Q": SDLK_q, "R": SDLK_r,
        "S": SDLK_s, "T": SDLK_t, "U": SDLK_u, "V": SDLK_v, "W": SDLK_w, "X": SDLK_x,
        "Y": SDLK_y, "Z": SDLK_z,
        # special
        "UP": SDLK_UP, "DOWN": SDLK_DOWN, "LEFT": SDLK_LEFT, "RIGHT": SDLK_RIGHT,
        "ESC": SDLK_ESCAPE, "DEL": SDLK_DELETE, "ENTER": SDLK_RETURN, "SPACE": SDLK_SPACE,
        "BACK": SDLK_BACKSPACE, "TAB": SDLK_TAB, "LSHIFT": SDLK_LSHIFT, "RSHIFT": SDLK_RSHIFT,
        "LCTRL": SDLK_LCTRL, "RCTRL": SDLK_RCTRL, "CAPS": SDLK_CAPSLOCK, "HOME": SDLK_HOME,
        # numbers
        "0": SDLK_0, "F1": SDLK_F1, "F11": SDLK_F11, "F21": SDLK_F21,
        "1": SDLK_1, "F2": SDLK_F2, "F12": SDLK_F12, "F22": SDLK_F22,
        "2": SDLK_2, "F3": SDLK_F3, "F13": SDLK_F13, "F23": SDLK_F23,
        "3": SDLK_3, "F4": SDLK_F4, "F14": SDLK_F14, "F24": SDLK_F24,
        "4": SDLK_4, "F5": SDLK_F5, "F15": SDLK_F15,
        "5": SDLK_5, "F6": SDLK_F6, "F16": SDLK_F16,
        "6": SDLK_6, "F7": SDLK_F7, "F17": SDLK_F17,
        "7": SDLK_7, "F8": SDLK_F8, "F18": SDLK_F18,
        "8": SDLK_8, "F9": SDLK_F9, "F19": SDLK_F19,
        "9": SDLK_9, "F10": SDLK_F10, "F20": SDLK_F20
    }
controls = None  # list of per-player keybinding dicts, populated by bind_keys()
def map_key(string):
    """Translate a key name (e.g. "A", "SPACE") to its SDL keycode."""
    return KEY_MAP[string]
def bind_keys():
    """Load General/controls.txt and translate key names into SDL keycodes.

    Populates the module-level `controls` list with one dict per player, each
    mapping action names to SDL keycodes plus a "player_id" entry.
    NOTE(review): `json` is not imported explicitly here; presumably brought
    into scope by `from pico2d import *` - confirm.
    """
    global controls
    control_file = open('General/controls.txt', 'r')
    control_info = json.load(control_file)
    control_file.close()
    controls = []
    for id in control_info:
        controls_dict = control_info[id]
        for i in controls_dict:
            controls_dict[i] = map_key(controls_dict[i])  # key name -> SDL keycode
        controls_dict.update({"player_id": int(id)})
        controls.append(controls_dict)
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,087
|
xyahh/xBlade
|
refs/heads/master
|
/Menu/main_menu.py
|
from pico2d import *
from Characters import char_select
from General import pFramework
from General import key_mapping as key
from Sound import sound_manager as sound
file_name = "MainMenu"  # state identifier used by the framework/logging
# Menu state, populated in enter()/init_menu():
num_of_players, num_of_players_choices, choice = None, None, None
options, main_theme = None, None
RECT_W, RECT_H = None, None  # selection-rectangle dimensions
images, font = None, None
def init_images():
    """Load the menu background images listed in Menu/image.txt."""
    global images
    image_file = open('Menu/image.txt', 'r')
    image_info = json.load(image_file)
    image_file.close()
    images = []
    for name in image_info:
        images.append({"img": load_image(image_info[name]['path']),
                       "x": image_info[name]['x'], "y": image_info[name]['y']})
def init_sounds():
    """Start the main-menu background music."""
    sound.play("main")
def init_menu():
    """Read Menu/menu.txt: font, selection-rectangle size and menu options.

    Also binds the player key controls. Each option carries a screen position
    derived from its priority; its associated player count is stored in the
    parallel num_of_players_choices list.
    """
    global font, options, RECT_H, RECT_W, num_of_players_choices, choice
    key.bind_keys()
    menu_file = open('Menu/menu.txt', 'r')
    menu_info = json.load(menu_file)
    menu_file.close()
    font = load_font(menu_info['font']['path'], menu_info['font']['size'])
    RECT_W = menu_info['rect_size']['width']
    RECT_H = menu_info['rect_size']['height']
    num_of_players_choices = []
    options = []
    for name in menu_info['options']:
        # Vertical position = start_y + diff_y * the option's priority.
        y = menu_info['options'][name]['start_y'] + \
            menu_info['options'][name]['diff_y'] * menu_info['options'][name]['priority']
        options.append({"name": name, "x": menu_info['options'][name]['x'], "y": y})
        num_of_players_choices.append(menu_info['options'][name]['num_of_players'])
    choice = len(num_of_players_choices) - 1  # just as a default value
def enter():
    """State-framework hook: initialize sounds, images and menu options."""
    init_sounds()
    init_images()
    init_menu()
def exit():
    """State-framework hook: drop the menu's module-level resources.

    NOTE(review): shadows the builtin `exit`; presumably the framework calls
    these hooks by name - confirm.
    """
    global images, options, main_theme
    del images, options, main_theme
def update(frame_time):
    """State-framework hook: the menu has no per-frame logic."""
    pass
def draw(frame_time):
    """Render background images, option labels and the selection rectangle."""
    clear_canvas()
    for i in range(len(images)):
        images[i]['img'].draw(images[i]['x'], images[i]['y'])
    for i in options:
        font.draw(i['x'], i['y'], i['name'])
    # Highlight the currently selected option.
    draw_rectangle(options[choice]['x'], options[choice]['y'] - RECT_H / 2,
                   options[choice]['x'] + RECT_W, options[choice]['y'] + RECT_H / 2)
    update_canvas()
def handle_events(frame_time):
    """Process keyboard input: move the highlight, submit a choice, or quit."""
    global num_of_players, choice
    events = get_events()
    for event in events:
        if event.type == SDL_KEYDOWN:
            for i in range(len(key.controls)):  # can add the control id check if only one player needs to control menu
                if event.key == key.controls[i]['up']:
                    sound.play("change")
                    choice = (choice + 1) % len(options)
                elif event.key == key.controls[i]['down']:
                    sound.play("change")
                    choice = (choice - 1) % len(options)
                elif event.key == key.controls[i]['submit']:
                    sound.play("submit")
                    if choice > 0:
                        # Non-zero options start the game with that player count.
                        num_of_players = num_of_players_choices[choice]
                        pFramework.push_state(char_select)
                    else:
                        # Option 0 quits the game.
                        pFramework.quit()
        elif event.type == SDL_QUIT:
            pFramework.quit()
def pause(): pass  # state-framework hook: nothing to suspend for the menu
def resume(): pass  # state-framework hook: nothing to restore on re-entry
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,088
|
xyahh/xBlade
|
refs/heads/master
|
/Logo/logo.py
|
from pico2d import *
from General import pFramework
from Menu import main_menu
from Sound import sound_manager as sound
file_name = "StartState"  # state identifier used by the framework/logging
images = None  # logo images, loaded in init_media()
logo_time, alpha = 0.0, 0.0  # elapsed time in the current phase / fade opacity
fade_in, fade_out = None, None  # phase flags, set in init_fade()
win_width, win_height, win_caption = None, None, None  # from Logo/window.txt
# Fade timing constants from Logo/fade.txt.
# NOTE(review): MAX_ALPHA_VALUE is declared but never assigned in the visible
# code - confirm it is unused.
FADE_TIME_CONSTRAINT, LOGO_SHOW_TIME_CONSTRAINT, MAX_ALPHA_VALUE = None, None, None
alpha_change_rate = None  # alpha delta per second; negated for the fade-out
def init_window():
    """Read window geometry from Logo/window.txt and open the game canvas.

    Improvement: the file is now opened via ``with`` so the handle is closed
    even if json.load raises; the original used manual open/close.
    """
    global win_width, win_height, win_caption
    import json  # local import: the module otherwise relies on pico2d's star-import
    with open('Logo/window.txt', 'r') as window_file:
        window_info = json.load(window_file)
    win_width = window_info['width']
    win_height = window_info['height']
    win_caption = window_info['title']
    open_canvas(w=win_width, h=win_height, title=win_caption)
def init_media():
    """Register every sound from Sound/sound.txt and load every splash image
    from Logo/image.txt.

    Improvement: files are opened via ``with`` so handles are closed even if
    json.load raises.
    """
    global images
    import json  # local import: the module otherwise relies on pico2d's star-import
    with open('Sound/sound.txt', 'r') as sound_file:
        sound_info = json.load(sound_file)
    for sound_name in sound_info:
        sound.add(sound_name, sound_info[sound_name]['path'], sound_info[sound_name]['is_bgm'])
    with open('Logo/image.txt', 'r') as image_file:
        image_info = json.load(image_file)
    images = []
    for name in image_info:
        images.append({"img": load_image(image_info[name]['path']),
                       "x": image_info[name]['x'], "y": image_info[name]['y'],
                       "opacify": image_info[name]['opacify']})
def init_fade():
    """Reset fade bookkeeping from the constants in Logo/fade.txt.

    Improvement: file opened via ``with``.
    """
    global FADE_TIME_CONSTRAINT, LOGO_SHOW_TIME_CONSTRAINT, MAX_ALPHA_VALUE, alpha_change_rate
    global fade_in, fade_out, logo_time, alpha
    import json  # local import: the module otherwise relies on pico2d's star-import
    with open('Logo/fade.txt', 'r') as fade_file:
        file_info = json.load(fade_file)
    fade_in, fade_out = True, False
    FADE_TIME_CONSTRAINT = file_info['FADE_TIME_CONSTRAINT']
    LOGO_SHOW_TIME_CONSTRAINT = file_info['LOGO_SHOW_TIME_CONSTRAINT']
    # NOTE(review): MAX_ALPHA_VALUE is declared global but never assigned from
    # the file here nor read elsewhere in this module -- confirm it is dead.
    alpha_change_rate = 1.0 / FADE_TIME_CONSTRAINT
    logo_time, alpha = 0.0, 0.0
def enter():
    """Open the window, load media and reset the fade state."""
    init_window()
    init_media()
    init_fade()
def exit():
    """Free images and sounds and close the canvas when leaving the splash."""
    global images
    del images
    sound.delete_all()
    close_canvas()
def update(frame_time):
    """Advance the fade state machine: fade in, hold the logo, fade out,
    then push the main menu state."""
    global logo_time, alpha, fade_in, fade_out, alpha_change_rate
    logo_time += frame_time  # time spent in the current phase
    if fade_in or fade_out:
        # alpha_change_rate is positive while fading in; negated for fade-out.
        alpha += alpha_change_rate*frame_time
    if logo_time > FADE_TIME_CONSTRAINT and fade_out:
        # Fade-out finished: leave the logo state for the menu.
        pFramework.push_state(main_menu)
    elif logo_time > FADE_TIME_CONSTRAINT and fade_in:
        # Fade-in finished: start the steady "show logo" phase.
        logo_time = 0
        fade_in = False
    elif logo_time > LOGO_SHOW_TIME_CONSTRAINT and not (fade_in or fade_out):
        # Hold time elapsed: begin fading out.
        logo_time = 0
        fade_out = True
        alpha_change_rate = -alpha_change_rate
def draw(frame_time):
    """Draw the splash images, applying the fade alpha where requested."""
    clear_canvas()
    for entry in images:
        if entry['opacify']:
            entry['img'].opacify(alpha)
        entry['img'].draw(entry['x'], entry['y'])
    update_canvas()
def handle_events(frame_time):
    """Skip straight to the menu on any key/mouse press; honour window close."""
    for event in get_events():
        if event.type in (SDL_KEYDOWN, SDL_MOUSEBUTTONDOWN):
            pFramework.push_state(main_menu)
        elif event.type == SDL_QUIT:
            pFramework.quit()
def pause(): pass  # nothing to suspend for the static splash state
def resume(): pass  # nothing to restore
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,089
|
xyahh/xBlade
|
refs/heads/master
|
/General/bounding_box.py
|
from pico2d import *
class BoundingBox:
    """Axis-aligned collision boxes for characters and map objects.

    Box tuples are ordered (left, top, right, bottom); the LEFT..HEAL class
    constants index into such tuples (DAMAGE/HEAL are extra slots used by box
    data elsewhere in the project).

    Fix: the methods previously read bare module-level globals (LEFT, ...)
    that only came into existence as a side effect of __init__'s ``global``
    assignment; calling e.g. collide() on the class before any instance was
    built raised NameError.  They now use the class constants.
    """
    LEFT, TOP, RIGHT, BOTTOM, DAMAGE, HEAL = range(6)

    def __init__(self, char, map: object):
        # Backward compatibility: the original published these indices as
        # module-level globals when the first instance was constructed.
        global LEFT, TOP, RIGHT, BOTTOM, DAMAGE, HEAL
        LEFT, TOP, RIGHT, BOTTOM, DAMAGE, HEAL = range(6)
        self.char = char            # list of character objects
        self.map = map              # map instance, read via map.map['objects']
        self.map_object_id = []     # indices of map objects that own a box
        self.char_id = []           # player ids, parallel to char_box
        self.map_box = []           # world-space boxes for map objects
        self.char_box = []          # world-space boxes for characters

    def update_map_box(self):
        """Rebuild world-space boxes for every map object flagged has_bbox."""
        self.map_box.clear()
        self.map_object_id.clear()
        objects = self.map.map['objects']
        for i, obj in enumerate(objects):
            if obj['has_bbox']:
                self.map_object_id.append(i)
                box = obj['bounding_box']
                self.map_box.append((obj['pos_x'] + box[self.LEFT],
                                     obj['pos_y'] + box[self.TOP],
                                     obj['pos_x'] + box[self.RIGHT],
                                     obj['pos_y'] + box[self.BOTTOM]))

    def update_char_box(self):
        """Rebuild world-space boxes from each character's action/state box."""
        self.char_box.clear()
        self.char_id.clear()
        for ch in self.char:
            self.char_id.append(ch.player_id)
            bx = ch.bounding_box[ch.action][ch.state]
            self.char_box.append((ch.x + bx[self.LEFT],
                                  ch.y + bx[self.TOP],
                                  ch.x + bx[self.RIGHT],
                                  ch.y + bx[self.BOTTOM]))

    def update(self):
        """Refresh both box sets; call once per frame before collision tests."""
        self.update_map_box()
        self.update_char_box()

    def draw(self):
        """Debug-draw every map and character box."""
        for box in self.map_box + self.char_box:
            draw_rectangle(box[self.LEFT], box[self.TOP],
                           box[self.RIGHT], box[self.BOTTOM])

    def collide(self, a, b):
        """Return True when boxes *a* and *b* (left, top, right, bottom) overlap."""
        left_a, bottom_a, right_a, top_a = a[self.LEFT], a[self.BOTTOM], a[self.RIGHT], a[self.TOP]
        left_b, bottom_b, right_b, top_b = b[self.LEFT], b[self.BOTTOM], b[self.RIGHT], b[self.TOP]
        if left_a > right_b: return False
        if right_a < left_b: return False
        if top_a < bottom_b: return False
        if bottom_a > top_b: return False
        return True
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,090
|
xyahh/xBlade
|
refs/heads/master
|
/Results/results.py
|
from pico2d import *
from General import pFramework
from Sound import sound_manager as sound
from General import key_mapping as key
from General import game_play
# State name reported to the framework.
file_name = "Results"
# Loaded in enter() from Results/media.txt.
images, font = None, None
def enter():
    """Load the results screen's font and images from Results/media.txt.

    Improvement: file opened via ``with`` so the handle is closed even if
    json.load raises.
    """
    global images, font
    import json  # local import: the module otherwise relies on pico2d's star-import
    with open('Results/media.txt', 'r') as media_file:
        media_info = json.load(media_file)
    font = load_font(media_info['font']['path'], media_info['font']['size'])
    images = []
    for name in media_info['images']:
        images.append({"img": load_image(media_info['images'][name]['path']),
                       "x": media_info['images'][name]['x'], "y": media_info['images'][name]['y']})
def exit():
    """No teardown: media is kept for the lifetime of the process."""
    pass
def update(frame_time):
    """Static screen: nothing to advance."""
    pass
def draw(frame_time):
    """Render the results backdrop and the winning character."""
    clear_canvas()
    for i in range(len(images)):
        images[i]['img'].draw(images[i]['x'], images[i]['y'])
        # NOTE(review): the winner is drawn once per background image, at each
        # image's position -- confirm this is intended rather than a single
        # draw at a dedicated slot.
        game_play.winner.draw(images[i]['x'], images[i]['y'])
    update_canvas()
def handle_events(frame_time):
events = get_events()
for event in events:
for i in range(len(key.controls)):
if event.key in (key.controls[i]['pause'], key.controls[i]['submit']):
sound.play("submit")
sound.stop("victory")
for j in range(4):
pFramework.pop_state()
sound.play("main")
break
if event.type == SDL_QUIT:
pFramework.quit()
def pause(): pass  # results screen has nothing to suspend
def resume(): pass  # nothing to restore
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,091
|
xyahh/xBlade
|
refs/heads/master
|
/Maps/map_class.py
|
from pico2d import *
from Sound import sound_manager as sound
# Module name used for framework identification.
file_name = "MapClass"
class Map:
    """A playable map: its images, animated objects and spawn points."""

    def init_map(self, map_name):
        """Load the named map's metadata and object list from Maps/maps.txt."""
        with open('Maps/maps.txt', 'r') as maps_file:
            map_info = json.load(maps_file)
        self.map = {}
        for name in map_info:
            if name == map_name:
                with open(map_info[name]['objects'], 'r') as object_file:
                    object_info = json.load(object_file)
                self.map_objects = []
                for obj_name in object_info:
                    img = None
                    has_img = object_info[obj_name]['has_img']
                    if has_img:
                        img = load_image(object_info[obj_name]['img'])
                    this_object = object_info[obj_name]
                    # Normalise the raw JSON record: runtime position starts at
                    # the spawn point and the bbox becomes an indexable tuple.
                    this_object.update({"name": obj_name, "img": img, "has_img": has_img,
                                        "pos_x": this_object['start_x'],
                                        "pos_y": this_object['start_y'],
                                        "bounding_box": (this_object['bounding_box']['left'],
                                                         this_object['bounding_box']['top'],
                                                         this_object['bounding_box']['right'],
                                                         this_object['bounding_box']['bottom'])})
                    self.map_objects.append(this_object)
                self.map = {"name": name, "dsp_img": load_image(map_info[name]['dsp_img']),
                            "x": map_info[name]['x'],
                            "y": map_info[name]['y'],
                            "map_img": load_image(map_info[name]['map_img']), "objects": self.map_objects}

    def init_spawn(self, map_name, num_of_players):
        """Load spawn points for the named map when it can host the player count."""
        with open('Maps/spawn.txt', 'r') as spawn_file:
            spawn_info = json.load(spawn_file)
        self.spawn = []
        for name in spawn_info:
            if name == map_name and len(spawn_info[name]) >= num_of_players:
                for i in spawn_info[name]:
                    spawn_dict = spawn_info[name][i]
                    spawn_dict.update({"player_id": int(i)})
                    self.spawn.append(spawn_dict)

    def __init__(self, map_name, num_of_players):
        self.init_map(map_name)
        self.init_spawn(map_name, num_of_players)

    def draw(self):
        """Draw the map background, then every object that owns an image."""
        self.map['map_img'].draw(self.map['x'], self.map['y'])
        for obj in self.map['objects']:
            if obj['has_img']:
                obj['img'].draw(obj['pos_x'], obj['pos_y'])

    def update(self, frame_time):
        """Advance every drawable object along its direction vector, either
        bouncing (scaling dir by factor_*) or resetting to its spawn point
        (obj['new']) when a movement limit is crossed."""
        for obj in self.map_objects:
            if not obj['has_img']:
                continue
            obj['pos_x'] += obj['dir_x'] * frame_time
            obj['pos_y'] += obj['dir_y'] * frame_time
            if ((obj['pos_x'] <= obj['limit_x1'] and obj['dir_x'] < 0) or
                    (obj['pos_x'] >= obj['limit_x2'] and obj['dir_x'] > 0)):
                if obj['new']:
                    obj['pos_x'] = obj['start_x']
                    obj['pos_y'] = obj['start_y']
                else:
                    obj['dir_x'] *= obj['factor_x']
            if ((obj['pos_y'] <= obj['limit_y1'] and obj['dir_y'] < 0) or
                    (obj['pos_y'] >= obj['limit_y2'] and obj['dir_y'] > 0)):
                if obj['new']:
                    obj['pos_x'] = obj['start_x']
                    obj['pos_y'] = obj['start_y']
                else:
                    obj['dir_y'] *= obj['factor_y']

    def get_name(self):
        """Return this map's name.

        Fix: the original returned ``self.map[self.id]['name']`` but Map never
        defines ``self.id`` (that attribute belongs to MapSelect), so this
        method always raised.
        """
        return self.map['name']

    def size(self):
        # NOTE(review): returns the number of keys in the map dict, not an
        # object count or pixel size -- confirm callers expect that.
        return len(self.map)
class MapSelect:
    """Carousel over every map defined in Maps/maps.txt, for the map-select
    screen.

    Improvement: the maps file is opened via ``with``, and the wrap-around
    navigation uses modular arithmetic instead of explicit edge branches
    (behaviour is identical).
    """

    def __init__(self):
        with open('Maps/maps.txt', 'r') as maps_file:
            map_info = json.load(maps_file)
        self.map = []   # [{name, dsp_img, theme}, ...] in file order
        self.id = 0     # index of the currently highlighted map
        for name in map_info:
            self.map.append({"name": name, "dsp_img": load_image(map_info[name]['dsp_img']),
                             "theme": map_info[name]['theme']})

    def draw(self, x, y):
        """Draw the highlighted map's display image at (x, y)."""
        self.map[self.id]['dsp_img'].draw(x, y)

    def get_curr_map_name(self):
        """Name of the highlighted map."""
        return self.map[self.id]['name']

    def get_curr_map_theme(self):
        """Theme-music key of the highlighted map."""
        return self.map[self.id]['theme']

    def handle_events(self, event, left_key, right_key):
        """Cycle the highlight left/right (with wrap-around) on the given keys."""
        if event.key == left_key:
            sound.play("change")
            self.id = (self.id - 1) % len(self.map)
        if event.key == right_key:
            sound.play("change")
            self.id = (self.id + 1) % len(self.map)
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,092
|
xyahh/xBlade
|
refs/heads/master
|
/Sound/sound_manager.py
|
from sdl2.sdlmixer import *
# Registry of loaded chunks: name -> {"sound": chunk, "repeat": bool, "channel": int}.
song_list = {}
CHANNEL_NUMBER = 16
Mix_AllocateChannels(CHANNEL_NUMBER) # 16 sounds can be played at a time
def add(song_name, path, is_bgm=False):
    """Load a WAV file and register it under *song_name*; BGMs loop forever."""
    chunk = Mix_LoadWAV(path.encode('UTF-8'))
    if not chunk:
        print('cannot load %s' % path)
        return
    song_list[song_name] = {"sound": chunk, "repeat": is_bgm, "channel": -1}
def delete(song_name):
    """Forget *song_name* if registered; silently ignore unknown names.

    Simplified: ``dict.pop`` with a default already handles the missing-key
    case, so the separate membership test was redundant.
    """
    song_list.pop(song_name, None)
def delete_all():
    """Drop every registered sound.

    Fix: the original also executed ``del song_list``, unbinding the module
    global, so any subsequent add()/play() raised NameError (and this module
    is re-used after the logo state calls delete_all()).  Clearing the dict
    releases the entries while keeping the module usable.
    """
    song_list.clear()
def play(song_name, vol=65):
    """Play a registered sound on a free channel at *vol*; BGMs loop forever."""
    if song_name not in song_list:
        return
    entry = song_list[song_name]
    repeat = -1 if entry['repeat'] else 0
    channel = Mix_PlayChannel(-1, entry['sound'], repeat)
    Mix_Volume(channel, vol)
    entry['channel'] = channel
def stop(song_name):
    """Halt *song_name*'s channel if one is currently assigned."""
    entry = song_list.get(song_name)
    if entry is None or entry['channel'] == -1:
        return
    Mix_HaltChannel(entry['channel'])
    entry['channel'] = -1
def stop_bgms():
    """Halt every sound registered as background music."""
    for name, entry in song_list.items():
        if entry['repeat']:
            stop(name)
def stop_all():
    """Pause every active mixer channel.

    NOTE(review): despite the name this *pauses* (Mix_Pause) rather than
    halting -- paused channels remain resumable.  Confirm intent.
    """
    for i in range(CHANNEL_NUMBER):
        if Mix_Playing(i):
            Mix_Pause(i)
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,093
|
xyahh/xBlade
|
refs/heads/master
|
/General/game_play.py
|
from pico2d import *
from Characters.char_class import Character
from Results import results
from Maps.map_class import Map
from General import pFramework
from Sound import sound_manager as sound
from General import key_mapping as key
from General.bounding_box import BoundingBox
from Maps import map_select
from Menu import main_menu
from Characters import char_select
# State name reported to the framework.
file_name = "Gameplay"
# NOTE(review): pause_game is never assigned or read in this module.
pause_game = None
# Per-round character list and Map instance, built in enter().
char, map = None, None
# Winning Character; read by the results state.
winner = None
# BoundingBox shared by all characters.
boxes = None
# F1 toggles debug drawing of collision boxes.
show_boxes = False
def enter():
    """Build the round: map, one character per player, and the collision boxes."""
    global char, map, boxes
    char = []
    map = Map(map_select.map_sel.get_curr_map_name(), main_menu.num_of_players)
    for i in range(main_menu.num_of_players):
        name = char_select.char_sel.chars[char_select.char_sel.player_choice[i]]['name']
        char.append(Character(name, map.spawn[i]['player_id'], map.spawn[i]['x'],
                    map.spawn[i]['y'], map.spawn[i]['state'], map.spawn[i]['action']))
    if main_menu.num_of_players == 1:
        # Single-player: add a second character with player_id 0 as opponent,
        # taken from the roster entry just before the player's choice.
        name = char_select.char_sel.chars[char_select.char_sel.player_choice[0]-1]['name']
        char.append(Character(name, 0, map.spawn[1]['x'],
                    map.spawn[1]['y'], map.spawn[1]['state'], map.spawn[1]['action']))
    boxes = BoundingBox(char, map)
def exit():
    """No teardown: round objects are rebuilt on the next enter()."""
    pass
def update(frame_time):
    """Advance map and characters; when one character remains, declare the winner."""
    global winner
    map.update(frame_time)
    alive = []
    for i in range(len(char)):
        # Character.update presumably returns truthy once the character is
        # eliminated -- TODO confirm; 'alive' collects the survivors' indices.
        if not char[i].update(frame_time, boxes):
            alive.append(i)
    boxes.update()
    if len(alive) == 1 and main_menu.num_of_players > 1:
        winner = char[alive[0]]
        pFramework.push_state(results)
        sound.play("victory")
        sound.stop(map_select.map_sel.get_curr_map_theme())
def draw(frame_time):
    """Draw map, characters (player 1 on top) and optional debug boxes."""
    clear_canvas()
    map.draw()
    for fighter in reversed(char):  # reversed so player 1 is drawn last / on top
        fighter.draw()
    if show_boxes:
        boxes.draw()
    update_canvas()
def handle_events(frame_time):
    """Forward input to each active character; handle pause, F1 debug, quit.

    Fix: the pause-key comparison is now guarded by SDL_KEYDOWN (matching the
    F1 check); the original read ``event.key`` on every event, including
    key releases and SDL_QUIT, which need not carry a key.
    """
    global show_boxes
    events = get_events()
    for event in events:
        for i in range(len(key.controls)):
            if i < main_menu.num_of_players:
                # Characters receive every event (they need key-ups to stop moving).
                char[i].handle_events(frame_time, event, key.controls[i]['player_id'],
                                      key.controls[i]['left'], key.controls[i]['right'],
                                      key.controls[i]['up'], key.controls[i]['down'],
                                      key.controls[i]['ability1'], key.controls[i]['ability2'])
            if event.type == SDL_KEYDOWN and event.key == key.controls[i]['pause']:
                sound.play("back")
                sound.stop(map_select.map_sel.get_curr_map_theme())
                pFramework.pop_state()
                sound.play("main")
        if event.type == SDL_KEYDOWN and event.key == SDLK_F1:
            show_boxes = not show_boxes
        if event.type == SDL_QUIT:
            pFramework.quit()
def pause():
    """Nothing to suspend while another state runs on top of gameplay."""
    pass
def resume():
    """Nothing to restore on return to gameplay."""
    pass
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,094
|
xyahh/xBlade
|
refs/heads/master
|
/Maps/map_select.py
|
from pico2d import *
from General import game_play, pFramework
from General import key_mapping as key
from Sound import sound_manager as sound
from Maps.map_class import MapSelect
from Characters import char_select
from Menu import main_menu
# State name reported to the framework.
file_name = "MapSelect"
# Backdrop images / map-name label placement, loaded in init_media()/init_text().
images, map_text = None, None
# Per-player name labels and static caption entries.
player_text, custom_text = None, None
# Shared font, choices list (never filled in this module) and the map carousel.
font, choices, map_sel = None, None, None
# player_id -> (R, G, B) label colour.
player_colors = None
def init_media():
    """Create the map carousel and load the screen's backdrop images.

    Improvement: the media file is opened via ``with``.
    """
    global images, choices, map_sel
    import json  # local import: the module otherwise relies on pico2d's star-import
    map_sel = MapSelect()
    with open('Maps/media.txt', 'r') as media_file:
        media_info = json.load(media_file)
    choices = []  # kept (it is deleted in exit()) although never filled here
    images = []
    for name in media_info['images']:
        images.append({"img": load_image(media_info['images'][name]['path']),
                       "is_map_border": media_info['images'][name]['is_map_border'],
                       "x": media_info['images'][name]['x'],
                       "y": media_info['images'][name]['y']})
def init_text():
    """Load fonts, per-player colours and all label placement data.

    Improvement: both files are opened via ``with``.
    """
    global font, map_text, player_text, custom_text, player_colors
    import json  # local import: the module otherwise relies on pico2d's star-import
    with open('Maps/text.txt', 'r') as text_file:
        text_info = json.load(text_file)
    with open('General/font.txt', 'r') as font_path:
        font_info = json.load(font_path)
    font = load_font(font_info['font']['path'], font_info['font']['size'])
    player_text = []
    custom_text = []
    map_text = {}
    player_colors = {}
    for id in font_info['player_colors']:
        player_colors[int(id)] = (font_info['player_colors'][id]['R'],
                                  font_info['player_colors'][id]['G'],
                                  font_info['player_colors'][id]['B'])
    for name in text_info['player_text']:
        player_text.append({"player_id": text_info['player_text'][name]['player_id'],
                            "x": text_info['player_text'][name]['x'],
                            "y": text_info['player_text'][name]['y']})
    for name in text_info['custom_text']:
        custom_text.append({"string": text_info['custom_text'][name]['string'],
                            "x": text_info['custom_text'][name]['x'],
                            "y": text_info['custom_text'][name]['y'],
                            "RGB": (text_info['custom_text'][name]['red'],
                                    text_info['custom_text'][name]['green'],
                                    text_info['custom_text'][name]['blue'])})
    map_text = {"x": text_info['map_text']['map_name']['x'],
                "y": text_info['map_text']['map_name']['y'],
                "RGB": (text_info['map_text']['map_name']['red'],
                        text_info['map_text']['map_name']['green'],
                        text_info['map_text']['map_name']['blue'])}
def enter():
    """Load media and text layout when the state is pushed."""
    init_media()
    init_text()
def exit():
    """Drop loaded media/text so the next enter() rebuilds them."""
    global images, custom_text, font, choices, map_text, player_text
    del images, custom_text, font, choices, map_text, player_text
def update(frame_time):
    """Static screen: nothing to advance."""
    pass
def draw(frame_time):
    """Render backdrops, captions, chosen character names and the map name."""
    clear_canvas()
    for entry in images:
        if entry['is_map_border']:
            # The carousel's preview goes underneath its border frame.
            map_sel.draw(entry['x'], entry['y'])
        entry['img'].draw(entry['x'], entry['y'])
    for caption in custom_text:
        font.draw(caption['x'], caption['y'], caption['string'], caption['RGB'])
    for idx, label in enumerate(player_text):
        if idx < main_menu.num_of_players:
            pid = label['player_id']
            char_name = char_select.char_sel.chars[char_select.char_sel.player_choice[idx]]['name']
            font.draw(label['x'], label['y'], char_name, player_colors[pid])
    font.draw(map_text['x'], map_text['y'], map_sel.get_curr_map_name(), map_text['RGB'])
    update_canvas()
def handle_events(frame_time):
    """Cycle maps, go back on pause, or start the match on submit.

    Fix: dropped the stray ``global maps`` declaration -- no ``maps`` name
    exists anywhere in this module (the carousel lives in ``map_sel``).
    """
    events = get_events()
    for event in events:
        if event.type == SDL_KEYDOWN:
            for i in range(len(key.controls)):
                map_sel.handle_events(event, key.controls[i]['left'], key.controls[i]['right'])
                if event.key == key.controls[i]['pause']:
                    sound.play("back")
                    pFramework.pop_state()
                    break
                if event.key == key.controls[i]['submit']:
                    sound.play("submit")
                    sound.stop("main")
                    sound.play(map_sel.get_curr_map_theme())
                    pFramework.push_state(game_play)
        elif event.type == SDL_QUIT:
            pFramework.quit()
def resume():
    """Nothing to restore when this screen regains focus."""
    pass
def pause():
    """Nothing to suspend while a match runs on top of this state."""
    pass
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,095
|
xyahh/xBlade
|
refs/heads/master
|
/x_blade.py
|
import platform
import os
# Point PySDL2 at the bundled SDL2 DLL build matching the interpreter width;
# this must happen before anything imports sdl2.
if platform.architecture()[0] == '32bit':
    os.environ["PYSDL2_DLL_PATH"] = "./SDL2/x86"
else:
    os.environ["PYSDL2_DLL_PATH"] = "./SDL2/x64"
from General import pFramework
from Logo import logo
# Entry point: run the framework's state machine starting at the logo splash.
pFramework.run(logo)
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,096
|
xyahh/xBlade
|
refs/heads/master
|
/Characters/char_select.py
|
from pico2d import *
from General import pFramework
from General import key_mapping as key
from Sound import sound_manager as sound
from Menu import main_menu
from Characters.char_class import CharacterSelect
from Maps import map_select
# State name reported to the framework.
file_name = "CharSelect"
# Backdrops and per-player selection arrows, loaded in init_media().
images, arrows = None, None
# Shared label font.
font = None
# Roster widget and per-player name-label placements.
char_sel, text = None, None
# player_id -> (R, G, B) label colour.
player_colors = None
def init_media():
    """Build the roster widget and load fonts, labels, images and arrows.

    Fix: ``font2`` was declared global here but never assigned or used
    anywhere in this module, so it was dropped from the declaration.  Files
    are now opened via ``with``.
    """
    global images, arrows, font, char_sel, text, player_colors
    import json  # local import: the module otherwise relies on pico2d's star-import
    char_sel = CharacterSelect(main_menu.num_of_players)
    with open('Characters/media.txt', 'r') as media_file:
        media_info = json.load(media_file)
    with open('General/font.txt', 'r') as font_path:
        font_info = json.load(font_path)
    font = load_font(font_info['font']['path'], font_info['font']['size'])
    player_colors = {}
    for id in font_info['player_colors']:
        player_colors[int(id)] = (font_info['player_colors'][id]['R'],
                                  font_info['player_colors'][id]['G'],
                                  font_info['player_colors'][id]['B'])
    text = []
    for name in media_info['text']:
        if media_info['text'][name]['player_id'] <= main_menu.num_of_players:
            text.append({"player_id": media_info['text'][name]['player_id'],
                         "x": media_info['text'][name]['x'],
                         "y": media_info['text'][name]['y']})
    images = []
    for name in media_info['images']:
        images.append({"img": load_image(media_info['images'][name]['path']),
                       "x": media_info['images'][name]['x'], "y": media_info['images'][name]['y']})
    arrows = []
    for name in media_info['arrows']:
        if media_info['arrows'][name]['player_id'] <= main_menu.num_of_players:
            arrows.append({"img": load_image(media_info['arrows'][name]['path']),
                           "player_id": media_info['arrows'][name]['player_id'],
                           "x_offset": media_info['arrows'][name]['x_offset'],
                           "y_offset": media_info['arrows'][name]['y_offset']})
def enter():
    """Build the roster and load media when the state is pushed."""
    init_media()
def exit():
    """Release media and roster objects when leaving character select."""
    global images, arrows, font, char_sel, text
    del images, arrows, font, char_sel, text
def update(frame_time):
    """Static screen: nothing to advance."""
    pass
def draw_media():
    """Draw backdrops, each active player's selection arrow, and name labels."""
    for i in range(len(images)):
        images[i]['img'].draw(images[i]['x'], images[i]['y'])
    for i in range(len(arrows)):
        if i < main_menu.num_of_players:
            # Arrow position = the grid cell of that player's current choice
            # (column = index % chars_per_row, row = index // chars_per_row,
            # scaled by the grid spacing) plus the arrow's own offset.
            arrows[i]['img'].draw((char_sel.player_choice[i] % char_sel.chars_per_row) * char_sel.col_dist_diff
                                  + char_sel.start_x + arrows[i]['x_offset'],
                                  int(char_sel.player_choice[i] / char_sel.chars_per_row) * char_sel.row_dist_diff
                                  + char_sel.start_y + arrows[i]['y_offset'])
    for i in range(main_menu.num_of_players):
        id = text[i]['player_id']
        font.draw(text[i]['x'], text[i]['y'], char_sel.chars[char_sel.player_choice[i]]['name'],
                  player_colors[id])
def draw(frame_time):
    """Compose the frame: static media first, then the roster widget."""
    clear_canvas()
    draw_media()
    char_sel.draw()
    update_canvas()
def handle_events(frame_time):
    """Route navigation to the roster widget; pause goes back, submit proceeds."""
    for event in get_events():
        if event.type == SDL_KEYDOWN:
            for ctrl in key.controls:
                char_sel.handle_events(frame_time, event, ctrl['player_id'],
                                       ctrl['left'], ctrl['right'],
                                       ctrl['up'], ctrl['down'])
                if event.key == ctrl['pause']:
                    sound.play("back")
                    pFramework.pop_state()
                    break
                if event.key == ctrl['submit']:
                    sound.play("submit")
                    pFramework.push_state(map_select)
        elif event.type == SDL_QUIT:
            pFramework.quit()
def resume():
    """Nothing to restore when this screen regains focus."""
    pass
def pause():
    """Nothing to suspend while a later state runs on top."""
    pass
|
{"/Characters/char_class.py": ["/General/bounding_box.py"], "/General/game_play.py": ["/Characters/char_class.py", "/Maps/map_class.py", "/General/bounding_box.py"], "/Maps/map_select.py": ["/Maps/map_class.py"], "/Characters/char_select.py": ["/Characters/char_class.py"]}
|
28,122
|
quintanadaniel/test_python
|
refs/heads/master
|
/test/test_UserRegister.py
|
import unittest
from src.User import User
from src.Spotipy import Spotipy
from src.customExceptions.NotAvailableEmail import NotAvailableEmail
# All test class and function names must start with test_ (test discovery).
class UserRegisterTestCase(unittest.TestCase):
    """Registration behaviour of Spotipy.

    NOTE(review): the duplicate-email test only passes if Spotipy's
    availability check compares email strings and NotAvailableEmail actually
    derives from Exception -- verify both in their modules.
    """
    def setUp(self):
        # Name-mangled attributes keep the fixtures private to this class.
        self.__spotipy = Spotipy()
        pepe_mail = "dperez@gmail.com"
        pepe_name = "daniel"
        pepe_last_name = "Perez"
        self.__new_user_pepe = User(pepe_mail,pepe_name,pepe_last_name)
    def test_usuario_sin_cuenta_se_registra_con_mail_disponible(self):
        # A fresh email registers successfully and the user is then found.
        self.__spotipy.register_user(self.__new_user_pepe)
        expectedResponse = self.__spotipy.is_registered(self.__new_user_pepe)
        self.assertTrue(expectedResponse)
    def test_usuario_se_registra_con_email_duplicado_lanza_excepcion(self):
        # Registering the same user twice must raise NotAvailableEmail.
        self.__spotipy.register_user(self.__new_user_pepe)
        self.assertRaises(NotAvailableEmail,lambda: self.__spotipy.register_user(self.__new_user_pepe))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
{"/test/test_UserRegister.py": ["/src/User.py", "/src/Spotipy.py", "/src/customExceptions/NotAvailableEmail.py"], "/src/Spotipy.py": ["/src/customExceptions/NotAvailableEmail.py"]}
|
28,123
|
quintanadaniel/test_python
|
refs/heads/master
|
/src/Spotipy.py
|
from src.customExceptions.NotAvailableEmail import NotAvailableEmail
class Spotipy:
    """In-memory user registry keyed by (unique) email address."""

    def __init__(self):
        self.__users = []  # registered User objects, in registration order

    def register_user(self, new_user):
        """Add *new_user*; raise NotAvailableEmail if the email is taken.

        Fix: the availability check is now given the user's email string.
        The original passed the whole User object, so ``u.email == email``
        never matched and duplicate registrations were silently accepted.
        """
        if self.__is_available__email(new_user.email):
            self.__users.append(new_user)
        else:
            raise NotAvailableEmail(new_user.email)

    def __is_available__email(self, email):
        """True when no registered user already owns *email*."""
        repeated = list(filter(lambda u: u.email == email, self.__users))
        return len(repeated) == 0

    def is_registered(self, new_user_pepe):
        """True when a user with the same email has been registered."""
        return new_user_pepe.email in list(map(lambda u: u.email, self.__users))
|
{"/test/test_UserRegister.py": ["/src/User.py", "/src/Spotipy.py", "/src/customExceptions/NotAvailableEmail.py"], "/src/Spotipy.py": ["/src/customExceptions/NotAvailableEmail.py"]}
|
28,124
|
quintanadaniel/test_python
|
refs/heads/master
|
/src/User.py
|
class User:
    """Value object holding a user's email address and name parts."""

    def __init__(self, email, name, last_name):
        # Name-mangled storage keeps the fields private; read via properties.
        self.__email, self.__name, self.__last_name = email, name, last_name

    @property
    def email(self):
        """The user's email address (used as the registration key)."""
        return self.__email

    @property
    def name(self):
        """Given name."""
        return self.__name

    @property
    def last_name(self):
        """Family name."""
        return self.__last_name
|
{"/test/test_UserRegister.py": ["/src/User.py", "/src/Spotipy.py", "/src/customExceptions/NotAvailableEmail.py"], "/src/Spotipy.py": ["/src/customExceptions/NotAvailableEmail.py"]}
|
28,125
|
quintanadaniel/test_python
|
refs/heads/master
|
/src/customExceptions/NotAvailableEmail.py
|
class NotAvailableEmail(Exception):
    """Raised when registering a user whose email address is already taken.

    Fix: the original class did not inherit from Exception, so
    ``raise NotAvailableEmail(email)`` failed with
    ``TypeError: exceptions must derive from BaseException`` instead of
    signalling the duplicate registration.
    """
|
{"/test/test_UserRegister.py": ["/src/User.py", "/src/Spotipy.py", "/src/customExceptions/NotAvailableEmail.py"], "/src/Spotipy.py": ["/src/customExceptions/NotAvailableEmail.py"]}
|
28,136
|
georgingently/hospital_management_system
|
refs/heads/main
|
/core/admins.py
|
import pickle
class Admin:
    """Administrative operations over the pickle-file staff/patient database.

    All paths are relative to the working directory and use forward slashes,
    which Python accepts on every platform.  Fix: the original mixed
    ``database/...`` with raw ``database\\...`` strings, which breaks on
    POSIX systems; the three near-identical per-role branches are collapsed
    into a role->file table with small load/save helpers.
    """

    # role (lower-case) -> backing pickle file of [name, specialization] rows
    _ROLE_FILES = {
        'doctor': "database/doctors.pkl",
        'nurse': "database/nurses.pkl",
        'pharmacist': "database/pharmacists.pkl",
    }

    @staticmethod
    def _load(path):
        """Return the pickled list stored at *path*."""
        with open(path, "rb") as file:
            return pickle.load(file)

    @staticmethod
    def _save(path, records):
        """Overwrite *path* with the pickled *records* list."""
        with open(path, "wb") as file:
            pickle.dump(records, file)

    def add_employee(self, name, role, specialization):
        """Append [name, specialization] to the file for *role*.

        Returns 1 on success, 0 for an unknown role (original contract).
        """
        path = self._ROLE_FILES.get(role.lower())
        if path is None:
            return 0
        records = self._load(path)
        records.append([name, specialization])
        self._save(path, records)
        return 1

    def update_employee(self, name, role, specialization):
        """Re-file an employee: remove every record matching *name*
        (case-insensitively) from all three staff files, then re-add the
        employee under the given role and specialization.

        Fix: the original removed entries while iterating the same list,
        which skips consecutive duplicates; filtering builds a clean copy.
        """
        for path in self._ROLE_FILES.values():
            records = self._load(path)
            kept = [rec for rec in records if rec[0].lower() != name.lower()]
            self._save(path, kept)
        self.add_employee(name, role, specialization)

    def view_all_employees(self):
        """Return the (doctors, nurses, pharmacists) record lists."""
        return (self._load(self._ROLE_FILES['doctor']),
                self._load(self._ROLE_FILES['nurse']),
                self._load(self._ROLE_FILES['pharmacist']))

    def view_all_patients(self):
        """Return all patient records from the database."""
        return self._load("database/patients.pkl")
|
{"/cli.py": ["/core/employees.py", "/core/admins.py"]}
|
28,137
|
georgingently/hospital_management_system
|
refs/heads/main
|
/core/employees.py
|
import pickle
import time
class Doctor:
    """A doctor looked up by name in database/doctors.pkl.

    ``Doctor(name)`` evaluates to 0 (not an instance) when the name is
    unknown -- callers test ``doctor != 0`` -- because __new__ returns 0,
    which also makes Python skip __init__.
    """

    def __new__(cls, name):
        # Only construct an instance when the name exists in the database.
        with open("database/doctors.pkl", "rb") as file:
            for doctor in pickle.load(file):
                if doctor[0].lower() == name.lower():
                    return super(Doctor, cls).__new__(cls)
        return 0

    def __init__(self, name):
        self.name = name
        with open("database/doctors.pkl", "rb") as file:
            for doctor in pickle.load(file):
                if doctor[0].lower() == name.lower():
                    self.specialization = doctor[1]

    def get_appointments(self):
        """Return [{'patient_name': ..., 'date': ...}, ...] for this doctor.

        Fix: the original reused a single dict object for every appointment,
        so the returned list held N references to the *last* match.  A fresh
        dict is now built per appointment.
        """
        with open("database/appointments.pkl", "rb") as file:
            all_appointments = pickle.load(file)
        appointments = []
        for appointment in all_appointments:
            if appointment[0].lower() == self.name.lower():
                appointments.append({"patient_name": appointment[1],
                                     "date": appointment[2]})
        return appointments

    def prescribe_medicine(self, patient_name, prescription):
        """Append *prescription* to this doctor's most recent appointment with
        *patient_name* in the database.

        Fix: the loop now stops after the first (most recent) match; the
        original kept going and appended the prescription to every matching
        appointment, contradicting its own docstring.
        """
        with open("database/appointments.pkl", "rb") as file:
            all_appointments = pickle.load(file)
        for i in reversed(range(len(all_appointments))):
            if (all_appointments[i][0].lower() == self.name.lower()
                    and all_appointments[i][1].lower() == patient_name.lower()):
                all_appointments[i].append(prescription)
                break
        with open("database/appointments.pkl", "wb") as file:
            pickle.dump(all_appointments, file)
class Nurse :
    ''' Methods '''
    # TODO: nurse-specific behaviour is not implemented yet.
    pass
class Pharmacist :
    ''' Methods '''
    # TODO: pharmacist-specific behaviour is not implemented yet.
    pass
|
{"/cli.py": ["/core/employees.py", "/core/admins.py"]}
|
28,138
|
georgingently/hospital_management_system
|
refs/heads/main
|
/cli.py
|
import os
import core.patients
import core.employees
import core.admins
def clrscr():
    """Clear the terminal viewport (Windows ``cls`` or POSIX ``clear``).

    Fix: PEP 8 (E731) discourages assigning a lambda to a name; a ``def``
    keeps the same callable interface and, like the lambda, returns the
    shell's exit status.
    """
    return os.system('cls||clear')
def patients() :
    """Patient login/signup flow and dashboard loop (appointments, profile)."""
    patient = core.patients.Patient()
    clrscr()
    print("\nLogin or Signup:")
    patient.name = input("\nEnter your name: ")
    if patient.patient_exists() :
        while True :
            clrscr()
            print("\nPatient Dashboard\n\n1. Make an appointment\n2. View Appointments\n3. Update personal information\n4. Logout")
            choice = int(input("\nEnter your choice: "))
            clrscr()
            if choice == 1 :
                specialization = input("\nWhat specialization of doctor would you like to consult with? (Ortho/Surgeon/Physician) ")
                doctors = patient.get_doctors(specialization)
                if doctors == 0 :
                    # get_doctors signals "none found" with 0 rather than [].
                    print("No doctors of that specialization")
                else :
                    for i in range(len(doctors)) :
                        print(f"\n{i + 1} : {doctors[i]}")
                    selected_doctor = int(input("\nEnter choice of doctor: "))
                    while(True) :
                        clrscr()
                        time = int(input("\nWhen would you like to make an appointment?\n1. Today\n2. Tomorrow\n3. Day After tomorrow\nEnter choice: "))
                        # make_appointment's success is signalled by 1; retry otherwise.
                        if patient.make_appointment(doctors[selected_doctor-1][0], time) == 1 :
                            break
                        else :
                            print("\nInvalid Input")
                    input("\nAppointment made successfully\nPress enter to continue...")
            elif choice == 2 :
                appointments = patient.get_appointments()
                print(appointments)
                input("\nPress enter to continue...")
            elif choice == 3 :
                name = input("\nEnter your name: ")
                age = int(input("Enter your age: "))
                patient.update_user(name, age)
                input("\nDetails updated successfully\nPress enter to continue...")
            else :
                del patient
                break
    else :
        # Unknown name: create the account, then re-enter the flow recursively
        # so the new user can log in.
        age = int(input("Enter your age: "))
        gender = input("Gender ( male/female ): ")
        patient.create_user(age,gender)
        input("\nNew user created successfully\nPress enter to continue...")
        patients()
def doctors() :
    """Doctor login and dashboard loop (appointments, prescriptions)."""
    clrscr()
    name = input("\nLogin\n\nEnter your name: ")
    # Doctor(name) evaluates to 0 when the name is not in the database.
    doctor = core.employees.Doctor(name)
    while doctor != 0 :
        clrscr()
        print("\nDoctor's Dashboard\n\n1. See appointments\n2. Prescribe medicine\n3. Logout")
        choice = int(input("\nEnter your choice: "))
        if choice == 1 :
            appointments = doctor.get_appointments()
            print(appointments)
            input("\nPress enter to continue...")
        elif choice == 2 :
            patient_name = input("Enter name of patient: ")
            prescription = input("Enter prescription for patient: ")
            doctor.prescribe_medicine(patient_name, prescription)
            input("\nPrescription sent\nPress enter to continue...")
        else :
            del doctor
            break
    else :
        # while/else: runs only when the loop condition failed (unknown
        # doctor), not when the user logged out via break.
        print("Doctor name does not exist")
        input("\nPress enter to continue...")
def admins():
    """Admin dashboard loop: view staff and patients, add/update employees."""

    def read_employee_details():
        # Shared prompt sequence for the add and update flows.
        emp_name = input("\nEnter name: ")
        emp_role = input("Enter role: ")
        emp_spec = input("Enter specialization: ")
        return emp_name, emp_role, emp_spec

    admin = core.admins.Admin()
    while True:
        clrscr()
        print("\nAdmin's Dashboard\n\n1. View employees\n2. View patients\n3. Add Employee\n4. Update Employee\n5. Logout")
        option = int(input("\nEnter your choice: "))
        clrscr()
        if option == 1:
            print("\nEmployees")
            # view_all_employees() returns (doctors, nurses, pharmacists).
            staff = admin.view_all_employees()
            print(*staff)
            input("\nPress enter to continue...")
        elif option == 2:
            print("\nPatients")
            print(admin.view_all_patients())
            input("\nPress enter to continue...")
        elif option == 3:
            print("\nAdd Employee")
            admin.add_employee(*read_employee_details())
            input("\nEmployee Added Successfully\nPress enter to continue...")
        elif option == 4:
            print("\nUpdate Employee")
            admin.update_employee(*read_employee_details())
            input("\nEmployee Details Updated Successfully\nPress enter to continue...")
        else:
            # Logout: drop the session object and leave the loop.
            del admin
            break
def main():
    """Flow of control starts here: top-level role-selection menu."""
    # Dispatch table instead of an if/elif chain.
    dashboards = {1: patients, 2: doctors, 3: admins}
    while True:
        clrscr()
        print("\nLogin for:\n\n1. Patients\n2. Doctors\n3. Managers\n4. Exit")
        selection = int(input("\nEnter your choice: "))
        if selection in dashboards:
            dashboards[selection]()
        else:
            # Any other choice (4 included) clears the screen and exits.
            clrscr()
            return
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__" :
    main()
|
{"/cli.py": ["/core/employees.py", "/core/admins.py"]}
|
28,139
|
jgarte/py-nanoid
|
refs/heads/master
|
/nanoid/__init__.py
|
# coding: utf-8
from nanoid.generate import generate
from nanoid.non_secure_generate import non_secure_generate
__all__ = ['generate', 'non_secure_generate']
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,140
|
jgarte/py-nanoid
|
refs/heads/master
|
/nanoid/non_secure_generate.py
|
# coding: utf-8
from __future__ import unicode_literals
from __future__ import division
from random import random
from nanoid.resources import alphabet, size
def non_secure_generate(alphabet=alphabet, size=size):
    """Generate a random ID of *size* characters drawn from *alphabet*.

    Uses the non-cryptographic ``random.random`` PRNG, so the result is
    NOT suitable for secrets.
    """
    alphabet_len = len(alphabet)
    # int(random() * n) already yields an index in [0, n); the original's
    # trailing "| 0" (a JavaScript truncation idiom) was a no-op in Python
    # and has been removed.  str.join avoids quadratic "+=" concatenation.
    return ''.join(
        alphabet[int(random() * alphabet_len)] for _ in range(size)
    )


if __name__ == '__main__':
    # Ad-hoc smoke test when run directly.
    print(non_secure_generate())
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,141
|
jgarte/py-nanoid
|
refs/heads/master
|
/test/generate_test.py
|
from sys import maxsize
from unittest import TestCase
from nanoid import generate
class TestGenerate(TestCase):
    """Behavioural tests for the secure ``generate`` function."""

    def test_has_flat_distribution(self):
        """Every alphabet character should occur with roughly equal frequency."""
        count = 100 * 1000
        length = 5
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        chars = {}
        for _ in range(count):
            uid = generate(alphabet, length)
            for char in uid:
                chars[char] = chars.get(char, 0) + 1
        # Every character of the alphabet must have been used at least once.
        self.assertEqual(len(chars.keys()), len(alphabet))
        max_dist = 0
        min_dist = maxsize
        for char in chars:
            # Normalised frequency: 1.0 would be a perfectly flat distribution.
            distribution = (chars[char] * len(alphabet)) / float(count * length)
            if distribution > max_dist:
                max_dist = distribution
            if distribution < min_dist:
                min_dist = distribution
        self.assertLessEqual(max_dist - min_dist, 0.05)

    def test_has_no_collisions(self):
        """100k default-sized IDs should all be unique.

        Bug fix: the original ``assertIsNotNone(id in used)`` always passed,
        because ``in`` returns a bool (never None); ``assertNotIn`` actually
        detects collisions.
        """
        count = 100 * 1000
        used = {}
        for _ in range(count):
            uid = generate()
            self.assertNotIn(uid, used)
            used[uid] = True

    def test_has_options(self):
        """Custom alphabet and size are honoured, positionally and by keyword."""
        count = 100 * 1000
        for _ in range(count):
            self.assertEqual(generate('a', 5), 'aaaaa')
            self.assertEqual(len(generate(alphabet="12345a", size=3)), 3)
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,142
|
jgarte/py-nanoid
|
refs/heads/master
|
/nanoid/generate.py
|
# coding: utf-8
from __future__ import unicode_literals
from __future__ import division
from nanoid.algorithm import algorithm_generate
from nanoid.method import method
from nanoid.resources import alphabet, size
def generate(alphabet=alphabet, size=size):
    """Return a secure random ID of *size* characters from *alphabet*.

    Entropy comes from ``os.urandom`` (via ``algorithm_generate``); the
    shared ``method`` helper performs the unbiased character selection.
    """
    return method(algorithm_generate, alphabet, size)
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,143
|
jgarte/py-nanoid
|
refs/heads/master
|
/test/url_test.py
|
from unittest import TestCase
from nanoid.resources import alphabet
class TestURL(TestCase):
    """Sanity checks on the default URL-friendly alphabet."""

    def test_has_no_duplicates(self):
        # If a character occurred twice, its last occurrence (rindex) would
        # differ from the position we are visiting.
        for position, character in enumerate(alphabet):
            self.assertEqual(alphabet.rindex(character), position)

    def test_is_string(self):
        # The alphabet must be an exact str, not bytes or a subclass.
        self.assertEqual(type(alphabet), str)
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,144
|
jgarte/py-nanoid
|
refs/heads/master
|
/nanoid/method.py
|
from __future__ import unicode_literals
from __future__ import division
from math import ceil, log
def method(algorithm, alphabet, size):
    """Build an ID of *size* characters from *alphabet* using *algorithm*.

    *algorithm* is a callable that returns a sequence of random byte
    values.  Each byte is masked down to the nearest power-of-two range
    covering the alphabet, and masked values that still fall outside the
    alphabet are rejected, keeping the character distribution unbiased.
    """
    alphabet_len = len(alphabet)
    # Smallest (2^k - 1) bitmask that covers every alphabet index.
    mask = 1
    if alphabet_len > 1:
        mask = (2 << int(log(alphabet_len - 1) / log(2))) - 1
    # Bytes requested per attempt; 1.6 adds head-room for rejections.
    step = int(ceil(1.6 * mask * size / alphabet_len))
    generated = ''
    while True:
        chunk = algorithm(step)
        for position in range(step):
            index = chunk[position] & mask
            # Reject masked values outside the alphabet.
            if index < alphabet_len and alphabet[index]:
                generated += alphabet[index]
                if len(generated) == size:
                    return generated
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,145
|
jgarte/py-nanoid
|
refs/heads/master
|
/test/method_test.py
|
# coding: utf-8
from unittest import TestCase
from nanoid.method import method
from nanoid.resources import size
class TestMethod(TestCase):
def test_generates_random_string(self):
sequence = [2, 255, 3, 7, 7, 7, 7, 7, 0, 1]
def rand(size=size):
random_bytes = []
for i in range(0, size, len(sequence)):
random_bytes += sequence[0:size-i]
return random_bytes
self.assertEqual(method(rand, 'abcde', 4), 'cdac')
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,146
|
jgarte/py-nanoid
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
# The PyPI long description is the project README, rendered as Markdown.
with open("README.md", "r") as f:
    long_description = f.read()

# Package metadata for PyPI / pip.
setup(
    name='nanoid',
    version='2.0.0',
    author='Paul Yuan',
    author_email='puyuan1@gmail.com',
    description='A tiny, secure, URL-friendly, unique string ID generator for Python',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/puyuan/py-nanoid',
    license='MIT',
    packages=['nanoid'],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Operating System :: OS Independent',
        'Topic :: Utilities'
    ],
    # Install as a regular directory rather than a zipped egg.
    zip_safe=False
)
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,147
|
jgarte/py-nanoid
|
refs/heads/master
|
/nanoid/algorithm.py
|
from __future__ import unicode_literals
from __future__ import division
from os import urandom
def algorithm_generate(random_bytes):
    """Return *random_bytes* cryptographically secure bytes as a bytearray."""
    secure_buffer = urandom(random_bytes)
    return bytearray(secure_buffer)
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,148
|
jgarte/py-nanoid
|
refs/heads/master
|
/test/non_secure_test.py
|
from sys import maxsize
from unittest import TestCase
from nanoid import non_secure_generate
from nanoid.resources import alphabet
class TestNonSecure(TestCase):
    """Behavioural tests for the fast, non-cryptographic generator."""

    def test_changes_id_length(self):
        self.assertEqual(len(non_secure_generate(size=10)), 10)

    def test_generates_url_friendly_id(self):
        """Default IDs are 21 chars, all drawn from the URL-safe alphabet."""
        for _ in range(10):
            uid = non_secure_generate()
            self.assertEqual(len(uid), 21)
            for char in uid:
                self.assertNotEqual(alphabet.find(char), -1)

    def test_has_flat_distribution(self):
        """Every alphabet character should occur with roughly equal frequency."""
        count = 100 * 1000
        length = len(non_secure_generate())
        chars = {}
        for _ in range(count):
            uid = non_secure_generate()
            for char in uid:
                chars[char] = chars.get(char, 0) + 1
        self.assertEqual(len(chars.keys()), len(alphabet))
        max_dist = 0
        min_dist = maxsize
        for char in chars:
            # Normalised frequency: 1.0 would be a perfectly flat distribution.
            distribution = (chars[char] * len(alphabet)) / float(count * length)
            if distribution > max_dist:
                max_dist = distribution
            if distribution < min_dist:
                min_dist = distribution
        self.assertLessEqual(max_dist - min_dist, 0.05)

    def test_has_no_collisions(self):
        """Bug fix: ``assertIsNotNone(id in used)`` always passed (a bool is
        never None); ``assertNotIn`` actually detects duplicate IDs."""
        count = 100 * 1000
        used = {}
        for _ in range(count):
            uid = non_secure_generate()
            self.assertNotIn(uid, used)
            used[uid] = True
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,149
|
jgarte/py-nanoid
|
refs/heads/master
|
/test/algorithm_test.py
|
from unittest import TestCase
from nanoid.algorithm import algorithm_generate
class TestAlgorithm(TestCase):
    """Checks for the os.urandom-backed byte source."""

    def test_generates_random_buffers(self):
        seen = {}
        buffer = algorithm_generate(10000)
        self.assertEqual(len(buffer), 10000)
        for value in buffer:
            # Tally occurrences and check each value is a byte-sized int.
            seen[value] = seen.get(value, 0) + 1
            self.assertEqual(type(value), int)
            self.assertLessEqual(value, 255)
            self.assertGreaterEqual(value, 0)

    def test_generates_small_random_buffers(self):
        self.assertEqual(len(algorithm_generate(10)), 10)
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,150
|
jgarte/py-nanoid
|
refs/heads/master
|
/nanoid/resources.py
|
# Default URL-safe alphabet (64 symbols) used when callers pass no alphabet.
alphabet = '_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Default generated-ID length.
size = 21
|
{"/nanoid/__init__.py": ["/nanoid/generate.py", "/nanoid/non_secure_generate.py"], "/nanoid/non_secure_generate.py": ["/nanoid/resources.py"], "/test/generate_test.py": ["/nanoid/__init__.py"], "/nanoid/generate.py": ["/nanoid/algorithm.py", "/nanoid/method.py", "/nanoid/resources.py"], "/test/url_test.py": ["/nanoid/resources.py"], "/test/method_test.py": ["/nanoid/method.py", "/nanoid/resources.py"], "/test/non_secure_test.py": ["/nanoid/__init__.py", "/nanoid/resources.py"], "/test/algorithm_test.py": ["/nanoid/algorithm.py"]}
|
28,169
|
JulianH99/cor-pattern
|
refs/heads/master
|
/main.py
|
from cor_pattern.currencies import UsDollarCurrencyHandler, EurCurrencyHandler, YenCurrencyHandler
from cor_pattern.currency_type import CurrencyType
from math import floor
def main():
    """Run one conversion: ask for a currency and amount, convert to COP.

    Builds the chain of responsibility Yen -> Eur -> USDollar; the first
    handler whose currency matches performs the conversion, otherwise the
    request travels down the chain.
    """
    usdollar_handler = UsDollarCurrencyHandler()
    eur_handler = EurCurrencyHandler(usdollar_handler)
    yen_handler = YenCurrencyHandler(eur_handler)

    (value, currency_type_choice) = get_user_input()
    result = yen_handler.convert(value, currency_type_choice)

    if isinstance(result, (int, float)):
        # Bug fix: the original tested isinstance(result, int), which made
        # floor() a no-op and sent float results (e.g. Yen, proportion
        # 28.39) to the bare error print below.  Floor any numeric result
        # so the COP amount is shown as a whole number.
        result = floor(result)
        print("{} {} is equal to {} COP".format(value, currency_type_choice.name, result))
    else:
        # convert() returned a validation / no-handler error message string.
        print(result)
def get_user_input():
    """Prompt for a currency choice and an integer amount.

    Returns ``(value, currency_type)`` on success, or ``(None, None)`` when
    either input is invalid (the conversion chain rejects None values and
    reports the error).
    """
    print("Currency converter 1.0")
    # Typo fix: the prompt previously read "you cant to convert".
    print("Please select the currency you want to convert to COP")
    for currency_type in CurrencyType:
        print("{}: {}".format(currency_type.value, currency_type.name))
    # Robustness: the original crashed (ValueError / IndexError) on a
    # non-numeric or out-of-range menu choice; treat both as invalid input.
    try:
        currency_type_choice = int(input())
    except ValueError:
        print("cant convert to int")
        return (None, None)
    if not 1 <= currency_type_choice <= len(list(CurrencyType)):
        print("invalid currency choice")
        return (None, None)
    currency_type_choice = get_currency_from_list(currency_type_choice)
    value = input("Enter the value you want to convert: ")
    try:
        value = int(value)
    except ValueError:
        print("cant convert to int")
        return (None, None)
    return (value, currency_type_choice)
def get_currency_from_list(index):
    """Map a 1-based menu choice onto the corresponding CurrencyType member."""
    return list(CurrencyType)[index - 1]
# Run the converter only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
{"/main.py": ["/cor_pattern/currencies.py", "/cor_pattern/currency_type.py"], "/cor_pattern/currencies.py": ["/cor_pattern/currency_type.py"]}
|
28,170
|
JulianH99/cor-pattern
|
refs/heads/master
|
/cor_pattern/currencies.py
|
from abc import ABCMeta, abstractmethod
from cor_pattern.currency_type import CurrencyType
class BaseCurrencyHandler(metaclass=ABCMeta):
    """
    Abstract link in the currency-conversion chain of responsibility.

    Each concrete handler converts one currency to COP and forwards any
    other request to ``next_currency_handler``.
    """

    def __init__(self, next_currency_handler=None):
        # Next link in the chain; None terminates the chain.
        self.next_currency_handler = next_currency_handler

    def convert(self, value, c_type):
        """
        Validate the request, then delegate to ``handle_currency``.

        Returns the converted amount, or an error message string when the
        inputs are invalid.
        """
        # Guard clauses: reject bad input before touching the chain.
        if value is None or not isinstance(value, int):
            return "The value cannot be none or non integer type"
        if c_type is None or not isinstance(c_type, CurrencyType):
            return "Currency type cannot be none or not a valid currency type"
        return self.handle_currency(value, c_type)

    @abstractmethod
    def handle_currency(self, value, c_type):
        """
        Convert *value* to COP if this handler owns *c_type*; otherwise
        pass the request along the chain.

        :param c_type: Has to be instance of CurrencyType Enum
        :param value: int value to be converted to COP
        """
        pass

    @classmethod
    def handle_no_currency(cls, value, c_type):
        # End of chain: no handler recognised the currency.
        return "Cant convert value {} from currency of type {}. No handler available".format(value, c_type.name)
class UsDollarCurrencyHandler(BaseCurrencyHandler):
    """Converts US dollars to COP; forwards any other currency."""

    # COP per 1 USD.
    base_proportion = 3115

    def handle_currency(self, value, c_type):
        if c_type != CurrencyType.USDollar:
            # Not ours: forward along the chain, or fail at its end.
            if self.next_currency_handler is None:
                return BaseCurrencyHandler.handle_no_currency(value, c_type)
            return self.next_currency_handler.convert(value, c_type)
        return self.base_proportion * value
class YenCurrencyHandler(BaseCurrencyHandler):
    """Converts Japanese yen to COP; forwards any other currency."""

    # COP per 1 JPY (note: float, so conversions can yield floats).
    base_proportion = 28.39

    def handle_currency(self, value, c_type):
        if c_type != CurrencyType.Yen:
            # Not ours: forward along the chain, or fail at its end.
            if self.next_currency_handler is None:
                return BaseCurrencyHandler.handle_no_currency(value, c_type)
            return self.next_currency_handler.convert(value, c_type)
        return self.base_proportion * value
class EurCurrencyHandler(BaseCurrencyHandler):
    """Converts euros to COP; forwards any other currency."""

    # COP per 1 EUR.
    base_proportion = 3539

    def handle_currency(self, value, c_type):
        if c_type != CurrencyType.Eur:
            # Not ours: forward along the chain, or fail at its end.
            if self.next_currency_handler is None:
                return BaseCurrencyHandler.handle_no_currency(value, c_type)
            return self.next_currency_handler.convert(value, c_type)
        return self.base_proportion * value
|
{"/main.py": ["/cor_pattern/currencies.py", "/cor_pattern/currency_type.py"], "/cor_pattern/currencies.py": ["/cor_pattern/currency_type.py"]}
|
28,171
|
JulianH99/cor-pattern
|
refs/heads/master
|
/cor_pattern/currency_type.py
|
from enum import Enum, auto
class CurrencyType(Enum):
    """Currencies the converter knows about (values double as menu numbers)."""

    # Explicit values match what auto() assigned: 1-based, declaration order.
    USDollar = 1
    Yen = 2
    Eur = 3
    Cad = 4
|
{"/main.py": ["/cor_pattern/currencies.py", "/cor_pattern/currency_type.py"], "/cor_pattern/currencies.py": ["/cor_pattern/currency_type.py"]}
|
28,172
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/pre.py
|
import cv2 as cv
import numpy as np
# Read a sample image from the dataset and show it.
img = cv.imread('dataset/9.jpg')
cv.imshow('9', img)
# Convert the image to grayscale.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
# Blur to reduce noise before edge detection (Gaussian here; several
# alternatives exist -- see Smoothing.py).  NOTE(review): the blur is
# applied to the colour image, not the grayscale one -- confirm intended.
blur = cv.GaussianBlur(img, (3,3), cv.BORDER_DEFAULT) # (3,3) is the kernel size and must be odd; a bigger kernel, e.g. (7,7), increases the blur
cv.imshow('Blur', blur)
# Canny edge detector: 125/175 are the hysteresis thresholds.
canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)
# Dilate the edge map (thickens the edges); (3,3) is the structuring element.
dilated = cv.dilate(canny, (3,3), iterations=1) # several iterations are possible
cv.imshow('Dilated', dilated)
# Erode the dilated result to shrink the edges back toward their original
# extent (erosion is the inverse of dilation).
eroded = cv.erode(dilated, (3,3), iterations=1)
cv.imshow('Eroded', eroded)
# Block until a key is pressed so the windows stay open.
cv.waitKey(0)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,173
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/read.py
|
import cv2 as cv #import OpenCV
img = cv.imread('dataset/142.jpg') #load image 142 from the dataset (BGR)
cv.imshow('142', img) #display it in a window titled "142"
cv.waitKey(0) #block until any key is pressed so the window stays open
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,174
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/compiledpreprocessingtest2.py
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import imutils as im
from LoadShowBoundingBox import*
#read in image from dataset and convert to grayscale
# Read an image from the dataset and convert it to grayscale.
# Bug fix: LoadShowBoundingBox.getImage() requires a file number; the
# original called it with no argument (TypeError).  394 matches the image
# used by the equivalent pipeline in PyTesseractTest.py.
img = getImage(394)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
plt.imshow(cv.cvtColor(gray, cv.COLOR_BGR2RGB))

# Noise removal with a bilateral filter (smooths while preserving edges),
# then Canny edge detection.
bilateral = cv.bilateralFilter(gray, 11, 17, 17)
edged = cv.Canny(bilateral, 30, 200)
plt.imshow(cv.cvtColor(edged, cv.COLOR_BGR2RGB))

# Find contours from the edge map.  CHAIN_APPROX_SIMPLE keeps only the
# contour's key points instead of every boundary pixel.
keypoints = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
contours = im.grab_contours(keypoints)
# Keep only the 10 largest contours by area.
contours = sorted(contours, key=cv.contourArea, reverse=True)[0:10]

# Pick the first contour that approximates to a quadrilateral -- most
# likely the number plate.
location = None
for contour in contours:
    # 10 = approximation tolerance (how fine-grained the polygon is).
    approx = cv.approxPolyDP(contour, 10, True)
    if len(approx) == 4:
        location = approx
        break

# Mask everything except the plate region and overlay it on the image.
mask = np.zeros(gray.shape, np.uint8)
lp = cv.drawContours(mask, [location], 0, 255, -1)
lp = cv.bitwise_and(img, img, mask=mask)
plt.imshow(cv.cvtColor(lp, cv.COLOR_BGR2RGB))
plt.show()

# Crop to the bounding box of the unmasked (plate) pixels.
(x, y) = np.where(mask == 255)
(x1, y1) = (np.min(x), np.min(y))
(x2, y2) = (np.max(x), np.max(y))
print(x1, y1, x2, y2)
cropped_lp = gray[x1:x2 + 1, y1:y2 + 1]  # +1 gives a one-pixel buffer
plt.imshow(cv.cvtColor(cropped_lp, cv.COLOR_BGR2RGB))
plt.show()
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,175
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/Smoothing.py
|
import cv2 as cv
#read in image from dataset
# Read a sample image from the dataset and show the original.
img = cv.imread('dataset/9.jpg')
cv.imshow('9', img)
# Averaging: each pixel becomes the mean of its (3,3) neighbourhood.
average = cv.blur(img, (3,3))
cv.imshow('Average Blur', average)
# Gaussian blur: like averaging but neighbours are weighted, giving a
# gentler, more natural result; the final 0 lets OpenCV derive the sigma
# (standard deviation) from the kernel size.
gauss = cv.GaussianBlur(img, (3,3), 0)
cv.imshow('Guass Blur', gauss)
# Median blur: replaces each pixel with the neighbourhood median -- more
# effective at removing noise.  Takes an int (3 == 3x3), not a tuple.
median = cv.medianBlur(img, 3)
cv.imshow('Median Blur', median)
# Bilateral filter: blurs while preserving edges.  Arguments are diameter,
# sigma colour, sigma space (not a kernel size).
bilateral = cv.bilateralFilter(img, 10, 35, 25)
cv.imshow('Bileratal', bilateral)
# Block until a key is pressed so the windows stay open.
cv.waitKey(0)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,176
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Test.py
|
import pandas as pd
import numpy as np
from LoadShowBoundingBox import showImage
import cv2 as cv
#img = cv.imread("dataset/142.jpg")
# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# edge = cv.Canny(gray, 5,5)
# cv.imshow("142", img)
# cv.imshow("142_gray", gray)
# cv.imshow("Canny", edge)
# cv.waitKey(0)
#showImage(60)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,177
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/LoadShowBoundingBox.py
|
import os
import numpy as np
import cv2 as cv
import xml.etree.ElementTree as et
import pandas as pd
import random
DIR = r"dataset\\"
#DIR = './dataset/'
# Get Plate Cords Returns the coordinates of a number plate for the i-th image in the folder.
def getPlateCords(filenum):
    """Return the four plate bounding-box coordinates for image *filenum*.

    Parses ``dataset/<filenum>.xml``; root[6][4] holds the box element and
    its four children are the coordinates (two opposite corners, as used
    by OverlayPlateBox).
    """
    tree = et.parse(DIR + str(filenum) + ".xml")
    bndbox = tree.getroot()[6][4]
    return [int(bndbox[i].text) for i in range(4)]
# Define a function to load an image based on its number
def getImage(filenum):
    """Load dataset image number *filenum* (as read by cv.imread)."""
    path = DIR + str(filenum) + ".jpg"
    return cv.imread(path)
# OverlayPlateBox returns an image with the bounding box overlayed.
def OverlayPlateBox(filenum):
    """Return image *filenum* with its annotated plate box drawn in green."""
    frame = getImage(filenum)
    x1, y1, x2, y2 = getPlateCords(filenum)
    cv.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
    return frame
def OverlayPlateBox_img(img, number):
    """Draw the annotated plate box of image *number* onto *img* (in place)."""
    x1, y1, x2, y2 = getPlateCords(number)
    cv.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
    return img
# Show plate box with an additional bounding box
def ShowPredBox(img, bounds, num):
    """Overlay the annotated box (green) and a predicted box *bounds* (red)."""
    annotated = OverlayPlateBox_img(img, num)
    cv.rectangle(annotated, (bounds[0], bounds[1]), (bounds[2], bounds[3]), (0, 0, 255), 3)
    return annotated
# Show the image using OpenCV
def showImage(filenum):
    """Display image *filenum* with its plate box; blocks until a key press."""
    cv.imshow("Car", OverlayPlateBox(filenum))
    cv.waitKey(0)
# Return a random image based on its type (provide function with 'g', 'a', 'p' or 't')
def loadRandomImage(type):
    """Return a random dataset image whose category matches *type*.

    *type* is a category code ('g', 'a', 'p' or 't') looked up in
    Img_categories.csv.  Keeps drawing random indices in [0, 708] until
    one of the requested category is found.
    """
    imageSelection = pd.read_csv("Img_categories.csv")
    while True:
        candidate = random.randint(0, 708)
        if imageSelection.Category.iloc[candidate] == type:
            print("Image number selected " + str(candidate))
            return getImage(candidate)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,178
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/sift.py
|
import cv2
from matplotlib import pyplot as plt
# Bug fix: cv2.imread's second argument is an imread flag, not a colour-
# conversion code.  cv2.COLOR_BGR2GRAY (== 6) was being interpreted as the
# flags IMREAD_ANYDEPTH | IMREAD_ANYCOLOR; use IMREAD_GRAYSCALE to actually
# load the images in grayscale.
img1 = cv2.imread('./dataset/50.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('./dataset/51.jpg', cv2.IMREAD_GRAYSCALE)
# Create the ORB keypoint detector (the original comment said SIFT, but
# ORB is what is used).
orb = cv2.ORB_create()
# Detect keypoints and compute binary descriptors for both images.
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# Brute-force matcher with Hamming distance (appropriate for ORB's binary
# descriptors); crossCheck keeps only mutually-best matches.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors between the two images.
matches = bf.match(des1, des2)
# Sort so the best (smallest-distance) matches come first.
matches = sorted(matches, key=lambda x: x.distance)
# Draw the 10 best matches side by side.
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
plt.imshow(img3), plt.show()
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,179
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/PyTesseractTest.py
|
import pytesseract as pt
import cv2 as cv
import numpy as np
from LoadShowBoundingBox import *
import matplotlib.pyplot as plt
import imutils as im
import re
# NOTE(review): hard-coded per-user Tesseract path -- will not work on
# other machines; consider reading it from configuration.
pt.pytesseract.tesseract_cmd = r'C:\Users\danie\tesseract.exe'
# OCR engine mode 3 (default) with page segmentation mode 6 (single block).
custom_config = r'--oem 3 --psm 6'
# Read image 394 from the dataset and convert it to grayscale.
img = getImage(394)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Noise removal with a bilateral filter (smooths while preserving edges),
# then Canny edge detection.
bilateral = cv.bilateralFilter(gray, 11, 17, 17)
edged = cv.Canny(bilateral, 30, 200)
# Find contours from the edge map; CHAIN_APPROX_SIMPLE keeps only the
# contour's key points instead of every boundary pixel.
keypoints = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
contours = im.grab_contours(keypoints)
# Keep only the 10 largest contours by area.
contours = sorted(contours, key = cv.contourArea, reverse = True)[0:10]
# Pick the first contour that approximates to a quadrilateral -- most
# likely the number plate (10 = approximation tolerance).
location = None
for contour in contours:
    approx = cv.approxPolyDP(contour, 10, True)
    if len(approx) == 4:
        location = approx
        break
# Mask everything except the plate region and overlay it on the image.
mask = np.zeros(gray.shape, np.uint8)
lp = cv.drawContours(mask,[location],0,255,-1,)
lp = cv.bitwise_and(img,img,mask=mask)
# Crop to the bounding box of the unmasked (plate) pixels; +1 adds a
# one-pixel buffer.
(x,y) = np.where(mask==255)
(x1, y1) = (np.min(x), np.min(y))
(x2, y2) = (np.max(x), np.max(y))
cropped_lp = gray[x1:x2+1, y1:y2+1]
# Binarise the crop before OCR (threshold at 128).
ret, cropped_lp_binary, = cv.threshold(cropped_lp, 128, 255, cv.THRESH_BINARY)
cv.imshow("Image", cropped_lp_binary)
# OCR the plate with English + Arabic models, then strip everything but
# digits from each whitespace-separated token.
text = pt.image_to_string(cropped_lp_binary, config=custom_config, lang='eng+ara')
text = re.sub('[^0-9] ', '', text)
text = text.split()
text = [re.sub('[^0-9]', '', w) for w in text]
# NOTE(review): concatenating tokens 0 and 2 assumes a fixed OCR token
# layout for this plate; it will raise IndexError on other outputs.
text = str(text[0]) + str(text[2])
print(text)
cv.waitKey(0)
# Old Code
# image = getImage(394)
# text = pt.image_to_string(image)
# print(text)
# cv.imshow("Image", image)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,180
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/histogram.py
|
import cv2 as cv
import matplotlib.pyplot as plt
from numpy import histogram
#histogram computation- visualise the distribution of pixel intensities in an image
# Histogram computation: visualise the distribution of pixel intensities.
img = cv.imread('dataset/9.jpg')
cv.imshow('9', img)
# Histograms can be computed for both RGB and grayscale images; the
# grayscale variant below is kept for reference but disabled.
#gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
#cv.imshow('Gray', gray)
#grayscale histogram
#calcHist takes a LIST of images (hence the []), channel index, optional
#mask, bin count and value range.
#gray_hist = cv.calcHist([gray], [0], None, [256], [0,256])
#plt.figure()
#plt.title('Grayscale Histogram')
#plt.xlabel('Bins')
#plt.ylabel('# of pixels')
#plt.plot(gray_hist)
#plt.xlim([0,256])
#plt.show()
# Colour histogram: one curve per BGR channel.
plt.figure()
plt.title('Colour Histogram')
plt.xlabel('Bins')
plt.ylabel('# of pixels')
colors = ('b', 'g', 'r')
for i,col in enumerate(colors):
    # Channel i of the image, 256 bins over the 0-255 range.
    hist = cv.calcHist([img], [i], None, [256], [0, 256])
    plt.plot(hist, color=col)
    plt.xlim([0, 256])
#plt.show()
cv.waitKey(0)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,181
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/resize.py
|
import cv2 as cv
from LoadShowBoundingBox import getImage
from LoadShowBoundingBox import*
# Demonstrates resizing and cropping a single dataset image.
img = cv.imread('dataset/99.jpg')
print('Original Dimensions : ', img.shape)

scale_percent = 75  # percent of the original image size
# img.shape is (rows=height, cols=width, channels).
width = int(img.shape[1] * scale_percent / 100)   # scaled width
height = int(img.shape[0] * scale_percent / 100)  # scaled height
dim = (width, height)

# cv.resize expects (width, height); INTER_AREA suits downscaling.
resized = cv.resize(img, dim, interpolation=cv.INTER_AREA)
print('Resized Dimensions : ', resized.shape)

# Crop the original image. Numpy indexing is [rows, cols] = [y-range, x-range],
# so rows are bounded by the (scaled) height and columns by the (scaled) width.
# (The original code had these swapped: img[75:width, 160:height].)
img_cropped = img[75:height, 160:width]
print('Cropped Dimensions : ', img_cropped.shape)

# Test
cv.imshow("Cropped Image", img_cropped)
cv.imshow("Resized Image", resized)
cv.waitKey(0)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,182
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/CycleThroughImages.py
|
import cv2 as cv
import os
from time import sleep
# NOTE(review): raw string of literal backslashes — on most systems this path will
# not resolve; the commented alternative './dataset/' looks like the working value. Confirm.
DIR = r"\\dataset\\"
# DIR = './dataset/'
# Update two values based on image ranges
for i in range(0,100):
    # Show each image in its own window; pressing any key advances to the next.
    cv.imshow(str(i)+" IMG", cv.imread(DIR+str(i)+".jpg"))
    cv.waitKey(0)
# Label Categories (used when manually tagging image quality)
# Good : g
# Average : a
# bad : b
# Terrible : t
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,183
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/DetectandEvaluateLicensePlate.py
|
# Standard Python Library Imports
import numpy as np
import cv2 as cv
import pandas as pd
import os
import sklearn as sk
# Custom Code Imports
from Evaluate import *
from LoadShowBoundingBox import *
from ImagePipeline1 import *
# Quality labels excluded from evaluation (only the remaining images are scored).
EXCLUDE_CATS = ['a', 'b', 't']
# IoU above this threshold counts as a correct (true-positive) detection.
IOU_THRESHOLD = 0.6

Image_Info = pd.read_csv('Img_categories.csv')

# Build the evaluation subset. Each row is a mutable record:
# [string number, image number, true box, predicted box, IoU, classification].
ImageDataset = []
for i in range(Image_Info.shape[0]):
    if Image_Info.Category.iloc[i] not in EXCLUDE_CATS:
        ImageDataset.append([str(i), i, 0, 0, 0, 0])

# Fill in the ground-truth box, predicted box, IoU and classification per image.
# (The original used enumerate() but never used the index.)
for imgdata in ImageDataset:
    img = getImage(imgdata[1])
    imgdata[2] = getPlateCords(imgdata[1])          # ground truth from xml
    imgdata[3] = processImg(img)                    # predicted bounding box
    imgdata[4] = calculateIoU(imgdata[2], imgdata[3])
    imgdata[5] = calculateClassfication(imgdata[4], IOU_THRESHOLD)

# Present results as a DataFrame for readability in the terminal.
imageData_df = pd.DataFrame(ImageDataset, columns=['StringNumber','Number', 'trueBox','predBox','IOU','Classification'])
print(imageData_df.head())
print(imageData_df[(imageData_df.Number == 394)])
print(imageData_df.Classification.value_counts())
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,184
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/contour.py
|
import cv2 as cv
import numpy as np
# Contour-detection walkthrough on a single dataset image.
img = cv.imread('dataset/14.jpg')

blank = np.zeros(img.shape, dtype='uint8')  # canvas to draw the contours onto

# Grayscale conversion (kept for reference; the blur below works on the colour image).
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# Gaussian blur to reduce noise before edge detection. Kernel must be odd;
# a larger kernel (e.g. (7,7)) blurs more.
# NOTE(review): blurs the colour image, not `gray` — confirm intended.
blur = cv.GaussianBlur(img, (3,3), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)

# Canny edge detection with hysteresis thresholds 125/175.
canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)

# Binarise: values above 125 become 255 (white), the rest 0 (black).
ret, thresh = cv.threshold(canny, 125, 255, cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)

# RETR_LIST returns every contour; CHAIN_APPROX_NONE keeps all contour points
# (CHAIN_APPROX_SIMPLE would compress straight segments to their endpoints).
contours, hierarchies = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)

# Fix: the original f-string was f'len{(contours)}...', which printed the whole
# contour list instead of its length.
print(f'{len(contours)} contour(s) found!')

# Draw all contours in red on the blank canvas.
cv.drawContours(blank, contours, -1, (0,0,255), 1)
cv.imshow('Contours Drawn', blank)

cv.waitKey(0)  # wait for a key press before closing the windows
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,185
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Evaluate.py
|
import os
import numpy as np
import cv2 as cv
from LoadShowBoundingBox import *
# Setting out a general structure to this document
# Define a function that when given an image number, and a predicted coordinate location, return a classification
def imgAccuracy(img_num, pred_cords, thresh=0.6):
    """Classify a predicted bounding box against image *img_num*'s ground truth.

    Returns 1 (true positive) when IoU(true box, predicted box) exceeds
    *thresh*, else 0. The original passed the two boxes directly to
    calculateClassfication, which expects (iou, thresh) — the IoU was never
    computed. *thresh* defaults to 0.6, matching IOU_THRESHOLD in the caller.
    """
    true_box = getPlateCords(img_num)
    iou = calculateIoU(true_box, pred_cords)
    return calculateClassfication(iou, thresh)
# Define a function that evalutes the category of outcome.
# If IoU > Threshold : True Positive, 1
# If IoU < Threshold : False Positive 0
# If IoU 0 Then False Negative 0
def calculateClassfication(iou, thresh):
    """Map an IoU score to a binary outcome: 1 when it beats *thresh*, else 0."""
    return int(iou > thresh)
# Define a function that when given two sets of coordinates, returns an IoU score.
def calculateIoU(boxA, boxB):
    """Intersection-over-Union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices, hence the +1 when
    converting corner differences to widths/heights.
    """
    # Overlap rectangle: largest top-left corner to smallest bottom-right corner.
    overlap_w = min(boxA[2], boxB[2]) - max(boxA[0], boxB[0]) + 1
    overlap_h = min(boxA[3], boxB[3]) - max(boxA[1], boxB[1]) + 1
    intersection = max(0, overlap_w) * max(0, overlap_h)

    def _area(box):
        # Inclusive-pixel area of one box.
        return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)

    union = float(_area(boxA) + _area(boxB) - intersection)
    return intersection / union
# Module-level smoke test: classifies a hand-picked predicted box against
# image 0's ground truth. Runs (and hits the dataset) on import.
print(imgAccuracy(0, [1200, 2000, 2000, 2300]))
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,186
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/bitwise.py
|
import cv2 as cv
import numpy as np
# Demo of OpenCV bitwise operations on two filled binary masks.
base = np.zeros((400, 400), dtype='uint8')
rect_mask = cv.rectangle(base.copy(), (30, 30), (370, 370), 255, -1)
circ_mask = cv.circle(base.copy(), (200, 200), 200, 255, -1)

cv.imshow('Rectangle', rect_mask)
cv.imshow('Circle', circ_mask)

# AND -> overlap only.
cv.imshow('Bitwise AND', cv.bitwise_and(rect_mask, circ_mask))
# OR -> union of both shapes.
cv.imshow('Bitwise OR', cv.bitwise_or(rect_mask, circ_mask))
# XOR -> everything except the overlap.
cv.imshow('Bitwise XOR', cv.bitwise_xor(rect_mask, circ_mask))
# NOT -> inverts a single mask (white becomes black and vice versa).
cv.imshow('Rectangle NOT', cv.bitwise_not(rect_mask))

cv.waitKey(0)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,187
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/Reference/CNN.py
|
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
# Haar cascade for licence-plate detection, loaded from the repo root.
# NOTE(review): './datset.xml' looks like a typo for './dataset.xml' — confirm the file name.
lp_Cascade = cv.CascadeClassifier('./datset.xml')
def lp_detect(img, text=''):
    """Detect a licence plate via the Haar cascade and annotate it.

    Returns (annotated image, plate crop). The crop is None when no plate is
    detected. Fixes two defects in the original: it referenced the undefined
    name `lp_Img` (NameError on every call), and `lp_plate` was unbound when
    detectMultiScale found nothing.
    """
    lp_img = img.copy()
    roi = img.copy()
    lp_plate = None  # stays None if no plate is found
    # Bounding rectangles (x, y, w, h) of all detected plate candidates.
    lp_Rect = lp_Cascade.detectMultiScale(lp_img, scaleFactor=1.2, minNeighbors=7)
    for (x, y, w, h) in lp_Rect:
        roi_ = roi[y:y+h, x:x+w, :]  # region of interest (kept for parity with original)
        lp_plate = roi[y:y+h, x:x+w, :]
        # Slightly inset green rectangle around the detection.
        cv.rectangle(lp_img, (x+2, y), (x+w-3, y+h-5), (0, 255, 0), 3)
        if text != '':
            lp_img = cv.putText(lp_img, text, (x-w//2, y-h//2),
                                cv.FONT_HERSHEY_COMPLEX_SMALL, 0.5, (51, 181, 155), 1, cv.LINE_AA)
    return lp_img, lp_plate
def display_img(img):
    """Show *img* with matplotlib, converting OpenCV's BGR order to RGB first."""
    rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    plt.imshow(rgb)
    plt.show()
# Run the cascade-based detector on one image and display the results.
inputimg = cv.imread('./dataset/50.jpg')
inpimg, lp = lp_detect(inputimg)
display_img(inpimg)
display_img(lp)

# --- Contour-based plate detection on the same image ---
img = cv.imread('./dataset/50.jpg')
cv.imshow('50', img)

blank = np.zeros(img.shape, dtype='uint8')

# Grayscale conversion.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

# Bilateral filter removes noise while preserving edges.
bilateral = cv.bilateralFilter(gray, 13, 15, 15)
cv.imshow('Bilateral', bilateral)

# Canny edge detector.
edged = cv.Canny(bilateral, 170, 200)
cv.imshow('Canny Edges', edged)

# Find contours from the edge map; keep the 90 largest by area.
contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)[0]
contours = sorted(contours, key = cv.contourArea, reverse = True)[:90]

# Fix: the original initialised `NumerPlateCnt` (typo), so the
# `NumberPlateCnt is not None` guard below raised NameError when no
# 4-corner contour was found.
NumberPlateCnt = None
count = 0
# Take the first (largest) contour whose polygonal approximation has 4 corners.
for c in contours:
    peri = cv.arcLength(c, True)
    approx = cv.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:  # Select the contour with 4 corners
        NumberPlateCnt = approx  # approximate number-plate contour
        x,y,w,h = cv.boundingRect(c)
        ROI = img[y:y+h, x:x+w]
        break

if NumberPlateCnt is not None:
    # Draw the selected contour on the original image.
    cv.drawContours(img, [NumberPlateCnt], -1, (0,255,0), 3)

cv.waitKey(0)
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,188
|
nicknguyenuts/IPPRProject
|
refs/heads/main
|
/ImagePipeline1.py
|
# Import Standard Python Libraries
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import imutils as im
# Create Image processing pipeline function
def processImg(img):
    """Locate a licence-plate-like quadrilateral in *img*.

    Returns [x_min, y_min, x_max, y_max] (x first, matching the xml
    ground-truth ordering), or [0, 0, 0, 0] when no 4-corner candidate
    is found.
    """
    # Grayscale -> edge-preserving bilateral denoise -> Canny edges.
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    bilateral = cv.bilateralFilter(gray, 11, 17, 17)
    edged = cv.Canny(bilateral, 30, 200)
    # CHAIN_APPROX_SIMPLE compresses straight contour segments to endpoints.
    keypoints = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    contours = im.grab_contours(keypoints)
    # The ten largest contours by area are the plate candidates.
    contours = sorted(contours, key=cv.contourArea, reverse=True)[0:10]
    # First candidate whose polygonal approximation has exactly 4 corners wins.
    location = None
    for contour in contours:
        approx = cv.approxPolyDP(contour, 10, True)
        if len(approx) == 4:
            location = approx
            break
    # The original wrapped the drawContours call in a bare `except:` to catch
    # the location-is-None case; test explicitly instead so real errors surface.
    if location is None:
        return [0, 0, 0, 0]
    # Fill the winning contour into a mask and take its bounding box.
    mask = np.zeros(gray.shape, np.uint8)
    cv.drawContours(mask, [location], 0, 255, -1)
    (x, y) = np.where(mask == 255)
    # np.where yields (rows, cols); order reversed to (x, y) for the xml format.
    return [np.min(y), np.min(x), np.max(y), np.max(x)]
|
{"/compiledpreprocessingtest2.py": ["/LoadShowBoundingBox.py"], "/Test.py": ["/LoadShowBoundingBox.py"], "/PyTesseractTest.py": ["/LoadShowBoundingBox.py"], "/resize.py": ["/LoadShowBoundingBox.py"], "/DetectandEvaluateLicensePlate.py": ["/Evaluate.py", "/LoadShowBoundingBox.py", "/ImagePipeline1.py"], "/Evaluate.py": ["/LoadShowBoundingBox.py"]}
|
28,250
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/3bb00573b9bb_remove_starting_conditions_column.py
|
"""Remove starting conditions column
Revision ID: 3bb00573b9bb
Revises: 23baa65c35dd
Create Date: 2016-04-04 18:11:55.827663
"""
# revision identifiers, used by Alembic.
revision = '3bb00573b9bb'
down_revision = '23baa65c35dd'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Drop the obsolete starting_conditions column from the boards table.
    op.drop_column('boards', 'starting_conditions')
def downgrade():
    # Restore the column dropped by upgrade(). The original had a misplaced
    # parenthesis — sa.String(1000) and nullable=False were passed to
    # op.add_column instead of sa.Column (a TypeError at runtime); compare
    # the correct form in the 5ad205d1f5ec migration.
    op.add_column('boards', sa.Column('starting_conditions', sa.String(1000), nullable=False))
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,251
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/5ad205d1f5ec_increase_seed_length.py
|
"""increase seed length
Revision ID: 5ad205d1f5ec
Revises: 3ea83d573121
Create Date: 2016-04-07 16:52:57.082576
"""
# revision identifiers, used by Alembic.
revision = '5ad205d1f5ec'
down_revision = '3ea83d573121'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Grow boards.seed from String(1000) to String(500000). Dropping then
    # re-adding the column discards any existing seed data.
    op.drop_column('boards', 'seed')
    op.add_column('boards', sa.Column('seed', sa.String(500000), nullable=False))
def downgrade():
    # Shrink boards.seed back to String(1000); existing data is discarded.
    op.drop_column('boards', 'seed')
    op.add_column('boards', sa.Column('seed', sa.String(1000), nullable=False))
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,252
|
philnova/gameoflife
|
refs/heads/master
|
/views/conway.py
|
#imports
from flask.views import MethodView
from flask import render_template
from flask import Flask, render_template, request, redirect, jsonify, url_for, flash
from flask import make_response
from flask import request, redirect, url_for
import requests
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import random
import string
import arrow
import datetime
from forms.saveform import SaveForm
from forms.edituserform import EditUserForm
from forms.renameboardform import RenameBoardForm
from forms.setprivacyform import SetPrivacyForm
import gameoflife
from models.users import Base, User, Board, Rating
###########
# helpers #
###########
def confirm_login(login_session):
    """Return True when the session carries a logged-in user's name."""
    return login_session.get('name') is not None
def createUser(login_session):
    """Create a User row from the login session's name/google_id; return its id."""
    newUser = User(name=login_session['name'], google_id=login_session['google_id'])
    # NOTE(review): this commit happens before the add — it only flushes any
    # previously pending session state; confirm it is intentional.
    gameoflife.session.commit()
    gameoflife.session.add(newUser)
    gameoflife.session.commit()
    # Re-query to obtain the database-assigned primary key.
    user = gameoflife.session.query(User).filter_by(google_id=login_session['google_id']).one()
    return user.id
def getUserInfo(user_id):
    """Return the User row with primary key *user_id* (raises if absent)."""
    return gameoflife.session.query(User).filter_by(id=user_id).one()
def getUserID(google_id):
    """Return the local user id for a Google account id, or None if unknown."""
    try:
        user = gameoflife.session.query(User).filter_by(google_id=google_id).one()
        return user.id
    # Was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
    except Exception:
        return None
##########
# routes #
##########
class LogoutBackend(MethodView):
    """Clears the session and renders a page that revokes the Google token client-side."""

    def get(self):
        try:
            # Build the revoke URL while the token is still in the session.
            revoke = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % gameoflife.login_session['access_token']
            gameoflife.login_session.clear()
            flash('You have been logged out')
            return render_template('login/logout.html', revoke = revoke, development=gameoflife.app.development)
        except:
            # No access token in the session (user was never fully logged in).
            print "No access token"
            gameoflife.login_session.clear()
            flash('A server error occurred. Please log in again.')
            return redirect(url_for('draw',load=None, development=gameoflife.app.development))
class Logout(MethodView):
def get(self):
access_token = gameoflife.login_session.get('access_token')
if access_token is None:
print 'Access Token is None'
response = make_response(json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % gameoflife.login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
username = getUserInfo(gameoflife.login_session['user_id']).name
del gameoflife.login_session['access_token']
del gameoflife.login_session['google_id']
del gameoflife.login_session['user_id']
gameoflife.login_session.clear()
#response = make_response(json.dumps('You have logged out'), 200)
#response.headers['Content-Type'] = 'application/json'
#return response
flash('User {0} has been logged out'.format(username))
return redirect(url_for('draw'))
else:
response = make_response(json.dumps('Failed to revoke token for given user.', 400))
response.headers['Content-Type'] = 'application/json'
return response
class Login(MethodView):
    """Renders the login page with a fresh anti-forgery state token."""

    def get(self):
        gameoflife.login_session.clear()
        # 32-character random state token, stored server-side and echoed by the client.
        state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
        gameoflife.login_session['state'] = state
        return render_template('login/login.html', STATE=state, development=gameoflife.app.development)
class LoginBackend(MethodView):
    """Google OAuth2 callback: exchanges the auth code, validates the token,
    creates the user on first login, and populates the session."""

    def post(self):
        # Reject requests whose state token does not match the one we issued.
        if request.args.get('state') != gameoflife.login_session['state']:
            response = make_response(json.dumps('Invalid state parameter.'), 401)
            response.headers['Content-Type'] = 'application/json'
            return response
        else:
            print 'state parameter valid'
        # Obtain authorization code
        code = request.data
        try:
            # Upgrade the authorization code into a credentials object
            oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
            oauth_flow.redirect_uri = 'postmessage'
            credentials = oauth_flow.step2_exchange(code)
            print 'auth code upgraded to credentials'
        except FlowExchangeError:
            response = make_response(
                json.dumps('Failed to upgrade the authorization code.'), 401)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Check that the access token is valid.
        access_token = credentials.access_token
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token={0}'.format(access_token))
        h = httplib2.Http()
        result = json.loads(h.request(url, 'GET')[1])
        # If there was an error in the access token info, abort.
        if result.get('error') is not None:
            print 'there was an error in the access token!'
            response = make_response(json.dumps(result.get('error')), 500)
            response.headers['Content-Type'] = 'application/json'
            # NOTE(review): no `return response` here — execution falls through
            # even when the token info reports an error. Confirm intended.
        # Verify that the access token is used for the intended user.
        u_id = credentials.id_token['sub']
        if result['user_id'] != u_id:
            response = make_response(
                json.dumps("Token's user ID doesn't match given user ID."), 401)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Verify that the access token is valid for this app.
        if result['issued_to'] != gameoflife.client_id:
            response = make_response(
                json.dumps("Token's client ID does not match app's."), 401)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Store the access token in the session for later use.
        gameoflife.login_session['access_token'] = credentials.access_token
        # Fetch the user's profile info from Google.
        userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
        params = {'access_token': credentials.access_token, 'alt': 'json'}
        answer = requests.get(userinfo_url, params=params)
        data = answer.json()
        gameoflife.login_session['google_id'] = data['id']
        gameoflife.login_session['name'] = data['name']
        print 'user info obtained'
        # First-time login: create a local user record.
        if getUserID(gameoflife.login_session['google_id']) is None:
            print 'creating new user'
            createUser(gameoflife.login_session)
            print 'new user created'
        else: # not a new user
            pass # might add some additional logic here later
        print 'getting user info'
        current_user = gameoflife.session.query(User).filter_by(google_id=gameoflife.login_session['google_id']).one()
        gameoflife.login_session['user_id'] = current_user.id
        print 'Current user.id is ', current_user.id
        flash("You are now logged in as {0}".format(current_user.name))
        return '<p> </p>'
class Save(MethodView):
    """Renders the save-board form, pre-filled with the posted grid and rules."""

    def post(self):
        if gameoflife.login_session.get('user_id') is not None:
            form = SaveForm()
            # Rules are encoded as 'B<birth digits>S<survival digits>';
            # validate_conditions sanitises each digit string.
            rules = 'B'+validate_conditions(request.form['birth'], default='2')+'S'+validate_conditions(request.form['survival'], default='23')
            return render_template('social/save.html', form=form, seed=request.form['grid'], xdim=request.form['x'], ydim=request.form['y'], rules=rules, development=gameoflife.app.development, user_logged_in=(gameoflife.login_session.get('name')!=None))
        else:
            # Not logged in: fall back to the main page.
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
def validate_conditions(condition_string, default):
    """Sanitise a birth/survival digit string.

    Each character must parse as an int in 0..8; any disallowed or
    non-numeric character makes the whole string fall back to *default*.
    The valid string is returned deduplicated and sorted.
    """
    for i in condition_string:
        try:
            if 0 <= int(i) <= 8:
                pass
            else:
                print 'Disallowed value detected; returning default'
                return default
        except:
            # Non-numeric character.
            return default
    # Deduplicate and sort the accepted digits.
    condition_string = [i for i in sorted(list(set(list(condition_string))))]
    return ''.join(condition_string)
class SaveBackend(MethodView):
    """Persists a submitted board (plus an initial self-like rating) for the logged-in user."""

    def post(self):
        if gameoflife.login_session.get('user_id') is not None:
            form = SaveForm(request.form)
            if form.validate():
                xdim = request.form['xdim']
                ydim = request.form['ydim']
                seed = request.form['seed']
                rules = request.form['rules']
                newBoard = Board(user_id=gameoflife.login_session['user_id'], nickname=form.data['nickname'], xdim = xdim, ydim = ydim, seed=seed, rules = rules, shared=form.data['shared'])
                gameoflife.session.add(newBoard)
                gameoflife.session.flush() # allows us access to the new board's primary key
                # Every saved board starts with one 'like' from its creator.
                newRating = Rating(user_id=gameoflife.login_session['user_id'], board_id = newBoard.id, like=True)
                gameoflife.session.add(newRating)
                gameoflife.session.commit()
                flash('Your board has been saved with the nickname: {0}!'.format(form.data['nickname']))
                return redirect(url_for('userprofile'), code=303)
            else:
                # Validation failed: redisplay the form with a 400 status.
                print form.data
                return render_template('social/save.html', form=form, rules = request.form['rules'], xdim = request.form['xdim'], ydim = request.form['ydim'], seed = request.form['seed'], development=gameoflife.app.development, user_logged_in=(gameoflife.login_session.get('name')!=None)), 400
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class UserProfile(MethodView):
    """Shows the current user's boards with like/dislike stats.

    NOTE(review): this class is redefined later in this module (without the
    login check); this first definition is shadowed. Confirm which is intended.
    """

    def get(self):
        if confirm_login(gameoflife.login_session):
            current_user = getUserInfo(gameoflife.login_session['user_id'])
            boards = gameoflife.session.query(Board).filter_by(user_id=current_user.id).all()
            # Likes and total ratings per board (restricted to this user's ratings).
            ratings_num = [gameoflife.session.query(Rating).filter_by(board_id=board.id, like=True, user_id = current_user.id).count() for board in boards]
            ratings_denom = [gameoflife.session.query(Rating).filter_by(board_id=board.id, user_id = current_user.id).count() for board in boards]
            # NOTE(review): divides by ratings_denom — ZeroDivisionError if a board
            # has no ratings from this user. Confirm every board gets a rating on save.
            ratings = [int(ratings_num[i]/float(ratings_denom[i]) * 100) for i in range(len(ratings_num))]
            thumbs_down = [ratings_denom[i] - ratings_num[i] for i in range(len(ratings_num))]
            return render_template('social/profile.html', user=current_user, boards = boards, ratings=ratings, thumbs_down=thumbs_down, thumbs_up=ratings_num, development=gameoflife.app.development, user_logged_in=(gameoflife.login_session.get('name')!=None))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteBoardBackend(MethodView):
    """Deletes a board (and its ratings) after verifying ownership.

    NOTE(review): redefined identically later in this module; this copy is shadowed.
    """

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            user = getUserInfo(gameoflife.login_session['user_id'])
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            ratings = gameoflife.session.query(Rating).filter_by(board_id = id).all()
            print 'board id: ', id
            if board.user_id == user.id:
                # Delete child ratings first to satisfy the foreign-key relationship.
                for rating in ratings:
                    print rating.board_id
                    gameoflife.session.delete(rating)
                    gameoflife.session.commit()
                gameoflife.session.delete(board)
                gameoflife.session.commit()
                flash('Your board was deleted')
            else:
                flash("Whoa, buddy. You don't have persmission to delete that board!")
            return redirect(url_for('userprofile'))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteBoard(MethodView):
    """Renders the delete-board confirmation page.

    NOTE(review): redefined identically later in this module; this copy is shadowed.
    """

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            return render_template('social/deleteboard.html', id=id, user_logged_in=(gameoflife.login_session.get('name')!=None))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteUserBackend(MethodView):
    """Deletes the current user along with all their boards and ratings.

    NOTE(review): redefined identically later in this module; this copy is shadowed.
    """

    def get(self):
        if confirm_login(gameoflife.login_session):
            user = getUserInfo(gameoflife.login_session['user_id'])
            boards = gameoflife.session.query(Board).filter_by(user_id=user.id).all()
            ratings = gameoflife.session.query(Rating).filter_by(user_id=user.id).all()
            # Delete dependents (ratings, then boards) before the user row.
            for rating in ratings:
                gameoflife.session.delete(rating)
                gameoflife.session.commit()
            for board in boards:
                gameoflife.session.delete(board)
                gameoflife.session.commit()
            gameoflife.session.delete(user)
            gameoflife.session.commit()
            gameoflife.login_session.clear()
            flash('Your user account and all saved boards have been deleted')
            return redirect(url_for('draw',load=None))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteUser(MethodView):
    """Renders the delete-account confirmation page.

    NOTE(review): redefined identically later in this module; this copy is shadowed.
    """

    def get(self):
        if confirm_login(gameoflife.login_session):
            return render_template('user/delete.html', development=gameoflife.app.development, user_logged_in=True)
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class UserProfile(MethodView):
    """Shows the current user's boards with like/dislike stats.

    NOTE(review): second definition — shadows the earlier UserProfile above,
    and unlike it performs NO confirm_login check, so an anonymous request
    raises a KeyError on login_session['user_id']. Confirm which is intended.
    """

    def get(self):
        current_user = getUserInfo(gameoflife.login_session['user_id'])
        boards = gameoflife.session.query(Board).filter_by(user_id=current_user.id).all()
        # Likes and total ratings per board (restricted to this user's ratings).
        ratings_num = [gameoflife.session.query(Rating).filter_by(board_id=board.id, like=True, user_id = current_user.id).count() for board in boards]
        ratings_denom = [gameoflife.session.query(Rating).filter_by(board_id=board.id, user_id = current_user.id).count() for board in boards]
        print ratings_num, ratings_denom
        # NOTE(review): ZeroDivisionError if a board has no ratings from this user.
        ratings = [int(ratings_num[i]/float(ratings_denom[i]) * 100) for i in range(len(ratings_num))]
        thumbs_down = [ratings_denom[i] - ratings_num[i] for i in range(len(ratings_num))]
        #board_ids = ''
        #for board in boards:
        #    board_ids += str(board.id)
        print gameoflife.app.development
        return render_template('social/profile.html', user=current_user, boards = boards, ratings=ratings, thumbs_down=thumbs_down, thumbs_up=ratings_num, development=gameoflife.app.development, user_logged_in=(gameoflife.login_session.get('name')!=None))
class DeleteBoardBackend(MethodView):
    """Deletes a board (and its ratings) after verifying ownership.

    NOTE(review): duplicate of the earlier DeleteBoardBackend; this later
    definition is the one actually bound at import time.
    """

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            user = getUserInfo(gameoflife.login_session['user_id'])
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            ratings = gameoflife.session.query(Rating).filter_by(board_id = id).all()
            print 'board id: ', id
            if board.user_id == user.id:
                # Delete child ratings first to satisfy the foreign-key relationship.
                for rating in ratings:
                    print rating.board_id
                    gameoflife.session.delete(rating)
                    gameoflife.session.commit()
                gameoflife.session.delete(board)
                gameoflife.session.commit()
                flash('Your board was deleted')
            else:
                flash("Whoa, buddy. You don't have persmission to delete that board!")
            return redirect(url_for('userprofile'))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteBoard(MethodView):
    """Renders the delete-board confirmation page.

    NOTE(review): duplicate of the earlier DeleteBoard; this later definition
    is the one actually bound at import time.
    """

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            return render_template('social/deleteboard.html', id=id, user_logged_in=(gameoflife.login_session.get('name')!=None))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteUserBackend(MethodView):
    """Deletes the current user along with all their boards and ratings.

    NOTE(review): duplicate of the earlier DeleteUserBackend; this later
    definition is the one actually bound at import time.
    """

    def get(self):
        if confirm_login(gameoflife.login_session):
            user = getUserInfo(gameoflife.login_session['user_id'])
            boards = gameoflife.session.query(Board).filter_by(user_id=user.id).all()
            ratings = gameoflife.session.query(Rating).filter_by(user_id=user.id).all()
            # Delete dependents (ratings, then boards) before the user row.
            for rating in ratings:
                gameoflife.session.delete(rating)
                gameoflife.session.commit()
            for board in boards:
                gameoflife.session.delete(board)
                gameoflife.session.commit()
            gameoflife.session.delete(user)
            gameoflife.session.commit()
            gameoflife.login_session.clear()
            flash('Your user account and all saved boards have been deleted')
            return redirect(url_for('draw',load=None))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class DeleteUser(MethodView):
    """Renders the delete-account confirmation page.

    NOTE(review): duplicate of the earlier DeleteUser; this later definition
    is the one actually bound at import time.
    """

    def get(self):
        if confirm_login(gameoflife.login_session):
            return render_template('user/delete.html', development=gameoflife.app.development, user_logged_in=True)
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class RenameBoard(MethodView):
    """Renders the rename-board form, pre-filled with the board's current nickname."""
    # NOTE(review, from original comment): ownership is NOT checked here —
    # confirm the user has privilege to rename this board (the POST handler checks).

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            form = RenameBoardForm()
            # Pre-fill the field with the existing nickname.
            form.nickname.default = board.nickname
            form.process()
            return render_template('social/renameboard.html', form=form, id=id, development=gameoflife.app.development, user_logged_in=True)
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class RenameBoardBackend(MethodView):
    """Handles the POST from the rename-board form."""

    def post(self, id):
        if not confirm_login(gameoflife.login_session):
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
        user = getUserInfo(gameoflife.login_session['user_id'])
        board = gameoflife.session.query(Board).filter_by(id=id).one()
        # Only the board's owner may rename it.
        if user.id != board.user_id:
            flash('Permission denied!')
            return redirect(url_for('userprofile'))
        form = RenameBoardForm(request.form)
        if not form.validate():
            return render_template('social/renameboard.html', form=form, id=id, development=gameoflife.app.development, user_logged_in=True)
        board.nickname = request.form['nickname']
        gameoflife.session.add(board)
        gameoflife.session.commit()
        flash('Rename successful!')
        # 303 forces the browser to follow with a GET after the POST.
        return redirect(url_for('userprofile'), code=303)
class SetPrivacy(MethodView):
    """Shows the share/private radio form for one board."""

    def get(self, id):
        if not confirm_login(gameoflife.login_session):
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
        board = gameoflife.session.query(Board).filter_by(id=id).one()
        form = SetPrivacyForm()
        return render_template('social/setprivacy.html', id=id, board=board, form=form, development=gameoflife.app.development, user_logged_in=True)
class SetPrivacyBackend(MethodView):
    """Handles the POST from the set-privacy form."""

    def post(self, id):
        if not confirm_login(gameoflife.login_session):
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
        user = getUserInfo(gameoflife.login_session['user_id'])
        board = gameoflife.session.query(Board).filter_by(id=id).one()
        # Only the board's owner may change its visibility.
        if user.id != board.user_id:
            flash('Permission denied!')
            return redirect(url_for('userprofile'))
        form = SetPrivacyForm(request.form)
        if not form.validate():
            return render_template('social/setprivacy.html', board=board, form=form, development=gameoflife.app.development, user_logged_in=True)
        board.shared = (request.form['privacy'] == 'share')
        gameoflife.session.add(board)
        gameoflife.session.commit()
        flash('Privacy settings for {0} updated!'.format(board.nickname))
        return redirect(url_for('userprofile'), code=303)
# NOTE(review): duplicate definition — RenameBoard is already defined above
# with an effectively identical body; this second class statement silently
# rebinds the name. One copy should be removed.
class RenameBoard(MethodView):
    """Shows the rename form for a board, pre-filled with its current nickname."""

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            form = RenameBoardForm()
            form.nickname.default = board.nickname
            form.process()
            return render_template('social/renameboard.html', form=form, id=id, development=gameoflife.app.development, user_logged_in=True)
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
# NOTE(review): duplicate definition — RenameBoardBackend is already defined
# above with an identical body; this second class statement silently rebinds
# the name. One copy should be removed.
class RenameBoardBackend(MethodView):
    """Handles the POST from the rename-board form (owner-only)."""

    def post(self, id):
        if confirm_login(gameoflife.login_session):
            user = getUserInfo(gameoflife.login_session['user_id'])
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            if user.id == board.user_id:
                form = RenameBoardForm(request.form)
                if form.validate():
                    board.nickname = request.form['nickname']
                    gameoflife.session.add(board)
                    gameoflife.session.commit()
                    flash('Rename successful!')
                    return redirect(url_for('userprofile'), code=303)
                else:
                    return render_template('social/renameboard.html', form=form, id=id, development=gameoflife.app.development, user_logged_in=True)
            else:
                flash('Permission denied!')
                return redirect(url_for('userprofile'))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
# NOTE(review): duplicate definition — SetPrivacy is already defined above
# with an identical body; this rebinds the name. One copy should be removed.
class SetPrivacy(MethodView):
    """Shows the share/private radio form for one board."""

    def get(self, id):
        if confirm_login(gameoflife.login_session):
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            form = SetPrivacyForm()
            return render_template('social/setprivacy.html', id=id, board=board, form=form, development=gameoflife.app.development, user_logged_in=True)
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
# NOTE(review): duplicate definition — SetPrivacyBackend is already defined
# above with an identical body; this rebinds the name. One copy should be
# removed.
class SetPrivacyBackend(MethodView):
    """Handles the POST from the set-privacy form (owner-only)."""

    def post(self, id):
        if confirm_login(gameoflife.login_session):
            user = getUserInfo(gameoflife.login_session['user_id'])
            board = gameoflife.session.query(Board).filter_by(id=id).one()
            if user.id == board.user_id:
                form = SetPrivacyForm(request.form)
                if form.validate():
                    board.shared = (request.form['privacy'] == 'share')
                    gameoflife.session.add(board)
                    gameoflife.session.commit()
                    flash('Privacy settings for {0} updated!'.format(board.nickname))
                    return redirect(url_for('userprofile'), code=303)
                else:
                    return render_template('social/setprivacy.html', board=board, form=form, development=gameoflife.app.development, user_logged_in=True)
            else:
                flash('Permission denied!')
                return redirect(url_for('userprofile'))
        else:
            return render_template('main.html', user_logged_in = False, development=gameoflife.app.development, loaded_board='')
class EditUserBackend(MethodView):
    """Handles the POST from the edit-username form."""

    def post(self):
        if gameoflife.login_session.get('user_id') is not None:
            form = EditUserForm(request.form)
            # Look the user up before validating so the re-rendered form has
            # a user to display. (Bug fix: the original referenced an
            # undefined ``current_user`` in the invalid-form branch, which
            # raised NameError whenever validation failed.)
            user = getUserInfo(gameoflife.login_session['user_id'])
            if form.validate():
                user.name = request.form['name']
                gameoflife.session.add(user)
                gameoflife.session.commit()
                flash('Your username has been changed to {0}'.format(user.name))
                return redirect(url_for('userprofile'), code=303)
            else:
                return render_template('social/edituser.html', form=form, user=user, development=gameoflife.app.development, user_logged_in=True)
        else:
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
class EditUser(MethodView):
    """Shows the edit-username form, pre-filled with the current name."""

    def get(self):
        if gameoflife.login_session.get('user_id') is None:
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
        current_user = getUserInfo(gameoflife.login_session['user_id'])
        form = EditUserForm()
        form.name.default = current_user.name
        form.process()
        return render_template('social/edituser.html', form=form, user=current_user, development=gameoflife.app.development, user_logged_in=True)
class Load(MethodView):
    """Loads a saved board into the main page.

    A flash message warns when the board is private to someone else, but the
    page is rendered with the board either way (matching prior behavior).
    """

    def get(self, id):
        board = gameoflife.session.query(Board).filter_by(id=id).one()
        viewable = board.shared == True or board.user_id == gameoflife.login_session.get('user_id')
        if not viewable:
            flash('Sorry, that board is not public!')
        logged_in = gameoflife.login_session.get('name') is not None
        return render_template('main.html', user_logged_in=logged_in, loaded_board=board, development=gameoflife.app.development)
class Like(MethodView):
    """Records a thumbs-up on a shared board from the logged-in user."""

    def get(self, id):
        if not confirm_login(gameoflife.login_session):
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
        board = gameoflife.session.query(Board).filter_by(id=id).one()
        user = getUserInfo(gameoflife.login_session['user_id'])
        if board.shared != True:
            flash('Sorry, that board is not public!')
            return render_template('main.html', user_logged_in=(gameoflife.login_session.get('name')!=None), loaded_board='', development=gameoflife.app.development)
        existing = gameoflife.session.query(Rating).filter_by(board_id=id, user_id=user.id)
        if existing.count():
            # The user already voted on this board — flip their vote to like.
            vote = existing.one()
            vote.like = True
        else:
            vote = Rating(user_id=user.id, board_id=board.id, like=True)
        gameoflife.session.add(vote)
        gameoflife.session.commit()
        return redirect(url_for('load', id=board.id))
class Dislike(MethodView):
    """Records a thumbs-down on a shared board from the logged-in user."""

    def get(self, id):
        if not confirm_login(gameoflife.login_session):
            return render_template('main.html', user_logged_in=False, development=gameoflife.app.development, loaded_board='')
        board = gameoflife.session.query(Board).filter_by(id=id).one()
        user = getUserInfo(gameoflife.login_session['user_id'])
        if board.shared != True:
            flash('Sorry, that board is not public!')
            return render_template('main.html', user_logged_in=(gameoflife.login_session.get('name')!=None), loaded_board='', development=gameoflife.app.development)
        existing = gameoflife.session.query(Rating).filter_by(board_id=id, user_id=user.id)
        if existing.count():
            # The user already voted on this board — flip their vote to dislike.
            vote = existing.one()
            vote.like = False
        else:
            vote = Rating(user_id=user.id, board_id=board.id, like=False)
        gameoflife.session.add(vote)
        gameoflife.session.commit()
        return redirect(url_for('load', id=board.id))
class Browse(MethodView):
    """Lists all publicly shared boards with their like/dislike tallies."""

    def get(self):
        boards = gameoflife.session.query(Board).filter_by(shared=True).all()
        users = [gameoflife.session.query(User).filter_by(id=board.user_id).one().name for board in boards]
        ratings_num = [gameoflife.session.query(Rating).filter_by(board_id=board.id, like=True).count() for board in boards]
        ratings_denom = [gameoflife.session.query(Rating).filter_by(board_id=board.id).count() for board in boards]
        # Percentage of positive ratings per board. Bug fix: a board with no
        # ratings at all has denom == 0 and previously raised
        # ZeroDivisionError; report it as 0% instead.
        ratings = [int(ratings_num[i] / float(ratings_denom[i]) * 100) if ratings_denom[i] else 0
                   for i in range(len(ratings_num))]
        thumbs_down = [ratings_denom[i] - ratings_num[i] for i in range(len(ratings_num))]
        return render_template('social/browse.html', user_logged_in=(gameoflife.login_session.get('name')!=None), boards=boards, users=users, ratings=ratings, thumbs_up=ratings_num, thumbs_down=thumbs_down, development=gameoflife.app.development)
class Draw(MethodView):
    """Renders the main drawing page with an empty board."""

    def get(self):
        logged_in = gameoflife.login_session.get('name') is not None
        return render_template('main.html', user_logged_in=logged_in, loaded_board='', development=gameoflife.app.development)
class About(MethodView):
    """Static about page."""

    def get(self):
        logged_in = gameoflife.login_session.get('name') != None
        return render_template('about.html', development=gameoflife.app.development, user_logged_in=logged_in)
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,253
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/23baa65c35dd_create_ratings_table.py
|
"""create ratings table
Revision ID: 23baa65c35dd
Revises: 2835336d1ae6
Create Date: 2016-03-28 16:31:04.098972
"""
# revision identifiers, used by Alembic.
revision = '23baa65c35dd'
down_revision = '2835336d1ae6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: create the ratings table, one row per (user, board)
    # vote, with foreign keys back to users and boards.
    op.create_table(
        'ratings',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('user_id', sa.Integer),
        sa.ForeignKeyConstraint(['user_id'],['users.id'], ),
        sa.Column('board_id', sa.Integer),
        sa.ForeignKeyConstraint(['board_id'],['boards.id'], ),
    )


def downgrade():
    # Reverse migration: drop the table again.
    op.drop_table('ratings')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,254
|
philnova/gameoflife
|
refs/heads/master
|
/forms/edituserform.py
|
from flask_wtf import Form
from wtforms import StringField, DateTimeField, SelectField, IntegerField, TextAreaField
from wtforms.validators import DataRequired, Length
class EditUserForm(Form):
    """Single-field form used to change the logged-in user's display name."""
    name = StringField('Username', validators=[DataRequired()])
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,255
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/28cc6d8b4060_add_shared_column_to_board.py
|
"""add shared column to Board
Revision ID: 28cc6d8b4060
Revises: 5ad205d1f5ec
Create Date: 2016-04-14 14:22:10.202530
"""
# revision identifiers, used by Alembic.
revision = '28cc6d8b4060'
down_revision = '5ad205d1f5ec'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: boards gain a 'shared' flag (public visibility).
    op.add_column('boards', sa.Column('shared', sa.Boolean()))


def downgrade():
    # Reverse migration: remove the flag.
    op.drop_column('boards', 'shared')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,256
|
philnova/gameoflife
|
refs/heads/master
|
/gameoflife.py
|
from application import Application, handlers
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import os
# Build the application from the route table; configuration (DATABASE_URL,
# Google OAuth credentials, DEVELOPMENT flag) comes from the environment.
app = Application(handlers, config=os.environ, debug=True)
db = app.db
development = app.development
#some_engine = create_engine(os.environ.get('DATABASE_URL'))
#Session = sessionmaker(bind=some_engine)
#session = Session()
# Re-exported so views can use gameoflife.login_session / gameoflife.client_id.
login_session = app.login_session
client_id = app.google_client_id
# Standalone SQLAlchemy session used by the view layer as gameoflife.session.
engine = create_engine(os.environ['DATABASE_URL'])
Base = declarative_base()
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,257
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/cef561b43b1_create_users_table.py
|
"""create users table
Revision ID: cef561b43b1
Revises:
Create Date: 2016-03-28 16:23:15.848108
"""
# revision identifiers, used by Alembic.
revision = 'cef561b43b1'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: create the users table (base revision).
    op.create_table(
        'users',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('email', sa.String(200)),
    )


def downgrade():
    # Reverse migration: drop the table again.
    op.drop_table('users')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,258
|
philnova/gameoflife
|
refs/heads/master
|
/forms/saveform.py
|
from flask_wtf import Form
from wtforms import StringField, DateTimeField, SelectField, IntegerField, TextAreaField, BooleanField
from wtforms.validators import DataRequired, Length
class SaveForm(Form):
    """Form for saving a board: nickname plus an optional 'share' checkbox."""
    nickname = StringField('Nickname for this board', validators=[DataRequired()])
    shared = BooleanField('Check to make this board publicly shared')
    # xdim = IntegerField('X dimension', validators=[DataRequired()])
    # ydim = IntegerField('Y dimension', validators=[DataRequired()])
    # seed = StringField('Seed', validators=[DataRequired()])
    # rules = StringField('Rules', validators=[DataRequired()])
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,259
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/2835336d1ae6_create_board_table.py
|
"""create board table
Revision ID: 2835336d1ae6
Revises: cef561b43b1
Create Date: 2016-03-28 16:26:19.377178
"""
# revision identifiers, used by Alembic.
revision = '2835336d1ae6'
down_revision = 'cef561b43b1'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: create the boards table with an FK to users and the
    # board's dimensions and serialized starting conditions.
    op.create_table(
        'boards',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('user_id', sa.Integer),
        sa.ForeignKeyConstraint(['user_id'],['users.id'], ),
        sa.Column('nickname', sa.String(250), nullable=False),
        sa.Column('xdim', sa.Integer, nullable=False),
        sa.Column('ydim', sa.Integer, nullable=False),
        sa.Column('starting_conditions', sa.String(500), nullable=False),
    )


def downgrade():
    # Reverse migration: drop the table again.
    op.drop_table('boards')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,260
|
philnova/gameoflife
|
refs/heads/master
|
/forms/setprivacyform.py
|
from flask_wtf import Form
from wtforms import BooleanField, RadioField
from wtforms.validators import DataRequired, Length
class SetPrivacyForm(Form):
    """Radio form choosing whether a board is publicly shared or private."""
    #shared = BooleanField('Check to make this board publicly shared')
    # Defaults to 'private'; the backend stores shared = (value == 'share').
    privacy = RadioField('Set Privacy for this board', validators=[DataRequired()], choices=[('share', 'Shared'),('private', 'Private')], default='private')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,261
|
philnova/gameoflife
|
refs/heads/master
|
/runapp.py
|
# Entry point: import the configured application module (which builds the
# Flask app at import time) and start the development server.
import gameoflife

gameoflife.app.start_app()
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,262
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/498a7e1175e3_remove_email_column.py
|
"""Remove email column
Revision ID: 498a7e1175e3
Revises: 161384c7b438
Create Date: 2016-04-05 13:31:31.600141
"""
# revision identifiers, used by Alembic.
revision = '498a7e1175e3'
down_revision = '161384c7b438'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: drop the unused email column from users.
    op.drop_column('users', 'email')


def downgrade():
    # Reverse migration: restore the column (data is lost on round-trip).
    op.add_column('users', sa.Column('email', sa.String(200)))
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,263
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/161384c7b438_add_seed_string_column.py
|
"""Add seed_string column
Revision ID: 161384c7b438
Revises: 3bb00573b9bb
Create Date: 2016-04-04 18:14:02.220662
"""
# revision identifiers, used by Alembic.
revision = '161384c7b438'
down_revision = '3bb00573b9bb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: boards gain a serialized 'seed' string and a
    # 'rules' string describing the cellular-automaton rule set.
    op.add_column('boards', sa.Column('seed', sa.String(1000), nullable=False))
    op.add_column('boards', sa.Column('rules', sa.String(20), nullable=False))


def downgrade():
    # Reverse migration: remove both columns.
    op.drop_column('boards', 'seed')
    op.drop_column('boards', 'rules')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,264
|
philnova/gameoflife
|
refs/heads/master
|
/application.py
|
import flask
import flask.ext.sqlalchemy
from views.conway import Draw, LoginBackend, Login, LogoutBackend, SaveBackend, Save, UserProfile, EditUser, EditUserBackend
from views.conway import RenameBoard, RenameBoardBackend, DeleteBoard, DeleteBoardBackend, DeleteUser, DeleteUserBackend, Load
from views.conway import SetPrivacy, SetPrivacyBackend, Browse, Like, Dislike, About
from flask import session as login_session
import json
import urlparse
import os
import psycopg2
import urlparse
class Route(object):
    """Binds a URL rule to a named MethodView resource for registration."""

    def __init__(self, url, route_name, resource):
        # url: Flask URL rule; route_name: endpoint name; resource: MethodView.
        self.url, self.route_name, self.resource = url, route_name, resource
# URL routing table: each Route maps a URL rule to an endpoint name and the
# MethodView class that serves it. Registered by Application._set_routes().
handlers = [
    Route('/', 'draw', Draw),
    Route('/load/<int:id>', 'load', Load),
    Route('/loginbackend', 'loginbackend', LoginBackend),
    Route('/login', 'login', Login),
    Route('/logout', 'logout', LogoutBackend),
    Route('/saveform/', 'savebackend', SaveBackend),
    Route('/save/', 'save', Save),
    Route('/profile', 'userprofile', UserProfile),
    Route('/profile/edit', 'editprofile', EditUser),
    Route('/profile/edit/backend', 'editprofilebackend', EditUserBackend),
    Route('/board/<int:id>/delete', 'deleteboard', DeleteBoard),
    Route('/board/<int:id>/delete/backend', 'deleteboardbackend', DeleteBoardBackend),
    Route('/board/<int:id>/rename/backend', 'renameboardbackend', RenameBoardBackend),
    Route('/board/<int:id>/rename', 'renameboard', RenameBoard),
    Route('/profile/delete', 'deleteuser', DeleteUser),
    Route('/profile/delete/backend', 'deleteuserbackend', DeleteUserBackend),
    Route('/profile/<int:id>/setprivacy', 'setprivacy', SetPrivacy),
    Route('/profile/<int:id>/setprivacybackend', 'setprivacybackend', SetPrivacyBackend),
    Route('/browse', 'browse', Browse),
    Route('/like/<int:id>', 'like', Like),
    Route('/dislike/<int:id>', 'dislike', Dislike),
    Route('/about', 'about', About),
]
APPLICATION_NAME = "Conway"
class Application(object):
    """Thin wrapper around a Flask app: wires routes, DB, and OAuth config."""

    def __init__(self, routes, config, debug=True):
        self.flask_app = flask.Flask(__name__)
        self.routes = routes
        self.debug = debug
        # Flask's signed-cookie session, re-exported for the view layer.
        self.login_session = login_session
        # Configure before registering routes so views see a finished app.
        self._configure_app(config)
        self._set_routes()

    def _set_routes(self):
        # Register each MethodView under its endpoint name.
        for route in self.routes:
            app_view = route.resource.as_view(route.route_name)
            self.flask_app.add_url_rule(route.url, view_func=app_view)

    def _configure_app(self, config):
        # config is a mapping, typically os.environ (see gameoflife.py).
        self.flask_app.config['SQLALCHEMY_DATABASE_URI'] =config['DATABASE_URL']
        # NOTE(review): hard-coded secret key — should be loaded from config,
        # never committed to source control.
        self.flask_app.secret_key = "asupersecr3tkeyshouldgo"
        self.db = flask.ext.sqlalchemy.SQLAlchemy(self.flask_app)
        self.google_client_id = config['GOOGLE_CLIENT_ID']
        self.google_client_secret = config['GOOGLE_CLIENT_SECRET']
        # Truthy for any non-empty DEVELOPMENT value, False when unset.
        self.development = bool(config.get('DEVELOPMENT'))

    def start_app(self):
        # Run Flask's built-in development server.
        self.flask_app.run(debug=self.debug)
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,265
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/3ea83d573121_add_google_id_column.py
|
"""add google id column
Revision ID: 3ea83d573121
Revises: 498a7e1175e3
Create Date: 2016-04-05 13:33:47.141402
"""
# revision identifiers, used by Alembic.
revision = '3ea83d573121'
down_revision = '498a7e1175e3'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: users gain a google_id column for OAuth identity.
    op.add_column('users', sa.Column('google_id', sa.String(200)))


def downgrade():
    # Reverse migration: remove the column.
    op.drop_column('users', 'google_id')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,266
|
philnova/gameoflife
|
refs/heads/master
|
/models/users.py
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, DateTime, String, ForeignKey, Boolean
import arrow
Base = declarative_base()
class User(Base):
    """Application user, identified by their Google OAuth account id."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(200), nullable=False)        # editable display name
    google_id = Column(String(200), nullable=False)   # Google OAuth subject id

    def __init__(self, name, google_id):
        self.name = name
        self.google_id = google_id
class Board(Base):
    """A saved Game of Life board: dimensions, seed string, and rule set."""
    __tablename__ = 'boards'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))  # owning user
    nickname = Column(String(200), nullable=False)
    xdim = Column(Integer, nullable=False) #x dimension
    ydim = Column(Integer, nullable=False) #y dimension
    seed = Column(String(500000), nullable=False)      # serialized starting cells
    rules = Column(String(20), nullable=False)         # automaton rule string
    shared = Column(Boolean, default=True)             # publicly visible when True

    def __init__(self, user_id, nickname, xdim, ydim, seed, rules, shared):
        self.user_id = user_id
        self.nickname = nickname
        self.xdim = xdim
        self.ydim = ydim
        self.seed = seed
        self.rules = rules
        self.shared = shared
class Rating(Base):
    """One user's thumbs-up/down vote on one board."""
    __tablename__ = 'ratings'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    board_id = Column(Integer, ForeignKey('boards.id'))
    like = Column(Boolean)  # True = thumbs up, False = thumbs down

    def __init__(self, user_id, board_id, like):
        self.user_id = user_id
        self.board_id = board_id
        self.like = like
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,267
|
philnova/gameoflife
|
refs/heads/master
|
/forms/renameboardform.py
|
from flask_wtf import Form
from wtforms import StringField, DateTimeField, SelectField, IntegerField, TextAreaField
from wtforms.validators import DataRequired, Length
class RenameBoardForm(Form):
    """Single-field form used to rename a saved board."""
    nickname = StringField('Nickname', validators=[DataRequired()])
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,268
|
philnova/gameoflife
|
refs/heads/master
|
/alembic/versions/555e3a95692b_add_like_column_to_rating.py
|
"""add like column to Rating
Revision ID: 555e3a95692b
Revises: 28cc6d8b4060
Create Date: 2016-04-14 16:52:14.725720
"""
# revision identifiers, used by Alembic.
revision = '555e3a95692b'
down_revision = '28cc6d8b4060'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: ratings gain a boolean 'like' column (up/down vote).
    op.add_column('ratings', sa.Column('like', sa.Boolean()))


def downgrade():
    # Reverse migration: remove the column.
    op.drop_column('ratings', 'like')
|
{"/gameoflife.py": ["/application.py"], "/runapp.py": ["/gameoflife.py"], "/application.py": ["/views/conway.py"]}
|
28,293
|
cmedanielle/ProgIIBST
|
refs/heads/master
|
/bst_test.py
|
# Smoke test for the BST implementation: build a small tree, print its
# in-order traversal, remove the root, and print the traversal again.
from bst import BST

arvore = BST()
arvore.insert(30)
arvore.insert(23)
arvore.insert(7)
arvore.insert(28)
arvore.insert(70)
arvore.insert(89)
arvore.insert(26)
arvore.inOrderTransversal()
# Removing the root (30) exercises the hardest deletion case.
arvore.remove(30, arvore.root)
print()
arvore.inOrderTransversal()
{"/bst_test.py": ["/bst.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.