id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
5163122 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 19:16:41 2021
@author: dv516
"""
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(1)
from algorithms.PyBobyqa_wrapped.Wrapper_for_pybobyqa import PyBobyqaWrapper
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from algorithms.nesterov_random.nesterov_random import nesterov_random
from algorithms.simplex.simplex_method import simplex_method
from algorithms.CUATRO.CUATRO import CUATRO
from algorithms.Finite_differences.Finite_differences import finite_Diff_Newton
from algorithms.Finite_differences.Finite_differences import Adam_optimizer
from algorithms.Finite_differences.Finite_differences import BFGS_optimizer
from algorithms.SQSnobfit_wrapped.Wrapper_for_SQSnobfit import SQSnobFitWrapper
from algorithms.DIRECT_wrapped.Wrapper_for_Direct import DIRECTWrapper
from test_functions import quadratic_constrained
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
import pickle
def average_from_list(solutions_list):
    """Aggregate best-so-far trajectories from several solver runs.

    Each entry of ``solutions_list`` is a dict with 'f_best_so_far' (best
    objective after each iteration) and 'samples_at_iteration' (cumulative
    function-evaluation count at each iteration). The trajectories are
    resampled onto a common grid of 100 evaluation budgets (1..100).

    Returns:
        (f_best_all, f_median, f_min, f_max): the (n_runs, 100) resampled
        matrix and its per-budget median, min and max across runs.
    """
    n_runs = len(solutions_list)
    f_best_all = np.zeros((n_runs, 100))
    for run, sol in enumerate(solutions_list):
        f_best = np.array(sol['f_best_so_far'])
        samples = np.array(sol['samples_at_iteration'])
        for budget in range(1, 101):
            reached = np.where(samples <= budget)[0]
            if reached.size == 0:
                # No iteration completed within this budget yet: carry the
                # first recorded value backwards.
                f_best_all[run, budget - 1] = f_best[0]
            else:
                f_best_all[run, budget - 1] = f_best[reached][-1]
    f_median = np.median(f_best_all, axis=0)
    # f_av = np.average(f_best_all, axis = 0)
    # f_std = np.std(f_best_all, axis = 0)
    f_min = np.min(f_best_all, axis=0)
    f_max = np.max(f_best_all, axis=0)
    return f_best_all, f_median, f_min, f_max
def Problem_quadraticNoise(x, noise_std, N_SAA):
    """Noisy sample-average approximation of the constrained quadratic problem.

    Evaluates the quadratic objective and constraint at ``x`` ``N_SAA``
    times, perturbing each draw with zero-mean Gaussian noise
    (noise_std[0] for the objective, noise_std[1] for the constraint).

    Returns:
        (f_SAA, [g_SAA]): mean noisy objective and the worst-case (max)
        noisy constraint value, wrapped in a list.
    """
    f_SAA = 0
    g_SAA = -np.inf
    for _ in range(N_SAA):
        # Draw order matters for RNG reproducibility: objective sample,
        # objective noise, constraint sample, constraint noise.
        f_draw = quadratic_constrained.quadratic_f(x) + np.random.normal(0, noise_std[0])
        f_SAA += f_draw / N_SAA
        g_draw = quadratic_constrained.quadratic_g(x) + np.random.normal(0, noise_std[1])
        g_SAA = max(g_SAA, g_draw)
    return f_SAA, [g_SAA]
# Noise levels: n_noise settings, each a pair (objective std, constraint std)
# scaled linearly from zero up to 5x the base level.
n_noise = 6
noise_matrix = np.zeros((n_noise, 2))
for i in range(n_noise):
    noise_matrix[i] = np.array([0.05/3, 0.01/3])*i
# Box bounds and starting point for the 2-D quadratic test problem.
bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
x0 = np.array([1,1])
# Budget: 50 evaluations with a single SAA sample per evaluation.
max_f_eval = 50 ; N_SAA = 1
# max_f_eval = 25 ; N_SAA = 2
max_it = 100
#CUATRO local, CUATRO global, BO, DIRECT
# Bayesian-optimisation results are precomputed elsewhere and loaded here.
with open('BayesQuadratic_listNoiseConv.pickle', 'rb') as handle:
    quadraticNoise_list_Bayes = pickle.load(handle)
with open('BayesQuadratic_listNoiseConstr.pickle', 'rb') as handle:
    quadraticConstraint_list_Bayes = pickle.load(handle)
# --- Run each solver N_samples times per noise level; record final best
# --- objective and the true (noiseless) constraint violation of the best point.
# N_SAA = 1
N_samples = 20
quadraticNoise_list_CUATROl = []
quadraticConstraint_list_CUATROl = []
for i in range(n_noise):
    print('Iteration ', i+1, ' of CUATRO_l')
    best = []
    best_constr = []
    for j in range(N_samples):
        f = lambda x: Problem_quadraticNoise(x, noise_matrix[i], N_SAA)
        sol = CUATRO(f, x0, 0.5, bounds = bounds, max_f_eval = max_f_eval, \
                     N_min_samples = 6, tolerance = 1e-10,\
                     beta_red = 0.9, rnd = j, method = 'local', \
                     constr_handling = 'Fitting')
        best.append(sol['f_best_so_far'][-1])
        # Re-evaluate the returned best point with zero noise to get the
        # true constraint violation (sum of positive constraint values).
        _, g = Problem_quadraticNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    quadraticNoise_list_CUATROl.append(best)
    quadraticConstraint_list_CUATROl.append(best_constr)
# N_SAA = 1
N_samples = 20
quadraticNoise_list_DIRECT = []
quadraticConstraint_list_DIRECT = []
init_radius = 0.1
# NOTE(review): boundsDIR is defined but the DIRECT call below uses `bounds`
# — confirm whether boundsDIR was meant to be passed instead.
boundsDIR = np.array([[-1.5,1],[-1,1.5]])
for i in range(n_noise):
    print('Iteration ', i+1, ' of DIRECT')
    best = []
    best_constr = []
    for j in range(N_samples):
        f = lambda x: Problem_quadraticNoise(x, noise_matrix[i], N_SAA)
        # DIRECT's wrapper expects an (x, grad) signature; grad is unused.
        DIRECT_f = lambda x, grad: f(x)
        sol = DIRECTWrapper().solve(DIRECT_f, x0, bounds, \
                                    maxfun = max_f_eval, constraints=1)
        best.append(sol['f_best_so_far'][-1])
        _, g = Problem_quadraticNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    quadraticNoise_list_DIRECT.append(best)
    quadraticConstraint_list_DIRECT.append(best_constr)
# N_SAA = 1
N_samples = 20
quadraticNoise_list_CUATROg = []
quadraticConstraint_list_CUATROg = []
init_radius = 2
for i in range(n_noise):
    print('Iteration ', i+1, ' of CUATRO_g')
    best = []
    best_constr = []
    for j in range(N_samples):
        f = lambda x: Problem_quadraticNoise(x, noise_matrix[i], N_SAA)
        sol = CUATRO(f, x0, init_radius, bounds = bounds, max_f_eval = max_f_eval, \
                     N_min_samples = 15, tolerance = 1e-10,\
                     beta_red = 0.9, rnd = j, method = 'global', \
                     constr_handling = 'Discrimination')
        best.append(sol['f_best_so_far'][-1])
        _, g = Problem_quadraticNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    quadraticNoise_list_CUATROg.append(best)
    quadraticConstraint_list_CUATROg.append(best_constr)
# --- Build tidy long-format data for seaborn: one row per
# --- (noise level, sample, method), then draw publication boxplots.
noise = ['%.3f' % noise_matrix[i][0] for i in range(n_noise)]
noise_labels = [[noise[i]]*N_samples for i in range(n_noise)]
convergence = list(itertools.chain(*quadraticNoise_list_Bayes)) + \
              list(itertools.chain(*quadraticNoise_list_CUATROl)) + \
              list(itertools.chain(*quadraticNoise_list_DIRECT)) + \
              list(itertools.chain(*quadraticNoise_list_CUATROg))
constraints = list(itertools.chain(*quadraticConstraint_list_Bayes)) + \
              list(itertools.chain(*quadraticConstraint_list_CUATROl)) + \
              list(itertools.chain(*quadraticConstraint_list_DIRECT)) + \
              list(itertools.chain(*quadraticConstraint_list_CUATROg))
# Repeat the noise labels once per method (4 methods total).
noise = list(itertools.chain(*noise_labels))*4
method = ['Bayes. Opt.']*int(len(noise)/4) + ['CUATRO_l']*int(len(noise)/4) + \
         ['DIRECT']*int(len(noise)/4) + ['CUATRO_g']*int(len(noise)/4)
data = {'Best function evaluation': convergence, \
        "Constraint violation": constraints, \
        "Noise standard deviation": noise, \
        'Method': method}
df = pd.DataFrame(data)
# Publication styling.
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
          'legend.handlelength': 1.2}
plt.rcParams.update(params)
# Boxplot of the raw best objective per noise level and method.
ax = sns.boxplot(x = "Noise standard deviation", y = "Best function evaluation", hue = "Method", data = df, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.legend([])
# plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
#                 mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.savefig('Quadratic_publication_plots/Quadratic_feval50Convergence.svg', format = "svg")
plt.show()
# ax.set_ylim([0.1, 10])
# ax.set_yscale("log")
plt.clf()
# Normalise: subtract the per-noise-level best found by ANY method, so the
# second plot shows the optimality gap rather than absolute values.
min_list = np.array([np.min([np.min(quadraticNoise_list_Bayes[i]),
                             np.min(quadraticNoise_list_CUATROl[i]),
                             np.min(quadraticNoise_list_DIRECT[i]),
                             np.min(quadraticNoise_list_CUATROg[i])]) for i in range(n_noise)])
convergence_test = list(itertools.chain(*np.array(quadraticNoise_list_Bayes) - min_list.reshape(6,1))) + \
                   list(itertools.chain(*np.array(quadraticNoise_list_CUATROl) - min_list.reshape(6,1))) + \
                   list(itertools.chain(*np.array(quadraticNoise_list_DIRECT) - min_list.reshape(6,1))) + \
                   list(itertools.chain(*np.array(quadraticNoise_list_CUATROg) - min_list.reshape(6,1)))
data_test = {'Best function evaluation': convergence_test, \
             "Constraint violation": constraints, \
             "Noise standard deviation": noise, \
             'Method': method}
df_test = pd.DataFrame(data_test)
ax = sns.boxplot(x = "Noise standard deviation", y = 'Best function evaluation', hue = "Method", data = df_test, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
# plt.legend([])
plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
           mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.ylabel(r'$f_{best, sample}$ - $f_{opt, noise}$')
plt.savefig('Quadratic_publication_plots/Quadratic_feval50ConvergenceLabel.svg', format = "svg")
plt.show()
plt.clf()
# Constraint-violation boxplot with overlaid individual samples.
ax = sns.boxplot(x = "Noise standard deviation", y = "Constraint violation", \
                 hue = "Method", data = df, palette = "muted", fliersize = 0)
ax = sns.stripplot(x = "Noise standard deviation", y = "Constraint violation", \
                   hue = "Method", data = df, palette = "muted", dodge = True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.tight_layout()
plt.savefig('Quadratic_publication_plots/Quadratic_feval50Constraints.svg', format = "svg")
plt.show()
plt.clf()
# --- Second experiment: half the evaluation budget but 2 SAA samples per
# --- evaluation (same total number of function draws).
# max_f_eval = 50 ; N_SAA = 1
max_f_eval = 25 ; N_SAA = 2
max_it = 100
#CUATRO local, CUATRO global, BO, DIRECT
# NOTE(review): filenames say 'BayesQuadrati' (missing the trailing 'c') —
# presumably matching the files written by the BO script; confirm.
with open('BayesQuadrati_listNoiseConvSAA.pickle', 'rb') as handle:
    quadraticSAANoise_list_Bayes = pickle.load(handle)
with open('BayesQuadrati_listNoiseConstrSAA.pickle', 'rb') as handle:
    quadraticSAAConstraint_list_Bayes = pickle.load(handle)
# N_SAA = 1
N_samples = 20
quadraticSAANoise_list_CUATROl = []
quadraticSAAConstraint_list_CUATROl = []
for i in range(n_noise):
    print('Iteration ', i+1, ' of CUATRO_l')
    best = []
    best_constr = []
    for j in range(N_samples):
        f = lambda x: Problem_quadraticNoise(x, noise_matrix[i], N_SAA)
        sol = CUATRO(f, x0, 0.5, bounds = bounds, max_f_eval = max_f_eval, \
                     N_min_samples = 6, tolerance = 1e-10,\
                     beta_red = 0.9, rnd = j, method = 'local', \
                     constr_handling = 'Fitting')
        best.append(sol['f_best_so_far'][-1])
        # Noiseless re-evaluation for the true constraint violation.
        _, g = Problem_quadraticNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    quadraticSAANoise_list_CUATROl.append(best)
    quadraticSAAConstraint_list_CUATROl.append(best_constr)
# N_SAA = 1
N_samples = 20
quadraticSAANoise_list_DIRECT = []
quadraticSAAConstraint_list_DIRECT = []
for i in range(n_noise):
    print('Iteration ', i+1, ' of DIRECT')
    best = []
    best_constr = []
    for j in range(N_samples):
        f = lambda x: Problem_quadraticNoise(x, noise_matrix[i], N_SAA)
        DIRECT_f = lambda x, grad: f(x)
        sol = DIRECTWrapper().solve(DIRECT_f, x0, bounds, \
                                    maxfun = max_f_eval, constraints=1)
        best.append(sol['f_best_so_far'][-1])
        _, g = Problem_quadraticNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    quadraticSAANoise_list_DIRECT.append(best)
    quadraticSAAConstraint_list_DIRECT.append(best_constr)
# N_SAA = 1
N_samples = 20
quadraticSAANoise_list_CUATROg = []
quadraticSAAConstraint_list_CUATROg = []
init_radius = 2
for i in range(n_noise):
    print('Iteration ', i+1, ' of CUATRO_g')
    best = []
    best_constr = []
    for j in range(N_samples):
        f = lambda x: Problem_quadraticNoise(x, noise_matrix[i], N_SAA)
        sol = CUATRO(f, x0, init_radius, bounds = bounds, max_f_eval = max_f_eval, \
                     N_min_samples = 15, tolerance = 1e-10,\
                     beta_red = 0.9, rnd = j, method = 'global', \
                     constr_handling = 'Discrimination')
        best.append(sol['f_best_so_far'][-1])
        _, g = Problem_quadraticNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    quadraticSAANoise_list_CUATROg.append(best)
    quadraticSAAConstraint_list_CUATROg.append(best_constr)
# --- Same plotting pipeline as the first experiment, for the SAA=2 results.
noise = ['%.3f' % noise_matrix[i][0] for i in range(n_noise)]
noise_labels = [[noise[i]]*N_samples for i in range(n_noise)]
convergence = list(itertools.chain(*quadraticSAANoise_list_Bayes)) + \
              list(itertools.chain(*quadraticSAANoise_list_CUATROl)) + \
              list(itertools.chain(*quadraticSAANoise_list_DIRECT)) + \
              list(itertools.chain(*quadraticSAANoise_list_CUATROg))
constraints = list(itertools.chain(*quadraticSAAConstraint_list_Bayes)) + \
              list(itertools.chain(*quadraticSAAConstraint_list_CUATROl)) + \
              list(itertools.chain(*quadraticSAAConstraint_list_DIRECT)) + \
              list(itertools.chain(*quadraticSAAConstraint_list_CUATROg))
noise = list(itertools.chain(*noise_labels))*4
method = ['Bayes. Opt.']*int(len(noise)/4) + ['CUATRO_l']*int(len(noise)/4) + \
         ['DIRECT']*int(len(noise)/4) + ['CUATRO_g']*int(len(noise)/4)
data = {'Best function evaluation': convergence, \
        "Constraint violation": constraints, \
        "Noise standard deviation": noise, \
        'Method': method}
df = pd.DataFrame(data)
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
          'legend.handlelength': 1.2}
plt.rcParams.update(params)
ax = sns.boxplot(x = "Noise standard deviation", y = "Best function evaluation", hue = "Method", data = df, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.legend([])
# plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
#                 mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.savefig('Quadratic_publication_plots/Quadratic_SAA2feval25Convergence.svg', format = "svg")
plt.show()
# ax.set_ylim([0.1, 10])
# ax.set_yscale("log")
plt.clf()
# Optimality-gap normalisation (subtract per-noise-level best across methods).
min_list = np.array([np.min([np.min(quadraticSAANoise_list_Bayes[i]),
                             np.min(quadraticSAANoise_list_CUATROl[i]),
                             np.min(quadraticSAANoise_list_DIRECT[i]),
                             np.min(quadraticSAANoise_list_CUATROg[i])]) for i in range(n_noise)])
convergence_test = list(itertools.chain(*np.array(quadraticSAANoise_list_Bayes) - min_list.reshape(6,1))) + \
                   list(itertools.chain(*np.array(quadraticSAANoise_list_CUATROl) - min_list.reshape(6,1))) + \
                   list(itertools.chain(*np.array(quadraticSAANoise_list_DIRECT) - min_list.reshape(6,1))) + \
                   list(itertools.chain(*np.array(quadraticSAANoise_list_CUATROg) - min_list.reshape(6,1)))
data_test = {'Best function evaluation': convergence_test, \
             "Constraint violation": constraints, \
             "Noise standard deviation": noise, \
             'Method': method}
df_test = pd.DataFrame(data_test)
ax = sns.boxplot(x = "Noise standard deviation", y = 'Best function evaluation', hue = "Method", data = df_test, palette = "muted")
# plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
# plt.legend([])
plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
           mode="expand", borderaxespad=0, ncol=4)
plt.tight_layout()
plt.ylabel(r'$f_{best, sample}$ - $f_{opt, noise}$')
plt.savefig('Quadratic_publication_plots/Quadratic_SAA2feval25ConvergenceLabel.svg', format = "svg")
plt.show()
plt.clf()
# Constraint-violation boxplot with individual samples overlaid.
ax = sns.boxplot(x = "Noise standard deviation", y = "Constraint violation", \
                 hue = "Method", data = df, palette = "muted", fliersize = 0)
ax = sns.stripplot(x = "Noise standard deviation", y = "Constraint violation", \
                   hue = "Method", data = df, palette = "muted", dodge = True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.tight_layout()
plt.savefig('Quadratic_publication_plots/Quadratic_SAA2feval25Constraints.svg', format = "svg")
plt.show()
plt.clf()
| StarcoderdataPython |
1852790 | from typing import Dict, Type, List
import os
import tempfile
import pathlib
import logging
import time
import pandas as pd
from libs.datasets import dataset_base
PICKLE_CACHE_ENV_KEY = "PICKLE_CACHE_DIR"
_EXISTING_CACHE_KEYS = set()
_logger = logging.getLogger(__name__)
def set_pickle_cache_tempdir(force=False) -> str:
    """Point the pickle cache at a temporary directory and return its path.

    Note that the directory does not clean up after itself.

    Args:
        force: If True, will force a cache key to be a new tempdir
            if key already exists.
    """
    existing = os.getenv(PICKLE_CACHE_ENV_KEY)
    if existing and not force:
        # A cache dir is already configured; reuse it.
        _logger.info(f"Using existing pickle cache tmpdir to {existing}")
        return existing

    fresh = tempfile.mkdtemp()
    os.environ[PICKLE_CACHE_ENV_KEY] = fresh
    _logger.info(f"Setting pickle cache tmpdir to {fresh}")
    return fresh
def cache_dataset_on_disk(
    target_dataset_cls: Type[dataset_base.DatasetBase], max_age_in_minutes=30, key=None
):
    """Caches underlying pandas data from to an on disk location.

    The wrapped function must return an instance of ``target_dataset_cls``
    whose ``.data`` attribute supports ``to_pickle``. Caching is active only
    when the PICKLE_CACHE_DIR environment variable is set (see
    set_pickle_cache_tempdir); otherwise calls pass straight through.

    Args:
        target_dataset_cls: Class of dataset to wrap pandas data with.
        max_age_in_minutes: Maximum age of cache before it becomes stale.
        key: Cache key. If not specified, uses name of function.

    Raises:
        ValueError: If the resolved cache key is already in use by another
            wrapped function (keys must be unique per process).
    """
    import functools  # local import keeps the module's import block unchanged

    def decorator(func):
        cache_key = key or func.__name__
        if cache_key in _EXISTING_CACHE_KEYS:
            # Report the *resolved* key: when an explicit `key` was passed,
            # the function name alone would be misleading.
            raise ValueError(
                f"Have already wrapped a function with the key name: {cache_key}. "
                "Please specify a different key."
            )
        _EXISTING_CACHE_KEYS.add(cache_key)

        @functools.wraps(func)  # preserve the wrapped function's metadata
        def f() -> target_dataset_cls:
            pickle_cache_dir = os.getenv(PICKLE_CACHE_ENV_KEY)
            if not pickle_cache_dir:
                # Caching disabled: just call through.
                return func()
            cache_path = pathlib.Path(pickle_cache_dir) / (cache_key + ".pickle")
            if cache_path.exists():
                modified_time = cache_path.stat().st_mtime
                cache_age_in_minutes = (time.time() - modified_time) / 60
                if cache_age_in_minutes < max_age_in_minutes:
                    # Fresh cache hit: rebuild the dataset from the pickle.
                    return target_dataset_cls(pd.read_pickle(cache_path))
                _logger.debug("Cache expired, reloading.")
            dataset = func()
            dataset.data.to_pickle(cache_path)
            return dataset

        return f

    return decorator
| StarcoderdataPython |
4899709 | <reponame>zemfrog/zemfrog-test<filename>zemfrog_test/__init__.py
from .command import group
# Package metadata (placeholders were scrubbed during dataset anonymisation).
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0.3"
# Alias expected by zemfrog when it registers this package's CLI commands.
command = group
| StarcoderdataPython |
147147 | <reponame>hedrickbt/TigerTag<gh_stars>1-10
import imghdr
import logging
import os
import tempfile
from plexapi.server import PlexServer
from tigertag.scanner import FileInfo
from tigertag.scanner import Scanner
from tigertag.util import calc_hash
logger = logging.getLogger(__name__)
DEFAULT_URL = 'http://127.0.0.1:32400'
class PlexScanner(Scanner):
    # Scanner implementation that walks a Plex photo library section and
    # notifies registered listeners for each image found.

    def connect(self):
        # Build the PlexServer client from props: TOKEN is required; URL is
        # optional and falls back to DEFAULT_URL (set in __init__).
        if 'TOKEN' in self.props:
            if 'URL' in self.props:
                self.url = self.props['URL']
            self.plex = PlexServer(self.url, self.props['TOKEN'])
        else:
            raise ValueError('The plex token has not been set for the {} ({}.{})'.format(
                self.name, __name__, type(self).__name__))

    def __init__(self, name, enabled):
        super().__init__(name, enabled)
        self.section = None  # Plex library section name, resolved from props['SECTION']
        self.plex = None     # PlexServer client, created lazily by connect()
        self.url = DEFAULT_URL

    def scan(self):
        # Resolve and validate the library section, then connect and iterate
        # every photo in every album of that section.
        if self.section is None and 'SECTION' in self.props:
            self.section = self.props['SECTION']
        if self.section is None:
            raise ValueError('The section has not been set for the {} ({}.{})'.format(
                self.name, __name__, type(self).__name__))
        self.connect()
        albums = self.plex.library.section(self.section)
        for album in albums.all():
            # print(f'album: {album.title}')
            for photo in album.photos():
                # print(f'\tphoto: {photo.title}')
                path = photo.locations[0]
                ext_id = photo.ratingKey
                # Download a temporary local copy so it can be inspected/hashed.
                temp_photos = photo.download(tempfile.gettempdir())
                temp_photo_path = temp_photos[0]
                filename = os.path.basename(path)
                image_type = imghdr.what(temp_photo_path)
                if image_type is None:
                    # Not a recognised image format — skip it.
                    logger.debug('Ignoring {} temporarily at {}'.format(path, temp_photo_path))
                else:
                    logger.info('Scanning {} temporarily at {}'.format(path, temp_photo_path))
                    file_hash = calc_hash(temp_photo_path)
                    file_info = FileInfo(filename, path, file_hash, temp_photo_path, ext_id)
                    # Notify all listeners registered on the Scanner base class.
                    for listener in self.listeners:
                        listener.on_file(self, file_info)
                # Clean up the temporary download in either case.
                if os.path.exists(temp_photo_path):
                    os.remove(temp_photo_path)
| StarcoderdataPython |
1763129 | <filename>tasks/preprocessing/finetuning.py
# imports
import numpy as np
import pandas as pd
from pathlib import Path
import json
import yaml
from tqdm import tqdm
import pickle
import librosa
import plotext as plt
from IPython.display import display, HTML
import random
import os
import shutil
import torch
from jiwer import compute_measures
import datasets
from datasets import Dataset, DatasetDict
# from wer import compute
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor, Wav2Vec2CTCTokenizer, Wav2Vec2ForCTC, WavLMForCTC, TrainingArguments, Trainer
from transformers.integrations import TensorBoardCallback
from dataclasses import dataclass, field
from typing import Any, Dict, List, Tuple, Optional, Union
# Select the compute device once at import time; finetuning below uses it.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Device: {device}\n')
class WER(datasets.Metric):
    '''
    Word-error-rate metric backed by jiwer's compute_measures.
    Note: predictions/references are bound at construction time rather than
    passed to compute(), unlike the usual datasets.Metric API.
    '''
    def __init__(self, predictions=None, references=None, concatenate_texts=False):
        self.predictions = predictions
        self.references = references
        self.concatenate_texts = concatenate_texts

    def compute(self):
        # Single jiwer call over the whole corpus when concatenating.
        if self.concatenate_texts:
            return compute_measures(self.references, self.predictions)["wer"]
        # Otherwise accumulate error counts pair by pair so each utterance
        # is aligned independently.
        wrong = 0
        total_words = 0
        for hyp, ref in zip(self.predictions, self.references):
            measures = compute_measures(ref, hyp)
            wrong += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total_words += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return wrong / total_words
class FinetuningPreparation:
    '''
    a class for all the finetuning preparations done:
    loads pickled train/dev/test splits, builds a Wav2Vec2 processor
    (character tokenizer + feature extractor), preprocesses the audio into
    model inputs, and filters out over-long samples.
    '''
    def __init__(self, train_pkl: str, dev_pkl: str, test_pkl: str, processor_path: str = './processor/', max_sample_length: int = 450000, mode: str='finetuning_prep') -> None:
        '''
        train_pkl: file path of the train pickle file
        dev_pkl: file path of the dev pickle file
        test_pkl: file path of the test pickle file
        processor_path: file path of the processor file
        max_sample_length: max audio sample length threshold
        mode: either finetune mode or to see the audio length distribution mode
        '''
        self.train_pkl = train_pkl
        self.dev_pkl = dev_pkl
        self.test_pkl = test_pkl
        self.processor_path = processor_path
        self.max_sample_length = max_sample_length
        self.mode = mode

    def load_pickle_data(self) -> DatasetDict:
        '''
        load the pickle data file
        '''
        # Each pickle is expected to hold a pandas DataFrame with at least
        # 'audio' and 'text' columns (consumed downstream) — TODO confirm.
        with open(self.train_pkl, 'rb') as f:
            df_train = pickle.load(f)
        with open(self.dev_pkl, 'rb') as f:
            df_dev = pickle.load(f)
        with open(self.test_pkl, 'rb') as f:
            df_test = pickle.load(f)
        # make it into a DatasetDict Object
        dataset = DatasetDict({
            "train": Dataset.from_pandas(df_train),
            "dev": Dataset.from_pandas(df_dev),
            "test": Dataset.from_pandas(df_test)
        })
        # returns the DatasetDict object from the pkl datasets
        return dataset

    def extract_all_chars(self, batch) -> List:
        '''
        extract all characters available in the train and dev datasets
        '''
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        # returns a list of all possible characters from the datasets
        return vocab

    def build_processor(self, dataset: pd.DataFrame) -> Wav2Vec2Processor:
        '''
        prepare the processor object (character-level CTC tokenizer +
        feature extractor), saving it to self.processor_path
        dataset: load the pickle datasets into a DataFrame object
        '''
        # extract characters from train dataset
        vocabs_train = self.extract_all_chars(dataset['train'])
        # extract characters from dev dataset
        vocabs_dev = self.extract_all_chars(dataset['dev'])
        # create a union of all distinct letters in the training and the dev datasets
        vocab_list = list(set(vocabs_train) | set(vocabs_dev))
        # convert resulting list into an enumerated dictionary
        vocab_dict = {v: k for k, v in enumerate(vocab_list)}
        # replace space with a more visible character |
        vocab_dict["|"] = vocab_dict[" "]
        del vocab_dict[" "]
        # add the [UNK], [PAD], bos and eos token
        vocab_dict["[UNK]"] = len(vocab_dict)
        vocab_dict["[PAD]"] = len(vocab_dict)
        vocab_dict["<s>"] = len(vocab_dict)
        vocab_dict["</s>"] = len(vocab_dict)
        # make the useless vocabs as [UNK] in the end
        try:
            del vocab_dict["#"]
        except KeyError:
            pass
        try:
            del vocab_dict["-"]
        except KeyError:
            pass
        # renumber the dictionary values to fill up the blanks
        # (ids must be contiguous for the tokenizer)
        count = 0
        for key, value in vocab_dict.items():
            vocab_dict[key] = count
            count += 1
        # vocabulary is completed, now save the vocabulary as a json file
        with open('vocab.json', 'w') as vocab_file:
            json.dump(vocab_dict, vocab_file)
        # use the json file to instantiate an object of the Wav2Vec2CTCTokenizer class
        tokenizer = Wav2Vec2CTCTokenizer('vocab.json', unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", bos_token='<s>', eos_token='</s>')
        # after the tokenizer object is created, the vocab.json file is not needed anymore, since the processor file will be created and the vocab.json will be there, hence can remove it
        os.remove('vocab.json')
        # PREPARING THE FEATURE EXTRACTOR
        feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=False)
        # wrap the feature extractor and tokenizer as a single Wav2VevProcessor class object
        processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
        # save the processor
        processor.save_pretrained(self.processor_path)
        # returns the processor object
        return processor

    def preprocess_dataset_for_transformer(self, batch, processor: Wav2Vec2Processor):
        '''
        preprocess the dataset to feed into the transformer:
        raw audio -> input_values, transcript -> label ids
        '''
        # proceed with the preprocessing of data
        audio = batch["audio"]
        # batched output is "un-batched" to ensure mapping is correct
        batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
        batch["input_length"] = len(batch["input_values"])
        with processor.as_target_processor():
            batch["labels"] = processor(batch["text"]).input_ids
        return batch

    def get_audio_length_distribution(self, dataset: pd.DataFrame, processor: Wav2Vec2Processor) -> None:
        '''
        get the audio sample distribution - separate branch to check the distribution of the audio length of the train datasets in terms of sampling size
        dataset: the dataframe loaded from the pickle data file
        processor: the wav2vec2 processor
        '''
        # further preprocessing of the dataset
        dataset = dataset.map(lambda x: self.preprocess_dataset_for_transformer(x, processor), remove_columns=dataset.column_names["train"], num_proc=1)
        # make a list to get the list of audio length of all the training data
        audio_length_list = []
        for idx, item in tqdm(enumerate(dataset['train'])):
            audio_length_list.append(dataset['train'][idx]['input_length'])
        # get the distribution of the audio sample
        #data_dist = pd.Series(audio_length_list)
        #data_dist.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
        # change to plotext implementation instead of matplotlib
        plt.hist(audio_length_list, bins=50, label='train data')
        plt.title('Distribution')
        plt.xlabel('Samples')
        plt.ylabel('Number of inputs')
        plt.show()

    def filter_audio_length(self, dataset: pd.DataFrame) -> pd.DataFrame:
        '''
        filter the audio length to prevent OOM issue due to lengthy audio files
        dataset: the dataframe loaded from the pickle data file
        '''
        # filter out those longer duration videos (based on the histogram with the right tail minority)
        dataset["train"] = dataset["train"].filter(lambda x: x < self.max_sample_length, input_columns=["input_length"])
        # returns the dataset with the audio length within the threshold
        return dataset

    def finetuning_preparation(self) -> Tuple[pd.DataFrame, Wav2Vec2Processor]:
        '''
        consolidating all the above methods for preparation
        '''
        # load the DatasetDict object from the pkl files
        dataset = self.load_pickle_data()
        # prepare the processor object
        processor = self.build_processor(dataset)
        # preprocess the dataset to feed into the transformer
        dataset = dataset.map(lambda x: self.preprocess_dataset_for_transformer(x, processor), remove_columns=dataset.column_names["train"], num_proc=1)
        # filter the audio length to prevent OOM issue due to lengthy audio files
        dataset = self.filter_audio_length(dataset)
        # returns the dataset and the processer
        return dataset, processor

    def get_audio_length_distribution_preparation(self) -> None:
        '''
        wrapper class of get_audio_length_distribution
        '''
        # load the DatasetDict object from the pkl files
        dataset = self.load_pickle_data()
        # prepare the processor object
        processor = self.build_processor(dataset)
        # get the distribution of the train dataset
        self.get_audio_length_distribution(dataset, processor)

    def __call__(self):
        # Dispatch on the configured mode; any other mode value silently
        # returns None.
        if self.mode == 'finetuning_prep':
            return self.finetuning_preparation()
        elif self.mode == 'get_audio_length_distribution':
            return self.get_audio_length_distribution_preparation()
# build a data collator class that uses ctc with padding
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for proccessing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        # Pad the audio inputs with the feature extractor's padding logic.
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Pad the label ids with the tokenizer's padding logic.
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class Finetuning:
'''
set up trainer class to proceed with finetuning
'''
    def __init__(self, train_pkl: str, dev_pkl: str, test_pkl: str, input_processor_path: str, input_checkpoint_path: str, input_pretrained_model_path: str, output_processor_path: str, output_checkpoint_path: str, output_saved_model_path: str, max_sample_length: int, batch_size: int, epochs: int, gradient_accumulation_steps: int, save_steps: int, eval_logging_steps: int, lr: float, weight_decay: float, warmup_steps: int, architecture: str, finetune_from_scratch: bool=False) -> None:
        '''
        train_pkl: file path of the train pickle file
        dev_pkl: file path of the dev pickle file
        test_pkl: file path of the test pickle file
        input_processor_path: directory of the processor path
        input_checkpoint_path: directory of the checkpoint path
        input_pretrained_model_path: directory of the pretrained model path
        output_processor_path: directory of the processor path produced after finetuning
        output_checkpoint_path: directory of the checkpoint path produced after finetuning
        output_saved_model_path: directory of the pretrained model path produced after finetuning
        max_sample_length: max audio sample length threshold
        batch_size: batch size used to finetune the model
        epochs: number of epochs used to finetune the model
        gradient_accumulation_steps: how many steps it accumulates before updating the gradient
        save_steps: the steps interval before saving the checkpoint
        eval_logging_steps: the steps interval before evaluation with the dev set
        lr: learning rate used to finetune the model
        weight_decay: the weight decay of the learning rate
        warmup_steps: number of finetuning steps for warmup
        architecture: using either the wav2vec2 or the wavlm architecture
        finetune_from_scratch: either finetuning from scratch or resuming from checkpoint
        '''
        # Store the configuration verbatim; all values are consumed later by
        # finetune() when building the HF Trainer.
        self.train_pkl = train_pkl
        self.dev_pkl = dev_pkl
        self.test_pkl = test_pkl
        self.input_processor_path = input_processor_path
        self.input_checkpoint_path = input_checkpoint_path
        self.input_pretrained_model_path = input_pretrained_model_path
        self.output_processor_path = output_processor_path
        self.output_checkpoint_path = output_checkpoint_path
        self.output_saved_model_path = output_saved_model_path
        self.max_sample_length = max_sample_length
        self.batch_size = batch_size
        self.epochs = epochs
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.save_steps = save_steps
        self.eval_logging_steps = eval_logging_steps
        self.lr = lr
        self.weight_decay = weight_decay
        self.warmup_steps = warmup_steps
        self.architecture = architecture
        self.finetune_from_scratch = finetune_from_scratch
def compute_metrics(self, pred, processor) -> Dict:
'''
defining evaluation metric during finetuning process
'''
# load evaluation metric
#wer_metric = load_metric("wer")
# get the predicted logits
pred_logits = pred.predictions
# get the predicted ids (character)
pred_ids = np.argmax(pred_logits, axis=-1)
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
# decode
pred_str = processor.batch_decode(pred_ids)
# we do not want to group tokens when computing the metrics
label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
# obtain metric score
# wer = wer_metric.compute(predictions=pred_str, references=label_str)
get_wer = WER(predictions=pred_str, references=label_str)
wer = get_wer.compute()
# returns the word error rate
return {"wer": wer}
    def finetune(self) -> tuple:
        '''
        Proceed with finetuning of the model.

        Returns a 4-tuple:
        (output_checkpoint_path, output_processor_path,
         input_pretrained_model_path, output_saved_model_path)
        '''
        # load the preprocessed dataset from the FinetuningPreparation class
        data_preparation = FinetuningPreparation(train_pkl=self.train_pkl,
                                                 dev_pkl=self.dev_pkl,
                                                 test_pkl=self.test_pkl,
                                                 processor_path=self.input_processor_path,
                                                 max_sample_length=self.max_sample_length,
                                                 mode='finetuning_prep')
        if self.finetune_from_scratch:
            # obtain the preprocessed dataset and a freshly built processor
            dataset, processor = data_preparation()
            if self.architecture == 'wav2vec2':
                # load the pretrained base model and finetune it from scratch
                model = Wav2Vec2ForCTC.from_pretrained(
                    self.input_pretrained_model_path,
                    ctc_loss_reduction="mean",
                    pad_token_id=processor.tokenizer.pad_token_id,
                )
            elif self.architecture == 'wavlm':
                model = WavLMForCTC.from_pretrained(
                    self.input_pretrained_model_path,
                    ctc_loss_reduction="mean",
                    pad_token_id=processor.tokenizer.pad_token_id,
                )
        else:
            # processor was already built in a previous run: take only the
            # dataset here and load the saved processor below
            dataset, _ = data_preparation()
            # resume finetuning from the previously saved model
            if self.architecture == 'wav2vec2':
                model = Wav2Vec2ForCTC.from_pretrained(self.input_pretrained_model_path)
            elif self.architecture == 'wavlm':
                model = WavLMForCTC.from_pretrained(self.input_pretrained_model_path)
            processor = Wav2Vec2Processor.from_pretrained(self.input_processor_path)
        # data collator that dynamically pads batches for CTC training
        data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
        # training configuration; checkpoints go to the *input* checkpoint
        # directory and are copied to the output directory afterwards
        training_args = TrainingArguments(
            output_dir=self.input_checkpoint_path,
            group_by_length=True,
            per_device_train_batch_size=self.batch_size,
            evaluation_strategy="steps",
            num_train_epochs=self.epochs,
            fp16=True,
            gradient_checkpointing=True,
            gradient_accumulation_steps=self.gradient_accumulation_steps,
            save_steps=self.save_steps,
            eval_steps=self.eval_logging_steps,
            logging_steps=self.eval_logging_steps,
            learning_rate=self.lr,
            weight_decay=self.weight_decay,
            warmup_steps=self.warmup_steps,
            save_total_limit=1,
            push_to_hub=False,
        )
        # trainer wires the model, collator, metric and datasets together
        trainer = Trainer(
            model=model,
            data_collator=data_collator,
            args=training_args,
            compute_metrics=lambda x : self.compute_metrics(x, processor),
            train_dataset=dataset["train"],
            eval_dataset=dataset["dev"],
            tokenizer=processor.feature_extractor,
            callbacks=[TensorBoardCallback(),]
        )
        # start the finetuning - either from scratch or resume from checkpoint
        if self.finetune_from_scratch:
            trainer.train()
        else:
            trainer.train(resume_from_checkpoint=True)
        # for clearml: copy checkpoints from the input to the output
        # destination; when running locally both paths are the same
        if self.input_checkpoint_path == self.output_checkpoint_path:
            pass
        else:
            shutil.copytree(self.input_checkpoint_path, self.output_checkpoint_path)
        # persist the final model, trainer state and processor
        trainer.save_model(self.output_saved_model_path)
        trainer.save_state()
        processor.save_pretrained(self.output_processor_path)
        # returns the file paths of the produced artefacts
        return self.output_checkpoint_path, self.output_processor_path, self.input_pretrained_model_path, self.output_saved_model_path
    def __call__(self):
        '''shortcut: invoking the instance runs the full finetuning pipeline'''
        return self.finetune()
class Evaluation:
    '''
    Evaluation of a finetuned model (greedy decoding, no language model).
    '''
    def __init__(self, dev_pkl: str, test_pkl: str, processor_path: str, saved_model_path: str, architecture: str) -> None:
        '''
        dev_pkl: file path of the dev pickle file
        test_pkl: file path of the test pickle file
        processor_path: directory of the processor path after finetuning
        saved_model_path: directory of the model path after finetuning
        architecture: using either the wav2vec2 or the wavlm architecture
        '''
        self.dev_pkl = dev_pkl
        self.test_pkl = test_pkl
        self.processor_path = processor_path
        self.saved_model_path = saved_model_path
        self.architecture = architecture
    def load_pickle_data(self) -> DatasetDict:
        '''
        Load the pickled dataframes - train data not required here.
        '''
        with open(self.dev_pkl, 'rb') as f:
            df_dev = pickle.load(f)
        with open(self.test_pkl, 'rb') as f:
            df_test = pickle.load(f)
        # wrap the two dataframes into a single DatasetDict object
        dataset = DatasetDict({
            "dev": Dataset.from_pandas(df_dev),
            "test": Dataset.from_pandas(df_test)
        })
        # returns the DatasetDict object
        return dataset
    def preprocess_dataset_for_transformer(self, batch, processor):
        '''
        Preprocess one example so it can be fed into the transformer.
        NOTE(review): assumes batch["audio"] carries "array" and
        "sampling_rate" keys - confirm against the pickle schema.
        '''
        audio = batch["audio"]
        # batched output is "un-batched" to ensure mapping is correct
        batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
        batch["input_length"] = len(batch["input_values"])
        # encode the transcript with the target (tokenizer) processor
        with processor.as_target_processor():
            batch["labels"] = processor(batch["text"]).input_ids
        return batch
    def map_to_result_gpu(self, batch, model, processor):
        '''
        Run greedy inference for one example and attach the decoded strings.
        `device` is a module-level global defined elsewhere in this file.
        '''
        # note: model.to(device) is repeated on every example; harmless but
        # only the first call actually moves the weights
        model.to(device)
        with torch.no_grad():
            input_values = torch.tensor(batch["input_values"], device=device).unsqueeze(0)
            logits = model(input_values).logits
        # greedy decoding of the logits
        pred_ids = torch.argmax(logits, dim=-1)
        batch["pred_str"] = processor.batch_decode(pred_ids)[0]
        # decode the reference without CTC token grouping
        batch["text"] = processor.decode(batch["labels"], group_tokens=False)
        return batch
    def evaluate(self):
        '''
        Compute and print the WER on the dev and test splits.
        '''
        # load the saved model and processor from the local directories
        if self.architecture == 'wav2vec2':
            model = Wav2Vec2ForCTC.from_pretrained(self.saved_model_path)
        elif self.architecture == 'wavlm':
            model = WavLMForCTC.from_pretrained(self.saved_model_path)
        processor = Wav2Vec2Processor.from_pretrained(self.processor_path)
        # load the dev and test dataset
        dataset = self.load_pickle_data()
        # preprocess the dataset to feed into the transformer
        dataset = dataset.map(lambda x: self.preprocess_dataset_for_transformer(x, processor), remove_columns=dataset.column_names["dev"], num_proc=1)
        # run inference over both splits
        results_dev = dataset["dev"].map(lambda x: self.map_to_result_gpu(x, model, processor), remove_columns=dataset["dev"].column_names)
        results_test = dataset["test"].map(lambda x: self.map_to_result_gpu(x, model, processor), remove_columns=dataset["test"].column_names)
        # get the wer of the dev and the test set
        get_wer_dev = WER(predictions=results_dev["pred_str"], references=results_dev["text"])
        get_wer_test = WER(predictions=results_test["pred_str"], references=results_test["text"])
        print("\nValidation WER: {:.5f}".format(get_wer_dev.compute()))
        print("Test WER: {:.5f}".format(get_wer_test.compute()))
        print()
    def __call__(self):
        '''shortcut: invoking the instance runs the evaluation'''
        return self.evaluate()
if __name__ == "__main__":
########## LIBRISPEECH: GET AUDIO LENGTH DISTRIBUTION ##########
# print('Getting the audio length distribution of the train dataset\n')
# distribution = FinetuningPreparation(train_pkl='./root/pkl/librispeech_train.pkl',
# dev_pkl='./root/pkl/librispeech_dev.pkl',
# test_pkl='./root/pkl/librispeech_test.pkl',
# processor_path='./root/wav2vec2/processor/',
# max_sample_length=None,
# mode='get_audio_length_distribution')
# distribution()
###################################################
# ########## LIBRISPEECH: FINETUNING (FROM SCRATCH) - WAV2VEC2 ##########
# finetune_model = Finetuning(train_pkl='./root/pkl/librispeech_train.pkl',
# dev_pkl='./root/pkl/librispeech_dev.pkl',
# test_pkl='./root/pkl/librispeech_test.pkl',
# input_processor_path='./root/librispeech/wav2vec2/processor/',
# input_checkpoint_path='./root/librispeech/wav2vec2/ckpt/',
# input_pretrained_model_path='./root_base_model/wav2vec2_base_model/',
# output_processor_path='./root/librispeech/wav2vec2/processor/',
# output_checkpoint_path='./root/librispeech/wav2vec2/ckpt/',
# output_saved_model_path='./root/librispeech/wav2vec2/saved_model/',
# max_sample_length=450000,
# batch_size=8,
# epochs=10,
# gradient_accumulation_steps=4,
# save_steps=500,
# eval_logging_steps=50,
# lr=1e-4,
# weight_decay=0.005,
# warmup_steps=1000,
# architecture='wav2vec2',
# finetune_from_scratch=True)
# _, _, _, _ = finetune_model()
# ##################################################
# # ########## LIBRISPEECH: FINETUNING (RESUMING FROM CHECKPOINT) - WAV2VEC2 ##########
# finetune_model = Finetuning(train_pkl='./root/pkl/librispeech_train.pkl',
# dev_pkl='./root/pkl/librispeech_dev.pkl',
# test_pkl='./root/pkl/librispeech_test.pkl',
# input_processor_path='./root/librispeech/wav2vec2/processor/',
# input_checkpoint_path='./root/librispeech/wav2vec2/ckpt/',
# input_pretrained_model_path='./root/librispeech/wav2vec2/saved_model/',
# output_processor_path='./root/librispeech/wav2vec2/processor/',
# output_checkpoint_path='./root/librispeech/wav2vec2/ckpt/',
# output_saved_model_path='./root/librispeech/wav2vec2/saved_model/',
# max_sample_length=450000,
# batch_size=8,
# epochs=15,
# gradient_accumulation_steps=4,
# save_steps=500,
# eval_logging_steps=50,
# lr=1e-4,
# weight_decay=0.005,
# warmup_steps=1000,
# architecture='wav2vec2',
# finetune_from_scratch=False)
# _, _, _, _ = finetune_model()
# ####################################################
# # ########## LIBRISPEECH: EVALUATION - WAV2VEC2 ##########
# evaluation = Evaluation(dev_pkl='./root/pkl/magister_data_v2_wav_16000_dev.pkl',
# test_pkl='./root/pkl/magister_data_v2_wav_16000_test.pkl',
# processor_path='./root/magister_v2/wav2vec2/processor/',
# saved_model_path='./root/magister_v2/wav2vec2/saved_model/',
# architecture='wav2vec2')
# evaluation()
# ####################################################
# ########## LIBRISPEECH: FINETUNING (FROM SCRATCH) - WAVLM ##########
# finetune_model = Finetuning(train_pkl='./root/pkl/librispeech_train.pkl',
# dev_pkl='./root/pkl/librispeech_dev.pkl',
# test_pkl='./root/pkl/librispeech_test.pkl',
# input_processor_path='./root/librispeech/wavlm/processor/',
# input_checkpoint_path='./root/librispeech/wavlm/ckpt/',
# input_pretrained_model_path='./root_base_model/wavlm_base_model/',
# output_processor_path='./root/librispeech/wavlm/processor/',
# output_checkpoint_path='./root/librispeech/wavlm/ckpt/',
# output_saved_model_path='./root/librispeech/wavlm/saved_model/',
# max_sample_length=450000,
# batch_size=8,
# epochs=10,
# gradient_accumulation_steps=4,
# save_steps=500,
# eval_logging_steps=50,
# lr=1e-4,
# weight_decay=0.005,
# warmup_steps=1000,
# architecture='wavlm',
# finetune_from_scratch=True)
# _, _, _, _ = finetune_model()
# ####################################################
# ########## LIBRISPEECH: FINETUNING (RESUMING FROM CHECKPOINT) - WAVLM ##########
# finetune_model = Finetuning(train_pkl='./root/pkl/librispeech_train.pkl',
# dev_pkl='./root/pkl/librispeech_dev.pkl',
# test_pkl='./root/pkl/librispeech_test.pkl',
# input_processor_path='./root/librispeech/wavlm/processor/',
# input_checkpoint_path='./root/librispeech/wavlm/ckpt/',
# input_pretrained_model_path='./root/librispeech/wavlm/saved_model/',
# output_processor_path='./root/librispeech/wavlm/processor/',
# output_checkpoint_path='./root/librispeech/wavlm/ckpt/',
# output_saved_model_path='./root/librispeech/wavlm/saved_model/',
# max_sample_length=450000,
# batch_size=8,
# epochs=15,
# gradient_accumulation_steps=4,
# save_steps=500,
# eval_logging_steps=50,
# lr=1e-4,
# weight_decay=0.005,
# warmup_steps=1000,
# architecture='wavlm',
# finetune_from_scratch=False)
# _, _, _, _ = finetune_model()
# ####################################################
# ########## LIBRISPEECH: EVALUATION - WAVLM ##########
# evaluation = Evaluation(dev_pkl='./root/pkl/librispeech_dev.pkl',
# test_pkl='./root/pkl/librispeech_test.pkl',
# processor_path='./root/librispeech/wavlm/processor/',
# saved_model_path='./root/librispeech/wavlm/saved_model/',
# architecture='wavlm')
# evaluation()
# ###################################################
# ########## COMBINED: FINETUNING (FROM SCRATCH) - WAV2VEC2 ##########
# finetune_model = Finetuning(train_pkl='./root/pkl/combined_train.pkl',
# dev_pkl='./root/pkl/combined_dev.pkl',
# test_pkl='./root/pkl/combined_test.pkl',
# input_processor_path='./root/combined/wav2vec2/processor/',
# input_checkpoint_path='./root/combined/wav2vec2/ckpt/',
# input_pretrained_model_path='./root_base_model/wav2vec2_base_model/',
# output_processor_path='./root/combined/wav2vec2/processor/',
# output_checkpoint_path='./root/combined/wav2vec2/ckpt/',
# output_saved_model_path='./root/combined/wav2vec2/saved_model/',
# max_sample_length=450000,
# batch_size=8,
# epochs=10,
# gradient_accumulation_steps=4,
# save_steps=500,
# eval_logging_steps=50,
# lr=1e-4,
# weight_decay=1e-5,
# warmup_steps=1000,
# architecture='wav2vec2',
# finetune_from_scratch=True)
# _, _, _, _ = finetune_model()
# ##################################################
# ########## COMBINED: FINETUNING (RESUMING FROM CHECKPOINT) - WAV2VEC2 ##########
finetune_model = Finetuning(train_pkl='./root/pkl/combined_train.pkl',
dev_pkl='./root/pkl/combined_dev.pkl',
test_pkl='./root/pkl/combined_test.pkl',
input_processor_path='./root/combined/wav2vec2/processor/',
input_checkpoint_path='./root/combined/wav2vec2/ckpt/',
input_pretrained_model_path='./root/combined/wav2vec2/saved_model/',
output_processor_path='./root/combined/wav2vec2/processor/',
output_checkpoint_path='./root/combined/wav2vec2/ckpt/',
output_saved_model_path='./root/combined/wav2vec2/saved_model/',
max_sample_length=450000,
batch_size=8,
epochs=30,
gradient_accumulation_steps=4,
save_steps=500,
eval_logging_steps=50,
lr=1e-4,
weight_decay=1e-5,
warmup_steps=1000,
architecture='wav2vec2',
finetune_from_scratch=False)
_, _, _, _ = finetune_model()
# #################################################### | StarcoderdataPython |
8174045 | from django.shortcuts import render
from .forms import Applicant
# Create your views here.
def profilepage(request):
    """Render the applicant profile page; create a profile on POST.

    Redirects to the error page when no user is logged in.
    """
    # .get() avoids a KeyError when the session has no 'username' key
    # (the original indexed the session directly, which raises for
    # anonymous visitors instead of showing the error page).
    if request.session.get('username') is None:
        return render(request, 'jobs/error.html')
    profile = Applicant()
    if request.method == "POST":
        # re-bind the form with the submitted data and persist it if valid
        profile = Applicant(request.POST)
        if profile.is_valid():
            profile.save()
    context = {'profile': profile}
    return render(request, 'applicant/profilePage.html', context)
299740 | <reponame>webdevhub42/Lambda
def Rotate(arr):
    """Rotate the square matrix *arr* 90 degrees clockwise, in place.

    The rotation is done by transposing the matrix and then reversing
    each row.  Each rotated row is printed as a side effect.
    """
    n = len(arr)
    # Transpose in place: swap each element above the diagonal with its
    # mirror.  (The original tested `i != j and i < j`, where `i < j`
    # already implies `i != j`; it also carried an unused `temp` list.)
    for i in range(n):
        for j in range(i + 1, n):
            arr[i][j], arr[j][i] = arr[j][i], arr[i][j]
    # Reversing every row of the transpose completes the clockwise rotation.
    for row in arr:
        row.reverse()
        print(row)


arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
Rotate(arr)
| StarcoderdataPython |
8142185 | #! /usr/bin/env python
import os,sys,gc,glob
import re,difflib,time,random,copy
import requests,urllib2,urlparse
from optparse import OptionParser
from bs4 import BeautifulSoup
from HTMLParser import HTMLParser
############ Global setting #############
# NOTE(review): this is Python 2 code (urllib2 import, print statements);
# it will not run under Python 3 without porting.
escaper=HTMLParser()
# disable requests' InsecureRequestWarning noise
requests.packages.urllib3.disable_warnings()
outfileprefix="downacs"
# jshort: all ACS journal codes; jdone: already harvested; jtodo: this run
jshort=['achre4', 'jafcau', 'ancham', 'aamick', 'bichaw', 'bcches', 'bomaf6', 'abseba', 'accacs', 'acscii', 'acbcct', 'jceda8', 'jceaax', 'jcisd8', 'acncdm', 'crtoec', 'chreay', 'jctcce', 'cmatex', 'acsccc', 'cgdefu', 'enfuem', 'esthag', 'estlcu', 'iechad', 'iecred', 'aidcbc', 'inocaj', 'jacsat', 'langd5', 'amlccd', 'mamobx', 'jmcmar', 'amclct', 'mpohbp', 'ancac3', 'nalefd', 'jnprdf', 'joceah', 'orlef7', 'oprdfk', 'orgnd7', 'acsodf', 'apchd5', 'jpcafh', 'jpcbfk', 'jpccck', 'jpclcd', 'jpchax', 'jprobs', 'ascefj', 'ascecg', 'asbcd6', 'cenear']
jdone=['bichaw','jpcafh','jpccck', 'orlef7', 'joceah', 'jmcmar', 'inocaj','jacsat', 'acbcct','bomaf6']
jtodo=['enfuem','esthag','estlcu','iechad']
# strips <script>...</script> blocks before parsing, to keep the soup small
scriptre=re.compile(r"<script(.|\n)*?</script>")
for i in range(len(jtodo)):
    # "list of issues" page for the journal
    loi="http://pubs.acs.org/loi/"+jtodo[i]
    rloi=requests.get(loi)
    simpletext=scriptre.sub('',rloi.text)
    sloi=BeautifulSoup(simpletext, "html.parser")
    rows=sloi.findChildren("div",attrs={'class':'row'})
    # one link per journal issue
    issueurl=[ row.a['href'] for row in rows ]
    # append mode: reruns keep earlier results; one output file per journal
    f=open(outfileprefix+str(i)+".txt",'a')
    for ilink in issueurl:
        print "Doing: "+ilink
        tmp=ilink.split('/')
        try:
            r=requests.get(ilink)
            rs=BeautifulSoup(scriptre.sub("",r.text), "html.parser")
            # collect open-access articles: editors' choice + author choice
            eds=rs.findChildren(attrs={'class':"icon-item editors-choice"})
            aus=rs.findChildren(attrs={'class':"icon-item author-choice"})
            outs= [ out.parent.findChild(attrs={'class':"icon-item pdf-high-res"}).a['href'] for out in eds+aus]
            # additions/corrections are freely available too
            corr=rs.findChildren(attrs={'id':'AdditionsandCorrections'})
            outs=outs+[out.parent.parent.findChild(attrs={'class':"icon-item pdf-high-res"}).a['href'] for out in corr]
            for out in outs:
                f.write(out+'\n')
            sys.stdout.flush()
            f.flush()
        except:
            # best effort: skip issues that fail to download or parse
            pass
    f.close()
| StarcoderdataPython |
6659565 | from utility.DBConnectivity import create_connection,create_cursor
def fetch_trans(accno):
    """Return up to the four most recent transactions for account *accno*.

    Each row is (AccName, TDate, TType, AmtTrans, Trans_acc), joined
    against the Bank table and ordered newest first.  Prints a message
    and returns whatever was collected if the query fails.
    """
    list_tra = []
    con = None
    cur = None
    try:
        con = create_connection()
        cur = create_cursor(con)
        # bind variable instead of string concatenation: prevents SQL
        # injection and lets the database cache the statement
        cur.execute(
            'Select B.AccName,T.TDate,T.TType,T.AmtTrans,T.Trans_acc '
            'from Transactions T inner join Bank B on T.Trans_acc=B.AccNo '
            'where T.AccNo = :accno Order By T.Tdate Desc',
            accno=accno)
        for i in cur:
            list_tra.append(i)
    except Exception:
        print('Unable to fetch Transactions')
    finally:
        # guard: a failed create_connection() would leave cur/con unset,
        # which made the original finally block raise NameError
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
    if len(list_tra) > 4:
        list_tra = list_tra[:4]
    return list_tra
def insert_trans(accno, trans_acc, amount, ty):
    """Insert one transaction row (dated SYSDATE) and commit it.

    accno: source account number
    trans_acc: counterpart account number
    amount: amount transferred
    ty: transaction type string
    """
    con = None
    cur = None
    try:
        con = create_connection()
        cur = create_cursor(con)
        # bind variables instead of string concatenation: prevents SQL
        # injection (the original interpolated `ty` into a quoted literal)
        cur.execute(
            'Insert into Transactions Values(:accno, :trans_acc, :amount, SYSDATE, :ty)',
            accno=accno, trans_acc=trans_acc, amount=amount, ty=ty)
        con.commit()
    except Exception:
        print('Unable to add Transaction')
    finally:
        # guard against a failed connection leaving cur/con undefined
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
def fetch_accounts(User_ID):
    """Return (AccNo, AccType, AccName, AccBalance) rows for *User_ID*.

    Accounts are ordered by creation time, newest first.  Prints a
    message and returns whatever was collected if the query fails.
    """
    account_list = []
    con = None
    cur = None
    try:
        con = create_connection()
        cur = create_cursor(con)
        # bind variable instead of string concatenation: prevents SQL
        # injection via the user id
        cur.execute(
            'SELECT AccNo,AccType,AccName,AccBalance FROM BANK '
            'WHERE UserID = :user_id ORDER BY CreationTime DESC',
            user_id=User_ID)
        for i in cur:
            account_list.append(i)
        # (removed an unused `count` counter from the original loop)
    except Exception:
        print("Error in fetching details")
    finally:
        # guard against a failed connection leaving cur/con undefined
        if cur is not None:
            cur.close()
        if con is not None:
            con.close()
    return account_list
1655013 | <reponame>nfahlgren/hsi_toolkit_py<gh_stars>10-100
from hsi_toolkit import anomaly_detectors
from hsi_toolkit import classifiers
from hsi_toolkit import endmember_extraction
from hsi_toolkit import signature_detectors
from hsi_toolkit import spectral_indices
from hsi_toolkit import dim_reduction
from hsi_toolkit import util
| StarcoderdataPython |
1744875 | <reponame>AdarshKvT/python-oop
# generalized class
class Pet:
    """Generalized base class for pets: stores a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def show(self):
        """Introduce the pet by name and age."""
        intro = f"I am {self.name} and I am {self.age} years old"
        print(intro)

    def speak(self):
        """Fallback voice used when a subclass does not override it."""
        print("I dont no what to say")
# child class inheriting from parent class.
class Cat(Pet):
    """A cat: extends Pet with a coat color and its own voice."""

    def __init__(self, name, age, color):
        # let Pet set up name/age via super(), then record the extra attribute
        super().__init__(name, age)
        self.color = color

    def speak(self):
        print("Meow")

    def show(self):
        # overrides Pet.show so the introduction also mentions the color
        print(f"I am {self.name} and I am {self.age} years old. I am {self.color} ")
class Dog(Pet):
    # only the voice differs from the generic Pet; show() is inherited
    def speak(self):
        print("bark")
class Fish(Pet):
    # no overrides: inherits both show() and speak() from Pet unchanged
    pass
# create an instance of the base Pet class
p = Pet("KvT", 26)
p.show()
p.speak()
# Cat overrides both show() (adds the color) and speak()
c = Cat("Bill", 34, "Green")
c.show()
c.speak()
# Dog overrides speak() only; show() comes from Pet
d = Dog("Jill", 24)
d.show()
d.speak()
# Fish defines no speak(), so the Pet fallback message is printed
f = Fish("Bubble", 23)
f.speak()
| StarcoderdataPython |
4993847 | <filename>old_scripts/hd_regional_stats.py
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 10:17:13 2018
@author: David
"""
# Built-in libraries
#import argparse
#import collections
#import multiprocessing
import os
#import pickle
#import time
# External libraries
#import rasterio
#import gdal
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
import pandas as pd
#from scipy.optimize import curve_fit
#from scipy.stats import linregress
from scipy.stats import median_absolute_deviation
from scipy.stats import linregress
#import xarray as xr
import debrisglobal.globaldebris_input as debris_prms
# toggles for the two independent post-processing steps below:
# compute/export regional hd & melt-factor statistics, and make scatter plots
option_regional_stats = True
option_regional_plots = False
#%%% ===== SCRIPT OPTIONS =====
#if not os.path.exists(melt_compare_fp):
# os.makedirs(melt_compare_fp)
if option_regional_stats:
    # RGI regions to process (HMA split into 13/14/15 here)
    rois = ['01','02','03','04','05','06','07','08','09','11','12','13','14', '15', '16','17','18']
    # binned-CSV column names for debris thickness and melt (enhancement) factor
    hd_cn = 'hd_ts_mean_m'
    mf_cn = 'mf_ts_mean'
    # one output row per region plus a final 'all' row
    reg_stats_cns = ['roi', 'hd_mean', 'hd_std', 'hd_med', 'hd_mad', 'hd_25', 'hd_75',
                     'mf_mean', 'mf_std', 'mf_med', 'mf_mad', 'mf_25', 'mf_75']
    reg_stats_df = pd.DataFrame(np.zeros((len(rois)+1,len(reg_stats_cns))), columns=reg_stats_cns)
    # ===== REGIONAL MELT FACTOR STATISTICS =====
    # directories with per-glacier binned CSVs (optimized and extrapolated)
    hdts_bin_fp = debris_prms.mb_binned_fp + '_wdebris_hdts/'
    hdts_bin_fp_extrap = debris_prms.mb_binned_fp + '_wdebris_hdts_extrap/'
    hd_list_all = []
    mf_list_all = []
    for nroi, roi in enumerate(rois):
        print(roi)
        # TODO: leftover placeholder from a removed hypsometry-loading section
        print('make a nice hypsometry file')
        # collect binned CSVs for this region: optimized glaciers first...
        glac_hd_fullfns = []
        for i in os.listdir(hdts_bin_fp):
            if i.endswith('hdts.csv'):
                reg_str = str(int(i.split('.')[0])).zfill(2)
                if reg_str == roi:
                    glac_hd_fullfns.append(hdts_bin_fp + i)
        # ...then extrapolated glaciers
        for i in os.listdir(hdts_bin_fp_extrap):
            if i.endswith('hdts_extrap.csv'):
                reg_str = str(int(i.split('.')[0])).zfill(2)
                if reg_str == roi:
                    glac_hd_fullfns.append(hdts_bin_fp_extrap + i)
        glac_hd_fullfns = sorted(glac_hd_fullfns)
        area_reg_km2 = 0
        hd_list = []
        mf_list = []
        for nfn, fullfn in enumerate(glac_hd_fullfns):
            if nfn%500 == 0:
                print(' ', nfn)
            df = pd.read_csv(fullfn)
            if 'hd_ts_mean_m' in list(df.columns):
                # area_factor converts km2 to "pixel" counts: 1e4 gives 0.0001
                # km2 (~10 m) resolution for the area-weighted repetition below
                area_factor = 1e4
                n_values = np.round(df['dc_bin_area_valid_km2'].values*area_factor,0).astype(int)
                area_reg_km2 += n_values.sum() /area_factor
                hd_values = df[hd_cn].values
                mf_values = df[mf_cn].values
                # repeat each bin value proportionally to its debris-covered
                # area, so the statistics below are area-weighted
                for nidx, n_value in enumerate(n_values):
                    if n_value > 0 and not np.isnan(hd_values[nidx]):
                        hd_list.extend(np.repeat(hd_values[nidx],n_values[nidx]))
                        mf_list.extend(np.repeat(mf_values[nidx],n_values[nidx]))
        # Record regional statistics (area-weighted)
        hd_array = np.array(hd_list)
        hd_mean = hd_array.mean()
        hd_std = hd_array.std()
        hd_med = np.median(hd_array)
        hd_mad = median_absolute_deviation(hd_array)
        hd_25 = np.percentile(hd_array, 25)
        hd_75 = np.percentile(hd_array, 75)
        mf_array = np.array(mf_list)
        mf_mean = mf_array.mean()
        mf_std = mf_array.std()
        mf_med = np.median(mf_array)
        mf_mad = median_absolute_deviation(mf_array)
        mf_25 = np.percentile(mf_array, 25)
        mf_75 = np.percentile(mf_array, 75)
        reg_stats_df.loc[nroi,:] = [roi, hd_mean, hd_std, hd_med, hd_mad, hd_25, hd_75,
                                    mf_mean, mf_std, mf_med, mf_mad, mf_25, mf_75]
        print(roi, 'hd:', np.round(hd_med,2), '(' + str(np.round(hd_25,2)) + ' - ', str(np.round(hd_75,2)) + ')',
              ' mf:', np.round(mf_med,2), '(' + str(np.round(mf_25,2)) + ' - ', str(np.round(mf_75,2)) + ')')
        # ===== HISTOGRAM: regional debris thickness ======
        color = 'k'
        hd_bins = np.arange(0,2.01,0.05)
        label_frequency = 10
        hist_fn = roi + '_hd_hist.png'
        hist, bin_edges = np.histogram(hd_array,hd_bins)  # make the histogram
        # normalize counts back to fractional area of the region
        hist_area_km2 = hist / area_factor / area_reg_km2
        fig,ax = plt.subplots()
        # plot the histogram heights against integers on the x axis
        ax.bar(range(len(hist)),hist_area_km2, width=1, edgecolor='k', facecolor='grey', linewidth=0.5, clip_on=False,
               zorder=2)
        ax.set_xticks(np.arange(0,len(hd_bins),2)-0.5, minor=True)
        bin_idx = np.arange(0,len(hd_bins),label_frequency)
        ax.set_xticks(bin_idx-0.5)
        ax.set_xticklabels([str(np.round(x,2)) for x in hd_bins[bin_idx]], rotation=0, ha='center', color=color)
        ax.set_xlabel('$h_{d}$ (m)', fontsize=16)
        ax.set_xlim(-0.5,len(hd_bins)-1.5)
        ax.set_ylim(0,0.21)
        ax.yaxis.set_major_locator(MultipleLocator(0.1))
        ax.tick_params(axis='x', which='major', length=6, labelsize=12, color=color)
        ax.tick_params(axis='x', which='minor', length=3, color=color)
        ax.tick_params(axis='y', which='both', color='none')
        ax.spines['bottom'].set_color(color)
        ax.xaxis.label.set_color(color)
        # manual horizontal grid lines (frame is turned off below)
        for yline in [0.1, 0.2]:
            ax.axhline(yline,-0.01,1.01, color='grey', lw=1, clip_on=False, zorder=1)
        ax.set_frame_on(False)
        # Save figure
        fig_fp = debris_prms.output_fp + 'figures/histograms/'
        if not os.path.exists(fig_fp):
            os.makedirs(fig_fp)
        fig.set_size_inches(3,1.25)
        fig.savefig(fig_fp + hist_fn, bbox_inches='tight', dpi=300, transparent=True)
        plt.close()
        # ===== HISTOGRAM: regional melt factors ======
        color = 'k'
        mf_bins = np.arange(0,1.51,0.05)
        label_frequency = 10
        hist_fn = roi + '_mf_hist.png'
        hist, bin_edges = np.histogram(mf_array,mf_bins)  # make the histogram
        hist_area_km2 = hist / area_factor / area_reg_km2
        fig,ax = plt.subplots()
        barlist = ax.bar(range(len(hist)),hist_area_km2, width=1, edgecolor='k', facecolor='#fa8072', linewidth=0.5,
                         clip_on=False, zorder=2)
        # color the bars for enhancement factors > 1 (melt enhancement) darker
        for nbar, mf in enumerate(mf_bins[1:]):
            if mf > 1:
                barlist[nbar].set_facecolor('#800000')
        # NOTE(review): the tick positions/labels below use hd_bins, not
        # mf_bins, even though this is the melt-factor histogram - the two
        # arrays share the 0.05 spacing but differ in length; confirm this
        # is intentional.
        ax.set_xticks(np.arange(0,len(hd_bins),2)-0.5, minor=True)
        bin_idx = np.arange(0,len(hd_bins),label_frequency)
        ax.set_xticks(bin_idx-0.5)
        ax.set_xticklabels([str(np.round(x,2)) for x in hd_bins[bin_idx]], rotation=0, ha='center')
        ax.set_xlabel('$E_{debris}$ (-)', fontsize=16)
        ax.set_xlim(-0.5,len(mf_bins)-1.5)
        ax.set_ylim(0,0.125)
        ax.yaxis.set_major_locator(MultipleLocator(0.1))
        ax.tick_params(axis='x', which='major', length=6, labelsize=12, color=color)
        ax.tick_params(axis='x', which='minor', length=3, color=color)
        ax.tick_params(axis='y', which='major', length=6, color='none')
        ax.spines['bottom'].set_color(color)
        ax.xaxis.label.set_color(color)
        for yline in [0.05, 0.1]:
            ax.axhline(yline,-0.01,1.01, color='grey', lw=1, zorder=1, clip_on=False)
        ax.set_frame_on(False)
        # Save figure
        fig_fp = debris_prms.output_fp + 'figures/histograms/'
        if not os.path.exists(fig_fp):
            os.makedirs(fig_fp)
        fig.set_size_inches(3,1.25)
        fig.savefig(fig_fp + hist_fn, bbox_inches='tight', dpi=300, transparent=True)
        plt.close()
        # accumulate for the global ('all') statistics
        hd_list_all.extend(hd_list)
        mf_list_all.extend(mf_list)
    # All-regions statistics (area-weighted over every region processed)
    hd_array_all = np.array(hd_list_all)
    hd_mean_all = hd_array_all.mean()
    hd_std_all = hd_array_all.std()
    hd_med_all = np.median(hd_array_all)
    hd_mad_all = median_absolute_deviation(hd_array_all)
    hd_25_all = np.percentile(hd_array_all, 25)
    hd_75_all = np.percentile(hd_array_all, 75)
    mf_array_all = np.array(mf_list_all)
    mf_mean_all = mf_array_all.mean()
    mf_std_all = mf_array_all.std()
    mf_med_all = np.median(mf_array_all)
    mf_mad_all = median_absolute_deviation(mf_array_all)
    mf_25_all = np.percentile(mf_array_all, 25)
    mf_75_all = np.percentile(mf_array_all, 75)
    # nroi still holds the last loop index; +1 appends the 'all' row
    nroi += 1
    reg_stats_df.loc[nroi,:] = ['all', hd_mean_all, hd_std_all, hd_med_all, hd_mad_all, hd_25_all, hd_75_all,
                                mf_mean_all, mf_std_all, mf_med_all, mf_mad_all, mf_25_all, mf_75_all]
    # Export regional statistics
    reg_stats_df.to_csv(debris_prms.output_fp + 'reg_stats_hd_mf.csv', index=False)
#%%
if option_regional_plots:
    # Which summary statistic to plot ('mean' or 'med'); drives the hd_/mf_
    # column names selected below.
    stat = 'mean'
    # stat = 'med'
    hd_cn = 'hd_' + stat
    mf_cn = 'mf_' + stat
    # NOTE(review): reads 'reg_stats_hd_mf_wdc.csv' while the export above
    # writes 'reg_stats_hd_mf.csv' -- confirm the intended input file.
    reg_stats_df = pd.read_csv(debris_prms.output_fp + 'reg_stats_hd_mf_wdc.csv')
    reg_stats_df.dropna(subset=[hd_cn], inplace=True)
    # ====== % debris cover vs. debris thickness =====
    fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False, gridspec_kw = {'wspace':0.35, 'hspace':0})
    ax[0,0].scatter(reg_stats_df['% DC'], reg_stats_df[hd_cn], color='k', marker='o', facecolor='none', s=30,
                    clip_on=False)
    slope, intercept, r_value, p_value, std_err = linregress(reg_stats_df['% DC'].values, reg_stats_df[hd_cn].values)
    print('DC vs. hd: r = ' + str(np.round(r_value,2)), '(p = ' + str(p_value) + ')')
    # Line of best fit spans the x-variable (% debris cover) range.
    lobf_x = np.arange(reg_stats_df['% DC'].min(),reg_stats_df['% DC'].max()+0.1,0.1)
    lobf_y = intercept + slope * lobf_x
    ax[0,0].plot(lobf_x, lobf_y, color='k', linewidth=1)
    # X-label
    ax[0,0].set_xlabel('Relative debris-covered area (%)', size=12)
    ax[0,0].set_xlim(0,30)
    ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(10))
    ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(2))
    # Y-label (axis limits depend on the chosen statistic)
    if hd_cn == 'hd_med':
        ax[0,0].set_ylabel('$h_{d}$ median (m)', size=12)
        ax[0,0].set_ylim(0,0.29)
    elif hd_cn == 'hd_mean':
        ax[0,0].set_ylabel('$h_{d}$ mean (m)', size=12)
        ax[0,0].set_ylim(0,0.5)
    ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(0.1))
    ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(0.02))
    # Tick parameters
    #ax[0,0].yaxis.set_ticks_position('both')
    ax[0,0].tick_params(axis='both', which='major', labelsize=10, direction='inout')
    ax[0,0].tick_params(axis='both', which='minor', labelsize=8, direction='in')
    # Save plot
    fig.set_size_inches(3, 3)
    fig_fn = hd_cn + '_vs_dc%.png'
    fig.savefig(debris_prms.output_fp + fig_fn, bbox_inches='tight', dpi=300, transparent=True)
    #%% ====== debris thickness vs. melt factor =====
    fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False, gridspec_kw = {'wspace':0.35, 'hspace':0})
    ax[0,0].scatter(reg_stats_df[hd_cn], reg_stats_df[mf_cn], color='k', marker='o', facecolor='none', s=30,
                    clip_on=False)
    slope, intercept, r_value, p_value, std_err = linregress(reg_stats_df[hd_cn].values, reg_stats_df[mf_cn].values)
    print('Melt factor vs. hd: r = ' + str(np.round(r_value,2)), '(p = ' + str(p_value) + ')')
    # BUGFIX: the best-fit x-range previously ran from reg_stats_df['hd_med'].min()
    # to reg_stats_df['mf_med'].max() -- hard-coded '_med' columns (wrong when
    # stat == 'mean') and the wrong variable (melt factor) for this plot's
    # x-axis. The line of best fit must span the plotted hd column.
    lobf_x = np.arange(reg_stats_df[hd_cn].min(), reg_stats_df[hd_cn].max()+0.1, 0.1)
    lobf_y = intercept + slope * lobf_x
    ax[0,0].plot(lobf_x, lobf_y, color='k', linewidth=1)
    # X-label
    if hd_cn == 'hd_med':
        ax[0,0].set_xlabel('$h_{d}$ median (m)', size=12)
        ax[0,0].set_xlim(0,0.29)
    elif hd_cn == 'hd_mean':
        ax[0,0].set_xlabel('$h_{d}$ mean (m)', size=12)
        ax[0,0].set_xlim(0,0.5)
    ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(0.1))
    ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(0.02))
    # Y-label
    if mf_cn == 'mf_med':
        ax[0,0].set_ylabel('Melt factor median (-)', size=12)
    elif mf_cn == 'mf_mean':
        ax[0,0].set_ylabel('Melt factor mean (-)', size=12)
    ax[0,0].set_ylim(0,1)
    ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(0.05))
    # Tick parameters
    #ax[0,0].yaxis.set_ticks_position('both')
    ax[0,0].tick_params(axis='both', which='major', labelsize=10, direction='inout')
    ax[0,0].tick_params(axis='both', which='minor', labelsize=8, direction='in')
    # Save plot
    fig.set_size_inches(3, 3)
    fig_fn = hd_cn + '_vs_' + mf_cn + '.png'
    fig.savefig(debris_prms.output_fp + fig_fn, bbox_inches='tight', dpi=300, transparent=True)
| StarcoderdataPython |
9701719 | # Python program to check if given string is an interleaving of the other two strings
# Returns true if C is an interleaving of A and B, otherwise returns false
def is_interleaved(A, B, C):
    """Return 1 if C is an interleaving of A and B, otherwise 0.

    Greedy scan: each character of C is matched against the next unused
    character of A first, then B. Bug fixes versus the original:
    - the loop ran `while k != len(C) - 1`, skipping C's last character;
    - A[i] / B[j] were indexed without bounds checks, raising IndexError
      once one string was exhausted;
    - the leftover check `if A[i - 1] or B[j - 1]` tested character
      truthiness instead of whether any characters remain unconsumed.

    NOTE: the greedy strategy can mis-answer when A and B share a common
    prefix character (full generality needs dynamic programming), matching
    the scope of the original implementation.
    """
    i = 0  # next unmatched index in A
    j = 0  # next unmatched index in B
    k = 0  # current index in C
    # Iterate through ALL characters of C (inclusive of the last one).
    while k < len(C):
        if i < len(A) and A[i] == C[k]:
            # Matched against A; advance A.
            i += 1
        elif j < len(B) and B[j] == C[k]:
            # Matched against B; advance B.
            j += 1
        else:
            # C[k] matches neither remaining stream.
            return 0
        k += 1
    # If A or B still have unconsumed characters, C is shorter than
    # len(A) + len(B), so it cannot be a full interleaving.
    if i < len(A) or j < len(B):
        return 0
    return 1
# Driver: exercise is_interleaved on a sample triple and report the verdict.
A = "AB"
B = "CD"
C = "ABC"
verdict = "is" if is_interleaved(A, B, C) == 1 else "is not"
print(C + " " + verdict + " interleaved of " + A + " and " + B)
| StarcoderdataPython |
12800744 | <filename>__init__.py
#!/usr/bin/python
# Package initializer: re-export the public rdml API at package level.
from .rdml import *
# Human-readable package name.
name = "rdmlpython"
# Only the 'rdml' submodule is part of the public wildcard-import surface.
__all__ = ["rdml"]
| StarcoderdataPython |
200093 | from pm4pymdl import algo, objects, visualization, order_log_generation, util
# Package metadata for pm4pymdl.
__version__ = '0.0.45'
__doc__ = "Process Mining for Python - Multi-Dimensional Event Logs"
__author__ = 'PADS'
# NOTE(review): contact addresses were redacted to '<EMAIL>' placeholders
# in this copy -- restore real addresses before publishing.
__author_email__ = '<EMAIL>'
__maintainer__ = 'PADS'
__maintainer_email__ = "<EMAIL>"
| StarcoderdataPython |
176013 | <reponame>bilbeyt/otokon-e_form<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: widen three Form CharFields to
    max_length=150 and set English verbose names."""

    # Must be applied after the initial 'form' app migration.
    dependencies = [
        ('form', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='form',
            name='experience',
            field=models.CharField(max_length=150, verbose_name='Experiences'),
        ),
        migrations.AlterField(
            model_name='form',
            name='interests',
            field=models.CharField(max_length=150, verbose_name='Interests'),
        ),
        migrations.AlterField(
            model_name='form',
            name='tech_info',
            field=models.CharField(max_length=150, verbose_name='Technical Information'),
        ),
    ]
| StarcoderdataPython |
3242679 | <reponame>nkanak/GraphOfDocs
"""
This script contains wrapper functions that
call algorithms in the database,
such as Pagerank, Louvain Community Detection,
and Jaccard Similarity Measure.
Their implementantions are located
in the Neo4j Algorithms library.
"""
def pagerank(database, node, edge, iterations, property, weight=''):
    """Run the Neo4j PageRank procedure and write scores back to the graph.

    :param database: wrapper exposing execute(query, mode)
    :param node: node label to rank
    :param edge: relationship type to traverse
    :param iterations: number of PageRank iterations
    :param property: node property the score is written to
    :param weight: optional relationship weight property ('' = unweighted)
    :raises TypeError: when an argument has the wrong type
    """
    args_ok = (isinstance(node, str) and isinstance(edge, str)
               and isinstance(iterations, int) and isinstance(property, str)
               and isinstance(weight, str))
    if not args_ok:
        raise TypeError('All arguments should be strings, except iterations which should be int!')
    # Only include a weightProperty clause when a weight property was given.
    weight_clause = ', weightProperty: "' + weight + '"' if weight else ''
    query = (
        'CALL algo.pageRank("' + node + '", "' + edge + '", '
        '{iterations: ' + str(iterations) + ', dampingFactor: 0.85, write: true, '
        'writeProperty: "' + property + '"' + weight_clause + '}) '
        'YIELD nodes, iterations, loadMillis, computeMillis, writeMillis, dampingFactor, write, writeProperty'
    )
    # Normalize whitespace and run as a write query.
    database.execute(' '.join(query.split()), 'w')
    return
def louvain(database, node, edge, property, weight=''):
    """Run Neo4j Louvain community detection and write community ids back.

    :param database: wrapper exposing execute(query, mode)
    :param node: node label to cluster
    :param edge: relationship type, traversed in both directions
    :param property: node property the community id is written to
    :param weight: optional relationship weight property ('' = unweighted)
    :raises TypeError: when an argument is not a string
    """
    args_ok = (isinstance(node, str) and isinstance(edge, str)
               and isinstance(property, str) and isinstance(weight, str))
    if not args_ok:
        raise TypeError('All arguments should be strings!')
    # Only include a weightProperty clause when a weight property was given.
    weight_clause = ', weightProperty: "' + weight + '"' if weight else ''
    query = (
        'CALL algo.louvain("' + node + '", "' + edge + '", '
        '{direction: "BOTH", writeProperty: "' + property + '"' + weight_clause + '}) '
        'YIELD nodes, communityCount, iterations, loadMillis, computeMillis, writeMillis'
    )
    # Normalize whitespace and run as a write query.
    database.execute(' '.join(query.split()), 'w')
    return
def jaccard(database, source, edge, target, cutoff, relationship, property):
    """Compute pairwise Jaccard similarity between source nodes based on the
    sets of target nodes they link to, and persist top-1 similarity edges.

    :param database: wrapper exposing execute(query, mode)
    :param source: label of the nodes being compared
    :param edge: relationship type from source to target
    :param target: label of the nodes forming each source's category set
    :param cutoff: minimum similarity to persist (float)
    :param relationship: relationship type written between similar nodes
    :param property: relationship property the similarity is written to
    :raises TypeError: when an argument has the wrong type
    """
    args_ok = (isinstance(source, str) and isinstance(edge, str)
               and isinstance(target, str) and isinstance(relationship, str)
               and isinstance(property, str) and isinstance(cutoff, float))
    if not args_ok:
        raise TypeError('All arguments should be strings, except cutoff which should be a float!')
    query = (
        'MATCH (d:' + source + ')-[:' + edge + ']->(w:' + target + ') '
        'WITH {item:id(d), categories: collect(id(w))} as data '
        'WITH collect(data) as Data '
        'CALL algo.similarity.jaccard(Data, {topK: 1, similarityCutoff: ' + str(cutoff) +
        ', write: true, writeRelationshipType: "' + relationship + '", writeProperty: "' + property + '"}) '
        'YIELD nodes, similarityPairs, write, writeRelationshipType, writeProperty, '
        'min, max, mean, stdDev, p25, p50, p75, p90, p95, p99, p999, p100 '
        'RETURN nodes, similarityPairs, write, writeRelationshipType, writeProperty, min, max, mean, p95 '
    )
    # Normalize whitespace and run as a write query.
    database.execute(' '.join(query.split()), 'w')
    return
11212667 | from urllib.parse import urlparse, urljoin, urlencode, quote, urlunparse, parse_qsl
from Data.TorrentsUrlProvider import TorrentsUrlProvider
# Search endpoint and the fixed "sort by most seeders, descending" parameters.
BASE_URL = 'https://kickass.onl/usearch/'
MOST_SEEDERS_QUERY = 'field=seeders&sorder=desc'
def compose_full_url(query) -> str:
    """Return the full kickass search URL for *query*, sorted by seeders."""
    search_url = urljoin(BASE_URL, quote(f"{query}/"))
    url_parts = list(urlparse(search_url))
    # Index 4 of the 6-tuple is the query-string component.
    url_parts[4] = urlencode(dict(parse_qsl(MOST_SEEDERS_QUERY)))
    return urlunparse(url_parts)
class KickassTorrentsUrlProvider:
    """Thin adapter: builds the kickass search URL for a query and delegates
    torrent scraping to the generic TorrentsUrlProvider."""

    def __init__(self, query):
        # Pre-compose the site-specific search URL once, at construction time.
        full_url = compose_full_url(query)
        self.url_provider = TorrentsUrlProvider(full_url)

    def get_torrents(self):
        """Return the torrents scraped from the composed search URL."""
        return self.url_provider.get_torrents()
| StarcoderdataPython |
311185 | import datetime
import os
from flask import abort
from flask_login import current_user
from flask_socketio import disconnect, emit
from home import settings
from home.core.models import get_action, devices
from home.web.models import SecurityEvent, SecurityController
from home.web.utils import send_to_subscribers, ws_login_required
from home.web.web import socketio
class Security:
    @staticmethod
    def handle_event(sec, action, app, device):
        """
        Dispatch a security-controller event raised by *device*.

        'eventstart': put the controller into alert, run the 'alert' action,
        persist a SecurityEvent row, and notify websocket clients/subscribers.
        'eventend': close the most recent event for this controller/device,
        recording its duration in seconds.

        (Original author's note: a jumble of things that needs major refactoring.)

        :param device: device model that raised the event
        :param sec: SecurityController model instance
        :param action: 'eventstart' or 'eventend'
        :param app: Flask app (used for logging)
        :return: None
        """
        if action == 'eventstart':
            app.logger.info("EVENT START")
            sec.alert()
            get_action('alert').run()
            SecurityEvent.create(controller=sec, device=device.name)
            socketio.emit('state change', {'state': sec.state})
            send_to_subscribers("New event alert")
        elif action == 'eventend':
            app.logger.info("EVENT END")
            try:
                # Newest event for this controller/device pair (highest id).
                event = SecurityEvent.filter(controller=sec,
                                             device=device.name).order_by(
                    SecurityEvent.id.desc()).get()
                event.duration = (datetime.datetime.now() - event.datetime).total_seconds()
                app.logger.info(event.duration)
                event.in_progress = False
                event.save()
                # emit something here
            except SecurityEvent.DoesNotExist:
                # 412 Precondition Failed: 'eventend' without a matching event row.
                abort(412)
@socketio.on('change state')
@ws_login_required
def change_state():
    """Cycle the security controller: disabled -> armed, armed -> disabled,
    alert -> armed. Broadcasts the resulting state to all clients."""
    if not current_user.admin:
        disconnect()
    controller = SecurityController.get()
    message = ""
    state = controller.state
    if state == 'disabled':
        # disabled -> armed
        controller.arm()
        message = get_action('arm').run()
    elif state == 'armed':
        # armed -> disabled
        controller.disable()
        message = get_action('disable').run()
    elif state == 'alert':
        # alert -> armed (restore; no action message)
        controller.arm()
    emit('state change', {'state': controller.state, 'message': message}, broadcast=True)
@socketio.on('get feeds')
@ws_login_required
def get_feeds():
    """Emit the names of motion-camera devices the current user may view."""
    feeds = []
    for device in devices:
        if device.driver.name == 'motion' and current_user.has_permission(device):
            feeds.append(device.name)
    emit('push video', {'feeds': feeds})
@socketio.on('get recordings')
@ws_login_required
def get_recordings(page=6):
    """Emit a page of recorded footage filenames per footage directory
    (admin only), ordered oldest-to-newest by modification time."""
    if not current_user.admin:
        disconnect()
    recordings = {}
    for footage_dir in settings.SECURITY_FOOTAGE_DIRS:
        by_mtime = sorted(
            os.listdir(footage_dir),
            key=lambda name: os.path.getmtime(os.path.join(footage_dir, name)),
        )
        # Six-entry window ending `page` items back from the newest recording.
        recordings[os.path.basename(footage_dir)] = by_mtime[-page - 1:-page + 5]
    emit('push video', {'recordings': recordings})
| StarcoderdataPython |
3401676 | <gh_stars>10-100
#!/usr/bin/env python
import boto.ec2
import datetime
import os
import time
from fabric.api import cd, env, execute, local, put, run, sudo
from fabric.colors import green as _green, yellow as _yellow
from fabric.context_managers import shell_env
from fabric.contrib.files import exists
from fabric.network import disconnect_all
#
# Edit env defaults to customize AMI.
#
env.ec2_region = "us-west-2"
env.ec2_amis = ['ami-fb68f8cb'] # Ubuntu 12.04 LTS amd64 EBS
env.ec2_keypair = 'MinecraftEC2'
env.ec2_secgroups = ['minecraft']
env.ec2_instancetype = 'm1.small'
# cloud-init user-data script injected at first boot.
env.ec2_userdata = open('cloud-config').read()
def launch_instance():
    """Boot a new EC2 builder instance from the configured AMI and wait
    until it is running with a public DNS name. Returns the boto instance."""
    print(_green("Launching instance of %s..." % env.ec2_amis[0]))
    conn = boto.ec2.connect_to_region(env.ec2_region)
    reservation = conn.run_instances(
        image_id=env.ec2_amis[0],
        key_name=env.ec2_keypair,
        security_groups=env.ec2_secgroups,
        instance_type=env.ec2_instancetype,
        user_data=env.ec2_userdata)
    instance = reservation.instances[0]
    # Poll until the instance leaves the 'pending' state.
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(15)
        instance.update()
    # Poll until AWS assigns a public DNS name (needed for SSH).
    while not instance.public_dns_name:
        print(_yellow("Waiting for Public DNS"))
        time.sleep(15)
        instance.update()
    print(_green("Public DNS: %s" % instance.public_dns_name))
    print(_green("Public IP address: %s" % instance.ip_address))
    print(_green("Instance state: %s" % instance.state))
    print(_green("Instance ID: %s" % instance.id))
    # Fixed grace period for sshd to come up before fabric connects.
    print(_green("Waiting 60 seconds for instance to boot..."))
    time.sleep(60)
    return instance
def set_host_env(instance):
    """Point fabric's env at the freshly launched instance (ubuntu user,
    keypair file expected under ~/.ssh)."""
    env.user = 'ubuntu'
    env.hosts = [instance.public_dns_name]
    env.key_filename = os.path.join(os.getenv('HOME'), '.ssh', env.ec2_keypair)
def check_instance_availability():
    """Block until cloud-init finishes on the remote host (it touches
    /var/lib/cloud/instance/boot-finished when done)."""
    while not exists('/var/lib/cloud/instance/boot-finished'):
        print(_yellow("Waiting for cloud-init to finish running..."))
        time.sleep(15)
    print(_green("Instance is ready."))
def copy_manifests():
    """Archive the local git HEAD of the puppet manifests, upload the
    tarball to the instance, and unpack it under /home/ubuntu."""
    print(_green("Copying puppet manifests..."))
    local('git archive --prefix=puppet-minecraft/ --output=puppet-minecraft.tar.gz HEAD')
    put('puppet-minecraft.tar.gz', '/home/ubuntu')
    with cd('/home/ubuntu'):
        run('tar xzf puppet-minecraft.tar.gz')
    # Clean up the local tarball after upload.
    local('rm puppet-minecraft.tar.gz')
def apply_manifests():
    """Provision the instance by running `puppet apply` (as root) on the
    uploaded base manifest."""
    print(_green("Running puppet apply..."))
    sudo("puppet apply -v " +
         "--modulepath=/home/ubuntu/puppet-minecraft/modules " +
         "/home/ubuntu/puppet-minecraft/manifests/base.pp")
def image_name():
    """
    Return image name in format 'Minecraft-Server-XXX',
    where 'XXX' is the version number, which increments by one
    each time the AMI is built.
    """
    conn = boto.ec2.connect_to_region(env.ec2_region)
    images = conn.get_all_images(owners='self')
    # Parse the numeric suffix of every existing 'Minecraft-*' image name.
    prev_versions = [int(i.name.split('-')[-1]) for i in images
                     if i.name.split('-')[0] == 'Minecraft']
    prev_versions.append(0)  # Ensure prev_versions isn't empty
    # Next version, zero-padded to three digits (e.g. '007').
    version = str(max(prev_versions) + 1).zfill(3)
    return "Minecraft-Server-%s" % version
def image_description():
    """Return an AMI description with today's date and the short HEAD SHA-1
    of the local manifests checkout."""
    today = datetime.date.today().isoformat()
    head_sha1 = local('git rev-parse --verify --short HEAD', capture=True)
    return "Built on %s from %s" % (today, head_sha1)
def create_image(instance_id):
    """Snapshot the provisioned instance into a new, auto-versioned AMI and
    return the new AMI id."""
    conn = boto.ec2.connect_to_region(env.ec2_region)
    ami_id = conn.create_image(instance_id, image_name(), image_description())
    return ami_id
def check_image_availability(ami_id):
    """Poll the AMI until it leaves the 'pending' state, then report whether
    it became 'available' (printing its id/name/description) or not."""
    print(_green("Building AMI..."))
    conn = boto.ec2.connect_to_region(env.ec2_region)
    image = conn.get_image(ami_id)
    while image.state == u'pending':
        print(_yellow("AMI state: %s" % image.state))
        time.sleep(15)
        image.update()
    if image.state == u'available':
        print(_green("AMI is ready."))
        print(_green("AMI ID: %s" % image.id))
        print(_green("AMI Name: %s" % image.name))
        print(_green("AMI Description: %s" % image.description))
    else:
        # Any terminal state other than 'available' (e.g. 'failed').
        print(_yellow("AMI state: %s" % image.state))
def terminate_instance(instance_id):
    """Terminate the builder instance and poll until it finishes shutting
    down, reporting the final state."""
    print(_green("Terminating instance..."))
    conn = boto.ec2.connect_to_region(env.ec2_region)
    results = conn.terminate_instances(instance_ids=[instance_id])
    instance = results[0]
    while instance.state == u'shutting-down':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(15)
        instance.update()
    if instance.state == u'terminated':
        print(_green("Instance terminated."))
    else:
        print(_yellow("Instance state: %s" % instance.state))
def main():
    """End-to-end AMI build: boot an instance, provision it with puppet,
    snapshot it into a new AMI, then terminate the builder instance."""
    instance = launch_instance()
    set_host_env(instance)
    execute(check_instance_availability)
    execute(copy_manifests)
    execute(apply_manifests)
    # Close fabric SSH sessions before imaging.
    disconnect_all()
    ami_id = create_image(instance.id)
    check_image_availability(ami_id)
    terminate_instance(instance.id)
# Script entry point: run the full AMI build when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9721404 | from flask_wtf import FlaskForm
from wtforms import PasswordField, EmailField, StringField, DateField, TextAreaField
from wtforms.validators import ValidationError, Optional, InputRequired
from .model import User
from flask_login import current_user
class UpdateCredentials(FlaskForm):
    """Profile-update form. Fields are validated by WTForms' inline
    `validate_<fieldname>` hooks below; the current password is always
    required to authorize any change."""
    surname = StringField('Surname', [Optional()])
    first_name = StringField('First name', [Optional()])
    # Current password, required to verify the account before any update.
    password = PasswordField('Password', [InputRequired()])
    email = EmailField("Email", [Optional()])
    username = StringField('Username', [Optional()])
    course = StringField('Course', [Optional()])
    education = StringField('Education', [Optional()])
    about_me = TextAreaField('About me', [Optional()])
    # NOTE(review): label '<PASSWORD>' looks like a redacted placeholder --
    # confirm the intended label (likely 'New password').
    new_password = PasswordField('<PASSWORD>', [Optional()])
    headline = TextAreaField('Head line', [Optional()])

    def validate_email(self, email):
        """Reject the new email address if another account already uses it.

        BUGFIX: previously filtered User by ``username=email.data`` (comparing
        the email against usernames) and reported ``self.username.data`` as an
        unavailable *username* -- a copy of a username validator.
        """
        user = User.query.filter_by(email=email.data).first()
        if user:
            raise ValidationError(f'{email.data} is not available, choose another email.')

    def validate_password(self, password):
        """Verify the submitted current password against the logged-in user."""
        user = User.query.filter_by(email=current_user.email).first()
        if not user.verify_password(password.data):
            # (typo 'pasword' fixed in the user-facing message)
            raise ValidationError('Incorrect password, we need to verify your account before updating it.')
class EditPostForm(FlaskForm):
    """Form for editing an existing post; the body must be non-empty."""
    content = TextAreaField('Content', [InputRequired()])
8180597 | <filename>library/searchengine/nova3/engines/rarbg.py
#VERSION: 2.10
# AUTHORS: b0nk
# CONTRIBUTORS: <NAME> (<EMAIL>)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
import time
try:
# python3
from urllib.parse import urlencode, unquote
except ImportError:
# python2
from urllib import urlencode, unquote
# qBt
from novaprinter import prettyPrinter
from helpers import retrieve_url
class rarbg(object):
    """qBittorrent search-engine plugin for RARBG via the torrentapi.org JSON API."""
    url = 'https://rarbg.to'
    name = 'RARBG'
    # Maps qBittorrent category names to torrentapi category ids
    # ('movies'/'tv' are torrentapi keyword categories, not id lists).
    supported_categories = {'all': '1;4;14;15;16;17;21;22;42;18;19;41;27;28;29;30;31;32;40;23;24;25;26;33;34;43;44;45;46;47;48', # noqa
                            'movies': 'movies',
                            'tv': 'tv',
                            'music': '1;23;24;25;26',
                            'games': '1;27;28;29;30;31;32;40',
                            'software': '1;33;34;43'}

    def search(self, what, cat='all'):
        """Fetch up to 100 results sorted by seeders and print each via
        prettyPrinter in qBittorrent's expected format.

        :param what: URL-encoded search string (decoded before the API call)
        :param cat: one of supported_categories' keys
        """
        base_url = "https://torrentapi.org/pubapi_v2.php?%s"
        app_id = "qbittorrent"
        # get token (torrentapi requires one before any search call)
        params = urlencode({'get_token': 'get_token', 'app_id': app_id})
        response = retrieve_url(base_url % params)
        j = json.loads(response)
        token = j['token']
        # torrentapi rate-limits requests; wait >2s between token and search.
        time.sleep(2.1)
        # get response json
        what = unquote(what)
        category = self.supported_categories[cat]
        params = urlencode({'mode': 'search',
                            'search_string': what,
                            'ranked': 0,
                            'category': category,
                            'limit': 100,
                            'sort': 'seeders',
                            'format': 'json_extended',
                            'token': token,
                            'app_id': 'qbittorrent'})
        response = retrieve_url(base_url % params)
        j = json.loads(response)
        # parse results into the dict shape prettyPrinter expects
        for result in j['torrent_results']:
            res = {'link': result['download'],
                   'name': result['title'],
                   'size': str(result['size']) + " B",
                   'seeds': result['seeders'],
                   'leech': result['leechers'],
                   'engine_url': self.url,
                   'desc_link': result['info_page'] + "&app_id=" + app_id}
            prettyPrinter(res)
| StarcoderdataPython |
5051233 | import numpy as np
import numpy.testing as npt
from dipy.segment.clustering import QuickBundles
from dipy.segment.clusteringspeed import evaluate_aabbb_checks
from dipy.data import get_data
import nibabel as nib
from dipy.tracking.streamline import set_number_of_points
def test_aabb_checks():
    """Smoke-test the clusteringspeed AABB-overlap helper: the evaluation
    routine is expected to return a success flag of 1."""
    A, B, res = evaluate_aabbb_checks()
    npt.assert_equal(res, 1)
def show_streamlines(streamlines=None, centroids=None):
    """Interactively display the streamlines, then (after clearing the
    renderer) the centroids, when given.

    NOTE(review): block indentation was lost in this copy; the nesting below
    (second window.show inside the centroids branch) is the most plausible
    reconstruction -- confirm against the original file.
    """
    # Local import so the viz stack is only required when actually displaying.
    from dipy.viz import actor, window
    ren = window.Renderer()
    if streamlines is not None:
        stream_actor = actor.line(streamlines)
        ren.add(stream_actor)
    # Interactive window; blocks until closed.
    window.show(ren)
    ren.clear()
    if centroids is not None:
        stream_actor2 = actor.line(centroids)
        ren.add(stream_actor2)
        window.show(ren)
def test_qbundles_aabb():
    """Compare QuickBundles clustering time with and without the BVH/AABB
    acceleration on 101 shifted copies of the fornix bundle, visualizing
    both clusterings."""
    streams, hdr = nib.trackvis.read(get_data('fornix'))
    streamlines = [s[0] for s in streams]
    # Replicate the bundle 100 times, shifted 70mm apart along x.
    for i in range(100):
        streamlines += [s[0] + np.array([i * 70, 0, 0]) for s in streams]
    from dipy.tracking.streamline import select_random_set_of_streamlines
    streamlines = select_random_set_of_streamlines(streamlines,
                                                   len(streamlines))
    print(len(streamlines))
    # Resample each streamline to 20 points (QuickBundles requirement).
    rstreamlines = set_number_of_points(streamlines, 20)
    from time import time
    # Baseline: brute-force QuickBundles.
    qb = QuickBundles(2.5, bvh=False)
    t = time()
    clusters = qb.cluster(rstreamlines)
    print('Without BVH {}'.format(time() - t))
    print(len(clusters))
    show_streamlines(rstreamlines, clusters.centroids)
    # Accelerated: BVH-based AABB pruning.
    qb = QuickBundles(2.5, bvh=True)
    t = time()
    clusters = qb.cluster(rstreamlines)
    print('With BVH {}'.format(time() - t))
    print(len(clusters))
    show_streamlines(rstreamlines, clusters.centroids)
#from ipdb import set_trace
#set_trace()
#test_qbundles_aabb()
def test_qbundles_full_brain():
    """Benchmark BVH-accelerated QuickBundles on a full-brain tractogram.

    NOTE(review): reads a hard-coded local path and drops into ipdb at the
    end -- this is a developer benchmark, not an automated test.
    """
    fname = '/home/eleftherios/Data/Test_data_Jasmeen/Elef_Test_RecoBundles/tracts.trk'
    #streams, hdr = nib.trackvis.read(fname)
    obj = nib.streamlines.load(fname)
    streamlines = obj.streamlines
    from dipy.tracking.streamline import select_random_set_of_streamlines
    streamlines = select_random_set_of_streamlines(streamlines,
                                                   len(streamlines))
    print(len(streamlines))
    # Resample to 20 points per streamline; free the originals to save memory.
    rstreamlines = set_number_of_points(streamlines, 20)
    del streamlines
    from time import time
    from dipy.segment.metric import AveragePointwiseEuclideanMetric
    threshold = 15
    # qb = QuickBundles(threshold, metric=AveragePointwiseEuclideanMetric(), bvh=False)
    # t = time()
    # clusters1 = qb.cluster(rstreamlines)
    # print('Without BVH {}'.format(time() - t))
    # print(len(clusters1))
    #show_streamlines(None, clusters1.centroids)
    qb = QuickBundles(threshold, metric=AveragePointwiseEuclideanMetric(), bvh=True)
    t = time()
    clusters2 = qb.cluster(rstreamlines)
    print('With BVH {}'.format(time() - t))
    print(len(clusters2))
    show_streamlines(None, clusters2.centroids)
    # Debug leftover: drops into an interactive debugger.
    from ipdb import set_trace
    set_trace()
# 30
# 329 vs 210 1.5X
# 20
# 2006s (1110 clusters) vs 1218s (1103 clusters) 1.6X
# 15
# 8669 (4274 clusters) vs 4657 (4274 clusters) 1.86X
# 15 but with 1/2 padding
# 8669 (4274 clusters) vs 3842 (4314 clusters) 2.2X
# 15 but with No padding
# NOTE(review): runs the full-brain benchmark at import time against a
# hard-coded local path -- likely left enabled by accident; confirm before reuse.
test_qbundles_full_brain()
| StarcoderdataPython |
9672522 | from flask import render_template
from flask_json_schema import JsonValidationError
def json_validation_error(error):
    """Render the validation-error page for a failed JSON-schema check.

    BUGFIX: the original returned the ``JsonValidationError`` *class* as the
    second tuple element, where Flask expects an integer HTTP status code.

    :param error: the JsonValidationError instance passed to the template
    :return: (rendered body, 400) -- 400 Bad Request for invalid payloads
    """
    return render_template("validation_error.html", error=error), 400
6596673 | <gh_stars>1-10
import numpy as np
import cv2
import argparse
# Simple webcam viewer: show frames until 'q' is pressed.
parser = argparse.ArgumentParser()
parser.add_argument('--cam', type=int, default=0)
parser.add_argument('--hd', action='store_true', help='Save in 720p if possible')
args = parser.parse_args()
cap = cv2.VideoCapture(args.cam)
if args.hd:
    # Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    cap.set(3, 1280)
    cap.set(4, 720)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Display the resulting frame
    cv2.imshow('frame',frame)
    # Quit on 'q' (waitKey also pumps the GUI event loop).
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
8088071 | # %% Definitions
#
# # The idea here is to
# (see flow_deck_graph in prefect_flows)
#
# 1. Load the previously ETLelled outgoing and incoming graphs
# 2. Build simple paths from card to its entity nodes
# 3. Build a paths df keyed by card_id, entity and orders with some common attributes of these paths:
# paragraph type/order, pop type/order, part type/order,
# entity pos (actualy head's pos), entity head (actually head's head)
# 4. Store in postgres
# 5. Draw graph
from mtgnlp import config
import networkx as nx
from networkx.readwrite import json_graph
from sqlalchemy import create_engine
from tqdm import tqdm
import json
import textwrap
import pandas as pd
from typing import List
import logging
logPathFileName = config.LOGS_DIR.joinpath("deck_graph_functions.log")
# create logger
logger = logging.getLogger("deck_graph_functions")
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{logPathFileName}", mode="w")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# This is for jupyter notebook
# from tqdm.notebook import tqdm_notebook
# tqdm_notebook.pandas()
# This is for terminal: registers DataFrame.progress_apply used below.
tqdm.pandas(desc="Progress")
# # Params: shared DB engine and table names from the project config.
logger.info("CREATE ENGINE")
ENGINE = create_engine(config.DB_STR)
CARDS_TNAME = config.CARDS_TNAME
CARDS_JSON_TNAME = config.CARDS_JSON_TNAME
DECKS_TNAME = config.DECKS_TNAME
CARDS_TEXT_TO_ENTITY_SIMPLE_PATHS_TNAME = config.CARDS_TEXT_TO_ENTITY_SIMPLE_PATHS_TNAME
DECKS_GRAPH_TNAME = config.DECKS_GRAPH_TNAME
# # Helping functions
# + code_folding=[0]
# function to draw a graph to png
# Graphviz styling palettes. NOTE(review): SHAPES/COLORS/STYLES are not used
# by the visible code (only ENTITIES_COLORS/ABSENT_COLOR are); they appear to
# be reference lists of valid graphviz values.
SHAPES = [
    "box",
    "polygon",
    "ellipse",
    "oval",
    "circle",
    "egg",
    "triangle",
    "exagon",
    "star",
]
COLORS = ["blue", "black", "red", "#db8625", "green", "gray", "cyan", "#ed125b"]
STYLES = ["filled", "rounded", "rounded, filled", "dashed", "dotted, bold"]
# Fill color per entity type, used by relayout() for entity nodes.
ENTITIES_COLORS = {
    "PLAYER": "#FF6E6E",
    "ZONE": "#F5D300",
    "ACTION": "#1ADA00",
    "VERBAL_ACTION": "#1ADA00",
    "MANA": "#00DA84",
    "SUBTYPE": "#0DE5E5",
    "TYPE": "#0513F0",
    "SUPERTYPE": "#8D0BCA",
    "NATURE": "#1ADA",
    "ABILITY": "#cc3300",
    "COLOR": "#666633",
    "STEP": "#E0E0F8",
    "PT": "#C10AC1",
    "OBJECT": "#F5A40C",
}
# Fallback color for entity types missing from ENTITIES_COLORS.
ABSENT_COLOR = "#a87f32"
def relayout(pygraph):
    """Restyle a pygraphviz graph in place: entity nodes become filled
    hexagons colored by entity type, card nodes become filled stars.
    Returns the same graph object."""
    for node in pygraph.nodes():
        node_attrs = node.attr
        ent_type = node_attrs.get("entity_node_ent_type", None)
        if ent_type and not pd.isnull(ent_type):
            # Entity node: hexagon filled with the entity-type color
            # (attribute values arrive quoted, hence the strip('"')).
            fill = ENTITIES_COLORS.get(ent_type.strip('"'), ABSENT_COLOR)
            node.attr["fillcolor"] = fill
            node.attr["color"] = fill
            node.attr["shape"] = "hexagon"
            node.attr["style"] = "filled"
        if node_attrs.get("type", None) == '"card"':
            # Card node: olive star.
            node.attr["fillcolor"] = "#999966"
            node.attr["shape"] = "star"
            node.attr["style"] = "filled"
    return pygraph
def draw_graph(G, filename="test.png"):
    """Render a networkx graph to a PNG via pygraphviz, applying the
    entity/card node styling from relayout()."""
    pygv = nx.drawing.nx_agraph.to_agraph(G) # pygraphviz
    pygv = relayout(pygv)
    # Compute node positions with 'dot', then rasterize with 'neato'.
    pygv.layout(prog="dot")
    pygv.draw(filename, prog="neato")
    # from IPython.display import Image
    # return Image(filename)
# -
# # Build graph with Networkx
def load_cards_as_dataframe(
    cards_slugs: list,
    cards_table_name=CARDS_TNAME,
    cards_json_table_name=CARDS_JSON_TNAME,
    engine=ENGINE,
) -> pd.DataFrame:
    """Load card rows (JSON graph columns joined with card metadata) for the
    given name slugs.

    :param cards_slugs: list of card name slugs to fetch
    :raises Exception: when no slug matches, with example valid slugs

    NOTE(review): slugs are interpolated directly into the SQL string -- safe
    only for trusted input; consider parameterized queries.
    """
    logger.info(f"load_cards_as_dataframe: load cards: {cards_slugs}")
    query = f"""
    SELECT * from {cards_json_table_name} as cjson
    JOIN (SELECT name as card_name, id as card_id, type, name_slug FROM {cards_table_name}) as cards
    ON cjson.card_id = cards.card_id
    WHERE cards.name_slug IN ({", ".join([f"'{x}'" for x in cards_slugs])})
    """
    logger.debug(query)
    df = pd.read_sql_query(query, engine)
    if not df.shape[0]:
        logger.error("Empty result from load_cards_as_dataframe")
        # Fetch a few valid slugs to make the error message actionable.
        df = pd.read_sql_query(f"SELECT name_slug from {CARDS_TNAME} LIMIT 5", engine)
        raise Exception(
            f"""
            The resulting query was empty. Are you sure these cards slugs exist?
            Valid names slugs examples: {df['name_slug'].values}
            """
        )
    # (no-op expression -- notebook-style leftover)
    df
    return df
def load_decks_cards_as_dataframe(
    deck_id: str,
    cards_table_name=CARDS_TNAME,
    cards_json_table_name=CARDS_JSON_TNAME,
    decks_tname=DECKS_TNAME,
    main_deck="MAIN",
    engine=ENGINE,
) -> pd.DataFrame:
    """Load one row per card copy in a deck's main board, joining the cards'
    JSON graph columns and metadata with the deck listing.

    :param deck_id: registered deck identifier
    :param main_deck: deck section filter (decks.in column)
    :raises Exception: when the deck_id is unknown, listing valid ids

    NOTE(review): deck_id/main_deck are interpolated directly into the SQL
    string -- safe only for trusted input; consider parameterized queries.
    """
    logger.info(f"load_decks_cards_as_dataframe: load cards from deck: {deck_id}")
    query = f"""
    SELECT * from {cards_json_table_name} as cjson
    JOIN (SELECT name as card_name_2, id as card_id_2, type, text, power, toughness
    FROM {cards_table_name}) as cards
    ON cjson.card_id = cards.card_id_2
    JOIN {decks_tname} as decks
    ON cards.card_name_2 = decks.card_name
    WHERE decks.deck_id = '{deck_id}' AND decks.in = '{main_deck}'
    """
    logger.debug(query)
    df = pd.read_sql_query(query, engine)
    if not df.shape[0]:
        logger.error("Empty result from load_decks_cards_as_dataframe")
        # List the registered deck ids to make the error actionable.
        query = f"""
        SELECT DISTINCT deck_id from {decks_tname} as decks
        """
        decks_ids_df = pd.read_sql_query(query, engine)
        raise Exception(
            f"""
            The resulting query was empty. Are you sure this deck_id is registered?
            Add the deck txt to decks folder and run decks ETL no NLP to register it.
            Valid deck ids: {decks_ids_df['deck_id'].values}
            """
        )
    # logger.info(
    #     f"Filter out Basic Lands for now, the graph is too big with them")
    # df = df[~df['type'].str.contains('Basic Land')]
    # (no-op expression -- notebook-style leftover)
    df
    return df
def aggregate_cards_and_set_weights(
    df: pd.DataFrame,
    card_id_col="card_id_in_deck",
    index_name="card_unit_id",
) -> pd.DataFrame:
    """Collapse duplicate cards into one row each, adding a 'weight' column
    with the copy count and a sequential id column.

    :param df: one row per physical card; `card_id_col` is its unique id
    :param card_id_col: column uniquely identifying each copy in the input
    :param index_name: name of the sequential id column added to the result
    :raises ValueError: when the deck has more than 40 unique cards
    """
    # Copy count per card: number of non-null `card_id_col` values per card_id.
    weights = (
        df.groupby("card_id")[card_id_col]
        .count()
        .rename("weight")
        .reset_index()
    )
    unique_cards = df.drop_duplicates(subset=["card_id"]).merge(weights, on=["card_id"])
    if unique_cards.shape[0] > 40:
        raise ValueError(
            f"Too many unique cards in deck ({unique_cards.shape[0]}). "
            f"I can only process up to 40 at this moment."
        )
    # Sequential identifier for each unique card in the result.
    unique_cards[index_name] = range(unique_cards.shape[0])
    return unique_cards
def create_cards_graphs(
    # df: one row per card (pk col name: card_id_in_deck), with columns incoming, outgoing
    df: pd.DataFrame,
) -> pd.DataFrame:  # must contain cols {card_id_in_deck, incoming_graph, outgoing_graph}
    """Deserialize each card's JSON node-link graph strings ('incoming',
    'outgoing') into networkx graphs stored in new *_graph columns.
    Mutates and returns the same DataFrame."""
    df["incoming_graph"] = df["incoming"].progress_apply(
        lambda x: json_graph.node_link_graph(json.loads(x))
    )
    df["outgoing_graph"] = df["outgoing"].progress_apply(
        lambda x: json_graph.node_link_graph(json.loads(x))
    )
    return df
def collapse_single_path(digraph, path):
    """
    :param digraph: networkx.DiGraph
    :param path: list of nodes (simple path of digraph)
    :return: networkx.DiGraph with only first and last nodes and one edge between them
             The intermediate edge/node attributes are folded into the edge
             as 'edge-i'/'node-i' keys, plus human-readable 'title'/'label'.
    """
    digraph_ordered = digraph.subgraph(
        path
    )  # in each element node 0 is card, node 1 is text part
    res = nx.DiGraph()
    # Add first and last nodes with their respective attributes
    res.add_node(path[0], **digraph.nodes[path[0]])
    res.add_node(path[-1], **digraph.nodes[path[-1]])
    # edge_attr = {'full_original_path_graph': digraph}
    edge_attr = {}
    labels = []
    short_labels = []
    for i, node in enumerate(path):
        label = ""
        short_label = ""
        if not i:
            # Skip the first node: edges are keyed by (previous, current).
            continue
        # dict: attributes of each edge in order
        e_at = digraph_ordered.edges[path[i - 1], node]
        edge_attr[f"edge-{i}"] = e_at
        label += e_at.get("part_type_full", None) or e_at.get("label") + ":"
        if dict(digraph_ordered[node]):
            # dict: attributes of each node in order
            n_at = dict(digraph_ordered.nodes[node])
            edge_attr[f"node-{i}"] = dict(digraph_ordered.nodes[node])
            label += n_at.get("label")
            # Short label only for the syntactic ROOT token of a text part.
            if (e_at.get("type", None) == "token_to_head_part") and (
                e_at.get("label", None) == "ROOT"
            ):
                short_label += f"{n_at.get('token_node_text')} in {n_at.get('part_type') or ''} of {n_at.get('pop_type') or ''}"
        labels.append(label)
        if short_label:
            short_labels.append(short_label)
    res.add_edge(
        path[0],
        path[-1],
        **edge_attr,
        # This label is too big to show when plotting a full deck
        title=("".join(textwrap.wrap(f'{" |<br>".join(labels)}'))),
        label=("".join(textwrap.wrap(f'{" | ".join(short_labels)}'))),
    )
    return res
# %% Compose all

def compose_all_graphs_collapsed(
    # must contain cols { {node_id_col}, incoming_graph, outgoing_graph}
    df: pd.DataFrame,
    # the column which contains the card node id
    node_id_col: str = "card_id_in_deck",
    weight_col: str = "weight",
    target="card",  # card or entity
) -> nx.Graph:
    """Build all simple paths between cards in the deck, or cards and entities,
    and collapse each simple path into an edge between cards, or card and entity.

    Mutates *df* in place (adds helper columns, relabels the per-card graphs)
    and returns the composed deck-level graph.
    """
    # If there is not weight definition for nodes, set it as 1
    if weight_col not in df.columns:
        df[weight_col] = 1
    # reset incoming and outgoing graphs for the card nodes to have the
    # same id and that id set to card_id_in_deck
    # select node of type==card, get its id, create mapping={id: card_id_in_deck}
    df["incoming_card_original_label"] = df["incoming_graph"].progress_apply(
        lambda g: [n for n, d in g.nodes(data=True) if d["type"] == "card"][0]
    )
    df["outgoing_card_original_label"] = df["outgoing_graph"].progress_apply(
        lambda g: [n for n, d in g.nodes(data=True) if d["type"] == "card"][0]
    )
    # Relabel card nodes to their id in the deck
    df["incoming_graph"] = df.progress_apply(
        lambda row: nx.relabel_nodes(
            row["incoming_graph"],
            {row["incoming_card_original_label"]: row[node_id_col]},
        ),
        axis="columns",
    )
    df["outgoing_graph"] = df.progress_apply(
        lambda row: nx.relabel_nodes(
            row["outgoing_graph"],
            {row["outgoing_card_original_label"]: row[node_id_col]},
        ),
        axis="columns",
    )
    # For pyvis layout: set
    #   group=card_type,
    #   title=hover_text(whatever I want),
    #   size=weight,
    #   label=some short name ({weight} card_name)
    def get_label(row):
        # "<count> <name> [power/toughness]" — power/toughness only when set.
        if row["power"]:
            return (
                f"{row['weight']} {row['card_name']} {row['power']}/{row['toughness']} "
            )
        return f"{row['weight']} {row['card_name']}"

    for graph_col in ["incoming_graph", "outgoing_graph"]:
        # nx.set_node_attributes mutates the graph in place; the returned
        # Series is intentionally unused.
        nothing_returned = df.progress_apply(
            lambda row: nx.set_node_attributes(
                row[graph_col],
                # {node_id: {attr_name: value}}
                {
                    row[node_id_col]: {
                        "group": row["type"],
                        "title": row["text"],
                        "size": row["weight"],
                        "weight": row["weight"],
                        "label": get_label(row),
                        # To show card image on hover
                        # 'title': '''<img src="https://c1.scryfall.com/file/scryfall-cards/normal/front/b/f/bf87803b-e7c6-4122-add4-72e596167b7e.jpg" width="150">''',
                    }
                },
            ),
            axis="columns",
        )
    # Compose graph with all incoming and outgoing graphs
    # TODO this does not work, because all simple paths will include paths that don't actually exist
    # for example, it generates this: https://drive.google.com/file/d/1mmpore-FLxWZwxQ0TDZjeTyvLpTA8Mnb/view?usp=sharing
    # in which worhsip points to aura of silence
    # we should instead build all simple paths between every pair of cards or a card and individual entities
    # than compose all simple paths
    def get_all_target_nodes(target_type: str = "card", df=df):
        # Candidate path endpoints: every card id, or every entity node.
        if target_type == "card":
            return list(df[node_id_col].unique())
        elif target_type == "entity":
            array_of_entity_nodes = df["outgoing_graph"].progress_apply(
                lambda g: [n for n, d in g.nodes(data=True) if d["type"] == "entity"]
            )
            return set([x for lis in array_of_entity_nodes for x in lis])

    def get_targets(
        node_id, get_all_target_nodes=get_all_target_nodes, target_type=target
    ):
        """Return list of target nodes for simple paths (excluding node_id itself)"""
        return [x for x in get_all_target_nodes(target_type) if x != node_id]

    def get_all_collapsed_simple_paths(
        node_id, df=df, get_targets=get_targets, target_type=target
    ):
        """Return a list of graphs containing only two nodes:
        node_id and target_node_id,
        with edges representing simple paths between them
        """
        two_node_graphs_list = []
        # for each target_node_id
        for target_node_id in get_targets(node_id):
            # compose graph G1 from outgoing node_id to incoming target_node_id
            if target_type == "card":
                out = df.loc[df[node_id_col] == node_id, "outgoing_graph"].iloc[0]
                incom = df.loc[
                    df[node_id_col] == target_node_id, "incoming_graph"
                ].iloc[0]
                G1 = nx.algorithms.operators.compose_all([out, incom])
            if target_type == "entity":
                # entity targets already live inside the card's outgoing graph
                G1 = df.loc[df[node_id_col] == node_id, "outgoing_graph"].iloc[0]
            # get all simple paths in G1 from node_id to target_node_id
            simpaths = nx.all_simple_paths(G1, node_id, target_node_id)
            # collapse all simple paths in G1 to generate G2 (only two nodes, multiple edges)
            collapsed_spaths = [collapse_single_path(G1, path) for path in simpaths]
            if collapsed_spaths:
                G2 = nx.algorithms.operators.compose_all(collapsed_spaths)
                # extend collapsed_paths_list
                two_node_graphs_list.append(G2)
        return two_node_graphs_list

    # Get a column with all two nodes graphs in it
    logger.info(f"Get a column with all two nodes graphs in it")
    df["two_node_graphs_list"] = df[node_id_col].progress_apply(
        get_all_collapsed_simple_paths
    )
    all_two_node_graphs_list = [
        g for graph_list in df["two_node_graphs_list"].values for g in graph_list
    ]
    logger.info(f"Compose all_two_node_graphs_list")
    H = nx.algorithms.operators.compose_all(all_two_node_graphs_list)
    return H
def save_decks_graphs_to_db(
    deck_ids: List[str],  # deck_slug in database
    engine=ENGINE,
    decks_tname=DECKS_TNAME,  # NOTE(review): parameter is never used below
    decks_graphs_tname=DECKS_GRAPH_TNAME,
    target="card",
) -> List[nx.Graph]:
    """Calculate graphs for deck_ids
    and save them (serialized do json) to db associated with the deck_id.
    Also, return a list of the generated graphs
    """
    logger.info(f"Saving decks graphs: {deck_ids}")
    res = []  # NOTE(review): immediately shadowed by the DataFrame in the loop
    graphs = []
    for deck_id in deck_ids:
        logger.info(f"Saving deck graph: {deck_id}")
        # Build the collapsed deck graph from this deck's cards.
        df_orig = load_decks_cards_as_dataframe(deck_id)
        df = create_cards_graphs(df_orig.copy())
        df = aggregate_cards_and_set_weights(df)
        H = compose_all_graphs_collapsed(df, node_id_col="card_unit_id", target=target)
        graphs.append(H)
        # One-row frame holding the graph serialized as node-link JSON.
        res = pd.DataFrame(
            [
                {
                    "deck_id": deck_id,
                    f"graph_json": json.dumps(json_graph.node_link_data(H)),
                    "graph_target": target,
                }
            ]
        )
        res = res.set_index("deck_id")
        # Perform upsert
        try:
            res.to_sql(decks_graphs_tname, engine, if_exists="fail")
        except ValueError:
            # Table already exists: emulate an upsert with delete + append.
            # def create_col_query(col):
            #     return f"""
            #     ALTER TABLE {decks_graphs_tname}
            #     ADD COLUMN IF NOT EXISTS {col} JSON;
            #     """
            # NOTE(review): SQL built by f-string interpolation — acceptable
            # for internal slugs, unsafe if ids ever come from user input.
            delete_query = f"""
            DELETE from {decks_graphs_tname}
            WHERE deck_id = '{deck_id}' AND graph_target = '{target}'
            """
            # WHERE deck_id IN ({", ".join([f"'{x}'" for x in deck_ids])})
            with engine.connect() as con:
                con.execute(delete_query)
            # for col in res.columns:
            #     if col not in [deck_id]:
            #         con.execute(create_col_query(col))
            res.to_sql(decks_graphs_tname, engine, if_exists="append")
        logger.info(f"Finished deck: {deck_id}")
    return graphs
def load_decks_graphs_from_db(
    deck_ids: List[str],  # deck_slug in database
    engine=ENGINE,
    decks_graphs_tname=DECKS_GRAPH_TNAME,
    target="card",
) -> List[nx.Graph]:
    """Load decks graphs as json and de-serialize them to nx.Graphs.

    Returns one graph per stored row matching *deck_ids* and *target*,
    in database order.

    Fixed: removed the unused ``res``/``graphs`` locals and the pointless
    f-string prefix on the ``"graph_json"`` column name.
    """
    # NOTE(review): ids are interpolated into the SQL; they are internal
    # slugs here, but switch to bound parameters if they ever become
    # user-controlled.
    query = f"""
    SELECT *
    FROM {decks_graphs_tname}
    WHERE deck_id IN ({", ".join([f"'{x}'" for x in deck_ids])})
    AND graph_target = '{target}'
    """
    df = pd.read_sql_query(query, engine)
    # graph_json holds node-link JSON written by save_decks_graphs_to_db.
    df["graph"] = df["graph_json"].apply(
        lambda x: json_graph.node_link_graph(json.loads(x))
    )
    return list(df["graph"].values)
# %% Draw deck graph
if False:  # manual smoke test: build, store, reload and draw a deck graph
    # deckids = ['00deck_frustrado_dano_as_is']
    deckids = [
        # '00deck_frustrado_dano_as_is',
        # '00deck_passarinhos_as_is',
        "00deck_alsios_combado"
    ]
    target = "entity"
    H = save_decks_graphs_to_db(deck_ids=deckids, target=target)[0]
    G = load_decks_graphs_from_db(deck_ids=deckids, target=target)[0]
    # Round-tripping through the DB must preserve graph structure.
    assert nx.is_isomorphic(H, G)
    draw_graph(G, config.PICS_DECKS_GRAPHS_DIR.joinpath(f"{deckids[0]}_{target}.png"))
# %% Draw two cards graph (it will draw left to right)
if False:  # manual smoke test: compose and draw the graph between two cards
    # cards_slugs = ['incinerate', 'pardic_firecat']
    # cards_slugs = ['aura_of_silence', 'worship']
    # cards_slugs = ['thunderbolt', 'pardic_firecat']
    # cards_slugs = ['swords_to_plowshares', 'white_knight']
    cards_slugs = ["worship", "aura_of_silence"]
    df_orig = load_cards_as_dataframe(cards_slugs)
    df = create_cards_graphs(df_orig.copy())
    outgoing = df.loc[df["name_slug"] == cards_slugs[0], "outgoing_graph"].values[0]
    incoming = df.loc[df["name_slug"] == cards_slugs[1], "incoming_graph"].values[0]
    G = nx.algorithms.operators.compose_all([outgoing, incoming])
    draw_graph(G, config.PICS_DIR.joinpath(f'2_cards/{"-".join(cards_slugs)}.png'))
# %%
| StarcoderdataPython |
3475686 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.cache_manager
from frappe.model.document import Document
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
from frappe.social.doctype.energy_point_log.energy_point_log import create_energy_points_log, revert
class EnergyPointRule(Document):
    """Doctype controller: awards energy points to users when a document
    event (New / Submit / Cancel / Value Change / Custom) satisfies this
    rule's configured condition."""

    def on_update(self):
        # Rules are cached per doctype; invalidate the cache on change.
        frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)

    def on_trash(self):
        frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)

    def apply(self, doc):
        """Create energy point logs for *doc* if this rule matches it."""
        if self.rule_condition_satisfied(doc):
            multiplier = 1
            points = self.points
            if self.multiplier_field:
                # Scale points by a numeric field of the document, then clamp.
                multiplier = doc.get(self.multiplier_field) or 1
                points = round(points * multiplier)
                max_points = self.max_points
                if max_points and points > max_points:
                    points = max_points
            reference_doctype = doc.doctype
            reference_name = doc.name
            users = []
            if self.for_assigned_users:
                users = doc.get_assigned_users()
            else:
                users = [doc.get(self.user_field)]
            rule = self.name
            # incase of zero as result after roundoff
            if not points: return
            try:
                for user in users:
                    # Never award points to missing users or Administrator.
                    if not user or user == 'Administrator': continue
                    create_energy_points_log(reference_doctype, reference_name, {
                        'points': points,
                        'user': user,
                        'rule': rule
                    }, self.apply_only_once)
            except Exception as e:
                # NOTE(review): `e` is unused and the broad catch hides the
                # failure from callers; only the traceback is logged.
                frappe.log_error(frappe.get_traceback(), 'apply_energy_point')

    def rule_condition_satisfied(self, doc):
        """Return True when *doc* matches this rule's configured doc event."""
        if self.for_doc_event == 'New':
            # indicates that this was a new doc
            return doc.get_doc_before_save() == None
        if self.for_doc_event == 'Submit':
            return doc.docstatus == 1
        if self.for_doc_event == 'Cancel':
            return doc.docstatus == 2
        if self.for_doc_event == 'Value Change':
            field_to_check = self.field_to_check
            if not field_to_check: return False
            doc_before_save = doc.get_doc_before_save()
            # check if the field has been changed
            # if condition is set check if it is satisfied
            return doc_before_save \
                and doc_before_save.get(field_to_check) != doc.get(field_to_check) \
                and (not self.condition or self.eval_condition(doc))
        if self.for_doc_event == 'Custom' and self.condition:
            return self.eval_condition(doc)
        return False

    def eval_condition(self, doc):
        # The condition string is evaluated sandboxed with only `doc` visible.
        return self.condition and frappe.safe_eval(self.condition, None, {
            'doc': doc.as_dict()
        })
def process_energy_points(doc, state):
    """doc_events hook: apply every enabled Energy Point Rule to *doc*.

    Skipped entirely during patches/installs or when energy points are
    disabled in settings.
    """
    if (frappe.flags.in_patch
        or frappe.flags.in_install
        or not is_energy_point_enabled()):
        return

    old_doc = doc.get_doc_before_save()
    # check if doc has been cancelled (submitted -> cancelled transition)
    if old_doc and old_doc.docstatus == 1 and doc.docstatus == 2:
        return revert_points_for_cancelled_doc(doc)

    # Enabled rules come from a per-doctype cache to avoid repeated queries.
    for d in frappe.cache_manager.get_doctype_map('Energy Point Rule', doc.doctype,
        dict(reference_doctype = doc.doctype, enabled=1)):
        frappe.get_doc('Energy Point Rule', d.get('name')).apply(doc)
def revert_points_for_cancelled_doc(doc):
    """Revert every automatically created energy point log of *doc*."""
    filters = {
        'reference_doctype': doc.doctype,
        'reference_name': doc.name,
        'type': 'Auto',
    }
    for log_entry in frappe.get_all('Energy Point Log', filters):
        revert(log_entry.name, _('Reference document has been cancelled'))
def get_energy_point_doctypes():
    """Return the reference doctypes of all enabled Energy Point Rules."""
    enabled_rules = frappe.get_all('Energy Point Rule',
        ['reference_doctype'], {'enabled': 1})
    return [rule.reference_doctype for rule in enabled_rules]
| StarcoderdataPython |
9731702 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import os
import os.path
from leaderf.utils import *
from leaderf.explorer import *
from leaderf.manager import *
#*****************************************************
# ColorschemeExplorer
#*****************************************************
class ColorschemeExplorer(Explorer):
    """Explorer listing every colorscheme found on Vim's 'runtimepath'."""

    def __init__(self):
        pass

    def getContent(self, *args, **kwargs):
        """Return colorscheme names gathered from <rtp entry>/colors/*.vim.

        Fixed: the previous bare ``except:`` also swallowed SystemExit and
        KeyboardInterrupt; only missing/unreadable directories (OSError)
        should be skipped.
        """
        content = []
        for dir in lfEval("&rtp").split(','):
            try:
                colors = os.listdir(os.path.join(dir, "colors"))
            except OSError:
                # most runtimepath entries have no colors/ directory
                continue
            content.extend([c[:-4] for c in colors if c.endswith(".vim")])
        return content

    def getStlCategory(self):
        # Text shown in LeaderF's statusline for this explorer.
        return "Colorscheme"

    def getStlCurDir(self):
        return escQuote(lfEncode(os.getcwd()))

    def isFilePath(self):
        return False
#*****************************************************
# ColorschemeExplManager
#*****************************************************
class ColorschemeExplManager(Manager):
    """Manager wiring the colorscheme explorer into LeaderF's UI."""

    def __init__(self):
        super(ColorschemeExplManager, self).__init__()
        self._orig_line = ''

    def _getExplClass(self):
        return ColorschemeExplorer

    def _defineMaps(self):
        # Buffer-local key mappings are defined on the Vim script side.
        lfCmd("call leaderf#Colors#Maps()")

    def _acceptSelection(self, *args, **kwargs):
        # Selecting a line applies that colorscheme immediately.
        if len(args) == 0:
            return
        line = args[0]
        lfCmd("colorscheme " + line)

    def _getDigest(self, line, mode):
        """
        specify what part in the line to be processed and highlighted
        Args:
            mode: 0, 1, 2, return the whole line
        """
        if not line:
            return ''
        return line

    def _getDigestStartPos(self, line, mode):
        """
        return the start position of the digest returned by _getDigest()
        Args:
            mode: 0, 1, 2, return 0
        """
        return 0

    def _createHelp(self):
        # Lines shown when the user toggles help with <F1>.
        help = []
        help.append('" <CR>/<double-click>/o : execute command under cursor')
        help.append('" i : switch to input mode')
        help.append('" q : quit')
        help.append('" <F1> : toggle this help')
        help.append('" ---------------------------------------------------------')
        return help

    def _afterEnter(self):
        super(ColorschemeExplManager, self)._afterEnter()

    def _beforeExit(self):
        super(ColorschemeExplManager, self)._beforeExit()

    def _previewResult(self, preview):
        # Previewing a scheme is implemented by simply applying it.
        if not self._needPreview(preview):
            return
        self._acceptSelection(self._getInstance().currentLine)
#*****************************************************
# colorschemeExplManager is a singleton
#*****************************************************
# Module-level singleton consumed by the autoload Vim script; __all__ keeps
# `from ... import *` limited to it.
colorschemeExplManager = ColorschemeExplManager()
__all__ = ['colorschemeExplManager']
| StarcoderdataPython |
160223 | <filename>tests/test_day_12.py
from typing import Tuple
from tests.conftest import day_12
import pytest
@pytest.mark.parametrize('human_instruction,expected_x,expected_y,expected_bearing', [
    ('F10', 10, 0, 0),
    ('N3', 0, 3, 0),
    ('F7', 7, 0, 0),
    ('R90', 0, 0, 270),
    ('F11', 11, 0, 0)
])
def test_execute_instruction_on_boat(human_instruction: str, expected_x: int, expected_y: int, expected_bearing: int):
    """Part 1: a single instruction moves/rotates the boat itself."""
    instruction = day_12.parse_instruction(human_instruction)
    boat = day_12.Boat()
    day_12.execute_instruction_on_boat(boat, instruction)
    assert boat.x == expected_x
    assert boat.y == expected_y
    assert boat.bearing == expected_bearing

@pytest.mark.parametrize('human_instructions,expected', [
    (['F10', 'N3', 'F7', 'R90', 'F11'], 25)  # expected manhattan distance
])
def test_execute_all_instructions_on_boat(human_instructions, expected):
    """Part 1: a full instruction list, checked via manhattan distance."""
    instructions = day_12.parse_input(human_instructions)
    boat = day_12.Boat()
    day_12.execute_all_instructions_on_boat(boat, instructions)
    assert boat.manhattan_distance == expected

@pytest.mark.parametrize('human_instruction,boat_start,waypoint_start,expected_boat,expected_waypoint', [
    ('F10', (0, 0), (10, 1), (100, 10), (10, 1)),
    ('N3', (100, 10), (10, 1), (100, 10), (10, 4)),
    ('F7', (100, 10), (10, 4), (170, 38), (10, 4)),
    ('R90', (170, 38), (10, 4), (170, 38), (4, -10)),
    ('F11', (170, 38), (4, -10), (214, -72), (4, -10))
])
def test_execute_instruction_on_waypoint(
    human_instruction: str,
    boat_start: Tuple[int, int],
    waypoint_start: Tuple[int, int],
    expected_boat: Tuple[int, int],
    expected_waypoint: Tuple[int, int]
):
    """Part 2: instructions steer a waypoint; 'F' moves the boat toward it."""
    instruction = day_12.parse_instruction(human_instruction)
    boat = day_12.Boat(*boat_start)
    waypoint = day_12.Point(*waypoint_start)
    day_12.execute_instruction_on_waypoint(boat, waypoint, instruction)
    assert (boat.x, boat.y) == expected_boat
    assert (waypoint.x, waypoint.y) == expected_waypoint

@pytest.mark.parametrize('human_instructions,expected', [
    (['F10', 'N3', 'F7', 'R90', 'F11'], 286)  # expected manhattan distance
])
def test_execute_all_instructions_on_waypoint(human_instructions, expected):
    """Part 2: a full instruction list with the standard (10, 1) waypoint."""
    instructions = day_12.parse_input(human_instructions)
    boat = day_12.Boat()
    waypoint = day_12.Point(10, 1)
    day_12.execute_all_instructions_on_waypoint(boat, waypoint, instructions)
    assert boat.manhattan_distance == expected
| StarcoderdataPython |
5047941 | # models
from ..models import Notification
# serializers
from . import IsActiveListSerializer
# rest framework
from rest_framework import serializers
class NotificationSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every Notification field; depth=1 expands
    related objects one level instead of returning their primary keys."""
    class Meta:
        # Custom list serializer — presumably filters many=True output to
        # active rows; see IsActiveListSerializer for the exact behavior.
        list_serializer_class = IsActiveListSerializer
        model = Notification
        fields = '__all__'
        depth = 1
| StarcoderdataPython |
12835603 | <filename>bike/parsing/load.py
from bike.globals import *
import os
from bike.parsing.fastparser import fastparser
class Cache:
    """Process-wide parsing caches, exposed through the Cache.instance
    singleton created right below."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Drop all cached state.
        self.srcnodecache = {}       # filename path -> SourceFile
        self.typecache = {}
        self.maskedlinescache = {}

    # Class-level slot for the singleton (assigned immediately below).
    instance = None

Cache.instance = Cache()
class CantLocateSourceNodeException(Exception): pass


def getSourceNode(filename_path):
    """Return the (possibly cached) SourceFile for *filename_path*.

    Raises CantLocateSourceNodeException when the file cannot be read.
    """
    cache = Cache.instance.srcnodecache
    node = cache.get(filename_path)
    if node is None:
        from bike.parsing.newstuff import translateFnameToModuleName
        node = SourceFile.createFromFile(
            filename_path, translateFnameToModuleName(filename_path))
        if node is None:
            raise CantLocateSourceNodeException(filename_path)
        cache[filename_path] = node
    return node
class SourceFile:
    """In-memory representation of one parsed Python source file."""

    def createFromString(filename, modulename, src):
        return SourceFile(filename, modulename, src)
    createFromString = staticmethod(createFromString)

    def createFromFile(filename, modulename):
        """Read *filename* and parse it; returns None if unreadable."""
        try:
            # NOTE: `file` is the Python 2 builtin alias of open().
            f = file(filename)
            src = f.read()
            f.close()
        except IOError:
            return None
        else:
            return SourceFile(filename, modulename, src)
    createFromFile = staticmethod(createFromFile)

    def __init__(self, filename, modulename, src):
        # Always store an absolute path so cache keys are canonical.
        if os.path.isabs(filename):
            self.filename = filename
        else:
            self.filename = os.path.abspath(filename)
        self.modulename = modulename
        self.resetWithSource(src)

    def resetWithSource(self, source):
        """(Re)parse *source* and refresh the cached line list."""
        # fastparser ast
        self.fastparseroot = fastparser(source, self.modulename, self.filename)
        self.fastparseroot.setSourceNode(self)
        self._lines = source.splitlines(1)  # keepends=1: line endings retained
        self.sourcenode = self

    def __repr__(self):
        return "Source(%s,%s)" % ('source', self.filename)

    def getChildNodes(self):
        return self.fastparseroot.getChildNodes()

    def getSource(self):
        return "".join(self.getLines())

    def getLine(self, linenum):
        # linenum is 1-based.
        return self.getLines()[linenum - 1]

    # TODO: rename me!
    def getFlattenedListOfFastParserASTNodes(self):
        return self.fastparseroot.getFlattenedListOfChildNodes()

    def getLines(self):
        return self._lines
| StarcoderdataPython |
3360125 | <reponame>augustin-barillec/sonar<gh_stars>0
import math
def dotproduct(v1, v2):
    """Return the dot product of two equal-length numeric vectors."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total
def length(v):
    """Return the Euclidean norm of vector *v* (0.0 for an empty vector)."""
    # Equivalent to sqrt(dotproduct(v, v)), inlined here.
    return math.sqrt(sum(component * component for component in v))
def angle_rad(v1, v2):
    """Return the angle between *v1* and *v2* in radians (in [0, pi])."""
    # acos(dot / (|v1| * |v2|)), with dot and norms computed inline.
    dot = sum(a * b for a, b in zip(v1, v2))
    norm1 = math.sqrt(sum(a * a for a in v1))
    norm2 = math.sqrt(sum(b * b for b in v2))
    return math.acos(dot / (norm1 * norm2))
# Module-level shorthand for math.pi (also used by angle_deg below).
pi = math.pi

def angle_deg(v1, v2):
    """Return the angle between *v1* and *v2* in degrees."""
    radians = angle_rad(v1, v2)
    return radians * 180 / pi
def distance(m1, m2):
    """Return the Euclidean distance between points *m1* and *m2*."""
    deltas = [b - a for a, b in zip(m1, m2)]
    return math.sqrt(sum(d * d for d in deltas))
def line_shift(m1, m2, m):
    """Side-of-line test for 2-D points.

    The sign says on which side of the directed line m1 -> m2 the point
    *m* lies; 0 means *m* is collinear with the line.
    """
    (ax, ay), (bx, by), (px, py) = m1, m2, m
    return (px - ax) * (-(by - ay)) + (py - ay) * (bx - ax)
def first_long_block(l, value, window_length):
    """Find the first run of at least *window_length* consecutive *value*
    entries in *l*.

    Returns the inclusive (start, end) indices of the *full* run (the run
    is extended past window_length as far as it goes), or None when no
    long-enough run exists.
    """
    run = 0
    pos = 0
    # Scan left to right until a run of the required length is seen.
    while pos < len(l) and run < window_length:
        run = run + 1 if l[pos] == value else 0
        pos += 1
    if run < window_length:
        return None
    # Extend the run to its natural end.
    end = pos
    while end < len(l) and l[end] == value:
        end += 1
    return pos - run, end - 1
def last_long_block(l, value, window_length):
    """Mirror of first_long_block: find the *last* run of at least
    *window_length* consecutive *value* entries in *l*.

    Returns the inclusive (start, end) indices of the full run, or None.
    """
    run = 0
    pos = len(l) - 1
    # Scan right to left until a run of the required length is seen.
    while pos >= 0 and run < window_length:
        run = run + 1 if l[pos] == value else 0
        pos -= 1
    if run < window_length:
        return None
    # Extend the run to its natural start.
    start = pos
    while start >= 0 and l[start] == value:
        start -= 1
    return start + 1, pos + run
def index_closest(zis, z):
    """Index of the element of *zis* nearest to *z* (first one on ties)."""
    best = 0
    for i, zi in enumerate(zis):
        # Strict < keeps the earliest index among equal distances,
        # matching list.index(min(...)) semantics.
        if abs(z - zi) < abs(z - zis[best]):
            best = i
    return best
def index_furthest(zis, z):
    """Index of the element of *zis* furthest from *z* (first one on ties)."""
    best = 0
    for i, zi in enumerate(zis):
        # Strict > keeps the earliest index among equal distances,
        # matching list.index(max(...)) semantics.
        if abs(z - zi) > abs(z - zis[best]):
            best = i
    return best
| StarcoderdataPython |
3491548 | # Copyright 2020 ViaSat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import alohomora
from setuptools import setup
# Package metadata is read from the alohomora module itself so that
# version/author information lives in exactly one place.
setup(
    name='alohomora',
    version=alohomora.__version__,
    author=alohomora.__author__,
    author_email=alohomora.__author_email__,
    license=alohomora.__license__,
    url=alohomora.__url__,
    description=alohomora.__description__,
    packages=['alohomora'],
    entry_points={
        # Installs the `alohomora` console command.
        "console_scripts": [
            "alohomora=alohomora.main:main",
        ],
    },
    install_requires=[
        "boto3>=1.3.1",
        "beautifulsoup4>=4.5.1",
        "requests>=2.11.1",
    ],
    extras_require={
        # Optional U2F hardware-token support: pip install alohomora[u2f]
        "u2f": ["python-u2flib-host>=3.0.3"]
    }
)
| StarcoderdataPython |
1775909 | <gh_stars>1-10
#coding: utf-8
''' mbinary
#########################################################################
# File : rabin_karp.py
# Author: mbinary
# Mail: <EMAIL>
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-12-11 00:01
# Description: rabin-karp algorithm
#########################################################################
'''
def isPrime(x):
    """Return True iff *x* is prime.

    Fixed: the original accepted x < 2 (e.g. isPrime(1) was True).
    """
    if x < 2:
        return False
    for i in range(2, int(x**0.5) + 1):
        if x % i == 0:
            return False
    return True

def getPrime(x):
    '''Return the smallest prime p with p >= x (used as hash modulus).

    Bertrand's postulate guarantees a prime in [x, 2x) for x >= 2; for
    x < 2 return 2 (the original relied on isPrime(1) being wrongly True
    and returned 1, making the modulus below degenerate).
    '''
    if x < 2:
        return 2
    for i in range(x, 2 * x):
        if isPrime(i):
            return i

def findAll(s, p):
    """Rabin-Karp search: return all start indices where pattern *p*
    occurs in sequence *s* (works on strings or lists alike).

    Fixed: an empty pattern or a pattern longer than the text now returns
    [] instead of raising IndexError; removed the unused `tmp` variable.
    """
    n, m = len(s), len(p)
    if m == 0 or m > n:
        return []
    # Build a radix mapping from the distinct symbols of s.
    dic = {}
    d = 0  # radix = number of distinct symbols seen in s
    for c in s:
        if c not in dic:
            dic[c] = d
            d += 1
    # Exact base-d value of the pattern; a symbol absent from s means the
    # pattern cannot occur at all.
    sm = 0
    for c in p:
        if c not in dic:
            return []
        sm = sm * d + dic[c]
    ret = []
    cur = 0
    for i in range(m):
        cur = cur * d + dic[s[i]]
    if cur == sm:  # exact (pre-modulus) comparison: no collisions possible
        ret.append(0)
    q = getPrime(m)  # modulus for the rolling hash
    cur %= q
    sm %= q
    exp = d**(m - 1) % q  # weight of the window's leading symbol, mod q
    for i in range(m, n):
        # Slide the window: drop s[i-m], append s[i] (all arithmetic mod q).
        cur = ((cur - dic[s[i - m]] * exp) * d + dic[s[i]]) % q
        # Hash match is only a candidate; verify to rule out collisions.
        if cur == sm and p == s[i - m + 1:i + 1]:
            ret.append(i - m + 1)
    return ret
def randStr(n=3):
    """Return a list of *n* random code points in [ord('a'), ord('z')].

    NOTE(review): despite the name this returns a list of ints, not a str;
    findAll treats strings and lists uniformly, so it works either way.
    """
    low, high = ord('a'), ord('z')
    return [randint(low, high) for _ in range(n)]
if __name__ =='__main__':
    # Ad-hoc smoke test: search for a random 1-symbol pattern inside a
    # random 50-symbol sequence and print everything.
    from random import randint
    s = randStr(50)
    p = randStr(1)
    print(s)
    print(p)
    print(findAll(s,p))
| StarcoderdataPython |
8034322 | from __future__ import print_function
import copy
import os
import arrow
import uuid
import json
from flask import current_app
from flask_login import current_user
from sqlalchemy.types import TypeDecorator, CHAR, VARCHAR
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.dialects.sqlite import JSON
from sqlalchemy.orm import deferred
from validate_email import validate_email
from werkzeug.security import generate_password_hash, \
check_password_hash
from .conn import db
class GUID(TypeDecorator):
    """
    Platform-independent GUID type. Uses PostgreSQL's UUID type,
    otherwise uses CHAR(32), storing as stringified hex values.
    http://docs.sqlalchemy.org/en/latest/core/custom_types.html
    """
    impl = CHAR

    def load_dialect_impl(self, dialect):
        # Native UUID column on PostgreSQL, 32-char hex elsewhere.
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(UUID())
        else:
            return dialect.type_descriptor(CHAR(32))

    def process_bind_param(self, value, dialect):
        """Convert a python uuid.UUID (or uuid string) to storage form."""
        if value is None:
            return value
        elif dialect.name == 'postgresql':
            return str(value)
        else:
            # Zero-padded 32-digit hex, no dashes, for the CHAR(32) column.
            if not isinstance(value, uuid.UUID):
                return "%.32x" % uuid.UUID(value).int
            else:
                return "%.32x" % value.int

    def process_result_value(self, value, dialect):
        """Convert the stored value back into a uuid.UUID (or None)."""
        if value is None:
            return value
        else:
            return uuid.UUID(value)
class JSONEncodedDict(TypeDecorator):
    """Represents an immutable structure as a json-encoded string.

    Serializes on the way into the database and parses on the way out;
    None passes through untouched in both directions.
    """
    impl = JSON

    def process_bind_param(self, value, dialect):
        # Python object -> JSON text (or None).
        return None if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # JSON text -> Python object (or None).
        return None if value is None else json.loads(value)
class UserDb(db.Model):
    """Application user row; also implements the flask-login user API."""
    __tablename__ = 'users'
    __table_args__ = {'extend_existing': True}

    id = db.Column(GUID(), default=uuid.uuid4, primary_key=True)
    username = db.Column(db.String(255))
    name = db.Column(db.String(60))
    email = db.Column(db.String(200))
    password = db.Column(db.String(255))  # salted hash, never plaintext
    is_admin = db.Column(db.Boolean, default=False)
    objects = db.relationship('ObjectDb', backref='user', lazy='dynamic')

    def __init__(self, **kwargs):
        db.Model.__init__(self, **kwargs)
        # Replace the raw password from kwargs with its salted hash.
        self.set_password(kwargs['password'])

    # passwords are salted using werkzeug.security
    def set_password(self, password):
        self.password = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password, password)

    # following methods are required by flask-login
    # NOTE(review): newer flask-login expects is_active/is_anonymous/
    # is_authenticated as properties; is_current_user_anonymous() below
    # papers over the method/property difference.
    def get_id(self):
        return self.id

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def is_authenticated(self):
        return True
class ObjectDb(db.Model):
    """Serialized object owned by a user: JSON metadata + binary blob."""
    __tablename__ = 'objects'
    __table_args__ = {'extend_existing': True}

    id = db.Column(GUID(), default=uuid.uuid4, nullable=False, unique=True, primary_key=True)
    user_id = db.Column(GUID(True), db.ForeignKey('users.id'))
    #parent_id = db.Column(GUID(True), db.ForeignKey('objects.id'))
    obj_type = db.Column(db.Text, default=None)
    attr = db.Column(JSONEncodedDict)
    # deferred: the (potentially large) blob is only loaded on attribute access
    blob = deferred(db.Column(db.LargeBinary))
    #children = relationship("TreeNode",
    #         backref=backref('parent', remote_side=[id])
    #         )

# only needs to be done once but here just in case
db.create_all()
def filter_dict_for_none(d):
    """Return a copy of *d* without the keys whose value is None."""
    return {key: value for key, value in d.items() if value is not None}
def verify_db_session(db_session=None):
    """Return *db_session* unchanged, falling back to the global db.session."""
    return db.session if db_session is None else db_session
def get_server_filename(filename):
    """
    Returns the path to save a file on the server
    """
    # Per-user subdirectory of the configured save folder.
    dirname = get_user_server_dir(current_app.config['SAVE_FOLDER'])
    if not (os.path.exists(dirname)):
        os.makedirs(dirname)
    # Only bare filenames that don't already exist locally are relocated
    # into the server directory; absolute/relative paths pass through.
    if os.path.dirname(filename) == '' and not os.path.exists(filename):
        filename = os.path.join(dirname, filename)
    return filename
# USER functions

def is_current_user_anonymous():
    """True when no user is logged in.

    flask-login changed is_anonymous from a method to a property across
    versions; the try/except accepts both forms.
    """
    try:
        result = current_user.is_anonymous()
    except:
        result = current_user.is_anonymous
    return result
def parse_user(user):
    """Serialize a UserDb record into the camelCase dict the client expects."""
    return dict(
        id=user.id,
        name=user.name,
        username=user.username,
        email=user.email,
        isAdmin=user.is_admin,
    )
def check_valid_email(email):
    """Pass through a falsy or valid email; raise ValueError otherwise."""
    # Falsy values are allowed as-is (email is optional); short-circuit
    # keeps validate_email from ever seeing them.
    if not email or validate_email(email):
        return email
    raise ValueError('{} is not a valid email'.format(email))
def check_sha224_hash(password):
    """Return *password* if it looks like a SHA-224 hex digest.

    224 bits / 4 bits-per-hex-digit = 56 characters.
    NOTE: `basestring` makes this Python-2 only.
    """
    if isinstance(password, basestring) and len(password) == 56:
        return password
    raise ValueError('Invalid password - expecting SHA224')
def check_user_attr(user_attr):
    """Normalize and validate the incoming user dict for UserDb creation.

    Raises via check_valid_email / check_sha224_hash on bad values.
    """
    return dict(
        email=check_valid_email(user_attr.get('email', None)),
        name=user_attr.get('name', ''),
        username=user_attr.get('username', ''),
        password=check_sha224_hash(user_attr.get('password')),
    )
def create_user(user_attr, db_session=None):
    """Insert a new UserDb row from *user_attr* and return its parsed dict."""
    db_session = verify_db_session(db_session)
    # NOTE(review): str() casts every attribute, including the password
    # hash — presumably to normalize types before storage; confirm this is
    # still needed.
    for key in user_attr:
        user_attr[key] = str(user_attr[key])
    user = UserDb(**user_attr)
    db_session.add(user)
    db_session.commit()
    return parse_user(user)

def make_user_query(db_session=None, **kwargs):
    # None-valued filters are dropped rather than matched against NULL.
    db_session = verify_db_session(db_session)
    kwargs = filter_dict_for_none(kwargs)
    return db_session.query(UserDb).filter_by(**kwargs)

def load_user(db_session=None, **kwargs):
    """Return exactly one user matching the filters (raises otherwise)."""
    query = make_user_query(db_session=db_session, **kwargs)
    return query.one()

def load_users():
    # All users, unfiltered.
    return make_user_query().all()

def update_user_from_attr(user_attr, db_session=None):
    """Apply the non-None fields of *user_attr* to the existing user."""
    db_session = verify_db_session(db_session)
    user = load_user(id=user_attr['id'])
    for key, value in user_attr.items():
        if value is not None:
            setattr(user, key, value)
    db_session.add(user)
    db_session.commit()
    return parse_user(user)

def delete_user(user_id, db_session=None):
    """Delete a user and every object they own; return the parsed user."""
    db_session = verify_db_session(db_session)
    user = load_user(id=user_id)
    user_attr = parse_user(user)  # captured before the row disappears
    query = make_obj_query(user_id=user_id, db_session=db_session)
    for record in query.all():
        db_session.delete(record)
    db_session.delete(user)
    db_session.commit()
    return user_attr
def get_user_server_dir(dirpath, user_id=None):
    """
    Returns a user directory if user_id is defined
    (defaults to the logged-in user's id; falls back to *dirpath* itself
    when anonymous or when anything goes wrong).
    """
    try:
        if not is_current_user_anonymous():
            current_user_id = user_id if user_id else current_user.id
            user_path = os.path.join(dirpath, str(current_user_id))
            if not (os.path.exists(user_path)):
                os.makedirs(user_path)
            return user_path
    except:
        # NOTE(review): bare except silently falls back to the shared dir
        # on *any* failure (no request context, filesystem errors, ...).
        return dirpath
    return dirpath
# OBJECT functions

def make_obj_query(user_id=None, obj_type="project", db_session=None, **kwargs):
    """Build an ObjectDb query; None filters are dropped, and user_id /
    obj_type are folded into the filter set when given."""
    db_session = verify_db_session(db_session)
    kwargs = filter_dict_for_none(kwargs)
    if user_id is not None:
        kwargs['user_id'] = user_id
    if obj_type is not None:
        kwargs['obj_type'] = obj_type
    return db_session.query(ObjectDb).filter_by(**kwargs)

def load_obj_attr(id=id, obj_type="project", db_session=None):
    # NOTE(review): `id=id` defaults to the *builtin* id function, which
    # can never match a GUID — callers must always pass id explicitly.
    query = make_obj_query(id=id, obj_type=obj_type, db_session=db_session)
    return query.one().attr

def load_obj_records(user_id=None, obj_type="project", db_session=None):
    query = make_obj_query(user_id=user_id, obj_type=obj_type, db_session=db_session)
    return query.all()

def load_obj_attr_list(user_id=None, obj_type="project", db_session=None):
    """Attr dicts of all matching objects."""
    records = load_obj_records(user_id=user_id, obj_type=obj_type, db_session=db_session)
    return [record.attr for record in records]

def create_obj_id(db_session=None, obj_type="project", **kwargs):
    """Insert an empty object row and return its generated GUID."""
    db_session = verify_db_session(db_session)
    record = ObjectDb(obj_type=obj_type, **kwargs)
    db_session.add(record)
    db_session.commit()
    return record.id

def save_object(id, obj_type, obj_str, obj_attr, db_session=None):
    """Store the serialized object blob and refresh its bookkeeping attrs."""
    db_session = verify_db_session(db_session)
    record = make_obj_query(id=id, obj_type=obj_type, db_session=db_session).one()
    record.blob = obj_str
    # Deep-copy so the caller's dict is not mutated.
    obj_attr = copy.deepcopy(obj_attr)
    obj_attr['userId'] = str(record.user_id)
    obj_attr['modifiedTime'] = repr(arrow.now().format())
    record.attr = obj_attr
    db_session.add(record)
    db_session.commit()

def get_user_id(obj_id, db_session=None):
    record = make_obj_query(id=obj_id, db_session=db_session).one()
    return record.user_id

def load_obj_str(obj_id, obj_type, db_session=None):
    # blob is a deferred column; accessing it here triggers its load.
    record = make_obj_query(id=obj_id, obj_type=obj_type, db_session=db_session).one()
    return record.blob

def delete_obj(obj_id, db_session=None):
    db_session = verify_db_session(db_session)
    record = make_obj_query(id=obj_id, db_session=db_session).one()
    db_session.delete(record)
    db_session.commit()
| StarcoderdataPython |
9769682 | #!/usr/bin/env python3
import os
import json
import sys
import secret
# First pass: parse the POSTed form body (user=...&pass=...) if any.
posted_bytes = os.environ.get("CONTENT_LENGTH", 0)
if posted_bytes:
    posted = sys.stdin.read(int(posted_bytes))
    for line in posted.splitlines():
        values = line.split('&')
        usr = values[0].split('=')[1]
        passw = values[1].split('=')[1]
# NOTE(review): usr/passw are undefined when nothing was POSTed, so this
# raises NameError on a plain GET.  The plaintext comparison against
# secret.* (no hashing, no constant-time compare) is also insecure.
if usr == secret.username and passw == secret.password:
    print('Set-Cookie: Loggedin=True')
print('Content-Type: text/html')
print()  # blank line terminates the CGI headers
print("""
<!doctype html>
<html>
<body>
""")
# NOTE(review): stdin was already consumed above; this second read returns
# an empty string, so the echo block below never sees the form data.
posted_bytes = os.environ.get("CONTENT_LENGTH", 0)
if posted_bytes:
    posted = sys.stdin.read(int(posted_bytes))
    print(f"<p> POSTED: <pre>")
    for line in posted.splitlines():
        values = line.split('&')
        usr = values[0].split('=')[1]
        passw = values[1].split('=')[1]
        print(line)
    print("</pre></p>")
    # NOTE(review): `<PASSWORD>` is a redaction placeholder and is NOT
    # valid Python — restore `secret.password` before running this file.
    if usr == secret.username and passw == <PASSWORD>:
        print()
print("""
</body>
</html>""")
#print('Content-Type: application/json')
#print() #blank line seperates headers from content
#print(json.dumps(dict(os.environ), indent=2))
11207897 | <reponame>DanHunt27/Music-Website
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import re
from django.db.models import Q
from django.shortcuts import get_object_or_404
class Chat(models.Model):
    """A 1:1 chat room between two users."""
    user1 = models.ForeignKey(User, related_name="user_1", on_delete=models.CASCADE)
    user2 = models.ForeignKey(User, related_name="user_2", on_delete=models.CASCADE)
    room_name = models.CharField(max_length=65)

    def __str__(self):
        # Displayed as the primary key, not the room_name.
        return "{}".format(self.pk)
class Message(models.Model):
    """A single message sent from ``author`` to ``receiver`` inside a Chat."""

    # Sender and recipient; cascade-deleted with the user account.
    author = models.ForeignKey(User, related_name="author_messages", on_delete=models.CASCADE)
    receiver = models.ForeignKey(User, related_name="receiver_messages", on_delete=models.CASCADE)
    content = models.TextField()
    # ``timezone.now`` is passed uncalled, so the default is evaluated per
    # row at save time, not once at import time.
    timestamp = models.DateTimeField(default=timezone.now)
    # Owning chat room; messages disappear with the chat.
    chat = models.ForeignKey(Chat, on_delete=models.CASCADE)

    def __str__(self):
        # Messages are displayed by their sender's username.
        return self.author.username
def last_20_messages(room):
    """Return 20 messages exchanged in the room named ``room``.

    ``room`` is expected to look like "<user1>_<user2>"; each side is
    resolved to a ``User`` or a 404 is raised.

    NOTE(review): despite the name, ``order_by('timestamp')[:20]`` selects
    the 20 *earliest* messages; use ``order_by('-timestamp')`` if the most
    recent ones are wanted.  Kept as-is to preserve current behaviour.
    """
    # re.search returns None when the pattern does not match, and calling
    # .group() on None raises AttributeError (TypeError if room is not a
    # string).  The original used bare ``except:`` clauses, which also
    # swallowed unrelated errors such as KeyboardInterrupt.
    try:
        user1_name = re.search(r'([a-zA-Z\d\-]+)_', room).group(1)
    except (AttributeError, TypeError):
        user1_name = ''
    try:
        user2_name = re.search(r'_([a-zA-Z\d\-]+)', room).group(1)
    except (AttributeError, TypeError):
        user2_name = ''
    user1 = get_object_or_404(User, username=user1_name)
    user2 = get_object_or_404(User, username=user2_name)
    # Messages in either direction between the two users, oldest first.
    return Message.objects.filter(
        Q(author=user1, receiver=user2) | Q(author=user2, receiver=user1)
    ).order_by('timestamp')[:20]
| StarcoderdataPython |
1730032 | <reponame>dhar174/dataset-superscript
import os
import glob
import pandas as pd
import re
import string
from collections import OrderedDict
import io
import csv
from itertools import zip_longest
import matplotlib
from matplotlib import pyplot as plt
import cv2
import time
def yes_or_no(question):
    """Ask *question* on stdin until the user answers yes or no.

    Returns True for an answer starting with 'y', False for one starting
    with 'n'; anything else (including an empty answer) re-prompts.

    Fixes over the original: indexing ``reply[0]`` raised IndexError when
    the user just pressed Enter, and the retry used unbounded recursion.
    """
    while True:
        reply = input(question + ' (y/n): ').lower().strip()
        if reply.startswith('y'):
            return True
        if reply.startswith('n'):
            return False
        # Invalid answer: keep the original retry prompt and ask again.
        question = "Uhhhh... please enter "
# --- mutable script state --------------------------------------------------

# Annotation columns accumulated from every labels CSV that is read.
xml_list = []
filename, width, height, class_var = [], [], [], []
xmin, ymin, xmax, ymax = [], [], [], []

# Validation results: error counter plus a log of the offending entries.
errors = 0
bad_files = []

# Image dimensions measured from the actual files on disk.
realHeight, realWidth = [], []

# Root directory expected to contain the train/ and test/ image folders.
path = os.getcwd()

# Duplicate bookkeeping for the two dataset splits.
testduplicates = 0
testdupList = []
trainduplicates = 0
traindupList = []
matchDupList = []

# Worst observed gap between annotated and real image dimensions.
biggestMargin = 0

# Files whose dimensions or label rows could not be matched up.
unmatchedDims = []
unmatchedFile = []
foundDimMatch = []
# ---------------------------------------------------------------------------
# Pass 1: read every labels CSV under the working directory and sanity-check
# each annotation row (box bounds, annotated vs. real image dimensions,
# duplicates across the train/test splits).
# NOTE(review): the original indentation of this script was lost during
# extraction; the statements below are kept byte-for-byte, so the exact
# nesting must be confirmed against the upstream repository before running.
# ---------------------------------------------------------------------------
for filepath in glob.iglob(path+"*/*.csv*", recursive=True):
print(filepath)
# 'nonascii' is built but never used afterwards -- dead code.
nonascii = bytearray(range(0x80, 0x100))
with open(filepath,'r', newline='') as infile:
reader =csv.DictReader(infile)
# NOTE(review): DictReader already consumes the header line, so this
# next() call drops the FIRST DATA ROW of every CSV -- confirm intended.
next(reader, None)
## raw=open(filepath,'r')
## data=raw.read()
## raw.close()
## printable = set(string.printable)
## re.sub(r'[^\x00-\x7F]+',' ', data)
## data=''.join(filter(lambda x: x in printable, data))
#print("NEW DATA= "+data)
#print(re.search('<imageName>(.+?)</imageName>',data).group(1))
#filename.append(re.search('<imageName>(.+?)</imageName>',data).group(1).rsplit('\\.*?\\',1)[-1])
#idx=re.search('<imageName>(.+?)</imageName>',data).rfind("\\")
# Validate each annotation row of this labels file.
for row in reader:
#FIND VALUE, APPEND LIST:
f=row['filename']
# Duplicate detection: a filename seen before is recorded as a duplicate
# of the split ('test' or 'train') the *current* CSV belongs to.
for fn in filename:
if fn ==f:
if 'test' in filepath:
testduplicates+=1
testdupList.append(fn)
else:
if 'train' in filepath:
trainduplicates+=1
traindupList.append(fn)
filename.append(f)
co=0
#print(f)
# NOTE(review): this is NOT an f-string -- it compiles the literal
# pattern "{f}$", which never matches a real filename.  The file lookup
# below therefore only works through the explicit ``file==f`` test.
newpath=re.compile("{f}$")
#newpath2=re.compile("/{f}$")
#print(path+'/train')
# Measure the real dimensions of the matching image under ./train/.
for root, dirs, files in os.walk(path+'/train/'):
for file in files:
co+=1
#print(file)
if newpath.match(file) or file==f:
#print("yay")
im = cv2.imread(path+'/train/'+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
realWidth.append(rw)
realHeight.append(rh)
else:
pass
# Same measurement for ./test/.
for root, dirs, files in os.walk(path+'/test/'):
for file in files:
co+=1
#print(file)
if newpath.match(file) or file==f:
#print("yay")
im = cv2.imread(path+"/test/"+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
realWidth.append(rw)
realHeight.append(rh)
else:
pass
#print(f)
## filename.append('unknown')
## print('unknown')
# Missing columns are recorded as the sentinel 'na' instead of aborting.
# NOTE(review): the bare ``except:`` clauses hide any error, not just a
# missing key -- narrow to ``except KeyError:`` when fixing upstream.
try:
w=row['width']
width.append(w)
## print(w)
except:
width.append('na')
try:
h=row['height']
height.append(h)
## print(h)
except:
height.append('na')
try:
xmin1=row['xmin']
xmin.append(xmin1)
## print(xmin1)
except:
xmin.append('na')
try:
ymin1=row['ymin']
ymin.append(ymin1)
## print(ymin1)
except:
ymin.append('na')
try:
xmax1=row['xmax']
xmax.append(xmax1)
except:
xmax.append('na')
try:
ymax1=row['ymax']
ymax.append(ymax1)
#print(ymax2)
except:
ymax.append('na')
class_var.append(row['class'])
# Check that the box corners are ordered and inside the annotated image.
if 'na' not in (xmin[len(xmin)-1],xmax[len(xmax)-1],ymin[len(ymin)-1],ymax[len(ymax)-1]):
if int(xmin[len(xmin)-1]) < int(xmax[len(xmax)-1]) and int(ymin[len(ymin)-1]) < int(ymax[len(ymax)-1]):
pass
#print("My man!")
else:
bad_files.append(str(filename[len(filename)-1]+" " +xmin[len(xmin)-1]+" " + xmax[len(xmax)-1] +' '+ ymin[len(ymin)-1]+ ' ' + ymax[len(ymax)-1]))
errors+=1
print("MISMATCHED ANNOTATIONS1: "+ xmin[len(xmin)-1]+" " + xmax[len(xmax)-1] +' '+ ymin[len(ymin)-1]+ ' ' + ymax[len(ymax)-1])
if int(xmax[len(xmax)-1])<= int(width[len(width)-1]) and int(ymax[len(ymax)-1]) <= int(height[len(height)-1]):
pass
#print("So far, so good")
else:
bad_files.append(str(filename[len(filename)-1]+" " +xmax[len(xmax)-1]+" " + width[len(width)-1] +' '+ ymax[len(ymax)-1]+ ' ' + height[len(height)-1]))
errors+=1
ymax[len(ymax)-1]=ymax1
print("MISMATCHED ANNOTATIONS2: "+ xmax[len(xmax)-1]+" " + width[len(width)-1] +' '+ ymax[len(ymax)-1]+ ' ' + height[len(height)-1])
else:
bad_files.append(str(filename[len(filename)-1]+" " +xmin[len(xmin)-1]+" " + xmax[len(xmax)-1] +' '+ ymin[len(ymin)-1]+ ' ' + ymax[len(ymax)-1]))
errors+=1
pass
# Compare annotated width/height to what was measured from the file.
try:
if int(width[len(width)-1])!=int(realWidth[len(realWidth)-1]):
## print("Incorrect Width. W="+width[len(width)-1]+"real= "+str(realWidth[len(realWidth)-1]))
if abs(int(width[len(width)-1])-int(realWidth[len(realWidth)-1]))>biggestMargin:
biggestMargin=abs(int(width[len(width)-1])-int(realWidth[len(realWidth)-1]))
errors+=1
bad_files.append(str(filename[len(filename)-1]+" W="+str(width[len(width)-1])+"real= "+str(realWidth[len(realWidth)-1])))
if int(height[len(height)-1])!=int(realHeight[len(realHeight)-1]):
## print("Incorrect Height. H="+height[len(height)-1]+"real= "+str(realHeight[len(realHeight)-1]))
# NOTE(review): copy-paste bug -- this condition compares the WIDTH
# margin but then stores the HEIGHT margin on the next line.
if abs(int(width[len(width)-1])-int(realWidth[len(realWidth)-1]))>biggestMargin:
biggestMargin=abs(int(height[len(height)-1])-int(realHeight[len(realHeight)-1]))
errors+=1
bad_files.append(str(filename[len(filename)-1]+ " H="+height[len(height)-1])+"real= "+str(realHeight[len(realHeight)-1]))
# Boxes smaller than 1% of the image area get their coordinates mutated
# in-place; the magic offsets (/2, +100, -100, +120) are apparently an
# ad-hoc attempt to enlarge tiny boxes -- TODO confirm intent upstream.
imageArea=int(width[len(width)-1])*int(height[len(height)-1])
boxWidth = int(xmax[len(xmax)-1]) - int(xmin[len(xmin)-1])
boxHeight = int(ymax[len(ymax)-1]) - int(ymin[len(ymin)-1])
boxArea = boxWidth * boxHeight
if (boxArea < 0.01 * imageArea):
try:
xmax[len(xmax)-1]=str(round((int(xmax[len(xmax)-1])/2)))
xmin[len(xmin)-1]=str(round((int(xmin[len(xmin)-1])/2)))
ymax[len(ymax)-1]=str(int(ymax[len(ymax)-1])+100)
ymin[len(ymin)-1]=str(int(ymin[len(ymin)-1])+100)
except:
errors+=1
print(str(filename[len(filename)-1])+ " Too Small object, boxArea= "+str(boxArea)+' imageArea= '+str(imageArea))
# Re-measure and retry once with different offsets.
imageArea=int(width[len(width)-1])*int(height[len(height)-1])
boxWidth = int(xmax[len(xmax)-1]) - int(xmin[len(xmin)-1])
boxHeight = int(ymax[len(ymax)-1]) - int(ymin[len(ymin)-1])
boxArea = boxWidth * boxHeight
if (boxArea < 0.01 * imageArea):
try:
xmax[len(xmax)-1]=str(round((int(xmax[len(xmax)-1])-100)))
xmin[len(xmin)-1]=str(round((int(xmin[len(xmin)-1])-100)))
ymax[len(ymax)-1]=str(int(ymax[len(ymax)-1])+120)
ymin[len(ymin)-1]=str(int(ymin[len(ymin)-1])+120)
except:
errors+=1
print(str(filename[len(filename)-1])+ " Too Small object, boxArea= "+str(boxArea)+' imageArea= '+str(imageArea))
except:
print("Assess failed, try fix")
# --- summary of pass 1 -----------------------------------------------------
print("Lengths: Filename: " + str(len(filename))+" xmin: " + str(len(xmin)))
if errors>=1:
print("Sorry boss, you had "+str(errors)+" errors.")
print(bad_files)
print('Biggest Margin: '+str(biggestMargin))
# Reconcile duplicates that occur in both splits.
# NOTE(review): removing from testdupList while iterating over it skips the
# element after each removal -- iterate over a copy (list(testdupList)) to fix.
if testduplicates >=1 or trainduplicates >=1:
for dd in testdupList:
if dd in traindupList:
matchDupList.append(dd)
testdupList.remove(dd)
traindupList.remove(dd)
testduplicates-=1
trainduplicates-=1
print("NUM OF DUPLICATES: BOTH: "+str(len(matchDupList)))
print(matchDupList)
print("NUM OF DUPLICATES: TEST: "+str(testduplicates))
print(testdupList)
print("NUM OF DUPLICATES: TRAIN: "+str(trainduplicates))
print(traindupList)
# ---------------------------------------------------------------------------
# Pass 2 (interactive): if the user confirms, rewrite every labels CSV into
# numbered output files (1.csv, 2.csv, ...) with inflated/clamped boxes and
# corrected image dimensions, deleting images that cannot be matched.
# NOTE(review): original indentation was lost during extraction; lines are
# kept byte-for-byte, so the exact nesting must be confirmed upstream.
# ---------------------------------------------------------------------------
qu='Fix? Y/N'
time.sleep(int(4))
if yes_or_no(qu):
a=0
# NOTE(review): this glob pattern ("**/*.csv*") differs from the one used
# in pass 1 ("*/*.csv*") -- confirm whether both should match the same set.
for filepath in glob.iglob(path+"**/*.csv*", recursive=True):
a+=1
# Each input CSV is rewritten to a numbered sibling file; QUOTE_NONE
# will raise if any field ever contains the delimiter.
with open(filepath,'r', newline='') as infile, open(str(a)+'.csv','w',newline='') as outfile:
fieldnames = ['filename', 'width', 'height', 'class', 'xmin','ymin','xmax','ymax']
writer = csv.DictWriter(outfile,fieldnames=fieldnames,skipinitialspace=False,delimiter=',', quoting=csv.QUOTE_NONE)
reader =csv.DictReader(infile)
writer.writeheader()
#writer.writerow(('filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax'))
#next(reader, None)
for row in reader:
# Inflate boxes smaller than 1% of the image area by +/-4 px,
# then retry once with +/-12 px if still too small.
imageArea=int(row['width'])*int(row['height'])
boxWidth = int(row['xmax']) - int(row['xmin'])
boxHeight = int(row['ymax']) - int(row['ymin'])
boxArea = boxWidth * boxHeight
if (boxArea < 0.01 * imageArea):
try:
row['xmax']=str(round((int(row['xmax'])+4)))
row['xmin']=str(round((int(row['xmin'])-4)))
row['ymax']=str(int(row['ymax'])+4)
row['ymin']=str(int(row['ymin'])-4)
except:
print(str(filename[len(filename)-1])+ " Too Small object, boxArea= "+str(boxArea)+' imageArea= '+str(imageArea))
imageArea=int(row['width'])*int(row['height'])
boxWidth = int(row['xmax']) - int(row['xmin'])
boxHeight = int(row['ymax']) - int(row['ymin'])
boxArea = boxWidth * boxHeight
if (boxArea < 0.01 * imageArea):
try:
row['xmax']=str(round((int(row['xmax'])+12)))
row['xmin']=str(round((int(row['xmin'])-12)))
row['ymax']=str(int(row['ymax'])+12)
row['ymin']=str(int(row['ymin'])-12)
except:
print(str(filename[len(filename)-1])+ " Too Small object, boxArea= "+str(boxArea)+' imageArea= '+str(imageArea))
# Repair inverted boxes, then clamp them inside the image.
if int(row['xmin']) > int(row['xmax']):
row['xmax']=str(int(row['xmax'])+100)
print("BoxBounds Increased")
if int(row['ymin']) > int(row['ymax']):
row['ymax']=str(int(row['ymax'])+100)
print("BoxBounds Increased")
if int(row['xmax'])>= int(row['width']):
row['xmax']=str(int(row['width'])-1)
print("Clamped BoxBounds")
if int(row['ymax']) >= int(row['height']):
row['ymax']=str(int(row['height'])-1)
print("Clamped BoxBounds")
# Force all coordinates non-negative.
row['xmax']=str(abs(int(row['xmax'])))
row['xmin']=str(abs(int(row['xmin'])))
row['ymax']=str(abs(int(row['ymax'])))
row['ymin']=str(abs(int(row['ymin'])))
found=False
click=0
matchedDims=0
ff=row['filename']
#print('ff= '+ff)
# NOTE(review): as in pass 1, "{ff}$" below is not an f-string; file
# matching only works through the explicit ``file==ff`` comparison.
if ff not in testdupList and ff not in traindupList and ff not in matchDupList:
strike=0
# Non-duplicate rows: locate the image and overwrite the row's
# width/height with the measured values when they disagree.
for root, dirs, files in os.walk(path+'/train/'):
for file in files:
newpath=re.compile("{ff}$")
if newpath.match(file) or file==ff:
found=True
im = cv2.imread(path+'/train/'+file)
rh, rw, _ = im.shape
if str(rw)==row['width']:
matchedDims+=1
#print("Positive")
else:
matchedDims+=1
row['height']=str(rh)
row['width']=str(rw)
print('Fixed')
else:
strike+=1
for root, dirs, files in os.walk(path+'/test/'):
for file in files:
co+=1
newpath=re.compile("{ff}$")
#print(file)
if newpath.match(file) or file==ff:
found=True
#print("yay")
im = cv2.imread(path+"/test/"+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
if str(rw)==row['width']:
matchedDims+=1
#print("Positive")
#writer.writerow(row)
else:
matchedDims+=1
row['height']=str(rh)
row['width']=str(rw)
print('Fixed')
else:
strike+=2
# Duplicate seen in the test split: consume the duplicate entry and
# only accept the row when the stored height matches a real image.
if ff in testdupList:
testdupList.remove(row['filename'])
testduplicates-=1
for root, dirs, files in os.walk(path+'/train/'):
for file in files:
#print('file= '+file)
co+=1
newpath=re.compile("{ff}$")
#print(file)
if newpath.match(file) or file==ff:
found=True
#print("yay")
im = cv2.imread(path+'/train/'+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
if str(rh)==row['height']:
matchedDims+=1
print("good, would write")
#writer.writerow(row)
for root, dirs, files in os.walk(path+'/test/'):
for file in files:
co+=1
newpath=re.compile("{ff}$")
#print(file)
if newpath.match(file) or file==ff:
found=True
#print("yay")
im = cv2.imread(path+"/test/"+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
if str(rh)==row['height']:
matchedDims+=1
print("good, would write")
#writer.writerow(row)
else:
click+=1
# Same handling for duplicates seen in the train split.
if row['filename'] in traindupList:
traindupList.remove(row['filename'])
trainduplicates-=1
for root, dirs, files in os.walk(path+'/train/'):
for file in files:
co+=1
newpath=re.compile("{ff}$")
#print(file)
if newpath.match(file) or file==ff:
found=True
#print("yay")
im = cv2.imread(path+'/train/'+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
if str(rh)==row['height']:
matchedDims+=1
print("good, would write")
#writer.writerow(row)
for root, dirs, files in os.walk(path+'/test/'):
for file in files:
co+=1
newpath=re.compile("{ff}$")
#print(file)
if newpath.match(file) or file==ff:
found=True
#print("yay")
im = cv2.imread(path+"/test/"+file)
## print("File "+file+" is type: " +str(type(im)))
rh, rw, _ = im.shape
if str(rh)==row['height']:
matchedDims+=1
print("good, would write")
#writer.writerow(row)
else:
click+=1
#print(click)
# Decide what to do with the row based on the match counters.
if click>=2 or matchedDims>0:
#print(zip_longest(*row, fillvalue = ''))
## for r in row
final=[row['filename'],row['width'],row['height'],row['class'],row['xmin'],row['ymin'],row['xmax'],row['ymax']]
#export_data = zip(*final, fillvalue = '')
#print(final)
if found==True:
writer.writerow(row)
if matchedDims>0 and click<2:
print('How????')
# Unmatched dimensions: delete the image from both splits.
if matchedDims<1 and click<2:
if ff not in unmatchedDims:
unmatchedDims.append(ff)
for root, dirs, files in os.walk(path+'/train/'):
for file in files:
newpath=re.compile("{ff}$")
if newpath.match(file) or file==ff:
os.remove(path+'/train/'+file)
for root, dirs, files in os.walk(path+'/test/'):
for file in files:
co+=1
newpath=re.compile("{ff}$")
#print(file)
if newpath.match(file) or file==ff:
os.remove(path+'/test/'+file)
if found!=True and click<2:
if row['filename'] not in unmatchedFile:
unmatchedFile.append(row['filename'])
if row['filename'] in matchDupList:
matchDupList.remove(row['filename'])
print("Deleted "+str(len(unmatchedDims))+"Files with Unmatched Dimensions: ")
print(unmatchedDims)
if len(unmatchedFile)>=1:
print("Unmatched Files: ")
print(unmatchedFile)
# ---------------------------------------------------------------------------
# Final cleanup: delete any image that has no row in the labels CSVs.
# NOTE(review): the labels CSV is re-read once per image file -- O(files *
# rows); caching the filename set once would make this linear.
# ---------------------------------------------------------------------------
for root, dirs, files in os.walk(path+'/train/'):
for file in files:
safe=False
for filepath in glob.iglob(path+"**/train_labels.csv*", recursive=True):
with open(filepath,'r', newline='') as infile:
reader =csv.DictReader(infile)
for row in reader:
ff=row['filename']
newpath=re.compile("{ff}$")
if newpath.match(file) or file==ff:
safe=True
if safe!=True:
os.remove(path+'/train/'+file)
print("Deleted file without labels")
for root, dirs, files in os.walk(path+'/test/'):
for file in files:
safe=False
for filepath in glob.iglob(path+"**/test_labels.csv*", recursive=True):
with open(filepath,'r', newline='') as infile:
reader =csv.DictReader(infile)
for row in reader:
ff=row['filename']
newpath=re.compile("{ff}$")
if newpath.match(file) or file==ff:
safe=True
if safe!=True:
os.remove(path+'/test/'+file)
print("Deleted file without labels")
| StarcoderdataPython |
3496842 | <reponame>VertexC/pipot-server<gh_stars>1-10
import datetime
from flask import Blueprint, g, jsonify, request, render_template_string
from decorators import template_renderer, get_menu_entries
from mod_auth.controllers import login_required, check_access_rights
# Register blueprint
from mod_honeypot.models import Deployment, PiPotReport
from mod_report.forms import DashboardForm
from pipot.services.ServiceLoader import get_class_instance
# Blueprint for the reporting section of the application.
mod_report = Blueprint('report', __name__)


@mod_report.before_app_request
def before_request():
    """Register the Dashboard entry in the navigation menu on every request."""
    g.menu_entries['report'] = get_menu_entries(
        g.user, 'Dashboard', 'dashboard', 'report.dashboard')
@mod_report.route('/')
@login_required
@check_access_rights()
@template_renderer()
def dashboard():
    """Render the dashboard: every deployment with its services and the
    report types each service can produce, plus a synthetic
    "General information" entry per deployment."""
    data = []
    for deployment in Deployment.query.all():
        services = []
        for ps in deployment.profile.services:
            instance = get_class_instance(ps.service.name, None, None)
            services.append({
                'id': ps.service.id,
                'name': ps.service.name,
                'report_types': instance.get_report_types()
            })
        # Synthetic entry for deployment-wide (non-service) reports.
        services.append({
            'id': 0,
            'name': 'General information',
            'report_types': ['General data']
        })
        data.append({
            'id': deployment.id,
            'name': deployment.name,
            'profile': deployment.profile.name,
            'profile_id': deployment.profile.id,
            'services': services
        })
    return {
        'data': data,
        'form': DashboardForm()
    }
@mod_report.route('/dashboard/<action>', methods=['POST'])
@login_required
@check_access_rights('.dashboard')
def dashboard_ajax(action):
    """AJAX endpoint behind the dashboard.

    ``action`` is part of the URL:

    * ``load`` -- validate the posted DashboardForm and return rendered
      report HTML under ``result['html']``.
    * ``data`` -- placeholder for incremental data reloads (empty payload).

    Always responds with JSON shaped like
    ``{'status': ..., 'errors'/'html'/'payload': ...}``.
    """
    # NOTE(review): ``app`` is imported here but never used in this
    # function -- possibly kept for import side effects; confirm before
    # removing.
    from run import app
    result = {
        'status': 'error',
        'errors': ['invalid action']
    }
    if action == 'load':
        form = DashboardForm(request.form)
        if form.validate_on_submit():
            # NOTE(review): other form fields are read via ``.data``;
            # confirm ``is_pipot`` really is a plain attribute and not a
            # form field whose truthiness would always be True.
            if form.is_pipot:
                # Built-in report: recent PiPot log entries as a table.
                template_string = \
                    '<table><thead><tr><th>ID</th><th>Timestamp</th>' \
                    '<th>Message</th></tr></thead><tbody>' \
                    '{% for entry in entries %}<tr><td>{{ entry.id }}</td>' \
                    '<td>{{ entry.timestamp }}</td><td>{{ entry.message }}' \
                    '</td></tr>{% else %}<tr><td colspan="4">No entries ' \
                    'for this timespan</td></tr>{% endfor %}</tbody></table>'
                if form.data_num.data == -1:
                    # -1 means "everything from the last 7 days".
                    # NOTE(review): utcnow() is naive -- assumes timestamps
                    # are stored as naive UTC; confirm.
                    timestamp = datetime.datetime.utcnow() - datetime.timedelta(
                        days=7)
                    data = PiPotReport.query.filter(
                        PiPotReport.timestamp >= timestamp).order_by(
                        PiPotReport.timestamp.desc()).all()
                else:
                    # Otherwise: the N most recent entries.
                    data = PiPotReport.query.filter().order_by(
                        PiPotReport.timestamp.desc()).limit(form.data_num.data).all()
                result['data_num'] = len(data)
                template_args = {
                    'entries': data
                }
            else:
                # Service-specific report: the service instance supplies
                # both the template and its arguments.
                service = get_class_instance(form.service_inst.name, None,
                                             None)
                report_type = form.report_type.data
                template_string = service.get_template_for_type(report_type)
                template_args = service.get_template_arguments(
                    report_type,
                    service.get_data_for_type(
                        report_type,
                        **service.get_data_for_type_default_args(
                            report_type
                        )
                    )
                )
            result['status'] = 'success'
            result['html'] = render_template_string(
                template_string, **template_args)
        else:
            result['errors'] = form.errors
    if action == 'data':
        # TODO: add implementation for more data request from the client
        # side (to allow dynamic reloading of data)
        result['status'] = 'success'
        result['payload'] = ''
    return jsonify(result)
| StarcoderdataPython |
336341 | # -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2019
# (c) University of Strathclyde 2019
# Author: <NAME>
#
# Contact:
# <EMAIL>
#
# <NAME>,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G1 1XQ
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2016-2019 The James Hutton Institute
# Copyright (c) 2019 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code for handling BLAST output files."""
from typing import Any, List, TextIO
def parse_blasttab(fhandle: TextIO) -> List[List[str]]:
    """Parse a BLAST tabular output stream into a list of parsed rows.

    :param fhandle: TextIO, filehandle containing BLAST output file

    Each returned row keeps the first two tab-separated columns as strings
    and converts every remaining column to ``float``.  Comparing parsed
    values rather than raw text keeps tests independent of formatting
    differences between BLAST versions (e.g. identity reported to 2 vs 3
    significant figures by different BLASTN+ releases).
    """
    rows = []
    for line in fhandle.readlines():
        fields = line.split("\t")  # type: List[Any]
        parsed = fields[:2]  # first two columns stay as strings
        parsed.extend(float(field) for field in fields[2:])
        rows.append(parsed)
    return rows
| StarcoderdataPython |
5084437 | <filename>code/beam_search.py
from APIs import *
from Node import Node
import time
from functools import wraps
# prunning tricks
def dynamic_programming(name, t, orig_sent, sent, tags, mem_str, mem_num, head_str, head_num, label, num=6, debug=False):
must_have = []
must_not_have = []
for k, v in non_triggers.items():
if isinstance(v[0], list):
flags = []
for v_sub in v:
flag = False
for trigger in v_sub:
if trigger in ['RBR', 'RBS', 'JJR', 'JJS']:
if trigger in tags:
flag = True
break
else:
if " " + trigger + " " in " " + sent + " ":
flag = True
break
flags.append(flag)
if not all(flags):
must_not_have.append(k)
else:
flag = False
for trigger in v:
if trigger in ['RBR', 'RBS', 'JJR', 'JJS']:
if trigger in tags:
flag = True
break
else:
if " " + trigger + " " in " " + sent + " ":
flag = True
break
if not flag:
must_not_have.append(k)
node = Node(memory_str=mem_str, memory_num=mem_num, rows=t,
header_str=head_str, header_num=head_num, must_have=must_have, must_not_have=must_not_have)
count_all = False
for k, v in mem_num:
if k == "tmp_input":
count_all = True
break
start_time = time.time()
# The result storage
finished = []
hist = [[node]] + [[] for _ in range(num)]
cache = {}
def call(command, f, *args):
if command not in cache:
cache[command] = f(*args)
return cache[command]
else:
return cache[command]
start_time = time.time()
for step in range(len(hist) - 1):
# Iterate over father nodes
saved_hash = []
def conditional_add(tmp, path):
if tmp.hash not in saved_hash:
path.append(tmp)
saved_hash.append(tmp.hash)
for root in hist[step]:
# Iterate over API
for k, v in APIs.items():
# propose candidates
if k in root.must_not_have or not root.check(*v['argument']):
continue
if v['output'] == 'row' and root.row_num >= 2:
continue
if v['output'] == 'num' and root.tmp_memory_num_len >= 3:
continue
if v['output'] == 'str' and root.tmp_memory_str_len >= 3:
continue
if v['output'] == 'bool' and root.memory_bool_len >= 3:
continue
if 'inc_' in k and 'inc' in root.cur_funcs:
continue
"""
elif v['argument'] == ["header_num"]:
for l in range(len(root.header_num)):
command = v['tostr'](root.header_num[l])
if not root.exist(command):
tmp = root.clone(command)
returned = v['function'](root.header_num[l])
tmp.add_header_num(returned)
conditional_add(tmp, hist[i + 1])
elif v['argument'] == ["header_str"]:
for l in range(len(root.header_str)):
command = v['tostr'](root.header_str[l])
if not root.exist(command):
tmp = root.clone(command)
returned = v['function'](root.header_str[l])
tmp.add_header_str(returned)
conditional_add(tmp, hist[i + 1])
"""
# Incrementing/Decrementing/Whether is zero
if v['argument'] == ["num"]:
for i, (h, va) in enumerate(root.memory_num):
if v['output'] == 'num':
if step == 0 and "tmp" in h:
command = v['tostr'](root.trace_num[i])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], va)
tmp.add_memory_num(h, returned, returned)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'bool':
if "tmp_" in h and "count" not in h:
command = v['tostr'](root.trace_num[i])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], va)
tmp.delete_memory_num(i)
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
else:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'none':
if step == 0 and "tmp" in h:
command = v['tostr'](root.trace_num[i])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.delete_memory_num(i)
if tmp.done():
continue
else:
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("Returned Type Wrong")
# Incrementing/Decrementing/Whether is none
elif v['argument'] == ["str"]:
for i, (h, va) in enumerate(root.memory_str):
if v['output'] == 'str':
if step == 0:
if "tmp_" not in h:
command = v['tostr'](root.trace_str[i])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], va)
tmp.add_memory_str(h, returned, returned)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'bool':
if k == "existing" and step == 0:
pass
elif k == "none" and "tmp_" in h:
pass
else:
continue
command = v['tostr'](root.trace_str[i])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], va)
tmp.delete_memory_str(i)
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
else:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("Returned Type Wrong")
elif v['argument'] == ['row', 'header_str', 'str']:
for j, (row_h, row) in enumerate(root.rows):
for i, (h, va) in enumerate(root.memory_str):
if "tmp_" in h or len(row) == 1:
continue
for head in root.header_str:
if "; " + head + ";" in row_h:
continue
command = v['tostr'](row_h, head, root.trace_str[i])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], row, head, va)
if v['output'] == "bool":
tmp.inc_row_counter(j)
tmp.delete_memory_str(i)
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("Returned Type Wrong")
elif v['argument'] == ['row', 'header_num', 'num']:
for j, (row_h, row) in enumerate(root.rows):
for i, (h, va) in enumerate(root.memory_num):
if "tmp_" in h or len(row) == 1:
continue
for head in root.header_num:
if "; " + head + ";" in row_h:
continue
command = v['tostr'](row_h, head, root.trace_num[i])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], row, head, va)
if v['output'] == "bool":
tmp.inc_row_counter(j)
tmp.delete_memory_num(i)
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("Returned Type Wrong")
elif v['argument'] == ['bool', 'bool']:
if root.memory_bool_len < 2:
continue
else:
for l in range(0, root.memory_bool_len - 1):
for m in range(l + 1, root.memory_bool_len):
command = v['tostr'](root.memory_bool[l][0], root.memory_bool[m][0])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], root.memory_bool[l]
[1], root.memory_bool[m][1])
if v['output'] == "bool":
tmp.delete_memory_bool(l, m)
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("Returned Type Wrong")
elif v['argument'] == ['row']:
for j, (row_h, row) in enumerate(root.rows):
if k == "count":
if row_h.startswith('filter'):
pass
elif row_h == "all_rows":
if count_all:
pass
else:
continue
elif k == "only":
if not row_h.startswith('filter'):
continue
else:
if not row_h == "all_rows":
continue
command = v['tostr'](row_h)
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
returned = call(command, v['function'], row)
if v['output'] == 'num':
tmp.add_memory_num("tmp_count", returned, command)
elif v['output'] == 'row':
tmp.add_rows(command, returned)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'bool':
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, out of scope")
conditional_add(tmp, hist[step + 1])
elif v['argument'] == ['row', 'row', 'row']:
if len(root.rows) < 3:
continue
_, all_rows = root.rows[0]
for i in range(1, len(root.rows) - 1):
for j in range(i + 1, len(root.rows)):
if v['output'] == 'bool':
if len(root.rows[i][1]) != 1 or len(root.rows[j][1]) != 1:
continue
command = v['tostr'](root.rows[i][0], root.rows[j][0])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(i)
tmp.inc_row_counter(j)
returned = call(command, v['function'], all_rows, root.rows[i][1], root.rows[j][1])
if returned is not None:
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, out of scope")
elif v['argument'] == ['row', 'row']:
if len(root.rows) < 2:
continue
for i in range(len(root.rows) - 1):
for j in range(i + 1, len(root.rows)):
if v['output'] == 'bool':
if len(root.rows[i][1]) != 1 and len(root.rows[j][1]) != 1:
continue
command = v['tostr'](root.rows[i][0], root.rows[j][0])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(i)
tmp.inc_row_counter(j)
returned = call(command, v['function'], root.rows[i][1], root.rows[j][1])
if returned is not None:
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, out of scope")
elif v['argument'] == ['row', 'header_num']:
if "hop" in k:
for j, (row_h, row) in enumerate(root.rows):
if len(row) != 1:
continue
for l in range(len(root.header_num)):
command = v['tostr'](row_h, root.header_num[l])
if "; " + root.header_num[l] + ";" in row_h:
continue
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
returned = call(command, v['function'], row, root.header_num[l])
if v['output'] == 'num':
tmp.add_memory_num("tmp_" + root.header_num[l], returned, command)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, output of scope")
else:
for j, (row_h, row) in enumerate(root.rows):
if len(row) == 1:
continue
for l in range(len(root.header_num)):
command = v['tostr'](row_h, root.header_num[l])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
returned = call(command, v['function'], row, root.header_num[l])
if v['output'] == 'num':
tmp.add_memory_num("tmp_" + root.header_num[l], returned, command)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'row':
if len(returned) > 0:
tmp.add_rows(command, returned)
conditional_add(tmp, hist[step + 1])
else:
continue
else:
raise ValueError("error, output of scope")
elif v['argument'] == ['row', 'header_str']:
if "most_freq" in k:
row_h, row = root.rows[0]
for l in range(len(root.header_str)):
command = v['tostr'](row_h, root.header_str[l])
if not root.exist(command):
tmp = root.clone(command, k)
returned = call(command, v['function'], row, root.header_str[l])
if v['output'] == 'str':
if returned is not None:
tmp.add_memory_str("tmp_" + root.header_str[l], returned, command)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, output of scope")
elif "hop" in k:
for j, (row_h, row) in enumerate(root.rows):
if len(row) != 1:
continue
for l in range(len(root.header_str)):
if "; " + root.header_str[l] + ";" in row_h:
continue
command = v['tostr'](row_h, root.header_str[l])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
returned = call(command, v['function'], row, root.header_str[l])
if v['output'] == 'str':
if isinstance(returned, str):
tmp.add_memory_str("tmp_" + root.header_str[l], returned, command)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, output of scope")
else:
for j, (row_h, row) in enumerate(root.rows):
if len(row) == 1:
continue
for l in range(len(root.header_str)):
command = v['tostr'](row_h, root.header_str[l])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
returned = call(command, v['function'], row, root.header_str[l])
if v['output'] == 'str':
if isinstance(returned, str):
tmp.add_memory_str("tmp_" + root.header_str[l], returned, command)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'row':
if len(returned) > 0:
tmp.add_rows(command, returned)
conditional_add(tmp, hist[step + 1])
else:
continue
elif v['output'] == 'num':
tmp.add_memory_num("tmp_count", returned, command)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, output of scope")
elif v['argument'] == ['num', 'num']:
if root.memory_num_len < 2:
continue
for l in range(0, root.memory_num_len - 1):
for m in range(l + 1, root.memory_num_len):
if 'tmp_' in root.memory_num[l][0] or 'tmp_' in root.memory_num[m][0]:
if ("tmp_input" == root.memory_num[l][0] and "tmp_" not in root.memory_num[m][0]) or \
("tmp_input" == root.memory_num[m][0] and "tmp_" not in root.memory_num[l][0]):
continue
elif root.memory_num[l][0] == root.memory_num[m][0] == "tmp_input":
continue
else:
continue
type_l = root.memory_num[l][0].replace('tmp_', '')
type_m = root.memory_num[m][0].replace('tmp_', '')
if v['output'] == 'num':
if type_l == type_m:
command = v['tostr'](root.trace_num[l], root.trace_num[m])
tmp = root.clone(command, k)
tmp.delete_memory_num(l, m)
returned = call(command, v['function'],
root.get_memory_num(l), root.get_memory_num(m))
tmp.add_memory_num("tmp_" + root.memory_num[l][0], returned, command)
conditional_add(tmp, hist[step + 1])
elif v['output'] == 'bool':
if type_l == type_m or (type_l == "input" or type_m == "input"):
pass
else:
continue
if type_l == "count" and type_m == "input" or type_m == "count" and type_l == "input":
if max(root.get_memory_num(l), root.get_memory_num(m)) > len(root.rows[0][1]):
continue
command = v['tostr'](root.trace_num[l], root.trace_num[m])
tmp = root.clone(command, k)
tmp.delete_memory_num(l, m)
returned = call(command, v['function'], root.get_memory_num(l), root.get_memory_num(m))
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, output of scope")
elif v['argument'] == ['str', 'str']:
if root.memory_str_len < 2:
continue
for l in range(0, root.memory_str_len - 1):
for m in range(l + 1, root.memory_str_len):
if 'tmp_' not in root.memory_str[l][0] and 'tmp_' not in root.memory_str[m][0]:
continue
type_l = root.memory_str[l][0].replace('tmp_', '')
type_m = root.memory_str[m][0].replace('tmp_', '')
if type_l == type_m:
command = v['tostr'](root.trace_str[l], root.trace_str[m])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.delete_memory_str(l, m)
if v['output'] == 'bool':
returned = call(command, v['function'],
root.get_memory_str(m), root.get_memory_str(l))
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError("error, output of scope")
elif v['argument'] == ['row', ['header_str', 'str']]:
for j, (row_h, row) in enumerate(root.rows):
for i, (h, va) in enumerate(root.memory_str):
if "tmp_" not in h:
command = v['tostr'](row_h, h, root.trace_str[i])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
tmp.delete_memory_str(tmp.memory_str.index((h, va)))
returned = call(command, v['function'], row, h, va)
if v['output'] == 'row':
if len(returned) > 0:
tmp.add_rows(command, returned)
conditional_add(tmp, hist[step + 1])
else:
continue
elif v['output'] == 'bool':
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError('error, output of scope')
elif v['argument'] == ['row', ['header_num', 'num']]:
for j, (row_h, row) in enumerate(root.rows):
for i, (h, va) in enumerate(root.memory_num):
if "tmp_" not in h:
command = v['tostr'](row_h, h, root.trace_num[i])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.inc_row_counter(j)
tmp.delete_memory_num(tmp.memory_num.index((h, va)))
returned = call(command, v['function'], row, h, va)
if v['output'] == 'row':
if len(returned) > 0:
tmp.add_rows(command, returned)
conditional_add(tmp, hist[step + 1])
else:
continue
elif v['output'] == 'bool':
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError('error, output of scope')
elif v['argument'] == [['header_str', 'str'], ['header_num', 'num']]:
if not root.memory_str_len or not root.memory_num_len:
continue
row_h, row = root.rows[0]
for i, (h1, va1) in enumerate(root.memory_str):
for j, (h2, va2) in enumerate(root.memory_num):
if "tmp_" not in h1 and "tmp_" not in h2:
command = v['tostr'](h1, root.trace_str[i], h2, root.trace_num[j])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.delete_memory_str(i)
tmp.delete_memory_num(j)
returned = call(command, v['function'], row, h1, va1, h2, va2)
if v['output'] == 'bool':
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError('error, output of scope')
elif v['argument'] == [['header_str', 'str'], ['header_str', 'str']]:
if root.memory_str_len < 2:
continue
row_h, row = root.rows[0]
for l in range(len(root.memory_str) - 1):
for m in range(l + 1, len(root.memory_str)):
h1, va1 = root.memory_str[l]
h2, va2 = root.memory_str[m]
if "tmp_" not in h1 and "tmp_" not in h2 and (h1 != h2):
command = v['tostr'](h1, root.trace_str[l], h2, root.trace_str[m])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.delete_memory_str(l, m)
returned = call(command, v['function'], row, h1, va1, h2, va2)
if v['output'] == 'bool':
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError('error, output of scope')
elif v['argument'] == [['header_num', 'num'], ['header_num', 'num']]:
if root.memory_num_len < 2:
continue
row_h, row = root.rows[0]
for l in range(len(root.memory_num) - 1):
for m in range(l + 1, len(root.memory_num)):
h1, va1 = root.memory_num[l]
h2, va2 = root.memory_num[m]
if ("tmp_" not in h1 and "tmp_" not in h2) and (h1 != h2):
command = v['tostr'](h1, root.trace_num[l], h2, root.trace_num[m])
if not root.exist(command):
tmp = root.clone(command, k)
tmp.delete_memory_num(l, m)
returned = call(command, v['function'], row, h1, va1, h2, va2)
if v['output'] == 'bool':
if tmp.done():
tmp.append_result(command, returned)
finished.append((tmp, returned))
elif tmp.memory_bool_len < 2:
tmp.add_memory_bool(command, returned)
conditional_add(tmp, hist[step + 1])
else:
raise ValueError(k + ": error")
if len(finished) > 100 or time.time() - start_time > 40:
break
# return (name, orig_sent, label, [_[0].cur_str for _ in finished])
"""
if debug:
with open('/tmp/results.txt', 'w') as f:
for h in hist[-1]:
print(h.cur_strs, file=f)
"""
return (name, orig_sent, sent, label, [_[0].cur_str for _ in finished])
# for _ in finished:
# print(_[0].cur_str, _[1])
| StarcoderdataPython |
# Module-level MongoDB bootstrap: creates one shared client/database handle
# ("db") that the rest of the application imports.
import pymongo
import dotenv
from .config import MONGO_URI
# NOTE(review): load_dotenv() runs *after* `.config` has already been
# imported above; if config reads MONGO_URI from os.environ at import time,
# the .env file is loaded too late -- confirm that .config loads it itself.
dotenv.load_dotenv()
# Lazy client: pymongo does not actually connect until the first operation.
client = pymongo.MongoClient(MONGO_URI)
# Shared database handle for the 'cookiecoin' database.
db = client.get_database('cookiecoin')
9781968 | <filename>cherrypy/test/test_tools.py
"""Test the various means of instantiating and invoking tools."""
import gzip
import sys
from cherrypy._cpcompat import BytesIO, copyitems, itervalues, IncompleteRead, ntob, ntou, xrange
import time
timeout = 0.2
import types
import cherrypy
from cherrypy import tools
europoundUnicode = ntou('\x80\xa3')
# Client-side code #
from cherrypy.test import helper
class ToolTests(helper.CPWebCase):
    """Integration tests for CherryPy's tool machinery.

    setup_server() builds an app exercising the three ways of enabling a
    tool (per-class _cp_config, compile-time decorator, detached config),
    custom toolboxes/namespaces, hook priorities, failsafe hooks, bare
    hooks, and handler-wrapper tools; the test methods then drive it over
    HTTP via helper.CPWebCase.
    """
    # Builds the whole test application. Runs without an instance (wrapped
    # in staticmethod below), as CPWebCase expects.
    def setup_server():
        # Put check_access in a custom toolbox with its own namespace
        myauthtools = cherrypy._cptools.Toolbox("myauth")
        # Deny the request unless cherrypy.request.userid is truthy
        # (or `default` is truthy when the attribute is absent).
        def check_access(default=False):
            if not getattr(cherrypy.request, "userid", default):
                raise cherrypy.HTTPError(401)
        myauthtools.check_access = cherrypy.Tool('before_request_body', check_access)
        # Replaces keys with values (per request.numerify_map) in the body.
        def numerify():
            def number_it(body):
                for chunk in body:
                    for k, v in cherrypy.request.numerify_map:
                        chunk = chunk.replace(k, v)
                    yield chunk
            cherrypy.response.body = number_it(cherrypy.response.body)
        # Tool subclass that installs extra on_start_resource hooks,
        # including a 'failsafe' one that forces errors to HTTP 502.
        class NumTool(cherrypy.Tool):
            def _setup(self):
                def makemap():
                    m = self._merged_args().get("map", {})
                    cherrypy.request.numerify_map = copyitems(m)
                cherrypy.request.hooks.attach('on_start_resource', makemap)
                def critical():
                    cherrypy.request.error_response = cherrypy.HTTPError(502).set_response
                # failsafe hooks run even if earlier hooks at the same
                # point raised (exercised by testGuaranteedHooks).
                critical.failsafe = True
                cherrypy.request.hooks.attach('on_start_resource', critical)
                cherrypy.request.hooks.attach(self._point, self.callable)
        tools.numerify = NumTool('before_finalize', numerify)
        # It's not mandatory to inherit from cherrypy.Tool.
        class NadsatTool:
            def __init__(self):
                # maps request id -> True once on_end_request has fired
                self.ended = {}
                self._name = "nadsat"
            def nadsat(self):
                def nadsat_it_up(body):
                    for chunk in body:
                        chunk = chunk.replace(ntob("good"), ntob("horrorshow"))
                        chunk = chunk.replace(ntob("piece"), ntob("lomtick"))
                        yield chunk
                cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
            nadsat.priority = 0
            def cleanup(self):
                # This runs after the request has been completely written out.
                cherrypy.response.body = [ntob("razdrez")]
                id = cherrypy.request.params.get("id")
                if id:
                    self.ended[id] = True
            cleanup.failsafe = True
            def _setup(self):
                cherrypy.request.hooks.attach('before_finalize', self.nadsat)
                cherrypy.request.hooks.attach('on_end_request', self.cleanup)
        tools.nadsat = NadsatTool()
        # Bare hook: read the raw request body ourselves instead of
        # letting CherryPy process it (used by /pipe).
        def pipe_body():
            cherrypy.request.process_request_body = False
            clen = int(cherrypy.request.headers['Content-Length'])
            cherrypy.request.body = cherrypy.request.rfile.read(clen)
        # Assert that we can use a callable object instead of a function.
        class Rotator(object):
            def __call__(self, scale):
                r = cherrypy.response
                r.collapse_body()
                # Caesar-style byte rotation of the response body.
                r.body = [chr((ord(x) + scale) % 256) for x in r.body[0]]
        cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
        # Handler wrapper: redirect page-handler writes into a buffer and
        # return the buffered bytes as the response.
        def stream_handler(next_handler, *args, **kwargs):
            cherrypy.response.output = o = BytesIO()
            try:
                response = next_handler(*args, **kwargs)
                # Ignore the response and return our accumulated output instead.
                return o.getvalue()
            finally:
                o.close()
        cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(stream_handler)
        class Root:
            def index(self):
                return "Howdy earth!"
            index.exposed = True
            def tarfile(self):
                cherrypy.response.output.write(ntob('I am '))
                cherrypy.response.output.write(ntob('a tarfile'))
            tarfile.exposed = True
            tarfile._cp_config = {'tools.streamer.on': True}
            def euro(self):
                # Self-check: only the gzip tool should be hooked at
                # before_finalize here, with its default priority of 80.
                hooks = list(cherrypy.request.hooks['before_finalize'])
                hooks.sort()
                cbnames = [x.callback.__name__ for x in hooks]
                assert cbnames == ['gzip'], cbnames
                priorities = [x.priority for x in hooks]
                assert priorities == [80], priorities
                yield ntou("Hello,")
                yield ntou("world")
                yield europoundUnicode
            euro.exposed = True
            # Bare hooks
            def pipe(self):
                return cherrypy.request.body
            pipe.exposed = True
            pipe._cp_config = {'hooks.before_request_body': pipe_body}
            # Multiple decorators; include kwargs just for fun.
            # Note that rotator must run before gzip.
            def decorated_euro(self, *vpath):
                yield ntou("Hello,")
                yield ntou("world")
                yield europoundUnicode
            decorated_euro.exposed = True
            decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
            decorated_euro = tools.rotator(scale=3)(decorated_euro)
        root = Root()
        class TestType(type):
            """Metaclass which automatically exposes all functions in each subclass,
            and adds an instance of the subclass as an attribute of root.
            """
            def __init__(cls, name, bases, dct):
                type.__init__(cls, name, bases, dct)
                for value in itervalues(dct):
                    if isinstance(value, types.FunctionType):
                        value.exposed = True
                setattr(root, name.lower(), cls())
        class Test(object):
            # NOTE: Python 2 metaclass hook; has no effect under Python 3
            # (which would require `class Test(metaclass=TestType)`).
            __metaclass__ = TestType
        # METHOD ONE:
        # Declare Tools in _cp_config
        class Demo(Test):
            _cp_config = {"tools.nadsat.on": True}
            def index(self, id=None):
                return "A good piece of cherry pie"
            def ended(self, id):
                return repr(tools.nadsat.ended[id])
            def err(self, id=None):
                raise ValueError()
            def errinstream(self, id=None):
                # Deliberately errors *after* output has started; the
                # final yield is intentionally unreachable.
                yield "nonconfidential"
                raise ValueError()
                yield "confidential"
            # METHOD TWO: decorator using Tool()
            # We support Python 2.3, but the @-deco syntax would look like this:
            # @tools.check_access()
            def restricted(self):
                return "Welcome!"
            restricted = myauthtools.check_access()(restricted)
            userid = restricted
            def err_in_onstart(self):
                return "success!"
            def stream(self, id=None):
                # Effectively endless body, so the client can drop mid-stream.
                for x in xrange(100000000):
                    yield str(x)
            stream._cp_config = {'response.stream': True}
        conf = {
            # METHOD THREE:
            # Declare Tools in detached config
            '/demo': {
                'tools.numerify.on': True,
                'tools.numerify.map': {ntob("pie"): ntob("3.14159")},
            },
            '/demo/restricted': {
                'request.show_tracebacks': False,
            },
            '/demo/userid': {
                'request.show_tracebacks': False,
                'myauth.check_access.default': True,
            },
            '/demo/errinstream': {
                'response.stream': True,
            },
            '/demo/err_in_onstart': {
                # Because this isn't a dict, on_start_resource will error.
                'tools.numerify.map': "pie->3.14159"
            },
            # Combined tools
            '/euro': {
                'tools.gzip.on': True,
                'tools.encode.on': True,
            },
            # Priority specified in config
            '/decorated_euro/subpath': {
                'tools.gzip.priority': 10,
            },
            # Handler wrappers
            '/tarfile': {'tools.streamer.on': True}
        }
        app = cherrypy.tree.mount(root, config=conf)
        # Register the custom toolbox so 'myauth.*' config keys resolve.
        app.request_class.namespaces['myauth'] = myauthtools
        if sys.version_info >= (2, 5):
            from cherrypy.test import _test_decorators
            root.tooldecs = _test_decorators.ToolExamples()
    setup_server = staticmethod(setup_server)
    # Verifies hook side effects, failsafe cleanup, and error handling
    # before/after output has started.
    def testHookErrors(self):
        self.getPage("/demo/?id=1")
        # If body is "razdrez", then on_end_request is being called too early.
        self.assertBody("A horrorshow lomtick of cherry 3.14159")
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage("/demo/ended/1")
        self.assertBody("True")
        # NOTE(review): this pattern depends on the interpreter's exact
        # traceback formatting -- confirm against the target Python version.
        valerr = '\n raise ValueError()\nValueError'
        self.getPage("/demo/err?id=3")
        # If body is "razdrez", then on_end_request is being called too early.
        self.assertErrorPage(502, pattern=valerr)
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage("/demo/ended/3")
        self.assertBody("True")
        # If body is "razdrez", then on_end_request is being called too early.
        if (cherrypy.server.protocol_version == "HTTP/1.0" or
                getattr(cherrypy.server, "using_apache", False)):
            self.getPage("/demo/errinstream?id=5")
            # Because this error is raised after the response body has
            # started, the status should not change to an error status.
            self.assertStatus("200 OK")
            self.assertBody("nonconfidential")
        else:
            # Because this error is raised after the response body has
            # started, and because it's chunked output, an error is raised by
            # the HTTP client when it encounters incomplete output.
            self.assertRaises((ValueError, IncompleteRead), self.getPage,
                              "/demo/errinstream?id=5")
        # If this fails, then on_end_request isn't being called at all.
        time.sleep(0.1)
        self.getPage("/demo/ended/5")
        self.assertBody("True")
        # Test the "__call__" technique (compile-time decorator).
        self.getPage("/demo/restricted")
        self.assertErrorPage(401)
        # Test compile-time decorator with kwargs from config.
        self.getPage("/demo/userid")
        self.assertBody("Welcome!")
    # on_end_request must fire even when the client disconnects mid-stream.
    def testEndRequestOnDrop(self):
        old_timeout = None
        try:
            httpserver = cherrypy.server.httpserver
            old_timeout = httpserver.timeout
        except (AttributeError, IndexError):
            # No accessible in-process HTTP server; nothing to test.
            return self.skip()
        try:
            httpserver.timeout = timeout
            # Test that on_end_request is called even if the client drops.
            self.persistent = True
            try:
                conn = self.HTTP_CONN
                conn.putrequest("GET", "/demo/stream?id=9", skip_host=True)
                conn.putheader("Host", self.HOST)
                conn.endheaders()
                # Skip the rest of the request and close the conn. This will
                # cause the server's active socket to error, which *should*
                # result in the request being aborted, and request.close being
                # called all the way up the stack (including WSGI middleware),
                # eventually calling our on_end_request hook.
            finally:
                self.persistent = False
            time.sleep(timeout * 2)
            # Test that the on_end_request hook was called.
            self.getPage("/demo/ended/9")
            self.assertBody("True")
        finally:
            if old_timeout is not None:
                httpserver.timeout = old_timeout
    def testGuaranteedHooks(self):
        # The 'critical' on_start_resource hook is 'failsafe' (guaranteed
        # to run even if there are failures in other on_start methods).
        # This is NOT true of the other hooks.
        # Here, we have set up a failure in NumerifyTool.numerify_map,
        # but our 'critical' hook should run and set the error to 502.
        self.getPage("/demo/err_in_onstart")
        self.assertErrorPage(502)
        self.assertInBody("AttributeError: 'str' object has no attribute 'items'")
    # Exercises gzip+encode combined, and priority reordering via config.
    def testCombinedTools(self):
        expectedResult = (ntou("Hello,world") + europoundUnicode).encode('utf-8')
        zbuf = BytesIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
        zfile.write(expectedResult)
        zfile.close()
        self.getPage("/euro", headers=[("Accept-Encoding", "gzip"),
                                       ("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7")])
        # Compare only the gzip magic/header prefix; the deflate payload
        # can differ between zlib builds.
        self.assertInBody(zbuf.getvalue()[:3])
        zbuf = BytesIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
        zfile.write(expectedResult)
        zfile.close()
        self.getPage("/decorated_euro", headers=[("Accept-Encoding", "gzip")])
        self.assertInBody(zbuf.getvalue()[:3])
        # This returns a different value because gzip's priority was
        # lowered in conf, allowing the rotator to run after gzip.
        # Of course, we don't want breakage in production apps,
        # but it proves the priority was changed.
        self.getPage("/decorated_euro/subpath",
                     headers=[("Accept-Encoding", "gzip")])
        self.assertInBody(''.join([chr((ord(x) + 3) % 256) for x in zbuf.getvalue()]))
    # A bare before_request_body hook should receive the raw body.
    def testBareHooks(self):
        content = "bit of a pain in me gulliver"
        self.getPage("/pipe",
                     headers=[("Content-Length", str(len(content))),
                              ("Content-Type", "text/plain")],
                     method="POST", body=content)
        self.assertBody(content)
    def testHandlerWrapperTool(self):
        self.getPage("/tarfile")
        self.assertBody("I am a tarfile")
    def testToolWithConfig(self):
        if not sys.version_info >= (2, 5):
            return self.skip("skipped (Python 2.5+ only)")
        self.getPage('/tooldecs/blah')
        self.assertHeader('Content-Type', 'application/data')
    # Tool.on must raise AttributeError on both read and write; enabling
    # a tool is done through config ('tools.<name>.on'), not the attribute.
    def testWarnToolOn(self):
        # get
        try:
            numon = cherrypy.tools.numerify.on
        except AttributeError:
            pass
        else:
            raise AssertionError("Tool.on did not error as it should have.")
        # set
        try:
            cherrypy.tools.numerify.on = True
        except AttributeError:
            pass
        else:
            raise AssertionError("Tool.on did not error as it should have.")
| StarcoderdataPython |
1714376 | <gh_stars>1-10
from datetime import timedelta
from flask import Blueprint, request
from sqlalchemy.exc import IntegrityError
from flask_jwt_extended import create_access_token, create_refresh_token, get_jwt_identity, jwt_refresh_token_required
from http import HTTPStatus
from app.models import User, db
from app.services.http import build_api_response
bp_auth = Blueprint("api_auth", __name__, url_prefix="/auth")
@bp_auth.route('/signup', methods=['POST'])
def signup():
    """Register a new user from the JSON request body.

    Expects 'name', 'email', 'password' and 'is_admin' keys (each may be
    absent, in which case None is stored). Returns 201 on success or 422
    when the insert violates a database constraint (e.g. duplicate email).
    """
    payload = request.get_json(force=True)
    new_user = User(
        name=payload.get('name'),
        email=payload.get('email'),
        password=payload.get('password'),
        is_admin=payload.get('is_admin'),
    )
    try:
        db.session.add(new_user)
        db.session.commit()
        return build_api_response(HTTPStatus.CREATED)
    except IntegrityError:
        # Constraint violation -> unprocessable entity, no user created.
        return build_api_response(HTTPStatus.UNPROCESSABLE_ENTITY)
@bp_auth.route('/login', methods=['POST'])
def login():
    """Authenticate a user by email/password and issue JWT tokens.

    Returns:
        401 when the email is unknown or the password does not match;
        otherwise a JSON payload with the user's profile, a 5-day access
        token and a 10-day refresh token.
    """
    # Parse the body once (the original called request.get_json twice).
    data = request.get_json(force=True)
    user_found = User.query.filter_by(email=data.get('email')).first()
    # Bug fix: an unknown email previously crashed with AttributeError
    # (HTTP 500) on `user_found.password`; answer 401 instead so callers
    # get a proper auth failure and emails cannot be probed via 500s.
    if user_found is None:
        return build_api_response(HTTPStatus.UNAUTHORIZED)
    # Original code had the two names swapped (`pwd_informed` held the DB
    # value); compare directly to avoid the confusion.
    # NOTE(review): passwords appear to be stored and compared in plain
    # text -- consider hashing at the model level (e.g. werkzeug.security).
    if user_found.password != data.get('password'):
        return build_api_response(HTTPStatus.UNAUTHORIZED)
    access_token = create_access_token(
        identity=user_found.id, expires_delta=timedelta(days=5))
    fresh_token = create_refresh_token(
        identity=user_found.id, expires_delta=timedelta(days=10))
    return {'data': {
        'name': user_found.name,
        'email': user_found.email,
        'is_admin': user_found.is_admin,
        'access_token': access_token,
        'fresh_token': fresh_token
    }}
@bp_auth.route('/fresh_token', methods=['GET'])
@jwt_refresh_token_required
def get_fresh_token():
    """Issue a new access token for the identity in a valid refresh token.

    Returns:
        str: a JWT access token valid for 5 days.
    """
    user_id = get_jwt_identity()
    # Bug fix: the original passed `identity=user_found.id`, but
    # `user_found` is undefined in this function, so every call raised
    # NameError (HTTP 500). The identity comes from the refresh token.
    access_token = create_access_token(
        identity=user_id, expires_delta=timedelta(days=5))
    return access_token
| StarcoderdataPython |
4914024 | <filename>builder/frameworks/linux.py<gh_stars>0
# Copyright 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import join
from SCons.Script import DefaultEnvironment
# SCons build configuration for this PlatformIO framework: extend the
# default environment with the compiler flags and link libraries.
env = DefaultEnvironment()
env.Append(
    CPPFLAGS=[
        # Target the ARM1176JZF-S core with hard-float VFP code generation.
        "-mcpu=arm1176jzf-s",
        "-mtune=arm1176jzf-s",
        "-mfloat-abi=hard",
        "-mfpu=vfp",
        # Optimize, and enable a strict set of warnings.
        "-O2",
        "-Wformat=2",
        "-Wall",
        "-Wextra",
        "-Winline"
    ],
    LIBS=[
        # POSIX threads are always linked in.
        "pthread"
    ]
)
| StarcoderdataPython |
11279873 | <gh_stars>1-10
import torch
import torch.nn as nn
from torchvision import models, transforms
class VGG19(nn.Module):
    """Frozen VGG-19 backbone that exposes intermediate ReLU activations.

    Loads weights from a local state-dict file (no download), freezes all
    parameters, and returns a dict of feature maps from forward().
    """

    def __init__(self, vgg_path="models/vgg19-d01eb7cb.pth"):
        super(VGG19, self).__init__()
        # Build an untrained VGG-19 skeleton, then load weights from disk.
        backbone = models.vgg19(pretrained=False)
        backbone.load_state_dict(torch.load(vgg_path), strict=False)
        self.features = backbone.features
        # Feature extractor only: never backprop through the backbone.
        for weight in self.features.parameters():
            weight.requires_grad = False

    def forward(self, x):
        """Run `x` through the conv stack and collect the tapped activations.

        Returns a dict mapping labels ('relu1_2', ..., 'relu5_4') to the
        feature tensors produced at the corresponding sequential indices.
        """
        # Sequential index -> human-readable layer label.
        taps = {'3': 'relu1_2', '8': 'relu2_2', '17': 'relu3_4',
                '22': 'relu4_2', '26': 'relu4_4', '35': 'relu5_4'}
        activations = {}
        for idx, module in self.features._modules.items():
            x = module(x)
            label = taps.get(idx)
            if label is not None:
                activations[label] = x
        return activations
class VGG16(nn.Module):
    """Frozen VGG-16 backbone that exposes intermediate ReLU activations.

    Loads weights from a local state-dict file (no download), freezes all
    parameters, and returns a dict of feature maps from forward(). The
    forward pass stops after relu4_3, since no later layer is tapped.
    """

    def __init__(self, vgg_path="models/vgg16-00b39a1b.pth"):
        super(VGG16, self).__init__()
        # Build an untrained VGG-16 skeleton, then load weights from disk.
        backbone = models.vgg16(pretrained=False)
        backbone.load_state_dict(torch.load(vgg_path), strict=False)
        self.features = backbone.features
        # Feature extractor only: never backprop through the backbone.
        for weight in self.features.parameters():
            weight.requires_grad = False

    def forward(self, x):
        """Collect activations at the tap points, stopping after index 22."""
        # Sequential index -> human-readable layer label.
        taps = {'3': 'relu1_2', '8': 'relu2_2', '15': 'relu3_3', '22': 'relu4_3'}
        activations = {}
        for idx, module in self.features._modules.items():
            x = module(x)
            label = taps.get(idx)
            if label is not None:
                activations[label] = x
            if idx == '22':
                # Nothing past relu4_3 is needed; skip the remaining layers.
                break
        return activations
11387575 | from rest_framework.permissions import BasePermission
class IsOrgAdmin(BasePermission):
    """Grant access only when the requesting user's organisation exists
    and is flagged as staff."""

    def has_permission(self, request, *args, **kwargs):
        organisation = request.user.org
        # Falsy org (or falsy is_staff) denies access.
        return bool(organisation and organisation.is_staff)
| StarcoderdataPython |
4934412 | <reponame>KazukiOnodera/Microsoft-Malware-Prediction
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 21:49:53 2019
@author: kazuki.onodera
NOT time series feature
"""
import numpy as np
import pandas as pd
#from multiprocessing import Pool
from sklearn.preprocessing import LabelEncoder
import utils
PREF = 'f002'
# Categorical feature columns to be label-encoded in the __main__ block.
# The "cardinality(train, test)" notes record the number of distinct values
# observed in each split; "maybe time series" flags version-like columns.
col_cat = [
           'ProductName', # cardinality(6, 6), same distribution
           'EngineVersion', # maybe time series
           'AppVersion', # maybe time series
           'AvSigVersion', # maybe time series
           'DefaultBrowsersIdentifier', # cardinality(1730, 1548)
           'AVProductStatesIdentifier', # cardinality(28970, 23492)
           'CountryIdentifier', # cardinality(222, 222)
           'CityIdentifier', # cardinality(107366, 105817)
           'OrganizationIdentifier', # cardinality(49, 50)
           'GeoNameIdentifier', # cardinality(292, 289)
           'LocaleEnglishNameIdentifier', # cardinality(276, 278)
           'Platform', # cardinality(4, 4)
           'Processor', # cardinality(3, 3)
           'OsVer', # cardinality(58, 44) maybe time series
           'OsPlatformSubRelease', # cardinality(58, 44)
           'OsBuildLab', # cardinality(663, 673)
           'SkuEdition', # cardinality(8, 8)
           'IeVerIdentifier', # cardinality(303, 294) maybe time series
           'SmartScreen', # cardinality(21, 21)
           'Census_MDC2FormFactor', # cardinality(13, 14)
           'Census_DeviceFamily', # cardinality(3, 3)
           'Census_OEMNameIdentifier', # cardinality(3832, 3685)
           'Census_OEMModelIdentifier', # cardinality(175365, 167776)
           'Census_ProcessorManufacturerIdentifier', # cardinality(7, 7)
           'Census_ProcessorModelIdentifier', # cardinality(3428, 3438)
           'Census_ProcessorClass', # cardinality(3, 3)
           'Census_PrimaryDiskTypeName', # cardinality(4, 4)
           'Census_ChassisTypeName', # cardinality(52, 48)
           'Census_PowerPlatformRoleName', # cardinality(10, 10)
           'Census_InternalBatteryType', # cardinality(78, 63)
           'Census_OSVersion', # cardinality(469, 475)
           'Census_OSArchitecture', # cardinality(3, 3)
           'Census_OSBranch', # cardinality(32, 29)
           'Census_OSEdition', # cardinality(33, 36)
           'Census_OSSkuName', # cardinality(30, 31)
           'Census_OSInstallTypeName', # cardinality(9, 9)
           'Census_OSInstallLanguageIdentifier', # cardinality(39, 39)
           'Census_OSUILocaleIdentifier', # cardinality(147, 139)
           'Census_OSWUAutoUpdateOptionsName', # cardinality(6, 6)
           'Census_GenuineStateName', # cardinality(5, 5)
           'Census_ActivationChannel', # cardinality(6, 6)
           'Census_FlightRing', # cardinality(10, 11)
           'Census_FirmwareManufacturerIdentifier', # cardinality(712, 722)
           'Census_FirmwareVersionIdentifier', # cardinality(50494, 49811)
           'Wdft_RegionIdentifier', # cardinality(15, 15)
           'OsBuild', # cardinality(58, 44)
           'OsSuite', # cardinality(58, 44)
           # Derived components split out of OsBuildLab.
           'OsBuildLab_major',
           'OsBuildLab_minor',
           'OsBuildLab_build',
           'OsBuildLab_architecture',
           ]
# =============================================================================
# main
# =============================================================================
if __name__ == "__main__":
    utils.start(__file__)
    # Load only the categorical columns from both splits.
    tr = pd.read_feather('../data/train.f')[col_cat]
    te = pd.read_feather('../data/test.f')[col_cat]
    le = LabelEncoder()
    for c in col_cat:
        print('processing', c)
        # Numeric columns: fill NaN with (global min - 1) so the sentinel
        # is distinct from every real value across train+test.
        if tr[c].dtype != 'O':
            min_ = min(tr[c].min(), te[c].min()) -1
            tr[c].fillna(min_, inplace=True)
            te[c].fillna(min_, inplace=True)
        else:
            # Object columns: use a string sentinel for missing values.
            tr[c].fillna('na dayo', inplace=True)
            te[c].fillna('na dayo', inplace=True)
        # Fit the encoder on train+test together so both share one mapping.
        le.fit( tr[c].append(te[c]) )
        tr[c] = le.transform(tr[c])
        te[c] = le.transform(te[c])
    # Downcast dtypes in place to shrink the feather output.
    utils.reduce_mem_usage(tr)
    utils.reduce_mem_usage(te)
    # Persist with the feature-set prefix (f002_) on every column.
    tr.add_prefix(PREF+'_').to_feather(f'../data/train_{PREF}.f')
    te.add_prefix(PREF+'_').to_feather(f'../data/test_{PREF}.f')
    print('==== categories ====')
    print(tr.head().add_prefix(PREF+'_').columns.tolist())
    utils.end(__file__)
| StarcoderdataPython |
1946166 | from setuptools import setup
# Packaging metadata for the JSONdb flat-file database library.
setup(name='JSONdb',
      version='0.1',
      description='A lightweight flat file database Python API using JSON as a storage medium.',
      url='https://bitbucket.org/harryjubb/jsondb',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      # Single package, no sub-packages or data files.
      packages=['jsondb'],
      # Not zip-safe: installed as a regular directory, not an egg.
      zip_safe=False)
6647207 | # -*- coding: utf-8 -*-
"""Git client plugin
.. module:: client.plugins.gitclient
:platform: Windows, Unix
:synopsis: Git client
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from hydratk.extensions.client.core import plugin
from hydratk.extensions.client.core.tkimport import tk, ttk, tkfd
from hydratk.extensions.client.core.utils import fix_path
try:
from git import Repo
except ImportError:
pass
from shutil import rmtree
import os
class Plugin(plugin.Plugin):
"""Class Plugin
"""
# gui elements
_win = None
_pane = None
_frame_left = None
_frame_right = None
_tree = None
_menu = None
_vbar = None
_url = None
_user = None
_passw = None
_name = None
_email = None
_msg = None
_author = None
_files = None
_files_bar = None
    def _init_plugin(self):
        """Method initializes plugin metadata (id, name, version, author, year)

        Args:
            none

        Returns:
            void

        """
        self._plugin_id = 'gitclient'
        self._plugin_name = 'GitClient'
        self._plugin_version = '0.1.0'
        self._plugin_author = '<NAME> <<EMAIL>>, HydraTK team <<EMAIL>>'
        self._plugin_year = '2017 - 2018'
    def _setup(self):
        """Method executes plugin setup

        Registers the GitClient menu and its Clone / Repository manager
        items. When GitPython cannot be imported the menu is not
        registered and an error is logged instead.

        Args:
            none

        Returns:
            void

        """
        try:
            # Probe for GitPython here; ImportError is raised when the
            # package is missing (and, per GitPython behavior, also when
            # the git executable cannot be located -- TODO confirm).
            import git
            self._set_menu('gitclient', 'htk_gitclient_menu', 'plugin')
            self._set_menu_item('gitclient', 'htk_gitclient_menu_clone', self._win_clone)
            self._set_menu_item('gitclient', 'htk_gitclient_menu_repomanager', self._win_repomanager)
        except ImportError:
            self.logger.error('Plugin {0} could not be loaded, git executable is not installed.'.format(self._plugin_name))
def _win_clone(self):
"""Method displays clone window
Args:
none
Returns:
void
"""
win = tk.Toplevel(self.root)
win.title(self.trn.msg('htk_gitclient_clone_title'))
win.transient(self.root)
win.resizable(False, False)
win.geometry('+%d+%d' % (self.root.winfo_screenwidth() / 3, self.root.winfo_screenheight() / 3))
win.tk.call('wm', 'iconphoto', win._w, self.root.images['logo'])
tk.Label(win, text=self.trn.msg('htk_gitclient_clone_url')).grid(row=0, column=0, sticky='e')
url = tk.Entry(win, width=70)
url.grid(row=0, column=1, padx=3, pady=10, sticky='e')
url.focus_set()
tk.Label(win, text=self.trn.msg('htk_gitclient_clone_user')).grid(row=1, column=0, sticky='e')
user = tk.Entry(win, width=20)
user.grid(row=1, column=1, padx=3, pady=3, sticky='w')
tk.Label(win, text=self.trn.msg('htk_gitclient_clone_password')).grid(row=2, column=0, sticky='e')
passw = tk.Entry(win, width=20)
passw.grid(row=2, column=1, padx=3, pady=3, sticky='w')
tk.Label(win, text=self.trn.msg('htk_gitclient_clone_dirpath')).grid(row=3, column=0, sticky='e')
dirpath = tk.Entry(win, width=70)
dirpath.grid(row=3, column=1, padx=3, pady=3, sticky='w')
tk.Button(win, text='...', command=lambda: self._set_dirpath(dirpath)).grid(row=3, column=2, sticky='w')
error = tk.Label(win, text='', foreground='#FF0000')
error.grid(row=4, column=1, sticky='w')
btn = tk.Button(win, text=self.trn.msg('htk_gitclient_clone_button'),
command=lambda: self._clone_repo(url.get(), dirpath.get(), user.get(), passw.get(), error, win))
btn.grid(row=4, column=2, padx=3, pady=3, sticky='e')
win.bind('<Escape>', lambda f: win.destroy())
def _win_repomanager(self):
"""Method displays repository manager window
Args:
none
Returns:
void
"""
self._win = tk.Toplevel(self.root)
self._win.title(self.trn.msg('htk_gitclient_repomanager_title'))
self._win.transient(self.root)
self._win.resizable(False, False)
self._win.geometry('+%d+%d' % (self.root.winfo_screenwidth() / 5, self.root.winfo_screenheight() / 10))
self._win.tk.call('wm', 'iconphoto', self._win._w, self.root.images['logo'])
self._pane = tk.PanedWindow(self._win, orient=tk.HORIZONTAL)
self._pane.pack(expand=True, fill=tk.BOTH)
# left frame
self._frame_left = tk.Frame(self._pane)
self._set_tree()
self._pane.add(self._frame_left)
# right frame
self._frame_right = tk.Frame(self._pane)
self._set_config()
self._set_commit()
self._pane.add(self._frame_right)
self._win.bind('<Escape>', lambda f: self._win.destroy())
def _set_tree(self):
"""Method sets tree gui
Args:
none
Returns:
void
"""
self._vbar = ttk.Scrollbar(self._frame_left, orient=tk.VERTICAL)
self._tree = ttk.Treeview(self._frame_left, columns=(), show='tree', displaycolumns=(), height=10, selectmode='browse',
yscrollcommand=self._vbar.set)
self._vbar.config(command=self._tree.yview)
self._vbar.pack(side=tk.RIGHT, fill=tk.Y)
self._tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
for name, cfg in self.explorer._projects.items():
if ('git' in cfg):
self._tree.insert('', 'end', text=name)
# context menu
self._menu = tk.Menu(self._tree, tearoff=False)
self._menu.add_command(label=self.trn.msg('htk_gitclient_repomanager_push'), command=self._push)
self._menu.add_command(label=self.trn.msg('htk_gitclient_repomanager_pull'), command=self._pull)
# events
self._tree.bind('<ButtonRelease-1>', self._fill_repo_detail)
self._tree.bind('<Any-KeyRelease>', self._fill_repo_detail)
self._tree.bind('<Button-3>', self._context_menu)
def _context_menu(self, event=None):
"""Method sets context menu
Args:
event (obj): event
Returns:
void
"""
self._menu.tk_popup(event.x_root, event.y_root)
def _set_config(self):
"""Method sets configuration gui
Args:
none
Returns:
void
"""
row = 0
font = ('Arial', 10, 'bold')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_title'), font=font).grid(row=row, column=0, sticky='w')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_url')).grid(row=row + 1, column=0, sticky='e')
self._url = tk.Entry(self._frame_right, width=70)
self._url.grid(row=row + 1, column=1, padx=3, pady=3, sticky='w')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_user')).grid(row=row + 2, column=0, sticky='e')
self._user = tk.Entry(self._frame_right, width=20)
self._user.grid(row=row + 2, column=1, padx=3, pady=3, sticky='w')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_password')).grid(row=row + 3, column=0, sticky='e')
self._passw = tk.Entry(self._frame_right, width=20)
self._passw.grid(row=row + 3, column=1, padx=3, pady=3, sticky='w')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_name')).grid(row=row + 4, column=0, sticky='e')
self._name = tk.Entry(self._frame_right, width=40)
self._name.grid(row=row + 4, column=1, padx=3, pady=3, sticky='w')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_email')).grid(row=row + 5, column=0, sticky='e')
self._email = tk.Entry(self._frame_right, width=40)
self._email.grid(row=row + 5, column=1, padx=3, pady=3, sticky='w')
error = tk.Label(self._frame_right, text='', foreground='#FF0000')
error.grid(row=row + 6, column=1, sticky='w')
btn = tk.Button(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_config_save'),
command=lambda: self._save_config(self._url.get(), self._user.get(), self._passw.get(), self._name.get(), self._email.get(), error))
btn.grid(row=row + 6, column=2, padx=3, pady=3, sticky='e')
def _set_commit(self):
"""Method sets commit gui
Args:
none
Returns:
void
"""
row = 7
font = ('Arial', 10, 'bold')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_title'), font=font).grid(row=row, column=0, sticky='w')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_message')).grid(row=row + 1, column=0, sticky='e')
self._msg = tk.Text(self._frame_right, background='#FFFFFF', height=7, width=50)
self._msg.grid(row=row + 1, column=1, rowspan=2, sticky='w')
row += 1
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_author')).grid(row=row + 3, column=0, sticky='e')
self._author = tk.Entry(self._frame_right, width=40)
self._author.grid(row=row + 3, column=1, padx=3, pady=3, sticky='w')
push = tk.BooleanVar(value=True)
tk.Checkbutton(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_push'), variable=push).grid(row=row + 3, column=2, sticky='e')
error = tk.Label(self._frame_right, text='', foreground='#FF0000')
error.grid(row=row + 4, column=1, sticky='w')
btn = tk.Button(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_commit'),
command=lambda: self._commit(self._msg.get('1.0', 'end-1c'), self._author.get(), [], push.get(), error))
btn.grid(row=row + 4, column=2, padx=3, pady=3, sticky='e')
tk.Label(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_files'), font=font).grid(row=row + 5, column=0, sticky='w')
select_all = tk.BooleanVar(value=False)
tk.Checkbutton(self._frame_right, text=self.trn.msg('htk_gitclient_repomanager_commit_select_all'), variable=select_all,
command=lambda: self._select_all_files(select_all.get())).grid(row=row + 6, column=1, sticky='w')
self._files_bar = ttk.Scrollbar(self._frame_right, orient=tk.VERTICAL)
self._files = ttk.Treeview(self._frame_right, columns=('operation', 'file'), show='tree', displaycolumns=('operation', 'file'), height=10, selectmode='browse',
yscrollcommand=self._files_bar.set)
self._files_bar.configure(command=self._files.yview)
self._files.grid(row=row + 7, column=1, sticky=tk.NSEW)
self._files_bar.grid(row=row + 7, column=2, sticky='nsw')
self._files.column('#0', stretch=False, width=40)
self._files.column('operation', stretch=False, width=50)
self._files.column('file', stretch=True, width=200)
self._files.bind('<ButtonRelease-1>', self._select_file)
self._files.bind('<Any-KeyRelease>', self._select_file)
def _set_dirpath(self, entry):
"""Method sets dirpath
Args:
entry (obj): entry reference
Returns:
void
"""
entry.delete(0, tk.END)
entry.insert(tk.END, tkfd.askdirectory())
def _clone_repo(self, url, dirpath, user='', passw='', error=None, win=None):
"""Method clones repository
Args:
url (str): repository url
dirpath (str): directory path
user (str): username
pass (str): password
error (obj): error label reference
win (obj): window reference
Returns:
void
"""
if (error is not None):
error.config(text='')
proj_name = dirpath.split('/')[-1]
if (len(url) == 0):
error.config(text=self.trn.msg('htk_gitclient_mandatory_field', self.trn.msg('htk_gitclient_clone_url')))
return
elif (len(dirpath) == 0):
error.config(text=self.trn.msg('htk_gitclient_mandatory_field', self.trn.msg('htk_gitclient_clone_dirpath')))
return
elif (proj_name in self.explorer._projects):
error.config(text=self.trn.msg('htk_gitclient_clone_project_exist', proj_name))
return
repo = None
try:
if (win is not None):
win.destroy()
url_auth = self._prepare_url(url, user, passw)
self.logger.info(self.trn.msg('htk_gitclient_clone_start', url))
repo = Repo.clone_from(url_auth, dirpath)
self.logger.info(self.trn.msg('htk_gitclient_clone_finish'))
self._create_project(url, dirpath, user, passw)
except Exception as ex:
self.logger.error(ex)
if (os.path.exists(dirpath)):
rmtree(dirpath)
finally:
if (repo is not None):
repo.close()
def _create_project(self, url, dirpath, user, passw):
"""Method creates project from repository
Args:
url (str): repository url
dirpath (str): directory path
user (str): username
pass (str): password
Returns:
void
"""
dirpath = fix_path(dirpath)
proj_name = dirpath.split('/')[-1]
self.explorer._projects[proj_name] = {'path': dirpath, 'pythonpath': [dirpath + '/lib/yodalib', dirpath + '/helpers/yodahelpers'],
'git': {'url': url, 'username': user, 'password': <PASSWORD>, 'name': '', 'email': ''}}
node = self.explorer._tree.insert('', 'end', text=proj_name, values=(dirpath, 'directory'))
self.explorer._populate_tree(node)
self.logger.info(self.trn.msg('htk_core_project_created', proj_name))
self.config.data['Projects'] = self.explorer._projects
self.explorer.autocompleter.update_pythonpath()
self.config.save()
def _fill_repo_detail(self, event=None):
"""Method fills repository detail
Args:
event (obj): event
Returns:
void
"""
item = self._tree.selection()
if (len(item) == 0):
return
project = self._tree.item(item)['text']
cfg = self.config.data['Projects'][project]
repo_path = cfg['path']
cfg = cfg['git']
self._url.delete(0, tk.END)
self._url.insert(tk.END, cfg['url'])
self._user.delete(0, tk.END)
self._user.insert(tk.END, cfg['username'])
self._passw.delete(0, tk.END)
self._passw.insert(tk.END, cfg['password'])
self._name.delete(0, tk.END)
self._name.insert(tk.END, cfg['name'])
self._email.delete(0, tk.END)
self._email.insert(tk.END, cfg['email'])
self._author.delete(0, tk.END)
author = '{0} <{1}>'.format(cfg['name'], cfg['email']) if (cfg['email'] != '') else ''
self._author.insert(tk.END, author)
self._msg.delete('1.0', 'end')
self._fill_changed_files(repo_path)
def _save_config(self, url, user='', passw='', name='', email='', error=None):
"""Method saves configuration
Args:
url (str): repository url
user (str): username
passw (str): password
name (str): author name
email (str): author email
error (obj): error label reference
Returns:
void
"""
item = self._tree.selection()
if (len(item) == 0):
return
if (error is not None):
error.config(text='')
if (len(url) == 0):
error.config(text=self.trn.msg('htk_gitclient_mandatory_field', self.trn.msg('htk_gitclient_repomanager_config_url')))
return
project = self._tree.item(item)['text']
repo_path = self.config.data['Projects'][project]['path']
cfg = self.config.data['Projects'][project]['git']
repo = None
try:
if ([cfg['url'], cfg['username'], cfg['password'], cfg['name'], cfg['email']] != [url, user, passw, name, email]):
repo = Repo(repo_path)
repo.git.remote('set-url', 'origin', self._prepare_url(url, user, passw))
repo.git.config('user.name', name)
repo.git.config('user.email', email)
cfg['url'] = url
cfg['username'] = user
cfg['password'] = <PASSWORD>
cfg['name'] = name
cfg['email'] = email
self.config.save()
self.logger.debug(self.trn.msg('htk_gitclient_repomanager_config_saved', project))
except Exception as ex:
self.logger.error(ex)
finally:
if (repo is not None):
repo.close()
def _commit(self, msg, author, files, push=False, error=None):
"""Method performs commit to local repository
Args:
msg (str): commit message
author (str): author
files (list): files to commit
push (bool): push commit to remote repository
error (obj): error label reference
Returns:
void
"""
item = self._tree.selection()
if (len(item) == 0):
return
if (error is not None):
error.config(text='')
if (len(msg) == 0):
error.config(text=self.trn.msg('htk_gitclient_mandatory_field', self.trn.msg('htk_gitclient_repomanager_commit_message')))
return
elif (len(author) == 0):
error.config(text=self.trn.msg('htk_gitclient_mandatory_field', self.trn.msg('htk_gitclient_repomanager_commit_author')))
return
cnt, files = 0, []
for i in self._files.get_children():
item = self._files.item(i)
if (item['text'] != ''):
cnt += 1
files.append(item['values'][1])
if (error is not None and cnt == 0):
error.config(text=self.trn.msg('htk_gitclient_repomanager_commit_no_files_selected'))
return
repo = None
try:
project = self._tree.item(self._tree.selection())['text']
repo_path = self.config.data['Projects'][project]['path']
repo = Repo(repo_path)
repo.git.add('--all', files)
repo.git.commit(message=msg, author=author)
self.logger.info(self.trn.msg('htk_gitclient_repomanager_commit_finish', project))
if (push):
self._push()
except Exception as ex:
self.logger.error(ex)
finally:
if (repo is not None):
repo.close()
def _push(self, event=None):
"""Method performs push to remote repository
Args:
event (obj): event
Returns:
void
"""
item = self._tree.selection()
if (len(item) == 0):
return
repo = None
try:
project = self._tree.item(item)['text']
repo_path = self.config.data['Projects'][project]['path']
self.logger.info(self.trn.msg('htk_gitclient_repomanager_push_start', project))
repo = Repo(repo_path)
repo.git.push('origin')
self.logger.info(self.trn.msg('htk_gitclient_repomanager_push_finish'))
except Exception as ex:
self.logger.error(ex)
finally:
if (repo is not None):
repo.close()
def _pull(self, event=None):
"""Method performs pull from remote repository
Args:
event (obj): event
Returns:
void
"""
item = self._tree.selection()
if (len(item) == 0):
return
repo = None
try:
project = self._tree.item(item)['text']
repo_path = self.config.data['Projects'][project]['path']
self.logger.info(self.trn.msg('htk_gitclient_repomanager_pull_start', project))
repo = Repo(repo_path)
repo.git.pull('origin')
self.explorer.refresh(path=repo_path)
self.logger.info(self.trn.msg('htk_gitclient_repomanager_pull_finish'))
except Exception as ex:
self.logger.error(ex)
finally:
if (repo is not None):
repo.close()
def _fill_changed_files(self, repo_path):
"""Method fills changed
Args:
repo_path (str): repository path
Returns:
void
"""
self._files.delete(*self._files.get_children())
changes = self._get_changed_files(repo_path)
for operation, files in changes.items():
for f in files:
self._files.insert('', 'end', text='', values=(operation, f))
def _get_changed_files(self, repo_path):
"""Method gets changed files available for commit
Args:
repo_path (str): repository path
Returns:
dict
"""
repo, files = None, []
try:
repo = Repo(repo_path)
status = repo.git.status('--porcelain')
added, modified, deleted = [], [], []
for rec in status.splitlines():
operation, fname = rec[:2], rec[3:]
if ('?' in operation):
added.append(fname)
elif ('M' in operation):
modified.append(fname)
elif ('D' in operation):
deleted.append(fname)
files = {
'add' : added,
'modify' : modified,
'delete' : deleted
}
except Exception as ex:
self.logger.error(ex)
finally:
if (repo is not None):
repo.close()
return files
def _select_file(self, event=None):
"""Method selects or deselects file for commit
Args:
event (obj): event
Returns:
void
"""
sel = self._files.selection()
if (len(sel) == 0):
return
item = self._files.item(sel)
self._files.item(sel, text='X' if item['text'] == '' else '')
def _select_all_files(self, value):
"""Method selects or deselects all files for commit
Args:
value (bool): requested value
Returns:
void
"""
value = 'X' if (value) else ''
for i in self._files.get_children():
self._files.item(i, text=value)
def _prepare_url(self, url, user=None, passw=None):
"""Method prepares url with authentication
Args:
url (str): repository URL
user (str): username
passw (str): password
Returns:
str
"""
if (len(user) > 0 and '://' in url):
url = url.replace('://', '://{0}:{1}@'.format(user, passw))
return url
| StarcoderdataPython |
5011158 | <reponame>MxBromelia/SQL-Judge<filename>src/sql_judge/adapter.py
""" Database adapters """
from typing import List, Dict, Tuple
from abc import ABC, abstractmethod
class AbstractAdapter(ABC):
    """
    The main and only source for building the schema.

    All the methods in this interface are mandatory. If you do not want to add
    a specific element to the schema to be built, implement the given method
    returning an empty list ([]). Note, however, that omitting elements that
    others are related to, namely tables and columns, may cause unexpected
    behavior.

    The methods can be classified in two types: entity methods and relation
    methods.

    Entity methods are the ones used to generate an entity (table, column,
    etc.), and they need to return a list of dicts. Many of these methods also
    need to inform their relation to other entities, but their main purpose is
    to describe which entities exist in the database and which properties each
    of them has. Entities are represented by dicts because a dict lets you
    attach any extra property you want without enforcing anything beyond the
    entity's name and its relations. For example, if you want to write a
    validation that uses a column's data type, add a key holding it (e.g.
    'data_type') and it becomes accessible on the built object as a method
    (e.g. column.data_type).

    Relation methods, on the other hand, inform specific relationships between
    entities — primary and foreign keys. This information is kept separate
    from the column method in order not to pile a disproportionate amount of
    logic into one method (columns). They need to return an ordered collection
    (e.g. a list) of tuples; which information appears, and in which order, is
    described in each method's docstring.
    """
    @abstractmethod
    def tables(self) -> List[Dict[str, str]]:
        """Return a list containing a dict for every table in the schema

        mandatory keys:
          - name: The Table name
        forbidden keys:
          - columns
          - triggers
        """
        raise NotImplementedError
    @abstractmethod
    def columns(self) -> List[Dict[str, str]]:
        """Return a dict for every column in the schema

        mandatory keys:
          - name: The Column name
          - table: The name of the Table that owns the column
        forbidden keys:
          - constraints
          - indexes
          - primary_keys
          - references
        """
        raise NotImplementedError
    @abstractmethod
    def triggers(self) -> List[Dict[str, str]]:
        """Return a list of dicts, each representing one of every triggers in the schema

        mandatory keys:
          - name: the Trigger name
          - table: The Name of the Table that owns it
        """
        raise NotImplementedError
    @abstractmethod
    def primary_keys(self) -> List[Tuple[str, str]]:
        """Return a tuple for every table that contains one primary key.

        If the said table has no primary key, do not put the table at all.
        The tuple must have the table name as its first element, and the name
        of the column that is its primary key as its second. Currently does not
        support primary keys with multiple columns.
        """
        raise NotImplementedError
    @abstractmethod
    def references(self) -> List[Tuple[str, str, str]]:
        """Return a tuple for every column that is a foreign key for a table, ordered by:

        (the name of the column table, the column name, the name of the referenced table)
        If a column is not a reference for any table, it should not appear in this list at all.
        """
        raise NotImplementedError
    @abstractmethod
    def indexes(self) -> List[Dict[str, str]]:
        """Return a dict for every index in the schema

        mandatory keys:
          - name: the Index name
          - column: the column bound with the index
        currently, it does not support a multi-column index"""
        raise NotImplementedError
    @abstractmethod
    def constraints(self) -> List[Dict[str, str]]:
        """Return a dict for every constraint present in the schema

        mandatory keys:
          - name: the Constraint name
          - column: the column bound with constraint
        """
        raise NotImplementedError
    @abstractmethod
    def functions(self) -> List[Dict[str, str]]:
        """Return a dict for every function present in the schema

        mandatory keys:
          - name: the function name
        """
        raise NotImplementedError
    @abstractmethod
    def procedures(self) -> List[Dict[str, str]]:
        """Return a dict for every procedure present in the schema

        mandatory keys:
          - name: the procedure name
        """
        raise NotImplementedError
    @abstractmethod
    def sequences(self) -> List[Dict[str, str]]:
        """Return a dict for every sequence present in the schema

        mandatory keys:
          - name: the sequence name
        """
        raise NotImplementedError
| StarcoderdataPython |
4985392 | <gh_stars>0
import pytest
import os
import sys
# Make the package in the current working directory importable so the tests
# can be run directly from a repository checkout.
base_path = os.path.join(os.path.abspath(os.path.dirname(__name__)))
sys.path.append(os.path.join(base_path))
from nso_jsonrpc_requester import NsoJsonRpcComet
def test_comet_init_bad_data(request_data_login_get_comet):
    """Invalid argument types and an illegal start/stop sequence must raise."""
    comet = NsoJsonRpcComet('http', 'example.com', '8080', 'admin', 'admin', ssl_verify=False)
    # Each of these passes a wrongly typed argument and must fail fast.
    bad_calls = [
        lambda: comet.subscribe_changes(path=1),
        lambda: comet.subscribe_poll_leaf(path=1, interval=1),
        lambda: comet.subscribe_poll_leaf(path='/services/path', interval='test'),
        lambda: comet.subscribe_cdboper(path=1),
    ]
    for bad_call in bad_calls:
        with pytest.raises(TypeError):
            bad_call()
    # Starting raises here, but a first stop is still allowed; a second
    # stop on an already stopped comet must raise.
    with pytest.raises(Exception):
        comet.start_comet()
    comet.stop_comet()
    with pytest.raises(Exception):
        comet.stop_comet()
def test_comet_init(request_data_login_get_comet):
    """A comet session accepts every subscription type and shuts down cleanly."""
    comet = NsoJsonRpcComet('http', 'example.com', '8080', 'admin', 'admin', ssl_verify=False)
    # Exercise each subscription flavour once.
    comet.subscribe_changes(path='/services/path')
    comet.subscribe_poll_leaf(path='/services/path', interval=30)
    comet.subscribe_cdboper(path='/services/path')
    comet.subscribe_upgrade()
    comet.subscribe_jsonrpc_batch()
    # Query and poll before tearing the session down.
    comet.get_subscriptions()
    comet.comet_poll()
    comet.stop_comet()
| StarcoderdataPython |
8145108 | <filename>15/15.6/web.py
'''
15-6. Match simple Web domain names that begin with "www." and end with a ".com"
suffix, e.g., www.yahoo.com. Extra credit if your RE also supports other high-level
domain names: .edu, .net, etc., e.g., www.ucsc.edu.
'''
import re
def is_web_domain(text):
    """Return True if *text* is a simple web domain of the form
    www.<label>.<tld>, where <tld> is one of com, edu or net.

    Uses re.fullmatch instead of the match-then-compare-group idiom:
    the whole string must match, so e.g. 'www.foo.com.evil' is rejected.
    """
    return re.fullmatch(r'www\.\w+\.(com|edu|net)', text) is not None
# Module-level smoke tests: they run on import and raise AssertionError on regression.
assert is_web_domain('www.ucsc.edu')
assert is_web_domain('www.vk.com')
assert is_web_domain('www.wd40.net')
assert not is_web_domain('wd40.net')
assert not is_web_domain('wd40.nl')
assert not is_web_domain('-.nl')
9642419 | from django.contrib import admin
from django.urls import include, path
from django.views.generic.base import TemplateView
# URL routes. Order matters: the specific prefixes must come before the
# catch-all "" route at the end.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/", include("mpact.urls")),
    # The remaining routes all serve the same SPA shell (index.html);
    # client-side routing takes over from there.
    path("login/", TemplateView.as_view(template_name="index.html")),
    path("chat/", TemplateView.as_view(template_name="index.html")),
    path("flagged-messages/", TemplateView.as_view(template_name="index.html")),
    path("", TemplateView.as_view(template_name="index.html")),
]
| StarcoderdataPython |
9785854 | <gh_stars>0
from Neurosetta.inputs import swc_input
from Neurosetta.outputs import navis_output, graph_output
class rosettaNEURON:
    """General core class used to move between neuron representations."""

    def __init__(self, neuron):
        # Only file-path input is currently handled: parse the SWC file.
        # NOTE(review): non-string inputs leave the instance without swcTable.
        if isinstance(neuron, str):
            self.swcTable = swc_input(neuron)

    def to_navis(self):
        """Convert this neuron to a navis neuron object."""
        return navis_output(self)

    def to_graph(self, directed):
        """Convert this neuron to a graph; *directed* selects the graph kind."""
        return graph_output(self, directed)
4945769 | stuff = list()
stuff.append('python')
stuff.append('chuck')
stuff.sort()
# Calling the dunder directly is equivalent to stuff[0]; shown for illustration.
print(stuff.__getitem__(0))
print(list.__getitem__(stuff,0))
# Inspect the object's capabilities via the output of dir()
print(dir(stuff))
# just sample Input >program >Output
usf = input('Enter the US Floor Number: ')
# US floors are 1-based; most other conventions are 0-based at ground level.
wf = int(usf) - 1
print('Non-US Floor Number is',wf)
3400928 | import os
import django
import sys
import urllib2
import xml.etree.ElementTree as ET
# Make the project root importable and point Django at its settings module
# before the model import below — the ORM needs configured settings.
pro_dir = os.getcwd()
sys.path.append(pro_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BioDesigner.settings")
from design.models import parts, features, part_twins, part_features
# Part-detail XML endpoint of the iGEM Registry; the part name is appended.
baseXmlUrl = 'http://parts.igem.org/cgi/xml/part.cgi?part='
def extractAndSave(partObj, xmlStr):
    # Parse the registry XML for one part and persist its features, twin
    # relations and part URL to the database. (Python 2 code.)
    try:
        doc = ET.fromstring(xmlStr)
    except:
        # NOTE(review): on parse failure `doc` stays undefined; the next
        # try block then raises NameError and the bare except returns early.
        print 'part %s error, passed' % partObj.part_name
    try:
        part_url = doc.find('part_list/part/part_url').text
    except:
        return
    featuresInfo = doc.findall('part_list/part/features/feature')
    twins = doc.findall('part_list/part/twins/twin')
    if featuresInfo:
        for f in featuresInfo:
            fId = int(f.find('id').text)
            #print fId
            # get_or_create returns (object, created) — only fill in the
            # attributes when the feature row is newly created.
            featureObj = features.objects.get_or_create(feature_id=fId)
            if featureObj[1]:
                title = f.find('title')
                #print title.text
                ftype = f.find('type')
                #print ftype.text
                direction = f.find('direction')
                #print direction.text
                startpos = f.find('startpos')
                #print startpos.text
                endpos = f.find('endpos')
                #print endpos.text
                featureObj[0].title = title.text
                featureObj[0].feature_type = ftype.text
                featureObj[0].direction = direction.text
                featureObj[0].startpos = int(startpos.text)
                featureObj[0].endpos = int(endpos.text)
                try:
                    featureObj[0].save()
                except:
                    pass
            # Link the part to the feature regardless of whether the feature
            # already existed. (Original indentation was lost — TODO confirm
            # this statement's nesting against the upstream source.)
            newPF = part_features(part=partObj, feature=featureObj[0])
            newPF.save()
    if twins:
        for twin in twins:
            # Only record a twin relation when the twin part is already known.
            partB = parts.objects.filter(part_name=twin.text)
            if len(partB) == 0:
                continue
            else:
                partB = partB[0]
            newPP = part_twins(part_1=partObj, part_2=partB)
            try:
                newPP.save()
            except:
                pass
    partObj.part_url = part_url
    partObj.save()
def mainFunc():
    # Walk all parts in pages of `step`, fetch each part's registry XML and
    # persist its details. `head` starts mid-table — presumably resuming a
    # previously interrupted run; TODO confirm. (Python 2 code.)
    #get all parts
    step = 50
    head = 29800
    tail = head + step
    total = parts.objects.count() - head
    while total > 0:
        partlist = parts.objects.all()[head:tail]
        print 'first %d' % tail
        for partObj in partlist:
            print 'processing part %s' % partObj.part_name
            # Skip parts whose features were already imported.
            tmp = part_features.objects.filter(part=partObj)
            if len(tmp) != 0:
                print 'passing'
                continue
            print 'getting xml data'
            req = urllib2.Request(baseXmlUrl+partObj.part_name)
            response = urllib2.urlopen(req)
            xmlStr = response.read()
            print 'extracting data'
            extractAndSave(partObj,xmlStr)
        # Advance the page window.
        head += step
        tail += step
        total -= step
if __name__ == '__main__':
    # Initialize the Django app registry before touching the ORM.
    django.setup()
    mainFunc()
9607135 | <gh_stars>0
# numeros = [2,9,4,11]
# print(numeros[-1])
n = 3
i = 0
# Print 0 .. n-1 using an explicit while loop.
while (i < n ):
    print(i)
    i = i + 1
# for i in range(len(numeros)):
# print(numeros[i])
# n = 4
# for i in range(1,n):
# print(i)
249498 | <filename>test/test_election.py
import pytest
from socialchoice import Election, PairwiseBallotBox, RankedChoiceBallotBox
# Shared fixtures: an election with no ballots, and a small pairwise example
# reused by several tests below.
empty_election = Election(PairwiseBallotBox([]))
# Each ballot is (candidate_a, candidate_b, outcome-for-a).
example_votes = PairwiseBallotBox(
    [[0, 1, "win"], [3, 2, "loss"], [2, 3, "win"], [0, 3, "tie"], [3, 0, "win"]]
)
def test_get_ranked_pairs_ranking():
    """Tests that ranked_pairs on a pairwise ballot box produces the correct outcome."""
    # No ballots -> no ranking.
    assert empty_election.ranking_by_ranked_pairs() == []
    # The example election resolves to 2 > 3 > 0 > 1.
    election = Election(example_votes)
    assert election.ranking_by_ranked_pairs() == [2, 3, 0, 1]
def test_get_win_ratio():
    """Win-ratio ranking: empty on no ballots, scored list on the example."""
    assert empty_election.ranking_by_win_ratio() == []
    election = Election(example_votes)
    expected = [(2, 1.0), (0, 0.5), (3, 1 / 3), (1, 0.0)]
    assert election.ranking_by_win_ratio(include_score=True) == expected
def test_get_win_tie_ratio():
    """Win+tie-ratio ranking: empty on no ballots, scored list on the example."""
    assert empty_election.ranking_by_win_tie_ratio() == []
    election = Election(example_votes)
    expected = [(2, 1.0), (0, 2 / 3), (3, 0.5), (1, 0.0)]
    assert election.ranking_by_win_tie_ratio(include_score=True) == expected
def test_flatten_ties():
    """Candidates with equal scores are grouped together when group_ties is set."""
    ballots = PairwiseBallotBox([(1, 2, "win"), (3, 4, "win")], candidates={1, 2, 3, 4})
    election = Election(ballots)
    ranking = election.ranking_by_win_ratio(group_ties=True, include_score=True)
    assert ranking == [[(1, 1.0), (3, 1.0)], [(2, 0.0)]]
def test_borda_count_pairwise_raises_ballot_type_error():
    """Borda count is undefined for pairwise ballots and must raise."""
    pairwise_election = Election(PairwiseBallotBox([]))
    with pytest.raises(ValueError):
        pairwise_election.ranking_by_borda_count()
def test_borda_count_ranked_choice():
    """A single full ranking gives descending Borda scores 2, 1, 0."""
    election = Election(RankedChoiceBallotBox([[1, 2, 3]]))
    scored = election.ranking_by_borda_count(include_score=True)
    assert scored == [(1, 2), (2, 1), (3, 0)]
def test_borda_count_ranked_choice_with_ties():
    """Ballots containing tied groups (sets) still produce a Borda ranking."""
    ballots = RankedChoiceBallotBox([[1, 2, 3], [{1, 2, 3}], [1, {2, 3}]])
    election = Election(ballots)
    scored = election.ranking_by_borda_count(include_score=True)
    assert scored == [(1, 4), (2, 1), (3, 0)]
| StarcoderdataPython |
1992151 | <gh_stars>1-10
# coding: utf-8
from __future__ import unicode_literals
from ..compat import (compat_b64decode, compat_urllib_parse_unquote,
compat_urlparse)
from ..utils import determine_ext, update_url_query
from .bokecc import BokeCCBaseIE
class InfoQIE(BokeCCBaseIE):
_VALID_URL = r"https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)"
_TESTS = [
{
"url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
"md5": "b5ca0e0a8c1fed93b0e65e48e462f9a2",
"info_dict": {
"id": "A-Few-of-My-Favorite-Python-Things",
"ext": "mp4",
"description": "<NAME> presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
"title": "A Few of My Favorite [Python] Things",
},
},
{
"url": "http://www.infoq.com/fr/presentations/changez-avis-sur-javascript",
"only_matching": True,
},
{
"url": "http://www.infoq.com/cn/presentations/openstack-continued-delivery",
"md5": "4918d0cca1497f2244572caf626687ef",
"info_dict": {
"id": "openstack-continued-delivery",
"title": "OpenStack持续交付之路",
"ext": "flv",
"description": "md5:308d981fb28fa42f49f9568322c683ff",
},
},
{
"url": "https://www.infoq.com/presentations/Simple-Made-Easy",
"md5": "0e34642d4d9ef44bf86f66f6399672db",
"info_dict": {
"id": "Simple-Made-Easy",
"title": "Simple Made Easy",
"ext": "mp3",
"description": "md5:3e0e213a8bbd074796ef89ea35ada25b",
},
"params": {
"format": "bestaudio",
},
},
]
def _extract_rtmp_video(self, webpage):
# The server URL is hardcoded
video_url = "rtmpe://videof.infoq.com/cfx/st/"
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, "encoded id", default=None
)
real_id = compat_urllib_parse_unquote(
compat_b64decode(encoded_id).decode("utf-8")
)
playpath = "mp4:" + real_id
return [
{
"format_id": "rtmp_video",
"url": video_url,
"ext": determine_ext(playpath),
"play_path": playpath,
}
]
def _extract_cf_auth(self, webpage):
policy = self._search_regex(
r"InfoQConstants\.scp\s*=\s*\'([^\']+)\'", webpage, "policy"
)
signature = self._search_regex(
r"InfoQConstants\.scs\s*=\s*\'([^\']+)\'", webpage, "signature"
)
key_pair_id = self._search_regex(
r"InfoQConstants\.sck\s*=\s*\'([^\']+)\'", webpage, "key-pair-id"
)
return {
"Policy": policy,
"Signature": signature,
"Key-Pair-Id": key_pair_id,
}
def _extract_http_video(self, webpage):
http_video_url = self._search_regex(
r"P\.s\s*=\s*\'([^\']+)\'", webpage, "video URL"
)
http_video_url = update_url_query(
http_video_url, self._extract_cf_auth(webpage)
)
return [
{
"format_id": "http_video",
"url": http_video_url,
"http_headers": {"Referer": "https://www.infoq.com/"},
}
]
def _extract_http_audio(self, webpage, video_id):
    """Return the MP3 audio-only format if the page offers a download, else []."""
    fields = self._form_hidden_inputs("mp3Form", webpage)
    http_audio_url = fields.get("filename")
    if not http_audio_url:
        # No mp3 download form on this page.
        return []
    # base URL is found in the Location header in the response returned by
    # GET https://www.infoq.com/mp3download.action?filename=... when logged in.
    http_audio_url = compat_urlparse.urljoin(
        "http://ress.infoq.com/downloads/mp3downloads/", http_audio_url
    )
    # Sign the URL for CloudFront, same as the video formats.
    http_audio_url = update_url_query(
        http_audio_url, self._extract_cf_auth(webpage)
    )
    # audio file seem to be missing some times even if there is a download link
    # so probe URL to make sure
    if not self._is_valid_url(http_audio_url, video_id):
        return []
    return [
        {
            "format_id": "http_audio",
            "url": http_audio_url,
            # Audio-only: no video codec.
            "vcodec": "none",
        }
    ]
def _real_extract(self, url):
    """Extraction entry point: gather title/description and all formats."""
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)
    video_title = self._html_search_regex(r"<title>(.*?)</title>", webpage, "title")
    video_description = self._html_search_meta(
        "description", webpage, "description"
    )
    if "/cn/" in url:
        # for China videos, HTTP video URL exists but always fails with 403
        formats = self._extract_bokecc_formats(webpage, video_id)
    else:
        # Collect every delivery method; each helper returns [] when its
        # format is unavailable.
        formats = (
            self._extract_rtmp_video(webpage)
            + self._extract_http_video(webpage)
            + self._extract_http_audio(webpage, video_id)
        )
    self._sort_formats(formats)
    return {
        "id": video_id,
        "title": video_title,
        "description": video_description,
        "formats": formats,
    }
| StarcoderdataPython |
3291850 | <gh_stars>1-10
from django.db import close_old_connections
from django.dispatch import Signal

# Fired by a consumer when it starts handling a request; receivers get the
# ``environ`` as a keyword argument.  ``providing_args`` was purely
# documentational and was removed in Django 4.0, so it must not be passed
# to Signal() anymore.
consumer_started = Signal()

# Fired when a consumer is finished with a request.
consumer_finished = Signal()

# Connect connection closer to consumer finished as well, mirroring what
# Django does at the end of a normal HTTP request.
consumer_finished.connect(close_old_connections)
| StarcoderdataPython |
6438518 | <filename>flow/masked_autoregressive.py
import math
import torch
from torch import nn
import network
class AutoregressiveInverseAndLogProb(nn.Module):
    """Use MADE to build MAF: Masked Autoregressive Flow.

    Implements Eqs 2-5 in https://arxiv.org/abs/1705.07057
    """

    def __init__(self,
                 num_input,
                 use_context,
                 use_tanh,
                 hidden_size,
                 hidden_degrees,
                 flow_std,
                 activation):
        super().__init__()
        # MADE emits (mu, alpha) for every input dimension, hence
        # num_output = 2 * num_input (chunked apart in forward()).
        self.f_mu_alpha = network.MADE(num_input=num_input,
                                       num_output=num_input * 2,
                                       use_context=use_context,
                                       num_hidden=hidden_size,
                                       hidden_degrees=hidden_degrees,
                                       activation=activation)
        self.use_tanh = use_tanh
        # Global output rescaling, calibrated once by initialize_scale().
        self.scale = 1.0
        self.flow_std = flow_std

    @torch.no_grad()
    def initialize_scale(self, input, context=None):
        """Data-dependent init: set self.scale so the flow output std matches
        flow_std, and return the (pre-calibration) forward pass results."""
        u, log_det = self.forward(input, context)
        self.scale = self.flow_std / u.std()
        print('MAF output std: %.3f' % u.std())
        print('Multiplying output of flow by: %.3f' % self.scale)
        return u, log_det

    def forward(self, input, context=None):
        r"""Returns:
        - random numbers u used to generate an input: input = f(u)
        - log density of input corresponding to transform f
        Prob correction is log det |\partial_x f^{-1}|."""
        # Calculate u = f^{-1}(x) with equations 4-5
        # MAF parameterizes the forward direction using the inverse
        x = input
        mu, alpha = torch.chunk(self.f_mu_alpha(x, context), chunks=2, dim=-1)
        if self.use_tanh:
            # Bound the log-scales to (-1, 1) for numerical stability.
            alpha = torch.tanh(alpha)
        u = (x - mu) * torch.exp(-alpha)
        # u is the output of the inverse, so rescaling adds |d / du (scale * u)|
        # to the density
        return u * self.scale, (-alpha + math.log(self.scale)).sum(-1)
| StarcoderdataPython |
1812615 | <filename>notebooks/PerfForesightCRRA-Approximation.py<gh_stars>10-100
# ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# formats: py:percent,ipynb
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.10.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.8.5
# ---
# %% [markdown]
# # Perfect Foresight CRRA Model - Approximation
#
# [](https://econ-ark.org/materials/perfforesightcrra-approximation#launch)
#
# %% {"code_folding": []}
# Initial notebook set up
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import HARK
from copy import deepcopy
mystr = lambda number : "{:.4f}".format(number)
from HARK.utilities import plot_funcs
# These last two will make our charts look nice
plt.style.use('seaborn-darkgrid')
palette = plt.get_cmap('Dark2')
# %% [markdown]
# [PerfectForesightCRRA](http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/Consumption/PerfForesightCRRA) derives a number of results as approximations; for instance, the exact formula for the consumption function is derived as $$c_t = \left(\frac{R - (R\beta)^{1/\rho}}{R}\right)o_t$$
# and approximated by $$c_t \approx (r-\rho^{-1}(r-\theta))o_t$$.
#
# Your task is to make a series of plots that show how the quality of the approximation deteriorates as you change various model parameters. The notebook aims to make this easier by showing that under the baseline parameter values, the percentage amount of the error is pretty much constant across different values of market resources, so you can assume that is generically true.
#
# To get you started, we show how to conduct the exercise under particularly simple parameterization (the Deaton/Friedman model where $R = \frac{1}{\beta}$, in which the only relevant parameter is the interest rate).
#
# Your specific assignment is:
#
# 1. Starting with the default parameterization of the model, show how the approximation quality changes with values of other parameters
# 1. Explain, mathematically, why you get the patterns you do for how the solutions deteriorate as you change the parameter values
#
# Hints:
#
# 1. [MathFactsList](http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/MathFacts/MathFactsList.pdf) describes the conditions under which the approximations will be good; you want to find conditions under which the approximations get bad
# 2. An interesting question is the extent to which the size of approximation errors is related to the degree of impatience according to alternative metrics
# %%
# Set up a HARK Perfect Foresight Consumer called PFagent
from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType, init_perfect_foresight # Import the consumer type

# Now we need to give our consumer parameter values that allow us to solve the consumer's problem

# Invoke it to create a dictionary called Paramod (Params that we will modify)
Paramod = deepcopy(init_perfect_foresight) # deepcopy prevents later overwriting

# Extract the parameters from the dictionary to make them easy to reference
CRRA = Paramod['CRRA'] # Coefficient of relative risk aversion
Rfree = Paramod['Rfree'] # Interest factor on assets
DiscFac = Paramod['DiscFac'] # Intertemporal discount factor
PermGroFac = Paramod['PermGroFac'] # Permanent income growth factor
# NOTE: the next two chained assignments both mutate Paramod *and* bind a local name.
LivPrb = Paramod['LivPrb'] = [1.0] # Survival probability of 100 percent
cycles = Paramod['cycles'] = 0 # This says that it is an infinite horizon model

# %%
# Now let's pass our dictionary to our consumer class to create an instance
PFagent = PerfForesightConsumerType(**Paramod) # Any parameters we did not modify get their default values

# Solve the agent's problem
PFagent.solve()
# %%
# Plot the consumption function approximation versus the "true" consumption function

# Set out some range of market resources that we want to plot consumption for
mMin = 0
mMax = 10
numPoints = 100
m_range = np.linspace(mMin, mMax, numPoints) # This creates an array of points in the given range

wealthHmn = PFagent.solution[0].hNrm # normalized human wealth is constructed when we .solve()
wealthMkt = m_range # bank balances plus current income
wealthTot = wealthHmn+wealthMkt # Total wealth is the sum of human and market

# Feed our range of market resources into our consumption function in order to get consumption at each point
# (Remember, after doing .solve(), the consumption function is stored as PFagent.solution[0].cFunc)
cHARK = PFagent.solution[0].cFunc(m_range) # Because the input m_range is an array, the output cHARK is too
cMax = cHARK[-1]*1.2 # The last point will be the largest; add 20 percent for visual appeal

# Use matplotlib package (imported in first cell) to plot the consumption function
plt.figure(figsize=(9,6)) # set the figure size
plt.plot(m_range, cHARK, 'b', label='Consumption Function from HARK') # m on the x axis vs c on the y axis
# 'b' is for blue
plt.xlabel('Market resources m') # x axis label
plt.ylabel('Consumption c') # y axis label
plt.ylim(0,cMax)

# The plot is named plt and it hangs around like a variable
# but is not displayed until you do a plt.show()

# Construct the approximate consumption function
# Also, recall that in the "true" consumption function what matters is total wealth,
# not just market resources so we need to add in human wealth

# Use the values of R, beta, and rho that we used above to construct rates
rfree=Rfree-1
discRte=(1/DiscFac)-1 # See handout for why this is approximately the time preference rate
# c ~ (r - (1/rho)(r - theta)) * overall wealth  -- the approximation being studied
cApprox = wealthTot*(rfree - (1/CRRA)*(rfree-discRte))
plt.plot(m_range, cApprox, 'k', label='c function approximated') # Add true consumption function line
plt.legend() # show the legend
plt.show() # show the plot
# %% [markdown]
# The size of the error looks pretty stable, which we can show by calculating it in percentage terms
# %%
# Plot the deviations
# Error expressed in percent of the true (HARK) value at each grid point.
approximationError = 100*(cHARK - cApprox)/cHARK
plt.figure(figsize=(9,6)) #set the figure size
plt.plot(m_range, approximationError, label='cHARK - cApprox')
plt.xlabel('Market resources') # x axis label
plt.ylabel('Percent deviation of approximation') # y axis label
plt.legend()
plt.show()
# %% [markdown]
# Now we want to calculate how the approximation quality depends on the interest factor. We proceed as follows:
# 1. Create arrays of R values, such that the return patience factor is increasing as you descend through the array
# 2. Set up a for loop in which we will:
# 1. Input the new value of $R$
# 0. Solve the HARK model for the consumption function
# 0. Calculate the approximate consumption function
# 0. Save the average deviation between the two functions
# 3. Then we can plot average deviation against the $R$ factor
# %%
# Create array of Rfree values, and calculate the patience factor
howMany = 30
Rfree_min = Rfree
Rfree_max = Rfree**20  # deliberately extreme upper bound, to stress the approximation
Rfree_array = np.linspace(Rfree_min, Rfree_max, howMany)

# Absolute patience factor (R*beta)^(1/rho) and return patience factor
Pat_array = (Rfree_array*DiscFac)**(1/CRRA)
PatR_array = Pat_array/Rfree_array

# %%
# Set the time preference factor to match the interest factor so that $(R \beta) = 1$
Paramod['DiscFac'] = 1/Rfree
# %%
# Plot average deviation from true consumption function
PFagent = PerfForesightConsumerType(**Paramod) # construct a consumer with our previous parameters

plt.figure(figsize=(9,6)) #set the figure size
mean_dev = np.zeros(len(Rfree_array))  # one entry per interest factor

for i in range(len(Rfree_array)):
    PFagent.Rfree = Rfree_array[i]

    # Now we just copy the lines of code from above that we want
    PFagent.solve()
    cHARK = PFagent.solution[0].cFunc(m_range)
    wealthHmn = PFagent.solution[0].hNrm
    wealthTot = wealthHmn+m_range

    # Bug fix: the approximation must use the interest factor of THIS
    # iteration.  The original code used the loop-invariant globals
    # `Rfree` and `DiscFac`, so cApprox never changed across iterations
    # and the plotted "deviation" did not reflect the per-R approximation.
    rfree = Rfree_array[i]-1
    discRte = (1/Paramod['DiscFac'])-1  # the agent's discount factor was set in the cell above
    cApprox = wealthTot*(rfree - (1/CRRA)*(rfree-discRte))

    # NOTE(review): np.abs(cApprox/cHARK) is a mean *ratio*, not a deviation;
    # np.mean(np.abs(cApprox/cHARK - 1)) may be what is intended — confirm.
    deviation = np.mean(np.abs(cApprox/cHARK))
    mean_dev[i] = deviation

plt.plot(Rfree_array,mean_dev)
plt.xlabel('Return Factor') # x axis label
plt.ylabel(' Average deviation along consumption function') # y axis label
plt.show()
# %% [markdown]
# So, when the return factor gets to roughly 1.4, the error in the approximation is almost 80 percent. It looks like the value for $R$ where the approximation almost exactly matches the truth is about 1.035.
| StarcoderdataPython |
6594744 | <gh_stars>1-10
import tensorflow as tf
def weight_pruning(w: tf.Variable, k: float) -> tf.Variable:
    """Performs pruning on a weight matrix w in the following way:

    - The absolute value of all elements in the weight matrix are computed.
    - The indices of the smallest k% elements based on their absolute values are
      selected.
    - All elements with the matching indices are set to 0.

    Args:
        w: The weight matrix.
        k: The percentage (as a fraction in [0, 1]) of values that should be
           pruned from the matrix.

    Returns:
        The weight pruned weight matrix (the variable is updated in-graph).
    """
    # Convert the fraction k into an absolute element count.
    k = tf.cast(
        tf.round(tf.size(w, out_type=tf.float32) * tf.constant(k)), dtype=tf.int32
    )
    w_reshaped = tf.reshape(w, [-1])
    # top_k over the negated magnitudes selects the k smallest-|w| entries.
    _, indices = tf.nn.top_k(tf.negative(tf.abs(w_reshaped)), k, sorted=True, name=None)
    # Build a flat 0/1 mask with zeros at the pruned positions (TF1-style
    # graph-mode variable update via scatter_nd_update).
    mask = tf.scatter_nd_update(
        tf.Variable(
            tf.ones_like(w_reshaped, dtype=tf.float32), name="mask", trainable=False
        ),
        tf.reshape(indices, [-1, 1]),
        tf.zeros([k], tf.float32),
    )
    return w.assign(tf.reshape(w_reshaped * mask, tf.shape(w)))
def unit_pruning(w: tf.Variable, k: float) -> tf.Variable:
    """Performs pruning on a weight matrix w in the following way:

    - The euclidean norm of each column is computed.
    - The indices of smallest k% columns based on their euclidean norms are
      selected.
    - All elements in the columns that have the matching indices are set to 0.

    Args:
        w: The weight matrix.
        k: The percentage (as a fraction in [0, 1]) of columns that should be
           pruned from the matrix.

    Returns:
        The unit pruned weight matrix (the variable is updated in-graph).
    """
    # Convert the fraction k into an absolute column count.
    k = tf.cast(
        tf.round(tf.cast(tf.shape(w)[1], tf.float32) * tf.constant(k)), dtype=tf.int32
    )
    # Column-wise L2 norms; top_k over the negated norms selects the k
    # smallest-norm columns.
    norm = tf.norm(w, axis=0)
    row_indices = tf.tile(tf.range(tf.shape(w)[0]), [k])
    _, col_indices = tf.nn.top_k(tf.negative(norm), k, sorted=True, name=None)
    # Repeat each selected column index once per row so the (row, col) pairs
    # cover every element of the pruned columns.
    col_indices = tf.reshape(
        tf.tile(tf.reshape(col_indices, [-1, 1]), [1, tf.shape(w)[0]]), [-1]
    )
    indices = tf.stack([row_indices, col_indices], axis=1)
    return w.assign(
        tf.scatter_nd_update(w, indices, tf.zeros(tf.shape(w)[0] * k, tf.float32))
    )
def pruning_factory(pruning_type: str, w: tf.Variable, k: float) -> tf.Variable:
    """Dispatch to the requested pruning strategy.

    Args:
        pruning_type: Name of the pruning strategy, or None for no pruning.
        w: The weight matrix.
        k: The pruning percentage.

    Returns:
        The pruned or non pruned (if pruning_type == None) weight matrix.

    Raises:
        ValueError: If pruning_type names no known strategy.
    """
    # Lambdas defer evaluation, so a strategy is only invoked when selected.
    strategies = {
        None: lambda: w,
        "weight_pruning": lambda: weight_pruning(w, k),
        "unit_pruning": lambda: unit_pruning(w, k),
    }
    if pruning_type not in strategies:
        raise ValueError(f"Pruning type {pruning_type} unrecognized!")
    return strategies[pruning_type]()
| StarcoderdataPython |
3267082 | <filename>colossus/apps/subscribers/tests/factories.py
from django.utils import timezone
import factory
from colossus.apps.lists.tests.factories import MailingListFactory
from colossus.apps.subscribers.constants import Status, TemplateKeys
from colossus.apps.subscribers.models import (
Activity, Domain, Subscriber, SubscriptionFormTemplate, Tag,
)
class DomainFactory(factory.DjangoModelFactory):
    """Factory for the sender Domain; get_or_create keeps the row unique."""
    name = '@colossusmail.com'

    class Meta:
        model = Domain
        django_get_or_create = ('name',)
class SubscriberFactory(factory.DjangoModelFactory):
    """Factory for Subscriber instances with unique, deterministic emails."""
    # The sequence callback must actually use ``n``: with a constant email
    # (the f-string had no placeholder) every call hits django_get_or_create
    # and returns the very same Subscriber row instead of a new one.
    email = factory.Sequence(lambda n: f'subscriber.{n}@colossusmail.com')
    domain = factory.SubFactory(DomainFactory)
    mailing_list = factory.SubFactory(MailingListFactory)
    status = Status.SUBSCRIBED
    last_sent = timezone.now()

    class Meta:
        model = Subscriber
        django_get_or_create = ('email',)
class ActivityFactory(factory.DjangoModelFactory):
    """Factory for subscriber Activity records (defaults to a localhost hit)."""
    # NOTE(review): timezone.now() is evaluated once at class-definition time,
    # so every Activity shares the same timestamp; factory.LazyFunction may be
    # intended — confirm.
    date = timezone.now()
    ip_address = '127.0.0.1'
    subscriber = factory.SubFactory(SubscriberFactory)

    class Meta:
        model = Activity
class SubscriptionFormTemplateFactory(factory.DjangoModelFactory):
    """Factory for a mailing list's subscribe-form template."""
    key = TemplateKeys.SUBSCRIBE_FORM
    mailing_list = factory.SubFactory(MailingListFactory)

    class Meta:
        model = SubscriptionFormTemplate
        django_get_or_create = ('key',)
class TagFactory(factory.DjangoModelFactory):
    """Factory for Tags with sequential, unique names per mailing list."""
    name = factory.Sequence(lambda n: f'tag_{n}')
    mailing_list = factory.SubFactory(MailingListFactory)

    class Meta:
        model = Tag
        django_get_or_create = ('name',)
| StarcoderdataPython |
154770 | from face_detection import face_detect
import os
def createFolder(path, name):
    """Create a fresh directory ``name`` under ``path`` and return its path.

    If the directory already exists, a numeric suffix is appended —
    ``name(1)``, ``name(2)``, ... — until creation succeeds, mimicking the
    "file (1)" convention of file browsers.
    """
    counter = 0
    while True:
        suffix = '(%d)' % counter if counter else ''
        file_path = os.path.join(path, name + suffix)
        try:
            os.makedirs(file_path)
            return file_path
        except FileExistsError:
            # Only an existing directory triggers a retry; the original bare
            # ``except`` also swallowed permission / bad-path errors and
            # looped forever.  Those now propagate to the caller.
            counter += 1
# Ensure the working directories exist before capturing any faces.
os.makedirs('faces/tmp', exist_ok=True)
os.makedirs(os.path.join('faces/tmp','other'), exist_ok=True)

# Ask for the person's name and normalise it to a lowercase, space-free slug
# that is used as the folder / label name.  (Prompt is Vietnamese: "Enter name".)
name = input("Dien ten: ")
name = name.lower()
name = name.replace(" ", "")

# Create a unique folder for this person and start the capture/detection loop.
file_path = createFolder('faces/tmp',name)
face_detect(file_path, name)
| StarcoderdataPython |
11315863 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "0.1.0",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: launchdarkly_feature_flag_validator
short_description: Validate feature flags by running a configuration test
description:
- Validate LaunchDarkly feature flags in a project, using Conftest OPA Policies written in Rego. Conftest 0.18.0 is required as a binary in your C(PATH).
version_added: "0.3.0"
options:
project_key:
description:
- The project key
default: 'default'
required: yes
env:
description:
- The environment key
required: no
type: str
extends_documentation_fragment:
- launchdarkly_labs.collection.launchdarkly
- launchdarkly_labs.collection.launchdarkly_conftest
"""
RETURN = r"""
validated:
description: Whether all flags were successfully validated using Conftest OPA Policies.
type: bool
returned: always
validation:
description: List of dictionaries, containing the flag key and list of failures as strings.
returned: failure
"""
import inspect
import traceback
import time
LD_IMP_ERR = None
try:
import launchdarkly_api
HAS_LD = True
except ImportError:
LD_IMP_ERR = traceback.format_exc()
HAS_LD = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
from ansible.module_utils._text import to_native
from ansible.module_utils.common._json_compat import json
from ansible.module_utils.six import PY2, iteritems, string_types
from ansible_collections.launchdarkly_labs.collection.plugins.module_utils.base import (
configure_instance,
fail_exit,
ld_common_argument_spec,
rego_test,
)
def main():
    """Ansible module entry point: validate every LaunchDarkly feature flag in
    a project against the local Conftest/OPA Rego policies."""
    argument_spec = ld_common_argument_spec()
    argument_spec.update(
        dict(
            env=dict(type="str"),
            project_key=dict(type="str", required=True),
            # NOTE(review): ``tag`` is accepted and forwarded to the API via
            # _fetch_flags but is not listed in DOCUMENTATION above — confirm.
            tag=dict(type="str"),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_LD:
        # The SDK import failed at module load; report it the Ansible way.
        module.fail_json(
            msg=missing_required_lib("launchdarkly_api"), exception=LD_IMP_ERR
        )
    # Set up API
    configuration = configure_instance(module.params["api_key"])
    api_instance = launchdarkly_api.FeatureFlagsApi(
        launchdarkly_api.ApiClient(configuration)
    )
    feature_flags = _fetch_flags(module, api_instance)
    flags = feature_flags["items"]
    # Run the Rego policies against each flag and collect per-flag failures.
    results = []
    for flag in flags:
        result = rego_test(module, flag)
        if result.results[0].failures:
            validation_fail = {"key": flag["key"], "failures": []}
            for failure in result.results[0].failures:
                validation_fail["failures"].append(failure["msg"])
            results.append(validation_fail)
    if results:
        # NOTE(review): exit_json(failed=True, ...) does mark the task failed,
        # but module.fail_json(...) is the conventional form — confirm intent.
        module.exit_json(failed=True, validated=False, validation=results)
    else:
        module.exit_json(changed=True, validated=True)
def _fetch_flags(module, api_instance):
    """Fetch one flag (when ``key`` is given) or all flags of the project.

    Returns the API response as a plain dict, or None on HTTP 404.
    NOTE(review): main() indexes the result with ["items"] unconditionally, so
    a 404 (None return) would raise TypeError there — confirm intended.
    """
    try:
        # NOTE(review): ``key`` is not declared in main()'s argument_spec, so
        # this branch appears unreachable from this module — confirm.
        if module.params.get("key"):
            if module.params.get("env"):
                response = api_instance.get_feature_flag(
                    module.params["project_key"],
                    module.params["key"],
                    env=module.params["env"],
                )
            else:
                response = api_instance.get_feature_flag(
                    module.params["project_key"], module.params["key"]
                )
        else:
            # Forward only the query parameters that were actually supplied.
            keys = ["project_key", "env", "summary", "archived", "tag"]
            filtered_keys = dict(
                (k, module.params[k])
                for k in keys
                if k in module.params and module.params[k] is not None
            )
            response = api_instance.get_feature_flags(**filtered_keys)
        return response.to_dict()
    except launchdarkly_api.rest.ApiException as e:
        if e.status == 404:
            return None
        else:
            fail_exit(module, e)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
386027 | # Copyright 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import logging
import os
import shutil
import questionary
from emrichen import Context, Template
from AppImageBuilder.commands.file import File
from AppImageBuilder.generator.app_runtime_analyser import AppRuntimeAnalyser
from AppImageBuilder.generator.apt_recipe_generator import AptRecipeGenerator
from AppImageBuilder.generator.desktop_entry_parser import DesktopFileParser
class RecipeGeneratorError(RuntimeError):
    """Raised when the recipe generator cannot proceed (e.g. no AppDir found)."""
    pass
class RecipeGenerator:
    """Interactively builds an AppImageBuilder.yml recipe for an existing
    AppDir: guesses app metadata from desktop entries, confirms it with the
    user, analyses runtime dependencies and renders the emrichen template."""

    def __init__(self):
        self.logger = logging.getLogger("Generator")
        self.logger.info("Searching AppDir")
        self.app_dir = self._locate_app_dir()
        # Defaults for the app_info section; refined by _setup_app_info()
        # and the interactive prompts in setup_questions().
        self.app_info_id = ''
        self.app_info_name = ''
        self.app_info_icon = ''
        self.app_info_version = 'latest'
        self.app_info_exec = ''
        self.app_info_exec_args = '$@'
        self.runtime_generator = None
        self.runtime_env = None
        self.appimage_arch = None
        self.apt_arch = None
        self._setup_app_info()
        self.setup_questions()
        self.logger.info("Analysing application runtime dependencies")
        runtime_analyser = AppRuntimeAnalyser(self.app_dir, self.app_info_exec, self.app_info_exec_args)
        runtime_analyser.run_app_analysis()
        if shutil.which('apt-get'):
            self.logger.info("Guessing APT configuration")
            self.apt_arch = AptRecipeGenerator.get_arch()
            self.apt_sources = AptRecipeGenerator.get_sources()
            self.apt_includes = AptRecipeGenerator.resolve_includes(runtime_analyser.runtime_libs)
            self.apt_excludes = AptRecipeGenerator.resolve_excludes()
        # NOTE(review): the apt_* attributes above are only assigned when
        # apt-get is on PATH, but generate() reads apt_sources/includes/excludes
        # unconditionally — confirm behaviour on non-APT systems.
        self.files_excludes = [
            'usr/share/man',
            'usr/share/doc/*/README.*',
            'usr/share/doc/*/changelog.*',
            'usr/share/doc/*/NEWS.*',
            'usr/share/doc/*/TODO.*',
        ]
        # NOTE(review): this log line does not match the statements that
        # follow (looks copy-pasted from _setup_app_info) — confirm.
        self.logger.info("No desktop entries found")
        self.appimage_arch = self._guess_appimage_runtime_arch()
        self.runtime_env = {'APPDIR_LIBRARY_PATH': self._define_appdir_library_path(runtime_analyser.runtime_libs)}

    def setup_questions(self):
        """Prompt the user to confirm or correct every guessed app_info field."""
        # AppDir -> app_info
        print('Basic Information :')
        self.app_info_id = questionary.text('ID [Eg: com.example.app] :', default=self.app_info_id).ask()
        self.app_info_name = questionary.text('Application Name :', default=self.app_info_name).ask()
        self.app_info_icon = questionary.text('Icon :', default=self.app_info_icon).ask()
        self.app_info_version = questionary.text('Version :', default=self.app_info_version).ask()
        self.app_info_exec = questionary.text('Executable path relative to AppDir [usr/bin/app] :',
                                              default=self.app_info_exec).ask()
        self.app_info_exec_args = questionary.text('Arguments [Default: $@] :', default=self.app_info_exec_args).ask()
        self.apt_arch = questionary.select('Architecture :', ['amd64', 'arm64', 'i386', 'armhf'],
                                           default=self.apt_arch).ask()

    def generate(self):
        """Render the emrichen template with the collected values and write
        AppImageBuilder.yml into the current directory."""
        appimage_builder_yml_template_path = os.path.realpath(os.path.join(
            os.path.dirname(__file__),
            'templates',
            'AppImageBuilder.yml.in'
        ))
        with open(appimage_builder_yml_template_path, 'r') as filedata:
            appimage_builder_yml_template = Template.parse(filedata, 'yaml')
        appimage_builder_yml_ctx = Context({
            'app_info_id': self.app_info_id,
            'app_info_name': self.app_info_name,
            'app_info_icon': self.app_info_icon,
            'app_info_version': self.app_info_version,
            'app_info_exec': self.app_info_exec,
            'app_info_exec_args': self.app_info_exec_args,
            'runtime_generator': self.runtime_generator,
            'runtime_env': self.runtime_env,
            'apt_arch': self.apt_arch,
            'apt_sources': self.apt_sources,
            'apt_includes': self.apt_includes,
            'apt_excludes': self.apt_excludes,
            'files_excludes': self.files_excludes,
            'appimage_arch': self.appimage_arch,
        })
        rendered_yml = appimage_builder_yml_template.render(appimage_builder_yml_ctx)
        logging.info(rendered_yml)
        with open('AppImageBuilder.yml', 'w') as f:
            f.write(rendered_yml)
        self.logger.info("Recipe generation completed.")
        self.logger.info("Please manually fill any blank field left before calling appimage-builder")

    @staticmethod
    def _locate_app_dir():
        """Return the first directory named 'appdir' (case-insensitive) in CWD."""
        for file_name in os.listdir(os.path.curdir):
            if os.path.isdir(file_name) and file_name.lower() == 'appdir':
                return file_name
        raise RecipeGeneratorError('Unable to find an AppDir, this is required to create a recipe.')

    def _setup_app_info(self):
        """Pre-fill the app_info fields from a desktop entry, if any exists."""
        self.logger.info("Searching desktop entries")
        desktop_files = self._find_desktop_entry_files()
        desktop_file = None
        if len(desktop_files) == 1:
            desktop_file = desktop_files[0]
        if len(desktop_files) > 1:
            # Several candidates: let the user pick the main one.
            desktop_file = questionary.select('Main desktop entry :', desktop_files).ask()
        if desktop_file:
            self.logger.info("Reading desktop entry: %s" % desktop_file)
            parser = DesktopFileParser(desktop_file)
            self.app_info_id = parser.get_id()
            self.app_info_name = parser.get_name()
            self.app_info_icon = parser.get_icon()
            # NOTE(review): local name shadows the `exec` builtin.
            exec = parser.get_exec_path()
            self.app_info_exec = self._resolve_exec_path(exec)
            self.app_info_exec_args = parser.get_exec_args()
            if not self.app_info_exec_args:
                self.app_info_exec_args = '$@'
        else:
            self.logger.info("No desktop entries found")

    def _find_desktop_entry_files(self):
        """Return *.desktop files at the AppDir root and under
        usr/share/applications (root entries as bare names, the rest as paths)."""
        desktop_entries = []
        for file_name in os.listdir(os.path.abspath(self.app_dir)):
            if file_name.lower().endswith('desktop'):
                desktop_entries.append(file_name)
        for root, dir, files in os.walk(os.path.join(self.app_dir, 'usr', 'share', 'applications')):
            for file_name in files:
                if file_name.lower().endswith('desktop'):
                    desktop_entries.append(os.path.join(root, file_name))
        return desktop_entries

    def _resolve_exec_path(self, exec):
        """Map the desktop entry Exec= value to a path relative to the AppDir.

        Falls back to the first executable file found anywhere in the AppDir
        when the literal path does not exist.
        """
        if '/' in exec and os.path.exists(os.path.join(self.app_dir, exec)):
            return exec
        absolute_app_dir = os.path.abspath(self.app_dir)
        for root, dir, files in os.walk(absolute_app_dir):
            for file in files:
                full_path = os.path.join(root, file)
                if os.access(full_path, os.X_OK):
                    return os.path.relpath(full_path, absolute_app_dir)
        raise RecipeGeneratorError('Unable to find executable: %s' % exec)

    def _guess_appimage_runtime_arch(self):
        """Infer the AppImage runtime arch from the main binary's file(1)
        signature.  Returns None for unrecognised signatures — callers must
        handle that."""
        file = File()
        signature = file.query(os.path.join(self.app_dir, self.app_info_exec))
        if 'x86-64' in signature:
            return 'x86_64'
        if 'Intel 80386,' in signature:
            return 'i686'
        if 'ARM aarch64,' in signature:
            return 'aarch64'
        if 'ARM,' in signature:
            return 'armhf'
        return None

    @staticmethod
    def _define_appdir_library_path(runtime_libs):
        """Build the APPDIR_LIBRARY_PATH value: a ':'-joined list of $APPDIR-
        prefixed library directories, excluding plugin dirs (dri, Qt qml/plugins)
        that must not be on the loader path."""
        lib_dirs = set()
        for lib in runtime_libs:
            dirname = os.path.dirname(lib)
            if not dirname.endswith('/dri') and \
                    'qt5/qml' not in dirname and \
                    'qt5/plugins' not in lib:
                lib_dirs.add(dirname)
        runtime_env = ':'.join('$APPDIR%s' % dir for dir in lib_dirs)
        return runtime_env
| StarcoderdataPython |
264861 | #
# =====================
# Training a Classifier
# =====================
#
import time, os, copy, numpy as np
import torch, torchvision
import torch.nn as nn
from torch.nn import Parameter, init
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.autograd import Variable
import torchvision.models as models
from torch.utils.data import Dataset, DataLoader
import sys
import pickle
import math
from collections import defaultdict
import random
# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
print(device)

# ## Get object features, distances and frame labels
# ### Helper functions
from PIL import Image

# Mapping from object-class name to integer label id.
# NOTE(review): not referenced in the visible code of this file — confirm it is
# used by downstream code before removing.
object_classes = {'bottle': 0, 'bowl': 1, 'cheese': 2, 'cucumber': 3, 'knife': 4,
                  'lettuce': 5, 'peeler': 6, 'spoon': 7, 'tomato': 8, 'hand': 9}
class LongTermFrameFeat(Dataset):
    """Dataset of I3D feature segments for long-term action anticipation
    (Breakfast-style annotations).

    For each video it splits the per-frame labels/features into fixed-length
    segments, then emits sliding windows of ``obs_seg`` observed segments
    together with the next segment's label and (part of) its features.
    """

    # NOTE(review): class attribute appears unused in the visible code.
    features = None

    def __init__(self, annot_dir, i3d_feature_dir, sequence_ids, obs_seg, seg_length, transform=None):
        self.label_sequences = []
        self.i3d_sequences = []
        self.target_labels = []
        self.target_features = []
        # Action-name -> integer id (48 classes, 0..47, incl. background 'SIL').
        self.action_classes = {'SIL':0,'cut_fruit':1,'put_fruit2bowl':2,'peel_fruit':3,'stir_fruit':4,
                               'crack_egg':5,'add_saltnpepper':6,'stir_egg':7,'pour_oil':8,
                               'pour_egg2pan':9,'stirfry_egg':10,'take_plate':11,'put_egg2plate':12,
                               'pour_coffee':13,'pour_milk':14,'spoon_powder':15,'stir_milk':16,
                               'pour_cereals':17,'stir_cereals':18,'pour_flour':19,'stir_dough':20,
                               'pour_dough2pan':21,'fry_pancake':22,'put_pancake2plate':23,
                               'add_teabag':24,'pour_water':25,'cut_orange':26,'squeeze_orange':27,
                               'take_glass':28,'pour_juice':29,'fry_egg':30,'cut_bun':31,
                               'take_butter':32,'smear_butter':33,'put_toppingOnTop':34,
                               'put_bunTogether':35,'spoon_flour':36,'butter_pan':37,'take_eggs':38,
                               'take_cup':39,'pour_sugar':40,'stir_coffee':41,'take_bowl':42,
                               'take_knife':43,'spoon_sugar':44,'take_topping':45,'take_squeezer':46,
                               'stir_tea':47}
        self.transform = transform
        for sequence_id in sequence_ids:
            # NOTE(review): bare except silently skips videos whose annotation
            # or feature file is missing/corrupt — consider narrowing.
            try:
                labels = open(os.path.join(annot_dir,sequence_id.strip('\n')+'.txt'),'r').readlines()
                i3d_sequence_file = os.path.join(i3d_feature_dir, sequence_id.strip('\n')+'.npy')
                i3d_feat = np.load(i3d_sequence_file)
            except:
                continue
            label_sequence = []
            i3d_sequence = []
            label_segs = []
            i3d_segs = []
            # Per-frame pass: map label names to ids, keep only frames that
            # have a matching feature row (the except guards index overrun).
            for frame_num in range(len(labels)):
                action_label = labels[frame_num].strip('\n')
                action_label = self.action_classes[action_label]
                try:
                    i3d_feature_frame = i3d_feat[frame_num,:]
                    label_sequence.append(action_label)
                    i3d_sequence.append(i3d_feature_frame)
                except:
                    continue
            seq_len = len(label_sequence)
            # Chunk into segments of seg_length frames; the segment's label is
            # taken 15 frames into the segment, features subsampled every 5th
            # frame (3fps).  NOTE(review): magic offsets — confirm.
            for i in range(seq_len):
                if i % seg_length == 0 and i > 0:
                    label_segs.append(label_sequence[i-seg_length+15])
                    i3d_segs.append(i3d_sequence[i-seg_length:i:5]) # 3fps
            # Sliding window: obs_seg observed segments -> next segment target.
            final = len(label_segs)- obs_seg - 2
            for i in range(0, final):
                self.label_sequences.append(label_segs[i:i+obs_seg])
                self.i3d_sequences.append(i3d_segs[i:i+obs_seg])
                self.target_labels.append(label_segs[i+obs_seg])
                self.target_features.append(i3d_segs[i+obs_seg][3:])

    def __getitem__(self, index):
        """Return (observed features, observed labels, target label, target features)."""
        i3d_seq_segs = self.i3d_sequences[index]
        label_segs = self.label_sequences[index]
        target_label = self.target_labels[index]
        target_feats = self.target_features[index]
        return i3d_seq_segs, label_segs, target_label, target_feats

    def __len__(self):
        return len(self.label_sequences)

    def get_weights(self):
        """Return the empirical frequency of each target label.

        NOTE(review): the array is sized 47 but action_classes has 48 ids
        (0..47); a target label of 47 ('stir_tea') would raise IndexError —
        likely should be np.zeros(48).  Confirm before relying on this.
        """
        weights = np.zeros(47)
        values, counts = np.unique(self.target_labels, return_counts=True)
        for value, count in zip(values, counts):
            weights[value] = count
        weights = weights/len(self.target_labels)
        return weights
class GoalPredictor(nn.Module):
    """Predict a latent 'goal' representation by unrolling an LSTM cell into
    the future and pooling the imagined hidden states.

    NOTE(review): relies on the module-level globals ``obs_segs`` (not defined
    in this file's visible code) and ``device`` — confirm they exist before
    instantiation.
    """

    def __init__(self):
        super(GoalPredictor, self).__init__()
        self.rnn1 = nn.LSTMCell(2048, 2048)
        self.softmax = nn.Softmax(dim=1)
        self.W_h = nn.Linear(2048, 2048)
        self.W_c = nn.Linear(2048, 2048)
        # Pools over the obs_segs[-1] imagined future steps.
        self.pool = nn.AvgPool1d(obs_segs[-1])
        self.W_f = nn.Linear(2048, 2048)
        self.relu = nn.ReLU()
        self.rnn2 = nn.LSTMCell(2*2048, 2048)

    def forward(self, feat_state, action_state, hidden_now, batch_size=None):
        """Return (next hidden state, estimated goal feature x_t_est)."""
        # Project the current hidden state into the first imagined hidden state.
        future_hidden = self.relu(self.W_h(hidden_now))
        r_t = action_state
        future_hiddens = []
        cell_state = torch.zeros(1, 2048).to(device)
        # Unroll rnn1 for obs_segs[-1] imagined steps, feeding back a softmax
        # projection of each hidden state as the next input.
        for i in range(obs_segs[-1]):
            future_hidden, cell_state = self.rnn1(r_t, (future_hidden, cell_state))
            r_t = self.softmax(self.W_c(future_hidden))
            future_hiddens.append(future_hidden)
        future_hiddens = torch.stack(future_hiddens).permute(2,1,0)
        # Average the imagined states over time, then map to the goal estimate.
        pooled = self.pool(future_hiddens).squeeze().unsqueeze(0)
        x_t_est = self.relu(self.W_f(pooled))
        cell_state = torch.zeros(1, 2048).to(device)
        # Fuse the current feature state with the goal estimate to advance the
        # outer recurrence.
        h_next, _ = self.rnn2(torch.cat((feat_state, x_t_est),1), (hidden_now, cell_state))
        return h_next, x_t_est
class ActionAnticipator(nn.Module):
    """Anticipate future actions by greedily searching for action embeddings
    that move the visual feature state toward a predicted latent goal.

    NOTE(review): block nesting was reconstructed from mangled indentation —
    confirm that the per-step appends sit inside the distance-check branch.
    """
    def __init__(self):
        super(ActionAnticipator, self).__init__()
        self.feat_embedding = nn.LSTM(2048, 2048, 1)
        self.action_embedding = nn.Linear(48, 2048)
        self.goalpredictor = GoalPredictor()
        self.epsilon = 0.0005  # goal-reached distance threshold
        self.rnn = nn.LSTMCell(2*2048, 2048)
        self.predictor = nn.Linear(2*2048, 2048)
        self.relu = nn.ReLU()
        self.delta = 0.0005    # max allowed drift of the goal estimate
        self.embedding2action = nn.Linear(2048, 48)
        self.softmax = nn.Softmax(dim=1)

    def feat_predictor(self, input1, input2, input3):
        # Predict the next feature state from (current feature, candidate
        # action, goal); the same Linear layer is applied twice.
        #print(input1.shape)
        pred_feat = self.predictor(torch.cat((input1, input2),1))
        pred_feat = self.predictor(torch.cat((pred_feat, input3),1))
        pred_feat = self.relu(pred_feat)
        return pred_feat

    def target_feat_embedding(self, target_feat):
        # Embed the target segment features; uses the final LSTM hidden state.
        # print(target_feat.shape)
        _, (feat_state, _) = self.feat_embedding(target_feat)
        feat_state = feat_state.squeeze(0)
        return feat_state

    def forward(self, i3d_feat_seq, batch_size=None):
        # print(i3d_feat_seq.shape)
        # print(type(i3d_feat_seq))
        if len(i3d_feat_seq.shape) == 4:
            # 4-D input: collapse and reorder before embedding.
            i3d_feat_seq = i3d_feat_seq.squeeze().permute(1,0,2)
            #print(i3d_feat_seq.shape)
            _, (feat_states, _) = self.feat_embedding(i3d_feat_seq)
        else:
            _, (feat_states, _) = self.feat_embedding(i3d_feat_seq)
        feat_states = feat_states.squeeze(0)
        #print(feat_states.shape)
        obs_acts = feat_states.shape[0]
        best_feat_state = feat_states[0,:].unsqueeze(0)
        action_state = torch.zeros(1,2048).to(device)
        best_action_state = action_state
        # print(best_action_state.shape)
        # print(best_feat_state.shape)
        best_action_states = []
        best_feat_states = []
        hidden = torch.zeros(1, 2048).to(device)
        # Initial goal estimate from the first feature/action state.
        hidden, goal_state = self.goalpredictor(best_feat_state, best_action_state, hidden)
        for i in range(obs_acts):
            action_list = []
            # print(best_feat_state.shape)
            # print(best_action_state.shape)
            # Only search while the current feature state is far from the goal.
            if torch.square(torch.dist(feat_states[i,:], goal_state,2)) > self.epsilon:
                h_act = torch.zeros(1, 2048).to(device)
                c_act = torch.zeros(1, 2048).to(device)
                # Generate 6 candidate action embeddings by unrolling self.rnn.
                for j in range(6):
                    h_act, c_act = self.rnn(torch.cat((best_action_state, best_feat_state), 1), (h_act, c_act))
                    action_list.append(h_act)
                goal_state_est = torch.zeros_like(goal_state)
                hidden_est = torch.zeros_like(hidden)
                # Greedily keep any candidate that moves closer to the goal
                # while keeping the goal estimate stable (within delta).
                for action in action_list:
                    next_feat = self.feat_predictor(best_feat_state, action, goal_state)
                    hidden_new, goal_state_new = self.goalpredictor(best_feat_state, best_action_state, hidden)
                    if torch.square(torch.dist(next_feat, goal_state,2)) < torch.square(torch.dist(best_feat_state, goal_state, 2)) and \
                       torch.square(torch.dist(goal_state_new, goal_state,2)) < self.delta:
                        best_feat_state = next_feat
                        best_action_state = action
                        hidden_est = hidden_new
                        goal_state_est = goal_state_new
                best_action_states.append(best_action_state)
                hidden = hidden_est
                goal_state = goal_state_est
        # One extra step anticipates the action after the observed window.
        h_act = torch.zeros(1, 2048).to(device)
        c_act = torch.zeros(1, 2048).to(device)
        ant_action, _ = self.rnn(torch.cat((best_action_state, best_feat_state), 1), (h_act, c_act))
        best_action_states.append(ant_action)
        best_action_states = torch.stack(best_action_states).squeeze(1)
        best_actions = self.embedding2action(best_action_states)
        #print(best_action_states.shape)
        return best_actions, best_feat_state
class TrainTest():
    """Training/evaluation harness for the anticipation model.

    Loss = cross-entropy over the predicted action sequence (normalized by
    sequence length) + MSE between predicted and embedded target features.
    Checkpoints (model weights, best test accuracy, epoch, per-epoch details)
    live at ``ckpt_path`` and are resumed automatically when present.
    """
    def __init__(self, model, trainset, testset, batch_size, nepoch, ckpt_path):
        self.model = model
        self.optimizer = optim.Adam(model.parameters())
        #self.optimizer = optim.SGD(model.parameters(), lr=0.0005, weight_decay=0.9 )
        self.criterion1 = nn.CrossEntropyLoss()  # action classification
        self.criterion2 = nn.MSELoss()           # goal-feature regression
        # Samples are loaded one at a time; gradients are accumulated over
        # batch_size iterations inside train() instead of batched loading.
        self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=1,
                                                       shuffle=True, num_workers=0)
        self.testloader = torch.utils.data.DataLoader(testset, batch_size=1,
                                                      shuffle=False, num_workers=0)
        self.model.to(device)
        self.chkpath = ckpt_path
        self.batch_size = batch_size
        self.nepoch = nepoch
        self.trainset_size = len(trainset)
        self.mse = nn.MSELoss(reduction='none')
        if not os.path.exists('ckpt/'):
            os.mkdir('ckpt/')
        print(self.chkpath)
        # Resume from an existing checkpoint when one is present.
        if os.path.exists(self.chkpath) == True:
            print('load from ckpt', end=' ')
            self.state = torch.load(self.chkpath)
            self.model.load_state_dict(self.state['model'])
            best_acc = self.state['acc']
            start_epoch = self.state['epoch']
            print('Epoch {}'.format(start_epoch))
            if start_epoch == self.nepoch:
                print('existing as epoch is max.')
            self.details = self.state['details']
            self.best_acc = best_acc
            self.start_epoch = start_epoch + 1
            self.model.to(device)
        else:
            self.best_acc = -1.
            self.details = []
            self.start_epoch = 0

    def test(self):
        """Evaluate on the test loader; returns (accuracy %, #correct).

        Samples whose prediction length disagrees with the target length are
        skipped (the model may emit fewer steps when the goal is reached).
        NOTE(review): if every sample is skipped, count stays 0 and the final
        division raises ZeroDivisionError — confirm this cannot happen.
        """
        correct = 0
        count = 0
        sequence = []
        with torch.no_grad():
            for i, data in enumerate(self.testloader, 0):
                #print(len(data))
                loss = 0.
                i3d_feat_seq = []
                obs_label_seq = []
                i3d_seq_segs, obs_label_segs, target_label, target_feats = data
                for i3d_seq, obs_label in zip(i3d_seq_segs, obs_label_segs):
                    seg_feat = torch.stack(i3d_seq)
                    i3d_feat_seq.append(seg_feat)
                    obs_label_seq.append([int(obs_label)])
                i3d_feat_seq = torch.stack(i3d_feat_seq).float().squeeze(0).to(device)
                # print(i3d_feat_seq.shape)
                target_feat = torch.stack(target_feats).float().to(device)
                target_feat = self.model.target_feat_embedding(target_feat)
                # Targets = observed labels followed by the anticipated label.
                target_actions = obs_label_seq
                target_actions.append([int(target_label)])
                action_label = torch.LongTensor(obs_label_seq[0]).unsqueeze(0)
                # print(action_label.shape)
                # print(obs_label_tensor.shape)
                # print(target_seq.shape)
                # print(action_label_tensor.is_cuda)
                pred_actions, pred_feat = self.model(i3d_feat_seq)
                target_actions = torch.LongTensor(target_actions).squeeze().to(device)
                # print(action_label.shape)
                # print(obs_label_tensor.shape)
                # print(i3d_feat_seq.shape)
                #print(target_seq.shape)
                if target_actions.shape[0] != pred_actions.shape[0]:
                    continue
                pred_action = pred_actions[-1,:].view(1,-1)
                # print(pred_action.shape)
                target_action = target_actions[-1].unsqueeze(0)
                loss += self.criterion1(pred_actions, target_actions)/target_actions.shape[0]
                # print(loss)
                loss += self.criterion2(pred_feat, target_feat)
                # print(loss)
                # print(pred_seq.shape)
                # Accuracy is measured on the anticipated (last) action only.
                ant_action = torch.argmax(pred_actions[-1,:])
                correct = correct + torch.sum(ant_action==target_actions[-1]).item()
                print("\rIteration: {}/{}, Loss: {}.".format(i+1, len(self.testloader), loss), end="")
                sys.stdout.flush()
                count += 1
                #print(count)
        return correct/count*100., correct

    def train(self):
        """Train from start_epoch to nepoch, checkpointing on best test acc."""
        for epoch in range(self.start_epoch,self.nepoch):
            start_time = time.time()
            running_loss = 0.0
            correct = 0.
            count = 0.
            total_loss = 0
            self.optimizer.zero_grad()
            loss = 0.
            iterations = 0
            for i, data in enumerate(self.trainloader, 0):
                #print(len(data))
                i3d_feat_seq = []
                obs_label_seq = []
                i3d_seq_segs, obs_label_segs, target_label, target_feats = data
                for i3d_seq, obs_label in zip(i3d_seq_segs, obs_label_segs):
                    seg_feat = torch.stack(i3d_seq)
                    i3d_feat_seq.append(seg_feat)
                    obs_label_seq.append([int(obs_label)])
                i3d_feat_seq = torch.stack(i3d_feat_seq).float().squeeze(0).to(device)
                # print(i3d_feat_seq.shape)
                target_feat = torch.stack(target_feats).float().to(device)
                target_feat = self.model.target_feat_embedding(target_feat)
                target_actions = obs_label_seq
                target_actions.append([int(target_label)])
                action_label = torch.LongTensor(obs_label_seq[0]).unsqueeze(0)
                # print(action_label.shape)
                # print(action_label_tensor.is_cuda)
                pred_actions, pred_feat = self.model(i3d_feat_seq)
                target_actions = torch.LongTensor(target_actions).squeeze().to(device)
                # print(target_actions.shape)
                if target_actions.shape[0] != pred_actions.shape[0]:
                    continue
                pred_action = pred_actions[-1,:].view(1,-1)
                # print(pred_action.shape)
                target_action = target_actions[-1].unsqueeze(0)
                loss += self.criterion1(pred_actions, target_actions)/target_actions.shape[0]
                # print(loss)
                loss += self.criterion2(pred_feat, target_feat)
                # print(loss)
                # print(pred_seq.shape)
                ant_action = torch.argmax(pred_actions[-1,:])
                # print(ant_action)
                with torch.no_grad():
                    correct = correct + torch.sum(ant_action==target_actions[-1]).item()
                # Gradient accumulation: step every batch_size samples.
                # NOTE(review): this reads the module-level batch_size, not
                # self.batch_size — confirm they are meant to stay in sync.
                if i % batch_size == 0 and i>1:
                    loss.backward()
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    running_loss = running_loss + loss.item()
                    print("\rIteration: {}/{}, Loss: {}.".format(i+1, len(self.trainloader), loss), end="")
                    loss = 0.
                    count += self.batch_size
                    iterations += 1
            TRAIN_LOSS = running_loss/iterations
            TRAIN_ACC = correct/count*100
            TEST_ACC, TEST_COUNT = self.test()
            self.details.append((TRAIN_LOSS,TRAIN_ACC,0.,TEST_ACC))
            #utils.draw_Fig(self.details)
            #plt.savefig('results/sum_classifier.png')
            #plt.close()
            # Save the full state on improvement; otherwise only bump the epoch.
            if TEST_ACC > self.best_acc:
                self.state = {
                    'model': self.model.state_dict(),
                    'acc': TEST_ACC,
                    'epoch': epoch,
                    'details':self.details,
                }
                torch.save(self.state, self.chkpath)
                self.best_acc = TEST_ACC
            else:
                self.state['epoch'] = epoch
                torch.save(self.state, self.chkpath)
            elapsed_time = time.time() - start_time
            print('[{}] [{:.1f}] [Loss {:.3f}] [Correct : {}] [Trn. Acc {:.1f}] '.format(epoch, elapsed_time,
                                                                                         TRAIN_LOSS, correct,TRAIN_ACC),end=" ")
            print('[Test Cor {}] [Acc {:.1f}]'.format(TEST_COUNT,TEST_ACC))
# ### define hyperparameters
num_classes = 48
obs_segs = [1, 2, 3, 4]            # candidate numbers of observed segments
seg_lengths = [30, 45, 75, 150]    # segment lengths in frames
seg_length_secs = [2, 3, 5, 10]    # the same lengths in seconds
obs_seg = obs_segs[0]
seg_length = seg_lengths[3]
seg_length_sec = seg_length_secs[3]
nepochs = 10
batch_size = 8
annot_dir = '../breakfast_framewise_annotations/'
i3d_feature_dir = '/home/roy/breakfast_i3dfeatures/'
# Build train/test datasets from the s1 split files.
sequence_ids_train = sorted(open('train.s1').readlines())
training_set = LongTermFrameFeat(annot_dir, i3d_feature_dir, sequence_ids_train[:], obs_seg, seg_length)
print('{} training instances.'.format(len(training_set)))
sequence_ids_test = open('test.s1').readlines()
test_set = LongTermFrameFeat(annot_dir, i3d_feature_dir, sequence_ids_test[:], obs_seg, seg_length)
print('{} test instances.'.format(len(test_set)))
model_ft = ActionAnticipator()
ckpt_path = 'ckpt/i3d_latent_goal_{:d}sx{:d}_obs_lstm.pt'.format(seg_length_sec, obs_seg)
# tgtembed = TargetEmbedding(seg_length_sec)
# weights = torch.Tensor(training_set.get_weights()).to(device)
# print(weights.shape)
EXEC = TrainTest(model_ft, training_set, test_set, batch_size, nepochs, ckpt_path)
EXEC.train()
# Final evaluation: reload the best checkpoint and print a classification report
# over the anticipated (last) action of every test sample.
testloader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=0)
from sklearn.metrics import classification_report
pred_labels = []
target_labels = []
state = torch.load(ckpt_path)
model_ft.load_state_dict(state['model'])
model_ft.to(device)
correct = 0
actual_pred_seq = []
actual_target_seq = []
with torch.no_grad():
    for i, data in enumerate(testloader, 0):
        loss = 0.
        #print(len(data))
        i3d_feat_seq = []
        obs_label_seq = []
        i3d_seq_segs, obs_label_segs, target_label, target_feats = data
        for i3d_seq, obs_label in zip(i3d_seq_segs, obs_label_segs):
            seg_feat = torch.stack(i3d_seq)
            i3d_feat_seq.append(seg_feat)
            obs_label_seq.append([int(obs_label)])
        i3d_feat_seq = torch.stack(i3d_feat_seq).float().squeeze(0).to(device)
        # print(i3d_feat_seq.shape)
        target_action = target_label
        pred_actions, pred_feat = model_ft(i3d_feat_seq)
        actual_pred_seq.append(torch.argmax(pred_actions[-1,:]).item())
        actual_target_seq.append(target_action)
print(classification_report(actual_target_seq, actual_pred_seq, digits=4))
| StarcoderdataPython |
3325554 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from .Utility import deep_update
from .JsonAccessor.JsonAccessor import load_json
class ConfigureLoader(object):
    """Loads JSON configuration files, merging user overrides onto defaults."""

    CONFIG_FILENAME_DEFAULT = "ConfigDefault.json"
    CONFIG_FILENAME_USER = "ConfigUser.json"

    @staticmethod
    def load_file(dir_path, filename):
        """Return the parsed JSON at ``dir_path/filename``, or {} if missing.

        Uses os.path.join instead of manual trailing-separator handling,
        which also avoids the IndexError the old ``dir_path[-1]`` check
        raised when ``dir_path`` was an empty string.
        """
        try:
            return load_json(os.path.join(dir_path, filename))
        except FileNotFoundError:
            return {}

    @classmethod
    def load_integrated_config(cls, dir_path, default_file=None, user_file=None):
        """
        Loading both default and user config, return the integrated result.

        Entries from ``user_file`` take precedence over ``default_file``;
        deep_update merges nested dictionaries recursively.
        """
        if default_file is None:
            default_file = cls.CONFIG_FILENAME_DEFAULT
        if user_file is None:
            user_file = cls.CONFIG_FILENAME_USER
        default_config = cls.load_file(dir_path, default_file)
        user_config = cls.load_file(dir_path, user_file)
        return deep_update(default_config, user_config)
| StarcoderdataPython |
9705028 | # Generated by Django 3.2.10 on 2022-01-09 17:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the ``display_in_lms`` boolean flag to the ``course`` model."""

    dependencies = [
        ('products', '0018_ProductGroups'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='display_in_lms',
            # Existing rows default to visible (True).
            field=models.BooleanField(default=True, help_text='If disabled will not be shown in LMS', verbose_name='Display in LMS'),
        ),
    ]
| StarcoderdataPython |
9612050 | from dataclasses import dataclass
from enum import Enum
from typing import List, Union, Dict
from brain_brew.configuration.part_holder import PartHolder
from brain_brew.configuration.representation_base import RepresentationBase
from brain_brew.interfaces.yamale_verifyable import YamlRepr
from brain_brew.representation.yaml.note_model import NoteModel
from brain_brew.representation.yaml.notes import GUID, TAGS
from brain_brew.utils import single_item_to_list
class FieldMapping:
    """One mapping from a source (CSV column, personal field, or default)
    onto a note-model field.

    ``field_name`` is always lower-cased. ``value`` is lower-cased only for
    COLUMN mappings, because CSV column matching is case-insensitive; the
    other mapping types keep the value verbatim.
    """

    class FieldMappingType(Enum):
        COLUMN = "column"
        PERSONAL_FIELD = "personal_field"
        DEFAULT = "default"

        @classmethod
        def values(cls):
            # All raw string values of the enum members.
            return {member.value for member in cls}

    type: FieldMappingType
    value: str
    field_name: str

    def __init__(self, field_type: FieldMappingType, field_name: str, value: str):
        self.type = field_type
        self.field_name = field_name.lower()
        is_column = self.type == FieldMapping.FieldMappingType.COLUMN
        self.value = value.lower() if is_column else value
@dataclass
class NoteModelMapping(YamlRepr):
    """Maps CSV columns onto the fields of one or more note models.

    Performs the renaming between CSV column names and note-model field names
    in both directions, injects/strips "personal" (CSV-only) fields, and
    validates that the configured mapping is consistent with the note models.
    """
    @classmethod
    def task_name(cls) -> str:
        return r'note_model_mapping'

    @classmethod
    def yamale_schema(cls) -> str:
        # Yamale schema fragment validating recipe entries for this task.
        return f'''\
            note_models: any(list(str()), str())
            columns_to_fields: map(str(), key=str())
            personal_fields: list(str())
        '''

    @dataclass
    class Representation(RepresentationBase):
        # Raw deserialized form of a note_model_mapping recipe entry.
        note_models: Union[str, list]
        columns_to_fields: Dict[str, str]
        personal_fields: List[str]

    # Resolved note models keyed by part id; both field-mapping lists are
    # built from the Representation in from_repr().
    note_models: Dict[str, PartHolder[NoteModel]]
    columns: List[FieldMapping]
    personal_fields: List[FieldMapping]

    # Every mapping must define these columns (guid and tags).
    required_fields_definitions = [GUID, TAGS]

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build a NoteModelMapping from its recipe representation."""
        rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data)
        note_models = [PartHolder.from_file_manager(model) for model in single_item_to_list(rep.note_models)]
        return cls(
            columns=[FieldMapping(
                field_type=FieldMapping.FieldMappingType.COLUMN,
                field_name=field,
                value=key) for key, field in rep.columns_to_fields.items()],
            personal_fields=[FieldMapping(
                field_type=FieldMapping.FieldMappingType.PERSONAL_FIELD,
                field_name=field,
                value="") for field in rep.personal_fields],
            note_models=dict(map(lambda nm: (nm.part_id, nm), note_models))
        )

    def get_note_model_mapping_dict(self):
        # Keyed by note-model part id; every model shares this same mapping.
        return {model: self for model in self.note_models}

    def verify_contents(self):
        """Raise Exception(list-of-KeyError) if mapping and models disagree.

        Checks: required guid/tags columns exist; every note-model field is
        covered by a column or declared personal; no column maps to a field
        that exists on no note model.
        """
        errors = []

        extra_fields = [field.field_name for field in self.columns
                        if field.field_name not in self.required_fields_definitions]

        for holder in self.note_models.values():
            model: NoteModel = holder.part

            # Check for Required Fields
            missing = []
            for req in self.required_fields_definitions:
                if req not in [field.field_name for field in self.columns]:
                    missing.append(req)

            if missing:
                errors.append(KeyError(f"""Note model(s) "{holder.part_id}" to Csv config error: \
Definitions for fields {missing} are required."""))

            # Check Fields Align with Note Type
            missing = model.check_field_overlap(
                [field.field_name for field in self.columns
                 if field.field_name not in self.required_fields_definitions]
            )
            missing = [m for m in missing if m not in [field.field_name for field in self.personal_fields]]

            if missing:
                errors.append(
                    KeyError(f"Note model '{holder.part_id}' to Csv config error. "
                             f"It expected {[field.name for field in model.fields]} but was missing: {missing}")
                )

            # Find mappings which do not exist on any note models
            if extra_fields:
                extra_fields = model.check_field_extra(extra_fields)
        if extra_fields:
            errors.append(
                KeyError(f"Field(s) '{extra_fields} are defined as mappings, but match no Note Model's field"))

        if errors:
            raise Exception(errors)

    def csv_row_map_to_note_fields(self, row: dict) -> dict:
        """Rename a CSV row's keys to note-model field names, adding personal fields."""
        relevant_row_data = self.get_relevant_data(row)

        for pf in self.personal_fields:  # Add in Personal Fields
            relevant_row_data.setdefault(pf.field_name, False)

        for column in self.columns:  # Rename from Csv Column to Note Type Field
            if column.value in relevant_row_data:
                relevant_row_data[column.field_name] = relevant_row_data.pop(column.value)

        return relevant_row_data

    def csv_headers_map_to_note_fields(self, row: list) -> list:
        # Header-only variant: map column names through an empty-valued row.
        return list(self.csv_row_map_to_note_fields({row_name: "" for row_name in row}).keys())

    def note_fields_map_to_csv_row(self, row):
        """Inverse mapping: note-model field names back to CSV column names."""
        for column in self.columns:  # Rename from Note Type Field to Csv Column
            if column.field_name in row:
                row[column.value] = row.pop(column.field_name)

        for pf in self.personal_fields:  # Remove Personal Fields
            if pf.field_name in row:
                del row[pf.field_name]

        relevant_row_data = self.get_relevant_data(row)
        return relevant_row_data

    def get_relevant_data(self, row):
        """Strip keys of ``row`` that no column mapping mentions.

        NOTE(review): returns [] (not {}) when no columns are configured,
        while callers treat the result as a dict — confirm intended.
        """
        relevant_columns = [field.value for field in self.columns]
        if not relevant_columns:
            return []

        cols = list(row.keys())
        # errors = [KeyError(f"Missing column {rel_col}") for rel_col in relevant_columns if rel_col not in cols]
        # if errors:
        #     raise Exception(errors)

        irrelevant_columns = [column for column in cols if column not in relevant_columns]
        if not irrelevant_columns:
            return row

        relevant_data = {key: row[key] for key in row if key not in irrelevant_columns}
        return relevant_data

    def field_values_in_note_model_order(self, note_model_name, fields_from_csv):
        # Order CSV values to match the note model's field order; missing -> "".
        return [fields_from_csv[f] if f in fields_from_csv else ""
                for f in self.note_models[note_model_name].part.field_names_lowercase
                ]
| StarcoderdataPython |
11275953 | <filename>app/scienceapi/events/migrations/0003_auto_20160425_1752.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-25 17:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Links events to users (attendees, creator, facilitators) and projects."""

    dependencies = [
        ('projects', '0006_auto_20160425_1615'),
        ('users', '0002_user'),
        ('events', '0002_event'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='attendees',
            field=models.ManyToManyField(blank=True, related_name='events_attended', to='users.User'),
        ),
        migrations.AddField(
            model_name='event',
            name='created_by',
            # default=1 back-fills existing rows with user id 1; the default
            # itself is not kept on the model (preserve_default=False).
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='events_created', to='users.User'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='event',
            name='facilitators',
            field=models.ManyToManyField(blank=True, related_name='events_facilitated', to='users.User'),
        ),
        migrations.AddField(
            model_name='event',
            name='projects',
            field=models.ManyToManyField(blank=True, related_name='events', to='projects.Project'),
        ),
    ]
| StarcoderdataPython |
1997754 | <filename>alpha_vantage/functions/__init__.py<gh_stars>0
from alpha_vantage.functions.timeseries import TimeSeries | StarcoderdataPython |
12847985 | <reponame>medfiras/Bazinga
from userena.forms import EditProfileForm
from userena import views as userena_views
class CustomEditProfileForm(userena_views.EditProfileForm):
    """Userena profile edit form with the 'privacy' field excluded.

    NOTE(review): the base class is taken from userena.views while the Meta
    references userena.forms.EditProfileForm — confirm both resolve to the
    same form class.
    """
    class Meta(EditProfileForm.Meta):
        exclude = EditProfileForm.Meta.exclude + ['privacy']
3308691 | <reponame>SpeagleYao/IP_Final_Project<gh_stars>0
from img_aug import data_generator
from models import *
from loss import *
import numpy as np
import cv2
import torch
# Load the trained CENet model and evaluate it on one validation batch of 10.
model = CENet_My()
model.load_state_dict(torch.load('./pth/CENet_My.pth'))
model.eval()
criterion = DiceLoss()
g_val = data_generator('./data/img_val.npy', './data/tar_val.npy', 10, train=False)
img, tar = g_val.gen()
out = model(img)
# Dice loss computed on the inverted masks (background treated as foreground).
loss_val = criterion(1-out, 1-tar)
print("Loss_val:{0}".format(format(loss_val, ".4f")))
# Binarize at 0.5, scale to 8-bit, and save target|prediction side by side.
out = torch.where(out>=0.5, 1, 0)
out = out.numpy().reshape(10, 224, 224)*255
tar = tar.detach().numpy().reshape(10, 224, 224)*255
for i in range(out.shape[0]):
    a = np.hstack((tar[i], out[i]))
    cv2.imwrite('./prdimg/prdimg'+str(i)+'.png', a)
| StarcoderdataPython |
1935354 | # coding: utf-8
#
# Weather station display for Raspberry and Waveshare 2.7" e-Paper display
# (fetch ThingSpeak data)
#
# Copyright by <NAME>
#
# Documentation and full source code:
# https://github.com/arutz12/Raspberry-Weather-EPD
#
import os
import requests
import json
from dotenv import load_dotenv
# Resolve ThingSpeak credentials from the .env file next to this script.
base_dir = os.path.dirname(os.path.abspath(__file__))
dotenv_path = os.path.join(base_dir, '.env')
load_dotenv(dotenv_path)
TS_READ_API_KEY = os.environ.get('TS_READ_API_KEY')
TS_CHANNEL_ID = os.environ.get('TS_CHANNEL_ID')
# Read-URL requesting only the newest entry of the configured channel.
TS_READ_URL = 'https://api.thingspeak.com/channels/{}/feeds.json?api_key={}&results=1'.format(TS_CHANNEL_ID, TS_READ_API_KEY)
def fetchThingSpeak():
    """Fetch the most recent feed entry of the ThingSpeak channel.

    Returns a dict with keys TEMP, VOLT and UPDATED. On any request,
    decoding, or empty-channel problem the values fall back to 0, so
    callers always receive the same shape.
    """
    # Single fallback value (the original duplicated this literal twice).
    fallback = {'TEMP': 0, 'VOLT': 0, 'UPDATED': 0}
    try:
        r = requests.get(TS_READ_URL)
        # Decoding inside the try: a malformed response now degrades to the
        # fallback instead of raising out of the function.
        ts_result = json.loads(r.content)
    except Exception as e:
        print(e)
        return fallback

    # Guard against both a missing 'feeds' key and an empty feed list
    # (the old feeds[0] access raised IndexError on a fresh channel).
    feeds = ts_result.get('feeds') if ts_result else None
    if not feeds:
        return fallback

    latest = feeds[0]
    return {
        'TEMP': latest['field1'],
        'VOLT': latest['field2'],
        'UPDATED': latest['created_at'],
    }
if __name__ == '__main__':
    # Manual test: fetch and pretty-print the latest ThingSpeak values.
    from pprint import pprint
    ts_data = fetchThingSpeak()
    pprint(ts_data)
| StarcoderdataPython |
3534724 | <filename>classcharts_trello_sync/__init__.py
from .generate_config import configure
from .sync import sync_data
__all__ = (
'configure',
'sync_data',
)
| StarcoderdataPython |
290834 | """
Program written by <NAME>?
MAR/2020 during confinement
"""
import os
import json
import config
from urllib import request
def jsonparsser(location):
    """
    go get the data, make the first parssing and return the data

    Fetches the FranceIX member list for ``location`` ("PAR" or "MRS") and
    returns the decoded JSON, or {} when the request or decoding fails.
    """
    try:
        toreturn = json.load(request.urlopen\
                ("https://www.franceix.net/api/members/list/json?location={}"\
                .format(location)))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; network and JSON errors still fall through here.
        print("Error - request timeout !")
        toreturn = {}
    return toreturn
def confchecker():
    """
    here to tell if the user did put garbage in his config file

    Validates config.location, config.asusage and config.asn, printing one
    message per problem. Returns True only when everything passes.
    """
    no_error = True
    if config.location not in ("PAR", "MRS"):
        print(config.location + " in config is garbage")
        no_error = False
    if not isinstance(config.asusage, bool):
        print("asusage in config must be a boolean")
        no_error = False
    # Reject bools explicitly: isinstance(True, int) is True, but the config
    # must hold a real integer (matches the old `type(...) != int` check).
    if not isinstance(config.asn, int) or isinstance(config.asn, bool):
        print(str(config.asn) + " in config is garbage")
        no_error = False
    if config.asn > 4294967295 or config.asn < 1:
        print(str(config.asn) + " is not a valid ASN")
        no_error = False
    # NOTE(review): these bounds differ from the RFC 6996 private ranges
    # (64512-65534 and 4200000000-4294967294) — confirm they are intended.
    if (64495 < config.asn < 131072) or (399260 < config.asn <= 4294967295):
        print(str(config.asn) + " is a private ASN")
        no_error = False
    return no_error
def main():
    # Abort (returning 0) when the config holds invalid values; otherwise
    # fetch and print the member list for the configured location.
    if confchecker() is False:
        return 0
    print(jsonparsser(config.location))
if __name__ == '__main__':
main()
| StarcoderdataPython |
6504073 | import numpy as np
from time import time
from math import sqrt, pow
#Get list of active taxels per bounding box on the image, create an array of the taxel center
def bb_active_taxel (bb_number, T, bb_predictions_reshaped, TIB, skin_faces):
    """For each bounding box, collect the taxel ids of the skin faces under
    its pixels, the back-projected pixel positions, and summary info.

    Returns three object arrays of length bb_number:
    - taxel_predictions[n]: set of taxel ids (face-vertex indices, deduplicated)
    - pixel_positions[n]: back-projected positions of each covered pixel
    - taxel_predictions_info[n]: [label, confidence, number of active taxels]

    NOTE(review): assumes TIB maps image pixels to mesh face indices and that
    1218 is the number of valid faces — confirm against the mesh data.
    """
    taxel_predictions, pixel_positions,taxel_predictions_info = np.empty((bb_number,), dtype = object), np.empty((bb_number,), dtype = object), np.empty((bb_number,), dtype = object)
    for n in range(bb_number):
        faces_predictions, pixel_position, info = [], [], []
        cols = range(bb_predictions_reshaped[n].coordinates_reshaped[0], bb_predictions_reshaped[n].coordinates_reshaped[2])
        rows = range(bb_predictions_reshaped[n].coordinates_reshaped[1], bb_predictions_reshaped[n].coordinates_reshaped[3])
        for i in cols:
            for j in rows:
                face_index = TIB.get_pixel_face_index( i, j)
                if face_index == (-1) or face_index >= 1218: #checking that taxels are withing boundss
                    break
                #Pixel_Position
                pos_on_map = TIB.get_pixel_position_on_map(i, j)
                pixel_pos = T.back_project_point(pos_on_map, face_index)
                pixel_position.append(pixel_pos)
                #Taxel_IDs_from_faces
                # A face contributes its three vertex taxels.
                faces_predictions.append(skin_faces[face_index][0])
                faces_predictions.append(skin_faces[face_index][1])
                faces_predictions.append(skin_faces[face_index][2])
        taxel_predictions[n] = set(faces_predictions) #set rmoves duplicates
        pixel_positions[n] = pixel_position
        #Prediction info
        info.append(bb_predictions_reshaped[n].label)
        info.append(bb_predictions_reshaped[n].confidence)
        info.append(len(set(faces_predictions)))
        taxel_predictions_info[n] = info #this is the name, conf and # active taxels per prediction
    return taxel_predictions, pixel_positions, taxel_predictions_info
#Get total data for the all the taxels and bounding boxes
def get_total_data(bb_number, S, T, taxel_predictions):
    """Gather per-bounding-box taxel data.

    For every box, collects the response, 3D position and normal of each
    predicted taxel from the skin model ``S``, plus its 2D position on the
    tactile map ``T``. Returns four parallel lists of per-box lists.
    """
    responses, positions_3d, normals, positions_2d = [], [], [], []
    for n in range(bb_number):
        taxel_ids = taxel_predictions[n]
        responses.append([S.taxels[t].get_taxel_response() for t in taxel_ids])
        positions_3d.append([S.taxels[t].get_taxel_position() for t in taxel_ids])
        normals.append([S.taxels[t].get_taxel_normal() for t in taxel_ids])
        positions_2d.append([T.taxels[t].get_taxel_position() for t in taxel_ids])
    return responses, positions_3d, normals, positions_2d
#AVERAGE RESPONSES including taxels with 0 response
def get_average_response_per_BB(bb_number, total_taxel_responses, taxel_predictions_info):
    """Average taxel response per bounding box (zero-response taxels count).

    The divisor is the active-taxel count stored at info[n][2]. Boxes with
    no responses at all are skipped, so the result may be shorter than
    ``bb_number``.
    """
    averages = []
    for n in range(bb_number):
        responses = total_taxel_responses[n]
        if responses:
            averages.append(sum(responses) / taxel_predictions_info[n][2])
    return averages
#2D AND 3D CENTROID OF BB
def get_bb_centroids(bb_number,S,T, total_taxels_2D_position, taxel_coords):
    """Per bounding box: 2D centroid of its taxels on the tactile map, plus
    the corresponding 3D point back-projected onto the skin surface.

    Boxes with no taxels yield empty lists in both outputs.
    """
    bb_centroid2d, bb_centroid3d = np.empty((bb_number,), dtype = object), np.empty((bb_number,), dtype = object)
    for n in range(bb_number):
        average_position = [0.0,0.0,0.0]
        if len(total_taxels_2D_position[n]) != 0:
            # Arithmetic mean of the 2D taxel positions.
            for i,val in enumerate(total_taxels_2D_position[n]):
                average_position[0] = average_position[0] + val[0]
                average_position[1] = average_position[1] + val[1]
                average_position[2] = average_position[2] + val[2] #z should be 0 anyway
            average_position[0] = average_position[0] / len(total_taxels_2D_position[n])
            average_position[1] = average_position[1] / len(total_taxels_2D_position[n])
            average_position[2] = average_position[2] / len(total_taxels_2D_position[n])
            bb_centroid2d[n]=average_position
            #used for projecting a 2D centroid on the tactile map to a 3D point
            bb_centroid3d[n] = back_project_centroid(S, T, bb_centroid2d[n], taxel_coords)
        else:
            bb_centroid2d[n] = []
            bb_centroid3d[n] = []
    return bb_centroid2d, bb_centroid3d
#BB NORMALS, i put the minus here
def get_bb_average_normals(bb_number, total_taxel_normals):
    """Average the taxel normals of each bounding box, flipping the sign.

    The minus flips the averaged normal's direction (as in the original
    comment above). A box without normals yields an empty list.
    """
    bb_normal = np.empty((bb_number,), dtype=object)
    for n in range(bb_number):
        normals = total_taxel_normals[n]
        if len(normals) != 0:
            count = len(normals)
            bb_normal[n] = [
                -sum(vec[axis] for vec in normals) / count
                for axis in range(3)
            ]
        else:
            bb_normal[n] = []
    return bb_normal
#BACK PROJECT A POINT FROM 2D MAP TO 3D
def back_project_centroid(S, T, bb_centroid2d, taxel_coords):
    """Map a 2D point on the tactile map to 3D skin coordinates.

    Finds the three taxels closest to the point in 2D, expresses the point
    in barycentric coordinates of that triangle, then applies the same
    weights to the taxels' 3D positions from ``S``.
    """
    #initializing
    centroid_3d, P, B, C = [0.0,0.0,0.0], [0.0,0.0], [0.0,0.0], [0.0,0.0]
    #finding the indexes of the 3 closest points, with numpy is very fast
    difference = np.subtract(taxel_coords, bb_centroid2d)
    diff_pow2 = np.square(difference)
    diff_sum = np.sum(diff_pow2, axis=1)
    # Squaring again is a monotone transform: the distance ordering (and
    # therefore argsort) is unchanged.
    diff_squared = np.square(diff_sum)
    minimum_indexes = diff_squared.argsort()[:3]
    a, b, c = T.taxels[minimum_indexes[0]].get_taxel_position(), T.taxels[minimum_indexes[1]].get_taxel_position(), T.taxels[minimum_indexes[2]].get_taxel_position()
    #Compute the cofficents of the convex combination
    # P is the point relative to vertex a; B and C are the triangle edges.
    P[0], P[1], B[0], B[1], C[0], C[1] = bb_centroid2d[0]-a[0], bb_centroid2d[1]-a[1], b[0]-a[0], b[1]-a[1], c[0]-a[0], c[1]-a[1]
    # d is twice the signed triangle area; zero for a degenerate triangle
    # (would divide by zero — assumed not to happen for distinct taxels).
    d = B[0]*C[1] - C[0]*B[1]
    wa, wb, wc = (P[0]*(B[1]-C[1]) + P[1]*(C[0]-B[0]) + B[0]*C[1] - C[0]*B[1]) / d, (P[0]*C[1] - P[1]*C[0]) / d, (P[1]*B[0] - P[0]*B[1]) / d
    v1, v2, v3 = S.taxels[minimum_indexes[0]].get_taxel_position(), S.taxels[minimum_indexes[1]].get_taxel_position(), S.taxels[minimum_indexes[2]].get_taxel_position()
    centroid_3d[0], centroid_3d[1], centroid_3d[2] = wa*v1[0] + wb*v2[0] + wc*v3[0], wa*v1[1] + wb*v2[1] + wc*v3[1], wa*v1[2] + wb*v2[2] + wc*v3[2]
    return centroid_3d
| StarcoderdataPython |
6443410 | <filename>qiskit/algorithms/phase_estimators/phase_estimator.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Phase Estimator interface."""
from typing import Optional
from abc import ABC, abstractmethod, abstractproperty
from qiskit.circuit import QuantumCircuit
from qiskit.algorithms.algorithm_result import AlgorithmResult
class PhaseEstimator(ABC):
    """The Phase Estimator interface.

    Algorithms that can compute a phase for a unitary operator and
    initial state may implement this interface to allow different
    algorithms to be used interchangeably.
    """

    @abstractmethod
    def estimate(self,
                 unitary: Optional[QuantumCircuit] = None,
                 state_preparation: Optional[QuantumCircuit] = None,
                 pe_circuit: Optional[QuantumCircuit] = None,
                 num_unitary_qubits: Optional[int] = None) -> 'PhaseEstimatorResult':
        """Estimate the phase.

        All arguments are optional here; which combinations are accepted
        (a ``unitary`` plus optional ``state_preparation``, or a pre-built
        ``pe_circuit`` with ``num_unitary_qubits``) is defined by each
        implementation.
        """
        raise NotImplementedError
class PhaseEstimatorResult(AlgorithmResult):
    """Phase Estimator Result."""

    # NOTE(review): abstractproperty has been deprecated since Python 3.3;
    # the modern spelling is @property stacked on @abstractmethod.
    @abstractproperty
    def most_likely_phase(self) -> float:
        r"""Return the estimated phase as a number in :math:`[0.0, 1.0)`.

        1.0 corresponds to a phase of :math:`2\pi`. It is assumed that the input vector is an
        eigenvector of the unitary so that the peak of the probability density occurs at the bit
        string that most closely approximates the true phase.
        """
        raise NotImplementedError
| StarcoderdataPython |
5095709 | # Generated by Django 2.0.6 on 2018-06-05 09:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the portal UserProfile model (contact, social and bio fields)."""

    dependencies = [
        ('portal', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('gender', models.CharField(max_length=10)),
                ('gmail', models.EmailField(max_length=254)),
                ('linkedin', models.CharField(max_length=30)),
                ('skype_id', models.CharField(max_length=30)),
                ('about_me', models.CharField(max_length=500)),
                ('address', models.CharField(max_length=200)),
                # NOTE(review): field name 'birthaday' (sic) is already in the
                # schema; renaming it needs a follow-up migration, not an edit here.
                ('birthaday', models.DateField()),
                ('job_title', models.CharField(max_length=20)),
                ('location', models.CharField(max_length=20)),
            ],
        ),
    ]
| StarcoderdataPython |
12850004 | from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.contrib import messages
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic.simple import direct_to_template
from schedule.models import Event
from schedule.periods import Period
from profiles.models import *
from profiles.forms import ProfileForm, ProfileSkillsForm
from profiles.controllers import tag_clean
from django.contrib.auth.decorators import login_required
from django.utils import simplejson
def userprofile(request, username=None, template_name='profiles/profile.html'):
    """Render the profile page with an empty ProfileForm.

    ``username`` is accepted from the URLconf but currently unused — TODO
    confirm whether a per-user lookup was intended here.
    """
    form = ProfileForm()
    return render_to_response(template_name, locals(),
                              context_instance=RequestContext(request))
def ajax_view(request, profile_id, skill_id, verb):
    """AJAX endpoint applying *verb* to a (profile, skill) pair.

    Only the "remove" verb is implemented; it replies with a JSON status
    dict.  Any other verb returns a plain error string.
    (Python 2 syntax: ``except Exception, e``.)
    """
    datadict = {'profile_id':profile_id, 'skill_id':skill_id, 'verb':verb}
    profile = Profile.objects.get(id = profile_id)
    skill = Skill.objects.get(id = skill_id)
    if verb == "remove":
        try:
            profile.skills.remove(skill)
        except Exception, e:
            datadict['status'] = "failure"
        else:
            datadict['status'] = "success"
        return HttpResponse(simplejson.dumps(datadict))
    else:
        return HttpResponse("verb unrecognized")
def ajax_toggle_availability(request):
    """Flip the logged-in user's availability flag and reply with JSON.

    The payload's ``status`` is "failure" unless the toggle succeeded, in
    which case the new ``availability`` value is included as well.
    """
    payload = {'status': "failure"}
    if request.user.is_authenticated():
        try:
            profile = request.user.get_profile()
            profile.is_available = not profile.is_available
            profile.save()
        except:
            # Best-effort: any failure leaves status == "failure".
            pass
        else:
            payload['status'] = "success"
            payload['availability'] = profile.is_available
    return HttpResponse(simplejson.dumps(payload))
def set_availability(request, set_status):
    """Set the logged-in user's availability flag from *set_status* ("0"/"1").

    Replies with a JSON status payload; non-AJAX callers get the same JSON
    prefixed with a reminder to enable JavaScript.
    """
    payload = {'status': "failure"}
    if request.user.is_authenticated():
        try:
            profile = request.user.get_profile()
            # "0"/"1" string -> int -> bool; a bad value raises and is swallowed.
            profile.is_available = bool(int(set_status))
            profile.save()
        except:
            pass
        else:
            payload['status'] = "success"
            payload['availability'] = profile.is_available
    if request.is_ajax():
        return HttpResponse(simplejson.dumps(payload))
    return HttpResponse("GOOBER!, ENABLE JAVASCRIPT! " + simplejson.dumps(payload))
def _can_view_full_profile(user):
    """Return whether *user* may see full (non-public) profile details."""
    # for now just check if user is logged in, later there may be karma and/or
    # other requirements.
    return user.is_authenticated()
def list_profiles_by_skill(request, skill):
    """List users whose profile has a skill whose name contains *skill*."""
    matching_skills = Skill.objects.filter(name__contains=skill)
    users_qs = User.objects.filter(profile__skills__in=matching_skills).distinct()
    return list_profiles(request, qs=users_qs)
def list_profiles(request, qs=None, template_name='profiles/list_profiles.html'):
    """Display a list of Users.

    If qs is None, all Users are listed; otherwise the given queryset is
    used.  The template context comes from ``locals()``, so the ``users``
    name is part of the template contract.
    """
    # Identity check: ``qs == None`` would invoke queryset equality instead
    # of testing for the missing-argument sentinel.
    if qs is None:
        users = User.objects.all()
    else:
        users = qs
    return render_to_response(template_name, locals(),
        context_instance=RequestContext(request))
def view_profile(request, username, template_name='profiles/view_profile.html'):
    """Public profile page for *username*, with their events over 30 days.

    Context is passed via ``locals()``; the template relies on these names
    (``user``, ``display_full_profile``, ``office_hours``, ...).
    """
    user = get_object_or_404(User, username=username)
    display_full_profile = _can_view_full_profile(request.user)
    events = Event.objects.filter(creator=user)
    start = datetime.now()
    end = start + timedelta(days=30)
    # Occurrences of the user's events within the next 30 days.
    period = Period(events=events, start=start, end=end)
    office_hours = period.get_occurrences()
    return render_to_response(template_name, locals(),
        context_instance=RequestContext(request))
@login_required
def profile(request, template_name="profiles/edit_profile.html"):
    """Edit the logged-in user's profile and skills.

    POST requests carry an ``origin`` field selecting which sub-form was
    submitted ("profile" -> profile form, anything else -> skills form);
    GET simply renders both forms.
    """
    user = request.user
    profile = user.profile
    def update_profile():
        # Bind posted data to the user's existing profile instance.
        profile_form = ProfileForm(data=request.POST,
            instance=request.user.get_profile())
        if profile_form.is_valid():
            profile_form.save()
            messages.success(request, 'Profile updated.')
    def update_skills():
        # Lowercase everything so tags are case-insensitive.
        tag_list = request.POST.get('skills_text').lower().split(',')
        for tag in tag_list:
            if tag and tag != '':
                # Strip excess whitespace from the tag.
                tag = tag_clean(tag)
                skill, created = Skill.objects.get_or_create(name=tag)
                profile.skills.add(skill)
        psf = ProfileSkillsForm(request.POST)
        if psf.is_valid():
            skills_list = Skill.objects.filter(id__in = psf.cleaned_data.get('skills'))
            for skill in skills_list:
                profile.skills.add(skill)
        profile.save()
        messages.success(request, 'Skills updated.')
    if request.method == "POST":
        origin = request.POST.get('origin')
        if origin == "profile":
            update_profile()
        else: #origin == "skill":
            update_skills()
    profile_form = ProfileForm(instance=request.user.get_profile())
    skill_form = ProfileSkillsForm()
    skills = profile.skills.all()
    events = Event.objects.filter(creator=user)
    start = datetime.now()
    end = start + timedelta(days=30)
    # Flatten each event's occurrences over the next 30 days into one list
    # (Python 2: ``reduce`` is a builtin here).
    office_hours = reduce(lambda x,y: x+y, [e.get_occurrences(start, end)
        for e in events]) if events else []
    return direct_to_template(request, template_name,
        {'skill_form':skill_form,
        'profile_form':profile_form,
        'profile':profile,
        'skills':skills,
        'editable':True,
        'office_hours':office_hours})
| StarcoderdataPython |
3260942 | import stocklab
# Presumably registers/builds this file as a stocklab data bundle at import
# time (side effect) -- confirm against the stocklab documentation.
stocklab.bundle(__file__)
| StarcoderdataPython |
3399331 | from abc import ABC
from flask import Blueprint
from flask import url_for
from Config import Config
class AbstractService(ABC):
    """Base class for services.

    Each concrete service owns a Flask :class:`Blueprint` whose name is the
    lowercased name of the concrete subclass.
    """

    def __init__(self):
        # Derive the service name from the concrete subclass's class name.
        name = type(self).__name__.lower()
        self.serviceName = name
        self.blueprint = Blueprint(name, name)
5131991 | <filename>celerytask/tasks.py
# encoding:utf-8
from celery import Celery
import os
import time
from django.conf import settings
# Make Django settings importable before Celery loads any task modules.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'osf.settings')
#os.environ['CELERY_CONFIG_MODULE'] = 'celerytask.celeryconfig'
app = Celery('test')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('celerytask.celeryconfig')
# Auto-register tasks from every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task()
def add_task(name):
    """Demo task: print a greeting once per second for nine seconds, return 1.

    (Python 2 ``print`` statement syntax.)
    """
    for i in range(1,10):
        print 'hello:%s %s'%(name,i)
        time.sleep(1)
    return 1
if __name__ == '__main__':
app.start() | StarcoderdataPython |
11229755 | from airtravel import *
from pprint import pprint as pp
# Demo/driver script exercising the airtravel module: seat allocation,
# relocation, availability counts and boarding-card printing.
f = Flight("BA777", Aircraft("G-EUPT", "Airbus A312", num_rows=22, num_seats_per_row=6))
print(f.aircraft_model())
f = Flight("BA758", Aircraft("G-RUPT", "Airbus A319", num_rows=22, num_seats_per_row=6))
# pp(f.seating) # all seats free
f.allocate_seat("12A", "<NAME>")
f.allocate_seat("15F", "<NAME>")
# f.allocate_seat("12A", "<NAME>") -> Will Raise an Error "12A" already taken.
# f.allocate_seat("DD", "<NAME>") -> Will Raise an Error "DD" Invalid seat place.
# pp(f.seating) # after we allocated some seats :)
f2 = make_flight()
pp(f2.seating)
print("---------------------------------------------------------------------")
f2.relocate_passenger("12A", "15D")
pp(f2.seating)
f3 = make_flight()
print(f3.num_available_seats())
print("---------------------------------------------------------------------")
f = make_flight()
f.make_boarding_cards(console_card_printer)
print("---------------------------------------------------------------------")
print("---------------------------------------------------------------------")
print("---------------------------------------------------------------------")
print("---------------------------------------------------------------------")
# make_flights() returns two pre-populated flights.
f4, g = make_flights()
print(f4.aircraft_model())
print(g.aircraft_model())
print(f4.num_available_seats())
print(g.num_available_seats())
g.relocate_passenger("55K", "13G")
g.make_boarding_cards(console_card_printer)
a = AirbusA319("G-EZBT")
print(a.num_seats())
b = Boeing777("N717AN")
print(b.num_seats())
| StarcoderdataPython |
8196242 | import typing
import unittest
class SnakeTestCase(unittest.TestCase):
    """Unit tests for the snake game plus a few Python-behavior sanity checks."""

    def test_snake(self):
        """Drive the game with key sequences and check the crash flag."""
        from core import Snake, Action
        key2direction: typing.Dict[str, int] = {
            'w': Action.UP,
            's': Action.DOWN,
            'a': Action.LEFT,
            'd': Action.RIGHT
        }
        # (board shape, key presses, expected crash flag after the sequence)
        test_case_set: typing.List[typing.Tuple[typing.Tuple[int, int], str, int]] = [
            ((5, 5), '....', 1),
            ((5, 5), 'aaaa', 1),
            ((5, 5), '...w', 0),
        ]
        for shape, keys, expected in test_case_set:
            game: Snake = Snake(shape)
            flag = -1
            for key in keys:
                observation, reward, is_crash, info = game.step(key2direction.get(key, Action.NONE))
                flag = 1 if is_crash else 0
            self.assertEqual(expected, flag)

    def test_matrix_value_dict(self):
        """Tuples of tuples are hashable and usable as dict keys."""
        n: int = 10
        q: typing.Dict[typing.Tuple, int] = {}
        matrix_list: typing.List[typing.List[typing.Tuple]] = []
        for i in range(n):
            matrix_list.append([tuple([i, i + 1]), tuple([i + 2, i + 3])])
            q[tuple(matrix_list[i])] = i
        for i in range(n):
            self.assertEqual(i, q[tuple(matrix_list[i])])

    def test_copy_call(self):
        """A deepcopy-returning property yields a distinct object each call."""
        import copy

        class Object(object):
            def __init__(self):
                self.__data__ = [1, 2, 3]

            @property
            def data(self):
                return copy.deepcopy(self.__data__)

        obj = Object()
        self.assertNotEqual(id(obj.__data__), id(obj.data))

    def test_string_to_tuple(self):
        """Parse '(2, 3)' back into an int tuple via a regex."""
        import re
        string = '(2, 3)'
        result = tuple(map(int, re.findall(r'\d+', string)))
        self.assertEqual(tuple, type(result))
        self.assertEqual(2, len(result))
        self.assertEqual(2, result[0])
        self.assertEqual(3, result[1])
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11387644 | <filename>tests/mocks/mocked_redis.py
from typing import Dict, Optional
class MockedRedis:
    """In-memory stand-in for ``aioredis.Redis`` used in tests.

    Implements only the coroutine methods the code under test needs
    (get/set/delete/flushdb/keys); values live in a plain dict.
    """

    def __init__(self):
        self._data: Dict[str, dict] = {}

    @property
    def cache(self) -> Dict:
        """Direct access to the backing dict (test-inspection helper)."""
        return self._data

    async def get(self, key) -> Optional[dict]:
        """Return the stored value, or None when *key* is absent."""
        # dict.get replaces the original bare ``except:`` (which swallowed
        # every exception, not just the missing-key case).
        return self._data.get(key)

    async def set(self, key, value):
        """Store *value* under *key* (no TTL support)."""
        self._data[key] = value

    async def delete(self, key):
        """Remove *key*; deleting a missing key is a no-op, as in Redis."""
        self._data.pop(key, None)

    async def flushdb(self, *args, **kwargs):
        """Drop every stored key (extra arguments accepted for API parity)."""
        self._data = {}

    async def keys(self, pattern: str):
        """Tiny subset of Redis ``KEYS``.

        Patterns starting with "GUILD" list guild keys; otherwise the
        pattern is assumed to look like "MEMBER:<guild_id>:<wildcard>" and
        member keys for that guild are listed.
        """
        if pattern.startswith("GUILD"):
            return self._get_guilds()
        _prefix, guild_id, _wildcard = pattern.split(":")
        return self._get_members(guild_id)

    def _get_guilds(self):
        # Keys are returned as bytes to match the real Redis client.
        return [
            key.encode("utf-8") for key in self._data.keys() if key.startswith("GUILD")
        ]

    def _get_members(self, guild_id):
        return [
            key.encode("utf-8")
            for key in self._data.keys()
            if key.startswith(f"MEMBER:{guild_id}")
        ]
255579 | <filename>hubblestack/utils/__init__.py
# coding: utf-8
from hubblestack.utils.process import daemonize
| StarcoderdataPython |
11274490 | import autograd.numpy as np
from autograd import grad
from autograd import jacobian
from src.maths.func_stats import *
class Prior(object):
    """Base class for prior distributions over model parameters.

    Subclasses implement ``prior`` (and typically ``single``); this base
    class derives log-densities and their autograd gradients/Hessians.
    """

    def single(self, theta):
        """Univariate probability density function (subclass hook)."""
        # The original returned ``NotImplemented`` (the binary-operator
        # sentinel); raising is the correct idiom for an abstract hook --
        # otherwise callers like np.log(...) would operate on the sentinel.
        raise NotImplementedError

    def prior(self, theta):
        """Prior density p(theta) (subclass hook)."""
        raise NotImplementedError

    def log_prior(self, theta):
        """Log prior density."""
        return np.log(self.prior(theta))

    def neg_log_prior(self, theta):
        """Negative log prior density."""
        return - self.log_prior(theta)

    def log_prior_grad(self, theta):
        """Gradient of the log prior (computed with autograd)."""
        grad_fun = grad(self.log_prior)
        return grad_fun(theta)

    def log_prior_hes(self, theta):
        """Hessian of the log prior."""
        hessian = jacobian(self.log_prior_grad)
        return hessian(theta)

    def neg_log_prior_grad(self, theta):
        """Gradient of the negative log prior."""
        grad_fun = grad(self.neg_log_prior)
        return grad_fun(theta)

    def neg_log_prior_hes(self, theta):
        """Hessian of the negative log prior."""
        hessian = jacobian(self.neg_log_prior_grad)
        return hessian(theta)

    def __call__(self, theta):
        return self.prior(theta)
################################################################################
# priors for the linear parameter + noise parameter
################################################################################
class Gaussian_exp_prior(Prior):
    """Gaussian prior on the linear parameters plus an exponential prior
    on the noise scale.

        beta  ~ N(mean, std**2)   (i.i.d. per coordinate)
        sigma ~ Exp(lambda_)

    Parameters
    ----------
    mean : float
        Mean of the isotropic Gaussian prior on beta.
    std : float > 0
        Standard deviation of the Gaussian prior on beta.
    lambda_ : float
        Rate of the exponential prior on sigma.

    Notes
    -----
    ``theta`` is laid out as ``[sigma, beta...]``: ``theta[0]`` is the
    noise scale, ``theta[1:]`` are the linear parameters.
    """
    def __init__(self,mean,std, lambda_):
        self.mean = mean
        self.std = std
        self.lambda_ = lambda_
        self.name = "gaussian and exponential"
    def prior(self,theta):
        # Joint density p(beta) * p(sigma).
        prior_beta = normal(theta[1:],self.mean, self.std)
        prior_sigma = exponential(theta[0],self.lambda_)
        return prior_beta*prior_sigma
    def log_prior(self,theta):
        # Sum per-coordinate log densities (prod=False returns them unreduced).
        log_prior_beta = np.sum(np.log(normal(theta[1:],self.mean,self.std, prod = False)))
        log_prior_sigma = np.sum(np.log(exponential(theta[0], lambda_ = self.lambda_,prod = False)))
        return log_prior_beta + log_prior_sigma
class Gaussian_gamma_prior(Prior):
    """Gaussian prior on the linear parameters plus a gamma prior on the
    positive parameter ``theta[0]`` (described by the original authors as
    the degrees of freedom of a Student-t noise model).

        beta ~ N(mean, std**2)   (i.i.d. per coordinate)
        df   ~ Gamma(alpha, beta)

    Parameters
    ----------
    mean : float
        Mean of the isotropic Gaussian prior on beta.
    std : float > 0
        Standard deviation of the Gaussian prior on beta.
    alpha, beta : float
        Shape and rate of the gamma prior on ``theta[0]``.

    Notes
    -----
    ``theta`` is laid out as ``[df, beta...]``: ``theta[0]`` is the gamma
    distributed parameter, ``theta[1:]`` are the linear parameters.
    """
    def __init__(self,mean,std, alpha, beta):
        self.mean = mean
        self.std = std
        self.alpha = alpha
        self.beta = beta
        self.name = "gaussian and gamma"
    def prior(self,theta):
        # Joint density p(beta) * p(df).
        prior_beta = normal(theta[1:],self.mean, self.std)
        prior_df = gamma_(theta[0],self.alpha, self.beta)
        return prior_beta*prior_df
    def log_prior(self,theta):
        # Sum per-coordinate log densities (prod=False returns them unreduced).
        log_prior_beta = np.sum(np.log(normal(theta[1:],self.mean,self.std,prod = False)))
        log_prior_df = np.sum(np.log(gamma_(theta[0],self.alpha, self.beta,prod = False)))
        return log_prior_beta + log_prior_df
################################################################################
# priors for the linear parameter only
################################################################################
class Gaussian_prior(Prior):
    """Isotropic Gaussian prior on the whole parameter vector.

        theta ~ N(mean, std**2), i.i.d. per coordinate

    Parameters
    ----------
    mean : float
        Shared mean of every coordinate.
    std : float > 0
        Shared standard deviation of every coordinate.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
        self.name = "gaussian"

    def prior(self, theta):
        """Gaussian density evaluated at theta."""
        return normal(theta, self.mean, self.std)

    def log_prior(self, theta):
        """Sum of per-coordinate Gaussian log densities."""
        per_coord = normal(theta, self.mean, self.std, prod = False)
        return np.sum(np.log(per_coord))
| StarcoderdataPython |
4802999 | from copy import deepcopy
from IPython.nbconvert.preprocessors import Preprocessor
from IPython.utils.traitlets import Unicode
class CherryPickingPreprocessor(Preprocessor):
    """nbconvert preprocessor that keeps only cells whose metadata tags
    satisfy ``expression`` -- a Python expression over tag names treated as
    booleans.  (Python 2 code: uses the ``exec`` statement.)
    """

    expression = Unicode('True', config=True, help="Cell tag expression.")

    def preprocess(self, nb, resources):
        """Drop non-matching cells and stash a deep copy of the result."""
        # Loop through each cell, remove cells that dont match the query.
        for worksheet in nb.worksheets:
            remove_indicies = []
            for index, cell in enumerate(worksheet.cells):
                if not self.validate_cell_tags(cell):
                    remove_indicies.append(index)
            # Delete from the end so earlier indices stay valid.
            for index in remove_indicies[::-1]:
                del worksheet.cells[index]
        resources['notebook_copy'] = deepcopy(nb)
        return nb, resources

    def validate_cell_tags(self, cell):
        """Cells without a ``cell_tags`` metadata entry never match."""
        if 'cell_tags' in cell['metadata']:
            return self.eval_tag_expression(cell['metadata']['cell_tags'], self.expression)
        return False

    def eval_tag_expression(self, tags, expression):
        # SECURITY NOTE(review): tag names and the configured expression are
        # exec'd/eval'd directly, so a crafted notebook or config can run
        # arbitrary code.  Only use on trusted input.
        # Create the tags as True booleans. This allows us to use python
        # expressions.
        for tag in tags:
            exec tag + " = True"
        # Attempt to evaluate expression. If a variable is undefined, define
        # the variable as false.
        while True:
            try:
                return eval(expression)
            except NameError as Error:
                exec str(Error).split("'")[1] + " = False"
| StarcoderdataPython |
9678815 | <reponame>alphagov/sandbox-mgt
import os
import base64
from django.conf import settings
from django.http import HttpResponse
def basic_challenge(realm='Restricted Access'):
    """Build a 401 response asking the client for HTTP Basic credentials."""
    response = HttpResponse('Authorization Required')
    response.status_code = 401
    response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
    return response
def basic_authenticate(authentication):
    """Validate an HTTP ``Authorization`` header value against settings.

    Returns True/False for a Basic credential match, or None when the
    scheme is not Basic.
    """
    (authmeth, auth) = authentication.split(' ', 1)
    if 'basic' != authmeth.lower():
        return None
    auth = base64.b64decode(bytes(auth, 'utf-8')).decode()
    username, password = auth.split(':', 1)
    auth_username = settings.HTTP_USERNAME
    auth_password = settings.HTTP_PASSWORD
    # Restores the comparison against the configured password (the source
    # contained a redaction artifact here).
    # NOTE(review): plain == comparison is timing-attack prone; consider
    # hmac.compare_digest -- confirm the threat model before changing.
    return username == auth_username and password == auth_password
class BasicAuthenticationMiddleware(object):
    """Django middleware enforcing HTTP Basic auth when HTTP_USERNAME is set.

    Auth is disabled entirely when ``settings.HTTP_USERNAME`` is falsy.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if not settings.HTTP_USERNAME:
            return self.get_response(request)
        if 'HTTP_AUTHORIZATION' not in request.META:
            return basic_challenge()
        if not basic_authenticate(request.META['HTTP_AUTHORIZATION']):
            return basic_challenge()
        # Only invoke the downstream view once credentials check out; the
        # original called get_response() first, so unauthenticated requests
        # still executed the view (and its side effects) before being
        # challenged.
        return self.get_response(request)
| StarcoderdataPython |
def assignment(a: bool, b: str, c: int, d: int) -> int:
    """Exercise plain and chained assignment forms.

    NOTE(review): this module looks like a fixture set for a static
    analyzer (plain/annotated/augmented/chained assignment shapes), so the
    exact statement forms are likely intentional -- do not "simplify".
    """
    e = a
    f = b
    g = c
    h = c + d
    # Chained assignment: one value bound to three targets.
    j = k = l = c + d + g + h
    return j + k + l if a and e or f == "hello" else 0
def annotated_assignment(a: bool, b: str, c: int, d: int) -> int:
    """Exercise annotated assignment (PEP 526) targets."""
    e: int = 3
    f: bool = a
    g: str = b
    h: int = c + d
    return e + h if a and f and g == "hello" else 0
def aug_assign(a: int, b: int) -> int:
    """Exercise augmented assignment operators (+= -= /= //= *=).

    Note: ``c /= (a + b)`` is true division, so ``c`` becomes a float and
    the runtime result is a float despite the ``int`` annotation.
    """
    c = 0
    c += a
    c -= b
    c /= (a + b)
    c //= a
    c *= b
    return c
def repeated_assign(a: str, b: str) -> str:
    """Reassign the same name along successive branches.

    Net effect: returns "equal" when a == b, otherwise returns b.
    """
    c = a
    if a != b:
        c = b
    if b == a:
        c = "equal"
    return c
def pathological(a: bool, b: str, c: str) -> str:
    """Branch-dependent definite assignment: d is bound on both paths."""
    if a:
        d = b
    else:
        d = c
    return d
| StarcoderdataPython |
3510479 | """General interface for a planner.
"""
import abc
import numpy as np
class Planner:
    """Base class for PDDLGym planners.

    Subclasses implement ``__call__`` to produce a plan; this base class
    only manages a statistics dictionary.

    Note(review): the class does not inherit ``abc.ABC``, so the
    ``abstractmethod`` marker is not enforced and the class is
    instantiable -- confirm whether that is intentional.
    """

    def __init__(self):
        self._statistics = {}

    def reset_statistics(self):
        """Clear the collected statistics."""
        self._statistics = {}

    def get_statistics(self):
        """Return the statistics dictionary collected so far."""
        return self._statistics

    @abc.abstractmethod
    def __call__(self, domain, state, horizon=np.inf, timeout=10,
                 return_files=False, translate_separately=False):
        """Return a plan for the given PDDLGym domain and state.

        The goal is already part of the state (``state.goal``); an env's
        domain is available as ``env.domain``.
        """
        raise NotImplementedError("Override me!")
class PlanningFailure(Exception):
    """Raised when the planner cannot find a plan."""
class PlanningTimeout(Exception):
    """Raised when planning exceeds its time budget."""
| StarcoderdataPython |
29246 | <filename>Libs/Scene Recognition/SceneRecognitionCNN.py
import torch.nn as nn
from torchvision.models import resnet
class SceneRecognitionCNN(nn.Module):
    """Scene-recognition classifier built on a torchvision ResNet backbone.

    Parameters
    ----------
    arch : str
        Either 'ResNet-18' or 'ResNet-50'.  NOTE(review): any other value
        leaves ``base``/``size_fc_RGB`` unbound and raises NameError below
        -- consider validating explicitly.
    scene_classes : int
        Number of output scene categories (default 1055).
    """
    def __init__(self, arch, scene_classes=1055):
        super(SceneRecognitionCNN, self).__init__()
        # --------------------------------#
        #          Base Network           #
        # ------------------------------- #
        if arch == 'ResNet-18':
            # ResNet-18 Network
            base = resnet.resnet18(pretrained=True)
            # Size parameters for ResNet-18
            size_fc_RGB = 512
        elif arch == 'ResNet-50':
            # ResNet-50 Network
            base = resnet.resnet50(pretrained=True)
            # Size parameters for ResNet-50
            size_fc_RGB = 2048
        # --------------------------------#
        #           RGB Branch            #
        # ------------------------------- #
        # First initial block; MaxPool2d(return_indices=True) makes this
        # Sequential return (tensor, pooling indices).
        self.in_block = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)
        )
        # Encoder (reuses the pretrained ResNet stages)
        self.encoder1 = base.layer1
        self.encoder2 = base.layer2
        self.encoder3 = base.layer3
        self.encoder4 = base.layer4
        # -------------------------------------#
        #            RGB Classifier            #
        # ------------------------------------ #
        self.dropout = nn.Dropout(0.3)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(size_fc_RGB, scene_classes)
        # Loss
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        """
        Network forward pass.
        :param x: RGB image batch
        :return: Scene recognition logits (pre-softmax)
        """
        # --------------------------------#
        #           RGB Branch            #
        # ------------------------------- #
        x, pool_indices = self.in_block(x)
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        # -------------------------------------#
        #            RGB Classifier            #
        # ------------------------------------ #
        act = self.avgpool(e4)
        act = act.view(act.size(0), -1)
        act = self.dropout(act)
        act = self.fc(act)
        return act
    def loss(self, x, target):
        """
        Compute the classification loss.
        :param x: Predictions (logits) obtained from the network
        :param target: Ground-truth scene recognition labels
        :return: Loss value
        """
        # Check inputs (batch sizes must agree)
        assert (x.shape[0] == target.shape[0])
        # Classification loss
        loss = self.criterion(x, target.long())
        return loss
| StarcoderdataPython |
12807355 | <gh_stars>1-10
# Generated by Django 2.0.6 on 2018-07-04 02:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the tb_subject and tb_teacher tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('no', models.AutoField(db_column='sno', primary_key=True, serialize=False, verbose_name='编号')),
                ('name', models.CharField(db_column='sname', max_length=50, verbose_name='学科名称')),
                ('intro', models.CharField(db_column='sintro', max_length=511, verbose_name='学科介绍')),
            ],
            options={
                'db_table': 'tb_subject',
            },
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('no', models.AutoField(db_column='tno', primary_key=True, serialize=False, verbose_name='编号')),
                ('name', models.CharField(db_column='tname', max_length=20, verbose_name='姓名')),
                ('intro', models.CharField(db_column='tintro', max_length=1023, verbose_name='简介')),
                ('motto', models.CharField(db_column='tmotto', max_length=255, verbose_name='教学理念')),
                ('photo', models.CharField(blank=True, db_column='tphoto', max_length=511, null=True)),
                ('manager', models.BooleanField(db_column='tmanager', default=False, verbose_name='是否主管')),
                # PROTECT: a Subject with Teachers cannot be deleted.
                ('subject', models.ForeignKey(db_column='sno', on_delete=django.db.models.deletion.PROTECT, to='demo.Subject', verbose_name='所属学科')),
            ],
            options={
                'db_table': 'tb_teacher',
                'ordering': ('name',),
            },
        ),
    ]
| StarcoderdataPython |
86618 | <filename>exec/bnc.py
import json
import os
from pyconversations.reader import BNCReader
if __name__ == '__main__':
    # Read BNC conversations, tally "AH" (presumably ad-hominem, per the
    # AH=1 tag -- confirm) vs non-AH message/conversation counts, and dump
    # all conversations as JSON lines.
    data_root = '/Users/hsh28/data/'
    out = data_root + 'conversations/'
    os.makedirs(out + 'Reddit/BNC/', exist_ok=True)
    convos = BNCReader.read(data_root + 'BNC/*', ld=True)
    cache = []
    messages = {
        'all': 0,
        'ah': 0,
        'non': 0
    }
    conversations = {
        'all': 0,
        'ah': 0,
        'non': 0,
    }
    for convo in convos:
        cache.append(json.dumps(convo.to_json()))
        # A conversation counts as "ah" if any of its posts is tagged AH=1.
        ah = False
        for post in convo.posts.values():
            messages['all'] += 1
            if 'AH=1' in post.tags:
                messages['ah'] += 1
                ah = True
            else:
                messages['non'] += 1
        conversations['all'] += 1
        if ah:
            conversations['ah'] += 1
        else:
            conversations['non'] += 1
    with open(out + 'Reddit/BNC/out.json', 'w+') as fp:
        fp.write('\n'.join(cache))
    print(messages)
    print(conversations)
| StarcoderdataPython |
3230377 | <reponame>flo-compbio/monet
# Copyright (c) 2021 <NAME>
#
# This file is part of Monet.
from .nonlinear import *
from .clustering import *
from .denoising import *
from .heatmap import *
from .preprocess import *
from .scvelo import *
| StarcoderdataPython |
285303 | <reponame>hbasria/netbox-dns<gh_stars>0
from django.urls import reverse
from utilities.testing import APITestCase
from netbox_dns.models import NameServer, Record, Zone
class ZoneAPITestCase(APITestCase):
    """
    Tests for Zone API (format=json)

    Each operation is exercised twice: with the relevant object permission
    (expecting success) and without it (expecting 403).
    """
    def test_view_zone_without_permission(self):
        url = reverse("plugins-api:netbox_dns-api:zone-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 403)
    def test_view_zone_with_permission(self):
        self.add_permissions("netbox_dns.view_zone")
        url = reverse("plugins-api:netbox_dns-api:zone-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)
    def test_view_zone_detail_with_permission(self):
        self.add_permissions("netbox_dns.view_zone")
        zone = Zone.objects.create(name="asdf")
        url = reverse("plugins-api:netbox_dns-api:zone-detail", kwargs={"pk": zone.id})
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)
    def test_add_zone_with_permission(self):
        self.add_permissions("netbox_dns.add_zone")
        url = reverse("plugins-api:netbox_dns-api:zone-list")
        response = self.client.post(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 201)
    def test_add_zone_without_permission(self):
        url = reverse("plugins-api:netbox_dns-api:zone-list")
        response = self.client.post(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 403)
    def test_delete_zone_with_permission(self):
        self.add_permissions("netbox_dns.delete_zone")
        zone = Zone.objects.create(name="asdf")
        url = reverse("plugins-api:netbox_dns-api:zone-detail", kwargs={"pk": zone.id})
        # NOTE(review): the data payload on DELETE looks like copy-paste from
        # the POST tests; it is harmless but unnecessary.
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 204)
    def test_delete_zone_without_permission(self):
        zone = Zone.objects.create(name="asdf")
        url = reverse("plugins-api:netbox_dns-api:zone-detail", kwargs={"pk": zone.id})
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 403)
class NameServerAPITestCase(APITestCase):
    """
    Tests for NameServer API (format=json)

    Each operation is exercised with and without the relevant object
    permission (success vs 403).
    """
    def test_list_nameserver_without_permission(self):
        url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 403)
    def test_list_nameserver_with_permission(self):
        self.add_permissions("netbox_dns.view_nameserver")
        url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)
    def test_view_nameserver_detail_with_permission(self):
        self.add_permissions("netbox_dns.view_nameserver")
        nameserver = NameServer.objects.create(name="asdf")
        url = reverse(
            "plugins-api:netbox_dns-api:nameserver-detail", kwargs={"pk": nameserver.id}
        )
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)
    def test_add_nameserver_with_permission(self):
        self.add_permissions("netbox_dns.add_nameserver")
        url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        response = self.client.post(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 201)
    def test_add_nameserver_without_permission(self):
        url = reverse("plugins-api:netbox_dns-api:nameserver-list")
        response = self.client.post(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 403)
    def test_delete_nameserver_with_permission(self):
        self.add_permissions("netbox_dns.delete_nameserver")
        nameserver = NameServer.objects.create(name="asdf")
        url = reverse(
            "plugins-api:netbox_dns-api:nameserver-detail", kwargs={"pk": nameserver.id}
        )
        # NOTE(review): the data payload on DELETE looks like copy-paste from
        # the POST tests; it is harmless but unnecessary.
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 204)
    def test_delete_nameserver_without_permission(self):
        nameserver = NameServer.objects.create(name="asdf")
        url = reverse(
            "plugins-api:netbox_dns-api:nameserver-detail", kwargs={"pk": nameserver.id}
        )
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 403)
class RecordAPITestCase(APITestCase):
    """
    Tests for Record API (format=json)

    Each operation is exercised with and without the relevant object
    permission (success vs 403).  Two methods were renamed from their
    copy-pasted "zone" names to match the Record objects they test; test
    discovery only depends on the ``test_`` prefix, so this is safe.
    """
    def test_view_record_without_permission(self):
        url = reverse("plugins-api:netbox_dns-api:record-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 403)
    def test_view_record_with_permission(self):
        self.add_permissions("netbox_dns.view_record")
        url = reverse("plugins-api:netbox_dns-api:record-list")
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)
    def test_view_record_detail_without_permission(self):
        zone = Zone.objects.create(name="zone.com")
        record = Record.objects.create(
            zone=zone,
            type=Record.A,
            name="Record 1",
            value="Value 1",
            ttl=100,
        )
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 403)
    def test_view_record_detail_with_permission(self):
        self.add_permissions("netbox_dns.view_record")
        zone = Zone.objects.create(name="zone.com")
        record = Record.objects.create(
            zone=zone,
            type=Record.A,
            name="Record 1",
            value="Value 1",
            ttl=100,
        )
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.get(f"{url}?format=json", **self.header)
        self.assertEqual(response.status_code, 200)
    def test_add_record_with_permission(self):
        self.add_permissions("netbox_dns.add_record")
        zone = Zone.objects.create(name="zone.com")
        url = reverse("plugins-api:netbox_dns-api:record-list")
        data = {
            "zone": zone.id,
            "type": Record.A,
            "name": "Record 1",
            "value": "Value 1",
            "ttl": 100,
        }
        response = self.client.post(f"{url}?format=json", data, **self.header)
        self.assertEqual(response.status_code, 201)
    def test_add_record_without_permission(self):
        # Renamed from test_add_zone_without_permission (copy-paste misnomer).
        zone = Zone.objects.create(name="zone.com")
        url = reverse("plugins-api:netbox_dns-api:record-list")
        data = {
            "zone": zone.id,
            "type": Record.A,
            "name": "Record 1",
            "value": "Value 1",
            "ttl": 100,
        }
        response = self.client.post(f"{url}?format=json", data, **self.header)
        self.assertEqual(response.status_code, 403)
    def test_delete_record_with_permission(self):
        self.add_permissions("netbox_dns.delete_record")
        zone = Zone.objects.create(name="zone.com")
        record = Record.objects.create(
            zone=zone,
            type=Record.A,
            name="Record 1",
            value="Value 1",
            ttl=100,
        )
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 204)
    def test_delete_record_without_permission(self):
        # Renamed from test_delete_zone_without_permission (copy-paste misnomer).
        zone = Zone.objects.create(name="zone.com")
        record = Record.objects.create(
            zone=zone,
            type=Record.A,
            name="Record 1",
            value="Value 1",
            ttl=100,
        )
        url = reverse(
            "plugins-api:netbox_dns-api:record-detail", kwargs={"pk": record.id}
        )
        response = self.client.delete(
            f"{url}?format=json", {"name": "Name 1"}, **self.header
        )
        self.assertEqual(response.status_code, 403)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.