text string | size int64 | token_count int64 |
|---|---|---|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The entry point for running a Dopamine agent."""
import os
from absl import app
from absl import flags
from absl import logging
from dopamine.discrete_domains import run_experiment
import tensorflow as tf
FLAGS = flags.FLAGS
# Repeatable flag listing the gin config files that parameterize the agent.
flags.DEFINE_multi_string(
    'gin_files', [], 'List of paths to gin configuration files (e.g.'
    '"dopamine/agents/dqn/dqn.gin").')
# When using Vertex Tensorboard, the tensorboard will be present as an
# environment variable; otherwise fall back to a local temp directory.
BASE_DIR = os.environ.get('AIP_TENSORBOARD_LOG_DIR', '/tmp/dopamine_runs')
def main(unused_argv):
    """Load gin configs, build a Dopamine runner, and run the experiment."""
    logging.set_verbosity(logging.INFO)
    # Dopamine's discrete-domain runners are written against the TF1 API,
    # so v2 behavior must be disabled before any graph construction.
    tf.compat.v1.disable_v2_behavior()
    run_experiment.load_gin_configs(FLAGS.gin_files, [])
    runner = run_experiment.create_runner(BASE_DIR)
    runner.run_experiment()


if __name__ == '__main__':
    app.run(main)
| 1,407 | 466 |
def soma():
    """Repeatedly read two integers and print their sum until the user quits.

    Entering 0 at the continue prompt exits; any value above 1 is treated
    as invalid and also exits.
    """
    # Renamed the accumulator: the original used a local named `soma`,
    # shadowing this function and breaking any later reuse of the name.
    repeat = 1
    while repeat > 0:
        n1 = int(input("Valor: "))
        n2 = int(input(f"{n1} + "))
        total = n1 + n2
        print(f"\n{n1} + {n2} = {total}\n")
        repeat = int(input("1-Continuar 0-Sair: "))
        if repeat > 1:
            print("Opção Inválida!....Saindo")
            repeat = 0
def sub():
    """Repeatedly read two integers and print their difference until quit.

    Entering 0 at the continue prompt exits; any value above 1 is treated
    as invalid and also exits.
    """
    # Renamed the result variable: the original used a local named `sub`,
    # shadowing this function.
    repeat = 1
    while repeat > 0:
        n1 = int(input("Valor: "))
        n2 = int(input(f"{n1} - "))
        difference = n1 - n2
        print(f"\n{n1} - {n2} = {difference}\n")
        repeat = int(input("1-Continuar 0-Sair: "))
        if repeat > 1:
            print("Opção Inválida!....Saindo")
            repeat = 0
def mult():
    """Repeatedly read two integers and print their product until quit.

    Entering 0 at the continue prompt exits; any value above 1 is treated
    as invalid and also exits.
    """
    # Renamed the result variable: the original used a local named `mult`,
    # shadowing this function.
    repeat = 1
    while repeat > 0:
        n1 = int(input("Valor: "))
        n2 = int(input(f"{n1} X "))
        product = n1 * n2
        print(f"\n{n1} X {n2} = {product}\n")
        repeat = int(input("1-Continuar 0-Sair: "))
        if repeat > 1:
            print("Opção Inválida!....Saindo")
            repeat = 0
def div():
    """Repeatedly read two integers and print their quotient until quit.

    A zero divisor prints a message and ends the loop immediately.
    """
    # Renamed the result variable: the original used a local named `div`,
    # shadowing this function.
    repeat = 1
    while repeat > 0:
        n1 = int(input("Valor: "))
        n2 = int(input(f"{n1}/ "))
        if n2 == 0:
            # Division by zero is not defined; bail out of the loop.
            print("Não existe divisão por 0! ")
            break
        quotient = n1 / n2
        print("\n{} / {} = {:.2f}\n".format(n1, n2, quotient))
        repeat = int(input("1-Continuar 0-Sair: "))
        if repeat > 1:
            print("Opção Inválida!....Saindo")
            repeat = 0
def raiz():
    """Repeatedly read an integer and print its square root until quit.

    A negative input prints a message and ends the loop immediately.
    """
    import math
    # Renamed the result variable: the original used a local named `raiz`,
    # shadowing this function.
    repeat = 1
    while repeat > 0:
        n = int(input("Digite o valor: "))
        if n < 0:
            # Real square roots of negative numbers do not exist.
            print("Não existe raiz de números negativos!")
            break
        root = math.sqrt(n)
        print("\nRaiz de {} = {:.2f}\n".format(n, root))
        repeat = int(input("1-Continuar 0-Sair: "))
        if repeat > 1:
            print("Opção Inválida!....Saindo")
            repeat = 0
def sct():
    """Read angles in degrees and print sine, cosine and tangent until quit."""
    import math
    keep_going = 1
    while keep_going > 0:
        angle = float(input("Digite o valor: "))
        rad = math.radians(angle)
        seno = math.sin(rad)
        cosseno = math.cos(rad)
        tang = math.tan(rad)
        print("\nO valor {} possui Seno = {:.2f}, Cosseno = {:.2f} e Tangente = {:.2f}\n".format(angle, seno, cosseno, tang))
        keep_going = int(input("1-Continuar 0-Sair: "))
        if keep_going > 1:
            print("Opção Inválida!....Saindo")
            keep_going = 0
# Banner shown once at startup.
print("""
-------------CALCULADORA--------------
-----------Pablo--Vinícius------------
""")
corpo = True  # main-loop flag; the loop actually exits via `break` on option 0
while corpo == True:
    print("""
MENU
1 - Soma
2 - Subtração
3 - Multiplicação
4 - Divisão
5 - Raiz Quadrada
6 - Seno, Cosseno, Tangente
0 - Sair
""")
    op = int(input("Escolha a operação: "))
    # Dispatch to the chosen operation; each one loops until the user quits.
    if op == 1:
        soma()
    elif op == 2:
        sub()
    elif op == 3:
        mult()
    elif op == 4:
        div()
    elif op == 5:
        raiz()
    elif op == 6:
        sct()
    elif op == 0:
        break
    else:
        print("Opção inválida!")
"""
"""
from collections import Counter
from read_file import read_input
def checksum(filename):
    """Compute the AoC 2018 day-2 style checksum for the IDs in *filename*.

    Counts how many IDs contain some letter exactly twice and how many
    contain some letter exactly three times, then returns the product.
    """
    threes = 0
    twos = 0
    ids = read_input(filename)
    for label_id in ids:
        # Set of letter frequencies present in this ID; replaces the
        # original inverted-dict + try/except KeyError dance.
        frequencies = set(Counter(label_id).values())
        if 2 in frequencies:
            twos += 1
        if 3 in frequencies:
            threes += 1
    return threes * twos
def differ():
    """Find the two IDs in input_2.txt that differ in exactly one position.

    Returns:
        The (first, second) pair of IDs, or None if no such pair exists.
    """
    ids = read_input('input_2.txt')
    # Compare every unordered pair exactly once.
    for idx, first_label in enumerate(ids, 1):
        for second_label in ids[idx:]:
            # Number of positions where the two IDs disagree.
            mismatches = sum(
                a != b for a, b in zip(first_label, second_label)
            )
            if mismatches == 1:
                return first_label, second_label
    return None


if __name__ == '__main__':
    print(differ())
| 1,452 | 490 |
#!/usr/bin/env python
# Created by Søren Christian Aarup, sc@aarup.org
# https://github.com/scaarup/home-assistant-backup
# api ref.: https://developers.home-assistant.io/docs/api/supervisor/endpoints
import requests,json,datetime,gzip,sys,datetime
from datetime import timedelta, date
# Long-lived Supervisor API token ("Bearer <value>").
token = 'Bearer <token>'
# Base URL of the Home Assistant instance.
host = '<url>'
retention = 12 # In days, how many backups do you want to keep on Home Assistant (normally in /backup).
# Prefix shared by every backup this script creates.
backupname = 'hassio_backup_full-'
# Today's stamp used in the new backup's filename.
date_string = datetime.datetime.now().strftime('%Y%m%d')
# Name of the backup that has aged past the retention window.
_d = date.today() - timedelta(retention)
oldestbackup = backupname+_d.strftime('%Y%m%d')+'.tar.gz'
name = backupname+date_string+'.tar.gz'
debug = 1  # set to 0 to silence debuglog output
def debuglog(msg):
    """Print *msg* with a timestamp when debug mode is enabled."""
    if debug == 1:
        stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(stamp + ' DEBUG: ' + msg)
def log(msg):
    """Print *msg* with a timestamp at INFO level."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(stamp + ' INFO: ' + msg)
# Ping Supervisor, quit if fail: any non-"ok" result means the API is
# unreachable or the token is bad, so there is no point continuing.
response = requests.get(host+'/api/hassio/supervisor/ping', headers={'authorization': token})
json_response = response.json()
if not json_response['result'] == 'ok':
    log('Supervisor not responding ok to our ping! '+str(response.status_code)+' '+str(response.content))
    sys.exit(1)
##
##
def listBackups(name):
    """Return the slug of the backup called *name*, or None when absent."""
    debuglog('Looping through backups on HA, looking for '+name)
    resp = requests.get(host+'/api/hassio/backups', headers={'authorization': token})
    for backup in resp.json()['data']['backups']:
        debuglog('\t'+backup['name']+' '+backup['slug'])
        if backup['name'] == name:
            debuglog('Found our backup on HA:')
            return backup['slug']
    return None
def createBackupFull(name):
    """Trigger a full backup named *name* on HA and return the new slug."""
    debuglog('Creating backup '+name)
    resp = requests.post(
        host+'/api/hassio/backups/new/full',
        json={'name': name},
        headers={'authorization': token, 'content-type': 'application/json'},
    )
    debuglog(str(resp.status_code)+' '+str(resp.content))
    payload = resp.json()
    debuglog('Create backup response: '+payload['result'])
    return payload['data']['slug']
def removeBackup(name, slug):
    """Delete the backup identified by *slug*; *name* is only for logging."""
    debuglog('Removing backup '+name+' on server')
    headers = {'authorization': token, 'content-type': 'application/json'}
    resp = requests.delete(host+'/api/hassio/backups/'+slug, headers=headers)
    debuglog(str(resp.status_code)+' '+str(resp.content))
    # Parse the body as the original did; raises on a non-JSON response.
    resp.json()
def getBackup(name, slug):
    """Download backup *slug* from HA and write it gzip-compressed to *name*.

    NOTE(review): the downloaded archive is wrapped in an extra gzip layer
    here; confirm downstream tooling expects name.tar.gz to be gzip(tar).
    """
    log('Downloading backup '+name)
    response = requests.get(
        host+'/api/hassio/backups/'+slug+'/download',
        headers={'authorization': token}
    )
    # Context manager replaces the commented-out try/finally: the file is
    # closed (and the gzip trailer written) even if the write raises.
    with gzip.open(name, 'wb') as output:
        output.write(response.content)
    if response.status_code == 200:
        debuglog('Download ok')
    else:
        debuglog('Download response '+str(response.status_code)+' '+str(response.content))
# Create the backup, get the slug:
slug = createBackupFull(name)
# Download the backup:
getBackup(name, slug)
# Remove our oldest backup, according to retention
slug = listBackups(oldestbackup)
if slug is not None:
    debuglog('Calling removeBackup for '+oldestbackup+' with slug '+slug)
    # Bug fix: pass the oldest backup's name (the original passed `name`,
    # today's backup), so removeBackup logs the backup actually deleted.
    removeBackup(oldestbackup, slug)
else:
    debuglog('Did not find a backup to delete.')
| 3,357 | 1,116 |
"""
Contains class FeatureBuilder for building feature set from given data set and word embedding
"""
import numpy as np
class FeatureBuilder(object):
    """
    Class used for building feature matrix.
    Field "labels" is a list of categories of sentences
    Field "features" is a features matrix of shape (training set size, vector_length)
    Both arrays are frozen (non-writeable) so callers cannot mutate them.
    """
    def __init__(self):
        self.labels = np.empty(0, dtype=np.uint8)
        self.features = np.empty(0, dtype=float)
        # Freeze until build() replaces them.
        self.labels.flags.writeable = False
        self.features.flags.writeable = False

    def build(self, sentence_embedding, labels, sentences):
        """
        :param sentence_embedding: instance of sentence embedding class implementing ISentenceEmbedding interface
        :param labels: a numpy vector of labels of sentences
        :param sentences: a numpy matrix of sentences (rows = sentences, columns = words)
        """
        self.labels = labels
        sentences_vectors_length = sentence_embedding.target_vector_length
        self.features = np.empty((sentences.shape[0], sentences_vectors_length), dtype=float)
        # Bug fix: `xrange` does not exist in Python 3; `range` works on both.
        for i in range(sentences.shape[0]):
            self.features[i] = sentence_embedding[sentences[i]]
        self.labels.flags.writeable = False
        self.features.flags.writeable = False
| 1,319 | 350 |
import os
from conans import ConanFile
from conans.tools import download, check_sha256
class NlohmannJsonConan(ConanFile):
    """Conan recipe packaging the header-only nlohmann/json library."""
    name = "json"
    # The package version is read from VERSION.txt at recipe load time.
    with open(os.path.join(os.path.dirname(os.path.realpath(
            __file__)), "VERSION.txt"), 'r') as version_file:
        version = version_file.read()
    # Header-only: the binary package is independent of os/compiler/arch.
    settings = {}
    description = "JSON for Modern C++"
    generators = "cmake", "virtualenv"
    exports = "VERSION.txt"
    url = "https://github.com/nlohmann/json"
    license = "https://github.com/nlohmann/json/blob/v2.1.0/LICENSE.MIT"
    # no_exceptions=True defines JSON_NOEXCEPTION for consumers.
    options = {'no_exceptions': [True, False]}
    default_options = 'no_exceptions=False'

    def config(self):
        # Drop options irrelevant to a header-only package.
        self.options.remove("os")
        self.options.remove("compiler")
        self.options.remove("shared")
        self.options.remove("build_type")
        self.options.remove("arch")

    def source(self):
        # Fetch the single released header and verify its checksum.
        download_url = 'https://github.com/nlohmann/json/releases/' \
                       'download/v{!s}/json.hpp'.format(self.version)
        download(download_url, 'json.hpp')
        check_sha256('json.hpp',
                     'a571dee92515b685784fd527e38405cf3f5e13e96edbfe3f03d6df2e'
                     '363a767b')

    def build(self):
        return  # Nothing to do. Header Only

    def package(self):
        # Install the header under include/nlohmann.
        self.copy(pattern='json.hpp', dst='include/nlohmann', src=".")

    def package_info(self):
        if self.options.no_exceptions:
            self.cpp_info.defines.append('JSON_NOEXCEPTION=1')
        self.cpp_info.includedirs = ['include']
        # Expose the include dir via CPATH for virtualenv consumers.
        self.env_info.CPATH.append("{}/include".format(self.package_folder))
| 1,624 | 567 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
SeeKeR Dialogue Tasks.
"""
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.teachers import MultiTaskTeacher
import parlai.tasks.convai2.agents as convai2
import parlai.tasks.blended_skill_talk.agents as bst
import parlai.tasks.empathetic_dialogues.agents as ed
import parlai.tasks.wizard_of_internet.agents as woi
import parlai.tasks.wizard_of_wikipedia.agents as wow
import parlai.tasks.msc.agents as msc
import parlai.tasks.ms_marco.agents as ms_marco
import parlai.utils.logging as logging
import projects.seeker.tasks.mutators # type: ignore # noqa: F401
class WoiDialogueTeacher(woi.DefaultTeacher):
    """Wizard-of-Internet teacher configured for the SeeKeR dialogue task."""

    def __init__(self, opt, shared=None):
        pipeline = [
            'flatten',
            'woi_pop_documents_mutator',
            'woi_filter_no_passage_used',
            'woi_add_checked_sentence_to_input',
            'skip_retrieval_mutator',
        ]
        combined = '+'.join(pipeline)
        extra = opt.get('mutators')
        if extra:
            combined = f'{combined}+{extra}'
            logging.warning(f'overriding mutators to {combined}')
        opt['mutators'] = combined
        super().__init__(opt, shared)
        self.id = "WoiDialogueTeacher"
class WowDialogueTeacher(wow.DefaultTeacher):
    """Wizard-of-Wikipedia teacher configured for the SeeKeR dialogue task."""

    def __init__(self, opt, shared=None):
        opt['add_missing_turns'] = 'all'
        pipeline = [
            'flatten',
            'wow_filter_no_passage_used',
            'wow_add_checked_sentence_to_input',
            'skip_retrieval_mutator',
            'wow_to_woi',
            'woi_pop_documents_mutator',
        ]
        combined = '+'.join(pipeline)
        extra = opt.get('mutators')
        if extra:
            combined = f'{combined}+{extra}'
            logging.warning(f'overriding mutators to {combined}')
        opt['mutators'] = combined
        super().__init__(opt, shared)
        self.id = "WowDialogueTeacher"
class MsMarcoDialogueTeacher(ms_marco.DefaultTeacher):
    """MS MARCO teacher arranged as a dialogue task for SeeKeR."""

    def __init__(self, opt, shared=None):
        pipeline = [
            'ms_marco_filter_has_answer',
            'ms_marco_create_fid_docs',
            'ms_marco_find_selected_sentence_for_response',
            'woi_pop_documents_mutator',
            'skip_retrieval_mutator',
        ]
        combined = '+'.join(pipeline)
        extra = opt.get('mutators')
        if extra:
            combined = f'{combined}+{extra}'
            logging.warning(f'overriding mutators to {combined}')
        opt['mutators'] = combined
        super().__init__(opt, shared)
        self.id = "MsMarcoDialogueTeacher"
def get_dialogue_task_mutators(opt: Opt) -> str:
    """
    Set the mutators appropriately for the dialogue tasks.
    """
    base = ['flatten', 'extract_entity_for_response_model', 'skip_retrieval_mutator']
    mutators = '+'.join(base)
    extra = opt.get('mutators')
    if extra:
        mutators = f'{mutators}+{extra}'
        logging.warning(f'overriding mutators to {mutators}')
    return mutators
class Convai2DialogueTeacher(convai2.NormalizedTeacher):
    """ConvAI2 dialogue teacher with SeeKeR response-task mutators."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        # Drop label candidates; SeeKeR dialogue is generative.
        opt['task'] = opt['task'] + ':no_cands'
        super().__init__(opt, shared)
        self.id = 'Convai2DialogueTeacher'
class EDDialogueTeacher(ed.DefaultTeacher):
    """EmpatheticDialogues teacher with SeeKeR response-task mutators."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        super().__init__(opt, shared)
        self.id = 'EDDialogueTeacher'
class BSTDialogueTeacher(bst.DefaultTeacher):
    """BlendedSkillTalk teacher with SeeKeR response-task mutators."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        super().__init__(opt, shared)
        self.id = 'BSTDialogueTeacher'
class MSCDialogueTeacher(msc.DefaultTeacher):
    """Multi-Session Chat teacher with SeeKeR response-task mutators."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        # Session 1 is covered by the ConvAI2 teacher.
        opt['include_session1'] = False
        super().__init__(opt, shared)
        self.id = 'MSCDialogueTeacher'
class MSCDialogueOverlapTeacher(msc.DefaultTeacher):
    """MSC teacher variant selecting responses overlapping a chosen sentence."""

    def __init__(self, opt, shared=None):
        pipeline = [
            'flatten',
            'msc_find_selected_sentence_response',
            'skip_retrieval_mutator',
        ]
        opt['mutators'] = '+'.join(pipeline)
        # Session 1 is covered by the ConvAI2 teacher.
        opt['include_session1'] = False
        super().__init__(opt, shared)
        self.id = 'MSCDialogueOverlapTeacher'
class DialogueTeacher(MultiTaskTeacher):
    """Multi-task teacher combining all SeeKeR dialogue sub-teachers."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        sub_teachers = (
            WoiDialogueTeacher,
            WowDialogueTeacher,
            MsMarcoDialogueTeacher,
            Convai2DialogueTeacher,
            EDDialogueTeacher,
            BSTDialogueTeacher,
            MSCDialogueTeacher,
            MSCDialogueOverlapTeacher,
        )
        for teacher in sub_teachers:
            teacher.add_cmdline_args(parser, partial_opt)
        return parser

    def __init__(self, opt, shared=None):
        teacher_names = [
            'WoiDialogueTeacher',
            'WowDialogueTeacher',
            'MsMarcoDialogueTeacher',
            'Convai2DialogueTeacher',
            'EDDialogueTeacher',
            'BSTDialogueTeacher',
            'MSCDialogueTeacher',
            'MSCDialogueOverlapTeacher',
        ]
        opt['task'] = ','.join(
            f"projects.seeker.tasks.dialogue:{name}" for name in teacher_names
        )
        super().__init__(opt, shared)
class DefaultTeacher(DialogueTeacher):
    """Default teacher for this module: the combined dialogue multi-task."""
    pass
| 5,958 | 1,986 |
import abc
from argparse import ArgumentParser, Namespace
from .config import get_config
class CommandProvider(abc.ABC):
    """Base class for CLI sub-command handlers.

    A subclass registers itself as the `func` default on its argparse
    sub-parser, so dispatching the parsed options calls the instance.
    """

    def __init__(self, parser: ArgumentParser):
        # Shared application configuration, loaded once per handler.
        self.config = get_config()
        # argparse will invoke this instance with the parsed Namespace.
        parser.set_defaults(func=self)

    @abc.abstractmethod
    def __call__(self, options: Namespace):
        """Execute the command with the parsed *options*."""
        raise NotImplementedError()
| 350 | 99 |
# Dictionary mapping each school subject (key) to the list of grades
# earned in that subject (value).
grades = dict(
    Matematyka=[4, 2, 6, 5, 3],
    Fizyka=[5, 5, 2, 4, 3],
    Chemia=[4, 1, 4, 5, 4],
    Biologia=[3, 5, 5, 2, 5],
)
print("Przedmioty i oceny", grades)
| 297 | 162 |
from starry_process import calibrate
import numpy as np
import os
import shutil
# Utility funcs to move figures to this directory.
# PEP 8 (E731): named functions instead of lambda assignments, so
# tracebacks carry real names.
def abspath(*args):
    """Return an absolute path rooted at this script's directory."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)


def copy(name, src, dest):
    """Copy figure *src* out of data/<name>/ to *dest* in this directory."""
    return shutil.copyfile(abspath("data", name, src), abspath(dest))


# Run
calibrate.run(path=abspath("data/default"), ncols=7, clip=True)
# Copy output to this directory
copy("default", "data.pdf", "calibration_default_data.pdf")
copy("default", "corner_transformed.pdf", "calibration_default_corner.pdf")
copy("default", "latitude.pdf", "calibration_default_latitude.pdf")
copy("default", "inclination.pdf", "calibration_default_inclination.pdf")
| 703 | 239 |
import numpy.random as rnd
from sklearn import manifold
from sklearn.ensemble import IsolationForest
from common.gen_samples import *
"""
pythonw -m ad.spectral_outlier
"""
def euclidean_dist(x1, x2):
    """Euclidean (L2) distance between two equal-length vectors."""
    diff = x1 - x2
    return np.sqrt((diff * diff).sum())
class LabelDiffusion(object):
    """
    2-D embedding via diffusion: build a k-NN affinity graph with a Gaussian
    kernel, diffuse similarity through the symmetrically normalized graph,
    then project the resulting dissimilarities with MDS.

    IMPORTANT: The results from Python's Scikit-Learn MDS API are significantly
    different (and sub-optimal) from R. Strongly recommend R's isoMDS for the last
    step of converting pair-wise distances to 2D coordinates.
    """
    def __init__(self, n_neighbors=10, k2=0.5, alpha=0.99,
                 n_components=2, eigen_solver='auto',
                 tol=0., max_iter=None, n_jobs=1, metric=True):
        self.n_neighbors = n_neighbors  # neighborhood size for the affinity graph
        self.k2 = k2  # Gaussian kernel bandwidth
        self.alpha = alpha  # diffusion damping factor
        self.n_components = n_components  # output embedding dimension
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.n_jobs = n_jobs
        self.metric = metric  # forwarded to sklearn MDS
        self.alphas_ = None
        self.lambdas_ = None

    def fit_transform(self, x_in):
        """Embed x_in into self.n_components dimensions and return the result."""
        n = nrow(x_in)
        x = normalize_and_center_by_feature_range(x_in)
        # Full symmetric pairwise Euclidean distance matrix, naive O(n^2).
        dists = np.zeros(shape=(n, n), dtype=float)
        for i in range(n):
            for j in range(i, n):
                dists[i, j] = euclidean_dist(x[i, :], x[j, :])
                dists[j, i] = dists[i, j]
        logger.debug(dists[0, 0:10])
        # Indexes of each point's n_neighbors closest points (self included).
        neighbors = np.zeros(shape=(n, self.n_neighbors), dtype=int)
        for i in range(n):
            neighbors[i, :] = np.argsort(dists[i, :])[0:self.n_neighbors]
        logger.debug(neighbors[0, 0:10])
        # Gaussian affinity restricted to the k-NN graph.
        W = np.zeros(shape=(n, n))
        for i in range(n):
            for j in neighbors[i, :]:
                # diagonal elements of W will be zeros
                if i != j:
                    W[i, j] = np.exp(-(dists[i, j] ** 2) / self.k2)
                    W[j, i] = W[i, j]
        D = W.sum(axis=1)
        # logger.debug(str(list(D[0:10])))
        # Symmetric normalization: S = D^(-1/2) W D^(-1/2).
        iDroot = np.diag(np.sqrt(D) ** (-1))
        S = iDroot.dot(W.dot(iDroot))
        # logger.debug("S: %s" % str(list(S[0, 0:10])))
        # Closed-form diffusion: A = (I - alpha*S)^(-1), rescaled so the
        # diagonal (self-similarity) is 1.
        B = np.eye(n) - self.alpha * S
        # logger.debug("B: %s" % str(list(B[0, 0:10])))
        A = np.linalg.inv(B)
        tdA = np.diag(np.sqrt(np.diag(A)) ** (-1))
        A = tdA.dot(A.dot(tdA))
        # logger.debug("A: %s" % str(list(A[0, 0:10])))
        # Convert similarities to dissimilarities for MDS.
        d = 1 - A
        # logger.debug("d: %s" % str(list(d[0, 0:10])))
        # logger.debug("min(d): %f, max(d): %f" % (np.min(d), np.max(d)))
        mds = manifold.MDS(self.n_components,
                           metric=self.metric, dissimilarity='precomputed')
        # using abs below because some zeros are represented as -0; other values are positive.
        embedding = mds.fit_transform(np.abs(d))
        return embedding
if __name__ == "__main__":
    logger = logging.getLogger(__name__)
    args = get_command_args(debug=True, debug_args=["--debug",
                                                    "--plot",
                                                    "--log_file=temp/spectral_outlier.log"])
    # print "log file: %s" % args.log_file
    configure_logger(args)
    # sample_type = "4_"
    # sample_type = "donut_"
    sample_type = "face_"
    rnd.seed(42)
    # Load the demo dataset and optionally plot the raw samples.
    x, y = get_demo_samples(sample_type)
    n = x.shape[0]
    xx = yy = x_grid = Z = scores = None
    if args.plot:
        plot_sample(x, y, pdfpath="temp/spectral_%ssamples.pdf" % sample_type)
    n_neighbors = 10
    n_components = 2
    method = "standard"  # ['standard', 'ltsa', 'hessian', 'modified']
    # embed_type = "se"
    # embed_type = "tsne"
    # embed_type = "isomap"
    # embed_type = "mds"
    # embed_type = "lle_%s" % method
    embed_type = "diffusion"
    # Choose the 2-D embedding technique by name.
    if embed_type == "se":
        embed = manifold.SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors)
    elif embed_type == "tsne":
        embed = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
    elif embed_type.startswith("lle_"):
        embed = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=n_components,
                                                eigen_solver='auto', method=method)
    elif embed_type == "isomap":
        embed = manifold.Isomap(n_neighbors=n_neighbors, n_components=n_components)
    elif embed_type == "mds":
        embed = manifold.MDS(n_components=n_components)
    elif embed_type == "diffusion":
        embed = LabelDiffusion(n_neighbors=n_neighbors, n_components=n_components, metric=True)
    else:
        raise ValueError("invalid embed type %s" % embed_type)
    x_tr = embed.fit_transform(x)
    logger.debug(x_tr)
    if args.plot:
        plot_sample(x_tr, y, pdfpath="temp/spectral_%s%s.pdf" % (sample_type, embed_type))
    # Run IsolationForest on the embedded points; higher score = more anomalous.
    ad_type = 'ifor'
    outliers_fraction = 0.1
    ad = IsolationForest(max_samples=256, contamination=outliers_fraction, random_state=None)
    ad.fit(x_tr)
    scores = -ad.decision_function(x_tr)
    top_anoms = np.argsort(-scores)[np.arange(10)]
    if args.plot:
        # to plot probability contours
        xx, yy = np.meshgrid(np.linspace(np.min(x_tr[:, 0]), np.max(x_tr[:, 0]), 50),
                             np.linspace(np.min(x_tr[:, 1]), np.max(x_tr[:, 1]), 50))
        x_grid = np.c_[xx.ravel(), yy.ravel()]
        Z = -ad.decision_function(x_grid)
        Z = Z.reshape(xx.shape)
        pdfpath = "temp/spectral_%scontours_%s_%s.pdf" % (sample_type, ad_type, embed_type)
        dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)
        pl = dp.get_next_plot()
        pl.contourf(xx, yy, Z, 20, cmap=plt.cm.get_cmap('jet'))
        dp.plot_points(x_tr, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25)
        # Circle the top-10 ranked anomalies.
        pl.scatter(x_tr[top_anoms, 0], x_tr[top_anoms, 1], marker='o', s=35,
                   edgecolors='red', facecolors='none')
        dp.close()
| 5,922 | 2,257 |
from .logger import logger
from .ecc import convert_pubkey_to_addr,VerifyingKey,sha256d
class Stack(list):
    """A list-backed LIFO stack."""
    push = list.append

    def peek(self):
        """Return the top item without popping it."""
        return self[-1]


class LittleMachine(object):
    """A tiny stack machine that evaluates Bitcoin-style scripts.

    Opcodes are dispatched through self._map; any non-opcode literal
    (str/bytes/int/bool) is pushed onto the stack as data.
    """

    def __init__(self):
        self.stack = Stack()
        self._map = {
            "OP_ADD": self.add,
            "OP_MINUS": self.minus,
            "OP_MUL": self.mul,
            "OP_EQ": self.equal_check,
            "OP_EQUAL": self.equal,
            "OP_CHECKSIG": self.check_sig,
            "OP_ADDR": self.calc_addr,
            "OP_DUP": self.dup,
            "OP_NDUP": self.ndup,
            "OP_CHECKMULSIG": self.check_mulsig,
            "OP_MULHASH": self.calc_mulhash,
        }

    def set_script(self, script, message=b''):
        """Load a script (list of opcodes/data) and reset machine state."""
        self.clear()
        self.result = True
        self.pointer = 0
        self.message = message
        self.script = script

    def clear(self):
        self.stack.clear()

    def peek(self):
        return self.stack.peek()

    def pop(self):
        return self.stack.pop()

    def push(self, value):
        self.stack.push(value)

    def evaluate(self, op):
        """Execute one opcode, or push a literal value onto the stack."""
        if op in self._map:
            self._map[op]()
        elif isinstance(op, (str, bytes, int, bool)):
            self.push(op)
        else:
            # Bug fix: the original logged 'Uknow opcode: '.format(op),
            # which has a typo and no placeholder, so the opcode was lost.
            logger.info('Unknown opcode: {}'.format(op))

    def add(self):
        self.push(self.pop() + self.pop())

    def minus(self):
        # Pop order matters: second-from-top minus top.
        subtrahend = self.pop()
        self.push(self.pop() - subtrahend)

    def mul(self):
        self.push(self.pop() * self.pop())

    def dup(self):
        self.push(self.peek())

    def ndup(self):
        """Duplicate the top n stack items (n popped first), then re-push n."""
        n = self.pop()
        for val in self.stack[-n:]:
            self.push(val)
        self.push(n)

    def equal_check(self):
        """Pop two values; mark the whole script as failed if they differ."""
        if self.pop() != self.pop():
            self.result = False

    def equal(self):
        self.push(self.pop() == self.pop())

    def calc_mulhash(self):
        """Pop n public keys and push sha256d of their concatenation."""
        n = self.pop()
        pk_strs = [self.pop() for _ in range(n)]
        s = b''
        # Reverse restores the original push order before hashing.
        for val in pk_strs[::-1]:
            s += val
        self.push(sha256d(s))

    def check_sig(self):
        """Verify one signature against self.message; push the boolean result."""
        pk_str = self.pop()
        sig = self.pop()
        verifying_key = VerifyingKey.from_bytes(pk_str)
        try:
            flag = verifying_key.verify(sig, self.message)
        except Exception:
            flag = False
        self.push(flag)

    def check_mulsig(self):
        """Verify m signatures against the last m of n public keys."""
        n = self.pop()
        pk_strs = [self.pop() for _ in range(n)]
        m = self.pop()
        sigs = [self.pop() for _ in range(m)]
        pk_strs = pk_strs[-m:]
        # Bug fixes: initialize flag so m == 0 cannot raise NameError, and
        # drop the dead `falg = False` typo — breaking on the first failure
        # already leaves flag False.
        flag = True
        for i in range(m):
            verifying_key = VerifyingKey.from_bytes(pk_strs[i])
            try:
                flag = verifying_key.verify(sigs[i], self.message)
            except Exception:
                flag = False
            if not flag:
                break
        self.push(flag)

    def calc_addr(self):
        """Pop a public key and push its derived address."""
        pk_str = self.pop()
        self.push(convert_pubkey_to_addr(pk_str))

    def run(self):
        """Execute the loaded script.

        Returns False as soon as an OP_EQ check fails; otherwise returns
        the value left on top of the stack.
        """
        while self.pointer < len(self.script):
            op = self.script[self.pointer]
            self.pointer += 1
            self.evaluate(op)
            if not self.result:
                return False
        return self.peek()
if __name__ == "__main__":
    from datatype import Vin,Vout
    from ecc import SigningKey,convert_pubkey_to_addr
    # (Earlier single-signature pay-to-pubkey experiment, kept for reference.)
##    k = 12356
##    k1 = 23464
##    sk = SigningKey.from_number(k)
##    pk = sk.get_verifying_key()
##
##    sk1 = SigningKey.from_number(k1)
##    pk1 = sk1.get_verifying_key()
##    addr = convert_pubkey_to_addr(pk.to_bytes())
##    addr1 = convert_pubkey_to_addr(pk1.to_bytes())
##
##    m1 = b'hello'
##    m2 = b'go away'
##    sig = sk.sign(m1)
##    sig1 = sk1.sign(m2)
##    vin = Vin(None,sig1,pk1.to_bytes())
##    vout = Vout(addr,10)
##
##    sig_script = [vin.sig_script[:64],vin.sig_script[64:]]
##    pubkey_script = vout.pubkey_script.split(' ')
    # Demo: 2-of-2 multisig. Both signatures must verify and the hash of the
    # concatenated public keys must match `Hash` for run() to succeed.
    kA = 3453543
    kB = 2349334
    skA = SigningKey.from_number(kA)
    skB = SigningKey.from_number(kB)
    pkA = skA.get_verifying_key()
    pkB = skB.get_verifying_key()
    message = b'I love blockchain'
    sigA = skA.sign(message)
    sigB = skB.sign(message)
    Hash = sha256d(pkA.to_bytes()+pkB.to_bytes())
    # Unlocking script (signatures + keys) followed by the locking script.
    sig_script = [sigA,sigB,2,pkA.to_bytes(),pkB.to_bytes(),2]
    pubkey_script = ['OP_NDUP','OP_MULHASH',Hash,'OP_EQ',2,'OP_CHECKMULSIG']
    script = sig_script + pubkey_script
    machine = LittleMachine()
    machine.set_script(script,message)
    print (machine.run())
##    script = [a,1,2,'OP_DUP','OP_ADD','OP_EQ']
##    machine = LittleMachine()
##    machine.set_script(script)
##    print(machine.run())
| 5,042 | 1,796 |
# Generated by Django 3.0.8 on 2020-07-22 17:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make album.total_songs optional (blank/null), labelled "total tracks"."""

    dependencies = [
        ('player', '0002_auto_20200722_1725'),
    ]

    operations = [
        migrations.AlterField(
            model_name='album',
            name='total_songs',
            field=models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='total tracks'),
        ),
    ]
| 442 | 157 |
# Copyright 2017 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import importlib
import logging
from sawtooth_validator.exceptions import UnknownConsensusModuleError
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.state.settings_view import SettingsView
LOGGER = logging.getLogger(__name__)
PROXY = '_proxy_'
class ConsensusFactory(object):
    """ConsensusFactory returns consensus modules by short name.
    """
    @staticmethod
    def get_consensus_module(module_name):
        """Returns a consensus module by name.
        Args:
            module_name (str): The name of the module to load.
        Returns:
            module: The consensus module.
        Raises:
            UnknownConsensusModuleError: Raised if the given module_name does
                not correspond to a consensus implementation.
        """
        # Map known short names to their implementing packages; any other
        # name is passed to importlib verbatim.
        module_package = module_name
        if module_name == 'genesis':
            module_package = (
                'sawtooth_validator.journal.consensus.genesis.'
                'genesis_consensus'
            )
        elif module_name == 'devmode':
            module_package = (
                'sawtooth_validator.journal.consensus.dev_mode.'
                'dev_mode_consensus'
            )
        elif module_name == PROXY:
            module_package = (
                'sawtooth_validator.journal.consensus.proxy.'
                'proxy_consensus'
            )
        elif module_name == 'poet':
            module_package = 'sawtooth_poet.poet_consensus'
        elif module_name == 'pbft':
            module_package = 'pbft.bgx_pbft.consensus'
        try:
            return importlib.import_module(module_package)
        except ImportError:
            raise UnknownConsensusModuleError(
                'Consensus module "{}" does not exist.'.format(module_name))

    @staticmethod
    def try_configured_proxy_consensus():
        """Returns the proxy consensus module, or None when it cannot be
        imported (unlike get_consensus_module, this never raises).
        """
        LOGGER.debug("ConsensusFactory::try_configured_proxy_consensus")
        try:
            mod = ConsensusFactory.get_consensus_module(PROXY)
        except UnknownConsensusModuleError:
            mod = None
        return mod

    @staticmethod
    def try_configured_consensus_module(block_id, state_view):
        """Returns the consensus_module based on the consensus module set by the
        "sawtooth_settings" transaction family, plus its (name, version) pair.
        The module is None when it cannot be imported.
        Args:
            block_id (str): the block id associated with the current state_view
            state_view (:obj:`StateView`): the current state view to use for
                setting values
        """
        settings_view = SettingsView(state_view)
        # The genesis block has no predecessor, so it gets the genesis
        # consensus; anything else defaults to devmode.
        default_consensus = 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode'
        consensus_module_name = settings_view.get_setting('bgx.consensus.algorithm', default_value=default_consensus)
        consensus_version = settings_view.get_setting('bgx.consensus.version', default_value='0.1')
        LOGGER.debug("ConsensusFactory::try_configured_consensus_module consensus_module_name=%s ver=%s",consensus_module_name,consensus_version)
        try:
            mod = ConsensusFactory.get_consensus_module(consensus_module_name)
        except UnknownConsensusModuleError:
            mod = None
        return mod,(consensus_module_name,consensus_version)

    @staticmethod
    def get_configured_consensus_module(block_id, state_view):
        """Returns the consensus_module based on the consensus module set by the
        "sawtooth_settings" transaction family.
        Args:
            block_id (str): the block id associated with the current state_view
            state_view (:obj:`StateView`): the current state view to use for
                setting values
        Raises:
            UnknownConsensusModuleError: Thrown when an invalid consensus
                module has been configured.
        """
        settings_view = SettingsView(state_view)
        # Same default rule as try_configured_consensus_module above.
        default_consensus = 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode'
        consensus_module_name = settings_view.get_setting('bgx.consensus.algorithm', default_value=default_consensus)
        LOGGER.debug("ConsensusFactory::get_configured_consensus_module consensus_module_name=%s",consensus_module_name)
        return ConsensusFactory.get_consensus_module(consensus_module_name)
| 5,556 | 1,494 |
import logging
import os
import sys
import uuid
import splunk.admin as admin
import ensemble_aws_accounts_schema
import urllib
import hashlib
import base_eai_handler
import log_helper
import boto3
import json
# On Windows, stdio must be switched to binary mode so Splunk's
# persistent-mode handler protocol is not corrupted by CRLF translation.
if sys.platform == 'win32':
    import msvcrt
    # Binary mode is required for persistent mode on Windows.
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
# Setup the handler's dedicated log file.
logger = log_helper.setup(logging.INFO, 'EnsembleAWSAccountsEAIHandler', 'ensemble_aws_accounts_handler.log')
class EnsembleAWSAccountsEAIHandler(base_eai_handler.BaseEAIHandler):
    def setup(self):
        """Register every schema field as an optional EAI argument."""
        # Add our supported args
        for arg in ensemble_aws_accounts_schema.ALL_FIELDS:
            self.supportedArgs.addOptArg(arg)
def handleList(self, confInfo):
"""
Called when user invokes the "list" action.
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('Ensemble AWS accounts list requested.')
# Fetch from ensemble_aws_accounts conf handler
conf_handler_path = self.get_conf_handler_path_name('ensemble_aws_accounts', 'nobody')
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'list', 'GET')
# Add link alternate (without mgmt, scheme, host, port) to list response
for ensemble_aws_accounts in ensemble_aws_accounts_eai_response_payload['entry']:
ensemble_aws_accounts_link_alternate = ensemble_aws_accounts['links']['alternate'].replace('/configs/conf-ensemble_aws_accounts/', '/ensemble_aws_accounts/')
if ensemble_aws_accounts['content'].get('cloudformation_stack_id', '') != '':
ensemble_aws_accounts['content']['data_collection_deployed'] = '1'
# Get AWS Secret Key
passwords_conf_payload = self.simple_request_eai(ensemble_aws_accounts['content']['aws_secret_key_link_alternate'], 'list', 'GET')
SECRET_KEY = passwords_conf_payload['entry'][0]['content']['clear_password']
try:
client = boto3.client('cloudformation', aws_access_key_id=ensemble_aws_accounts['content']['aws_access_key'],
aws_secret_access_key=SECRET_KEY)
response = client.describe_stacks(
StackName=ensemble_aws_accounts['name'],
)
except Exception, e:
ensemble_aws_accounts['content']['data_collection_deployed'] = '0'
ensemble_aws_accounts['content']['data_collection_deployment_success'] = '0'
# Remove stack_id from the Ensemble AWS Account conf entry
ensemble_aws_accounts['content']['cloudformation_stack_id'] = ''
continue
data_collection_deployment_success = '0'
for stack in response['Stacks']:
if stack['StackName'] == ensemble_aws_accounts['name']:
if stack['StackStatus'] == 'DELETE_IN_PROGRESS':
data_collection_deployment_success = '3'
ensemble_aws_accounts['content']['data_collection_deployed'] = '2'
if stack['StackStatus'] == 'CREATE_IN_PROGRESS':
data_collection_deployment_success = '2'
if stack['StackStatus'] == 'UPDATE_IN_PROGRESS':
data_collection_deployment_success = '2'
if stack['StackStatus'] == 'CREATE_COMPLETE':
data_collection_deployment_success = '1'
if stack['StackStatus'] == 'UPDATE_COMPLETE':
data_collection_deployment_success = '1'
ensemble_aws_accounts['content']['data_collection_deployment_success'] = data_collection_deployment_success
else:
ensemble_aws_accounts['content']['data_collection_deployed'] = '0'
ensemble_aws_accounts['content']['ensemble_aws_accounts_link_alternate'] = ensemble_aws_accounts_link_alternate
ensemble_aws_accounts['content']['ensemble_aws_accounts_name'] = ensemble_aws_accounts['name']
ensemble_aws_accounts['content']['aws_access_key'] = ensemble_aws_accounts['content'].get('aws_access_key', '')
ensemble_aws_accounts['content']['cloudformation_stack_id'] = ensemble_aws_accounts['content'].get(
'cloudformation_stack_id', '')
ensemble_aws_accounts['content']['tags'] = ensemble_aws_accounts['content'].get('tags', '')
self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload)
def handleCreate(self, confInfo):
"""
Called when user invokes the "create" action.
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('Ensemble AWS account creation requested.')
# Validate and extract correct POST params
server_params = self.validate_server_schema_params()
auth_params = self.validate_auth_schema_params()
params = auth_params.copy()
params.update(server_params)
# Password creation
aws_secret_key_link_alternate = self.password_create(params['aws_access_key'], params['aws_secret_key'])
# ensemble_aws_accounts.conf creation and response
post_args = {
'name': params['name'],
'aws_account_id': params['aws_account_id'],
'aws_access_key': params['aws_access_key'],
'aws_secret_key_link_alternate': aws_secret_key_link_alternate,
'data_collection_deployed': '0',
'data_collection_deployment_success': '0',
'tags': params['tags']
}
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(self.get_conf_handler_path_name('ensemble_aws_accounts'),
'create', 'POST', post_args)
# Always populate entry content from request to list handler.
ensemble_aws_accounts_rest_path = '/servicesNS/%s/%s/ensemble_aws_accounts/%s' % (
'nobody', self.appName, urllib.quote_plus(params['name']))
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(ensemble_aws_accounts_rest_path, 'read', 'GET')
self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload)
def handleEdit(self, confInfo):
"""
Called when user invokes the 'edit' action. Index modification is not supported through this endpoint. Both the
scripted input and the ensemble_aws_accounts.conf stanza will be overwritten on ANY call to this endpoint.
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('Ensemble AWS Account edit requested.')
name = self.callerArgs.id
conf_stanza = urllib.quote_plus(name)
params = self.validate_server_schema_params()
conf_handler_path = '%s/%s' % (self.get_conf_handler_path_name('ensemble_aws_accounts', 'nobody'), conf_stanza)
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'list', 'GET')
old_aws_access_key = ensemble_aws_accounts_eai_response_payload['entry'][0]['content']['aws_access_key']
old_aws_secret_key_link_alternate = ensemble_aws_accounts_eai_response_payload['entry'][0]['content']['aws_secret_key_link_alternate']
# Create post args - remove name to ensure edit instead of create
ensemble_aws_accounts_conf_postargs = {
'aws_access_key': params['aws_access_key'],
'tags': params['tags'],
}
# Change password if provided in params
if old_aws_access_key != params['aws_access_key']:
if self.get_param('aws_secret_key'):
# New username and password provided
auth_params = self.validate_auth_schema_params()
params.update(auth_params)
# Edit passwords.conf stanza
ensemble_aws_accounts_conf_postargs['aws_secret_key_link_alternate'] = self.password_edit(old_aws_secret_key_link_alternate, params['aws_access_key'], params['aws_secret_key'])
else:
# Can't change username without providing password
raise admin.InternalException('AWS Secret Key must be provided on AWS Access Key change.')
if (old_aws_access_key == params['aws_access_key'] and self.get_param('aws_secret_key')):
# Password update to existing username
auth_params = self.validate_auth_schema_params()
params.update(auth_params)
# Edit passwords.conf stanza
ensemble_aws_accounts_conf_postargs['aws_secret_key_link_alternate'] = self.password_edit(old_aws_secret_key_link_alternate, params['aws_access_key'], params['aws_secret_key'])
if self.get_param('aws_secret_key'):
aws_secret_key_link_alternate = self.get_param('aws_secret_key')
else:
aws_secret_key_link_alternate = old_aws_secret_key_link_alternate
# Get AWS Secret Key
passwords_conf_payload = self.simple_request_eai(aws_secret_key_link_alternate, 'list', 'GET')
SECRET_KEY = passwords_conf_payload['entry'][0]['content']['clear_password']
if params['template_link_alternate'] and params['template_link_alternate'] != '' and params['cloudformation_template_action'] and params['cloudformation_template_action'] == 'apply':
# Get CloudFormation template string
cloudformation_templates_conf_payload = self.simple_request_eai(params['template_link_alternate'], 'list', 'GET')
template_filename = cloudformation_templates_conf_payload['entry'][0]['content']['filename']
with open(os.path.dirname(os.path.abspath(__file__)) + '/cloudformation_templates/' + template_filename) as json_file:
json_data = json.dumps(json.load(json_file))
try:
client = boto3.client('cloudformation', aws_access_key_id=params['aws_access_key'],
aws_secret_access_key=SECRET_KEY)
response = client.create_stack(
StackName=params['name'],
TemplateBody=json_data,
Capabilities=[
'CAPABILITY_IAM'
]
)
except Exception, e:
logger.error(e)
raise admin.InternalException('Error connecting to AWS or deploying CloudFormation template %s' % e)
ensemble_aws_accounts_conf_postargs['cloudformation_stack_id'] = response['StackId']
if params['cloudformation_template_action'] and params['cloudformation_template_action'] == 'remove':
try:
client = boto3.client('cloudformation', aws_access_key_id=params['aws_access_key'],
aws_secret_access_key=SECRET_KEY)
response = client.delete_stack(
StackName=params['name']
)
except Exception, e:
logger.error(e)
raise admin.InternalException('Error connecting to AWS or deleting CloudFormation template %s' % e)
if params['template_link_alternate'] and params['template_link_alternate'] != '' and params[
'cloudformation_template_action'] and params['cloudformation_template_action'] == 'update':
# Get CloudFormation template string
cloudformation_templates_conf_payload = self.simple_request_eai(params['template_link_alternate'], 'list',
'GET')
template_filename = cloudformation_templates_conf_payload['entry'][0]['content']['filename']
with open(os.path.dirname(os.path.abspath(__file__)) + '/cloudformation_templates/' + template_filename) as json_file:
json_data = json.dumps(json.load(json_file))
try:
client = boto3.client('cloudformation', aws_access_key_id=params['aws_access_key'],
aws_secret_access_key=SECRET_KEY)
response = client.update_stack(
StackName=params['name'],
TemplateBody=json_data,
Capabilities=[
'CAPABILITY_IAM'
]
)
except Exception, e:
logger.error(e)
raise admin.InternalException('Error connecting to AWS or deploying CloudFormation template %s' % e)
ensemble_aws_accounts_conf_postargs['cloudformation_stack_id'] = response['StackId']
# Edit ensemble_aws_accounts.conf
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'edit', 'POST',
ensemble_aws_accounts_conf_postargs)
# Always populate entry content from request to list handler.
ensemble_aws_accounts_rest_path = '/servicesNS/%s/%s/ensemble_aws_accounts/%s' % ('nobody', self.appName, conf_stanza)
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(ensemble_aws_accounts_rest_path, 'read', 'GET')
self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload)
def handleRemove(self, confInfo):
"""
Called when user invokes the 'remove' action. Removes the requested stanza from inputs.conf (scripted input),
removes the requested stanza from ensemble_aws_accounts.conf, and removes all related credentials
Arguments
confInfo -- The object containing the information about what is being requested.
"""
logger.info('Ensemble AWS Account removal requested.')
name = self.callerArgs.id
conf_stanza = urllib.quote_plus(name)
# Grab the link alternate and username from the ensemble_aws_accounts GET response payload before it gets deleted
ensemble_aws_accounts_rest_path = '/servicesNS/%s/%s/ensemble_aws_accounts/%s' % ('nobody', self.appName, conf_stanza)
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(ensemble_aws_accounts_rest_path, 'read', 'GET')
aws_secret_key_link_alternate = ensemble_aws_accounts_eai_response_payload['entry'][0]['content']['aws_secret_key_link_alternate']
# Delete passwords.conf stanza
self.password_delete(aws_secret_key_link_alternate)
# Delete ensemble_aws_accounts.conf stanza
conf_handler_path = '%s/%s' % (self.get_conf_handler_path_name('ensemble_aws_accounts'), conf_stanza)
ensemble_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'remove', 'DELETE')
self.set_conf_info_from_eai_payload(confInfo, ensemble_aws_accounts_eai_response_payload)
def password_edit(self, password_link_alternate, new_username, password):
"""
Edits a password entry using the storage/passwords endpoint. This endpoint will first delete the existing
entry, then creates a new one.
Arguments
password_link_alternate -- The link alternate of the password entry
password -- The actual password which will be encrypted and stored in passwords.conf
"""
self.password_delete(password_link_alternate)
return self.password_create(new_username, password)
def password_delete(self, password_link_alternate):
"""
Deletes a password entry using the storage/passwords endpoint.
Arguments
password_link_alternate -- The link alternate of the password entry
"""
passwords_conf_payload = self.simple_request_eai(password_link_alternate, 'remove', 'DELETE')
def hash_len_confirm(self, password, password_after, password_orig_hash, password_after_hash):
"""
Confirms length of plaintext password matches retrieved decrypted password. Also compares the hashes of
the initial and retrieved passwords.
Arguments
password -- The actual password which was encrypted and stored in passwords.conf
password_after -- The decrypted password retrieved from passwords.conf
password_orig_hash -- The hash of the actual password which was encrypted and stored in passwords.conf
password_after_hash -- The hash of the decrypted password retrieved from passwords.conf
"""
assert len(password_after) == len(password)
assert password_orig_hash == password_after_hash
def password_create(self, username, password):
"""
Creates a password entry using the storage/passwords endpoint. This endpoint will validate successful creationof the password by comparing length and hashes of the provided password and the retrieved cleartext password. Password realm will include a unique GUID.
Arguments
username -- The username associated with the provided password
password -- The actual password which will be encrypted and stored in passwords.conf
"""
m = hashlib.md5()
m.update(password)
password_orig_hash = m.hexdigest()
realm = str(uuid.uuid4().hex)
passwords_conf_postargs = {
'realm': realm,
'name': username,
'password': password
}
passwords_rest_path = '/servicesNS/%s/%s/storage/passwords/' % ('nobody', self.appName)
# Create password
passwords_conf_payload = self.simple_request_eai(passwords_rest_path, 'create', 'POST', passwords_conf_postargs)
password_link_alternate = passwords_conf_payload['entry'][0]['links']['alternate']
# Load password to check hash and length
passwords_conf_payload = self.simple_request_eai(password_link_alternate, 'list', 'GET')
password_after = passwords_conf_payload['entry'][0]['content']['clear_password']
m = hashlib.md5()
m.update(password_after)
password_after_hash = m.hexdigest()
try:
self.hash_len_confirm(password, password_after, password_orig_hash, password_after_hash)
except Exception, e:
logger.error(e)
raise admin.InternalException('Password stored incorrectly %s' % e)
return password_link_alternate
def validate_server_schema_params(self):
"""
Validates raw request params against the server schema
"""
params = self.get_params(schema=ensemble_aws_accounts_schema, filter=ensemble_aws_accounts_schema.SERVER_FIELDS)
return self.validate_params(ensemble_aws_accounts_schema.server_schema, params)
def validate_auth_schema_params(self):
"""
Validates raw request params against the auth schema
"""
params = self.get_params(schema=ensemble_aws_accounts_schema, filter=ensemble_aws_accounts_schema.AUTH_FIELDS)
return self.validate_params(ensemble_aws_accounts_schema.auth_schema, params)
admin.init(EnsembleAWSAccountsEAIHandler, admin.CONTEXT_NONE)
| 19,554 | 5,268 |
import random
import unittest
import torch
import invertransforms as T
class InvertibleTestCase(unittest.TestCase):
    """Shared fixtures for invertransforms tests: a random image in both
    tensor and PIL form, plus common sizes and a random integer."""

    def setUp(self) -> None:
        # Base image dimensions (height, width) and a crop size for tests.
        self.img_size = (256, 320)
        self.h, self.w = self.img_size
        self.crop_size = (64, 128)
        # Random single-channel image clamped into [0, 1].
        self.img_tensor = torch.randn((1,) + self.img_size).clamp(0, 1)
        self.img_pil = T.ToPILImage()(self.img_tensor)
        # Round-trip through PIL so tensor and PIL fixtures hold identical
        # data (presumably ToPILImage quantizes to 8-bit -- confirm).
        self.img_tensor = T.ToTensor()(self.img_pil)
        # Arbitrary random integer available to tests.
        self.n = random.randint(0, 1e9)
| 480 | 197 |
description = 'Devices for the ZEBRA monochromator'

# EPICS PV prefixes for the three motor controller groups.
mota = 'SQ:ZEBRA:mota:'
motb = 'SQ:ZEBRA:motb:'
motd = 'SQ:ZEBRA:motd:'

# NOTE: device descriptions below previously misspelled "parallel" as
# "paralell"; the typo is fixed here (display text only, no PV changes).
devices = dict(
    mtvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator vertical translation',
        motorpv = mota + 'MTVL',
        errormsgpv = mota + 'MTVL-MsgTxt',
        precision = 0.5,
    ),
    mtpl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator parallel translation',
        motorpv = mota + 'MTPL',
        errormsgpv = mota + 'MTPL-MsgTxt',
        precision = 0.5,
    ),
    mgvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator vertical goniometer',
        motorpv = mota + 'MGVL',
        errormsgpv = mota + 'MGVL-MsgTxt',
        precision = 0.5,
    ),
    mgpl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator parallel goniometer',
        motorpv = mota + 'MGPL',
        errormsgpv = mota + 'MGPL-MsgTxt',
        precision = 0.5,
    ),
    moml = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator omega',
        motorpv = mota + 'MOML',
        errormsgpv = mota + 'MOML-MsgTxt',
        precision = 0.5,
    ),
    mtvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator vertical translation',
        motorpv = mota + 'MTVU',
        errormsgpv = mota + 'MTVU-MsgTxt',
        precision = 0.5,
    ),
    mtpu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator parallel translation',
        motorpv = mota + 'MTPU',
        errormsgpv = mota + 'MTPU-MsgTxt',
        precision = 0.5,
    ),
    mgvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator vertical goniometer',
        motorpv = mota + 'MGVU',
        errormsgpv = mota + 'MGVU-MsgTxt',
        precision = 0.5,
    ),
    mgpu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator parallel goniometer',
        motorpv = mota + 'MGPU',
        errormsgpv = mota + 'MGPU-MsgTxt',
        precision = 0.5,
    ),
    momu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator omega',
        motorpv = mota + 'MOMU',
        errormsgpv = mota + 'MOMU-MsgTxt',
        precision = 0.5,
    ),
    # NOTE(review): mcvl uses the mota prefix while mcvu uses motb -- this
    # matches the original configuration; confirm against cabling docs.
    mcvl = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Lower monochromator curvature',
        motorpv = mota + 'MCVL',
        errormsgpv = mota + 'MCVL-MsgTxt',
        precision = 0.5,
    ),
    mcvu = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Upper monochromator curvature',
        motorpv = motb + 'MCVU',
        errormsgpv = motb + 'MCVU-MsgTxt',
        precision = 0.5,
    ),
    mexz = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Monochromator lift',
        motorpv = motb + 'MEXZ',
        errormsgpv = motb + 'MEXZ-MsgTxt',
        precision = 0.5,
    ),
    # NOTE(review): unit 'A-1' looks like a wavevector unit; for a
    # wavelength device 'A' (Angstrom) may be intended -- confirm.
    wavelength = device('nicos_sinq.zebra.devices.zebrawl.ZebraWavelength',
        description = 'Wavelength for ZEBRA',
        unit = 'A-1',
        lift = 'mexz'
    ),
    cex1 = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'First collimator drum',
        motorpv = motd + 'CEX1',
        errormsgpv = motd + 'CEX1-MsgTxt',
        precision = 0.5,
    ),
    cex2 = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Second collimator drum',
        motorpv = motd + 'CEX2',
        errormsgpv = motd + 'CEX2-MsgTxt',
        precision = 0.5,
    ),
)
| 4,162 | 1,659 |
import urllib.request, urllib.parse, urllib.error
import logging
import os
from datetime import datetime
import sys
import importlib
importlib.reload(sys) # Reload does the trick!
from src.helpers import upload_file
from scripts.loading.database_session import get_session
from scripts.loading.ontology import read_owl
from src.models import Source, Ro, Edam, Dbentity, Filedbentity, \
Psimod, Psimi, PsimiUrl, PsimiAlias, PsimiRelation
__author__ = 'sweng66'

## Created on March 2018
## This script is used to update PSI-MI ontology in NEX2.

# Per-run load report written by load_new_data()/write_summary_and_send_email().
log_file = 'scripts/loading/ontology/logs/psimi.log'

# Ontology key passed to read_owl() and Source.display_name for created rows.
ontology = 'PSIMI'
src = 'PSI'

# Recorded in created_by columns; raises KeyError at import time if the
# DEFAULT_USER environment variable is not set.
CREATED_BY = os.environ['DEFAULT_USER']

logging.basicConfig(format='%(message)s')
log = logging.getLogger()
log.setLevel(logging.INFO)

log.info("PSI-MI Ontology Loading Report:\n")
def load_ontology(ontology_file):
    """Top-level driver: load *ontology_file* (OWL) into the Psimi tables.

    Builds lookup dicts from the current database, parses the OWL file with
    read_owl(), reconciles terms/aliases/relations via load_new_data(), and
    writes a run summary to log_file.
    """
    nex_session = get_session()

    log.info(str(datetime.now()))
    log.info("Getting data from database...")

    # Lookup tables keyed by natural identifiers.
    source_to_id = dict([(x.display_name, x.source_id) for x in nex_session.query(Source).all()])
    psimiid_to_psimi = dict([(x.psimiid, x) for x in nex_session.query(Psimi).all()])
    term_to_ro_id = dict([(x.display_name, x.ro_id) for x in nex_session.query(Ro).all()])
    roid_to_ro_id = dict([(x.roid, x.ro_id) for x in nex_session.query(Ro).all()])
    edam_to_id = dict([(x.format_name, x.edam_id) for x in nex_session.query(Edam).all()])

    # psimi_id -> [(alias display_name, alias_type), ...]
    psimi_id_to_alias = {}
    for x in nex_session.query(PsimiAlias).all():
        aliases = []
        if x.psimi_id in psimi_id_to_alias:
            aliases = psimi_id_to_alias[x.psimi_id]
        aliases.append((x.display_name, x.alias_type))
        psimi_id_to_alias[x.psimi_id] = aliases

    # child psimi_id -> [(parent psimi_id, ro_id), ...]
    psimi_id_to_parent = {}
    for x in nex_session.query(PsimiRelation).all():
        parents = []
        if x.child_id in psimi_id_to_parent:
            parents = psimi_id_to_parent[x.child_id]
        parents.append((x.parent_id, x.ro_id))
        psimi_id_to_parent[x.child_id] = parents

    ####################################
    fw = open(log_file, "w")

    log.info("Reading data from ontology file...")

    data = read_owl(ontology_file, ontology)

    log.info("Updating psimi ontology data in the database...")

    [update_log, to_delete_list] = load_new_data(nex_session, data,
                                                 source_to_id,
                                                 psimiid_to_psimi,
                                                 term_to_ro_id['is a'],
                                                 roid_to_ro_id,
                                                 psimi_id_to_alias,
                                                 psimi_id_to_parent,
                                                 fw)
    # S3 upload is currently disabled.
    # log.info("Uploading file to S3...")
    # update_database_load_file_to_s3(nex_session, ontology_file, source_to_id, edam_to_id)

    log.info("Writing loading summary...")

    write_summary_and_send_email(fw, update_log, to_delete_list)

    nex_session.close()

    fw.close()

    log.info(str(datetime.now()))
    log.info("Done!\n\n")
def load_new_data(nex_session, data, source_to_id, psimiid_to_psimi, ro_id, roid_to_ro_id, psimi_id_to_alias, psimi_id_to_parent, fw):
    """Reconcile the database with the terms parsed from the OWL file.

    Args:
        nex_session: active SQLAlchemy session.
        data: term dicts from read_owl() (keys: 'id', 'term', 'definition',
            'aliases', 'parents', 'other_parents').
        source_to_id: Source.display_name -> source_id.
        psimiid_to_psimi: psimiid -> existing Psimi row.
        ro_id: ro_id of the 'is a' relation.
        roid_to_ro_id: ROID -> ro_id.
        psimi_id_to_alias: psimi_id -> [(alias, alias_type), ...].
        psimi_id_to_parent: child psimi_id -> [(parent_id, ro_id), ...].
        fw: open handle of the run log file.

    Returns:
        [update_log, to_delete] where update_log counts 'added'/'updated'/
        'deleted' rows and to_delete lists (psimiid, display_name) pairs of
        terms absent from the current release.
    """
    active_psimiid = []
    update_log = {}
    for count_name in ['updated', 'added', 'deleted']:
        update_log[count_name] = 0

    # Guard dicts preventing duplicate inserts within this run.
    relation_just_added = {}
    alias_just_added = {}

    for x in data:
        psimi_id = None
        if "MI:" not in x['id']:
            continue
        if x['id'] in psimiid_to_psimi:
            ## term already in database
            y = psimiid_to_psimi[x['id']]
            psimi_id = y.psimi_id
            # NOTE(review): is_obsolete is written as the strings '0'/'1'
            # below, so `is True` only matches rows where the ORM maps this
            # column to a real boolean -- confirm the column type.
            if y.is_obsolete is True:
                # Bug fix: capture the value BEFORE overwriting it; the old
                # code logged the field after assignment, so the "from"
                # value in the message was always '0'.
                old_value = y.is_obsolete
                y.is_obsolete = '0'
                nex_session.add(y)
                nex_session.flush()
                update_log['updated'] = update_log['updated'] + 1
                fw.write("The is_obsolete for " + x['id'] + " has been updated from " + str(old_value) + " to " + 'False' + "\n")
            if x['term'] != y.display_name.strip():
                ## display_name changed in the ontology; update it
                fw.write("The display_name for " + x['id'] + " has been updated from " + y.display_name + " to " + x['term'] + "\n")
                y.display_name = x['term']
                update_log['updated'] = update_log['updated'] + 1
            active_psimiid.append(x['id'])
        else:
            ## brand-new term
            fw.write("NEW entry = " + x['id'] + " " + x['term'] + "\n")
            this_x = Psimi(source_id = source_to_id[src],
                           format_name = x['id'],
                           psimiid = x['id'],
                           display_name = x['term'],
                           description = x['definition'],
                           obj_url = '/psimi/' + x['id'],
                           is_obsolete = '0',
                           created_by = CREATED_BY)
            nex_session.add(this_x)
            nex_session.flush()
            psimi_id = this_x.psimi_id
            update_log['added'] = update_log['added'] + 1

        # External links use the OBO-style identifier (':' -> '_').
        link_id = x['id'].replace(':', '_')
        insert_url(nex_session, source_to_id['Ontobee'], 'Ontobee', psimi_id,
                   'http://www.ontobee.org/ontology/MI?iri=http://purl.obolibrary.org/obo/'+link_id,
                   fw)
        insert_url(nex_session, source_to_id['OLS'], 'OLS', psimi_id,
                   'http://www.ebi.ac.uk/ols/ontologies/mi/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F' + link_id,
                   fw)

        ## add 'is a' RELATIONS
        for parent_psimiid in x['parents']:
            parent = psimiid_to_psimi.get(parent_psimiid)
            if parent is not None:
                insert_relation(nex_session, source_to_id[src], parent.psimi_id,
                                psimi_id, ro_id, relation_just_added, fw)

        ## add typed RELATIONS
        for (parent_psimiid, roid) in x['other_parents']:
            parent = psimiid_to_psimi.get(parent_psimiid)
            if parent is not None:
                this_ro_id = roid_to_ro_id.get(roid)
                if this_ro_id is None:
                    log.info("The ROID:" + str(roid) + " is not found in the database")
                    continue
                insert_relation(nex_session, source_to_id[src], parent.psimi_id,
                                psimi_id, this_ro_id, relation_just_added, fw)

        ## add ALIASES
        for (alias, alias_type) in x['aliases']:
            # Bug fix: the original compared against the misspelled literal
            # 'EAXCT', so no alias ever passed this filter; the intended
            # type (see update_aliases) is 'EXACT'.
            if alias_type != 'EXACT':
                continue
            insert_alias(nex_session, source_to_id[src], alias,
                         alias_type, psimi_id, alias_just_added, fw)

        ## update RELATIONS
        curr_parents = psimi_id_to_parent.get(psimi_id)
        if curr_parents is None:
            curr_parents = []
        update_relations(nex_session, psimi_id, curr_parents, x['parents'],
                         x['other_parents'], roid_to_ro_id,
                         source_to_id[src], psimiid_to_psimi, ro_id, relation_just_added, fw)

        ## update ALIASES
        update_aliases(nex_session, psimi_id, psimi_id_to_alias.get(psimi_id), x['aliases'],
                       source_to_id[src], psimiid_to_psimi, alias_just_added, fw)

    # Mark terms that disappeared from the release as obsolete.
    active_set = set(active_psimiid)  # O(1) membership instead of O(n) list scans
    to_delete = []
    for psimiid in psimiid_to_psimi:
        if psimiid in active_set:
            continue
        x = psimiid_to_psimi[psimiid]
        # Locally created NTR terms are never obsoleted by the loader.
        if psimiid.startswith('NTR'):
            continue
        to_delete.append((psimiid, x.display_name))
        # NOTE(review): same caveat as above -- `is False` only matches a
        # real boolean column, never the string '0'. Confirm column type.
        if x.is_obsolete is False:
            old_value = x.is_obsolete
            x.is_obsolete = '1'
            nex_session.add(x)
            nex_session.flush()
            update_log['updated'] = update_log['updated'] + 1
            # Bug fix: log the pre-update value (see note above).
            fw.write("The is_obsolete for " + x.psimiid + " has been updated from " + str(old_value) + " to " + 'True' + "\n")

    nex_session.commit()

    return [update_log, to_delete]
def update_aliases(nex_session, psimi_id, curr_aliases, new_aliases, source_id, psimiid_to_psimi, alias_just_added, fw):
    """Synchronize PsimiAlias rows for one term.

    Inserts every EXACT alias present in *new_aliases* but missing from
    *curr_aliases*, and deletes any stored alias that no longer appears in
    *new_aliases*.  (*psimiid_to_psimi* is unused here; kept so the call
    signature matches the other update_* helpers.)
    """
    existing = curr_aliases if curr_aliases is not None else []

    for pair in new_aliases:
        alias_name, alias_kind = pair
        if alias_kind != 'EXACT':
            continue
        if pair not in existing:
            insert_alias(nex_session, source_id, alias_name, alias_kind, psimi_id, alias_just_added, fw)

    for pair in existing:
        if pair in new_aliases:
            continue
        alias_name, alias_kind = pair
        row = nex_session.query(PsimiAlias).filter_by(psimi_id=psimi_id, display_name=alias_name, alias_type=alias_kind).first()
        nex_session.delete(row)
        fw.write("The old alias = " + alias_name + " has been deleted for psimi_id = " + str(psimi_id) + "\n")
def update_relations(nex_session, child_id, curr_parent_ids, new_parents, other_parents, roid_to_ro_id, source_id, psimiid_to_psimi, ro_id, relation_just_added, fw):
    """Synchronize PsimiRelation rows for one child term.

    Builds the desired (parent_id, ro_id) pairs from *new_parents* ('is a'
    links, using *ro_id*) and *other_parents* (typed links resolved through
    *roid_to_ro_id*), inserts missing pairs, and deletes pairs that are no
    longer present.
    """
    desired = []

    for parent_psimiid in new_parents:
        parent_row = psimiid_to_psimi.get(parent_psimiid)
        if parent_row is None:
            continue
        pair = (parent_row.psimi_id, ro_id)
        desired.append(pair)
        if pair not in curr_parent_ids:
            insert_relation(nex_session, source_id, parent_row.psimi_id, child_id,
                            ro_id, relation_just_added, fw)

    for (parent_psimiid, roid) in other_parents:
        parent_row = psimiid_to_psimi.get(parent_psimiid)
        if parent_row is None:
            continue
        typed_ro_id = roid_to_ro_id.get(roid)
        if typed_ro_id is None:
            log.info("The ROID:" + str(roid) + " is not found in the database")
            continue
        pair = (parent_row.psimi_id, typed_ro_id)
        desired.append(pair)
        if pair not in curr_parent_ids:
            insert_relation(nex_session, source_id, parent_row.psimi_id, child_id,
                            typed_ro_id, relation_just_added, fw)

    # Remove any stored parent link no longer present in the ontology.
    # (Distinct loop-variable names avoid shadowing the ro_id parameter.)
    for stale in curr_parent_ids:
        if stale in desired:
            continue
        stale_parent_id, stale_ro_id = stale
        row = nex_session.query(PsimiRelation).filter_by(child_id=child_id, parent_id=stale_parent_id, ro_id=stale_ro_id).first()
        nex_session.delete(row)
        fw.write("The old parent: parent_id = " + str(stale_parent_id) + " has been deleted for psimi_id = " + str(child_id) + "\n")
def insert_url(nex_session, source_id, display_name, psimi_id, url, fw, url_type=None):
    """Create a PsimiUrl row for the given term.

    *url_type* defaults to *display_name* when not supplied.
    """
    effective_type = display_name if url_type is None else url_type
    row = PsimiUrl(display_name=display_name,
                   url_type=effective_type,
                   source_id=source_id,
                   psimi_id=psimi_id,
                   obj_url=url,
                   created_by=CREATED_BY)
    nex_session.add(row)
    nex_session.flush()
    fw.write("Added new URL: " + url + " for psimi_id = " + str(psimi_id) + "\n")
def insert_alias(nex_session, source_id, display_name, alias_type, psimi_id, alias_just_added, fw):
    """Create a PsimiAlias row unless the identical alias was already
    inserted during this run (tracked in *alias_just_added*)."""
    key = (psimi_id, display_name, alias_type)
    if key in alias_just_added:
        return
    alias_just_added[key] = 1

    row = PsimiAlias(display_name=display_name,
                     alias_type=alias_type,
                     source_id=source_id,
                     psimi_id=psimi_id,
                     created_by=CREATED_BY)
    nex_session.add(row)
    nex_session.flush()
    fw.write("Added new ALIAS: " + display_name + " for psimi_id = " + str(psimi_id) + "\n")
def insert_relation(nex_session, source_id, parent_id, child_id, ro_id, relation_just_added, fw):
    """Create a PsimiRelation row unless the same parent/child pair was
    already inserted during this run (tracked in *relation_just_added*)."""
    key = (parent_id, child_id)
    if key in relation_just_added:
        return
    relation_just_added[key] = 1

    row = PsimiRelation(parent_id=parent_id,
                        child_id=child_id,
                        source_id=source_id,
                        ro_id=ro_id,
                        created_by=CREATED_BY)
    nex_session.add(row)
    nex_session.flush()
    fw.write("Added new PARENT: parent_id = " + str(parent_id) + " for psimi_id = " + str(child_id) + "\n")
def update_database_load_file_to_s3(nex_session, ontology_file, source_to_id, edam_to_id):
    """Gzip the ontology file, register it as a Filedbentity and upload it
    to S3 (currently disabled at the call site in load_ontology)."""
    gzip_file = ontology_file + ".gz"
    import gzip
    import shutil
    with open(ontology_file, 'rb') as f_in, gzip.open(gzip_file, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)

    local_file = open(gzip_file, mode='rb')

    import hashlib
    # NOTE(review): this hashes the file NAME, not the file contents, so the
    # duplicate check below only fires when the same path string was loaded
    # before -- confirm whether a content digest was intended.
    psimi_md5sum = hashlib.md5(ontology_file.encode()).hexdigest()
    psimi_row = nex_session.query(Filedbentity).filter_by(md5sum = psimi_md5sum).one_or_none()

    # Already registered: nothing to do.
    if psimi_row is not None:
        return

    # Archive any previously active entry with the same display name.
    nex_session.query(Dbentity).filter_by(display_name=gzip_file, dbentity_status='Active').update({"dbentity_status": 'Archived'})
    nex_session.commit()

    data_id = edam_to_id.get('EDAM:2353')   ## data:2353    Ontology data
    topic_id = edam_to_id.get('EDAM:0089')  ## topic:0089   Ontology and terminology
    format_id = edam_to_id.get('EDAM:3262') ## format:3262  OWL/XML

    from sqlalchemy import create_engine
    from src.models import DBSession
    engine = create_engine(os.environ['NEX2_URI'], pool_recycle=3600)
    DBSession.configure(bind=engine)

    upload_file(CREATED_BY, local_file,
                filename=gzip_file,
                file_extension='gz',
                description='PSI-MI Ontology in OWL RDF/XML format',
                display_name=gzip_file,
                data_id=data_id,
                format_id=format_id,
                topic_id=topic_id,
                status='Active',
                is_public='0',
                is_in_spell='0',
                is_in_browser='0',
                file_date=datetime.now(),
                source_id=source_to_id['SGD'],
                md5sum=psimi_md5sum)
def write_summary_and_send_email(fw, update_log, to_delete_list):
    """Write the load summary to *fw* and echo a short version to the log.

    The logged version omits the (possibly long) list of terms missing from
    the current release; the file version includes it.
    """
    summary = "Updated: " + str(update_log['updated']) + "\n"
    summary = summary + "Added: " + str(update_log['added']) + "\n"
    summary_4_email = summary

    if to_delete_list:
        parts = [summary, "The following PSI-MI terms are not in the current release:\n"]
        for (psimiid, term) in to_delete_list:
            parts.append("\t" + psimiid + " " + term + "\n")
        summary = "".join(parts)

    fw.write(summary)
    log.info(summary_4_email)
if __name__ == "__main__":
    # Fetch the latest PSI-MI OWL release from the OBO PURL, then load it.
    url_path = 'http://purl.obolibrary.org/obo/'
    mi_owl_file = 'mi.owl'
    urllib.request.urlretrieve(url_path + mi_owl_file, mi_owl_file)
    load_ontology(mi_owl_file)
| 15,886 | 5,496 |
from data_module.get_source_file import *
import numpy as np
class DataCleaner(object):
def __init__(self):
    """Initialize the per-chart data series and load the source file."""
    # Chart 1 data series
    self.data_LD = []
    self.data_I = []
    self.data_LU = []
    self.data_T = []
    # Chart 2 data series
    self.data_RD = []
    self.data_RU = []
    # Chart 3 data series
    self.data_K_R = []
    self.data_K_L = []
    self.data_J = []
    # Chart 4 data series (differentials computed in read_data)
    self.data_LU_LD = []
    self.data_RD_RU = []
    # Chart 5 data series
    self.data_K = []
    self.data_P = []
    self.data_R = []
    self.data_S = []
    # Read and decode the source file, filling the lists above.
    self.read_data()
def single_line_cleaning(self, one_line):
    """Decode one raw record line.

    Drops the first character and everything from the ' 0D 0A' terminator
    onward, splits the remainder into space-separated hex byte tokens, and
    returns the decoded value tuple from __get_datas().
    """
    payload = one_line[1:one_line.index(" 0D 0A")]
    tokens = payload.split(" ")
    return self.__get_datas(tokens)
def get_item_data(self, data, i):
"""
需要计算的单数据生成
:param data:
:param i:
:return:
"""
return ((int(data[i], 16) << 8) + int(data[i + 1], 16)) / 100
def get_item_single_data(self, data, i):
"""
简单的单数据生成
:param i:
:return:
"""
return int(data[i], 16)
def __get_datas(self, data):
"""
多种单数据生成
:param data:
:return:
"""
LD = self.get_item_data(data, 0)
LU = self.get_item_data(data, 2)
RD = self.get_item_data(data, 4)
RU = self.get_item_data(data, 6)
K_R = self.get_item_data(data, 10)
K_L = self.get_item_data(data, 12)
I = self.get_item_single_data(data, 8)
T = self.get_item_single_data(data, 19)
J = self.get_item_single_data(data, 9)
Q = self.get_item_single_data(data, 16) / 50
K = self.get_item_single_data(data, 16)
P = self.get_item_single_data(data, 16) / 10
R = self.get_item_single_data(data, 17)
S = self.get_item_single_data(data, 18)
return LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S
def read_data(self):
source_data = file_read_lines()
for line_str in source_data:
cleaned_data = self.single_line_cleaning(line_str)
# print(cleaned_data)
# LD, LU, RD, RU, K_R, K_L, I, T, J, Q, K, P, R, S
self.data_LD.append(cleaned_data[0])
self.data_LU.append(cleaned_data[1])
self.data_RD.append(cleaned_data[2])
self.data_RU.append(cleaned_data[3])
self.data_K_R.append(cleaned_data[4])
self.data_K_L.append(cleaned_data[5])
self.data_I.append(cleaned_data[6])
self.data_T.append(cleaned_data[7])
self.data_J.append(cleaned_data[8])
self.data_Q.append(cleaned_data[9])
self.data_K.append(cleaned_data[10])
self.data_P.append(cleaned_data[11])
self.data_R.append(cleaned_data[12])
self.data_S.append(cleaned_data[13])
self.data_LU_LD.append(round(cleaned_data[1] - cleaned_data[0], 2))
self.data_RD_RU.append(round(cleaned_data[2] - cleaned_data[3], 2))
def get_chart_data(self, chart_num):
# print("data_LD:%s" % self.data_LD)
# print("data_LU:%s" % self.data_LU)
# print("data_RD:%s" % self.data_RD)
# print("data_K_R:%s" % self.data_K_R)
# print("data_K_L:%s" % self.data_K_L)
# print("data_I:%s" % self.data_I)
# print("data_T:%s" % self.data_T)
# print("data_J:%s" % self.data_J)
# print("data_Q:%s" % self.data_Q)
# print("data_K:%s" % self.data_K)
# print("data_P:%s" % self.data_P)
# print("data_R:%s" % self.data_R)
# print("data_S:%s" % self.data_S)
if chart_num == '1':
# 返回图1 所有需要的数据
return {'data_LD': self.data_LD,
'data_I': self.data_I,
'data_LU': self.data_LU,
'data_T': self.data_T, }
elif chart_num == '2':
# 返回图2 所有需要的数据
return {'data_RD': self.data_RD,
'data_I': self.data_I,
'data_RU': self.data_RU,
'data_T': self.data_T, }
elif chart_num == '3':
return {'data_RD': self.data_K_R,
'data_K_L': self.data_K_L,
'data_I': self.data_I,
'data_T': self.data_T,
'data_J': self.data_J, }
elif chart_num == '4':
return {'data_LU_LD': self.data_LU_LD,
'data_RD_RU': self.data_RD_RU,
'data_K_R': self.data_K_R,
'data_K_L': self.data_K_L,
'data_I': self.data_I,
'data_T': self.data_T,
'data_J': self.data_J,
'data_Q': self.data_Q, }
elif chart_num == '5':
return {'data_K': self.data_K,
'data_P': self.data_P,
'data_R': self.data_R,
'data_S': self.data_S, }
if __name__ == '__main__':
    # Smoke-test: decode the source file and dump the chart-1 series.
    cleaner = DataCleaner()
    chart_data = cleaner.get_chart_data("1")
    for series_name in chart_data:
        print(f"{series_name}:{chart_data[series_name]}")
| 5,312 | 2,072 |
from django.apps import AppConfig
class RequestOrderConfig(AppConfig):
    """Django application configuration for the ``request_order`` app."""
    name = 'request_order'
| 100 | 29 |
""" Sample to find common keys between 2 dictionaries """
d1 = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
d2 = {
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
print(d1.keys() & d2.keys()) # intersection
print(d1.keys() | d2.keys()) # union
print(d1.keys() - d2.keys()) # diff
| 293 | 144 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Refuse to run under anything but Python 3.
    if sys.version_info.major != 3:
        sys.stderr.write('Batavia requires Python 3' + os.linesep)
        sys.exit(1)
    # Use the local settings module unless one is already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 372 | 136 |
#!/usr/bin/env python3
def predict(dict_values):
    """Return 2 raised to the power of the "x" entry of *dict_values*.

    The entry is coerced with float(), so both numbers and numeric
    strings are accepted; the result is always a float.
    """
    exponent = float(dict_values["x"])
    return 2 ** exponent
| 120 | 49 |
# SECURITY: a live-looking Telegram bot API token is hard-coded and committed
# to source control.  It should be revoked/rotated and loaded from an
# environment variable or secrets store instead -- TODO confirm rotation.
TELEGRAM_API_TOKEN = '1978689233:AAGsYEAvbN3ZkrA5a77VTO9yc30VSNZs8lQ'
BOT_USERNAME = 'boardroomv1bot'
# Empty here; presumably filled in per deployment -- verify before use.
WEBHOOK_URL = ''
import gzip
import json
import os
import shutil
from os import PathLike
from pathlib import Path
from typing import Dict, List
import ndjson
def check_file_is_gzipped(file_path: PathLike) -> bool:
    """Return True if *file_path* names a gzipped ndjson file.

    A path qualifies when its final two suffixes are exactly
    ``.ndjson`` followed by ``.gz`` (e.g. ``data.ndjson.gz``).

    The original spelled this as ``if cond: return False else: return True``;
    returning the comparison directly is equivalent and clearer.
    """
    return Path(file_path).suffixes[-2:] == ['.ndjson', '.gz']
def gzip_file(input_path: PathLike, output_path: PathLike = None, keep=True):
    """Gzip-compress *input_path*.

    The output defaults to the input path with ``.gz`` appended.  When
    *keep* is false the original file is removed after compression.
    Raises ValueError if the input already carries a ``.gz`` suffix.
    """
    source = Path(input_path)
    if source.name.endswith('.gz'):
        raise ValueError(f'{source} is already gzipped')
    target = Path(output_path) if output_path else source.with_name(source.name + '.gz')
    with source.open('rb') as raw, gzip.open(target, 'wb') as compressed:
        shutil.copyfileobj(raw, compressed)
    if not keep:
        os.unlink(source)
def read_json_gz_file(input_path: PathLike, decoder='utf-8'):
    """Load one JSON document from a gzip-compressed file.

    *decoder* names the text encoding used to decode the decompressed bytes.
    """
    with gzip.open(Path(input_path), 'rb') as handle:
        payload = handle.read()
    return json.loads(payload.decode(decoder))
def read_ndjson_gz_file(input_path: PathLike, decoder='utf-8'):
    """Load every record from a gzip-compressed newline-delimited JSON file.

    *decoder* names the text encoding used to decode each line.
    Returns the records as a list of parsed JSON values.

    The original drove ``readline()`` in a manual while-loop; iterating
    the file object yields the same lines idiomatically.
    """
    input_path = Path(input_path)
    with gzip.open(input_path, 'rb') as in_f:
        return [json.loads(line.decode(decoder)) for line in in_f]
def write_to_ndjson_gz_file(data: List[Dict], output_file: PathLike):
    """Serialize *data* as newline-delimited JSON and gzip it to *output_file*.

    The target name must end with ``.ndjson.gz``; an uncompressed
    intermediate ``.ndjson`` file is written first, then compressed and
    removed by ``gzip_file``.
    """
    target = Path(output_file)
    if not target.name.endswith(".ndjson.gz"):
        raise ValueError("Output file must end with .ndjson.gz")
    # target.stem drops only the trailing ".gz", leaving the ".ndjson" name.
    intermediate = target.parent / target.stem
    with intermediate.open('w') as sink:
        ndjson.dump(data, sink)
    gzip_file(intermediate, target, keep=False)
| 1,765 | 626 |
import itertools
import time
def digits(n):
    """Yield the decimal digits of non-negative *n*, least significant first.

    Yields nothing for n == 0.  Ported from Python 2: ``n /= 10`` was
    integer division there; under Python 3 it would produce floats and
    the loop would effectively never terminate, so ``//=`` is required.
    """
    while n:
        yield n % 10
        n //= 10
def pows(b):
    """Yield successive powers of *b*: 1, b, b**2, ...

    Bug fix: the base parameter *b* was accepted but ignored -- the loop
    always multiplied by 10.  The only caller in this file passes b=10,
    so its behaviour is unchanged; other bases now work as the signature
    implies.
    """
    x = 1
    while True:
        yield x
        x *= b
def f(n, d):
    """Evaluate the per-digit-position counting formula for digit *d* up to *n*.

    Appears to count occurrences of digit *d* in 1..n via the classic
    place-value formula, evaluated once per decimal position of n --
    presumably for a Project-Euler-style fixed-point search; confirm
    against solve()'s usage.

    Ported from Python 2: '/' replaced by '//' (the original relied on
    integer division) and itertools.izip by the builtin zip.
    """
    def g(d0, m):
        # d0: digit of n at the position whose place value is m.
        if d0 < d:
            return n // (m * 10) * m
        elif d0 == d:
            return n // (m * 10) * m + n % m + 1
        else:
            return (n // (m * 10) + 1) * m
    return sum(itertools.starmap(g, zip(digits(n), pows(10))))
def solve(L, d):
    """Sum every n (below the divergence bound *L*) with f(n, d) == n.

    Scans upward, jumping ahead where f's value proves no nearby fixed
    point exists, and stops once f(n, d) exceeds n by at least L.

    Ported from Python 2: the skip-ahead division must be integer
    division ('//') under Python 3.
    """
    n = 1
    total = 0
    while True:
        m = f(n, d)
        if m == n:
            total += n
            n += 1
        elif m < n:
            # Skip ahead proportionally to the deficit, at least one step.
            n += max(1, (n - m) // (sum(1 for _ in digits(n)) + 1))
        else:
            if m - n >= L:
                break
            n = m  # equivalent to the original's 'n += m - n'
    return total
if __name__ == "__main__":
L = 10 ** 100
t0 = time.clock()
print sum(solve(L, d) for d in range(1, 10))
t1 = time.clock()
print 'time = ', t1 - t0 | 946 | 385 |
# Generated by Django 2.2 on 2019-06-04 18:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the code-library app (Django 2.2).

    Creates ResourceType plus five resource tables (Website, Meetup,
    Developer, Book, Blog), each with DO_NOTHING foreign keys to
    ResourceType and the swappable user model.  Once this migration has
    been applied anywhere, do not hand-edit it; add a follow-up migration
    instead.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='ResourceType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('resource_type', models.CharField(max_length=255)),
                ('resource_description', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'verbose_name_plural': 'resources',
                'db_table': 'resource',
            },
        ),
        migrations.CreateModel(
            name='Website',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website_title', models.CharField(max_length=255)),
                ('website_url', models.URLField()),
                ('website_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'websites',
                'db_table': 'website',
            },
        ),
        migrations.CreateModel(
            name='Meetup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meetup_title', models.CharField(max_length=255)),
                ('meetup_url', models.URLField()),
                ('meetup_city', models.CharField(max_length=100)),
                ('meetup_state', models.CharField(max_length=2)),
                ('meetup_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'meetups',
                'db_table': 'meetup',
            },
        ),
        migrations.CreateModel(
            name='Developer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dev_first_name', models.CharField(max_length=100)),
                ('dev_last_name', models.CharField(max_length=100)),
                ('dev_twitter', models.CharField(max_length=51)),
                ('dev_specialty', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'developers',
                'db_table': 'developer',
            },
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book_title', models.CharField(max_length=255)),
                ('book_author', models.CharField(max_length=255)),
                ('book_publisher', models.CharField(max_length=255)),
                # NOTE(review): verbose_name=4 looks like a model typo carried
                # into this migration (verbose_name should be a string);
                # fix it in the model and a new migration, not here.
                ('book_pages', models.IntegerField(blank=True, null=True, verbose_name=4)),
                ('book_isbn10', models.CharField(blank=True, max_length=10, null=True)),
                ('book_isbn13', models.CharField(blank=True, max_length=14, null=True)),
                ('book_pub_date', models.DateField(blank=True, null=True)),
                ('book_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'books',
                'db_table': 'book',
            },
        ),
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('blog_title', models.CharField(max_length=255)),
                ('blog_author_first', models.CharField(blank=True, max_length=100, null=True)),
                ('blog_author_last', models.CharField(blank=True, max_length=100, null=True)),
                ('blog_url', models.CharField(max_length=255)),
                ('blog_postdate', models.DateField()),
                ('blog_description', models.TextField()),
                ('resource_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='codelibapp.ResourceType')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'blogs',
                'db_table': 'blog',
            },
        ),
    ]
| 5,770 | 1,720 |
from congregation.codegen.python.libs.external.unary import *
from congregation.codegen.python.libs.external.binary import *
from congregation.codegen.python.libs.external.nary import *
| 186 | 57 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
BCL + OMEGA = 180 / 8.
{'0.7': {'tennis-court': 0.9030146006529745, 'ship': 0.5247147197113373, 'basketball-court': 0.5197288166873728, 'small-vehicle': 0.3876947548806594, 'harbor': 0.11656494591608674, 'plane': 0.7970783470996496, 'soccer-ball-field': 0.5481252919561383, 'ground-track-field': 0.49822883370115856, 'roundabout': 0.4251287737133813, 'baseball-diamond': 0.4246128098451668, 'large-vehicle': 0.2103852912661382, 'helicopter': 0.26122090469916553, 'bridge': 0.09114583333333333, 'swimming-pool': 0.14757313945494202, 'storage-tank': 0.6677333132875084, 'mAP': 0.4348633584136675},
'0.9': {'tennis-court': 0.19764274620067623, 'ship': 0.004702194357366771, 'basketball-court': 0.09090909090909091, 'small-vehicle': 0.0303030303030303, 'harbor': 0.0016175994823681658, 'plane': 0.09577370534335426, 'soccer-ball-field': 0.045454545454545456, 'ground-track-field': 0.004784688995215311, 'roundabout': 0.09090909090909091, 'baseball-diamond': 0.09090909090909091, 'large-vehicle': 0.0036363636363636364, 'helicopter': 0.0, 'bridge': 0.0, 'swimming-pool': 0.001652892561983471, 'storage-tank': 0.047402781720124895, 'mAP': 0.04704652138548674},
'0.85': {'tennis-court': 0.5175145387959259, 'ship': 0.04181083824704637, 'basketball-court': 0.16507177033492823, 'small-vehicle': 0.0606060606060606, 'harbor': 0.004132231404958678, 'plane': 0.31478501464749403, 'soccer-ball-field': 0.19507575757575757, 'ground-track-field': 0.007974481658692184, 'roundabout': 0.12648221343873517, 'baseball-diamond': 0.10730253353204174, 'large-vehicle': 0.005236915550816896, 'helicopter': 0.0606060606060606, 'bridge': 0.0303030303030303, 'swimming-pool': 0.003305785123966942, 'storage-tank': 0.2216713262889979, 'mAP': 0.1241252372076342},
'0.95': {'tennis-court': 0.011019283746556474, 'ship': 0.0034965034965034965, 'basketball-court': 0.0, 'small-vehicle': 0.00033921302578018993, 'harbor': 0.0, 'plane': 0.0303030303030303, 'soccer-ball-field': 0.004329004329004329, 'ground-track-field': 0.0, 'roundabout': 0.0101010101010101, 'baseball-diamond': 0.0, 'large-vehicle': 0.00016528925619834712, 'helicopter': 0.0, 'bridge': 0.0, 'swimming-pool': 0.0, 'storage-tank': 0.004914004914004914, 'mAP': 0.004311155944805877},
'0.75': {'tennis-court': 0.814202005991537, 'ship': 0.36943182805314534, 'basketball-court': 0.45146913919982956, 'small-vehicle': 0.2500155419128262, 'harbor': 0.04276380829572319, 'plane': 0.7579878981894648, 'soccer-ball-field': 0.4295376606872696, 'ground-track-field': 0.38142101120570016, 'roundabout': 0.33333075942849677, 'baseball-diamond': 0.32189281750059, 'large-vehicle': 0.08109584612393884, 'helicopter': 0.10013175230566534, 'bridge': 0.03636363636363637, 'swimming-pool': 0.04511019283746556, 'storage-tank': 0.5556670810786699, 'mAP': 0.3313613986115972},
'0.6': {'tennis-court': 0.9078675692919017, 'ship': 0.7428748965130202, 'basketball-court': 0.5525816701862102, 'small-vehicle': 0.5661458809978344, 'harbor': 0.31468024317286736, 'plane': 0.8927954483248337, 'soccer-ball-field': 0.7026712063326276, 'ground-track-field': 0.5952492478039144, 'roundabout': 0.5862217256587403, 'baseball-diamond': 0.6337310374828784, 'large-vehicle': 0.473337107335067, 'helicopter': 0.4559992150034522, 'bridge': 0.24367445113583264, 'swimming-pool': 0.3895201094294005, 'storage-tank': 0.780240020845799, 'mAP': 0.5891726553009586},
'0.65': {'tennis-court': 0.9063366415272575, 'ship': 0.6452205305336104, 'basketball-court': 0.5419974943230758, 'small-vehicle': 0.5169961021709822, 'harbor': 0.22418261562998404, 'plane': 0.8852270920046745, 'soccer-ball-field': 0.6228637775932758, 'ground-track-field': 0.5591851331213543, 'roundabout': 0.5511841761637736, 'baseball-diamond': 0.580333891442914, 'large-vehicle': 0.3714621290611434, 'helicopter': 0.38442656608097786, 'bridge': 0.17609532766667257, 'swimming-pool': 0.2673287170682332, 'storage-tank': 0.7642816542612352, 'mAP': 0.5331414565766109},
'0.5': {'tennis-court': 0.9088195386702851, 'ship': 0.8224437807168951, 'basketball-court': 0.5830775602074171, 'small-vehicle': 0.6169954809326167, 'harbor': 0.5258339843237152, 'plane': 0.8967687126422501, 'soccer-ball-field': 0.7362705406914213, 'ground-track-field': 0.6498421987512867, 'roundabout': 0.6566326127028347, 'baseball-diamond': 0.6993401680187941, 'large-vehicle': 0.6045608802509415, 'helicopter': 0.5212808419504471, 'bridge': 0.36652945756438354, 'swimming-pool': 0.5164216645404407, 'storage-tank': 0.820030826549724, 'mAP': 0.6616565499008968},
'0.8': {'tennis-court': 0.7816894723179306, 'ship': 0.1451783316171541, 'basketball-court': 0.3190681777298075, 'small-vehicle': 0.10194653796304984, 'harbor': 0.013468013468013467, 'plane': 0.5427055255026874, 'soccer-ball-field': 0.3415451418500199, 'ground-track-field': 0.13296378418329638, 'roundabout': 0.24207752583038625, 'baseball-diamond': 0.15874047455208096, 'large-vehicle': 0.02730096965512913, 'helicopter': 0.07382920110192837, 'bridge': 0.0303030303030303, 'swimming-pool': 0.022727272727272728, 'storage-tank': 0.3935350148663551, 'mAP': 0.22180523157787616},
'0.55': {'tennis-court': 0.9088195386702851, 'ship': 0.7579876783545256, 'basketball-court': 0.5756532396263904, 'small-vehicle': 0.6038512968643537, 'harbor': 0.41351532033351385, 'plane': 0.8953579426876774, 'soccer-ball-field': 0.7172833446523743, 'ground-track-field': 0.6291038290639339, 'roundabout': 0.6120712939504148, 'baseball-diamond': 0.6832929342136569, 'large-vehicle': 0.540009629017207, 'helicopter': 0.5193070813559969, 'bridge': 0.31825260872940675, 'swimming-pool': 0.4790914021824345, 'storage-tank': 0.78480241358501, 'mAP': 0.6292266368858123},
'mmAP': 0.3576710201805347}
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_DCL_B_2x_20200920'
NET_NAME = 'resnet50_v1d'  # 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 2000
SAVE_WEIGHTS_INTE = 20673 * 2
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1  # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False]  # for gluoncv backbone
USE_07_METRIC = True
# NOTE(review): 'MUTILPY' looks like a typo for 'MULTIPLY'; kept because
# other modules may import this exact name -- verify before renaming.
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0  # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = None
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
# LR decays at 12/16/20 checkpoint intervals; training runs 20 intervals.
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
# NOTE(review): 'SETP' looks like a typo for 'STEP'; kept for the same
# import-compatibility reason as above.
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTATrain'  # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
# 22.5 degrees; presumably the DCL angular bin width (cf. the results
# docstring above: "BCL + OMEGA = 180 / 8") -- confirm against the paper.
OMEGA = 180 / 8.
ANGLE_MODE = 0
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
# Bias chosen so the initial sigmoid output of the final conv is ~PROBABILITY.
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180  # 90 or 180
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
| 9,107 | 5,792 |
# Copyright (c) 2021-2022 Johnathan P. Irvin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from functools import cache
class InputConfiguration:
    """Interactively collects bot configuration values from stdin.

    Each ``get_*`` method prompts at most once per instance and memoises
    the answer.  The original used ``functools.cache`` on instance
    methods, which keys the cache on ``self`` and therefore keeps every
    instance alive for the life of the process (ruff B019); a per-instance
    memo dict preserves the ask-once behaviour without the leak.
    """

    def _remember(self, key: str, ask):
        """Return the memoised value for *key*, invoking *ask()* once if absent."""
        # Created lazily so no __init__ is needed (the original had none).
        memo = self.__dict__.setdefault('_memo', {})
        if key not in memo:
            memo[key] = ask()
        return memo[key]

    def get_bot_name(self) -> str:
        """
        Returns the name of the bot.
        Returns:
            str: The name of the bot.
        """
        return self._remember('bot_name', lambda: input("Bot name: "))

    def get_citizen_number(self) -> int:
        """
        Returns the citizen number of the owner of the bot.
        Re-prompts until a purely numeric value is entered.
        Returns:
            int: The citizen number of the owner of the bot.
        """
        def ask() -> int:
            while True:
                citizen_number = input("Citizen number: ")
                if citizen_number.isnumeric():
                    return int(citizen_number)
                print("Invalid citizen number.")
        return self._remember('citizen_number', ask)

    def get_password(self) -> str:
        """
        Returns the priviledge password of the owner of the bot.
        Returns:
            str: The priviledge password of the owner of the bot.
        """
        return self._remember('password', lambda: input("Password: "))

    def get_world_name(self) -> str:
        """
        Returns the name of the world the bot will enter.
        Returns:
            str: The name of the world the bot will enter.
        """
        return self._remember('world_name', lambda: input("World name: "))

    def get_world_coordinates(self) -> tuple:
        """
        Returns the coordinates of the world the bot will enter.
        Re-prompts until exactly three comma-separated numeric values are
        entered (the original crashed with ValueError on a wrong count).
        Returns:
            tuple: The coordinates where the bot will enter.
        """
        def ask() -> tuple:
            while True:
                parts = input(
                    "World coordinates (x, y, z): "
                ).replace(" ", "").split(",")
                if len(parts) == 3 and all(p.isnumeric() for p in parts):
                    return tuple(int(p) for p in parts)
                print("Invalid coordinates.")
        return self._remember('world_coordinates', ask)

    def get_plugin_path(self) -> str:
        """
        Returns the path where the plugins are stored.
        Returns:
            str: The path where the plugins are stored.
        """
        return self._remember('plugin_path', lambda: input("Plugin path: "))
| 3,081 | 888 |
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import normalize
class TestData:
    """Generates synthetic datasets for testing classifiers."""

    def __init__(self, dimensions, points) -> None:
        self.dimensions = dimensions  # number of features per sample
        self.points = points          # number of samples

    def linearly_separable(self):
        """Return ``(x, y)``: min-max normalised two-blob features and labels.

        ``x`` has shape (points, dimensions); each feature column is scaled
        to [0, 1].  Bug fixes versus the original: operator precedence made
        ``x[d] - np.min(x[d]) / (max - min)`` divide only the minimum, and
        ``x[d]`` indexed sample rows rather than feature columns ``x[:, d]``.
        Assumes each column is non-constant (otherwise division by zero).
        """
        x, y = make_blobs(
            n_samples=self.points,
            centers=2,
            n_features=self.dimensions,
            center_box=(0, 1),
        )
        for d in range(self.dimensions):
            column = x[:, d]
            col_min = np.min(column)
            col_max = np.max(column)
            x[:, d] = (column - col_min) / (col_max - col_min)
        return x, y
| 586 | 194 |
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.util.func_utils as fu
import sahara.tests.unit.base as b
class PredicatesTest(b.SaharaTestCase):
    """Unit tests for the predicate combinators in func_utils."""

    def _assert_bool(self, expected, value):
        # Dispatch to assertTrue/assertFalse so truthiness semantics match
        # the hand-written assertions these tables replace.
        if expected:
            self.assertTrue(value)
        else:
            self.assertFalse(value)

    def test_true_predicate(self):
        self.assertTrue(fu.true_predicate(None))

    def test_false_predicate(self):
        self.assertFalse(fu.false_predicate(None))

    def test_not_predicate(self):
        self.assertFalse(fu.not_predicate(fu.true_predicate)(None))
        self.assertTrue(fu.not_predicate(fu.false_predicate)(None))

    def test_and_predicate(self):
        truth_table = [
            (fu.true_predicate, fu.true_predicate, True),
            (fu.false_predicate, fu.true_predicate, False),
            (fu.true_predicate, fu.false_predicate, False),
            (fu.false_predicate, fu.false_predicate, False),
        ]
        for left, right, expected in truth_table:
            self._assert_bool(expected, fu.and_predicate(left, right)(None))

    def test_or_predicate(self):
        truth_table = [
            (fu.true_predicate, fu.true_predicate, True),
            (fu.false_predicate, fu.true_predicate, True),
            (fu.true_predicate, fu.false_predicate, True),
            (fu.false_predicate, fu.false_predicate, False),
        ]
        for left, right, expected in truth_table:
            self._assert_bool(expected, fu.or_predicate(left, right)(None))

    def test_field_equals_predicate(self):
        record = {'a': 'a', 'b': 'b'}
        self.assertTrue(fu.field_equals_predicate('a', 'a')(record))
        self.assertFalse(fu.field_equals_predicate('b', 'a')(record))

    def test_like_predicate(self):
        predicate = fu.like_predicate
        record = {'a': 'a', 'b': 'b', 'c': 'c'}
        self.assertTrue(predicate({'a': 'a', 'b': 'b', 'c': 'c'})(record))
        self.assertTrue(predicate({'a': 'a', 'b': 'b'})(record))
        self.assertTrue(predicate({'a': 'a'})(record))
        self.assertTrue(predicate({'a': 'a'}, ['a'])(record))
        self.assertTrue(predicate({})(record))
        self.assertTrue(predicate({'a': 'a', 'b': 'b', 'c': 'a'}, ['c'])(record))
        self.assertFalse(predicate({'a': 'a', 'b': 'b', 'c': 'a'})(record))
        self.assertFalse(predicate({'a': 'a', 'c': 'a'})(record))
        self.assertFalse(predicate({'c': 'a'}, ['a'])(record))

    def test_in_predicate(self):
        predicate = fu.in_predicate
        record = {'a': 'a', 'b': 'b'}
        self.assertTrue(predicate('a', ['a', 'b'])(record))
        self.assertFalse(predicate('a', ['c', 'b'])(record))
        self.assertFalse(predicate('a', [])(record))
class FunctionsTest(b.SaharaTestCase):
    """Unit tests for the dict-transforming helpers in func_utils."""

    def _check_copy(self, expected, actual, source):
        # The result must equal *expected* and be a fresh object, not *source*.
        self.assertEqual(expected, actual)
        self.assertIsNot(actual, source)

    def test_copy_function(self):
        source = {'a': 'a'}
        self._check_copy({'a': 'a'}, fu.copy_function()(source), source)

    def test_append_field_function(self):
        source = {'a': 'a'}
        result = fu.append_field_function('b', 'b')(source)
        self._check_copy({'a': 'a', 'b': 'b'}, result, source)

    def test_append_fields_function(self):
        source = {'a': 'a'}
        scenarios = [
            ({'b': 'b', 'c': 'c'}, {'a': 'a', 'b': 'b', 'c': 'c'}),
            ({'b': 'b'}, {'a': 'a', 'b': 'b'}),
            ({}, {'a': 'a'}),
        ]
        for fields, expected in scenarios:
            result = fu.append_fields_function(fields)(source)
            self._check_copy(expected, result, source)

    def test_get_values_pair_function(self):
        source = {'a': 'a', 'b': 'b'}
        self.assertEqual(('a', 'b'),
                         fu.get_values_pair_function('a', 'b')(source))

    def test_get_field_function(self):
        source = {'a': 'a', 'b': 'b'}
        self.assertEqual(('a', 'a'), fu.get_field_function('a')(source))

    def test_get_fields_function(self):
        source = {'a': 'a', 'b': 'b'}
        get_fields = fu.get_fields_function
        self.assertEqual([('a', 'a'), ('b', 'b')],
                         get_fields(['a', 'b'])(source))
        self.assertEqual([('a', 'a')], get_fields(['a'])(source))

    def test_extract_fields_function(self):
        source = {'a': 'a', 'b': 'b'}
        extract = fu.extract_fields_function
        self.assertEqual({'a': 'a', 'b': 'b'}, extract(['a', 'b'])(source))
        self.assertEqual({'a': 'a'}, extract(['a'])(source))

    def test_get_value_function(self):
        source = {'a': 'a', 'b': 'b'}
        self.assertEqual('a', fu.get_value_function('a')(source))

    def test_set_default_value_function(self):
        source = {'a': 'a'}
        set_default = fu.set_default_value_function
        # Missing key is filled in; present key is left untouched.
        self._check_copy({'a': 'a', 'b': 'b'}, set_default('b', 'b')(source), source)
        self._check_copy({'a': 'a'}, set_default('a', 'b')(source), source)

    def test_set_default_values_function(self):
        source = {'a': 'a'}
        scenarios = [
            ({'a': 'b', 'c': 'c'}, {'a': 'a', 'c': 'c'}),
            ({'b': 'b'}, {'a': 'a', 'b': 'b'}),
            ({}, {'a': 'a'}),
        ]
        for defaults, expected in scenarios:
            result = fu.set_default_values_function(defaults)(source)
            self._check_copy(expected, result, source)

    def test_values_pair_to_dict_function(self):
        pair = ('a', 'b')
        self.assertEqual({'a': 'a', 'b': 'b'},
                         fu.values_pair_to_dict_function('a', 'b')(pair))
| 7,013 | 2,496 |
# -*- coding: utf-8 -*-
"""Mega-Cam gen2 band and PanSTARRS 1 band Mixed Functions."""
__author__ = "Nathaniel Starkman"
__copyright__ = "Copyright 2018, "
__credits__ = [
    "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html"
]
# Empty: this module currently exposes no public API (placeholder file).
__all__ = []
#############################################################################
# IMPORTS
#############################################################################
# CODE
#############################################################################
#############################################################################
# END
import sys
import os
import math
import pytest
import grpc
import tempfile
sys.path.insert(1, os.path.join(sys.path[0], "../../"))
import rips
import dataroot
def test_Launch(rips_instance, initialize_test):
    """Smoke test: the rips fixture provides a live instance."""
    assert rips_instance is not None
def test_EmptyProject(rips_instance, initialize_test):
    """A freshly launched instance starts with no cases loaded."""
    cases = rips_instance.project.cases()
    # Was 'len(cases) is 0': identity comparison with an int literal relies
    # on CPython small-int caching (SyntaxWarning since 3.8); compare by value.
    assert len(cases) == 0
def test_OneCase(rips_instance, initialize_test):
    """Loading one EGRID file registers exactly one case with expected name/id."""
    case = rips_instance.project.load_case(
        dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
    )
    assert case.name == "TEST10K_FLT_LGR_NNC"
    assert case.id == 0
    cases = rips_instance.project.cases()
    # Was 'len(cases) is 1' -- value comparison is the correct check.
    assert len(cases) == 1
def test_BoundingBox(rips_instance, initialize_test):
    """Check the reservoir bounding box and depth range of the test case."""
    case = rips_instance.project.load_case(
        dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
    )
    assert case.name == "TEST10K_FLT_LGR_NNC"
    bbox = case.reservoir_boundingbox()
    # Expected extents taken from the ResInsight UI for this dataset.
    expected_extents = [
        (3382.90, bbox.min_x),
        (5850.48, bbox.max_x),
        (4157.45, bbox.min_y),
        (7354.93, bbox.max_y),
        (-4252.61, bbox.min_z),
        (-4103.60, bbox.max_z),
    ]
    for expected, actual in expected_extents:
        assert math.isclose(expected, actual, abs_tol=1.0e-1)
    min_depth, max_depth = case.reservoir_depth_range()
    assert math.isclose(4103.60, min_depth, abs_tol=1.0e-1)
    assert math.isclose(4252.61, max_depth, abs_tol=1.0e-1)
def test_MultipleCases(rips_instance, initialize_test):
    """Loading the same grid three times yields three cases with that name."""
    grid_path = dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
    case_paths = [grid_path, grid_path, grid_path]
    expected_names = []
    for path in case_paths:
        expected_names.append(os.path.splitext(os.path.basename(path))[0])
        rips_instance.project.load_case(path=path)
    cases = rips_instance.project.cases()
    assert len(cases) == len(expected_names)
    for loaded_case, expected_name in zip(cases, expected_names):
        assert loaded_case.name == expected_name
def get_cell_index_with_ijk(cell_info, i, j, k):
    """Return the index of the first cell whose local ijk matches, or -1."""
    return next(
        (
            idx
            for idx, cell in enumerate(cell_info)
            if (cell.local_ijk.i, cell.local_ijk.j, cell.local_ijk.k) == (i, j, k)
        ),
        -1,
    )
def check_corner(actual, expected):
    """Assert each coordinate of *actual* is within 0.1 of *expected* (x, y, z)."""
    for value, reference in zip((actual.x, actual.y, actual.z), expected):
        assert math.isclose(value, reference, abs_tol=0.1)
def test_10k(rips_instance, initialize_test):
    """Check grid, cell-count, time-step and cell-geometry data for TEST10K."""
    case = rips_instance.project.load_case(
        path=dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
    )
    assert len(case.grids()) == 2
    counts = case.cell_count()
    assert counts.active_cell_count == 11125
    assert counts.reservoir_cell_count == 316224
    assert len(case.time_steps()) == 9
    assert len(case.days_since_start()) == 9
    cell_info = case.cell_info_for_active_cells()
    assert len(cell_info) == counts.active_cell_count
    # Check an active cell (found in resinsight ui)
    cell_index = get_cell_index_with_ijk(cell_info, 23, 44, 19)
    assert cell_index != -1
    cell_centers = case.active_cell_centers()
    assert len(cell_centers) == counts.active_cell_count
    # Check the cell center for the specific cell
    center = cell_centers[cell_index]
    assert math.isclose(3627.17, center.x, abs_tol=0.1)
    assert math.isclose(5209.75, center.y, abs_tol=0.1)
    assert math.isclose(4179.6, center.z, abs_tol=0.1)
    cell_corners = case.active_cell_corners()
    assert len(cell_corners) == counts.active_cell_count
    # Expected values from ResInsight UI
    expected_corners = [
        [3565.22, 5179.02, 4177.18],
        [3655.67, 5145.34, 4176.63],
        [3690.07, 5240.69, 4180.02],
        [3599.87, 5275.16, 4179.32],
        [3564.13, 5178.61, 4179.75],
        [3654.78, 5144.79, 4179.23],
        [3688.99, 5239.88, 4182.7],
        [3598.62, 5274.48, 4181.96],
    ]
    corners = cell_corners[cell_index]
    # Corners c0..c7 in the same order as the expected list
    for corner_idx, expected in enumerate(expected_corners):
        check_corner(getattr(corners, "c{}".format(corner_idx)), expected)
    # No coarsening info for this case
    assert len(case.coarsening_info()) == 0
def test_PdmObject(rips_instance, initialize_test):
    """Basic sanity checks on the PdmObject backing a loaded case."""
    case_path = dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
    case = rips_instance.project.load_case(path=case_path)
    assert case.id == 0
    # Use != rather than `is not`: identity comparison against an int literal
    # depends on CPython's small-int caching and is a SyntaxWarning on 3.8+.
    assert case.address() != 0
    assert case.__class__.__name__ == "EclipseCase"
@pytest.mark.skipif(
    sys.platform.startswith("linux"),
    reason="Brugge is currently exceptionally slow on Linux",
)
def test_brugge_0010(rips_instance, initialize_test):
    """Check grid and time-step counts for the Brugge Real10 case."""
    case = rips_instance.project.load_case(
        path=dataroot.PATH + "/Case_with_10_timesteps/Real10/BRUGGE_0010.EGRID"
    )
    assert len(case.grids()) == 1
    counts = case.cell_count()
    assert counts.active_cell_count == 43374
    assert counts.reservoir_cell_count == 60048
    assert len(case.time_steps()) == 11
    assert len(case.days_since_start()) == 11
@pytest.mark.skipif(
    sys.platform.startswith("linux"),
    reason="Brugge is currently exceptionally slow on Linux",
)
def test_replaceCase(rips_instance, initialize_test):
    """Replace the grid file of case 0 and verify the case is updated in place."""
    project = rips_instance.project.open(
        dataroot.PATH + "/TEST10K_FLT_LGR_NNC/10KWithWellLog.rsp"
    )
    case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID"
    case = project.case(case_id=0)
    assert case is not None
    assert case.name == "TEST10K_FLT_LGR_NNC"
    assert case.id == 0
    cases = rips_instance.project.cases()
    # Compare with == rather than `is`: identity checks against int literals
    # rely on CPython small-int caching and warn on Python >= 3.8.
    assert len(cases) == 1
    case.replace(new_grid_file=case_path)
    # Check that the case object has been changed
    assert case.name == "BRUGGE_0000"
    assert case.id == 0
    cases = rips_instance.project.cases()
    assert len(cases) == 1
    # Check that retrieving the case object again will yield the changed object
    case = project.case(case_id=0)
    assert case.name == "BRUGGE_0000"
    assert case.id == 0
def test_loadNonExistingCase(rips_instance, initialize_test):
    """Loading a bogus path must raise a gRPC error."""
    bogus_path = "Nonsense/Nonsense/Nonsense"
    with pytest.raises(grpc.RpcError):
        assert rips_instance.project.load_case(bogus_path)
@pytest.mark.skipif(
    sys.platform.startswith("linux"),
    reason="Brugge is currently exceptionally slow on Linux",
)
def test_exportFlowCharacteristics(rips_instance, initialize_test):
    """Export flow characteristics for one injector into a temporary file."""
    case = rips_instance.project.load_case(
        dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID"
    )
    with tempfile.TemporaryDirectory(prefix="rips") as tmpdirname:
        print("Temporary folder: ", tmpdirname)
        case.export_flow_characteristics(
            time_steps=8,
            producers=[],
            injectors="I01",
            file_name=tmpdirname + "/exportFlowChar.txt",
        )
def test_selected_cells(rips_instance, initialize_test):
    """With nothing selected, selected-cell queries must return empty results."""
    case = rips_instance.project.load_case(
        dataroot.PATH + "/TEST10K_FLT_LGR_NNC/TEST10K_FLT_LGR_NNC.EGRID"
    )
    assert case.name == "TEST10K_FLT_LGR_NNC"
    assert len(case.selected_cells()) == 0
    for time_step_index in range(len(case.time_steps())):
        # Try to read for SOIL the time step (will be empty since nothing is selected)
        soil_results = case.selected_cell_property(
            "DYNAMIC_NATIVE", "SOIL", time_step_index
        )
        assert len(soil_results) == 0
| 8,405 | 3,386 |
# basic file to run the program
import mainApp

mainApp.commandLine().run()
| 104 | 31 |
#!/usr/bin/python
"""
@cernodile/py-deltaparser
A Python script to convert Growtopia's items.dat to human-readable indexable format.
File: iamim_gt_planner.py
Purpose: To generate data file for Iamim's GT Planner
License: See LICENSE.txt in project root directory.
"""
import csv
import item_parser
def filter(item):
    """Filters out items that you should not be able to use in a world planner."""
    # Odd item IDs are never placeable in the planner.
    if item.ID % 2 == 1:
        return False
    # Item types that have no meaning inside a world plan.
    blocked_types = (0, 1, 4, 8, 19, 20, 37, 44, 57, 64, 107, 112, 120, 129)
    if item.Type in blocked_types:
        return False
    # Any bedrock-type that is within startopia
    if item.Type == 15 and 6000 < item.ID <= 6742:
        return False
    if "null_item" in item.Name or "Guild Flag" in item.Name:
        return False
    # Blank, UPDATE_STORE, Valhowla Treasure
    if item.ID in (0, 244, 4368):
        return False
    return True
def get_item_type(Type):
    """Map a raw item type to its layer: "Background" or "Foreground"."""
    return "Background" if Type in (18, 22, 23, 28) else "Foreground"
def get_informational_type(item):
    """Return a descriptive category for an item, falling back to its layer type.

    The checks run in a fixed order; the first match wins.
    """
    item_type = item.Type
    if item_type == 7:
        return "Bouncy"
    if item_type in (2, 13, 26):
        return "Door"
    if item_type == 3:
        return "Lock"
    if item_type in (6, 45, 93):
        return "Death"
    if item_type == 9:
        return "Entrance"
    if item_type == 10:
        return "Sign"
    if item_type in (12, 31, 32, 122):
        return "Togglable Block"
    if item_type == 14 or item.CollisionType == 2:
        return "Platform"
    if item_type in (16, 25, 126, 136) or item.ID == 5238:
        return "Pain"
    if item_type == 27:
        return "Checkpoint"
    if item_type == 28:
        return "Music Note"
    if item_type in (41, 81, 89, 134):
        return "Weather Machine"
    if item_type == 60:
        return "Wind"
    if item_type in (69, 70, 71, 79):
        return "Steam"
    if item_type == 113:
        return "Bots"
    return get_item_type(item_type)
def get_special_data(item):
    """Resolve the sprite-sheet name and tile coordinates (name, x, y) for an item.

    A handful of ID ranges draw from shared sprite sheets instead of their own
    texture file; those disjoint groups are special-cased with early returns.
    """
    item_id = item.ID
    # Items drawn from the shared Steam_items sheet.
    if (3258 <= item_id <= 3268) or (3752 <= item_id <= 3756) \
            or item_id in (3280, 3282, 3412, 3414, 3766, 3768):
        x = y = 0
        if item_id in (3258, 3268, 3412, 3756):
            x = 8
        elif item_id in (3262, 3280, 3766, 3768):
            x = 16
        elif item_id in (3264, 3282, 3752):
            x = 24
        if 3266 <= item_id <= 3282:
            y = 6
        elif item_id in (3412, 3414, 3752, 3766):
            y = 12
        elif item_id in (3754, 3756, 3768):
            y = 18
        return ("Steam_items", x, y)
    # Pipes share one sheet, second row for the later ID.
    if item_id in (620, 3592):
        return ("pipes", 0, 0 if item_id == 620 else 2)
    # Crystals: one row of shards plus a combined crystal on row 1.
    if 2242 <= item_id <= 2250:
        if item_id == 2250:
            return ("crystals", 0, 1)
        return ("crystals", (item_id - 2242) // 2, 0)
    # Bunting variants: fixed per-ID tile coordinates.
    if 4382 <= item_id <= 4398:
        x, y = {
            4384: (4, 0),
            4386: (0, 1),
            4388: (4, 1),
            4390: (0, 2),
            4392: (4, 2),
            4394: (6, 2),
            4396: (0, 3),
            4398: (2, 3),
        }.get(item_id, (0, 0))
        return ("bunting", x, y)
    if item_id == 10254:
        return ("Dining", 0, 0)
    # Default: the item's own texture file and coordinates.
    return (item.FileName.replace(".rttex", ""), item.TexX, item.TexY)
def get_storage_type(item):
    """Return the storage type, forcing 2 for items drawn from the Steam_items sheet."""
    uses_steam_sheet = (
        (3258 <= item.ID <= 3268)
        or (3752 <= item.ID <= 3756)
        or item.ID in (3280, 3282, 3412, 3414, 3766, 3768)
    )
    return 2 if uses_steam_sheet else item.StorageType
def write_iamim_gt_planner(items):
    """Write the pipe-delimited planner CSV for all placeable items.

    :param items: mapping of item id -> parsed item record from item_parser.
    A few items missing from or mislabeled in items.dat are emitted by hand
    after their neighbouring IDs.
    """
    # newline="" is required by the csv module so the writer controls line
    # endings itself (otherwise blank rows appear on Windows).
    with open("iamim_gt_planner.csv", "w", newline="") as csvfile:
        writer = csv.writer(csvfile, delimiter="|")
        # Water is a pseudo-item the planner needs but items.dat lacks.
        writer.writerow([1, "Water", "Water", 2, "Water", "Water", 0, 0, 0])
        for key in items:
            item = items[key]
            if filter(item):
                data = get_special_data(item)
                # Last column: 1 when the item has the flippable property bit
                # (or is ID 4700, special-cased).
                writer.writerow([item.ID, item.Name, get_item_type(item.Type), get_storage_type(item), get_informational_type(item), data[0], data[1], data[2], 1 if item.Properties & 0x01 or item.ID == 4700 else 0])
            # actual 4660 is a confetti cannon, 5604 a goldfish bowler hat.
            if item.ID == 4658:
                writer.writerow([4660, "Detonated Uranium Block", "Foreground", 1, "Foreground", "tiles_page10", 3, 1, 0])
            elif item.ID == 5602:
                writer.writerow([5604, "Drilled Ice Crust Block", "Foreground", 1, "Foreground", "tiles_page10", 1, 1, 0])
            elif item.ID == 7866:
                writer.writerow([7865, "Topiary Hedge (Swirly)", "Foreground", 1, "Foreground", "tiles_page13", 28, 11, 1])
                writer.writerow([7866, "Topiary Hedge (Bird)", "Foreground", 1, "Foreground", "tiles_page13", 26, 11, 1])
                writer.writerow([7867, "Topiary Hedge (Circle)", "Foreground", 1, "Foreground", "tiles_page13", 27, 11, 1])
            elif item.ID == 9030:
                writer.writerow([9032, "Spooky Bunting (pumpkin)", "Foreground", 1, "Foreground", "tiles_page14", 18, 10, 1])
                writer.writerow([9034, "Spooky Bunting (ghost)", "Foreground", 1, "Foreground", "tiles_page14", 19, 10, 1])
                writer.writerow([9036, "Spooky Bunting (bats)", "Foreground", 1, "Foreground", "tiles_page14", 21, 10, 1])
            elif item.ID == 9198:
                writer.writerow([9199, "Ice Sculptures (Flower)", "Foreground", 1, "Foreground", "tiles_page14", 9, 12, 0])
            elif item.ID == 9200:
                writer.writerow([9201, "Ice Sculptures (Teddy Bear)", "Foreground", 1, "Foreground", "tiles_page14", 10, 12, 0])
            elif item.ID == 9202:
                writer.writerow([9203, "Ice Sculptures (Star)", "Foreground", 1, "Foreground", "tiles_page14", 11, 12, 0])
if __name__ == "__main__":
    # Parse the raw items.dat in the working directory, then emit the
    # planner CSV next to it.
    items = item_parser.parse("items.dat")
    write_iamim_gt_planner(items)
| 6,103 | 2,989 |
import sys
sys.path.append('')
from organizercore import organizer
class OrganizerModel:
    """Thin model-layer wrapper around the core organizer."""

    def start_processing(self, input_dir, output_dir, settings):
        """Run the GoPro directory organizer over *input_dir* into *output_dir*."""
        worker = organizer.Organizer(settings)
        worker.process_gopro_dir(input_dir, output_dir)
| 236 | 73 |
import datetime as dt
import pytest
import human_dates
class TestTimeAgoInWords:
    """
    test time_ago_in_words function

    Each test method only assigns ``self.dates`` (timedelta offsets applied
    to "now") and ``self.expected`` (the phrases those offsets should render
    as); the autouse yield-fixture below performs the actual assertions after
    the test method returns.
    """
    @pytest.fixture(autouse=True)
    def _import_templates(self, templates):
        """
        import templates from conftest local plugin
        """
        self.templates = templates
    @pytest.fixture(autouse=True)
    def _run_time_ago_comparison(self):
        """
        autoexecute after the specific test has been defined, running the
        actual comparison
        """
        yield
        # Code after `yield` runs during fixture teardown, i.e. after the
        # test method has populated self.dates / self.expected.
        for date, expected in zip(self.dates, self.expected):
            result = human_dates.time_ago_in_words(dt.datetime.now() + date)
            assert expected == result
    def test_time_years(self):
        # ~4 years in the past and in the future.
        self.dates = [-dt.timedelta(days=366 * 4), dt.timedelta(days=366 * 4)]
        self.expected = [
            self.templates.past % "4 years",
            self.templates.future % "4 years",
        ]
    def test_time_months(self):
        self.dates = [-dt.timedelta(days=31 * 3), dt.timedelta(days=31 * 3)]
        self.expected = [
            self.templates.past % "3 months",
            self.templates.future % "3 months",
        ]
    def test_time_weeks(self):
        self.dates = [-dt.timedelta(days=7 * 3 + 1), dt.timedelta(days=7 * 3 + 1)]
        self.expected = [
            self.templates.past % "3 weeks",
            self.templates.future % "3 weeks",
        ]
    def test_time_days(self):
        self.dates = [-dt.timedelta(days=5.1), dt.timedelta(days=5.1)]
        self.expected = [
            self.templates.past % "5 days",
            self.templates.future % "5 days",
        ]
    def test_time_one_day(self):
        # Offsets of roughly one day render as the special words, not "1 day".
        self.dates = [-dt.timedelta(hours=24.1), dt.timedelta(hours=24.5)]
        self.expected = ["yesterday", "tomorrow"]
    def test_time_hours(self):
        # 75 minutes is still reported as "an hour".
        self.dates = [
            -dt.timedelta(hours=17.1),
            dt.timedelta(hours=5.1),
            -dt.timedelta(minutes=75),
        ]
        self.expected = [
            self.templates.past % "17 hours",
            self.templates.future % "5 hours",
            self.templates.past % "an hour",
        ]
    def test_time_minutes(self):
        # 67 seconds is still reported as "a minute".
        self.dates = [
            -dt.timedelta(minutes=41.3),
            dt.timedelta(minutes=26.3),
            dt.timedelta(seconds=67),
        ]
        self.expected = [
            self.templates.past % "41 minutes",
            self.templates.future % "26 minutes",
            self.templates.future % "a minute",
        ]
    def test_time_seconds(self):
        self.dates = [-dt.timedelta(seconds=19.3), dt.timedelta(seconds=45.8)]
        self.expected = [
            self.templates.past % "19 seconds",
            self.templates.future % "45 seconds",
        ]
    def test_time_now(self):
        # Offsets within a few seconds of now collapse to "just now".
        self.dates = [-dt.timedelta(seconds=3.7), dt.timedelta(seconds=8.1)]
        self.expected = ["just now"] * 2
| 2,959 | 980 |
# -*- coding: utf-8 -*-
import pandas as pd
import csv
import collections
from pandas import DataFrame as df
def test(data):
print(data['TIME'])
return 100
# data['TIME'] = data['TIME'].astype('float')
# data['TIME'] = data['TIME'].astype('int')
#
# # IP, PORT, IP_PORT 데이터셋 생성 -> new_data
# new_data = data['dstip']
# new_data = pd.DataFrame(new_data)
# IP = []
# PORT = []
# dst = []
# land=[]
#
# dst = data['dstip'].values.tolist()
# src = data['srcip'].values.tolist()
#
#
# for i in range(len(new_data)):
# IP.append(new_data.iloc[i][0].split(':')[0])
# PORT.append(new_data.iloc[i][0].split(':')[1])
#
# for i in range(len(new_data)):
# if src==dst:
# land.append(1)
# else:
# land.append(0)
#
#
# IP = pd.DataFrame(IP, columns=['IP'])
# PORT = pd.DataFrame(PORT, columns=['PORT'])
# IP_PORT = pd.DataFrame(dst, columns=['IP_PORT'])
# LAND=pd.DataFrame(land,columns=['LAND'])
#
#
#
# new_data = pd.concat([data['TIME'], IP], axis=1)
# new_data = pd.concat([new_data, PORT], axis=1)
# new_data = pd.concat([new_data, IP_PORT], axis=1)
# new_data = pd.concat([new_data,LAND],axis=1)
#
#
#
# # timestamp에 각 초에 따른 데이터를 넣어줌
# timestamp_IP_PORT = []
# for i in range((max(new_data['TIME']))+1):
# line = []
# timestamp_IP_PORT.append(line)
#
# for j in range(len(new_data['TIME'])):
# timestamp_IP_PORT[new_data['TIME'].iloc[j]].append(new_data['IP_PORT'].iloc[j])
#
# # timestamp를 이용해서 counter에 각 초당 IP&PORT 개수를 저장함
# counter_IP_PORT = []
# for k in range(len(timestamp_IP_PORT)):
# counter_IP_PORT.append(collections.Counter(timestamp_IP_PORT[k]))
#
# timestamp_IP = []
#
# # 초단위로 바꾼 값 중의 최대값 크기만큼의 (timestamp)리스트를 만듦
# for i in range((max(new_data['TIME']))+1):
# line = []
# timestamp_IP.append(line)
#
#
#
# # (timestamp)안에 각 초단위에 해당하는 dstip를 리스트형태로 넣음
# # 아래 코드 실행 후 timestamp[0:2] 로 출력하면 0초,1초에 대한 dstip 출력
# for j in range(len(new_data['TIME'])):
#
# timestamp_IP[new_data['TIME'].iloc[j]].append(new_data['IP'].iloc[j])
# # f.write(str(timestamp_IP)) 테스트
#
#
# # timestamp를 이용해서 counter에 각 초당 IP&PORT 개수를 저장함
# counter_IP = []
# for k in range(len(timestamp_IP)):
# counter_IP.append(collections.Counter(timestamp_IP[k]))
#
# timestamp_PORT = []
# # 초단위로 바꾼 값 중의 최대값 크기만큼의 (timestamp)리스트를 만듦
# for i in range((max(new_data['TIME']))+1):
# line = []
# timestamp_PORT.append(line)
#
# # (timestamp)안에 각 초단위에 해당하는 dstip를 리스트형태로 넣음
# # 아래 코드 실행 후 timestamp[0:2] 로 출력하면 0초,1초에 대한 dstip 출력
# for j in range(len(new_data['TIME'])):
# # print(data['time'].iloc[j])
# timestamp_PORT[new_data['TIME'].iloc[j]].append(new_data['PORT'].iloc[j])
#
# counter_PORT = []
# for k in range(len(timestamp_PORT)):
# counter_PORT.append(collections.Counter(timestamp_PORT[k]))
#
# return land
if __name__ == '__main__':
    # Nothing to run directly; the processing pipeline above is commented out.
    pass
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditor
:platform: Unix
:synopsis: This class is subclassed to add audit rules.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com>
"""
import datastore
from security_monkey import app, db
from security_monkey.watcher import ChangeItem
from security_monkey.common.jinja import get_jinja_env
from security_monkey.datastore import User, AuditorSettings, Item, ItemAudit, Technology, Account, ItemAuditScore, AccountPatternAuditScore
from security_monkey.common.utils import send_email
from security_monkey.account_manager import get_account_by_name
from security_monkey.alerters.custom_alerter import report_auditor_changes
from sqlalchemy import and_
from collections import defaultdict
auditor_registry = defaultdict(list)
class AuditorType(type):
    """Metaclass that records every concrete Auditor subclass in auditor_registry."""
    def __init__(cls, name, bases, attrs):
        super(AuditorType, cls).__init__(name, bases, attrs)
        # The abstract base and index-less classes are never registered.
        if cls.__name__ == 'Auditor' or not cls.index:
            return
        # Only want to register auditors explicitly loaded by find_modules
        if '.' in cls.__module__:
            return
        already_registered = any(
            registered.__module__ == cls.__module__ and registered.__name__ == cls.__name__
            for registered in auditor_registry[cls.index])
        if not already_registered:
            app.logger.debug("Registering auditor {} {}.{}".format(cls.index, cls.__module__, cls.__name__))
            auditor_registry[cls.index].append(cls)
class Auditor(object):
"""
This class (and subclasses really) run a number of rules against the configurations
and look for any violations. These violations are saved with the object and a report
is made available via the Web UI and through email.
"""
index = None # Should be overridden
i_am_singular = None # Should be overridden
i_am_plural = None # Should be overridden
__metaclass__ = AuditorType
support_auditor_indexes = []
support_watcher_indexes = []
    def __init__(self, accounts=None, debug=False):
        """Set up datastore access and build the audit-report recipient list.

        :param accounts: list of account names this auditor run covers
        :param debug: verbosity flag available to subclasses
        """
        self.datastore = datastore.Datastore()
        self.accounts = accounts
        self.debug = debug
        self.items = []
        self.team_emails = app.config.get('SECURITY_TEAM_EMAIL', [])
        self.emails = []
        self.current_support_items = {}
        self.override_scores = None
        self.current_method_name = None
        # SECURITY_TEAM_EMAIL may be a single address or a list of addresses.
        # NOTE: the `unicode` builtin makes this module Python 2 only.
        if type(self.team_emails) in (str, unicode):
            self.emails.append(self.team_emails)
        elif type(self.team_emails) in (list, tuple):
            self.emails.extend(self.team_emails)
        else:
            app.logger.info("Auditor: SECURITY_TEAM_EMAIL contains an invalid type")
        # Also notify every user subscribed to the daily audit email for any
        # of the audited accounts (queries the DB at construction time).
        for account in self.accounts:
            users = User.query.filter(User.daily_audit_email==True).filter(User.accounts.any(name=account)).all()
            self.emails.extend([user.email for user in users])
def add_issue(self, score, issue, item, notes=None):
"""
Adds a new issue to an item, if not already reported.
:return: The new issue
"""
if notes and len(notes) > 1024:
notes = notes[0:1024]
if not self.override_scores:
query = ItemAuditScore.query.filter(ItemAuditScore.technology == self.index)
self.override_scores = query.all()
# Check for override scores to apply
score = self._check_for_override_score(score, item.account)
for existing_issue in item.audit_issues:
if existing_issue.issue == issue:
if existing_issue.notes == notes:
if existing_issue.score == score:
app.logger.debug(
"Not adding issue because it was already found:{}/{}/{}/{}\n\t{} -- {}"
.format(item.index, item.region, item.account, item.name, issue, notes))
return existing_issue
app.logger.debug("Adding issue: {}/{}/{}/{}\n\t{} -- {}"
.format(item.index, item.region, item.account, item.name, issue, notes))
new_issue = datastore.ItemAudit(score=score,
issue=issue,
notes=notes,
justified=False,
justified_user_id=None,
justified_date=None,
justification=None)
item.audit_issues.append(new_issue)
return new_issue
    def prep_for_audit(self):
        """
        To be overridden by child classes who
        need a way to prepare for the next run.
        """
        # Intentionally a no-op in the base class.
        pass
def audit_these_objects(self, items):
"""
Only inspect the given items.
"""
app.logger.debug("Asked to audit {} Objects".format(len(items)))
self.prep_for_audit()
self.current_support_items = {}
query = ItemAuditScore.query.filter(ItemAuditScore.technology == self.index)
self.override_scores = query.all()
methods = [getattr(self, method_name) for method_name in dir(self) if method_name.find("check_") == 0]
app.logger.debug("methods: {}".format(methods))
for item in items:
for method in methods:
self.current_method_name = method.func_name
# If the check function is disabled by an entry on Settings/Audit Issue Scores
# the function will not be run and any previous issues will be cleared
if not self._is_current_method_disabled():
method(item)
self.items = items
self.override_scores = None
def _is_current_method_disabled(self):
"""
Determines whether this method has been marked as disabled based on Audit Issue Scores
settings.
"""
for override_score in self.override_scores:
if override_score.method == self.current_method_name + ' (' + self.__class__.__name__ + ')':
return override_score.disabled
return False
def audit_all_objects(self):
"""
Read all items from the database and inspect them all.
"""
self.items = self.read_previous_items()
self.audit_these_objects(self.items)
def read_previous_items(self):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
for account in self.accounts:
prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
arn=item.arn,
new_config=item_revision.config)
new_item.audit_issues = []
new_item.db_item = item
prev_list.append(new_item)
return prev_list
def read_previous_items_for_account(self, index, account):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
prev = self.datastore.get_all_ctype_filtered(tech=index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
arn=item.arn,
new_config=item_revision.config)
new_item.audit_issues = []
new_item.db_item = item
prev_list.append(new_item)
return prev_list
    def save_issues(self):
        """
        Save all new issues. Delete all fixed issues.

        For every audited item, freshly-found issues not already persisted are
        added, and persisted issues owned by this auditor class that were not
        re-found are removed. Issue identity is a composite string of
        auditor class, issue text, notes, score and linked sub-item ids.
        """
        app.logger.debug("\n\nSaving Issues.")
        # Work around for issue where previous get's may cause commit to fail
        db.session.rollback()
        for item in self.items:
            changes = False
            loaded = False
            # Items coming from audit_these_objects may lack a db_item link;
            # load it on demand and remember that we did (see expunge below).
            if not hasattr(item, 'db_item'):
                loaded = True
                item.db_item = self.datastore._get_item(item.index, item.region, item.account, item.name)
            existing_issues = list(item.db_item.issues)
            new_issues = item.audit_issues
            # Ensure every persisted issue is linked to an AuditorSettings row.
            for issue in item.db_item.issues:
                if not issue.auditor_setting:
                    self._set_auditor_setting_for_issue(issue)
            # Add new issues
            # Composite identity keys for all currently-persisted issues.
            old_scored = ["{} -- {} -- {} -- {} -- {}".format(
                old_issue.auditor_setting.auditor_class,
                old_issue.issue,
                old_issue.notes,
                old_issue.score,
                self._item_list_string(old_issue)) for old_issue in existing_issues]
            for new_issue in new_issues:
                nk = "{} -- {} -- {} -- {} -- {}".format(self.__class__.__name__,
                                                         new_issue.issue,
                                                         new_issue.notes,
                                                         new_issue.score,
                                                         self._item_list_string(new_issue))
                if nk not in old_scored:
                    changes = True
                    app.logger.debug("Saving NEW issue {}".format(nk))
                    item.found_new_issue = True
                    item.confirmed_new_issues.append(new_issue)
                    item.db_item.issues.append(new_issue)
                else:
                    # Already persisted: record the matching existing issue
                    # as confirmed instead of re-adding it.
                    for issue in existing_issues:
                        if issue.issue == new_issue.issue and issue.notes == new_issue.notes and issue.score == new_issue.score:
                            item.confirmed_existing_issues.append(issue)
                            break
                    key = "{}/{}/{}/{}".format(item.index, item.region, item.account, item.name)
                    app.logger.debug("Issue was previously found. Not overwriting.\n\t{}\n\t{}".format(key, nk))
            # Delete old issues
            # Identity keys (without auditor class) for the freshly-found issues.
            new_scored = ["{} -- {} -- {} -- {}".format(new_issue.issue,
                                                        new_issue.notes,
                                                        new_issue.score,
                                                        self._item_list_string(new_issue)) for new_issue in new_issues]
            for old_issue in existing_issues:
                ok = "{} -- {} -- {} -- {}".format(old_issue.issue,
                                                   old_issue.notes,
                                                   old_issue.score,
                                                   self._item_list_string(old_issue))
                # Only delete issues owned by this auditor class (or orphaned
                # ones with no recorded class) that were not re-found.
                old_issue_class = old_issue.auditor_setting.auditor_class
                if old_issue_class is None or (old_issue_class == self.__class__.__name__ and ok not in new_scored):
                    changes = True
                    app.logger.debug("Deleting FIXED or REPLACED issue {}".format(ok))
                    item.confirmed_fixed_issues.append(old_issue)
                    item.db_item.issues.remove(old_issue)
            if changes:
                db.session.add(item.db_item)
            else:
                # Avoid keeping untouched, on-demand-loaded items in the session.
                if loaded:
                    db.session.expunge(item.db_item)
        db.session.commit()
        self._create_auditor_settings()
        report_auditor_changes(self)
def email_report(self, report):
"""
Given a report, send an email using SES.
"""
if not report:
app.logger.info("No Audit issues. Not sending audit email.")
return
subject = "Security Monkey {} Auditor Report".format(self.i_am_singular)
send_email(subject=subject, recipients=self.emails, html=report)
def create_report(self):
"""
Using a Jinja template (jinja_audit_email.html), create a report that can be emailed.
:return: HTML - The output of the rendered template.
"""
jenv = get_jinja_env()
template = jenv.get_template('jinja_audit_email.html')
# This template expects a list of items that have been sorted by total score in
# descending order.
for item in self.items:
item.totalscore = 0
for issue in item.db_item.issues:
item.totalscore = item.totalscore + issue.score
sorted_list = sorted(self.items, key=lambda item: item.totalscore)
sorted_list.reverse()
report_list = []
for item in sorted_list:
if item.totalscore > 0:
report_list.append(item)
else:
break
if len(report_list) > 0:
return template.render({'items': report_list})
else:
return False
    def applies_to_account(self, account):
        """
        Placeholder for custom auditors which may only want to run against
        certain types of accounts
        """
        # Base implementation: audit every account.
        return True
def _create_auditor_settings(self):
"""
Checks to see if an AuditorSettings entry exists for each issue.
If it does not, one will be created with disabled set to false.
"""
app.logger.debug("Creating/Assigning Auditor Settings in account {} and tech {}".format(self.accounts, self.index))
query = ItemAudit.query
query = query.join((Item, Item.id == ItemAudit.item_id))
query = query.join((Technology, Technology.id == Item.tech_id))
query = query.filter(Technology.name == self.index)
issues = query.filter(ItemAudit.auditor_setting_id == None).all()
for issue in issues:
self._set_auditor_setting_for_issue(issue)
db.session.commit()
app.logger.debug("Done Creating/Assigning Auditor Settings in account {} and tech {}".format(self.accounts, self.index))
    def _set_auditor_setting_for_issue(self, issue):
        """Link *issue* to its AuditorSettings row, creating the row if needed.

        :return: the linked AuditorSettings instance.
        """
        # Reuse an existing settings row matching tech/account/issue-text/auditor.
        auditor_setting = AuditorSettings.query.filter(
            and_(
                AuditorSettings.tech_id == issue.item.tech_id,
                AuditorSettings.account_id == issue.item.account_id,
                AuditorSettings.issue_text == issue.issue,
                AuditorSettings.auditor_class == self.__class__.__name__
            )
        ).first()
        if auditor_setting:
            auditor_setting.issues.append(issue)
            db.session.add(auditor_setting)
            return auditor_setting
        # No settings row yet: create one, enabled by default.
        auditor_setting = AuditorSettings(
            tech_id=issue.item.tech_id,
            account_id=issue.item.account_id,
            disabled=False,
            issue_text=issue.issue,
            auditor_class=self.__class__.__name__
        )
        auditor_setting.issues.append(issue)
        db.session.add(auditor_setting)
        db.session.commit()
        db.session.refresh(auditor_setting)
        app.logger.debug("Created AuditorSetting: {} - {} - {}".format(
            issue.issue,
            self.index,
            issue.item.account.name))
        return auditor_setting
def _check_cross_account(self, src_account_number, dest_item, location):
account = Account.query.filter(Account.identifier == src_account_number).first()
account_name = None
if account is not None:
account_name = account.name
src = account_name or src_account_number
dst = dest_item.account
if src == dst:
return None
notes = "SRC [{}] DST [{}]. Location: {}".format(src, dst, location)
if not account_name:
tag = "Unknown Cross Account Access"
self.add_issue(10, tag, dest_item, notes=notes)
elif account_name != dest_item.account and not account.third_party:
tag = "Friendly Cross Account Access"
self.add_issue(0, tag, dest_item, notes=notes)
elif account_name != dest_item.account and account.third_party:
tag = "Friendly Third Party Cross Account Access"
self.add_issue(0, tag, dest_item, notes=notes)
def _check_cross_account_root(self, source_item, dest_arn, actions):
if not actions:
return None
account = Account.query.filter(Account.name == source_item.account).first()
source_item_account_number = account.identifier
if source_item_account_number == dest_arn.account_number:
return None
tag = "Cross-Account Root IAM"
notes = "ALL IAM Roles/users/groups in account {} can perform the following actions:\n"\
.format(dest_arn.account_number)
notes += "{}".format(actions)
self.add_issue(6, tag, source_item, notes=notes)
def get_auditor_support_items(self, auditor_index, account):
for index in self.support_auditor_indexes:
if index == auditor_index:
audited_items = self.current_support_items.get(account + auditor_index)
if audited_items is None:
audited_items = self.read_previous_items_for_account(auditor_index, account)
if not audited_items:
app.logger.info("{} Could not load audited items for {}/{}".format(self.index, auditor_index, account))
self.current_support_items[account+auditor_index] = []
else:
self.current_support_items[account+auditor_index] = audited_items
return audited_items
raise Exception("Auditor {} is not configured as an audit support auditor for {}".format(auditor_index, self.index))
def get_watcher_support_items(self, watcher_index, account):
for index in self.support_watcher_indexes:
if index == watcher_index:
items = self.current_support_items.get(account + watcher_index)
if items is None:
items = self.read_previous_items_for_account(watcher_index, account)
# Only the item contents should be used for watcher support
# config. This prevents potentially stale issues from being
# used by the auditor
for item in items:
item.db_item.issues = []
if not items:
app.logger.info("{} Could not load support items for {}/{}".format(self.index, watcher_index, account))
self.current_support_items[account+watcher_index] = []
else:
self.current_support_items[account+watcher_index] = items
return items
raise Exception("Watcher {} is not configured as a data support watcher for {}".format(watcher_index, self.index))
    def link_to_support_item_issues(self, item, sub_item, sub_issue_message=None, issue_message=None, issue=None, score=None):
        """
        Creates a new issue that is linked to an issue in a support auditor

        :param item: the item receiving the new linked issue
        :param sub_item: support-auditor item whose issues are examined
        :param sub_issue_message: if set, only sub-issues with this exact text match
        :param issue_message: text for the created issue (defaults to
            sub_issue_message, or "UNDEFINED")
        :param issue: an existing issue to accumulate into instead of creating one
        :param score: fixed score; when None the matching sub-issues' scores are
            used (and summed across multiple matches)
        :return: the created or updated issue, or the unchanged ``issue``
            argument (possibly None) when nothing matched
        """
        # Collect the sub-item issues to link against; with no filter set,
        # every issue on the sub-item matches.
        matching_issues = []
        for sub_issue in sub_item.issues:
            if not sub_issue_message or sub_issue.issue == sub_issue_message:
                matching_issues.append(sub_issue)
        if len(matching_issues) > 0:
            for matching_issue in matching_issues:
                if issue is None:
                    # First match: create the linked issue, defaulting the
                    # message (and, without a fixed score, the score) from it.
                    if issue_message is None:
                        if sub_issue_message is not None:
                            issue_message = sub_issue_message
                        else:
                            issue_message = "UNDEFINED"
                    if score is not None:
                        issue = self.add_issue(score, issue_message, item)
                    else:
                        issue = self.add_issue(matching_issue.score, issue_message, item)
                else:
                    # Later matches: accumulate scores unless a fixed score
                    # was supplied by the caller.
                    if score is not None:
                        issue.score = score
                    else:
                        issue.score = issue.score + matching_issue.score
            issue.sub_items.append(sub_item)
        return issue
def link_to_support_item(self, score, issue_message, item, sub_item, issue=None):
    """
    Attach ``sub_item`` (a support watcher item) to an issue on ``item``,
    creating the issue first when one is not supplied.
    """
    linked_issue = issue if issue is not None else self.add_issue(score, issue_message, item)
    linked_issue.sub_items.append(sub_item)
    return linked_issue
def _item_list_string(self, issue):
"""
Use by save_issue to generate a unique id for an item
"""
item_ids = []
for sub_item in issue.sub_items:
item_ids.append(sub_item.id)
item_ids.sort()
return str(item_ids)
def _check_for_override_score(self, score, account):
    """
    Return an override to the hard coded score for an issue being added. This could either
    be a general override score for this check method or one that is specific to a particular
    field in the account.
    :param score: the hard coded score which will be returned back if there is
        no applicable override
    :param account: The account name, used to look up the value of any pattern
        based overrides
    :return: the (possibly overridden) score
    """
    for override_score in self.override_scores:
        # Look for an override entry that applies to this check method.
        if override_score.method == self.current_method_name + ' (' + self.__class__.__name__ + ')':
            # Check for account pattern override where a field in the account matches
            # one configured in Settings/Audit Issue Scores.
            # NOTE(review): `account` is rebound from a name to an account object
            # here; if a second override_score matched, the object would be passed
            # back into get_account_by_name — confirm only one entry can match.
            account = get_account_by_name(account)
            for account_pattern_score in override_score.account_pattern_scores:
                if getattr(account, account_pattern_score.account_field, None):
                    # Standard account field, such as identifier or notes
                    account_pattern_value = getattr(account, account_pattern_score.account_field)
                else:
                    # If there is no attribute, this is an account custom field
                    account_pattern_value = account.getCustom(account_pattern_score.account_field)
                if account_pattern_value is not None:
                    # Override the score based on the matching pattern
                    if account_pattern_value == account_pattern_score.account_pattern:
                        app.logger.debug("Overriding score based on config {}:{} {}/{}".format(self.index, self.current_method_name + '(' + self.__class__.__name__ + ')', score, account_pattern_score.score))
                        score = account_pattern_score.score
                        break
            else:
                # for/else: no specific override pattern found — use the generic override score.
                app.logger.debug("Overriding score based on config {}:{} {}/{}".format(self.index, self.current_method_name + '(' + self.__class__.__name__ + ')', score, override_score.score))
                score = override_score.score
    return score
| 24,331 | 6,495 |
# Best-effort: switch to the user's default locale so formatting follows the
# environment; failures (missing locale support, unsupported platform) are
# non-fatal.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    pass
import os, sys, codecs
import docutils.core
from .RevealTranslator import RST2RevealTranslator, RST2RevealWriter
# Import custom directives
from .TwoColumnsDirective import *
from .PygmentsDirective import *
from .VideoDirective import *
from .PlotDirective import *
from .SmallRole import *
from .VspaceRole import *
from .ClassDirective import *
from .ClearDirective import *
from .TemplateDirective import *
class Parser:
    """Class converting a stand-alone reST file into a Reveal.js-powered HTML5 file, using the provided options."""

    def __init__(self, input_file, output_file='', theme='default', transition='default', stylesheet='',
                 mathjax_path='', pygments_style='', vertical_center=False,
                 horizontal_center=False, title_center=False, footer=False, page_number=False,
                 controls=False, firstslide_template='', footer_template='', init_html=False, reveal_root='reveal'):
        """ Constructor of the Parser class.

        ``create_slides()`` must then be called to actually produce the presentation.

        Arguments:

            * input_file : name of the reST file to be processed (obligatory).
            * output_file: name of the HTML file to be generated (default: same as input_file, but with a .html extension).
            * theme: the name of the theme to be used ({**default**, beige, night}).
            * transition: the transition between slides ({**default**, cube, page, concave, zoom, linear, fade, none}).
            * stylesheet: a custom CSS file which extends or replaces the used theme.
            * mathjax_path: URL or path to the MathJax library (default: http://cdn.mathjax.org/mathjax/latest/MathJax.js).
            * pygments_style: the style to be used for syntax color-highlighting using Pygments. The list depends on your Pygments version, type::

                from pygments.styles import STYLE_MAP
                print(STYLE_MAP.keys())

            * vertical_center: boolean stating if the slide content should be vertically centered (default: False).
            * horizontal_center: boolean stating if the slide content should be horizontally centered (default: False).
            * title_center: boolean stating if the title of each slide should be horizontally centered (default: False).
            * footer: boolean stating if the footer line should be displayed (default: False).
            * page_number: boolean stating if the slide number should be displayed (default: False).
            * controls: boolean stating if the control arrows should be displayed (default: False).
            * firstslide_template: template string defining how the first slide will be rendered in HTML.
            * footer_template: template string defining how the footer will be rendered in HTML.
            * init_html: custom Reveal.js initialization HTML/JS; when falsy a default block is generated.
            * reveal_root: root path to the reveal.js distribution (default: 'reveal').

        The ``firstslide_template`` and ``footer_template`` can use the following substitution variables:

            * %(title)s : will be replaced by the title of the presentation.
            * %(subtitle)s : subtitle of the presentation (either a level-2 header or the :subtitle: field, if any).
            * %(author)s : :author: field (if any).
            * %(institution)s : :institution: field (if any).
            * %(email)s : :email: field (if any).
            * %(date)s : :date: field (if any).
            * %(is_author)s : the '.' character if the :author: field is defined, '' otherwise.
            * %(is_subtitle)s : the '.' character if the subtitle is defined, '' otherwise.
            * %(is_institution)s : the '-' character if the :institution: field is defined, '' otherwise.

        You can also use your own fields in the templates.
        """
        # Input/Output files
        self.input_file = input_file
        self.output_file = output_file
        # Style
        self.theme = theme
        self.stylesheet = stylesheet
        self.transition = transition
        self.vertical_center = vertical_center
        self.horizontal_center = horizontal_center
        self.title_center = title_center
        self.write_footer = footer
        self.page_number = page_number
        self.controls = controls
        # MathJax
        if mathjax_path == '':
            self.mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js'
        else:
            self.mathjax_path = mathjax_path
        # Pygments
        self.pygments_style = pygments_style
        # Template for the first slide
        self.firstslide_template = firstslide_template
        # Template for the footer
        self.footer_template = footer_template
        # Initialization html for reveal.js
        self.init_html = init_html
        # Root path to reveal
        self.reveal_root = reveal_root

    def create_slides(self):
        """Creates the HTML5 presentation based on the arguments given to the constructor."""
        # Copy the static assets next to the output file
        self._copy_reveal()
        # Create the writer and retrieve the parts
        self.html_writer = RST2RevealWriter()
        self.html_writer.translator_class = RST2RevealTranslator
        with codecs.open(self.input_file, 'r', 'utf8') as infile:
            self.parts = docutils.core.publish_parts(source=infile.read(), writer=self.html_writer)
        # Produce the html file
        self._produce_output()

    def _copy_reveal(self):
        """Copy the rst2reveal stylesheet next to the output file and generate the Pygments CSS."""
        curr_dir = os.path.dirname(os.path.realpath(self.output_file))
        cwd = os.getcwd()
        # Copy the reveal subfolder
        #if not os.path.isdir(curr_dir+'/reveal'):
        #    sources_dir = os.path.abspath(os.path.dirname(__file__)+'/reveal')
        #    import shutil
        #    shutil.copytree(sources_dir, curr_dir+'/reveal')
        # Copy the rst2reveal.css
        if not os.path.exists(curr_dir+'/rst2reveal.css'):
            source_file = os.path.abspath(os.path.dirname(__file__)+'/reveal/css/rst2reveal.css')
            import shutil
            shutil.copyfile(source_file, curr_dir+'/rst2reveal.css')
        # Generate the Pygments CSS file
        self.is_pygments = False
        if not self.pygments_style == '':
            # Check if Pygments is installed (narrowed from a bare `except:`)
            try:
                import pygments
                self.is_pygments = True
            except ImportError:
                print('Warning: Pygments is not installed, the code will not be highlighted.')
                print('You should install it with `pip install pygments`')
                return
            os.chdir(curr_dir)
            import shutil
            # NOTE: pygments_style is interpolated into a shell command; it comes
            # from the CLI/config, so keep it restricted to known style names.
            os.system("pygmentize -S "+self.pygments_style+" -f html -O bg=light > pygments.css")
            # Fix the bug where the literal color goes to math blocks...
            with codecs.open('pygments.css', 'r', 'utf8') as infile:
                with codecs.open('pygments.css.tmp', 'w', 'utf8') as outfile:
                    for aline in infile:
                        outfile.write('.highlight '+aline)
            shutil.move('pygments.css.tmp', 'pygments.css')
            os.chdir(cwd)

    def _produce_output(self):
        """Assemble header + body + footer and write the final HTML file."""
        self.title = self.parts['title']
        self._analyse_metainfo()
        header = self._generate_header()
        body = self._generate_body()
        footer = self._generate_footer()
        document_content = header + body + footer
        with codecs.open(self.output_file, 'w', 'utf8') as wfile:
            wfile.write(document_content)

    def _generate_body(self):
        """Return the <body> section: the title slide followed by the generated slides."""
        body = """
<body>
<div class="static-content"></div>
<div class="reveal">
<div class="slides">
%(titleslide)s
%(body)s
</div>
</div>
""" % {'body': self.parts['body'],
       'titleslide': self.titleslide}
        return body

    def _analyse_metainfo(self):
        """Extract docinfo fields (author, date, email, ...) from the pseudo-XML metadata part."""
        def clean(text):
            import re
            # Strip the docutils pseudo-XML wrappers around field values.
            if len(re.findall(r'<paragraph>', text)) > 0:
                text = re.findall(r'<paragraph>(.+)</paragraph>', text)[0]
            if len(re.findall(r'<author>', text)) > 0:
                text = re.findall(r'<author>(.+)</author>', text)[0]
            if len(re.findall(r'<date>', text)) > 0:
                text = re.findall(r'<date>(.+)</date>', text)[0]
            if len(re.findall(r'<reference', text)) > 0:
                text = re.findall(r'<reference refuri="mailto:(.+)">', text)[0]
            return text

        self.meta_info = {'author': ''}
        texts = self.parts['metadata'].split('\n')
        for t in texts:
            if not t == '':
                # Each metadata line has the form "name=value".
                name = t.split('=')[0]
                content = t.replace(name+'=', '')
                content = clean(content)
                self.meta_info[name] = content
        self._generate_titleslide()

    def _generate_titleslide(self):
        """Fill in defaults for missing meta fields and render the title-slide and footer HTML."""
        if self.parts['title'] != '':  # A title has been given
            self.meta_info['title'] = self.parts['title']
        elif 'title' not in self.meta_info.keys():
            self.meta_info['title'] = ''
        if self.parts['subtitle'] != '':  # defined with a underlined text instead of :subtitle:
            self.meta_info['subtitle'] = self.parts['subtitle']
        elif 'subtitle' not in self.meta_info.keys():
            self.meta_info['subtitle'] = ''
        if 'email' not in self.meta_info.keys():
            self.meta_info['email'] = ''
        if 'institution' not in self.meta_info.keys():
            self.meta_info['institution'] = ''
        if 'date' not in self.meta_info.keys():
            self.meta_info['date'] = ''
        # Separators
        self.meta_info['is_institution'] = '-' if self.meta_info['institution'] != '' else ''
        self.meta_info['is_author'] = '.' if self.meta_info['author'] != '' else ''
        self.meta_info['is_subtitle'] = '.' if self.meta_info['subtitle'] != '' else ''
        if self.firstslide_template == "":
            self.firstslide_template = """
<section class="titleslide">
<h1>%(title)s</h1>
<h3>%(subtitle)s</h3>
<br>
<p><a href="mailto:%(email)s">%(author)s</a> %(is_institution)s %(institution)s</p>
<p><small>%(email)s</small></p>
<p>%(date)s</p>
</section>
"""
        self.titleslide = self.firstslide_template % self.meta_info
        if self.footer_template == "":
            self.footer_template = """<b>%(title)s %(is_subtitle)s %(subtitle)s.</b> %(author)s%(is_institution)s %(institution)s. %(date)s"""
        if self.write_footer:
            self.footer_html = """<footer id=\"footer\">""" + self.footer_template % self.meta_info + """<b id=\"slide_number\" style=\"padding: 1em;\"></b></footer>"""
        elif self.page_number:
            self.footer_html = """<footer><b id=\"slide_number\"></b></footer>"""
        else:
            self.footer_html = ""

    def _generate_header(self):
        """Return the HTML <head> section, wiring in theme, stylesheets and MathJax."""
        header = """<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>%(title)s</title>
<meta name="description" content="%(title)s">
%(meta)s
<meta name="apple-mobile-web-app-capable" content="yes" />
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=5.0, user-scalable=no">
<link rel="stylesheet" href="%(reveal_root)s/css/reveal.css">
%(pygments)s
<link rel="stylesheet" href="rst2reveal.css">
<!--link rel="stylesheet" href="%(reveal_root)s/css/theme/default.css" id="theme"-->
<link rel="stylesheet" href="%(reveal_root)s/css/theme/%(theme)s.css" id="theme">
<link rel="stylesheet" href="%(reveal_root)s/css/print/pdf.css" type="text/css" media="print">
<script type="text/javascript" src="%(mathjax_path)s?config=TeX-AMS-MML_HTMLorMML"></script>
<!-- Extra styles -->
<style>
.reveal section {
text-align: %(horizontal_center)s;
}
.reveal h2{
text-align: %(title_center)s;
}
</style>
%(custom_stylesheet)s
<!--[if lt IE 9]>
<script src="%(reveal_root)s/lib/js/html5shiv.js"></script>
<![endif]-->
</head>
""" % {'title': self.title,
       'meta': self.parts['meta'],
       'theme': self.theme,
       'reveal_root': self.reveal_root,
       'pygments': '<link rel="stylesheet" href="pygments.css">' if self.is_pygments else '',
       'mathjax_path': self.mathjax_path,
       'horizontal_center': 'center' if self.horizontal_center else 'left',
       'title_center': 'center' if self.title_center else 'left',
       # BUG FIX: was `if not self.stylesheet is ''` — identity comparison with a
       # string literal is undefined behaviour (and a SyntaxWarning on 3.8+).
       'custom_stylesheet': '<link rel="stylesheet" href="%s">' % self.stylesheet if self.stylesheet != '' else ''}
        return header

    def _generate_footer(self):
        """Return the closing HTML: Reveal.js initialization, optional page-number script and footer."""
        if self.page_number:
            script_page_number = """
<script>
// Fires each time a new slide is activated
Reveal.addEventListener( 'slidechanged', function( event ) {
if(event.indexh > 0) {
if(event.indexv > 0) {
val = event.indexh + ' - ' + event.indexv
document.getElementById('slide_number').innerHTML = val;
}
else{
document.getElementById('slide_number').innerHTML = event.indexh;
}
}
else {
document.getElementById('slide_number').innerHTML = '';
}
} );
</script>"""
        else:
            script_page_number = ""
        if self.init_html:
            # NOTE(review): a custom init_html containing literal '%' characters
            # would break the %-formatting below — confirm callers escape them.
            footer = self.init_html
        else:
            footer = """
<script src="%(reveal_root)s/lib/js/head.min.js"></script>
<script src="%(reveal_root)s/js/reveal.min.js"></script>
<script>
// Full list of configuration options available here:
// https://github.com/hakimel/reveal.js#configuration
Reveal.initialize({
controls: %(controls)s,
progress: false,
history: true,
overview: true,
keyboard: true,
loop: false,
touch: true,
rtl: false,
center: %(vertical_center)s,
mouseWheel: true,
fragments: true,
rollingLinks: false,
transition: '%(transition)s'
});
</script>"""
        footer += """
%(script_page_number)s
%(footer)s
</body>
</html>"""
        footer = footer % {'transition': self.transition,
                           'footer': self.footer_html,
                           'mathjax_path': self.mathjax_path,
                           'reveal_root': self.reveal_root,
                           'script_page_number': script_page_number,
                           'vertical_center': 'true' if self.vertical_center else 'false',
                           'controls': 'true' if self.controls else 'false'}
        return footer
if __name__ == '__main__':
    # Manual smoke test: build slides for index.rst with all-default options.
    # Create the object
    parser = Parser(input_file='index.rst')
    # Create the slides
    parser.create_slides()
| 16,473 | 4,819 |
""" Application files """ | 25 | 5 |
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import webbrowser

# Request the lynx text-mode browser specifically (raises if it is not
# installed) and open the webbrowser module documentation with it.
browser = webbrowser.get('lynx')
browser.open('https://docs.python.org/3/library/webbrowser.html')
| 235 | 98 |
# Train a random-forest regressor with a fixed seed and score it on the
# held-out split.
# NOTE(review): X_train/y_train/X_test/y_test are not defined in this snippet —
# presumably produced by an earlier cell/split step; confirm before running
# standalone.
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
rf = RandomForestRegressor(random_state=42)
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
# The metric values below are neither stored nor printed; outside a notebook
# (where only the last expression is echoed) the results are discarded.
r2_score(y_test, rf_pred)
mean_squared_error(y_test, rf_pred)
mean_absolute_error(y_test, rf_pred)
| 385 | 150 |
# Generated by Django 2.2.7 on 2019-11-11 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the ``senha`` (Portuguese for "password") field to the ``Atleta`` model."""

    dependencies = [
        ('atleta', '0007_remove_atleta_treinador'),
    ]

    operations = [
        migrations.AddField(
            model_name='atleta',
            name='senha',
            # NOTE(review): a plain CharField for a password suggests cleartext
            # storage — confirm hashing is applied elsewhere before save.
            field=models.CharField(default='', max_length=255),
        ),
    ]
| 400 | 143 |
from django import forms
from django.forms.widgets import TextInput
class ListCreateForm(forms.Form):
    """Single-field form for creating a named list; the widget attrs apply Bootstrap styling."""

    # max_length=60 caps the list name length at the form-validation level.
    list_name = forms.CharField(label="Name", max_length=60, widget=TextInput(attrs={'class':'form-control'}))
| 214 | 65 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : functional.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 04/15/2020
#
# This file is part of TOQ-Nets-PyTorch.
# Distributed under terms of the MIT license.
from typing import List
import jactorch
import torch
from jacinle.utils.enum import JacEnum
__all__ = [
'TemporalPoolingImplementation', 'TemporalPoolingReduction', 'backward_pooling_1d1d',
'temporal_pooling_1d', 'temporal_pooling_2d', 'interval_pooling',
'matrix_from_diags', 'matrix_remove_diag'
]
class TemporalPoolingImplementation(JacEnum):
    """How temporal pooling is computed: a broadcasted masked reduction or an explicit Python loop."""
    BROADCAST = 'broadcast'
    FORLOOP = 'forloop'
class TemporalPoolingReduction(JacEnum):
    """Reduction applied when pooling over time: hard min/max or their soft (exp-weighted) variants."""
    MAX = 'max'
    MIN = 'min'
    SOFTMAX = 'softmax'
    SOFTMIN = 'softmin'
def masked_min(input, mask, dim, inf=1e9):
    """Minimum of `input` along `dim`, restricted to positions where `mask` is set.

    Masked-out entries are shifted up to `inf` so they can never win the min.
    """
    weights = mask.type(input.dtype)
    shifted = input * weights + inf * (1 - weights)
    return shifted.min(dim).values
def masked_max(input, mask, dim, inf=1e9):
    """Maximum of `input` along `dim`, restricted to positions where `mask` is set.

    Masked-out entries are shifted down to `-inf` so they can never win the max.
    """
    weights = mask.type(input.dtype)
    shifted = input * weights + inf * (weights - 1)
    return shifted.max(dim).values
def backward_pooling_1d1d(input, implementation='forloop', reduction='max'):
    """Pool input[:, i, j] over all frames j >= i, one result per start frame i.

    Args:
        input: tensor of shape [batch, nr_frames, nr_frames, hidden_dim].
        implementation: 'broadcast' or 'forloop'.
        reduction: 'max' or 'min'.

    Returns:
        tensor of shape [batch, nr_frames, hidden_dim].
    """
    impl = TemporalPoolingImplementation.from_string(implementation)
    nr_frames = input.size(1)
    if impl == TemporalPoolingImplementation.BROADCAST:
        frame_ids = torch.arange(nr_frames, device=input.device)
        row_ids, col_ids = jactorch.meshgrid(frame_ids, dim=0)
        valid = jactorch.add_dim_as_except(row_ids <= col_ids, input, 1, 2)
        if reduction == 'max':
            return masked_max(input, valid, dim=2)
        elif reduction == 'min':
            return masked_min(input, valid, dim=2)
        else:
            raise ValueError()
    elif impl == TemporalPoolingImplementation.FORLOOP:
        if reduction == 'max':
            pooled = [input[:, i, i:].max(dim=1)[0] for i in range(nr_frames)]
        elif reduction == 'min':
            pooled = [input[:, i, i:].min(dim=1)[0] for i in range(nr_frames)]
        else:
            raise ValueError()
        return torch.stack(pooled, dim=1)
    else:
        raise ValueError('Unknown temporal pooling implementation: {}.'.format(impl))
def temporal_pooling_1d(input, implementation='forloop'):
    """For every start frame i, concatenate the min- and max-pool of input[:, i:].

    Args:
        input: tensor of shape [batch, nr_frames, hidden_dim].

    Returns:
        tensor of shape [batch, nr_frames, 2 * hidden_dim].
    """
    impl = TemporalPoolingImplementation.from_string(implementation)
    nr_frames = input.size(1)
    if impl is TemporalPoolingImplementation.BROADCAST:
        frame_ids = torch.arange(nr_frames, device=input.device)
        row_ids, col_ids = jactorch.meshgrid(frame_ids, dim=0)
        expanded = jactorch.add_dim(input, 1, nr_frames)
        valid = jactorch.add_dim_as_except(row_ids <= col_ids, expanded, 1, 2)
        return torch.cat((masked_min(expanded, valid, dim=2), masked_max(expanded, valid, dim=2)), dim=-1)
    elif impl is TemporalPoolingImplementation.FORLOOP:
        pooled = [
            torch.cat((input[:, i:].min(dim=1)[0], input[:, i:].max(dim=1)[0]), dim=-1)
            for i in range(nr_frames)
        ]
        return torch.stack(pooled, dim=1)
    else:
        raise ValueError('Unknown temporal pooling implementation: {}.'.format(impl))
def temporal_pooling_2d(input, implementation='forloop'):
    """Interval min/max pooling over the frame axis, concatenated along the last dim.

    Args:
        input: tensor whose dim 1 is a frame axis (presumably
            [batch, nr_frames, nr_frames, hidden_dim] — confirm with callers).
        implementation: 'broadcast' or 'forloop'.
    """
    implementation = TemporalPoolingImplementation.from_string(implementation)
    nr_frames = input.size(1)
    indices = torch.arange(nr_frames, device=input.device)
    if implementation is TemporalPoolingImplementation.BROADCAST:
        indices_i, indices_j, indices_k = (
            jactorch.add_dim(jactorch.add_dim(indices, 1, nr_frames), 2, nr_frames),
            jactorch.add_dim(jactorch.add_dim(indices, 0, nr_frames), 1, nr_frames),
            jactorch.add_dim(jactorch.add_dim(indices, 0, nr_frames), 2, nr_frames)
        )
        input = jactorch.add_dim(input, 0, nr_frames)  # input[batch, i, k, j] = input[batch, k, j]
        # BUG FIX: the original used a chained comparison
        # `indices_i <= indices_k <= indices_j`, which Python evaluates as
        # `(i <= k) and (k <= j)`; `and` on a multi-element tensor raises
        # "Boolean value of Tensor ... is ambiguous". Use elementwise & instead.
        mask = (indices_i <= indices_k) & (indices_k <= indices_j)
        mask = jactorch.add_dim_as_except(mask, input, 1, 2, 3)
        return torch.cat((
            masked_min(input, mask, dim=2),
            masked_max(input, mask, dim=2)
        ), dim=-1)
    elif implementation is TemporalPoolingImplementation.FORLOOP:
        all_tensors = list()
        for i in range(nr_frames):
            # Pool over all frames k >= i.
            mask = indices >= i
            mask = jactorch.add_dim_as_except(mask, input, 1)
            all_tensors.append(torch.cat((
                masked_min(input, mask, dim=1),
                masked_max(input, mask, dim=1)
            ), dim=-1))
        return torch.stack(all_tensors, dim=1)
    else:
        raise ValueError('Unknown temporal pooling implementation: {}.'.format(implementation))
def interval_pooling(input, implementation='forloop', reduction='max', beta=None):
    """
    Args:
        input (torch.Tensor): 3D tensor of [batch_size, nr_frames, hidden_dim]
        implementation (Union[TemporalPoolingImplementation, str]): the implementation. Currently only support FORLOOP.
        reduction (Union[TemporalPoolingReduction, str]): reduction method. Either MAX or MIN
            (SOFTMAX/SOFTMIN are also handled; ``beta`` is then the log-temperature).
        beta (float): log of the soft reduction's temperature; only used for SOFTMAX/SOFTMIN.

    Return:
        output (torch.Tensor): 4D tensor of [batch_size, nr_frames, nr_frames, hidden_dim], where

        ```
        output[:, i, j, :] = min output[:, k, :] where i <= k <= j
        ```

        the k is cyclic-indexed.
    """
    implementation = TemporalPoolingImplementation.from_string(implementation)
    reduction = TemporalPoolingReduction.from_string(reduction)
    batch_size, nr_frames = input.size()[:2]
    if implementation is TemporalPoolingImplementation.FORLOOP:
        if reduction is TemporalPoolingReduction.MAX or reduction is TemporalPoolingReduction.MIN:
            input_doubled = torch.cat((input, input), dim=1)  # repeat the input at dim=1.
            output_tensors = list()
            output_tensors.append(input)
            # Dynamic programming over interval length: the reduction for length
            # L extends the length L-1 result by one more (cyclically indexed) frame.
            for length in range(2, nr_frames + 1):
                last_tensor = output_tensors[-1]
                last_elems = input_doubled[:, length - 1:length - 1 + nr_frames]
                if reduction is TemporalPoolingReduction.MAX:
                    this_tensor = torch.max(last_tensor, last_elems)
                elif reduction is TemporalPoolingReduction.MIN:
                    this_tensor = torch.min(last_tensor, last_elems)
                else:
                    raise ValueError('Wrong value {}.'.format(reduction))
                output_tensors.append(this_tensor)
            return matrix_from_diags(output_tensors, dim=1, triu=True)
        else:
            from math import exp
            scale = exp(beta)
            input_doubled = torch.cat((input, input), dim=1)  # repeat the input at dim=1.
            output_tensors = list()
            if reduction is TemporalPoolingReduction.SOFTMIN:
                # Negative temperature turns the softmax-weighted average into a softmin.
                scale = -scale
            else:
                assert reduction is TemporalPoolingReduction.SOFTMAX
            # Soft reduction: accumulate exp-weighted sums and their normalizers
            # per interval length, then divide (a softmax-weighted average).
            input_arg = torch.exp(input / scale)
            output_tensors.append((input * input_arg, input_arg))
            for length in range(2, nr_frames + 1):
                last_tensor, last_argsum = output_tensors[-1]
                last_elems = input_doubled[:, length - 1:length - 1 + nr_frames]
                last_elems_arg = torch.exp(last_elems / scale)
                output_tensors.append((
                    last_tensor + last_elems * last_elems_arg,
                    last_argsum + last_elems_arg
                ))
            output2 = matrix_from_diags([x[0] / x[1] for x in output_tensors], dim=1, triu=True)
            # Test:
            # X, Y = torch.meshgrid(torch.arange(length), torch.arange(length))
            # upper = (X < Y).float().view(1, length, length, 1).to(output.device)
            # print((((output - output2) ** 2) * upper).sum())
            # exit()
            return output2
    else:
        raise NotImplementedError('Unknown interval pooling implementation: {}.'.format(implementation))
def matrix_from_diags(diags: List[torch.Tensor], dim: int = 1, triu: bool = False):
    """
    Construct an N by N matrix from N diags of the matrix.

    Args:
        diags (List[torch.Tensor]): N length-N vectors regarding the 1st, 2nd, ... diags of the output matrix.
            They can also be same-dimensional tensors, where the matrix will be created at the dim and dim+1 axes.
        dim (int): the matrix will be created at dim and dim+1.
        triu (bool): use only the upper triangle of the matrix.

    Return:
        output: torch.Tensor
    """
    if dim < 0:
        dim += diags[0].dim()
    size = diags[0].size()
    # BUG FIX: build a new list instead of appending to the caller's `diags`
    # (the original mutated its input argument on every call).
    padded = list(diags) + [torch.zeros_like(diags[0])]
    output = torch.cat(padded, dim=dim)  # [..., (f+1)*f, ...]
    output = output.reshape(size[:dim] + (size[dim] + 1, size[dim]) + size[dim + 1:])
    output = output.transpose(dim, dim + 1)
    # The second reshape also forces the tensor contiguous after the transpose.
    output = output.reshape(
        size[:dim] + (size[dim] + 1, size[dim]) + size[dim + 1:])
    if triu:
        return output.narrow(dim, 0, size[dim])
    output = torch.cat((
        output.narrow(dim, 0, 1),
        matrix_remove_diag(output.narrow(dim, 1, size[dim]), dim=dim, move_up=True)
    ), dim=dim)
    return output
def matrix_remove_diag(matrix: torch.Tensor, dim: int = 1, move_up: bool = False):
    """
    Remove the first diag of the input matrix. The result is an N x (N-1) matrix.

    Args:
        matrix (torch.Tensor): the input matrix. It can be a tensor where the dim and dim+1 axes form a matrix.
        dim (int): the matrix is at dim and dim+1.
        move_up (bool): if True, the output matrix will be of shape (N-1) x N.
            In the move_left (default, move_up=False) mode, the left triangle will stay in its position and the
            upper triangle will move 1 element left. While in the move_up mode, the upper triangle will stay in
            its position, and the left triangle will move 1 element up.
    """
    if dim < 0:
        # BUG FIX: normalize against the number of dimensions; the original added
        # `matrix.size()` (a torch.Size tuple) to an int, which raises TypeError.
        dim += matrix.dim()
    if move_up:
        matrix = matrix.transpose(dim, dim + 1)
    size = matrix.size()
    n = size[dim]
    # Flatten the two matrix axes, drop the first element, and re-chunk into
    # rows of length n+1: each row is shifted left by one, removing the diagonal.
    matrix = matrix.reshape(size[:dim] + (n * n,) + size[dim + 2:])
    matrix = matrix.narrow(dim, 1, n * n - 1)
    matrix = matrix.reshape(size[:dim] + (n - 1, n + 1) + size[dim + 2:])
    matrix = matrix.narrow(dim + 1, 0, n)
    matrix = matrix.reshape(size[:dim] + (n, n - 1) + size[dim + 2:])
    if move_up:
        matrix = matrix.transpose(dim, dim + 1)
    return matrix
| 10,656 | 3,525 |
def teste(b):
global a
b += 4
a=8
c=2
print(a)
print(b)
print(c)
#------------------------------------
# Demonstrates how the `global` statement works: teste() rebinds the
# module-level `a` from 5 to 8, so the second print shows the new value.
a=5
print(a)
teste(6)
print(a)
"""Implement USE query."""
from pyetcd import EtcdKeyNotFound
from etcdb import OperationalError
def use_database(etcd_client, tree):
    """
    Return database name if it exists or raise exception.

    :param etcd_client: etcd client
    :type etcd_client: pyetcd.client.Client
    :param tree: Parsing tree.
    :type tree: SQLTree
    :return: Database name
    :raise OperationalError: if database doesn't exist.
    """
    db_name = tree.db
    try:
        # The read result is irrelevant; only existence of the key matters.
        etcd_client.read('/%s' % db_name)
    except EtcdKeyNotFound:
        raise OperationalError("Unknown database '%s'" % db_name)
    return db_name
| 598 | 184 |
from libs.apis import getCountryInfo, getCountries, getCountriesNames
from libs.charts import visualize
def main():
    """Fetch details for the top-10 countries and render them with a chart."""
    number = 10
    # Get top 10 countries: codes and display names come back as parallel lists.
    countries = getCountries(number)
    countries_names = getCountriesNames(number)
    data = []
    for idx, country in enumerate(countries):
        data.append({
            'country': countries_names[idx],
            'info': getCountryInfo(country)
        })
    visualize(data)


if __name__ == "__main__":
    main()
| 598 | 186 |
"""Authenticate Single Sign-On Middleware
==============
Single-Sign On
==============
About SSO
---------
Single sign on is a session/user authentication process that allows a user to
provide his or her credentials once in order to access multiple applications.
The single sign on authenticates the user to access all the applications he or
she has been authorized to access. It eliminates future authentication requests
when the user switches applications during that particular session.
.. admonition :: sources
# http://searchsecurity.techtarget.com/sDefinition/0,,sid14_gci340859,00.html
# http://en.wikipedia.org/wiki/Single_sign-on
AuthKit Implementations
-----------------------
The SSO sub-package of Authenticate implements various SSO schemes for
several University SSO systems as well as OpenID. In the future, additional
SSO schemes like LID may also be supported.
These systems sub-class the ``RedirectingAuthMiddleware`` from the api package
as they all utilize a similar scheme of authentcation via redirection with
back-end verification.
.. note::
    All University SSO work developed by Ben Bangert has been sponsored by
Prometheus Research, LLC and contributed under the BSD license.
"""
| 1,230 | 323 |
from flask import Flask, redirect, render_template, request, session
import yaml
from mendeley import Mendeley
from mendeley.session import MendeleySession
# Load OAuth client credentials from config.yml.
# BUG FIX: yaml.load() without an explicit Loader is deprecated and can execute
# arbitrary constructors; safe_load is sufficient for a plain config mapping.
with open('config.yml') as f:
    config = yaml.safe_load(f)

REDIRECT_URI = 'http://localhost:5000/oauth'

app = Flask(__name__)
app.debug = True
# NOTE(review): reusing the OAuth client secret as the Flask session-signing key
# couples two secrets — consider a dedicated secret_key entry.
app.secret_key = config['clientSecret']

mendeley = Mendeley(config['clientId'], config['clientSecret'], REDIRECT_URI)
@app.route('/')
def login():
    """Start the OAuth authorization-code flow and redirect to Mendeley's login page."""
    # TODO Check for token expiry
    # if 'token' in session:
    #     return redirect('/library')
    auth = mendeley.start_authorization_code_flow()
    # The state value is checked again in /oauth to validate the callback.
    session['state'] = auth.state
    return redirect(auth.get_login_url())
@app.route('/oauth')
def auth_return():
    """OAuth callback: exchange the authorization code for a token and store it in the session."""
    auth = mendeley.start_authorization_code_flow(state=session['state'])
    mendeley_session = auth.authenticate(request.url)
    # Drop everything (including 'state') before storing the fresh token.
    session.clear()
    session['token'] = mendeley_session.token
    return redirect('/library')
@app.route('/library')
def list_documents():
    """Render the user's Mendeley library, optionally filtered by search terms.

    Query-string parameters: ``query`` (full-text search), ``titleQuery`` /
    ``authorQuery`` / ``sourceQuery`` / ``abstractQuery`` (advanced search,
    only used when ``advancedSearch`` is set) and ``noteQuery`` (annotation
    text filter applied locally after fetching).
    """
    if 'token' not in session:
        return redirect('/')
    query = request.args.get('query') or ''
    titleQuery = request.args.get('titleQuery') or ''
    authorQuery = request.args.get('authorQuery') or ''
    sourceQuery = request.args.get('sourceQuery') or ''
    abstractQuery = request.args.get('abstractQuery') or ''
    noteQuery = request.args.get('noteQuery') or ''
    advancedSearch = request.args.get('advancedSearch')
    mendeley_session = get_session_from_cookies()
    docs = []
    # Get iterator for user's document library
    if advancedSearch and (titleQuery or authorQuery or sourceQuery or abstractQuery):
        docsIter = mendeley_session.documents.advanced_search(
            title=titleQuery,
            author=authorQuery,
            source=sourceQuery,
            abstract=abstractQuery,
            view='client').iter()
    elif query:
        docsIter = mendeley_session.documents.search(
            query, view='client').iter()
    else:
        docsIter = mendeley_session.documents.iter(view='client')
    # Accumulate all the documents
    for doc in docsIter:
        docs.append(doc)
    # Apply filter for annotations
    if noteQuery:
        nq = noteQuery.lower()
        noteDocIDs = set()
        # Find the IDs of all documents with at least one matching annotation
        # (case-insensitive substring match; iterates every annotation the user
        # has, so this can be slow for large accounts).
        for note in mendeley_session.annotations.iter():
            if (note.text):
                text = note.text.lower()
                if (text.find(nq) > -1):
                    noteDocIDs.add(note.document().id)
        # Filter the document list
        docs = [doc for doc in docs if doc.id in noteDocIDs]
    # Render results
    return render_template(
        'library.html',
        docs=docs,
        query=query,
        titleQuery=titleQuery,
        authorQuery=authorQuery,
        sourceQuery=sourceQuery,
        abstractQuery=abstractQuery,
        noteQuery=noteQuery,
        advancedSearch=advancedSearch)
@app.route('/document')
def get_document():
    """Show the details page for a single document from the user's library."""
    if 'token' not in session:
        return redirect('/')
    document_id = request.args.get('document_id')
    mendeley_session = get_session_from_cookies()
    return render_template('details.html', doc=mendeley_session.documents.get(document_id))
@app.route('/detailsLookup')
def details_lookup():
    """Look up a catalog document by DOI and show its details page."""
    if 'token' not in session:
        return redirect('/')
    doi = request.args.get('doi')
    matched = get_session_from_cookies().catalog.by_identifier(doi=doi)
    return render_template('details.html', doc=matched)
@app.route('/download')
def download():
    """Redirect to the download URL of a document's first attached file."""
    if 'token' not in session:
        return redirect('/')
    mendeley_session = get_session_from_cookies()
    document_id = request.args.get('document_id')
    doc = mendeley_session.documents.get(document_id)
    # NOTE(review): assumes the document has at least one file (IndexError
    # otherwise) — confirm callers only link documents with attachments.
    doc_file = doc.files.list().items[0]
    return redirect(doc_file.download_url)
@app.route('/logout')
def logout():
    """Discard the stored OAuth token and return to the login flow."""
    if 'token' in session:
        del session['token']
    return redirect('/')
def get_session_from_cookies():
    """Rebuild an authenticated MendeleySession from the token stored in the Flask session."""
    return MendeleySession(mendeley, session['token'])
if __name__ == '__main__':
    # Run Flask's built-in development server (not for production use).
    app.run()
| 4,158 | 1,268 |
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for operating on different endpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import contextlib
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.container.gkemulticloud import util as api_util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from six.moves.urllib import parse
# Locations for which a regional GKE Multi-cloud endpoint exists.
_VALID_LOCATIONS = frozenset([
    'asia-southeast1',
    'europe-west1',
    'us-east4',
    'us-west1',
])
def _ValidateLocation(location):
  """Raises InvalidArgumentException unless location is in _VALID_LOCATIONS.

  Args:
    location: str, location to validate.
  """
  if location not in _VALID_LOCATIONS:
    # sorted() produces the list directly instead of list() + .sort().
    locations = sorted(_VALID_LOCATIONS)
    raise exceptions.InvalidArgumentException(
        '--location',
        '{bad_location} is not a valid location. Allowed values: [{location_list}].'
        .format(
            bad_location=location,
            location_list=', '.join('\'{}\''.format(r) for r in locations)))
def _AppendLocation(endpoint, location):
scheme, netloc, path, params, query, fragment = parse.urlparse(endpoint)
netloc = '{}-{}'.format(location, netloc)
return parse.urlunparse((scheme, netloc, path, params, query, fragment))
@contextlib.contextmanager
def GkemulticloudEndpointOverride(location, track=base.ReleaseTrack.GA):
  """Context manager to override the GKE Multi-cloud endpoint temporarily.

  Args:
    location: str, location to use for GKE Multi-cloud.
    track: calliope_base.ReleaseTrack, Release track of the endpoint.

  Yields:
    None.
  """
  original_ep = properties.VALUES.api_endpoint_overrides.gkemulticloud.Get()
  try:
    # Only install a regional override when the user has not set one already.
    if not original_ep:
      if not location:
        raise ValueError('A location must be specified.')
      _ValidateLocation(location)
      regional_ep = _GetEffectiveEndpoint(location, track=track)
      properties.VALUES.api_endpoint_overrides.gkemulticloud.Set(regional_ep)
    # TODO(b/203617640): Remove handling of this exception once API has gone GA.
    yield
  except apitools_exceptions.HttpNotFoundError as e:
    if 'Method not found' in e.content:
      log.warning(
          'This project may not have been added to the allow list for the Anthos Multi-Cloud API, please reach out to your GCP account team to resolve this'
      )
    raise
  finally:
    # Restore the unset state; original_ep is falsy (None) on this path, so
    # Set(original_ep) clears the override we installed above.
    if not original_ep:
      properties.VALUES.api_endpoint_overrides.gkemulticloud.Set(original_ep)
def _GetEffectiveEndpoint(location, track=base.ReleaseTrack.GA):
  """Returns regional GKE Multi-cloud Endpoint."""
  api_version = api_util.GetApiVersionForTrack(track)
  base_endpoint = apis.GetEffectiveApiEndpoint(api_util.MODULE_NAME,
                                               api_version)
  return _AppendLocation(base_endpoint, location)
| 3,429 | 1,037 |
from math import floor
import talib
from util.dataRetrievalUtil import try_stdev
from util.langUtil import try_mean, try_int
def quartile_out(quartile, data):
    """Takes out extremities"""
    # TODO: not implemented -- intended to drop values outside the given
    # quartile range; currently a stub that returns None.
    pass
def moving_average(period, data):
    """Simple moving average of ``data`` over a window of ``period`` points.

    Returns a list of len(data) - period + 1 values; empty when the series
    is shorter than the window.
    """
    avg = []
    if len(data) < period:
        return avg
    for i in range(period - 1, len(data)):
        # Window must include element i, so the slice ends at i + 1.  The
        # old ``:i`` bound dropped the current point, leaving period - 1
        # elements (an empty window when period == 1).
        avg.append(try_mean(data[i - period + 1:i + 1]))
    return avg
def moving_stddev(period, data):
    """Rolling standard deviation of ``data`` over a ``period``-point window.

    Returns a list of len(data) - period + 1 values; empty when the series
    is shorter than the window.
    """
    avg = []
    if len(data) < period:
        return avg
    for i in range(period - 1, len(data)):
        # Same off-by-one fix as moving_average: include element i so the
        # window really holds ``period`` points.
        avg.append(try_stdev(data[i - period + 1:i + 1]))
    return avg
def adjusted_dev(period, data, order=1):
    """Return (upper, lower) bands: data +/- order * rolling stddev.

    NOTE(review): the previous version was marked "Does not work!" --
    ``above, below = data, data`` aliased one object and the second loop
    reused index ``i`` from the first.  Assumes ``data`` is a pandas
    DataFrame with a ``data`` column -- TODO confirm against callers.
    """
    # Independent copies so the two bands do not share storage.
    above, below = data.copy(), data.copy()
    stdev_data = talib.STDDEV(data, period)
    for i, row in above.iterrows():
        above.iloc[i].data += stdev_data.iloc[i].data * order
    for u, row in below.iterrows():
        # Use this loop's own index (the old code indexed with stale ``i``)
        # and write into ``below``, not ``above``.
        below.iloc[u].data -= stdev_data.iloc[u].data * order
    return above, below
def index_arr_to_date(date_index, index):
    """Given an index, return the date stored at that position of date_index.

    Returns 0 when ``index`` is out of range.
    """
    # Reject index == len as well: ``.iloc[len]`` raises IndexError (the
    # old ``>`` guard let it through).
    if index < 0 or index >= len(date_index):
        return 0
    return date_index.iloc[index]
def date_to_index_arr(index, dates_index, dates):
    """Map each date in ``dates`` to the ``index`` entry at the position
    where that date occurs in ``dates_index``.

    Keeps the original best-effort behaviour: if any date is missing, a
    list of zeros of the same length as ``dates`` is returned.
    """
    positions = list(dates_index)
    try:
        return [index[positions.index(date)] for date in dates]
    except ValueError:
        # list.index raises ValueError for a missing date; the old bare
        # ``except`` also hid unrelated bugs.
        print('Error! Date cannot be found. Continuing with 0.')
        return [0 for _ in dates]
def is_integer(x):
    """Return True when ``x`` has no fractional part."""
    y = try_int(x)
    # Explicit None check: the old truthiness test (``not y``) rejected a
    # valid result of 0, so is_integer(0) wrongly returned False.
    # Assumes try_int returns None on failure -- TODO confirm.
    if y is None or y - x != 0:
        return False
    return True
def get_scale_colour(col1, col2, val):
    """Takes in two colours and the val (between 1 and 0) to decide
    the colour value in the continuum from col1 to col2.
    col1 and col2 must be named colours."""
    # TODO: not implemented; returns None.
    pass
def to_candlestick(ticker_data, interval: str, inc=False):
    # TODO: not implemented; returns None.
    pass
def get_scale_grey(val):
    """Map ``val`` in [0, 1] to a grey colour string '#rrggbb'."""
    # Parenthesised: the old ``15*16+15 * val`` parsed as 240 + 15*val due
    # to precedence, giving '#f0f0f0' even for val == 0; the intent is the
    # full 0..255 channel scale.
    level = int((15 * 16 + 15) * val)
    high_digit, low_digit = divmod(level, 16)
    pair = F'{to_single_hex(high_digit)}{to_single_hex(low_digit)}'
    return F'#{pair}{pair}{pair}'
def get_inverse_single_hex(val):
    """Return the complement of a hex digit (15 - val mod 16) as '0'-'F'."""
    _val = try_int(val) % 16
    # The complement of a nibble is 15 - d (so '0' <-> 'F').  The old
    # ``16 - d`` produced 16 for d == 0, fell through every branch and
    # returned None.
    return format(15 - _val, 'X')
def to_single_hex(val):
    """Convert ``val`` (mod 16) to a single uppercase hex digit string."""
    # format(..., 'X') yields '0'-'9' and 'A'-'F', replacing the 7-branch
    # elif ladder with identical output.
    return format(try_int(val) % 16, 'X')
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 18:38:21 2019
@author: Agutierrez
"""
# -*- coding: utf-8 -*-
"""
Interfaz gráfica para el movimiento armónico de un edificio, de forma similar
a un terremoto.
"""
import numpy as np
import tkinter as tk
from matplotlib.animation import FuncAnimation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from tkinter.messagebox import showerror
# Initialize the main window.
window = tk.Tk()
window.title("Movimiento armónico de un edificio")
window.geometry("800x600")
# Frame that holds the data-entry widgets, docked on the left.
frame = tk.Frame(window)
frame.pack(side=tk.LEFT)
# Default values for the input fields (building geometry and motion).
base = 0.75       # semi-base (m)
altura = 5.71     # semi-height (m)
masa = 164200     # mass (kg)
radio = 5.76      # radius (m)
amplitud = 10     # amplitude (m)
periodo = 2       # period (s)
def generar_dato_entrada(frame, text, index, default=None):
variable = tk.DoubleVar(value=default)
# Configurar etiqueta para los datos
label = tk.Label(frame, text=text)
label.grid(row=index, column=0, padx=5, pady=5)
# Configurar entrada para los datos
entry = tk.Entry(frame, textvariable=variable, justify="right")
entry.grid(row=index, column=1, padx=5, pady=5)
return variable
# Variables bound to the entry widgets, seeded with the defaults above.
base_var = generar_dato_entrada(frame, "Semi-base (m):", 0, base)
altura_var = generar_dato_entrada(frame, "Semi-altura (m):", 1, altura)
masa_var = generar_dato_entrada(frame, "Masa (kg):", 2, masa)
radio_var = generar_dato_entrada(frame, "Radio (m):" , 3, radio)
amplitud_var = generar_dato_entrada(frame, "Amplitud (m):", 4, amplitud)
periodo_var = generar_dato_entrada(frame, "Periodo (s):", 5, periodo)
# NOTE(review): iniciar_simulacion() also reads elastica_var and
# viscosidad_var, which are never created here -- confirm whether two
# entry rows are missing.
def calcular_posicion(tiempo, masa, amplitud, elastica, viscosidad):
    """Position of a damped harmonic oscillator with the building's data."""
    decaimiento = -viscosidad / (2 * masa)                 # amplitude decay constant
    omega = np.sqrt(elastica / masa - decaimiento ** 2)    # angular frequency
    return amplitud * np.exp(decaimiento * tiempo) * np.cos(omega * tiempo)
# Build the main plot and embed it in the Tk window.
principal_fig = Figure(figsize=(5, 2))
# NOTE(review): passing xlim/ylim to gca() relies on an older Matplotlib
# API -- confirm the installed version still accepts these kwargs.
principal_ax = principal_fig.gca(xlim=(-100, 100), ylim=(0, 10))
principal_ax.grid(True)
principal_canvas = FigureCanvasTkAgg(principal_fig, master=window)
principal_canvas.draw()
# NOTE(review): grid() is used here while the data frame uses pack() on
# the same window -- Tk forbids mixing both managers in one container.
principal_canvas.get_tk_widget().grid(row=0, column=1)
def calcular_aceleracion(tiempo, masa, amplitud, elastica, viscosidad):
    """Second time-derivative (acceleration) of the damped oscillation."""
    decaimiento = -viscosidad / (2 * masa)                 # amplitude decay constant
    omega = np.sqrt(elastica / masa - decaimiento ** 2)    # angular frequency
    termino_cos = (decaimiento ** 2 - omega ** 2) * np.cos(omega * tiempo)
    termino_sin = (2 * decaimiento * omega) * np.sin(omega * tiempo)
    return amplitud * np.exp(decaimiento * tiempo) * (termino_cos - termino_sin)
def obtener_valor(variable, mensaje_error):
    """Return ``variable.get()``; any failure becomes an AssertionError.

    The AssertionError carries ``mensaje_error`` so the caller can show it
    in the error dialog.
    """
    try:
        valor = variable.get()
    except Exception as ex:
        raise AssertionError(mensaje_error) from ex
    return valor
def iniciar_simulacion():
    """Read the form values, validate the damped-motion condition and plot."""
    try:
        base = obtener_valor(base_var, "La semibase no es válida.")
        altura = obtener_valor(altura_var, "La semialtura no es válida.")
        masa = obtener_valor(masa_var, "La masa no es válida.")
        radio = obtener_valor(radio_var, "El radio no es válido.")
        amplitud = obtener_valor(amplitud_var, "La amplitud no es válida.")
        # NOTE(review): elastica_var and viscosidad_var are not defined in
        # this module's visible scope -- these two reads will raise
        # NameError; confirm the missing entry rows.
        elastica = obtener_valor(elastica_var, "La const. elástica no es válida.")
        viscosidad = obtener_valor(viscosidad_var, "El coef. viscosidad no es válido.")
        # Angle between the base and the height of the building.
        assert altura != 0, "La altura no puede ser 0."
        alfa = np.arctan(base/altura)
        # Under-damped motion requires b^2 < 4*k*m.
        msg = ("Los datos para el movimiento amortiguado no son correctos. "
               "Debe cumplirse que b^2 < 4*k*m, donde:\n"
               "- b es el coeficiente de viscosidad\n"
               "- k es la constante elástica\n"
               "- m es la masa del edificio.")
        assert viscosidad**2 < 4*elastica*masa, msg
        # Sample the position over 0..100 s and draw it on the main plot.
        frames = np.linspace(0, 100, 1001)
        posiciones = calcular_posicion(frames, masa, amplitud, elastica,
                                       viscosidad)
        principal_ax.plot(frames, posiciones, '-o')
        print(posiciones)
    except Exception as ex:
        # Any validation or plotting failure is surfaced as a dialog.
        showerror("Error", str(ex))
def detener_simulacion():
    # TODO: stopping the animation is not implemented yet.
    pass
# Start/stop buttons under the entry rows.
btn_start = tk.Button(frame, text="Iniciar", command=iniciar_simulacion)
btn_start.grid(row=7, column=0)
btn_stop = tk.Button(frame, text="Detener", command=detener_simulacion)
btn_stop.grid(row=7, column=1)
"""
# Mostrar los gráficos
frames = np.linspace(0, 100, 1001)
posiciones = calcular_posicion(frames, masa, amplitud, elastica,
viscosidad)
principal_ax.plot(frames, posiciones, '-o')
"""
# Interactuar con la ventana
window.mainloop()
| 5,057 | 1,838 |
"""File to test all meetup endpoints"""
import os
import psycopg2 as pg2
import json
from app.tests.basetest import BaseTest
# Request payloads shared by the tests below.
data = {
    "title": "Test Title",
    "body": "body"
}
comment = {
    "comment": "Comment 1"
}
class TestQuestions(BaseTest):
    """Tests for the question endpoints (create, list, comment)."""
    def test_post_question(self):
        """Posting a question to an existing meetup returns 201."""
        url = "http://localhost:5000/api/questions/1"
        response = self.post(url, data)
        result = json.loads(response.data.decode("UTF-8"))
        self.assertEqual(result["status"], 201)
        # "Succesfully" mirrors the API's actual message text.
        self.assertEqual(result["message"], "Succesfully added!")
    def test_get_questions(self):
        """Listing a meetup's questions returns 200."""
        url = "http://localhost:5000/api/questions/8"
        response = self.get_items(url)
        result = json.loads(response.data.decode("UTF-8"))
        self.assertEqual(result["status"], 200)
    def test_meetup_not_found(self):
        """Posting to a non-existent meetup reports 'Meetup not found!'."""
        url = "http://localhost:5000/api/questions/0"
        response = self.post(url, data)
        result = json.loads(response.data.decode("UTF-8"))
        self.assertEqual(result["message"], "Meetup not found!")
    def test_bad_question_url(self):
        """A wrong endpoint path reports 'Resource not found!'."""
        url = "http://localhost:5000/api/question/0"
        response = self.post(url, data)
        result = json.loads(response.data.decode("UTF-8"))
        self.assertEqual(result["message"], "Resource not found!")
    def test_comment_question(self):
        """Commenting on a question returns 201; the comment is cleaned up."""
        url = "http://localhost:5000/api/comments/1"
        response = self.post(url, comment)
        result = json.loads(response.data.decode("UTF-8"))
        self.assertEqual(result["status"], 201)
        # Remove the comment so reruns start from a clean state.
        self.delete_comment("Comment 1")
| 1,914 | 605 |
from coinbase_commerce import util
from . import APIResource
__all__ = (
    'CreateAPIResource',
)
class CreateAPIResource(APIResource):
    """
    Create operations mixin
    """
    @classmethod
    async def create(cls, **params):
        # POST the params to the resource collection and wrap the JSON
        # response in the matching API object type.
        response = await cls._api_client.post(cls.RESOURCE_PATH, data=params)
        return util.convert_to_api_object(response, cls._api_client, cls)
| 392 | 122 |
import paramiko
from .config import CONFIG
class NodeAuthException(Exception):
    """Raised when SSH authentication to a node fails (bad key or password)."""
    pass
class NodeTimeoutException(Exception):
    """Raised when a node cannot be reached within the configured timeout."""
    pass
# TODO:
# - connection management should be improved, particularly unused connections
# - what happens if connection is lost? this should be handled gracefully
class Node:
    """SSH/SFTP handle to a single remote host, lazily connected."""
    def __init__(self, name, addr, hostkeys, privpath, user=CONFIG.USER):
        self.name = name            # human-readable node name
        self.addr = addr            # host address to connect to
        self.__hostkeys = hostkeys  # path to the known-hosts file
        self.__privpath = privpath  # path to the RSA private key file
        self.__user = user
        # Connection state; populated on first use.
        self.__pkey = None
        self.__conn = None
        self.__sftp = None
    def __repr__(self):
        return "{} ({})".format(self.name, self.addr)
    def __load_private_key(self):
        """Load the RSA key once and cache it."""
        if self.__pkey is None:
            self.__pkey = paramiko.RSAKey.from_private_key_file(self.__privpath)
    def __connect_ssh(self, ignore_missing=False):
        """Open the SSH connection if not already open."""
        if self.__conn is not None:
            return
        client = paramiko.SSHClient()
        client.load_host_keys(self.__hostkeys)
        # load private key
        self.__load_private_key()
        # should we add missing keys?
        if ignore_missing:
            policy = paramiko.AutoAddPolicy()
            client.set_missing_host_key_policy(policy)
        client.connect(self.addr, username=self.__user, pkey=self.__pkey, timeout=CONFIG.TIMEOUT)
        self.__conn = client
    def __connect_sftp(self):
        """Open an SFTP channel on top of the SSH connection."""
        self.__connect_ssh()
        self.__sftp = self.__conn.open_sftp()
    def connect(self, ignore_missing=False):
        """Connect, translating paramiko errors into this module's exceptions."""
        try:
            self.__connect_ssh(ignore_missing)
        except paramiko.ssh_exception.PasswordRequiredException:
            raise NodeAuthException("Could not authenticate to the server, either the key is bad or the password")
        except paramiko.ssh_exception.NoValidConnectionsError:
            raise NodeTimeoutException("Failed to reach node, try again later")
    def disconnect(self):
        """Close the SSH connection (idempotent)."""
        if self.__conn is not None:
            self.__conn.close()
            self.__conn = None
    def copyfile(self, direction, src, dst):
        """Copy a file over SFTP; direction is 'get' (download) or 'put'."""
        if direction not in ("get", "put"):
            raise ValueError("Invalid value for direction (not 'get' or 'put')")
        self.__connect_sftp()
        if direction == "get":
            self.__sftp.get(src, dst)
        else:
            self.__sftp.put(src, dst)
    def file(self, *args, **kwargs):
        """Open a remote file object via SFTP (same signature as sftp.file)."""
        self.__connect_sftp()
        f = self.__sftp.file(*args, **kwargs)
        return f
    def run(self, command, env=None):
        """Run a command remotely; returns (exit_status, stdout, stderr)."""
        self.__connect_ssh()
        _stdin, stdout, stderr = self.__conn.exec_command(command, environment=env)
        # TODO: merge stdout and stderr!
        out = stdout.read().decode().strip()
        err = stderr.read().decode().strip()
        retval = stdout.channel.recv_exit_status()
        # WARNING: anything that comes out here can be MALICIOUS!
        return retval, out, err
    def invoke_shell(self, term="vt100", width=80, height=24):
        """Start an interactive shell channel on the remote host."""
        self.__connect_ssh()
        chan = self.__conn.invoke_shell(term=term, width=width, height=height)
        return chan
| 3,139 | 906 |
from ratcave import texture
import pytest
@pytest.fixture
def tex():
    """A default 2D texture."""
    return texture.Texture()
@pytest.fixture
def cubetex():
    """A default cube-map texture."""
    return texture.TextureCube()
@pytest.fixture
def depthtex():
    """A default depth texture."""
    return texture.DepthTexture()
def test_texture_attributes_created():
    """Textures keep their dimensions and get fresh GL ids; cube maps must be square."""
    old_id = 0
    for idx, (w, h) in enumerate([(1024, 1024), (256, 128), (200, 301)]):
        tex = texture.Texture(width=w, height=h)
        assert tex.width == w
        assert tex.height == h
        # Each new texture must receive a different GL id.
        assert tex.id != old_id
        old_id = tex.id
    cube = texture.TextureCube(width=1024, height=1024)
    assert cube.width == 1024
    assert cube.height == 1024
    # Non-square dimensions are rejected for cube maps.
    with pytest.raises(ValueError):
        cube = texture.TextureCube(width=400, height=600)
def test_texture_default_uniform_names(tex, cubetex, depthtex):
    """Each texture type exposes its own uniform pair, and renaming swaps them."""
    assert 'TextureMap' in tex.uniforms
    assert 'TextureMap_isBound' in tex.uniforms
    assert 'CubeMap' in cubetex.uniforms
    assert 'CubeMap_isBound' in cubetex.uniforms
    assert 'DepthMap' in depthtex.uniforms
    assert 'DepthMap_isBound' in depthtex.uniforms
    # Uniform names must not leak across texture types.
    assert 'CubeMap' not in tex.uniforms
    assert 'CubeMap_isBound' not in tex.uniforms
    assert 'TextureMap' not in cubetex.uniforms
    # A custom name replaces the default pair entirely...
    newtex = texture.Texture(name='NewMap')
    assert newtex.name == 'NewMap'
    assert 'NewMap' in newtex.uniforms
    assert 'NewMap_isBound' in newtex.uniforms
    assert 'TextureMap' not in newtex.uniforms
    # ...and renaming after construction does the same swap.
    newtex.name = 'Changed'
    assert newtex.name == 'Changed'
    assert 'Changed' in newtex.uniforms
    assert 'Changed_isBound' in newtex.uniforms
    assert 'NewMap' not in newtex.uniforms
    assert 'NewMap_isBound' not in newtex.uniforms
| 1,676 | 589 |
# Copyright 2020 Kaggle Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from os import path
from random import choice, gauss
def random_agent(obs, config):
    # NOTE(review): range(min, max) excludes config.max, so this agent can
    # never play the maximum action even though the interpreter accepts it
    # -- confirm whether range(min, max + 1) was intended.
    return choice(range(config.min, config.max))
def max_agent(obs, config):
    # Always plays the largest allowed action.
    return config.max
def min_agent(obs, config):
    # Always plays the smallest allowed action.
    return config.min
def avg_agent(obs, config):
    # Midpoint of the allowed range (floor division).
    return (config.min + config.max) // 2
# Registry of built-in agents by name.
agents = {
    "random": random_agent,
    "max": max_agent,
    "min": min_agent,
    "avg": avg_agent,
}
def interpreter(state, env):
    """Validate each agent's action and convert it into a (noisy) reward."""
    if env.done:
        return state

    cfg = env.configuration
    # Out-of-range (or non-numeric, treated as 0) actions invalidate the
    # agent; valid ones are rewarded with the action value plus floored
    # gaussian noise.
    for agent in state:
        value = agent.action if isinstance(agent.action, (int, float)) else 0
        if not (cfg.min <= value <= cfg.max):
            agent.status = f"Invalid action: {value}"
        else:
            agent.reward = value + gauss(0, 1) * cfg.noise // 1
            agent.status = "DONE"
    return state
def renderer(state, env):
    """Serialize each agent's action/reward pair as a JSON array string."""
    pairs = [{"action": agent.action, "reward": agent.reward} for agent in state]
    return json.dumps(pairs)
# Load the JSON specification describing this environment, located next to
# this module.
dirpath = path.dirname(__file__)
jsonpath = path.abspath(path.join(dirpath, "identity.json"))
with open(jsonpath) as f:
    specification = json.load(f)
| 1,829 | 577 |
#!flask/bin/python
from flask import Flask, jsonify
import requests
app = Flask(__name__)
@app.route('/')
def index():
    """Health-check root route."""
    return "Hello, World!"
@app.route('/signed_data', methods=['GET'])
def signed_map():
    """Fetch the Seattle open-data resource and wrap it in a JSON envelope."""
    r = requests.get('http://data.seattle.gov/resource/kb3s-zi3s.json')
    json_data = r.json()
    return jsonify({'data': json_data})
if __name__ == '__main__':
    # Debug server; not for production use.
    app.run(debug=True)
| 392 | 150 |
import pytest
from calculator import *
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(-5, 2, 'Imposible Raiz de un Negativo')
])
def testRaizDeNegativo(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).raiz() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(5, 0, 'ZeroDivisionError: division by zero')
])
def testSobreCero(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).division() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(100, 54, 154)
])
def testSumaDosNumeros(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).suma() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(150, 75, 75)
])
def testRestaDosNumeros(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).resta() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(5, 2, 25)
])
def testPotencia(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).potencia() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(100, 0, 'Sin Definir')
])
def testRaizCero(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).raiz() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(10, 0, 1)
])
def testPotenciaALaCero(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).potencia() == expected_result
@pytest.mark.parametrize("input_a, input_b, expected_result", [
(100, 23, 2300)
])
def testMultiplicacion(input_a, input_b, expected_result):
assert Calculator(input_a, input_b).multiplicacion() == expected_result
| 1,809 | 710 |
import nmap3
from colored import fg, bg, attr
import colored
import socket as sock
from Modules import intro
class nmap3_Scan() :
    """Interactive menu wrapper around nmap3 scan techniques.

    The constructor prints a menu, reads the user's choice and immediately
    dispatches to the matching scan method.
    """
    def __init__(self):
        # Coloured prompt prefixes.
        self.angry1 = colored.fg("green") + colored.attr("bold")
        self.angry = colored.fg("white") + colored.attr("bold")
        print(f"""{self.angry1}
 1 - Os
 2 - Top PORT
 3- Xmas Scan
 4 - Fin Scan
 5 - Dns brute
 6 - UDP Scan
 7 - TCP Scan
 99 - back
 """)
        self.number = str(input("[?]>>"))
        if self.number == str(1) or "use os" in self.number :
            self.Host = str(input("%s[*] Host >>"%(self.angry1)))
            self.Timing = int(input("[*] Timing >>"))
            self.OS(self.Host,self.Timing)
        if self.number == str(2) or "use top port" in self.number :
            self.Host = str(input("%s[*] Host >>"%(self.angry1)))
            self.Timing = int(input("[*] Timing >>"))
            # NOTE(review): int(input(...)) never returns None, so this
            # branch (and the identical ones below) is dead -- confirm.
            if self.Timing == None:
                self.Top_port(self.Host)
            else:
                self.Top_port(self.Host,self.Timing)
        if self.number == str(3) or "use xmas" in self.number :
            self.Host = str(input("%s[*] Host >>"%(self.angry1)))
            self.Timing = int(input("[*] Timing >>"))
            if self.Timing == None:
                self.Xmas_Scan(self.Host)
            else:
                self.Xmas_Scan(self.Host,self.Timing)
        if self.number == str(4) or "use fin" in self.number :
            self.Host = str(input("%s[*] Host >>"%(self.angry1)))
            self.Timing = int(input("[*] Timing >>"))
            if self.Timing == None:
                self.Fin_Scan(self.Host)
            else:
                self.Fin_Scan(self.Host,self.Timing)
        if self.number == str(5) or "use brute dns" in self.number :
            self.Host = str(input("%s[*] Domain >>"%(self.angry1)))
            self.Dns_Brute(self.Host)
        if self.number == str(6) or "use udp" in self.number :
            self.Host = str(input("%s[*] Host >>"%(self.angry1)))
            self.Timing = int(input("[*] Timing >>"))
            if self.Timing == None:
                self.UDP_Scan(self.Host)
            else:
                self.UDP_Scan(self.Host,self.Timing)
        if self.number == str(7) or "use tcp" in self.number :
            self.Host = str(input("%s[*] Host >>"%(self.angry1)))
            self.Timing = int(input("[*] Timing >>"))
            if self.Timing == None:
                self.TCP_Scan(self.Host)
            else:
                self.TCP_Scan(self.Host,self.Timing)
        if self.number == str(99) or "back" in self.number :
            intro.main()
    def OS(self,Host,Timing=4):
        """OS-detection scan; prints detected system names and CPEs."""
        self.Host = Host
        self.Timing = Timing
        try :
            print("Loading ........................................")
            HOST_lib = nmap3.Nmap()
            System=HOST_lib.nmap_os_detection(str(self.Host),args=f"-T{self.Timing} -vv")
            for i in System:
                print(f"System:{i['name']} CPE : {i['cpe']} ")
        except :
            # NOTE(review): bare except silently hides all failures.
            pass
    def Top_port (self,Host,Timing=4):
        """Scan the most common ports of the (resolved) host."""
        print("Loading ........................................")
        # NOTE(review): parameters Host/Timing are ignored; the method reads
        # the self.Host/self.Timing set in __init__ -- confirm intent.
        self.Host = sock.gethostbyname(self.Host)
        HOST_lib = nmap3.Nmap()
        System = HOST_lib.scan_top_ports(self.Host,self.Timing)
        for z in System[self.Host]:
            print(z['portid'],z['service']['name'],z['state'])
    def Dns_Brute(self,Host,Timing=4):
        """Brute-force subdomains of self.Host via nmap's dns-brute script."""
        print("Loading ........................................")
        HOST_lib = nmap3.NmapHostDiscovery()
        System = HOST_lib.nmap_dns_brute_script(self.Host)
        for output in System:
            print(" "+output['address']," "+output['hostname']+self.angry)
    def Xmas_Scan (self,Host,Timing=4):
        """Xmas scan (-sX) against the resolved host."""
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        HOST_lib = nmap3.NmapHostDiscovery()
        System=HOST_lib.nmap_portscan_only(str(self.Host),args=f" -sX -T{self.Timing} -vv")
        for z in System[self.Host]:
            print(z['portid'],z['service']['name'],z['state']+self.angry)
    def Fin_Scan(self,Host,Timing=4):
        """FIN scan (-sF) against the resolved host."""
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        HOST_lib = nmap3.NmapHostDiscovery()
        System=HOST_lib.nmap_portscan_only(str(self.Host),args=f" -sF -T{self.Timing} -vv")
        for z in System[self.Host]:
            print(z['portid'],z['service']['name'],z['state']+self.angry)
    def UDP_Scan(self,Host,Timing=4):
        """UDP scan against the resolved host."""
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        HOST_lib = nmap3.NmapScanTechniques()
        System=HOST_lib.nmap_udp_scan(str(self.Host),args=f"-T{self.Timing} -vv")
        for z in System[self.Host]:
            print(z['portid'],z['service']['name'],z['state']+self.angry)
    def TCP_Scan(self,Host,Timing=4):
        """TCP scan against the resolved host."""
        print("Loading ........................................")
        self.Host = sock.gethostbyname(self.Host)
        HOST_lib = nmap3.NmapScanTechniques()
        System=HOST_lib.nmap_tcp_scan(str(self.Host),args=f"-T{self.Timing} -vv")
        for z in System[self.Host]:
            print(z['portid'],z['service']['name'],z['state']+self.angry)
| 5,482 | 1,839 |
"""Serializers module."""
from rest_framework import serializers
from django_celery_results.models import TaskResult
from api import models
class ExchangeSerializer(serializers.ModelSerializer):
    """Serializer to map the Model instance into JSON format."""
    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = models.Exchange
        fields = ('id', 'name', 'created', 'updated', "url", "api_url",
                  "volume", "top_pair", "top_pair_volume", "interval",
                  "enabled", "last_data_fetch", "logo")
        read_only_fields = ('created', 'updated')
    def get_type(self, obj):
        # NOTE(review): no 'type' field is declared on this serializer, so
        # this method-field getter appears unused -- confirm before removal.
        return obj.get_type_display()
class MarketSerializer(serializers.ModelSerializer):
    """Serializer for market rows (prices and volume per pair)."""
    class Meta:
        model = models.Market
        fields = ("id", "name", "exchange", "volume", "last", "bid", "ask",
                  "base", "quote", "updated")
class ExchangeStatusSerializer(serializers.ModelSerializer):
    """Serializer to map the Model instance into JSON format."""
    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = models.ExchangeStatus
        fields = ('id', 'exchange', 'last_run', 'last_run_id',
                  'last_run_status', 'time_started', 'running')
class TaskResultSerializer(serializers.ModelSerializer):
    """Serializer exposing Celery task results stored by django-celery-results."""
    class Meta:
        model = TaskResult
        fields = ("id", "date_done", "meta", "status", "result",
                  "traceback", "task_id")
| 1,524 | 418 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
# Copyright (C) 2022 Graz University of Technology.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OAI-PMH 2.0 server."""
from flask import Blueprint, make_response
from invenio_pidstore.errors import PIDDoesNotExistError
from itsdangerous import BadSignature
from lxml import etree
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import use_args
from .. import response as xml
from ..errors import OAINoRecordsMatchError
from ..verbs import make_request_validator
blueprint = Blueprint(
'invenio_oaiserver',
__name__,
static_folder='../static',
template_folder='../templates',
)
@blueprint.errorhandler(ValidationError)
@blueprint.errorhandler(422)
def validation_error(exception):
    """Return formatter validation error."""
    # webargs wraps marshmallow errors differently depending on how the 422
    # was raised; fall back to the 'data' payload when 'messages' is absent.
    messages = getattr(exception, 'messages', None)
    if messages is None:
        messages = getattr(exception, 'data', {'messages': None})['messages']
    def extract_errors():
        """Extract errors from exception."""
        # Map each failing field to the OAI-PMH error code: a bad 'verb'
        # is badVerb, anything else is badArgument.
        if isinstance(messages, dict):
            for field, message in messages.items():
                if field == 'verb':
                    yield 'badVerb', '\n'.join(message)
                else:
                    yield 'badArgument', '\n'.join(message)
        else:
            for field in exception.field_names:
                if field == 'verb':
                    yield 'badVerb', '\n'.join(messages)
                else:
                    yield 'badArgument', '\n'.join(messages)
            if not exception.field_names:
                yield 'badArgument', '\n'.join(messages)
    return (etree.tostring(xml.error(extract_errors())),
            422,
            {'Content-Type': 'text/xml'})
@blueprint.errorhandler(PIDDoesNotExistError)
def pid_error(exception):
    """Handle PID Exceptions."""
    payload = etree.tostring(
        xml.error([('idDoesNotExist', 'No matching identifier')]))
    return payload, 422, {'Content-Type': 'text/xml'}
@blueprint.errorhandler(BadSignature)
def resumptiontoken_error(exception):
    """Handle resumption token exceptions."""
    payload = etree.tostring(xml.error([(
        'badResumptionToken',
        'The value of the resumptionToken argument is invalid or expired.')
    ]))
    return payload, 422, {'Content-Type': 'text/xml'}
@blueprint.errorhandler(OAINoRecordsMatchError)
def no_records_error(exception):
    """Handle no records match Exceptions."""
    payload = etree.tostring(xml.error([('noRecordsMatch', '')]))
    return payload, 422, {'Content-Type': 'text/xml'}
@blueprint.route('/oai2d', methods=['GET', 'POST'])
@use_args(make_request_validator)
def response(args):
    """Response endpoint."""
    # Dispatch to the xml handler named after the validated OAI-PMH verb
    # (e.g. 'ListRecords' -> xml.listrecords).
    e_tree = getattr(xml, args['verb'].lower())(**args)
    response = make_response(etree.tostring(
        e_tree,
        pretty_print=True,
        xml_declaration=True,
        encoding='UTF-8',
    ))
    response.headers['Content-Type'] = 'text/xml'
    return response
| 3,228 | 971 |
from django.db import models
from core.utils import getReverseWithUpdatedQuery
from ebayinfo.models import EbayCategory
from categories.models import Category
from core.dj_import import get_user_model
User = get_user_model()
from searching import ALL_PRIORITIES
from pyPks.Time.Output import getIsoDateTimeFromDateTime
# ### models can be FAT but not too FAT! ###
class Search(models.Model):
    """A saved ebay search: key words and/or an ebay category per owner."""
    cTitle = models.CharField( 'short description',
        help_text = 'This is just a short description -- ebay will not search for this<br>'
                    'you must have a) key word(s) and/or b) an ebay category',
        max_length = 38, null = True )
    cKeyWords = models.TextField(
        'key words -- search for these (maximum length 350 characters)',
        max_length = 350, null = True, blank = True,
        help_text = 'What you type here will go into the ebay search box '
                    '-- mulitple terms will result in an AND search '
                    '(ebay will look for all terms).<br>'
                    'search for red OR green handbags as follows: '
                    'handbags (red,green)<br>'
                    'TIPS: to exclude words, put a - in front '
                    '(without any space),<br>'
                    'search handbags but exclude red as follows: '
                    'handbags -red<br>'
                    'search for handbags but '
                    'exclude red and green as follows: handbags -red -green' )
    # max length for a single key word is 98
    #models.ForeignKey( EbayCategory, models.PositiveIntegerField(
    iEbayCategory = models.ForeignKey( EbayCategory,
        on_delete=models.CASCADE,
        verbose_name = 'ebay category',
        null = True, blank = True,
        help_text = 'Limit search to items listed in this category' )
    # ### after updating ebay categories, check whether ###
    # ### searches that were connected are still connected !!! ###
    # Fallback: a raw ebay category number pasted by the user.
    iDummyCategory = models.PositiveIntegerField( 'ebay category number',
        null = True, blank = True,
        help_text = 'Limit search to items listed in this category<br>'
                    'copy the category number from ebay and paste here!!! (sorry)' )
    cPriority = models.CharField( 'processing priority',
        max_length = 2, null = True,
        choices = ALL_PRIORITIES,
        help_text = 'high priority A1 A2 A3 ... Z9 low priority' )
    bGetBuyItNows = models.BooleanField(
        "also get 'Buy It Nows' (fixed price non auctions)?",
        help_text = 'You may get an avalanche of useless junk '
                    'if you turn this on -- be careful!',
        blank = True, null = True,
        default = False )
    bInventory = models.BooleanField(
        "also get 'Store Inventory' "
        "(fixed price items in ebay stores)?",
        help_text = 'You may get an avalanche of useless junk '
                    'if you turn this on -- be careful!',
        blank = True, null = True,
        default = False )
    iMyCategory = models.ForeignKey( Category,
        on_delete=models.DO_NOTHING,
        verbose_name = 'my category that matches ebay category',
        null = True, blank = True,
        help_text = 'Example: if you have a category for "Manuals" and '
                    'this search is in the ebay category "Vintage Manuals" '
                    'put your "Manuals" category here.<br>If you have a '
                    'category "Widgets" and this search finds an item '
                    'with "Widget Manual" in the title, the bot will know '
                    'this item is for a manual, NOT a widget.')
    # Bookkeeping for the most recent run of this search.
    tBegSearch = models.DateTimeField( 'last search started',
        null = True )
    tEndSearch = models.DateTimeField( 'last search completed',
        null = True )
    cLastResult = models.TextField( 'last search outcome', null = True )
    iUser = models.ForeignKey( User, on_delete=models.CASCADE,
        verbose_name = 'Owner' )
    tCreate = models.DateTimeField( 'created on', auto_now_add= True )
    tModify = models.DateTimeField( 'updated on', auto_now = True )
    def __str__(self):
        return self.cTitle
    class Meta:
        verbose_name_plural = 'searches'
        db_table = 'searching'
        unique_together = ( ( 'iUser', 'cPriority' ),
                           ( 'iUser', 'cTitle' ),
                           ( 'iUser', 'cKeyWords', 'iEbayCategory',) )
        ordering = ('cTitle',)
    def get_absolute_url(self):
        # Include tModify in the query string so cached detail pages refresh.
        return getReverseWithUpdatedQuery(
            'searching:detail',
            kwargs = { 'pk': self.pk, 'tModify': self.tModify } )
class SearchLog(models.Model):
    """One record per executed search run: timings, counts and outcome."""
    iSearch = models.ForeignKey( Search, on_delete=models.CASCADE,
        verbose_name = 'Search that first found this item' )
    tBegSearch = models.DateTimeField( 'search started',
        db_index = True )
    tEndSearch = models.DateTimeField( 'search completed',
        null = True )
    tBegStore = models.DateTimeField( 'processing started',
        null = True )
    tEndStore = models.DateTimeField( 'processing completed',
        null = True )
    iItems = models.PositiveIntegerField( 'items found',
        null = True )
    iStoreItems = models.PositiveIntegerField( 'items stored',
        null = True )
    iStoreUsers = models.PositiveIntegerField( 'stored for owner',
        null = True )
    iItemHits = models.PositiveIntegerField(
        'have category, brand & model',
        null = True )
    cResult = models.TextField( 'search outcome', null = True )
    cStoreDir = models.CharField( 'search files directory',
        max_length = 10,
        null = True, blank = True )
    def __str__(self):
        # Prefer the storage directory; fall back to the start timestamp.
        sSayDir = ( self.cStoreDir
                    if self.cStoreDir
                    else getIsoDateTimeFromDateTime( self.tBegSearch ) )
        return '%s - %s' % ( sSayDir, self.iSearch.cTitle )
    class Meta:
        verbose_name_plural = 'searchlogs'
        db_table = verbose_name_plural
| 6,847 | 1,781 |
# Fibonacci sequence using recursion.
# Memoized with lru_cache so the double recursion runs in O(n) instead of
# exponential time; behavior for each n is unchanged.
import functools


@functools.lru_cache(maxsize=None)
def recur_fibo(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1)."""
    if n <= 1:
        return n
    return recur_fibo(n - 1) + recur_fibo(n - 2)


# Guarded so importing this module does not block on input().
if __name__ == '__main__':
    limit = int(input("How many terms to include in fibonacci series:"))
    if limit <= 0:
        print("Please enter a positive integer")
    else:
        print("Fibonacci series:")
        for i in range(limit):
            print(recur_fibo(i))
| 367 | 145 |
"""
Extract useful tokens from multiple attributes of Galaxy tools
"""
import os
import numpy as np
import pandas as pd
import operator
import json
import utils
class ExtractTokens:
    """Extract and BM25-rank tokens from several attributes of Galaxy tools."""

    def __init__(self, tools_data_path):
        # NOTE: the original decorated every method (including __init__)
        # with @classmethod while calling them through instances, so all
        # state landed on the class object and was shared between
        # instances.  Plain instance methods fix that while keeping
        # `ExtractTokens(path).get_tokens(...)` working unchanged.
        self.tools_data_path = tools_data_path

    def _read_file(self):
        """Read the description of all tools."""
        return pd.read_csv(self.tools_data_path)

    def _extract_tokens(self, file, tokens_source):
        """Extract tokens from the description of all tools.

        Returns {source: {tool_id: token_string}} for each requested source.
        """
        tools_tokens_source = dict()
        for source in tokens_source:
            tools_tokens = dict()
            for row in file.iterrows():
                tokens = self._get_tokens_from_source(row[1], source)
                tools_tokens[row[1]["id"]] = tokens
            tools_tokens_source[source] = tools_tokens
        return tools_tokens_source

    def _get_tokens_from_source(self, row, source):
        """Fetch tokens from one source: input/output file types, the tool's
        name/description/EDAM topics, or its help text."""
        tokens = ''
        if source == 'input_output':
            # remove duplicate file types individually from input and output
            # file types, then merge
            input_tokens = utils._restore_space(utils._get_text(row, "inputs"))
            input_tokens = utils._remove_duplicate_file_types(input_tokens)
            output_tokens = utils._restore_space(utils._get_text(row, "outputs"))
            output_tokens = utils._remove_duplicate_file_types(output_tokens)
            # BUGFIX: the original compared with `is not ""`, which tests
            # object identity, not equality; use truthiness instead.
            if input_tokens and output_tokens:
                tokens = input_tokens + ' ' + output_tokens
            elif output_tokens:
                tokens = output_tokens
            elif input_tokens:
                tokens = input_tokens
        elif source == 'name_desc_edam':
            tokens = utils._restore_space(utils._get_text(row, "name")) + ' '
            tokens += utils._restore_space(utils._get_text(row, "description")) + ' '
            tokens += utils._get_text(row, "edam_topics")
        elif source == "help_text":
            tokens = utils._get_text(row, "help")
        return utils._remove_special_chars(tokens)

    def _refine_tokens(self, tokens):
        """Score every token with BM25 and drop stop words.

        Writes the ranked tokens of each source to tokens_<source>.txt next
        to the input CSV and returns {source: {tool_id: [(token, score)]}}.
        """
        k = 1.75  # BM25 term-frequency saturation parameter
        b = 0.75  # BM25 document-length normalisation parameter
        stop_words_file = "stop_words.txt"
        refined_tokens_sources = dict()
        # collect all the stopwords
        with open(stop_words_file) as file:
            all_stopwords = file.read().split("\n")
        for source in tokens:
            refined_tokens = dict()
            files = dict()
            inverted_frequency = dict()
            file_id = -1
            total_file_length = 0
            for item in tokens[source]:
                file_id += 1
                file_tokens = tokens[source][item].split(" ")
                if source in "name_desc_edam" or source in "help_text":
                    file_tokens = utils._clean_tokens(file_tokens, all_stopwords)
                total_file_length += len(file_tokens)
                term_frequency = dict()
                for token in file_tokens:
                    # BUGFIX: `is not ''` replaced with a truthiness test.
                    if token:
                        # document-frequency index: which docs hold this token
                        file_ids = inverted_frequency.setdefault(token, list())
                        if file_id not in file_ids:
                            file_ids.append(file_id)
                        # per-document term frequency
                        term_frequency[token] = term_frequency.get(token, 0) + 1
                files[item] = term_frequency
            N = len(files)
            average_file_length = float(total_file_length) / N
            # find the BM25 score for each token of each tool; it reflects how
            # important a word is to this tool relative to the other tools
            for item in files:
                file_item = files[item]
                file_length = len(file_item)
                for token in file_item:
                    tf = file_item[token]
                    # normalize the term frequency of the token per document
                    tf = float(tf) / file_length
                    idf = np.log2(N / len(inverted_frequency[token]))
                    alpha = (1 - b) + (float(b * file_length) / average_file_length)
                    tf_star = tf * float((k + 1)) / (k * alpha + tf)
                    file_item[token] = tf_star * idf
            # rank each tool's tokens by descending BM25 score
            for item in files:
                file_tokens = files[item]
                sorted_tokens = sorted(file_tokens.items(),
                                       key=operator.itemgetter(1), reverse=True)
                refined_tokens[item] = sorted_tokens
            tokens_file_name = 'tokens_' + source + '.txt'
            # BUGFIX: os.path.join was given one pre-concatenated argument;
            # pass the directory and file name as separate components.
            token_file_path = os.path.join(os.path.dirname(self.tools_data_path),
                                           tokens_file_name)
            with open(token_file_path, 'w') as file:
                file.write(json.dumps(refined_tokens))
            refined_tokens_sources[source] = refined_tokens
        return refined_tokens_sources

    def get_tokens(self, data_source):
        """Get refined tokens for every source listed in *data_source*."""
        print("Extracting tokens...")
        dataframe = self._read_file()
        tokens = self._extract_tokens(dataframe, data_source)
        return dataframe, self._refine_tokens(tokens)
| 6,319 | 1,743 |
from django.core.files import storage
from django.db import models
from gestion.utils import products_path_and_rename
from gestion.models.category import Category
from gestion.models.subCategory import SubCategory
class Product(models.Model):
    """A sellable product whose price is fixed or derived from stock entries."""
    title = models.CharField(max_length=40, unique=True)
    description = models.TextField(default="", blank=True)
    creationDate = models.DateField(auto_now_add=True)
    image = models.ImageField(upload_to=products_path_and_rename ,blank=True , null=True)
    # When forcePrice is set, `price` is used verbatim; otherwise the
    # average unit price of the product's entries wins (see getPrice).
    forcePrice = models.BooleanField(default=False)
    price = models.FloatField(default=0)
    categories = models.ManyToManyField(Category, blank=True, through="ProductCategoriesMany")
    subCategories = models.ManyToManyField(SubCategory, blank=True, through="ProductSubCategoriesMany")
    def getPrice(self):
        """Return the forced price, or the mean unit price over all entries
        (falling back to `price` when there are no entries)."""
        from .entries import Entries
        if self.forcePrice:
            return self.price
        else:
            entries = Entries.objects.filter(product=self)
            sumPrices = 0
            count = entries.count()
            # guard against division by zero when there are no entries
            if count <= 0:
                count = 1
            for entry in entries:
                sumPrices += entry.unitPrice
            if sumPrices == 0:
                return self.price
            else:
                return sumPrices / count
    def quantity(self):
        """Current stock balance from the per-product SalesAndBalance row."""
        from .SalesAndBalance import SalesAndBalance
        sales, c = SalesAndBalance.objects.get_or_create(chooseProduct=self)
        return sales.balance
    def salesCount(self):
        """Total units sold, from the per-product SalesAndBalance row."""
        from .SalesAndBalance import SalesAndBalance
        sales, c = SalesAndBalance.objects.get_or_create(chooseProduct=self)
        return sales.totalSales
    def __str__(self) -> str:
        return "{} - {}".format(self.id, self.title)
class ProductCategoriesMany(models.Model):
    """Through-table for the Product <-> Category many-to-many relation."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
class ProductSubCategoriesMany(models.Model):
    """Through-table for the Product <-> SubCategory many-to-many relation."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    subCategory = models.ForeignKey(SubCategory, on_delete=models.CASCADE)
| 2,209 | 648 |
##########################################################################
#
# Copyright (C) 2017 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
import os
import base64
import logging
from odoo import exceptions
from odoo.tests import common
_path = os.path.dirname(os.path.dirname(__file__))
_logger = logging.getLogger(__name__)
class SearchParentTestCase(common.TransactionCase):
    """Tests for the search_parents / search_read_parents model extensions."""

    def setUp(self):
        super(SearchParentTestCase, self).setUp()
        self.model = self.env['res.partner.category']

    def tearDown(self):
        super(SearchParentTestCase, self).tearDown()

    def _evaluate_parent_result(self, parents, records):
        """Every returned parent must be top-most within *records*: it has
        no parent, or its parent is outside the searched record set."""
        for parent in parents:
            self.assertTrue(
                not parent.parent_id or
                parent.parent_id.id not in records.ids
            )

    def test_search_parents(self):
        records = self.model.search([])
        parents = self.model.search_parents([])
        self._evaluate_parent_result(parents, records)

    def test_search_parents_domain(self):
        records = self.model.search([('id', '!=', 1)])
        parents = self.model.search_parents([('id', '!=', 1)])
        self._evaluate_parent_result(parents, records)

    def test_search_read_parents(self):
        parents = self.model.search_parents([])
        read_names = parents.read(['name'])
        search_names = self.model.search_read_parents([], ['name'])
        # assertEqual reports a useful diff on failure,
        # unlike assertTrue(a == b).
        self.assertEqual(read_names, search_names)
| 2,260 | 694 |
# Copyright (C) 2018-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import argparse
from bench import (
parse_args, measure_function_time, load_data, print_output, rmse_score,
float_or_int, getFPType
)
from daal4py import (
decision_forest_regression_training,
decision_forest_regression_prediction,
engines_mt2203
)
def df_regr_fit(X, y, n_trees=100, seed=12345, n_features_per_node=0,
                max_depth=0, min_impurity=0, bootstrap=True):
    """Train a daal4py decision-forest regressor and return the training result."""
    fptype = getFPType(X)
    # 0 (or an out-of-range request) means "consider every feature per node".
    n_features = X.shape[1]
    if 0 < n_features_per_node <= n_features:
        n_features = n_features_per_node
    train_algo = decision_forest_regression_training(
        fptype=fptype,
        method='defaultDense',
        nTrees=n_trees,
        observationsPerTreeFraction=1.,
        featuresPerNode=n_features,
        maxTreeDepth=max_depth,
        minObservationsInLeafNode=1,
        engine=engines_mt2203(seed=seed, fptype=fptype),
        impurityThreshold=min_impurity,
        varImportance='MDI',
        resultsToCompute='',
        memorySavingMode=False,
        bootstrap=bootstrap
    )
    return train_algo.compute(X, y)
def df_regr_predict(X, training_result):
    """Predict with a trained decision-forest model; returns the predictions."""
    predict_algo = decision_forest_regression_prediction(fptype='float')
    prediction_result = predict_algo.compute(X, training_result.model)
    return prediction_result.prediction
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='daal4py random forest '
                                     'regression benchmark')
    # BUGFIX: choices=('mse') is just the string 'mse', so argparse accepted
    # the single characters 'm', 's', 'e' as values; a one-element tuple is
    # required for a single valid choice.
    parser.add_argument('--criterion', type=str, default='mse',
                        choices=('mse',),
                        help='The function to measure the quality of a split')
    parser.add_argument('--num-trees', type=int, default=100,
                        help='Number of trees in the forest')
    parser.add_argument('--max-features', type=float_or_int, default=0,
                        help='Upper bound on features used at each split')
    parser.add_argument('--max-depth', type=int, default=0,
                        help='Upper bound on depth of constructed trees')
    parser.add_argument('--min-samples-split', type=float_or_int, default=2,
                        help='Minimum samples number for node splitting')
    parser.add_argument('--max-leaf-nodes', type=int, default=None,
                        help='Grow trees with max_leaf_nodes in best-first fashion'
                             'if it is not None')
    parser.add_argument('--min-impurity-decrease', type=float, default=0.,
                        help='Needed impurity decrease for node splitting')
    parser.add_argument('--no-bootstrap', dest='bootstrap', default=True,
                        action='store_false', help="Don't control bootstraping")
    parser.add_argument('--use-sklearn-class', action='store_true',
                        help='Force use of '
                             'sklearn.ensemble.RandomForestRegressor')
    params = parse_args(parser, prefix='daal4py')

    # Load data
    X_train, X_test, y_train, y_test = load_data(
        params, add_dtype=True, label_2d=True)

    columns = ('batch', 'arch', 'prefix', 'function', 'threads', 'dtype',
               'size', 'num_trees', 'time')

    # A fractional --max-features is interpreted as a share of the columns.
    if isinstance(params.max_features, float):
        params.max_features = int(X_train.shape[1] * params.max_features)

    # Time fit and predict
    fit_time, res = measure_function_time(
        df_regr_fit, X_train, y_train,
        n_trees=params.num_trees,
        n_features_per_node=params.max_features,
        max_depth=params.max_depth,
        min_impurity=params.min_impurity_decrease,
        bootstrap=params.bootstrap,
        seed=params.seed,
        params=params)

    yp = df_regr_predict(X_train, res)
    train_rmse = rmse_score(yp, y_train)

    predict_time, yp = measure_function_time(
        df_regr_predict, X_test, res, params=params)
    test_rmse = rmse_score(yp, y_test)

    print_output(library='daal4py', algorithm='decision_forest_regression',
                 stages=['training', 'prediction'], columns=columns,
                 params=params, functions=['df_regr.fit', 'df_regr.predict'],
                 times=[fit_time, predict_time], accuracy_type='rmse',
                 accuracies=[train_rmse, test_rmse], data=[X_train, X_test])
| 4,514 | 1,400 |
from pathlib import Path
import pandas as pd
import numpy as np
import random
import torch
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
def get_id_columns(df):
    """Return only the user id and target-user id columns of *df*."""
    id_columns = ["user_id", "target_user_id"]
    return df[id_columns]
def extranct_interacted_user_rows(df):
    """Keep only rows of users that have at least one positive label."""
    label_sums = df[["user_id", "label"]].groupby("user_id").sum()
    active_ids = label_sums.index[label_sums.label > 0]
    return df[df.user_id.isin(active_ids)]
def get_ethnicity_columns(df):
    """Collapse all ethnicity_* columns down to the user/target pair,
    moved to the end of the frame.

    NOTE: drops the ethnicity columns from the passed *df* in place and
    returns a new frame with the two kept columns re-appended.
    """
    user_ethnicity = df.ethnicity_user
    target_ethnicity = df.ethnicity_target
    dropped = [c for c in df.columns if "ethnicity_" in c]
    df.drop(dropped, axis=1, inplace=True)
    return df.assign(ethnicity_user=user_ethnicity,
                     ethnicity_target=target_ethnicity)
def calculate_user_features(df):
    """One row of *_user feature columns per user (first occurrence wins)."""
    id_col = "user_id"
    feature_cols = [c for c in df.columns
                    if "_user" in c and c != "target_user_id"]
    features = df.groupby(id_col)[feature_cols].head(1)
    # re-attach the id of each kept row
    features[id_col] = df.loc[features.index, id_col]
    return features
def calculate_target_features(df):
    """All rows of the target id plus every *_target feature column."""
    id_col = "target_user_id"
    feature_cols = [c for c in df.columns.values if "_target" in c]
    return df[[id_col] + feature_cols]
def calcurate_target_clicked(df):
    """Per-target click stats: label sum, count, and click rate.

    The result index is the index of each target's first row in *df*.
    """
    stats = df.groupby("target_user_id")["label"]\
              .agg(label_sum="sum", label_cnt="count")\
              .reset_index()
    stats = stats.assign(label_rate=stats.label_sum / stats.label_cnt)
    stats.index = df.groupby("target_user_id").head(1).index
    return stats
def get_target_ids_for_train_input(squewed_user_target_labels,
                                   valued_target_idxs, n_high, n_low):
    """Return candidate target indices for a training sample.

    Currently returns ALL target indices unconditionally; the original
    positive/negative sampling logic below the first return is dead code,
    apparently disabled on purpose (the comment said "return everything").
    """
    # Return everything (sampling disabled).
    return squewed_user_target_labels.index.values
    # --- unreachable: original n_high/n_low sampling implementation ---
    n_total = n_high + n_low
    high_rate_flag = squewed_user_target_labels.label > 0
    if len(valued_target_idxs) >= n_total:
        idxs = np.random.permutation(len(valued_target_idxs))[:n_total]
        return valued_target_idxs[idxs]
    query = ~squewed_user_target_labels.index.isin(valued_target_idxs)
    query &= high_rate_flag
    n_rest = n_total - len(valued_target_idxs)
    if n_rest == 1:
        hight = squewed_user_target_labels[query].sample(n_rest).index.values
        return np.concatenate([valued_target_idxs, hight])
    m_n_high = int(n_rest * n_high / n_total)
    m_n_low = n_rest - m_n_high
    hight = squewed_user_target_labels[query].sample(m_n_high, replace=True).index.values
    low = squewed_user_target_labels[
        squewed_user_target_labels.label == 0].sample(m_n_low, replace=True).index.values
    idxs = np.concatenate([valued_target_idxs, hight, low])
    return idxs
def get_target_ids_for_test_input(squewed_user_target_labels, n_high, n_low):
    """Return candidate target indices for an evaluation sample.

    Currently returns ALL target indices unconditionally; the sampling
    logic after the first return is dead code, apparently disabled on
    purpose (the comment said "return everything").
    """
    # Return everything (sampling disabled).
    return squewed_user_target_labels.index.values
    # --- unreachable: original n_high/n_low sampling implementation ---
    n_total = n_high + n_low
    high_rate_flag = squewed_user_target_labels.label > 0
    if sum(high_rate_flag) < n_high:
        hight = squewed_user_target_labels[high_rate_flag].index.values
        n_low = n_total - sum(high_rate_flag)
    else:
        hight = squewed_user_target_labels[high_rate_flag].sample(n_high).index.values
    low = squewed_user_target_labels[
        squewed_user_target_labels.label == 0].sample(n_low, replace=True).index.values
    idxs = np.concatenate([hight, low])
    return idxs
def get_target_ids_for_input(squewed_user_target_labels,
                             valued_target_idxs, n_high, n_low, train=True):
    """Dispatch target-id selection to the train or test variant."""
    if not train:
        return get_target_ids_for_test_input(
            squewed_user_target_labels, n_high, n_low)
    return get_target_ids_for_train_input(
        squewed_user_target_labels, valued_target_idxs, n_high, n_low)
class OwnDataset(Dataset):
    """Recommendation dataset: each item is one user's feature vector plus
    the feature matrix of candidate target users and a teacher mask."""

    def __init__(self, file_name, root_dir, n_high, n_low,
                 subset=False, transform=None, train=True, split_seed=555):
        super().__init__()
        print("Train:", train)
        self.file_name = file_name
        self.root_dir = root_dir
        self.transform = transform
        # requested positive/negative candidate counts — currently unused
        # because the sampling helpers return every target
        self.n_high = n_high
        self.n_low = n_low
        self._train = train
        self.split_seed = split_seed
        self.prepare_data()
        # pristine copy so reset() can undo later external mutation
        self.user_features_orig = self.user_features

    def __len__(self):
        return len(self.user_and_target_ids)

    def reset(self):
        # restore the state produced by prepare_data()
        self.user_features = self.user_features_orig

    def prepare_data(self):
        """Load the CSV and build user/target feature tables; users with at
        least one positive label are split 80/20 into train/test."""
        data_path = Path(self.root_dir, self.file_name)
        eme_data = pd.read_csv(data_path)
        extracted_interacted_rows = extranct_interacted_user_rows(eme_data)
        unique_user_ids = extracted_interacted_rows.user_id.unique()
        train_user_ids, test_user_ids = train_test_split(unique_user_ids,
                                                         random_state=self.split_seed,
                                                         shuffle=True,
                                                         test_size=0.2)
        # Rewards and target features always cover the FULL dataset;
        # only the user population differs between train and test.
        if self._train:
            _data = eme_data[eme_data.user_id.isin(train_user_ids)]
            self.user_features = calculate_user_features(_data)
            self.user_and_target_ids = get_id_columns(_data)
            self.rewards = eme_data[["user_id", "target_user_id", "label"]]
            self.target_features_all = calculate_target_features(eme_data)  # _data
        else:
            _data = eme_data[eme_data.user_id.isin(test_user_ids)]
            self.user_and_target_ids = get_id_columns(_data)
            self.user_features = calculate_user_features(_data)
            self.rewards = eme_data[["user_id", "target_user_id", "label"]]
            self.target_features_all = calculate_target_features(eme_data)
        print("user", self.user_features.shape)
        print("target", len(self.target_features_all.target_user_id.unique()))

    def __getitem__(self, idx):
        """Return (user features, candidate target features, user id,
        candidate target ids, mask of candidates that are true positives)."""
        ids = self.user_and_target_ids.iloc[idx].values
        current_user_id = ids[0]
        user_feature = self.user_features[self.user_features.user_id == current_user_id]
        user_feature = user_feature.copy().drop("user_id", axis=1)
        user_feature = user_feature.astype(np.float32).values
        user_feature = user_feature.reshape(-1)
        # indices of targets this user actually clicked (label == 1)
        query = (self.rewards.user_id == current_user_id)
        query &= (self.rewards.label == 1)
        valued_target_idxs = self.rewards[query].index.values
        # TODO: rename this later
        squewed_user_target_labels =\
            self.rewards.groupby("target_user_id").head(1)
        target_idxs = get_target_ids_for_input(
            squewed_user_target_labels, valued_target_idxs,
            self.n_high, self.n_low, self._train)
        # NOTE(review): reindex() with no arguments is a no-op — confirm
        # whether reset_index() was intended.
        target_features = self.target_features_all.loc[target_idxs].copy().reindex()
        target_ids = target_features.target_user_id.values
        target_features =\
            target_features.copy().drop("target_user_id", axis=1)
        target_features = target_features.astype(np.float32).values
        # mask of candidates whose id matches any clicked target
        eliminate_teacher = self.target_features_all.loc[valued_target_idxs].copy().reindex()
        eliminate_teacher_ids = eliminate_teacher.target_user_id.values
        eliminate_teacher_val = target_ids == eliminate_teacher_ids[0]
        for v in eliminate_teacher_ids[1:]:
            eliminate_teacher_val += target_ids == v
        eliminate_teacher_val = eliminate_teacher_val.astype(np.float32)
        return (torch.FloatTensor(user_feature),
                torch.FloatTensor(target_features),
                current_user_id,
                target_ids,
                eliminate_teacher_val)

    def get_reward(self, current_user_id, target_ids):
        """Max label over the (user, targets) pairs; 0.0 when none exist."""
        query_user = self.rewards.user_id == current_user_id
        query_target = self.rewards.target_user_id.isin(target_ids)
        query = (query_user) & (query_target)
        reward = self.rewards[query].label.values
        if len(reward) == 0:
            return 0.
        else:
            return float(reward.max())
def loader(dataset, batch_size, shuffle=True):
    """Wrap *dataset* in a single-process torch DataLoader."""
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=batch_size,
                                       shuffle=shuffle,
                                       num_workers=0)
| 8,658 | 3,022 |
from sql import ValorSQL
from util import guild_name_from_tag
import matplotlib.pyplot as plt
import matplotlib.dates as md
from scipy.interpolate import make_interp_spline
from matplotlib.ticker import MaxNLocator
import numpy as np
from datetime import datetime
import time
def plot_process(lock, opt, query):
    """Render guild player-count history to /tmp/valor_guild_plot.png.

    Returns (number of data points plotted, summary text for the caller).
    NOTE(review): *query* is %-formatted with each guild name; assumes the
    caller supplies a trusted query template.
    """
    a = []  # accumulated player counts
    b = []  # matching unix timestamps
    xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
    fig = plt.figure()
    fig.set_figwidth(20)
    fig.set_figheight(10)
    ax = plt.gca()
    ax.xaxis.set_major_formatter(xfmt)
    plt.xticks(rotation=25)
    data_pts = 0
    for name in opt.guild:
        # DB access is shared across processes; serialize it
        with lock:
            res = ValorSQL.execute_sync(query % name)
        if opt.split:
            # one independent line per guild
            b = np.array([x[2] for x in res])
            a = np.array([x[1] for x in res])
            if opt.moving_average > 1:
                a = np.convolve(a, np.ones(opt.moving_average)/opt.moving_average, mode="valid")
                b = b[:len(b)-opt.moving_average+1]
            if opt.smooth:
                spline = make_interp_spline(b, a)
                b = np.linspace(b.min(), b.max(), 500)
                a = spline(b)
            plt.plot([datetime.fromtimestamp(x) for x in b], a, label=name)
            plt.legend(loc="upper left")
        else:
            # sum counts across guilds point-by-point
            # NOTE(review): a/b become ndarrays after the first guild, so a
            # second guild's append() would fail — confirm multi-guild use.
            for i in range(len(res)):
                if i >= len(a):
                    a.append(0)
                    b.append(res[i][2])
                a[i] += res[i][1]
            a = np.array(a)
            b = np.array(b)
        data_pts += len(res)
    content = "Plot"
    if opt.split:
        content = "Split graph"
    else:
        content =f"""```
Mean: {sum(a)/len(a):.7}
Max: {max(a)}
Min: {min(a)}```"""
        if opt.moving_average > 1:
            a = np.convolve(a, np.ones(opt.moving_average)/opt.moving_average, mode="valid")
            b = b[:len(b)-opt.moving_average+1]
        if opt.smooth:
            spline = make_interp_spline(b, a)
            b = np.linspace(b.min(), b.max(), 500)
            a = spline(b)
        plt.plot([datetime.fromtimestamp(x) for x in b], a)
    ax.xaxis.set_major_locator(MaxNLocator(30))
    plt.title("Online Player Activity")
    plt.ylabel("Player Count")
    plt.xlabel("Date Y-m-d H:M:S")
    fig.savefig("/tmp/valor_guild_plot.png")
    return data_pts, content
| 2,320 | 851 |
import os
import shutil
from os import system
import discord
import asyncio
import os.path
import linecache
import datetime
import urllib
import requests
from bs4 import BeautifulSoup
from discord.utils import get
from discord.ext import commands
from discord.ext.commands import CommandNotFound
import logging
import itertools
import sys
import traceback
import random
import itertools
import math
from async_timeout import timeout
from functools import partial
import functools
from youtube_dl import YoutubeDL
import youtube_dl
from io import StringIO
import time
import urllib.request
from gtts import gTTS
from urllib.request import URLError
from urllib.request import HTTPError
from urllib.request import urlopen
from urllib.request import Request, urlopen
from urllib.parse import quote
import re
import warnings
import unicodedata
import json
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.tools import argparser
##################### logging ###########################
# Capture WARNING-and-above messages into an in-memory buffer instead of
# stderr, so the bot can surface its own log output later.
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
# (previous handler-based discord logger setup, kept for reference)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
def init():
    """Load the music-bot command aliases from command.ini into the global
    ``command`` list (one list of aliases per command).

    Each line of command.ini carries a 12-character prefix followed by a
    comma-separated alias list; the first line is a header and is dropped.
    """
    global command
    command = []
    # `with` guarantees the file handle is closed even on error.
    with open('command.ini', 'r', encoding='utf-8') as command_inidata:
        command_inputData = command_inidata.readlines()
    for line in command_inputData:
        command.append(line[12:].rstrip('\n').split(', '))
    del command[0]  # drop the header line
    #print (command)

init()
#mp3 파일 생성함수(gTTS 이용, 남성목소리)
async def MakeSound(saveSTR, filename):
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.wav')
'''
try:
encText = urllib.parse.quote(saveSTR)
urllib.request.urlretrieve("https://clova.ai/proxy/voice/api/tts?text=" + encText + "%0A&voicefont=1&format=wav",filename + '.wav')
except Exception as e:
print (e)
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.wav')
pass
'''
#mp3 파일 재생함수
async def PlaySound(voiceclient, filename):
source = discord.FFmpegPCMAudio(filename)
try:
voiceclient.play(source)
except discord.errors.ClientException:
while voiceclient.is_playing():
await asyncio.sleep(1)
while voiceclient.is_playing():
await asyncio.sleep(1)
voiceclient.stop()
source.cleanup()
# Silence useless bug reports messages
youtube_dl.utils.bug_reports_message = lambda: ''


class VoiceError(Exception):
    """Raised for voice-channel failures (joining, playback)."""
    pass


class YTDLError(Exception):
    """Raised when youtube-dl fails to fetch or process a source."""
    pass
class YTDLSource(discord.PCMVolumeTransformer):
    """A volume-adjustable FFmpeg audio source built from youtube-dl data."""

    YTDL_OPTIONS = {
        'format': 'bestaudio/best',
        'extractaudio': True,
        'audioformat': 'mp3',
        'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
        'restrictfilenames': True,
        'noplaylist': False,
        'nocheckcertificate': True,
        'ignoreerrors': False,
        'logtostderr': False,
        'quiet': True,
        'no_warnings': True,
        'default_search': 'auto',
        'source_address': '0.0.0.0',
        'force-ipv4' : True,
        '-4': True
    }

    FFMPEG_OPTIONS = {
        'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
        'options': '-vn',
    }

    # one shared extractor instance for all sources
    ytdl = youtube_dl.YoutubeDL(YTDL_OPTIONS)

    def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5):
        """Wrap *source*, remembering the requesting user/channel and the
        youtube-dl metadata dict for display."""
        super().__init__(source, volume)
        self.requester = ctx.author
        self.channel = ctx.channel
        self.data = data
        self.uploader = data.get('uploader')
        self.uploader_url = data.get('uploader_url')
        # upload_date arrives as YYYYMMDD; reformat to DD.MM.YYYY
        date = data.get('upload_date')
        self.upload_date = date[6:8] + '.' + date[4:6] + '.' + date[0:4]
        self.title = data.get('title')
        self.thumbnail = data.get('thumbnail')
        self.description = data.get('description')
        self.duration = self.parse_duration(int(data.get('duration')))
        self.tags = data.get('tags')
        self.url = data.get('webpage_url')
        self.views = data.get('view_count')
        self.likes = data.get('like_count')
        self.dislikes = data.get('dislike_count')
        self.stream_url = data.get('url')

    def __str__(self):
        return '**{0.title}** by **{0.uploader}**'.format(self)

    @classmethod
    async def create_source(cls, bot, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None):
        """Resolve *search* (URL or keywords) into a playable source.

        For keyword searches the top five YouTube hits are offered via
        reaction emoji; returns False when the user cancels or times out.
        Raises YTDLError when nothing could be fetched.
        """
        loop = loop or asyncio.get_event_loop()
        if "http" not in search:
            # keyword search: fetch five candidates without processing them
            partial = functools.partial(cls.ytdl.extract_info, f"ytsearch5:{search}", download=False, process=False)
            data = await loop.run_in_executor(None, partial)
            if data is None:
                raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search))
            emoji_list : list = ["1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "🚫"]
            song_list_str : str = ""
            cnt : int = 0
            song_index : int = 0
            for data_info in data["entries"]:
                cnt += 1
                if 'title' not in data_info:
                    data_info['title'] = f"{search} - 제목 정보 없음"
                song_list_str += f"`{cnt}.` [**{data_info['title']}**](https://www.youtube.com/watch?v={data_info['url']})\n"
            embed = discord.Embed(description= song_list_str)
            embed.set_footer(text=f"10초 안에 미선택시 취소됩니다.")
            song_list_message = await ctx.send(embed = embed)
            for emoji in emoji_list:
                await song_list_message.add_reaction(emoji)
            def reaction_check(reaction, user):
                # only accept the author's reaction on our own message
                return (reaction.message.id == song_list_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list)
            try:
                reaction, user = await bot.wait_for('reaction_add', check = reaction_check, timeout = 10)
            except asyncio.TimeoutError:
                reaction = "🚫"  # treat a timeout as cancellation
            for emoji in emoji_list:
                await song_list_message.remove_reaction(emoji, bot.user)
            await song_list_message.delete(delay = 10)
            if str(reaction) == "1️⃣":
                song_index = 0
            elif str(reaction) == "2️⃣":
                song_index = 1
            elif str(reaction) == "3️⃣":
                song_index = 2
            elif str(reaction) == "4️⃣":
                song_index = 3
            elif str(reaction) == "5️⃣":
                song_index = 4
            else:
                return False
            result_url = f"https://www.youtube.com/watch?v={data['entries'][song_index]['url']}"
        else:
            result_url = search
        webpage_url = result_url
        # full extraction of the chosen URL (may be a playlist)
        partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)
        processed_info = await loop.run_in_executor(None, partial)
        if processed_info is None:
            raise YTDLError('Couldn\'t fetch `{}`'.format(webpage_url))
        if 'entries' not in processed_info:
            info = processed_info
        else:
            # playlist: take the first entry that actually resolved
            info = None
            while info is None:
                try:
                    info = processed_info['entries'].pop(0)
                except IndexError:
                    raise YTDLError('Couldn\'t retrieve any matches for `{}`'.format(webpage_url))
        return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info)

    @staticmethod
    def parse_duration(duration: int):
        """Format a duration in seconds as HH:MM:SS."""
        return time.strftime('%H:%M:%S', time.gmtime(duration))
class Song:
    """A queued track: the audio source plus the member who requested it."""
    __slots__ = ('source', 'requester')

    def __init__(self, source: YTDLSource):
        self.source = source
        self.requester = source.requester

    def create_embed(self):
        """Build the 'Now playing' embed for this track."""
        embed = discord.Embed(
            title='Now playing',
            description='**```fix\n{0.source.title}\n```**'.format(self),
            color=discord.Color.blurple())
        embed.add_field(name='Duration', value=self.source.duration)
        embed.add_field(name='Requested by', value=self.requester.mention)
        embed.add_field(name='Uploader',
                        value='[{0.source.uploader}]({0.source.uploader_url})'.format(self))
        embed.add_field(name='URL', value='[Click]({0.source.url})'.format(self))
        embed.set_thumbnail(url=self.source.thumbnail)
        return embed
class SongQueue(asyncio.Queue):
    """asyncio.Queue with list-like access, shuffling, and removal."""

    def __getitem__(self, item):
        if isinstance(item, slice):
            return list(itertools.islice(self._queue,
                                         item.start, item.stop, item.step))
        return self._queue[item]

    def __iter__(self):
        return iter(self._queue)

    def __len__(self):
        return self.qsize()

    def clear(self):
        self._queue.clear()

    def shuffle(self):
        random.shuffle(self._queue)

    def select(self, index: int, loop: bool = False):
        """Advance to entry *index* (1-based): drop the preceding entries,
        or rotate them to the back when *loop* is set."""
        for _ in range(index - 1):
            head = self._queue.popleft()
            if loop:
                self._queue.append(head)

    def remove(self, index: int):
        del self._queue[index]
class VoiceState:
    """Per-guild playback state: queue, loop flag, volume, and the
    background task that feeds songs to the voice client."""

    def __init__(self, bot: commands.Bot, ctx: commands.Context):
        self.bot = bot
        self._ctx = ctx
        self._cog = ctx.cog
        self.current = None          # the Song currently playing
        self.voice = None            # discord VoiceClient once connected
        self.next = asyncio.Event()  # set when the current song finishes
        self.songs = SongQueue()
        self._loop = False
        self._volume = 0.5
        self.skip_votes = set()
        # background consumer that plays queued songs one after another
        self.audio_player = bot.loop.create_task(self.audio_player_task())

    def __del__(self):
        self.audio_player.cancel()

    @property
    def loop(self):
        return self._loop

    @loop.setter
    def loop(self, value: bool):
        self._loop = value

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value: float):
        self._volume = value

    @property
    def is_playing(self):
        return self.voice and self.current

    async def audio_player_task(self):
        """Main loop: pop the next song, play it, wait for it to finish.
        Disconnects after 3 idle minutes with an empty queue."""
        while True:
            self.next.clear()
            if self.loop and self.current is not None:
                # looping: re-resolve the current song's URL and re-queue it
                source1 = await YTDLSource.create_source(self.bot, self._ctx, self.current.source.url, loop=self.bot.loop)
                song1 = Song(source1)
                await self.songs.put(song1)
            else:
                pass
            try:
                async with timeout(180): # 3 minutes
                    self.current = await self.songs.get()
            except asyncio.TimeoutError:
                # idle too long: tear everything down
                self.bot.loop.create_task(self.stop())
                return
            self.current.source.volume = self._volume
            self.voice.play(self.current.source, after=self.play_next_song)
            play_info_msg = await self.current.source.channel.send(embed=self.current.create_embed())
            # await play_info_msg.delete(delay = 20)
            await self.next.wait()

    def play_next_song(self, error=None):
        # voice-client callback: release audio_player_task for the next song
        if error:
            raise VoiceError(str(error))
        self.next.set()

    def skip(self):
        """Stop the current song; audio_player_task then plays the next."""
        self.skip_votes.clear()
        if self.is_playing:
            self.voice.stop()

    async def stop(self):
        """Clear the queue, disconnect, and let the cog drop this state."""
        self.songs.clear()
        if self.voice:
            await self.voice.disconnect()
            self.voice = None
        self.bot.loop.create_task(self._cog.cleanup(self._ctx))
class Music(commands.Cog):
    def __init__(self, bot: commands.Bot):
        """Music cog: keeps one VoiceState per guild id."""
        self.bot = bot
        self.voice_states = {}
def get_voice_state(self, ctx: commands.Context):
state = self.voice_states.get(ctx.guild.id)
if not state:
state = VoiceState(self.bot, ctx)
self.voice_states[ctx.guild.id] = state
return state
    def cog_unload(self):
        """Stop every guild's player when the cog is unloaded."""
        for state in self.voice_states.values():
            self.bot.loop.create_task(state.stop())
def cog_check(self, ctx: commands.Context):
if not ctx.guild:
raise commands.NoPrivateMessage('This command can\'t be used in DM channels.')
return True
    async def cog_before_invoke(self, ctx: commands.Context):
        # Attach the per-guild voice state to the context before any command.
        ctx.voice_state = self.get_voice_state(ctx)
    async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError):
        # Report command errors back to the channel ("에러" = "error").
        await ctx.send('에러 : {}'.format(str(error)))
'''
@commands.command(name='join', invoke_without_subcommand=True)
async def _join(self, ctx: commands.Context):
destination = ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
'''
async def cleanup(self, ctx: commands.Context):
del self.voice_states[ctx.guild.id]
@commands.command(name=command[0][0], aliases=command[0][1:]) #음성 채널 입장
#@commands.has_permissions(manage_guild=True)
async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None):
channel = ctx.message.author.voice.channel
if not channel and not ctx.author.voice:
raise VoiceError(':no_entry_sign: 현재 접속중인 음악채널이 없습니다.')
destination = channel or ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
@commands.command(name=command[1][0], aliases=command[1][1:]) #음성 채널 퇴장
#@commands.has_permissions(manage_guild=True)
async def _leave(self, ctx: commands.Context):
if not ctx.voice_state.voice:
return await ctx.send(embed=discord.Embed(title=":no_entry_sign: 현재 접속중인 음악채널이 없습니다.",colour = 0x2EFEF7))
await ctx.voice_state.stop()
del self.voice_states[ctx.guild.id]
@commands.command(name=command[8][0], aliases=command[8][1:]) #볼륨 조절
async def _volume(self, ctx: commands.Context, *, volume: int):
vc = ctx.voice_client
if not ctx.voice_state.is_playing:
return await ctx.send(embed=discord.Embed(title=":mute: 현재 재생중인 음악이 없습니다.",colour = 0x2EFEF7))
if not 0 < volume < 101:
return await ctx.send(embed=discord.Embed(title=":no_entry_sign: 볼륨은 1 ~ 100 사이로 입력 해주세요.",colour = 0x2EFEF7))
if vc.source:
vc.source.volume = volume / 100
ctx.voice_state.volume = volume / 100
await ctx.send(embed=discord.Embed(title=f":loud_sound: 볼륨을 {volume}%로 조정하였습니다.",colour = 0x2EFEF7))
@commands.command(name=command[7][0], aliases=command[7][1:]) #현재 재생 중인 목록
async def _now(self, ctx: commands.Context):
await ctx.send(embed=ctx.voice_state.current.create_embed())
@commands.command(name=command[3][0], aliases=command[3][1:]) #음악 일시 정지
#@commands.has_permissions(manage_guild=True)
async def _pause(self, ctx: commands.Context):
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():
ctx.voice_state.voice.pause()
await ctx.message.add_reaction('⏸')
@commands.command(name=command[4][0], aliases=command[4][1:]) #음악 다시 재생
#@commands.has_permissions(manage_guild=True)
async def _resume(self, ctx: commands.Context):
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_paused():
ctx.voice_state.voice.resume()
await ctx.message.add_reaction('⏯')
@commands.command(name=command[9][0], aliases=command[9][1:]) #음악 정지
#@commands.has_permissions(manage_guild=True)
async def _stop(self, ctx: commands.Context):
ctx.voice_state.songs.clear()
if ctx.voice_state.is_playing:
ctx.voice_state.voice.stop()
await ctx.message.add_reaction('⏹')
@commands.command(name=command[5][0], aliases=command[5][1:]) #현재 음악 스킵
async def _skip(self, ctx: commands.Context, *, args: int = 1):
if not ctx.voice_state.is_playing:
return await ctx.send(embed=discord.Embed(title=':mute: 현재 재생중인 음악이 없습니다.',colour = 0x2EFEF7))
await ctx.message.add_reaction('⏭')
if args != 1:
ctx.voice_state.songs.select(args, ctx.voice_state.loop)
ctx.voice_state.skip()
'''
voter = ctx.message.author
if voter == ctx.voice_state.current.requester:
await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
elif voter.id not in ctx.voice_state.skip_votes:
ctx.voice_state.skip_votes.add(voter.id)
total_votes = len(ctx.voice_state.skip_votes)
if total_votes >= 3:
await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
else:
await ctx.send('Skip vote added, currently at **{}/3**'.format(total_votes))
else:
await ctx.send('```이미 투표하셨습니다.```')
'''
@commands.command(name=command[6][0], aliases=command[6][1:]) #재생 목록
async def _queue(self, ctx: commands.Context, *, page: int = 1):
if len(ctx.voice_state.songs) == 0:
return await ctx.send(embed=discord.Embed(title=':mute: 재생목록이 없습니다.',colour = 0x2EFEF7))
items_per_page = 10
pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue = ''
for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):
queue += '`{0}.` [**{1.source.title}**]({1.source.url})\n'.format(i + 1, song)
if ctx.voice_state.loop:
embed = discord.Embed(title = '🔁 Now playing', description='**```fix\n{0.source.title}\n```**'.format(ctx.voice_state.current))
else:
embed = discord.Embed(title = 'Now playing', description='**```fix\n{0.source.title}\n```**'.format(ctx.voice_state.current))
embed.add_field(name ='\u200B\n**{} tracks:**\n'.format(len(ctx.voice_state.songs)), value = f"\u200B\n{queue}")
embed.set_thumbnail(url=ctx.voice_state.current.source.thumbnail)
embed.set_footer(text='Viewing page {}/{}'.format(page, pages))
await ctx.send(embed=embed)
@commands.command(name=command[11][0], aliases=command[11][1:]) #음악 셔플
async def _shuffle(self, ctx: commands.Context):
if len(ctx.voice_state.songs) == 0:
return await ctx.send(embed=discord.Embed(title=':mute: 재생목록이 없습니다.',colour = 0x2EFEF7))
ctx.voice_state.songs.shuffle()
result = await ctx.send(embed=discord.Embed(title=':twisted_rightwards_arrows: 셔플 완료!',colour = 0x2EFEF7))
await result.add_reaction('🔀')
@commands.command(name=command[10][0], aliases=command[10][1:]) #음악 삭제
async def _remove(self, ctx: commands.Context, index: int):
if len(ctx.voice_state.songs) == 0:
return ctx.send(embed=discord.Embed(title=':mute: 재생목록이 없습니다.',colour = 0x2EFEF7))
# remove_result = '`{0}.` [**{1.source.title}**] 삭제 완료!\n'.format(index, ctx.voice_state.songs[index - 1])
result = await ctx.send(embed=discord.Embed(title='`{0}.` [**{1.source.title}**] 삭제 완료!\n'.format(index, ctx.voice_state.songs[index - 1]),colour = 0x2EFEF7))
ctx.voice_state.songs.remove(index - 1)
await result.add_reaction('✅')
@commands.command(name=command[14][0], aliases=command[14][1:]) #음악 반복
async def _loop(self, ctx: commands.Context):
if not ctx.voice_state.is_playing:
return await ctx.send(embed=discord.Embed(title=':mute: 현재 재생중인 음악이 없습니다.',colour = 0x2EFEF7))
# Inverse boolean value to loop and unloop.
ctx.voice_state.loop = not ctx.voice_state.loop
if ctx.voice_state.loop :
result = await ctx.send(embed=discord.Embed(title=':repeat: 반복재생이 설정되었습니다!',colour = 0x2EFEF7))
else:
result = await ctx.send(embed=discord.Embed(title=':repeat_one: 반복재생이 취소되었습니다!',colour = 0x2EFEF7))
await result.add_reaction('🔁')
@commands.command(name=command[2][0], aliases=command[2][1:]) #음악 재생
async def _play(self, ctx: commands.Context, *, search: str):
if not ctx.voice_state.voice:
await ctx.invoke(self._summon)
async with ctx.typing():
try:
source = await YTDLSource.create_source(self.bot, ctx, search, loop=self.bot.loop)
if not source:
return await ctx.send(f"노래 재생/예약이 취소 되었습니다.")
except YTDLError as e:
await ctx.send('에러가 발생했습니다 : {}'.format(str(e)))
else:
song = Song(source)
await ctx.channel.purge(limit=1)
await ctx.voice_state.songs.put(song)
await ctx.send(embed=discord.Embed(title=f':musical_note: 재생목록 추가 : {str(source)}',colour = 0x2EFEF7))
# @commands.command(name=command[13][0], aliases=command[13][1:]) #지우기
# async def clear_channel_(self, ctx: commands.Context, *, msg: int = 1):
# try:
# msg = int(msg)
# except:
# await ctx.send(f"```지우고 싶은 줄수는 [숫자]로 입력해주세요!```")
# await ctx.channel.purge(limit = msg)
@_summon.before_invoke
@_play.before_invoke
async def ensure_voice_state(self, ctx: commands.Context):
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandError('음성채널에 접속 후 사용해주십시오.')
if ctx.voice_client:
if ctx.voice_client.channel != ctx.author.voice.channel:
raise commands.CommandError('봇이 이미 음성채널에 접속해 있습니다.')
# @commands.command(name=command[12][0], aliases=command[12][1:]) #도움말
# async def menu_(self, ctx):
# command_list = ''
# command_list += '!인중 : 봇상태가 안좋을 때 쓰세요!' #!
# command_list += ','.join(command[0]) + '\n' #!들어가자
# command_list += ','.join(command[1]) + '\n' #!나가자
# command_list += ','.join(command[2]) + ' [검색어] or [url]\n' #!재생
# command_list += ','.join(command[3]) + '\n' #!일시정지
# command_list += ','.join(command[4]) + '\n' #!다시재생
# command_list += ','.join(command[5]) + ' (숫자)\n' #!스킵
# command_list += ','.join(command[6]) + ' 혹은 [명령어] + [숫자]\n' #!목록
# command_list += ','.join(command[7]) + '\n' #!현재재생
# command_list += ','.join(command[8]) + ' [숫자 1~100]\n' #!볼륨
# command_list += ','.join(command[9]) + '\n' #!정지
# command_list += ','.join(command[10]) + '\n' #!삭제
# command_list += ','.join(command[11]) + '\n' #!섞기
# command_list += ','.join(command[14]) + '\n' #!
# command_list += ','.join(command[13]) + ' [숫자]\n' #!경주
# embed = discord.Embed(
# title = "----- 명령어 -----",
# description= '```' + command_list + '```',
# color=0xff00ff
# )
# await ctx.send( embed=embed, tts=False)
################ 음성파일 생성 후 재생 ################
@commands.command(name="==인중")
async def playText_(self, ctx):
#msg = ctx.message.content[len(ctx.invoked_with)+1:]
#sayMessage = msg
await MakeSound('뮤직봇이 많이 아파요. 잠시 후 사용해주세요.', './say' + str(ctx.guild.id))
await ctx.send("```뮤직봇이 많이 아파요. 잠시 후 사용해주세요.```", tts=False)
if not ctx.voice_state.voice:
await ctx.invoke(self._summon)
if ctx.voice_state.is_playing:
ctx.voice_state.voice.stop()
await PlaySound(ctx.voice_state.voice, './say' + str(ctx.guild.id) + '.wav')
await ctx.voice_state.stop()
del self.voice_states[ctx.guild.id]
#client = commands.Bot(command_prefix='==', help_command = None)
# NOTE(review): the command prefix is the empty string, so every message is
# parsed for commands; all commands rely on their '==...' aliases instead.
client = commands.Bot('', help_command = None)
client.add_cog(Music(client))
# Naver Open API credentials (used by the translate command). This raises
# KeyError at import time if the environment variables are missing.
access_client_id = os.environ["client_id"]
access_client_secret = os.environ["client_secret"]
client_id = access_client_id
client_secret = access_client_secret
def create_soup(url, headers, timeout=10):
    """Fetch *url* and return the parsed BeautifulSoup document.

    Args:
        url: Page to fetch.
        headers: HTTP headers (typically a browser User-Agent).
        timeout: Seconds before the request aborts. New, defaults to 10;
            without it a stalled connection would hang the bot forever.

    Raises:
        requests.HTTPError: If the response status is 4xx/5xx.
    """
    res = requests.get(url, headers=headers, timeout=timeout)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, 'lxml')
    return soup
@client.event
async def on_ready():
    """Log the successful login and advertise the help command as presence."""
    print('로그인 성공: {}!'.format(client.user.name))
    presence_activity = discord.Game("==명령어")
    await client.change_presence(status=discord.Status.online, activity=presence_activity)
@client.event
async def on_command_error(ctx, error):
    """Swallow unknown-command and missing-argument errors; re-raise the rest."""
    ignored = (CommandNotFound, discord.ext.commands.MissingRequiredArgument)
    if isinstance(error, ignored):
        return
    raise error
@client.command(pass_context = True, aliases=['==명령어'])
async def cmd_cmd_abc(ctx):
    """Interactive help menu driven by emoji reactions.

    Posts a category menu, waits up to 10s for the invoker to react, then
    sends the matching command list embed (all / general / TruckersMP /
    music) or cancels.
    """
    # Delete the invoking message first.
    await ctx.channel.purge(limit=1)
    emoji_list : list = ["🅰️", "1️⃣", "2️⃣", "3️⃣", "🚫"]
    embed = discord.Embed(title = "캬루봇 명령어 목록", colour = 0x30e08b)
    embed.add_field(name = ':a: 전체', value = '전체 명령어 보기', inline = False)
    embed.add_field(name = ':one: 일반', value = '일반 명령어 보기', inline = False)
    embed.add_field(name = ':two: TruckersMP', value = 'TruckersMP 관련 명령어 보기', inline = False)
    embed.add_field(name = ':three: 음악', value = '음악 재생 관련 명령어 보기', inline = False)
    embed.add_field(name = ':no_entry_sign: 취소', value = '실행 취소', inline = False)
    cmd_message = await ctx.send(embed = embed)
    for emoji in emoji_list:
        await cmd_message.add_reaction(emoji)
    def reaction_check(reaction, user):
        # Only accept a menu emoji, on the menu message, from the invoker.
        return (reaction.message.id == cmd_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list)
    try:
        reaction, user = await client.wait_for('reaction_add', check = reaction_check, timeout = 10)
    except asyncio.TimeoutError:
        # No reaction within 10s behaves like pressing "cancel".
        reaction = "🚫"
    # NOTE(review): this loop deletes the same menu message once per emoji
    # (delete(delay=...) suppresses the resulting NotFound errors); the
    # commented line suggests remove_reaction was intended — confirm.
    for emoji in emoji_list:
        # await cmd_message.remove_reaction(emoji, client.user)
        await cmd_message.delete(delay = 0)
    await cmd_message.delete(delay = 10)
    if str(reaction) == "1️⃣":
        embed1 = discord.Embed(title = "캬루봇 명령어 목록 [일반 명령어]", colour = 0x30e08b)
        embed1.add_field(name = '==지우기 <숫자>', value = '최근 1~99개의 메세지를 삭제합니다.', inline = False)
        embed1.add_field(name = '==내정보', value = '자신의 디스코드 정보를 보여줍니다.', inline = False)
        embed1.add_field(name = '==실검', value = '네이버의 급상승 검색어 TOP10을 보여줍니다.', inline = False)
        embed1.add_field(name = '==날씨 <지역>', value = '<지역>의 날씨를 알려줍니다.', inline = False)
        embed1.add_field(name = '==말해 <text>', value = '<text>를 말합니다.', inline = False)
        embed1.add_field(name = '==번역 <언어> <text>', value = '<text>를 번역합니다.', inline = False)
        embed1.add_field(name = '==유튜브 <text>', value = '유튜브에서 <text>를 검색합니다.', inline = False)
        embed1.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg")
        await ctx.channel.send(embed = embed1)
    elif str(reaction) == "2️⃣":
        embed2 = discord.Embed(title = "캬루봇 명령어 목록 [TruckersMP]", colour = 0x30e08b)
        embed2.add_field(name = '==T정보, ==ts', value = 'TruckersMP의 정보를 보여줍니다.', inline = False)
        embed2.add_field(name = '==T프로필 <TMPID>, ==tp', value = '해당 TMPID 아이디를 가진 사람의 프로필을 보여줍니다.', inline = False)
        embed2.add_field(name = '==T트래픽순위, ==ttr', value = 'TruckersMP의 트래픽 순위 TOP5를 보여줍니다.', inline = False)
        embed2.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg")
        await ctx.channel.send(embed = embed2)
    elif str(reaction) == "3️⃣":
        embed3 = discord.Embed(title = "캬루봇 명령어 목록 [음악 재생]", colour = 0x30e08b)
        embed3.add_field(name = '==들어와', value = '봇이 음성 통화방에 들어옵니다.', inline = False)
        embed3.add_field(name = '==나가', value = '봇이 음성 통화방에서 나갑니다.', inline = False)
        embed3.add_field(name = '==재생', value = '봇이 음악을 재생합니다.', inline = False)
        embed3.add_field(name = '==일시정지', value = '현재 재생 중인 음악을 일시 정지합니다.', inline = False)
        embed3.add_field(name = '==다시재생', value = '일시 정지한 음악을 다시 재생합니다.', inline = False)
        embed3.add_field(name = '==스킵', value = '현재 재생 중인 음악을 스킵합니다.', inline = False)
        embed3.add_field(name = '==목록', value = '재생 목록을 보여줍니다.', inline = False)
        embed3.add_field(name = '==현재재생', value = '현재 재생 중인 음악을 보여줍니다.', inline = False)
        embed3.add_field(name = '==볼륨', value = '봇의 볼륨을 조절합니다.', inline = False)
        embed3.add_field(name = '==정지', value = '현재 재생 중인 음악을 정지합니다.', inline = False)
        embed3.add_field(name = '==삭제 <트랙 번호>', value = '재생 목록에 있는 특정 음악을 삭제합니다.', inline = False)
        embed3.add_field(name = '==섞기', value = '재생 목록을 섞습니다.', inline = False)
        embed3.add_field(name = '==반복', value = '현재 재생 중인 음악을 반복 재생합니다.', inline = False)
        embed3.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg")
        await ctx.channel.send(embed = embed3)
    elif str(reaction) == "🅰️":
        embed6 = discord.Embed(title = "캬루봇 명령어 목록 [전체 명령어]", colour = 0x30e08b)
        embed6.add_field(name = '==지우기 <숫자>', value = '최근 1~99개의 메세지를 삭제합니다.', inline = False)
        embed6.add_field(name = '==내정보', value = '자신의 디스코드 정보를 보여줍니다.', inline = False)
        embed6.add_field(name = '==실검', value = '네이버의 급상승 검색어 TOP10을 보여줍니다.', inline = False)
        embed6.add_field(name = '==날씨 <지역>', value = '<지역>의 날씨를 알려줍니다.', inline = False)
        embed6.add_field(name = '==말해 <내용>', value = '<내용>을 말합니다.', inline = False)
        embed6.add_field(name = '==번역 <언어> <내용>', value = '<내용>을 번역합니다.', inline = False)
        embed6.add_field(name = '==유튜브 <text>', value = '유튜브에서 <text>를 검색합니다.', inline = False)
        embed6.add_field(name = '==T정보, ==ts', value = 'TruckersMP의 서버 정보를 보여줍니다.', inline = False)
        embed6.add_field(name = '==T프로필 <TMPID>, ==tp', value = '해당 TMPID 아이디를 가진 사람의 프로필을 보여줍니다.', inline = False)
        embed6.add_field(name = '==T트래픽순위, ==ttr', value = 'TruckersMP의 트래픽 순위 TOP5를 보여줍니다.', inline = False)
        embed6.add_field(name = '==들어와', value = '봇이 음성 통화방에 들어옵니다.', inline = False)
        embed6.add_field(name = '==나가', value = '봇이 음성 통화방에서 나갑니다.', inline = False)
        embed6.add_field(name = '==재생', value = '봇이 음악을 재생합니다.', inline = False)
        embed6.add_field(name = '==일시정지', value = '현재 재생 중인 음악을 일시 정지합니다.', inline = False)
        embed6.add_field(name = '==다시재생', value = '일시 정지한 음악을 다시 재생합니다.', inline = False)
        embed6.add_field(name = '==스킵', value = '현재 재생 중인 음악을 스킵합니다.', inline = False)
        embed6.add_field(name = '==목록', value = '재생 목록을 보여줍니다.', inline = False)
        embed6.add_field(name = '==현재재생', value = '현재 재생 중인 음악을 보여줍니다.', inline = False)
        embed6.add_field(name = '==볼륨', value = '봇의 볼륨을 조절합니다.', inline = False)
        embed6.add_field(name = '==정지', value = '현재 재생 중인 음악을 정지합니다.', inline = False)
        embed6.add_field(name = '==삭제 <트랙 번호>', value = '재생 목록에 있는 특정 음악을 삭제합니다.', inline = False)
        embed6.add_field(name = '==섞기', value = '재생 목록을 섞습니다.', inline = False)
        embed6.add_field(name = '==반복', value = '현재 재생 중인 음악을 반복 재생합니다.', inline = False)
        embed6.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg")
        await ctx.channel.send(embed = embed6)
    elif str(reaction) == "🚫":
        await cmd_message.delete(delay = 0)
    else:
        return False
@client.command(pass_context = True, aliases=['==지우기'])
@commands.has_permissions(administrator=True)
async def claer_clear_abc(ctx, amount):
    """Bulk-delete the most recent *amount* (1-99) messages in this channel.

    Fix: the rejection branch (amount >= 100) used to follow its error
    message with a bogus "deleted N messages" success embed even though
    nothing was purged; that message is no longer sent.
    """
    amount = int(amount)
    if amount < 100:
        await ctx.channel.purge(limit=amount)
        embed = discord.Embed(title=f":put_litter_in_its_place: {amount}개의 채팅을 삭제했어요.",colour = 0x2EFEF7)
        embed.set_footer(text = 'Service provided by RyuZU')
        await ctx.channel.send(embed=embed)
    else:
        # Only the command message itself is removed; do not claim a purge.
        await ctx.channel.purge(limit=1)
        await ctx.channel.send(embed=discord.Embed(title=f":no_entry_sign: 숫자를 99 이하로 입력해 주세요.",colour = 0x2EFEF7))
@client.command(aliases=['==핑'])
async def ping_ping_abc(ctx):
    """Report the bot's websocket latency in milliseconds."""
    latency_ms = round(client.latency * 1000)
    await ctx.channel.send(f'퐁! `{latency_ms}ms`')
@client.command(pass_context = True, aliases=['==내정보'])
async def my_my_abc_profile(ctx):
    """Show the invoking user's profile: name, account creation date, id."""
    # Discord snowflake decoding: bits 22+ of the user id hold milliseconds
    # since the Discord epoch (2015-01-01 UTC = 1420070400000 ms).
    date = datetime.datetime.utcfromtimestamp(((int(ctx.author.id) >> 22) + 1420070400000) / 1000)
    embed = discord.Embed(title = ctx.author.display_name + "님의 정보", colour = 0x2EFEF7)
    embed.add_field(name = '사용자명', value = ctx.author.name, inline = False)
    embed.add_field(name = '가입일', value = str(date.year) + "년" + str(date.month) + "월" + str(date.day) + "일", inline = False)
    embed.add_field(name = '아이디', value = ctx.author.id, inline = False)
    embed.set_thumbnail(url = ctx.author.avatar_url)
    embed.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg")
    await ctx.channel.send(embed = embed)
@client.command(pass_context = True, aliases=['==카페'])
async def cafe_cafe_abc(ctx):
    """Send an embed linking to the official KCTG Naver cafe."""
    cafe_embed = discord.Embed(title = "KCTG 공식 카페", colour = 0x2EFEF7)
    cafe_embed.add_field(name = 'https://cafe.naver.com/kctgofficial', value = "\n\u200b", inline = False)
    cafe_embed.set_thumbnail(url = "https://cdn.discordapp.com/attachments/740877681209507880/744451389396353106/KCTG_Wolf_1.png")
    cafe_embed.set_footer(text = 'Service provided by RyuZU', icon_url="https://cdn.discordapp.com/attachments/740877681209507880/755440825667813497/20200817_184231.jpg")
    await ctx.channel.send(embed = cafe_embed)
@client.command(pass_context = True, aliases=['==실검'])
async def search_search_abc_rank(ctx):
    """Scrape Naver DataLab's realtime top-10 search terms and post them.

    Fix: the ten ranked titles are now extracted with a single ``find_all``
    call plus a loop instead of ten copy-pasted indexed lookups that each
    re-scanned the same list.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'}
    url = "https://datalab.naver.com/keyword/realtimeList.naver?where=main"
    soup = create_soup(url, headers)
    rank_list = soup.find("ul", attrs={"class":"ranking_list"})
    titles = rank_list.find_all("span", attrs={"class":"item_title"})
    # Each entry drops occurrences of its rank number ("1".."10"), exactly
    # as the original per-variable code did.
    keywords = [span.get_text().strip().replace(str(i + 1), "")
                for i, span in enumerate(titles[:10])]
    numerals = ['Ⅰ', 'Ⅱ', 'Ⅲ', 'Ⅳ', 'Ⅴ', 'Ⅵ', 'Ⅶ', 'Ⅷ', 'Ⅸ', 'Ⅹ']
    lines = [f'{num} ``{word}``' for num, word in zip(numerals, keywords)]
    time = soup.find("span", attrs={"class":"time_txt _title_hms"}).get_text()  # snapshot time shown on the page
    await ctx.channel.send('\n'.join(lines) + f'\n\n``Time[{time}]``')
@client.command(pass_context = True, aliases=['==날씨'])
async def weather_weather_abc(ctx, arg1):
    """Scrape Naver search for the weather in *arg1* and post a summary.

    NOTE(review): relies on fixed CSS class names and span indices of the
    Naver results page — breaks silently if the page layout changes.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'}
    url = f"https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query={arg1}+날씨&oquery=날씨&tqi=U1NQ%2FsprvmsssUNA1MVssssssPN-224813"
    soup = create_soup(url, headers)
    rotate = soup.find("span", attrs={"class":"btn_select"}).get_text() # region name
    cast = soup.find("p", attrs={"class":"cast_txt"}).get_text() # condition text (sunny, cloudy, ...)
    curr_temp = soup.find("p", attrs={"class":"info_temperature"}).get_text().replace("도씨", "") # current temperature
    sen_temp = soup.find("span", attrs={"class":"sensible"}).get_text().replace("체감온도", "체감") # feels-like temperature
    min_temp = soup.find("span", attrs={"class":"min"}).get_text() # today's low
    max_temp = soup.find("span", attrs={"class":"max"}).get_text() # today's high
    # Rain probability, morning and afternoon.
    morning_rain_rate = soup.find("span", attrs={"class":"point_time morning"}).get_text().strip() # morning
    afternoon_rain_rate = soup.find("span", attrs={"class":"point_time afternoon"}).get_text().strip() # afternoon
    # Fine dust and ultrafine dust readings.
    dust = soup.find("dl", attrs={"class":"indicator"})
    pm10 = dust.find_all("dd")[0].get_text() # PM10
    pm25 = dust.find_all("dd")[1].get_text() # PM2.5
    daylist = soup.find("ul", attrs={"class":"list_area _pageList"})
    tomorrow = daylist.find_all("li")[1]
    # Tomorrow's temperatures (span indices 12/14 on the current layout).
    to_min_temp = tomorrow.find_all("span")[12].get_text() # low
    to_max_temp = tomorrow.find_all("span")[14].get_text() # high
    # Tomorrow's rain probability.
    to_morning_rain_rate = daylist.find_all("span", attrs={"class":"point_time morning"})[1].get_text().strip() # morning
    to_afternoon_rain_rate = daylist.find_all("span", attrs={"class":"point_time afternoon"})[1].get_text().strip() # afternoon
    await ctx.channel.send((rotate) + f'\n오늘의 날씨 ``' + (cast) + f'``\n__기온__ ``현재 {curr_temp}({sen_temp}) 최저 {min_temp} 최고 {max_temp}``\n__강수__ ``오전 {morning_rain_rate}`` ``오후 {afternoon_rain_rate}``\n__대기__ ``미세먼지 {pm10}`` ``초미세먼지 {pm25}``\n\n내일의 날씨\n__기온__ ``최저 {to_min_temp}˚`` ``최고 {to_max_temp}˚``\n__강수__ ``오전 {to_morning_rain_rate}`` ``오후 {to_afternoon_rain_rate}``')
@client.command(pass_context = True, aliases=['==말해'])
async def tell_tell_abc(ctx, *, arg):
    """Delete the invoking message and echo *arg* back into the channel."""
    message_text = str(arg)
    # Remove the command invocation so only the echoed text remains.
    await ctx.channel.purge(limit=1)
    await ctx.channel.send(message_text)
@client.command(pass_context = True, aliases=['==T정보', '==TS', '==t정보', '==ts'])
async def tmp_tmp_abc_server_status(ctx):
    """Scrape stats.truckersmp.com and post per-server player counts.

    Fix: the seven per-server counts and statuses are collected with two
    ``find_all`` calls and a loop instead of fourteen repeated lookups
    that each re-scanned the page.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'}
    url = "https://stats.truckersmp.com/"
    soup = create_soup(url, headers)
    curr_status = soup.find("div", attrs={"class":"container-fluid"})
    # Player counts per server, in page order.
    counts = [d.get_text().strip()
              for d in curr_status.find_all("div", attrs={"class":"server-count"})[:7]]
    # On/off flags; "ONLINE"/"OFFLINE" is shortened to "ON"/"OFF" as before.
    statuses = [d.get_text().strip().replace("LINE", "")
                for d in curr_status.find_all("div", attrs={"class":"server-status ONLINE"})[:7]]
    # In-game clock shown on the page.
    curr_game_time = soup.find("span", attrs={"id":"game_time"}).get_text().strip()
    server_names = ['Simulation 1', 'Simulation 2', '[US] Simulation',
                    '[SGP] Simulation', 'Arcade', 'ProMods', 'ProMods Arcade']
    embed = discord.Embed(title = "[ETS2] TruckersMP 서버 현황", colour = 0x2EFEF7)
    for server_name, status, count in zip(server_names, statuses, counts):
        embed.add_field(name = f'`[{status}]` {server_name}', value = f"{count}", inline = False)
    embed.set_footer(text=f"서버 시간: {curr_game_time}")
    await ctx.channel.send(embed = embed)
@client.command(pass_context = True, aliases=['==T트래픽순위', '==TTR', '==t트래픽순위', '==ttr'])
async def tmp_tmp_abc_traffic(ctx):
    """Scrape traffic.krashnz.com and post the top-5 busiest locations.

    Fix: the five ranked entries are now collected with one ``find_all``
    call and a loop instead of five copy-pasted indexed lookups.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'}
    url = "https://traffic.krashnz.com/"
    soup = create_soup(url, headers)
    # Realtime traffic ranking; divs 1..5 hold the five busiest locations.
    traffic_top = soup.find("ul", attrs={"class":"list-group mb-3"})
    ranks = [d.get_text().strip() for d in traffic_top.find_all("div")[1:6]]
    g_set = soup.find("div", attrs={"class":"row text-center mb-2"})
    global_stats = g_set.find_all("span", attrs={"class":"stats-number"})
    g_player = global_stats[0].get_text().strip()
    g_time = global_stats[1].get_text().strip()
    embed = discord.Embed(title = "[ETS2] TruckersMP 실시간 트래픽 TOP5", colour = 0x2EFEF7)
    # Ranks 1-4 are plain fields; rank 5 carries the global stats line,
    # matching the original layout.
    for rank in ranks[:4]:
        embed.add_field(name = f'{rank}', value = "\n\u200b", inline = False)
    embed.add_field(name = f'{ranks[4]}', value = f"\n{g_player} players tracked / {g_time} in-game time", inline = False)
    await ctx.channel.send(embed = embed)
@client.command(pass_context = True, aliases=['==T프로필', '==TP', '==t프로필', '==tp'])
async def tmp_tmp_abc_user_profile(ctx, arg):
    """Scrape truckersmp.com/user/<arg> and post that player's profile.

    NOTE(review): depends on fixed span/heading indices of the profile
    page; the index offsets shift when an "Also known as" row is present.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Whale/2.8.105.22 Safari/537.36'}
    url = f"https://truckersmp.com/user/{arg}"
    soup = create_soup(url, headers)
    # Player bio block.
    user_status = soup.find("div", attrs={"class":"profile-bio"})
    name = user_status.find_all("span")[0].get_text().strip()
    check = user_status.find_all("strong")[0].get_text()
    if check == "Also known as":
        # Alias row present: every field sits one span further down.
        steam = user_status.find_all("span")[3].get_text().strip().replace("Steam ID:", "")
        birt = user_status.find_all("span")[5].get_text().strip().replace("Member since:", "")
        bans = user_status.find_all("span")[6].get_text().strip().replace("Active bans:", "")
    else:
        steam = user_status.find_all("span")[2].get_text().strip().replace("Steam ID:", "")
        birt = user_status.find_all("span")[4].get_text().strip().replace("Member since:", "")
        bans = user_status.find_all("span")[5].get_text().strip().replace("Active bans:", "")
    # Virtual trucking company panel, if the third panel is a VTC panel.
    vtc_check = soup.find_all("h2", attrs={"class":"panel-title heading-sm pull-left"})[2].get_text()
    if vtc_check == " VTC":
        vtc_find = soup.find_all("div", attrs={"class":"panel panel-profile"})[2]
        vtc_name = vtc_find.find("h5", attrs={"class":"text-center break-all"}).get_text().strip()
    else:
        vtc_name = "없음"
    # Profile avatar image.
    img = soup.find_all("div", attrs={"class": "col-md-3 md-margin-bottom-40"})[0]
    imgs = img.find("img", attrs={"class": "img-responsive profile-img margin-bottom-20 shadow-effect-1"})
    prof_image = imgs.get("src")
    embed = discord.Embed(title = f"[TruckersMP] {arg}'s 프로필", colour = 0x2EFEF7)
    embed.add_field(name = 'Name', value = f"{name}", inline = False)
    embed.add_field(name = 'Steam ID', value = f"{steam}", inline = False)
    embed.add_field(name = 'Member since', value = f"{birt}", inline = False)
    embed.add_field(name = 'Active bans', value = f"{bans}", inline = False)
    embed.add_field(name = 'VTC', value = f"{vtc_name}", inline = False)
    embed.set_thumbnail(url=prof_image)
    await ctx.channel.send(embed = embed)
@client.command(aliases=['==번역'])
async def _translator_abc(ctx, arg, *, content):
    """Translate *content* via the Naver Papago API.

    ``arg`` is a two-character direction code, e.g. ``한영`` = Korean →
    English. Supported letters: 한(ko) 영(en) 일(ja) 중(zh-CN). An
    unsupported letter leaves the language variables unbound, which (as
    before) surfaces as a NameError handled by the bot's error hook.

    Fixes: the character-by-character string rebuild (a no-op copy of the
    input) is removed, and the non-200 error path now stringifies the
    integer status code before concatenating it (previously a TypeError).
    """
    # letter -> (display name, Papago language code)
    LANGS = {'한': ('Korean', 'ko'), '영': ('English', 'en'),
             '일': ('Japanese', 'ja'), '중': ('Chinese', 'zh-CN')}
    content = str(content)
    if arg[0] in LANGS:
        langso, so = LANGS[arg[0]]
    if arg[1] in LANGS:
        langta, ta = LANGS[arg[1]]
    url = "https://openapi.naver.com/v1/papago/n2mt"
    trsText = str(content)
    try:
        if len(trsText) == 1:
            await ctx.channel.send("단어 혹은 문장을 입력해주세요.")
        else:
            sourcetext = trsText.strip()
            # URL-encode the text for the form-encoded POST body.
            encoded_text = quote(sourcetext)
            dataParmas = f"source={so}&target={ta}&text=" + encoded_text
            request = Request(url)
            request.add_header("X-Naver-Client-Id", client_id)
            request.add_header("X-Naver-Client-Secret", client_secret)
            response = urlopen(request, data=dataParmas.encode("utf-8"))
            responsedCode = response.getcode()
            if (responsedCode == 200):
                response_body = response.read()
                # Body is UTF-8 JSON; decode then parse.
                api_callResult = json.loads(response_body.decode('utf-8'))
                # Translated result string.
                translatedText = api_callResult['message']['result']["translatedText"]
                embed = discord.Embed(title=f"번역 ┃ {langso} → {langta}", description="", color=0x2e9fff)
                embed.add_field(name=f"{langso}", value=sourcetext, inline=False)
                embed.add_field(name=f"{langta}", value=translatedText, inline=False)
                embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/740877681209507880/755471340227526706/papago_og.png")
                embed.set_footer(text="Provided by Naver Open API",
                icon_url='https://cdn.discordapp.com/attachments/740877681209507880/755471340227526706/papago_og.png')
                await ctx.channel.send(embed=embed)
            else:
                # Fix: status code is an int; str() it before concatenation.
                await ctx.channel.send("Error Code : " + str(responsedCode))
    except HTTPError as e:
        await ctx.channel.send("번역 실패. HTTP에러 발생.")
@client.command(pass_context = True, aliases=['==유튜브'])
async def _youtube_abc_search(ctx, * , arg):
    """Search YouTube for *arg* and post the top five videos as an embed.

    Fixes vs. the original:
    * ``type="video"`` is passed to ``search.list`` so every returned item
      is guaranteed to carry ``id.videoId`` — without the filter the API may
      return channels/playlists, which raised ``KeyError`` on
      ``['id']['videoId']``.
    * the title clean-up now strips the HTML entities (``&quot;``/``&#39;``)
      the API escapes titles with; the original replaced characters with
      themselves (a no-op).
    * the five copy-pasted title/link extractions are collapsed into loops.
    """
    arg_title = str(arg)
    # Crude URL-encoding of spaces, used only for the "more results" link.
    arg = str(arg).replace(" ", "%20")
    DEVELOPER_KEY = os.environ["DEVELOPER_KEY"]
    YOUTUBE_API_SERVICE_NAME = "youtube"
    YOUTUBE_API_VERSION = "v3"
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)
    search_response = youtube.search().list(
        q = f"{arg_title}",
        order = "relevance",
        part = "snippet",
        type = "video",  # guarantee id.videoId on every result
        maxResults = 6
    ).execute()
    items = search_response['items']
    # NOTE(review): the original skipped items[0] and displayed items[1..5];
    # that behavior is kept for output parity — confirm it is intentional.
    thumbnail_img = items[1]['snippet']['thumbnails']['high']['url']
    watch = "https://www.youtube.com/watch?v="
    titles = [item['snippet']['title'].replace('&quot;', '"').replace('&#39;', "'")
              for item in items[1:6]]
    links = [watch + item['id']['videoId'] for item in items[1:6]]
    url = f"https://www.youtube.com/results?search_query={arg}"
    embed = discord.Embed(title = f":movie_camera: {arg_title} 검색 결과", colour = 0xb30e11)
    embed.set_author(name = '더보기', url = url)
    for i, (title, link) in enumerate(zip(titles, links), start=1):
        # Last field carries a trailing zero-width space, matching the
        # original embed layout.
        tail = "\n\u200b" if i == len(titles) else ""
        embed.add_field(name = "\n\u200b",
                        value = f'**{i}. [{title}]({link})**{tail}',
                        inline = False)
    embed.set_thumbnail(url=thumbnail_img)
    embed.set_footer(text='Provided by Youtube API')
    await ctx.channel.send(embed = embed)
# Bot token comes from the environment so it is never committed to source.
access_token = os.environ["BOT_TOKEN"]
# Blocking call: starts the Discord event loop and runs until the bot exits.
client.run(access_token)
| 47,627 | 21,301 |
# (mapping key, API endpoint name, HTTP methods) for every Disqus forums/*
# resource.  The resource path and docs URL are derived from the endpoint
# name, so each entry is declared exactly once.
_FORUMS_ENDPOINTS = [
    ('forums_add_moderator', 'addModerator', ['POST']),
    ('forums_create', 'create', ['POST']),
    ('forums_details', 'details', ['GET']),
    ('forums_fix_fav_icons_for_classified_forums',
     'fixFavIconsForClassifiedForums', ['GET']),
    ('forums_follow', 'follow', ['POST']),
    ('forums_generate_interesting_content', 'generateInterestingContent', ['GET']),
    ('forums_interesting_forums', 'interestingForums', ['GET']),
    ('forums_list_categories', 'listCategories', ['GET']),
    ('forums_list_followers', 'listFollowers', ['GET']),
    ('forums_list_moderators', 'listModerators', ['GET']),
    ('forums_list_most_active_users', 'listMostActiveUsers', ['GET']),
    ('forums_list_most_liked_users', 'listMostLikedUsers', ['GET']),
    ('forums_list_posts', 'listPosts', ['GET']),
    ('forums_list_threads', 'listThreads', ['GET']),
    ('forums_list_users', 'listUsers', ['GET']),
    ('forums_remove_moderator', 'removeModerator', ['POST']),
    ('forums_unfollow', 'unfollow', ['POST']),
    ('forums_update', 'update', ['POST']),
]

# Disqus "forums" API: wrapper method name -> resource path, documentation
# URL and allowed HTTP verbs.
FORUMS_MAPPING = {
    key: {
        'resource': 'forums/%s.json' % endpoint,
        'docs': 'https://disqus.com/api/docs/forums/%s/' % endpoint,
        'methods': methods,
    }
    for key, endpoint, methods in _FORUMS_ENDPOINTS
}
| 3,361 | 1,194 |
from __future__ import division, print_function
import pickle
import pdb
import os
import time
from sklearn.cross_validation import StratifiedKFold
from sklearn import svm
from sklearn import metrics
import gensim
import random
from learners import SK_SVM,SK_KNN,SK_LDA
from tuner import DE_Tune_ML
from model import PaperData
from utility import study
from results import results_process
import numpy as np
#import wget
import zipfile
from sklearn import neighbors
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
import threading
from threading import Barrier
import timeit
import multiprocessing
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.lda import LDA
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.cluster import AffinityPropagation
import collections
from multiprocessing import Queue
import pandas as pd
def tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal,
                 target_class=None):
  """Tune a learner's hyper-parameters with differential evolution.

  :param learner: learner class (e.g. SK_SVM) to instantiate and tune.
  :param train_X: training feature vectors.
  :param train_Y: training labels.
  :param tune_X: tuning (validation) feature vectors.
  :param tune_Y: tuning labels.
  :param goal: name of the metric the tuner optimizes.
  :param target_class: metric target; falls back to ``goal`` when falsy.
  :return: result of ``DE_Tune_ML.Tune()`` (tuned params, evaluation).
  """
  target = target_class if target_class else goal
  model = learner(train_X, train_Y, tune_X, tune_Y, goal)
  tuner = DE_Tune_ML(model, model.get_param(), goal, target)
  return tuner.Tune()
def load_vec(d, data, use_pkl=False, file_name=None):
  """Return document vectors for ``data``, optionally from a pickle cache.

  :param d: data object exposing ``get_document_vec(data, file_name)``.
  :param data: raw records to vectorize.
  :param use_pkl: when True, try to load the vectors from ``file_name``.
  :param file_name: path of the pickle cache (also forwarded to the builder).
  :return: the document-vector object.

  Bug fix: the original returned ``None`` when ``use_pkl`` was True but the
  cache file did not exist (and raised TypeError when ``file_name`` was
  None); it now falls back to computing the vectors in both cases.
  """
  if use_pkl and file_name and os.path.isfile(file_name):
    with open(file_name, "rb") as my_pickle:
      return pickle.load(my_pickle)
  # Cache disabled, unset or missing: build the vectors from scratch.
  return d.get_document_vec(data, file_name)
def print_results(clfs):
  """Concatenate each classifier's confusion report, print it, write it to
  disk and hand it to the results post-processor.

  :param clfs: iterable of trained learner objects exposing ``.confusion``
      (a string report fragment).

  Fix: the original first built a timestamped file name and then
  immediately overwrote it with the hard-coded one — the dead assignment
  (whose ``%H:%M:%S`` pattern is also invalid on Windows) is removed.
  NOTE(review): the output path is still hard-coded; confirm whether the
  timestamped name under ./results should be restored instead.
  """
  file_name = "20171103.txt"
  content = "".join(each.confusion for each in clfs)
  print(content)
  with open(file_name, "w") as f:
    f.write(content)
  results_process.reports(file_name)
def get_acc(cm, n_per_class=400, n_classes=4):
  """Per-class accuracy from a confusion matrix.

  :param cm: square confusion matrix (rows = true class, cols = predicted).
  :param n_per_class: number of test samples per class.  The data set used
      throughout this script has exactly 400 per class, which is kept as
      the default for backward compatibility.
  :param n_classes: how many leading classes to report on (default 4, as
      in the original hard-coded loop).
  :return: list of ``cm[i][i] / n_per_class`` for the first ``n_classes``.
  """
  return [cm[i][i] / n_per_class for i in range(n_classes)]
@study
def run_tuning_SVM(word2vec_src, repeats=1,
                   fold=10,
                   tuning=True):
  """Tune (via differential evolution) and evaluate an SVM learner.

  For each repeat, the training frame is split with stratified k-fold;
  each fold's held-out part drives the tuner, and the tuned model is
  scored on the fixed test frame.  Trained learners accumulate in ``clfs``
  and are written out by ``print_results``.

  :param word2vec_src: str, path of word2vec model
  :param repeats: int, number of repeats
  :param fold: int, number of folds
  :param tuning: boolean, tuning or not.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  # file_name=False: use_pkl defaults to False, so vectors are (re)computed.
  train_pd = load_vec(data, data.train_data, file_name=False)
  print(train_pd)
  test_pd = load_vec(data, data.test_data, file_name=False)
  learner = [SK_SVM][0]
  # Index 6 picks "Macro_F" as the optimization goal.
  goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
          7: "Micro_F"}[6]
  print(goal)
  F = {}
  clfs = []
  start = timeit.default_timer()
  for i in range(repeats):  # repeat n times here
    # Pre-0.18 scikit-learn API: the label vector is passed directly to
    # the StratifiedKFold constructor.
    kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                         shuffle=True)
    for train_index, tune_index in kf:
      print(train_pd)
      print(train_index)
      # NOTE(review): .ix is deprecated; this assumes the frame keeps its
      # default integer index so positional and label lookup agree — verify.
      train_data = train_pd.ix[train_index]
      print(train_data)
      tune_data = train_pd.ix[tune_index]
      train_X = train_data.loc[:, "Output"].values
      train_Y = train_data.loc[:, "LinkTypeId"].values
      tune_X = tune_data.loc[:, "Output"].values
      tune_Y = tune_data.loc[:, "LinkTypeId"].values
      test_X = test_pd.loc[:, "Output"].values
      test_Y = test_pd.loc[:, "LinkTypeId"].values
      # When tuning is disabled, fall back to default params and a 0 score.
      params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                        tune_Y, goal) if tuning else ({}, 0)
      clf = learner(train_X, train_Y, test_X, test_Y, goal)
      F = clf.learn(F, **params)
      clfs.append(clf)
  stop = timeit.default_timer()
  print("Model training time: ", stop - start)
  print_results(clfs)
@study
def run_tuning_KNN(word2vec_src, repeats=1,
                   fold=10,
                   tuning=True):
  """Tune (via differential evolution) and evaluate a KNN learner.

  Same protocol as ``run_tuning_SVM``: stratified k-fold on the training
  frame, tuner driven by each fold's held-out part, final scoring on the
  fixed test frame.

  :param word2vec_src: str, path of word2vec model
  :param repeats: int, number of repeats
  :param fold: int, number of folds
  :param tuning: boolean, tuning or not.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  # file_name=False: use_pkl defaults to False, so vectors are (re)computed.
  train_pd = load_vec(data, data.train_data, file_name=False)
  test_pd = load_vec(data, data.test_data, file_name=False)
  learner = [SK_KNN][0]
  # Index 6 picks "Macro_F" as the optimization goal.
  goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
          7: "Micro_F"}[6]
  F = {}
  clfs = []
  start = timeit.default_timer()
  for i in range(repeats):  # repeat n times here
    # Pre-0.18 scikit-learn API: labels passed to the CV constructor.
    kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                         shuffle=True)
    for train_index, tune_index in kf:
      # NOTE(review): .ix is deprecated; assumes a default integer index.
      train_data = train_pd.ix[train_index]
      tune_data = train_pd.ix[tune_index]
      train_X = train_data.loc[:, "Output"].values
      train_Y = train_data.loc[:, "LinkTypeId"].values
      tune_X = tune_data.loc[:, "Output"].values
      tune_Y = tune_data.loc[:, "LinkTypeId"].values
      test_X = test_pd.loc[:, "Output"].values
      test_Y = test_pd.loc[:, "LinkTypeId"].values
      # When tuning is disabled, fall back to default params and a 0 score.
      params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                        tune_Y, goal) if tuning else ({}, 0)
      clf = learner(train_X, train_Y, test_X, test_Y, goal)
      F = clf.learn(F, **params)
      clfs.append(clf)
  stop = timeit.default_timer()
  print("Model training time: ", stop - start)
  print_results(clfs)
@study
def run_tuning_LDA(word2vec_src, repeats=1,
                   fold=10,
                   tuning=True):
  """Tune (via differential evolution) and evaluate an LDA learner.

  Same protocol as ``run_tuning_SVM``, but with the SK_LDA learner and
  without the training-time measurement.

  :param word2vec_src: str, path of word2vec model
  :param repeats: int, number of repeats
  :param fold: int, number of folds
  :param tuning: boolean, tuning or not.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  # file_name=False: use_pkl defaults to False, so vectors are (re)computed.
  train_pd = load_vec(data, data.train_data, file_name=False)
  test_pd = load_vec(data, data.test_data, file_name=False)
  learner = [SK_LDA][0]
  # Index 6 picks "Macro_F" as the optimization goal.
  goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
          7: "Micro_F"}[6]
  F = {}
  clfs = []
  for i in range(repeats):  # repeat n times here
    # Pre-0.18 scikit-learn API: labels passed to the CV constructor.
    kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                         shuffle=True)
    for train_index, tune_index in kf:
      print(train_index)
      # NOTE(review): .ix is deprecated; assumes a default integer index.
      train_data = train_pd.ix[train_index]
      print(train_data)
      tune_data = train_pd.ix[tune_index]
      train_X = train_data.loc[:, "Output"].values
      train_Y = train_data.loc[:, "LinkTypeId"].values
      tune_X = tune_data.loc[:, "Output"].values
      tune_Y = tune_data.loc[:, "LinkTypeId"].values
      test_X = test_pd.loc[:, "Output"].values
      test_Y = test_pd.loc[:, "LinkTypeId"].values
      # When tuning is disabled, fall back to default params and a 0 score.
      params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                        tune_Y, goal) if tuning else ({}, 0)
      clf = learner(train_X, train_Y, test_X, test_Y, goal)
      F = clf.learn(F, **params)
      clfs.append(clf)
  print_results(clfs)
@study
def run_SVM_baseline(word2vec_src):
  """Baseline experiment: RBF-kernel SVM on word2vec document vectors.

  Trains on the train split, then prints the per-link-type classification
  report, per-class accuracy and wall-clock fit time.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  model = svm.SVC(kernel="rbf", gamma=0.005)
  embeddings = gensim.models.Word2Vec.load(word2vec_src)
  paper_data = PaperData(word2vec=embeddings)
  splits = {
      "train": load_vec(paper_data, paper_data.train_data, use_pkl=False),
      "test": load_vec(paper_data, paper_data.test_data, use_pkl=False),
  }
  X = {name: frame.loc[:, "Output"].tolist() for name, frame in splits.items()}
  Y = {name: frame.loc[:, "LinkTypeId"].tolist() for name, frame in splits.items()}
  t0 = timeit.default_timer()
  model.fit(X["train"], Y["train"])
  t1 = timeit.default_timer()
  predicted = model.predict(X["test"])
  link_types = ["1", "2", "3", "4"]
  print(metrics.classification_report(Y["test"], predicted,
                                      labels=link_types,
                                      digits=3))
  cm = metrics.confusion_matrix(Y["test"], predicted, labels=link_types)
  print("accuracy ", get_acc(cm))
  print("Model training time: ", t1 - t0)
@study
def run_LDA(word2vec_src):
  """Baseline: Linear Discriminant Analysis classifier on word2vec vectors.

  Despite the name, ``LDA`` here is ``sklearn.lda.LDA`` (the classifier),
  not topic-model LDA.  NOTE(review): the ``sklearn.lda`` module was
  removed in newer scikit-learn releases — confirm the pinned version.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  clf = LDA(solver='lsqr', shrinkage='auto')
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  train_pd = load_vec(data, data.train_data, use_pkl=False)
  test_pd = load_vec(data, data.test_data, use_pkl=False)
  train_X = train_pd.loc[:, "Output"].tolist()
  train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
  test_X = test_pd.loc[:, "Output"].tolist()
  test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
  start = timeit.default_timer()
  clf.fit(train_X, train_Y)
  stop = timeit.default_timer()
  predicted = clf.predict(test_X)
  print(metrics.classification_report(test_Y, predicted,
                                      labels=["1", "2", "3", "4"],
                                      digits=3))
  cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
  print("accuracy ", get_acc(cm))
  print("Model training time: ", stop - start)
@study
def run_LinearDiscriminantAnalysis(word2vec_src):
  """Baseline: LinearDiscriminantAnalysis on word2vec document vectors.

  Fits once with all components, picks the smallest component count that
  explains >= 99% of the variance, refits with that count, then evaluates
  on the test split.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  def select_n_components(var_ratio, goal_var: float) -> int:
    # Smallest number of components whose cumulative explained variance
    # reaches goal_var.
    total_variance = 0.0
    n_components = 0
    for explained_variance in var_ratio:
      # Accumulate the explained variance and count the component.
      total_variance += explained_variance
      n_components += 1
      # Stop once the requested level of explained variance is reached.
      if total_variance >= goal_var:
        break
    return n_components
  print("# word2vec:", word2vec_src)
  clf = LinearDiscriminantAnalysis(n_components=None)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  train_pd = load_vec(data, data.train_data, use_pkl=False)
  test_pd = load_vec(data, data.test_data, use_pkl=False)
  train_X = train_pd.loc[:, "Output"].tolist()
  train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
  test_X = test_pd.loc[:, "Output"].tolist()
  test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
  start = timeit.default_timer()
  clf.fit(train_X, train_Y)
  lda_var_ratios = clf.explained_variance_ratio_
  # Keep just enough discriminant axes to explain 99% of the variance,
  # then refit with that reduced component count.
  n_com = select_n_components(lda_var_ratios, 0.99)
  clf = LinearDiscriminantAnalysis(n_components=n_com)
  clf.fit(train_X, train_Y)
  stop = timeit.default_timer()
  predicted = clf.predict(test_X)
  print(metrics.classification_report(test_Y, predicted,
                                      labels=["1", "2", "3", "4"],
                                      digits=3))
  cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
  print("accuracy ", get_acc(cm))
  print("Model training time: ", stop - start)
@study
def run_KNN(word2vec_src):
  """Baseline: 5-nearest-neighbour classification on word2vec vectors.

  Trains on the train split, then prints the per-link-type classification
  report, per-class accuracy and wall-clock fit time.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  model = neighbors.KNeighborsClassifier(n_neighbors = 5)
  embeddings = gensim.models.Word2Vec.load(word2vec_src)
  paper_data = PaperData(word2vec=embeddings)
  splits = {
      "train": load_vec(paper_data, paper_data.train_data, use_pkl=False),
      "test": load_vec(paper_data, paper_data.test_data, use_pkl=False),
  }
  X = {name: frame.loc[:, "Output"].tolist() for name, frame in splits.items()}
  Y = {name: frame.loc[:, "LinkTypeId"].tolist() for name, frame in splits.items()}
  t0 = timeit.default_timer()
  model.fit(X["train"], Y["train"])
  t1 = timeit.default_timer()
  predicted = model.predict(X["test"])
  link_types = ["1", "2", "3", "4"]
  print(metrics.classification_report(Y["test"], predicted,
                                      labels=link_types,
                                      digits=3))
  cm = metrics.confusion_matrix(Y["test"], predicted, labels=link_types)
  print("accuracy ", get_acc(cm))
  print("Model training time: ", t1 - t0)
@study
def run_RNN(word2vec_src):
  """Baseline: radius-neighbours classification (radius = 5.0) on word2vec
  document vectors.

  NOTE: despite the name this runs ``RadiusNeighborsClassifier`` — it is
  neither a recurrent neural network nor plain KNN, as the old docstring
  claimed.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  clf = neighbors.RadiusNeighborsClassifier(radius=5.0)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  train_pd = load_vec(data, data.train_data, use_pkl=False)
  test_pd = load_vec(data, data.test_data, use_pkl=False)
  train_X = train_pd.loc[:, "Output"].tolist()
  train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
  test_X = test_pd.loc[:, "Output"].tolist()
  test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
  start = timeit.default_timer()
  clf.fit(train_X, train_Y)
  stop = timeit.default_timer()
  predicted = clf.predict(test_X)
  print(metrics.classification_report(test_Y, predicted,
                                      labels=["1", "2", "3", "4"],
                                      digits=3))
  cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
  print("accuracy ", get_acc(cm))
  print("Model training time: ", stop - start)
@study
def run_SVM_KNN(word2vec_src):
  """Two-layer experiment: an RBF SVM routes each sample to one of four
  buckets (its predicted link type); a dedicated 5-NN expert trained on
  every bucket produces the final prediction.

  Prints the first-layer report, one report per second-layer expert, the
  combined report and the overall training time.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  router = svm.SVC(kernel="rbf", gamma=0.005)
  embeddings = gensim.models.Word2Vec.load(word2vec_src)
  paper_data = PaperData(word2vec=embeddings)
  train_pd = load_vec(paper_data, paper_data.train_data, use_pkl=False)
  test_pd = load_vec(paper_data, paper_data.test_data, use_pkl=False)
  train_X = train_pd.loc[:, "Output"].tolist()
  train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
  test_X = test_pd.loc[:, "Output"].tolist()
  test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
  link_types = ["1", "2", "3", "4"]

  def bucketize(X, Y, routed):
    # Group (x, y) pairs by the label the router assigned; labels outside
    # "1".."4" are silently dropped, as in the original if/elif chain.
    buckets = {lab: ([], []) for lab in link_types}
    for x, y, lab in zip(X, Y, routed):
      if lab in buckets:
        buckets[lab][0].append(x)
        buckets[lab][1].append(y)
    return buckets

  def report(y_true, y_pred):
    return metrics.classification_report(y_true, y_pred,
                                         labels=link_types, digits=3)

  start = timeit.default_timer()
  router.fit(train_X, train_Y)
  train_buckets = bucketize(train_X, train_Y, router.predict(train_X))
  experts = {}
  for lab in link_types:
    experts[lab] = neighbors.KNeighborsClassifier(n_neighbors = 5)
    experts[lab].fit(train_buckets[lab][0], train_buckets[lab][1])
  stop = timeit.default_timer()
  predicted0 = router.predict(test_X)
  test_buckets = bucketize(test_X, test_Y, predicted0)
  expert_pred = {lab: experts[lab].predict(test_buckets[lab][0])
                 for lab in link_types}
  finalY = np.concatenate([test_buckets[lab][1] for lab in link_types])
  predicted_F = np.concatenate([expert_pred[lab] for lab in link_types])
  print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++")
  print(report(test_Y, predicted0))
  cm = metrics.confusion_matrix(test_Y, predicted0, labels=link_types)
  print("accuracy ", get_acc(cm))
  for nth, lab in zip(["1st", "2nd", "3rd", "4th"], link_types):
    print("+++++++++++++++++++2nd Layer " + nth
          + " Prediction Model+++++++++++++++++++++++++")
    print(report(test_buckets[lab][1], expert_pred[lab]))
    cm = metrics.confusion_matrix(test_buckets[lab][1], expert_pred[lab],
                                  labels=link_types)
  print("+++++++++++++++++++combined result+++++++++++++++++++++++++")
  print(report(finalY, predicted_F))
  cm = metrics.confusion_matrix(finalY, predicted_F, labels=link_types)
  print("accuracy ", get_acc(cm))
  print("Model training time: ", stop - start)
@study
def run_SVM_KNN_thread(word2vec_src):
  """Two-layer SVM->KNN experiment with threaded second-layer training.

  A first-layer RBF SVM routes every sample to one of four buckets (its
  predicted link type); a dedicated 5-NN expert is then trained on each
  bucket, each in its own thread.

  Bug fixes vs. the original:
  * ``threads.append(t)`` referenced a ``threads`` list that was never
    defined, so the function raised NameError on first use.
  * the worker threads were never joined, so prediction could race with
    the still-running ``fit`` calls; all threads are now joined before
    the layer-2 timer stops and before any ``predict``.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  clf = svm.SVC(kernel="rbf", gamma=0.005)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  train_pd = load_vec(data, data.train_data, use_pkl=False)
  test_pd = load_vec(data, data.test_data, use_pkl=False)
  train_X = train_pd.loc[:, "Output"].tolist()
  train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
  test_X = test_pd.loc[:, "Output"].tolist()
  test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
  link_types = ["1", "2", "3", "4"]

  def bucketize(X, Y, routed):
    # Group (x, y) pairs by the label the first-layer model assigned.
    buckets = {lab: ([], []) for lab in link_types}
    for x, y, lab in zip(X, Y, routed):
      if lab in buckets:
        buckets[lab][0].append(x)
        buckets[lab][1].append(y)
    return buckets

  # Layer 1: train the SVM router (timed separately).
  start0 = timeit.default_timer()
  clf.fit(train_X, train_Y)
  stop0 = timeit.default_timer()
  train_buckets = bucketize(train_X, train_Y, clf.predict(train_X))
  # Layer 2: one KNN per routed class, fitted concurrently.
  experts = {lab: neighbors.KNeighborsClassifier(n_neighbors = 5)
             for lab in link_types}
  threads = []  # fix: this list was missing entirely in the original
  start1 = timeit.default_timer()
  for lab in link_types:
    t = threading.Thread(target=experts[lab].fit, args=train_buckets[lab])
    threads.append(t)
    t.start()
  for t in threads:
    t.join()  # fix: wait for every fit before timing stops / predicting
  stop1 = timeit.default_timer()
  predicted0 = clf.predict(test_X)
  test_buckets = bucketize(test_X, test_Y, predicted0)
  expert_pred = {lab: experts[lab].predict(test_buckets[lab][0])
                 for lab in link_types}
  finalY = np.concatenate([test_buckets[lab][1] for lab in link_types])
  predicted_F = np.concatenate([expert_pred[lab] for lab in link_types])
  print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++")
  print(metrics.classification_report(test_Y, predicted0,
                                      labels=link_types,
                                      digits=3))
  cm = metrics.confusion_matrix(test_Y, predicted0, labels=link_types)
  print("accuracy ", get_acc(cm))
  for nth, lab in zip(["1st", "2nd", "3rd", "4th"], link_types):
    print("+++++++++++++++++++2nd Layer " + nth
          + " Prediction Model+++++++++++++++++++++++++")
    print(metrics.classification_report(test_buckets[lab][1], expert_pred[lab],
                                        labels=link_types,
                                        digits=3))
    cm = metrics.confusion_matrix(test_buckets[lab][1], expert_pred[lab],
                                  labels=link_types)
  print("+++++++++++++++++++combined result+++++++++++++++++++++++++")
  print(metrics.classification_report(finalY, predicted_F,
                                      labels=link_types,
                                      digits=3))
  cm = metrics.confusion_matrix(finalY, predicted_F, labels=link_types)
  print("accuracy ", get_acc(cm))
  print("1st Model training time: ", (stop0 - start0))
  print("layer 2 Models training time: ", (stop1 - start1))
  print("Total Model training time: ", (stop1 - start0))
@study
def run_KNN_SVM(word2vec_src):
  """Two-layer KNN->SVM experiment with threaded second-layer training.

  A first-layer 5-NN classifier routes every sample to one of four buckets
  (its predicted link type); a dedicated RBF SVM expert is then trained on
  each bucket, each in its own thread.

  Bug fixes vs. the original:
  * ``threads.append(t)`` referenced a ``threads`` list that was never
    defined, so the function raised NameError on first use.
  * the worker threads were never joined, so prediction could race with
    the still-running ``fit`` calls; all threads are now joined before
    the layer-2 timer stops and before any ``predict``.

  :param word2vec_src: path of the saved word2vec model.
  :return: None
  """
  print("# word2vec:", word2vec_src)
  clf = neighbors.KNeighborsClassifier(n_neighbors = 5)
  word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
  data = PaperData(word2vec=word2vec_model)
  train_pd = load_vec(data, data.train_data, use_pkl=False)
  test_pd = load_vec(data, data.test_data, use_pkl=False)
  train_X = train_pd.loc[:, "Output"].tolist()
  train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
  test_X = test_pd.loc[:, "Output"].tolist()
  test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
  link_types = ["1", "2", "3", "4"]

  def bucketize(X, Y, routed):
    # Group (x, y) pairs by the label the first-layer model assigned.
    buckets = {lab: ([], []) for lab in link_types}
    for x, y, lab in zip(X, Y, routed):
      if lab in buckets:
        buckets[lab][0].append(x)
        buckets[lab][1].append(y)
    return buckets

  # Layer 1: train the KNN router (timed separately).
  start0 = timeit.default_timer()
  clf.fit(train_X, train_Y)
  stop0 = timeit.default_timer()
  train_buckets = bucketize(train_X, train_Y, clf.predict(train_X))
  # Layer 2: one SVM per routed class, fitted concurrently.
  experts = {lab: svm.SVC(kernel="rbf", gamma=0.005)
             for lab in link_types}
  threads = []  # fix: this list was missing entirely in the original
  start1 = timeit.default_timer()
  for lab in link_types:
    t = threading.Thread(target=experts[lab].fit, args=train_buckets[lab])
    threads.append(t)
    t.start()
  for t in threads:
    t.join()  # fix: wait for every fit before timing stops / predicting
  stop1 = timeit.default_timer()
  predicted0 = clf.predict(test_X)
  test_buckets = bucketize(test_X, test_Y, predicted0)
  expert_pred = {lab: experts[lab].predict(test_buckets[lab][0])
                 for lab in link_types}
  finalY = np.concatenate([test_buckets[lab][1] for lab in link_types])
  predicted_F = np.concatenate([expert_pred[lab] for lab in link_types])
  print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++")
  print(metrics.classification_report(test_Y, predicted0,
                                      labels=link_types,
                                      digits=3))
  cm = metrics.confusion_matrix(test_Y, predicted0, labels=link_types)
  print("accuracy ", get_acc(cm))
  for nth, lab in zip(["1st", "2nd", "3rd", "4th"], link_types):
    print("+++++++++++++++++++2nd Layer " + nth
          + " Prediction Model+++++++++++++++++++++++++")
    print(metrics.classification_report(test_buckets[lab][1], expert_pred[lab],
                                        labels=link_types,
                                        digits=3))
    cm = metrics.confusion_matrix(test_buckets[lab][1], expert_pred[lab],
                                  labels=link_types)
  print("+++++++++++++++++++combined result+++++++++++++++++++++++++")
  print(metrics.classification_report(finalY, predicted_F,
                                      labels=link_types,
                                      digits=3))
  cm = metrics.confusion_matrix(finalY, predicted_F, labels=link_types)
  print("accuracy ", get_acc(cm))
  print("1st Model training time: ", (stop0 - start0))
  print("layer 2 Models training time: ", (stop1 - start1))
  print("Total Model training time: ", (stop1 - start0))
@study
def run_KNN_KNN(word2vec_src):
    """
    Run the two-layer KNN + word-embedding experiment.

    Layer 1: a KNN (k=5) is fit on the whole training set and its
    predictions partition the training samples into four per-class buckets.
    Layer 2: one KNN (k=10) per bucket is fit in worker threads and used to
    re-predict the test samples layer 1 routed to that bucket.
    Classification reports and timing figures are printed.

    :param word2vec_src: str, path of the word2vec model file.
    :return: None
    """
    classX1 = []
    classX2 = []
    classX3 = []
    classX4 = []
    classY1 = []
    classY2 = []
    classY3 = []
    classY4 = []
    classTX1 = []
    classTX2 = []
    classTX3 = []
    classTX4 = []
    classTY1 = []
    classTY2 = []
    classTY3 = []
    classTY4 = []
    TrainingSamplesX = []
    TrainingSamplesY = []
    models = []
    predicted_F = []
    finalY = []
    print("# word2vec:", word2vec_src)
    # Layer-1 classifier.
    clf = neighbors.KNeighborsClassifier(n_neighbors=5)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
    test_X = test_pd.loc[:, "Output"].tolist()
    test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
    start0 = timeit.default_timer()
    clf.fit(train_X, train_Y)
    stop0 = timeit.default_timer()
    # Bucket the *training* samples by the layer-1 prediction (class labels
    # are the strings '1'..'4').
    predicted = clf.predict(train_X)
    for i in range(len(predicted)):
        if predicted[i] == '1':
            classX1.append(train_X[i])
            classY1.append(train_Y[i])
        elif predicted[i] == '2':
            classX2.append(train_X[i])
            classY2.append(train_Y[i])
        elif predicted[i] == '3':
            classX3.append(train_X[i])
            classY3.append(train_Y[i])
        elif predicted[i] == '4':
            classX4.append(train_X[i])
            classY4.append(train_Y[i])
    TrainingSamplesX.append(classX1)
    TrainingSamplesY.append(classY1)
    TrainingSamplesX.append(classX2)
    TrainingSamplesY.append(classY2)
    TrainingSamplesX.append(classX3)
    TrainingSamplesY.append(classY3)
    TrainingSamplesX.append(classX4)
    TrainingSamplesY.append(classY4)
    # Layer-2 classifiers, one per bucket.
    clf2 = neighbors.KNeighborsClassifier(n_neighbors=10)
    clf3 = neighbors.KNeighborsClassifier(n_neighbors=10)
    clf4 = neighbors.KNeighborsClassifier(n_neighbors=10)
    clf5 = neighbors.KNeighborsClassifier(n_neighbors=10)
    models.append(clf2)
    models.append(clf3)
    models.append(clf4)
    models.append(clf5)
    start1 = timeit.default_timer()
    layer2_threads = []
    for i in range(len(TrainingSamplesX)):
        t = threading.Thread(target=models[i].fit,
                             args=[TrainingSamplesX[i], TrainingSamplesY[i]])
        threads.append(t)  # module-level bookkeeping list
        layer2_threads.append(t)
        t.start()
    # BUG FIX: join the fitting threads before stopping the timer and before
    # predicting. Previously the threads were never joined, so predict()
    # could race against (or run before) fit() on the layer-2 models, and
    # stop1 under-reported the training time.
    for t in layer2_threads:
        t.join()
    stop1 = timeit.default_timer()
    # Route the *test* samples through layer 1 into the same buckets.
    predicted0 = clf.predict(test_X)
    for i in range(len(predicted0)):
        if predicted0[i] == '1':
            classTX1.append(test_X[i])
            classTY1.append(test_Y[i])
        elif predicted0[i] == '2':
            classTX2.append(test_X[i])
            classTY2.append(test_Y[i])
        elif predicted0[i] == '3':
            classTX3.append(test_X[i])
            classTY3.append(test_Y[i])
        elif predicted0[i] == '4':
            classTX4.append(test_X[i])
            classTY4.append(test_Y[i])
    predicted1 = clf2.predict(classTX1)
    predicted2 = clf3.predict(classTX2)
    predicted3 = clf4.predict(classTX3)
    predicted4 = clf5.predict(classTX4)
    # Re-assemble per-bucket truths/predictions in the same order for the
    # combined report.
    finalY = np.append(classTY1, classTY2)
    finalY = np.append(finalY, classTY3)
    finalY = np.append(finalY, classTY4)
    predicted_F = np.append(predicted1, predicted2)
    predicted_F = np.append(predicted_F, predicted3)
    predicted_F = np.append(predicted_F, predicted4)
    print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++")
    print(metrics.classification_report(test_Y, predicted0,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(test_Y, predicted0, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("+++++++++++++++++++2nd Layer 1st Prediction Model+++++++++++++++++++++++++")
    print(metrics.classification_report(classTY1, predicted1,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(classTY1, predicted1, labels=["1", "2", "3", "4"])
    print("+++++++++++++++++++2nd Layer 2nd Prediction Model+++++++++++++++++++++++++")
    print(metrics.classification_report(classTY2, predicted2,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(classTY2, predicted2, labels=["1", "2", "3", "4"])
    print("+++++++++++++++++++2nd Layer 3rd Prediction Model+++++++++++++++++++++++++")
    print(metrics.classification_report(classTY3, predicted3,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(classTY3, predicted3, labels=["1", "2", "3", "4"])
    print("+++++++++++++++++++2nd Layer 4th Prediction Model+++++++++++++++++++++++++")
    print(metrics.classification_report(classTY4, predicted4,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(classTY4, predicted4, labels=["1", "2", "3", "4"])
    print("+++++++++++++++++++combined result+++++++++++++++++++++++++")
    print(metrics.classification_report(finalY, predicted_F,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(finalY, predicted_F, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("1st Model training time: ", (stop0 - start0))
    print("layer 2 Models training time: ", (stop1 - start1))
    print("Total Model training time: ", (stop1 - start0))
@study
def run_KMeans_Wpair(word2vec_src):
    """
    Run the KMeans + word-embedding experiment on post/related-post pairs.

    The "PostIdVec" and "RelatedPostIdVec" vectors (each with the shared
    "LinkTypeId" labels) are concatenated into a single training set,
    clustered into 4 groups, and the clustering quality is reported against
    the link-type labels.

    :param word2vec_src: str, path of the word2vec model file.
    :return: None
    """
    print("# word2vec:", word2vec_src)
    clf = KMeans(n_clusters=4, init='k-means++', max_iter=100, n_init=1)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "PostIdVec"].tolist()
    train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
    train_X1 = train_pd.loc[:, "RelatedPostIdVec"].tolist()
    train_Y1 = train_pd.loc[:, "LinkTypeId"].tolist()
    # BUG FIX: np.append() returns a new array and its result was previously
    # discarded, so the related-post vectors never made it into the training
    # set. Concatenate the Python lists and keep the result.
    train_X = train_X + train_X1
    train_Y = train_Y + train_Y1
    test_X = test_pd.loc[:, "PostIdVec"].tolist()
    test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
    clf.fit(train_X, train_Y)  # KMeans ignores the y argument
    predicted = clf.predict(test_X)
    print(predicted)
    # Shift cluster ids 0..3 to 1..4 so they are comparable to the labels.
    x = list(np.asarray(clf.labels_) + 1)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(train_Y, x))
    print("Completeness: %0.3f" % metrics.completeness_score(train_Y, clf.labels_))
    print("V-measure: %0.3f" % metrics.v_measure_score(train_Y, clf.labels_))
    print("Adjusted Rand-Index: %.3f"
          % metrics.adjusted_rand_score(train_Y, clf.labels_))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(train_X, clf.labels_, sample_size=1000))
#################Katie's Code +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# returns the svm model
def run_SVM(word2vec_src, train_pd, queue):
    """
    Fit an RBF-kernel SVM on the "Output" vectors of *train_pd*.

    The fitted model is pushed onto *queue* (for thread-based callers) and
    also returned.

    :param word2vec_src: str, path of the word2vec model (unused here).
    :param train_pd: DataFrame with "Output" and "LinkTypeId" columns.
    :param queue: Queue receiving the fitted classifier.
    :return: the fitted svm.SVC instance.
    """
    model = svm.SVC(kernel="rbf", gamma=0.005)
    features = train_pd.loc[:, "Output"].tolist()
    labels = train_pd.loc[:, "LinkTypeId"].tolist()
    began = timeit.default_timer()
    model.fit(features, labels)
    ended = timeit.default_timer()
    print("SVM Model Train Time", (ended - began))
    queue.put(model)
    return model
def run_KNN_clustering(word2vec_src, train_pd, queue):
    """
    Fit a KNN (k=10) classifier on the "Output" vectors of *train_pd*.

    The fitted model is pushed onto *queue* (for thread-based callers) and
    also returned.

    :param word2vec_src: str, path of the word2vec model (logged only).
    :param train_pd: DataFrame with "Output" and "LinkTypeId" columns.
    :param queue: Queue receiving the fitted classifier.
    :return: the fitted KNeighborsClassifier.
    """
    print("# word2vec:", word2vec_src)
    clf = neighbors.KNeighborsClassifier(n_neighbors=10)
    train_X = train_pd.loc[:, "Output"].tolist()
    train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
    start = timeit.default_timer()
    clf.fit(train_X, train_Y)
    stop = timeit.default_timer()
    # BUG FIX: this function trains a KNN, but the log line previously said
    # "SVM Model Train Time" (copied from run_SVM).
    print("KNN Model Train Time", (stop - start))
    queue.put(clf)
    return clf
@study
def run_tuning_SVM_C(word2vec_src, train_pd_c, queue, repeats=1,
                     fold=10,
                     tuning=True):
    """
    Tune and train SVM learners on one cluster's data via stratified CV.

    The trained learners are pushed onto *queue* as a list so a threaded
    caller (see run_kmeans) can collect them, and their results are printed.

    :param word2vec_src: str, path of word2vec model
    :param train_pd_c: DataFrame with "Output" and "LinkTypeId" columns
    :param queue: Queue that receives the list of trained learners
    :param repeats: int, number of repeats
    :param fold: int, number of folds
    :param tuning: boolean, tuning or not.
    :return: None
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd_c = train_pd_c.reset_index()
    train_pd = train_pd_c
    # NOTE(review): other loaders in this file pass use_pkl=False; confirm
    # load_vec really accepts a file_name keyword here.
    test_pd = load_vec(data, data.test_data, file_name=False)
    learner = [SK_SVM][0]
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    print(goal)
    F = {}
    clfs = []
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            # BUG FIX: DataFrame.ix was deprecated and then removed from
            # pandas. The fold indices are positional (train_pd was just
            # reset_index'd), so .iloc is the correct replacement.
            train_data = train_pd.iloc[train_index]
            tune_data = train_pd.iloc[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    queue.put(clfs)
    print_results(clfs)
# parses and returns a given svm in the format of dictionary -
# [class](precision, recall, f1score, support)
def results_SVM(clf, test_X, test_Y):
    """
    Predict *test_X* with *clf* and return the parsed classification report.

    :param clf: a fitted classifier exposing predict().
    :param test_X: test feature vectors.
    :param test_Y: ground-truth labels.
    :return: dict mapping class name -> (precision, recall, f1score, support).
    """
    predictions = clf.predict(test_X)
    # labels: ["Duplicates", "DirectLink","IndirectLink", "Isolated"]
    raw_report = metrics.classification_report(
        test_Y, predictions, labels=["1", "2", "3", "4"], digits=3)
    return parse_classification_report(raw_report)
def total_summary(result_set, num_rows, start0, start1, stop0, stop1):
    """
    Print the support-weighted average precision/recall/F1 across reports.

    Each entry of *result_set* is a parsed classification report whose 'avg'
    value is a (precision, recall, f1, support) tuple; the metrics are
    weighted by support over *num_rows* total samples. Also prints the
    training timings derived from the four timer values.

    :param result_set: list of parsed report dicts (see results_SVM).
    :param num_rows: total number of test rows across all reports.
    :param start0/stop0: layer-1 training timer values.
    :param start1/stop1: layer-2 training timer values.
    :return: None
    """
    totals = [0, 0, 0]
    for report in result_set:
        avg = report['avg']
        support = avg[3]
        for metric_idx in range(3):
            totals[metric_idx] += (avg[metric_idx] * support) / num_rows
    result = {'precision': totals[0], 'recall': totals[1], 'f1': totals[2]}
    print(result)
    print("1st Model training time: ", (stop0 - start0))
    print("layer 2 Models training time: ", (stop1 - start1))
    print("Total Model training time: ", (stop1 - start0))
def run_kmeans(word2vec_src):
    """
    Cluster the training vectors with KMeans (k chosen by the gap
    statistic), train one SVM per cluster via run_tuning_SVM_C in worker
    threads, then evaluate each cluster's SVM on the test rows routed to
    that cluster and print a weighted summary.

    :param word2vec_src: str, path of the word2vec model file.
    :return: None (results are printed by total_summary).
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    queue = Queue()
    # Number of clusters via the gap statistic (see optimalK below).
    numClusters = optimalK(train_X)
    #numClusters = 5
    print("Found optimal k: " + str(numClusters))
    clf = KMeans(n_clusters=numClusters,
                 init='k-means++', max_iter=200, n_init=1)
    start0 = timeit.default_timer()
    clf.fit(train_X)
    stop0 = timeit.default_timer()
    svm_models = []  # maintain a list of svms
    s1 = timeit.default_timer()
    # Tag each training row with its cluster id.
    data.train_data['clabel'] = clf.labels_
    s2 = timeit.default_timer()
    print("Inter - ", (s2-s1))
    start1 = timeit.default_timer()
    #b = Barrier(numClusters-1)
    for l in range(numClusters):
        cluster = data.train_data.loc[data.train_data['clabel'] == l]
        t = threading.Thread(target=run_tuning_SVM_C, args = [word2vec_src,cluster,queue])
        threads.append(t)
        t.start()
        # NOTE(review): queue.get() blocks until this cluster's worker puts
        # its result, so clusters are effectively trained one at a time
        # despite the threads — confirm whether parallel training was
        # intended here.
        response = queue.get()
        svm_models.append(response)
        #b.wait()
        t.join()
    stop1 = timeit.default_timer()
    svm_results = []  # maintain a list of svm results
    test_X = test_pd.loc[:, "Output"].tolist()
    # Route each test row to its predicted cluster, then score it with
    # that cluster's SVM.
    predicted = clf.predict(test_X)
    data.test_data['clabel'] = predicted
    for l in range(numClusters):
        #print("Label " + str(l))
        cluster = data.test_data.loc[data.test_data['clabel'] == l]
        svm_model = svm_models[l]
        cluster_X = cluster.loc[:, "Output"].tolist()
        cluster_Y = cluster.loc[:, "LinkTypeId"].tolist()
        svm_results.append(results_SVM(svm_model, cluster_X, cluster_Y))# store all the SVM result report in a dictionary
    # call the helper method to summarize the svm results
    total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1)
# Source: https://anaconda.org/milesgranger/gap-statistic/notebook
def optimalK(data, nrefs=3, maxClusters=15):
    """
    Calculates KMeans optimal K using Gap Statistic from Tibshirani,
    Walther, Hastie.
    Params:
        data: ndarray (or nested list) of shape (n_samples, n_features)
        nrefs: number of sample reference datasets to create
        maxClusters: Maximum number of clusters to test for
    Returns: int, the optimal number of clusters (>= 1)
    """
    data = np.asarray(data)
    gaps = np.zeros((len(range(1, maxClusters)),))
    resultsdf = pd.DataFrame({'clusterCount': [], 'gap': []})
    for gap_index, k in enumerate(range(1, maxClusters)):
        # Holder for reference dispersion results
        refDisps = np.zeros(nrefs)
        # For n references, generate random sample and perform kmeans
        # getting resulting dispersion of each loop
        for i in range(nrefs):
            # BUG FIX: the reference dispersion must be computed on a random
            # sample, not on the original data (previously the random
            # reference was commented out and km was fit on `data`, which
            # makes the gap statistic degenerate).
            randomReference = np.random.random_sample(size=data.shape)
            km = KMeans(n_clusters=k, init='k-means++', max_iter=200, n_init=1)
            km.fit(randomReference)
            refDisps[i] = km.inertia_
        # Fit cluster to original data and create dispersion
        km = KMeans(k)
        km.fit(data)
        origDisp = km.inertia_
        # Calculate gap statistic
        gap = np.log(np.mean(refDisps)) - np.log(origDisp)
        # Assign this loop's gap statistic to gaps
        gaps[gap_index] = gap
        # BUG FIX: DataFrame.append was removed in pandas 2.0; pd.concat is
        # the supported equivalent.
        resultsdf = pd.concat(
            [resultsdf, pd.DataFrame({'clusterCount': [k], 'gap': [gap]})],
            ignore_index=True)
    # BUG FIX: +1 because index 0 corresponds to k == 1 (as the original
    # comment noted); returning the bare argmax could yield 0 clusters and
    # crash downstream KMeans(n_clusters=0) in run_kmeans.
    return gaps.argmax() + 1
# Not used, but wanted to put this code somewhere
def results_kmeans(clf, train_X, train_Y, test_X, test_Y):
    """
    Print clustering quality metrics for a fitted KMeans model.

    :param clf: fitted KMeans instance (labels_ must be populated).
    :param train_X: training feature vectors (for the silhouette score).
    :param train_Y: ground-truth labels compared against clf.labels_.
    :param test_X: test vectors; predicted for parity with the original
        flow although the result is not used here.
    :param test_Y: unused.
    :return: None
    """
    _ = clf.predict(test_X)  # kept for call parity; result intentionally unused
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(train_Y, clf.labels_))
    print("Completeness: %0.3f"
          % metrics.completeness_score(train_Y, clf.labels_))
    print("V-measure: %0.3f" % metrics.v_measure_score(train_Y, clf.labels_))
    print("Adjusted Rand-Index: %.3f"
          % metrics.adjusted_rand_score(train_Y, clf.labels_))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(train_X, clf.labels_, sample_size=1000))
"""
Parse a sklearn classification report into a dict keyed by class name
and containing a tuple (precision, recall, fscore, support) for each class
Reference: https://gist.github.com/julienr/6b9b9a03bd8224db7b4f
"""
def parse_classification_report(clfreport):
    """
    Parse a sklearn classification report into an OrderedDict keyed by class
    name; each value is a (precision, recall, fscore, support) tuple. The
    report's trailing average line is stored under the key 'avg'.
    Reference: https://gist.github.com/julienr/6b9b9a03bd8224db7b4f
    """
    non_empty = [line for line in clfreport.split('\n') if line.strip()]
    # Layout: header row, one row per class, then the average row.
    header = non_empty[0]
    class_rows = non_empty[1:-1]
    avg_row = non_empty[-1]
    assert header.split() == ['precision', 'recall', 'f1-score', 'support']
    assert avg_row.split()[0] == 'avg'
    # Class names may contain spaces; the width of the name column equals
    # the indentation before the 'precision' header.
    name_width = len(header) - len(header.lstrip())

    def split_row(row):
        """Split one report row into (name, precision, recall, fscore, support)."""
        name = row[:name_width].strip()
        prec, rec, f1, supp = row[name_width:].split()
        return (name, float(prec), float(rec), float(f1), int(supp))

    parsed = collections.OrderedDict()
    for row in class_rows:
        fields = split_row(row)
        parsed[fields[0]] = fields[1:]
    parsed['avg'] = split_row(avg_row)[1:]  # average
    return parsed
#################Katie's Code +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def prepare_word2vec():
    """Download the pretrained word2vec models archive from Zenodo and
    extract it into the current working directory."""
    print("Downloading pretrained word2vec models")
    archive_url = "https://zenodo.org/record/807727/files/word2vecs_models.zip"
    archive_path = wget.download(archive_url)
    with zipfile.ZipFile(archive_path, "r") as archive:
        archive.extractall()
if __name__ == "__main__":
    word_src = "word2vecs_models"
    # Module-level thread bookkeeping list; the run_* experiments append
    # their worker threads here.
    threads = []
    # Fetch the pretrained word2vec models if the directory is missing or
    # exists but is empty.
    if not os.path.exists(word_src):
        prepare_word2vec()
    elif len(os.listdir(word_src)) == 0:
        os.rmdir(word_src)
        prepare_word2vec()
    for x in range(1):
        # Seed both RNGs so each repetition x is reproducible.
        random.seed(x)
        np.random.seed(x)
        # Skip gensim's auxiliary "syn" weight files; keep only model files.
        myword2vecs = [os.path.join(word_src, i) for i in os.listdir(word_src)
                       if "syn" not in i]
        # The commented-out calls below are alternative experiments that can
        # be toggled on instead of (or alongside) the baseline.
        # t = threading.Thread(target=run_tuning_SVM_KNN, args = [myword2vecs[x]])
        # threads.append(t)
        # t.start()
        run_SVM_baseline(myword2vecs[x])
        #run_SVM_KNN_thread(myword2vecs[x])
        #run_LinearDiscriminantAnalysis(myword2vecs[x])
        #run_KNN(myword2vecs[x])
        #run_SVM_KNN(myword2vecs[x])
        #run_KMeans_Wpair(myword2vecs[x])
        #run_kmeans(myword2vecs[x])
        #run_KNN_SVM(myword2vecs[x])
        #run_KNN_KNN(myword2vecs[x])
        #Srun_LDA(myword2vecs[x])
        #run_RNN(myword2vecs[x])
        #print("Run completed for baseline model--------------------------------------------------")
        #run_tuning_SVM(myword2vecs[x])
        #run_tuning_LDA(myword2vecs[x])
        #run_tuning_KNN(myword2vecs[x])
        #print("Run completed for DE model--------------------------------------------------")
"""Seg NN Modules"""
from .sync_bn.syncbn import *
from .loss import * | 70 | 26 |
import json
import solcx
from solcx import compile_standard
# solcx.install_solc()
with open("./SimpleStorage.sol", "r") as file:
simple_storage_file = file.read()
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"SimpleStorage.sol": {"content": simple_storage_file}},
"settings": {
"outputSelection": {
"*": {"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]}
}
},
},
)
with open("./compiled_code.json", "w") as file:
json.dump(compiled_sol, file)
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"][
"bytecode"
]["object"]
abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
| 760 | 249 |
class Solution:
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
00 01 02
10 11 12
20 21 22
divide 3 : 0
mod 3 : < 3
m = {
'row0-8' : set()
'col0-8'
'div3 + mod3'
}
"""
m = {}
# row
for i in range(9):
for j in range(9):
cur = board[i][j]
if cur == '.':
continue
sym_row = 'r' + str(i)
if sym_row in m:
if cur in m[sym_row]:
return False
m[sym_row].add(cur)
else:
m[sym_row] = set()
m[sym_row].add(cur)
sym_col = 'c' + str(j)
if sym_col in m:
if cur in m[sym_col]:
return False
m[sym_col].add(cur)
else:
m[sym_col] = set()
m[sym_col].add(cur)
sym_box = str(int(i/3)) + str(int(j/3))
if sym_box in m:
if cur in m[sym_box]:
return False
m[sym_box].add(cur)
else:
m[sym_box] = set()
m[sym_box].add(cur)
return True
| 1,433 | 443 |
"""Primary testing suite for clouddb.models.instance.
This code is licensed under the MIT license. See COPYING for more details."""
import time
import unittest
import clouddb
import test_clouddb
CLOUDDB_TEST_INSTANCE_OBJECT = None
CLOUDDB_TEST_BASELINE_INSTANCE_COUNT = None
CLOUDDB_TEST_INSTANCE_NAME = "testsuite-ci-%d" % time.time()
class InstanceBaseline(test_clouddb.BaseTestCase):
def test_instance_list_baseline(self):
instances = self.raxdb.instances()
self.assertIsInstance(instances, list)
test_clouddb.test_instance.CLOUDDB_TEST_BASELINE_INSTANCE_COUNT = len(instances)
class InstanceCreate(test_clouddb.BaseTestCase):
def test_create_instance(self):
test_clouddb.test_instance.CLOUDDB_TEST_INSTANCE_OBJECT = \
self.raxdb.create_instance(CLOUDDB_TEST_INSTANCE_NAME, 1, 1, wait=True)
self.assertIsInstance(test_clouddb.test_instance.CLOUDDB_TEST_INSTANCE_OBJECT,
clouddb.models.instance.Instance)
class InstanceListGet(test_clouddb.BaseTestCase):
def test_instance_list(self):
instances = self.raxdb.instances()
self.assertIsInstance(instances, list)
self.assertEqual(len(instances),
test_clouddb.test_instance.CLOUDDB_TEST_BASELINE_INSTANCE_COUNT + 1)
self.assertIsInstance(instances[-1], clouddb.models.instance.Instance)
class InstanceDestroy(test_clouddb.BaseTestCase):
def test_instance_remove(self):
test_clouddb.test_instance.CLOUDDB_TEST_INSTANCE_OBJECT.delete(wait=True)
class InstanceListFinal(test_clouddb.BaseTestCase):
def test_instance_list_baseline_again(self):
instances = self.raxdb.instances()
self.assertEqual(len(instances),
test_clouddb.test_instance.CLOUDDB_TEST_BASELINE_INSTANCE_COUNT)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InstanceBaseline))
suite.addTest(unittest.makeSuite(InstanceCreate))
suite.addTest(unittest.makeSuite(InstanceListGet))
suite.addTest(unittest.makeSuite(InstanceDestroy))
suite.addTest(unittest.makeSuite(InstanceListFinal))
return suite
if __name__ == "__main__":
unittest.main()
| 2,185 | 743 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
import tensorflow as tf
import numpy as np
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
'input_model', None, 'Run inference with specified pb graph.')
flags.DEFINE_string(
'output_model', None, 'The output model of the quantized model.')
flags.DEFINE_string(
'mode', 'performance', 'define benchmark mode for accuracy or performance')
flags.DEFINE_bool(
'tune', False, 'whether to tune the model')
flags.DEFINE_bool(
'benchmark', False, 'whether to benchmark the model')
flags.DEFINE_string(
'config', 'bert.yaml', 'yaml configuration of the model')
flags.DEFINE_bool(
'strip_iterator', False, 'whether to strip the iterator of the model')
def strip_iterator(graph_def):
from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes
input_node_names = ['input_ids', 'input_mask', 'segment_ids']
output_node_names = ['unstack']
# create the placeholder and merge with the graph
with tf.compat.v1.Graph().as_default() as g:
input_ids = tf.compat.v1.placeholder(tf.int32, shape=(None,384), name="input_ids")
input_mask = tf.compat.v1.placeholder(tf.int32, shape=(None,384), name="input_mask")
segment_ids = tf.compat.v1.placeholder(tf.int32, shape=(None,384), name="segment_ids")
tf.import_graph_def(graph_def, name='')
graph_def = g.as_graph_def()
# change the input from iterator to placeholder
for node in graph_def.node:
for idx, in_tensor in enumerate(node.input):
if 'IteratorGetNext:0' == in_tensor or 'IteratorGetNext' == in_tensor:
node.input[idx] = 'input_ids'
if 'IteratorGetNext:1' in in_tensor:
node.input[idx] = 'input_mask'
if 'IteratorGetNext:2' in in_tensor:
node.input[idx] = 'segment_ids'
graph_def = strip_unused_nodes(graph_def, input_node_names, output_node_names)
return graph_def
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
if FLAGS.benchmark:
from neural_compressor.experimental import Benchmark
evaluator = Benchmark(FLAGS.config)
evaluator.model = FLAGS.input_model
evaluator(FLAGS.mode)
elif FLAGS.tune:
from neural_compressor.experimental import Quantization
quantizer = Quantization(FLAGS.config)
quantizer.model = FLAGS.input_model
q_model = quantizer()
if FLAGS.strip_iterator:
q_model.graph_def = strip_iterator(q_model.graph_def)
q_model.save(FLAGS.output_model)
if __name__ == "__main__":
tf.compat.v1.app.run()
| 3,327 | 1,077 |
"""driver/ad7843 module"""
__author__ = 'Bartosz Kosciow'
| 58 | 26 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import abc
class TargetTools():
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getConnection(self, username, password, url):
pass
@abc.abstractmethod
def validTarget(self, target):
pass | 286 | 102 |
from sqlalchemy import select, func, or_
from sqlalchemy.ext.hybrid import hybrid_property
from psi.app import const
from psi.app.models import Product, InventoryTransactionLine, \
InventoryTransaction
from psi.app.service import Info
from psi.app.utils import format_decimal, get_weeks_between
db = Info.get_db()
class ProductInventory(Product):
@hybrid_property
def inventory_advice(self):
from psi.app.advice import InventoryAdvice
return InventoryAdvice.advice(self)
@inventory_advice.setter
def inventory_advice(self, value):
pass
@inventory_advice.expression
def inventory_advice(self):
pass
@hybrid_property
def average_purchase_price(self):
return self.cal_inv_trans_average(const.PURCHASE_IN_INV_TRANS_KEY)
@average_purchase_price.setter
def average_purchase_price(self, val):
pass
@average_purchase_price.expression
def average_purchase_price(self):
from psi.app.models import EnumValues
return (select([func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) /
func.sum(InventoryTransactionLine.quantity)])
.where(self.id == InventoryTransactionLine.product_id
and InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id
and InventoryTransaction.type_id == EnumValues.id
and EnumValues.code == const.PURCHASE_IN_INV_TRANS_KEY)
.label('average_purchase_price'))
@hybrid_property
def average_retail_price(self):
return self.cal_inv_trans_average(const.SALES_OUT_INV_TRANS_TYPE_KEY)
@average_retail_price.setter
def average_retail_price(self, val):
pass
@average_retail_price.expression
def average_retail_price(self):
from psi.app.models import EnumValues
return (select([func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) /
func.greatest(func.sum(InventoryTransactionLine.quantity), 1)])
.where(self.id == InventoryTransactionLine.product_id
and InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id
and InventoryTransaction.type_id == EnumValues.id
and EnumValues.code == const.SALES_OUT_INV_TRANS_TYPE_KEY)
.label('average_retail_price'))
@hybrid_property
def average_unit_profit(self):
if self.average_purchase_price != 0 and self.average_retail_price != 0:
return self.average_retail_price - self.average_purchase_price
return 0
@average_unit_profit.setter
def average_unit_profit(self, value):
pass
@average_unit_profit.expression
def average_unit_profit(self):
from .enum_values import EnumValues
return ((select([-func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) /
func.greatest(func.sum(InventoryTransactionLine.quantity), 1)])
.where(self.id == InventoryTransactionLine.product_id)
.where(InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id)
.where(InventoryTransaction.type_id == EnumValues.id)
.where(or_(EnumValues.code == const.SALES_OUT_INV_TRANS_TYPE_KEY, EnumValues.code == const.PURCHASE_IN_INV_TRANS_KEY)))
.label('average_unit_profit'))
@hybrid_property
def weekly_average_profit(self):
if 0 == self.average_unit_profit:
return 0
return format_decimal(self.weekly_sold_qty * self.average_unit_profit)
@weekly_average_profit.expression
def weekly_average_profit(self):
from .enum_values import EnumValues
return ((select([-func.sum(InventoryTransactionLine.quantity * InventoryTransactionLine.price) /
func.greatest(func.sum(InventoryTransactionLine.quantity), 1)])
.where(self.id == InventoryTransactionLine.product_id
and InventoryTransactionLine.inventory_transaction_id == InventoryTransaction.id
and InventoryTransaction.type_id == EnumValues.id
and (EnumValues.code == const.SALES_OUT_INV_TRANS_TYPE_KEY or
EnumValues.code == const.PURCHASE_IN_INV_TRANS_KEY)))
.label('weekly_average_profit'))
@weekly_average_profit.setter
def weekly_average_profit(self, value):
pass
@hybrid_property
def gross_profit_rate(self):
if self.average_retail_price != 0 and self.average_purchase_price != 0:
val = (self.average_retail_price - self.average_purchase_price)/self.average_purchase_price
try:
fval = float(val)
percent = "{:.2%}".format(fval)
return percent
except Exception as e:
return '-'
return '-'
@gross_profit_rate.setter
def gross_profit_rate(self, value):
pass
@hybrid_property
def weekly_sold_qty(self):
"""
SQL:
SELECT p.id, p.name,
-sum(itl.quantity),
-sum(itl.quantity) / (greatest(date_part('days', max(it.date) - min(it.date)), 1)/7),
FROM
inventory_transaction_line itl,
inventory_transaction it,
enum_values ev,
product p
where
itl.inventory_transaction_id = it.id
AND itl.product_id = p.id
AND ev.code = 'SALES_OUT'
AND it.type_id = ev.id
GROUP BY p.id, p.name;
:return: quantity of sold out product averaged by week.
"""
i_ts = self.inventory_transaction_lines
tot_qty = 0
max_date, min_date = None, None
if len(i_ts) > 0:
for l in i_ts:
if l.type.code == const.SALES_OUT_INV_TRANS_TYPE_KEY:
if l.quantity is not None and l.price is not None:
tot_qty += abs(l.quantity)
if max_date is None or l.inventory_transaction.date > max_date:
max_date = l.inventory_transaction.date
if min_date is None or l.inventory_transaction.date < min_date:
min_date = l.inventory_transaction.date
weeks = get_weeks_between(min_date, max_date)
if weeks == 0:
weeks = 1
return format_decimal(tot_qty / weeks)
@weekly_sold_qty.setter
def weekly_sold_qty(self, value):
pass
@weekly_sold_qty.expression
def weekly_sold_qty(self):
from psi.app.models.sales_order import SalesOrderLine, SalesOrder
return ((select([func.sum(SalesOrderLine.quantity)])
.where(self.id == SalesOrderLine.product_id)
.where(SalesOrderLine.sales_order_id == SalesOrder.id)
.where(SalesOrder.order_date > func.now() - 7)).label('weekly_sold_qty'))
def cal_inv_trans_average(self, transaction_type):
i_ts = self.inventory_transaction_lines
tot_amt = 0
tot_qty = 0
if len(i_ts) > 0:
for l in i_ts:
if l.type.code == transaction_type:
if l.quantity is not None and l.price is not None:
tot_qty += abs(l.quantity)
tot_amt += abs(l.quantity) * l.price
if tot_amt != 0 and tot_qty != 0:
return format_decimal(tot_amt / tot_qty)
return 0
| 7,627 | 2,323 |
import carto
import argparse
def main():
parser = argparse.ArgumentParser(
description="Runs programs for the carto MapReduce library"
)
parser.add_argument(
"--host", dest="host", type=str, default="localhost",
help="Host of the program"
)
parser.add_argument(
"--port", dest="port", type=int, default=8000,
help="Port of the program"
)
parser.add_argument(
"--name", dest="name", type=str,
help="Name used by the worker"
)
parser.add_argument(
"--program", dest="program", type=str, default="client",
help="Used to determine what program will run"
)
parser.add_argument(
"--ns-host", dest="ns_host", type=str, default="localhost",
help="Host of the name server"
)
parser.add_argument(
"--ns-port", dest="ns_port", type=int, default="8080",
help="Port used by the name server"
)
args = parser.parse_args()
if args.program == carto.master.worker.WorkerType.MASTER:
carto.master.run(args.host, args.port)
elif args.program == carto.master.worker.WorkerType.MAPPER:
carto.mapper.run(args.host, args.port, args.ns_host,
args.ns_port, args.name)
elif args.program == carto.master.worker.WorkerType.REDUCER:
carto.reducer.run(args.host, args.port, args.ns_host,
args.ns_port, args.name)
if __name__ == "__main__":
main()
| 1,484 | 482 |
# (C) Copyright IBM Corporation 2017, 2018, 2019
# U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted
# by GSA ADP Schedule Contract with IBM Corp.
#
# Author: Leonardo P. Tizzei <ltizzei@br.ibm.com>
from microservices_miner.control.database_conn import IssueConn, UserConn, RepositoryConn
from microservices_miner.model.repository import Repository
import logging
logging.basicConfig(filename='github_miner.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
class IssueMgr:
    """Persists GitHub issues (with their labels and assignees) to the local DB."""

    def __init__(self, path_to_db):
        """Open connections to the issue, user and repository tables."""
        self.db_path = path_to_db
        self.issue_conn = IssueConn(path_to_db)
        self.user_conn = UserConn(path_to_db)
        self.repo_conn = RepositoryConn(path_to_db)

    def insert_issue_into_db(self, repo):
        """Insert every issue of a repository, including labels and assignees.

        Parameters
        ----------
        repo: Repository
        """
        for issue in repo.issues:
            # Timestamps are stored as ISO strings; missing ones stay None.
            updated_at_str = (
                issue.updated_at.isoformat() if issue.updated_at is not None else None
            )
            closed_at_str = (
                issue.closed_at.isoformat() if issue.closed_at is not None else None
            )
            issue_id = self.issue_conn.insert_issue(
                title=issue.title,
                body=issue.body,
                repository_id=repo.repository_id,
                closed_at=closed_at_str,
                updated_at=updated_at_str,
                created_at=issue.created_at.isoformat(),
                user_id=issue.user.commit_id,
                state=issue.state,
            )
            # Link the issue to each of its assignees and labels.
            for assignee in issue.assignees:
                assignee_id = self.issue_conn.insert_assignee(assignee)
                self.issue_conn.insert_issue_assignee(
                    assignee_id=assignee_id, issue_id=issue_id
                )
            for label in issue.labels:
                label_id = self.issue_conn.insert_label(label)
                self.issue_conn.insert_issue_label(issue_id=issue_id, label_id=label_id)

    def get_issues_by_label(self, repository_id: int):
        """Return all issues stored for the given repository.

        Parameters
        ----------
        repository_id: int

        Returns
        -------
        List[Issue]
        """
        return self.issue_conn.get_issues(repository_id=repository_id)

    def get_label(self, name):
        """Return the label called `name`, or None when it is unknown.

        Returns
        -------
        Label
        """
        found = self.issue_conn.get_labels(name=name)
        return found.pop() if found else None

    def get_assignee(self, login):
        """Return the assignee with the given login, or None when unknown.

        Returns
        -------
        Assignee
        """
        found = self.issue_conn.get_assignee(login)
        return found.pop() if found else None

    def insert_assignee(self, assignee):
        """Insert an assignee and return its new row id.

        Parameters
        ----------
        assignee: Assignee

        Returns
        -------
        int
        """
        return self.issue_conn.insert_assignee(assignee)

    def insert_label(self, label):
        """Insert a label and return its new row id.

        Parameters
        ----------
        label: Label

        Returns
        -------
        int
        """
        return self.issue_conn.insert_label(label)
| 3,623 | 1,079 |
"""Configuration flow for the skyq platform."""
import ipaddress
import json
import logging
import re
from operator import attrgetter
import homeassistant.helpers.config_validation as cv
import pycountry
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import callback
from pyskyqremote.const import KNOWN_COUNTRIES
from pyskyqremote.skyq_remote import SkyQRemote
from .const import (
CHANNEL_DISPLAY,
CHANNEL_SOURCES_DISPLAY,
CONF_CHANNEL_SOURCES,
CONF_COUNTRY,
CONF_EPG_CACHE_LEN,
CONF_GEN_SWITCH,
CONF_LIVE_TV,
CONF_OUTPUT_PROGRAMME_IMAGE,
CONF_ROOM,
CONF_SOURCES,
CONF_VOLUME_ENTITY,
CONST_DEFAULT,
CONST_DEFAULT_EPGCACHELEN,
DOMAIN,
LIST_EPGCACHELEN,
SKYQREMOTE,
)
from .schema import DATA_SCHEMA
from .utils import convert_sources_JSON
# When True, selected channel sources are re-sorted by type then number
# before being stored; currently disabled.
SORT_CHANNELS = False
_LOGGER = logging.getLogger(__name__)
def host_valid(host):
    """Return True if `host` is a valid IPv4/IPv6 address or hostname.

    Falls back to a hostname check (letters, digits, hyphens, dot-separated
    labels) when the string is not a literal IP address.
    """
    try:
        # BUG FIX: the original compared `version == (4 or 6)`, which is
        # `version == 4`, so valid IPv6 addresses returned None (falsy).
        return ipaddress.ip_address(host).version in (4, 6)
    except ValueError:
        # Not an IP literal: validate as a hostname instead.
        disallowed = re.compile(r"[^a-zA-Z\d\-]")
        return all(x and not disallowed.search(x) for x in host.split("."))
class SkyqConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle the initial Sky Q configuration flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Initialise the configuration flow."""

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Return the Sky Q options flow handler."""
        return SkyQOptionsFlowHandler(config_entry)

    async def async_step_user(self, user_input=None):
        """Handle the initial, user-driven step."""
        errors = {}
        if user_input:
            host = user_input[CONF_HOST]
            if host_valid(host):
                try:
                    await self._async_setUniqueID(host)
                except CannotConnect:
                    errors["base"] = "cannot_connect"
                else:
                    return self.async_create_entry(
                        title=user_input[CONF_NAME], data=user_input
                    )
            # Reached for an invalid host, and also after a failed connection.
            errors[CONF_HOST] = "invalid_host"
        return self.async_show_form(
            step_id="user", data_schema=vol.Schema(DATA_SCHEMA), errors=errors
        )

    async def _async_setUniqueID(self, host):
        """Derive and register the unique id for the device at `host`."""
        sky_remote = await self.hass.async_add_executor_job(SkyQRemote, host)
        if not sky_remote.deviceSetup:
            raise CannotConnect()
        device_info = await self.hass.async_add_executor_job(
            sky_remote.getDeviceInformation
        )
        # Unique id = country code + alphanumeric, lower-cased serial number.
        serial_part = "".join(
            ch for ch in device_info.serialNumber.casefold() if ch.isalnum()
        )
        await self.async_set_unique_id(device_info.countryCode + serial_part)
        self._abort_if_unique_id_configured()
class SkyQOptionsFlowHandler(config_entries.OptionsFlow):
    """Config flow options for Sky Q."""

    def __init__(self, config_entry):
        """Initialize Sky Q options flow from the stored config entry options."""
        self._name = config_entry.title
        self._config_entry = config_entry
        self._remote = None
        # Channel names previously selected as media sources.
        self._channel_sources = config_entry.options.get(CONF_CHANNEL_SOURCES, [])
        self._sources = convert_sources_JSON(
            sources_list=config_entry.options.get(CONF_SOURCES)
        )
        self._room = config_entry.options.get(CONF_ROOM)
        self._volume_entity = config_entry.options.get(CONF_VOLUME_ENTITY)
        self._gen_switch = config_entry.options.get(CONF_GEN_SWITCH, False)
        self._live_tv = config_entry.options.get(CONF_LIVE_TV, True)
        self._country = config_entry.options.get(CONF_COUNTRY, CONST_DEFAULT)
        # Options store the country as an ISO alpha-3 code; the form shows names.
        if self._country != CONST_DEFAULT:
            self._country = self._convertCountry(alpha_3=self._country)
        self._output_programme_image = config_entry.options.get(
            CONF_OUTPUT_PROGRAMME_IMAGE, True
        )
        self._epg_cache_len = config_entry.options.get(
            CONF_EPG_CACHE_LEN, CONST_DEFAULT_EPGCACHELEN
        )
        self._channelDisplay = []
        self._channel_list = []

    async def async_step_init(self, user_input=None):
        """Set up the option flow."""
        self._remote = self.hass.data[DOMAIN][self._config_entry.entry_id][SKYQREMOTE]
        # Deduplicate alpha-3 codes, then present sorted full country names.
        s = set(KNOWN_COUNTRIES[country] for country in KNOWN_COUNTRIES)
        countryNames = []
        for alpha3 in s:
            countryName = self._convertCountry(alpha_3=alpha3)
            countryNames.append(countryName)
        self._country_list = [CONST_DEFAULT] + sorted(countryNames)
        if self._remote.deviceSetup:
            channelData = await self.hass.async_add_executor_job(
                self._remote.getChannelList
            )
            self._channel_list = channelData.channels
            for channel in self._channel_list:
                self._channelDisplay.append(
                    CHANNEL_DISPLAY.format(channel.channelno, channel.channelname)
                )
            # Rebuild the display strings for the stored channel selection.
            self._channel_sources_display = []
            for channel in self._channel_sources:
                try:
                    channelData = next(
                        c for c in self._channel_list if c.channelname == channel
                    )
                    self._channel_sources_display.append(
                        CHANNEL_DISPLAY.format(
                            channelData.channelno, channelData.channelname
                        )
                    )
                except StopIteration:
                    # Stored channel no longer exists on the box; drop it.
                    pass
            return await self.async_step_user()
        return await self.async_step_retry()

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        errors = {}
        if user_input:
            self._channel_sources_display = user_input[CHANNEL_SOURCES_DISPLAY]
            user_input.pop(CHANNEL_SOURCES_DISPLAY)
            if len(self._channel_sources_display) > 0:
                channelitems = []
                for channel in self._channel_sources_display:
                    # Map the "<number> - <name>" display string back to channel data.
                    channelData = next(
                        c
                        for c in self._channel_list
                        if channel == CHANNEL_DISPLAY.format(c.channelno, c.channelname)
                    )
                    channelitems.append(channelData)
                if SORT_CHANNELS:
                    # Sort by number, then (stable) by type descending.
                    channelnosorted = sorted(channelitems, key=attrgetter("channelno"))
                    channelsorted = sorted(
                        channelnosorted, key=attrgetter("channeltype"), reverse=True
                    )
                    channel_sources = []
                    for c in channelsorted:
                        channel_sources.append(c.channelname)
                else:
                    channel_sources = []
                    for c in channelitems:
                        channel_sources.append(c.channelname)
                user_input[CONF_CHANNEL_SOURCES] = channel_sources
            self._gen_switch = user_input.get(CONF_GEN_SWITCH)
            self._live_tv = user_input.get(CONF_LIVE_TV)
            self._output_programme_image = user_input.get(CONF_OUTPUT_PROGRAMME_IMAGE)
            self._room = user_input.get(CONF_ROOM)
            self._volume_entity = user_input.get(CONF_VOLUME_ENTITY)
            self._country = user_input.get(CONF_COUNTRY)
            if self._country == CONST_DEFAULT:
                user_input.pop(CONF_COUNTRY)
            else:
                # Persist the country as its ISO alpha-3 code.
                user_input[CONF_COUNTRY] = self._convertCountry(name=self._country)
            self._epg_cache_len = user_input.get(CONF_EPG_CACHE_LEN)
            try:
                self._sources = user_input.get(CONF_SOURCES)
                if self._sources:
                    user_input[CONF_SOURCES] = convert_sources_JSON(
                        sources_json=self._sources
                    )
                    for source in user_input[CONF_SOURCES]:
                        self._validate_commands(source)
                return self.async_create_entry(title="", data=user_input)
            except json.decoder.JSONDecodeError:
                errors["base"] = "invalid_sources"
            except InvalidCommand:
                errors["base"] = "invalid_command"
        return self.async_show_form(
            step_id="user",
            description_placeholders={CONF_NAME: self._name},
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CHANNEL_SOURCES_DISPLAY, default=self._channel_sources_display
                    ): cv.multi_select(self._channelDisplay),
                    vol.Optional(
                        CONF_OUTPUT_PROGRAMME_IMAGE,
                        default=self._output_programme_image,
                    ): bool,
                    vol.Optional(CONF_LIVE_TV, default=self._live_tv): bool,
                    vol.Optional(CONF_GEN_SWITCH, default=self._gen_switch): bool,
                    vol.Optional(
                        CONF_ROOM, description={"suggested_value": self._room}
                    ): str,
                    vol.Optional(CONF_COUNTRY, default=self._country): vol.In(
                        self._country_list
                    ),
                    vol.Optional(
                        CONF_VOLUME_ENTITY,
                        description={"suggested_value": self._volume_entity},
                    ): str,
                    vol.Optional(
                        CONF_EPG_CACHE_LEN, default=self._epg_cache_len
                    ): vol.In(LIST_EPGCACHELEN),
                    vol.Optional(
                        CONF_SOURCES, description={"suggested_value": self._sources}
                    ): str,
                }
            ),
            errors=errors,
        )

    async def async_step_retry(self, user_input=None):
        """Handle a failed connection."""
        errors = {}
        errors["base"] = "cannot_connect"
        return self.async_show_form(
            step_id="retry",
            data_schema=vol.Schema({}),
            errors=errors,
        )

    def _convertCountry(self, alpha_3=None, name=None):
        """Convert a country between ISO alpha-3 code and full name.

        Returns None when neither keyword is supplied.
        """
        if name:
            return pycountry.countries.get(name=name).alpha_3
        if alpha_3:
            return pycountry.countries.get(alpha_3=alpha_3).name

    def _validate_commands(self, source):
        """Raise InvalidCommand if a source uses an unknown Sky Q command."""
        commands = source[1].split(",")
        for command in commands:
            if command not in SkyQRemote.commands:
                raise InvalidCommand()
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect to the Sky Q device."""
class InvalidCommand(exceptions.HomeAssistantError):
    """Error to indicate an invalid command was configured in sources."""
| 10,893 | 3,183 |
from tradssat.tmpl.output import OutFile
from tradssat.tmpl.var import FloatVar, IntegerVar
class SoilNiOut(OutFile):
    """
    Reader for DSSAT soil nitrogen (SOILNI.OUT) files.
    """
    filename = 'SoilNi.Out'

    def _get_var_info(self):
        # `vars_` is the module-level variable set defined below the class;
        # it is resolved at call time, so the forward reference is safe.
        return vars_
# Column definitions for the SOILNI.OUT output variables.
# NOTE(review): this is a *set* literal, so iteration order is undefined;
# presumably the reader matches variables by name rather than position — confirm.
vars_ = {
    IntegerVar('YEAR', 4, info='Year'),
    IntegerVar('DOY', 3, info='Day of year starting on Jan 1.'),
    IntegerVar('DAS', 5, info='Day after start'),
    IntegerVar('NAPC', 5, info='Cumulative inorganic N applied, kg/ha'),
    IntegerVar('NI#M', 5, info='N application numbers'),
    FloatVar('NIAD', 7, 1, info='Inorganic N in soil, kg/ha'),
    FloatVar('NITD', 6, 1, info='Amount of total NO3, kg/ha'),
    FloatVar('NHTD', 6, 1, info='Amount of total NH4, kg/ha'),
    FloatVar('NI1D', 7, 2, info='NO3 at 0-5 cm soil depth, ppm'),
    FloatVar('NI2D', 7, 2, info='NO3 at 5-15 cm soil depth, ppm'),
    FloatVar('NI3D', 7, 2, info='NO3 at 15-30 cm soil depth, ppm'),
    FloatVar('NI4D', 7, 2, info='NO3 at 30-45 cm soil depth, ppm'),
    FloatVar('NI5D', 7, 2, info='NO3 at 45-60 cm soil depth, ppm'),
    FloatVar('NI6D', 7, 2, info='NO3 at 60-90 cm soil depth, ppm'),
    FloatVar('NI7D', 7, 2, info='NO3 at 90-110 cm soil depth, ppm'),
    FloatVar('NH1D', 7, 2, info='NH4 at 0-5 cm soil depth, ppm'),
    FloatVar('NH2D', 7, 2, info='NH4 at 5-15 cm soil depth, ppm'),
    FloatVar('NH3D', 7, 2, info='NH4 at 15-30 cm soil depth, ppm'),
    FloatVar('NH4D', 7, 2, info='NH4 at 30-45 cm soil depth, ppm'),
    FloatVar('NH5D', 7, 2, info='NH4 at 45-60 cm soil depth, ppm'),
    FloatVar('NH6D', 7, 2, info='NH4 at 60-90 cm soil depth, ppm'),
    FloatVar('NH7D', 7, 2, info='NH4 at 90-110 cm soil depth, ppm'),
    FloatVar('NMNC', 7, 0, info=''),
    FloatVar('NITC', 7, 0, info=''),
    FloatVar('NDNC', 7, 0, info=''),
    FloatVar('NIMC', 7, 0, info=''),
    FloatVar('AMLC', 7, 0, info=''),
    FloatVar('NNMNC', 7, 0, info=''),
    FloatVar('NUCM', 7, 0, info='N uptake, kg/ha'),
    FloatVar('NLCC', 7, 0, info='Cumulative N leached, kg/ha'),
}
| 2,045 | 921 |
#!/usr/bin/env python3
import argparse
import sys
import os
import time
import traceback
import subprocess
from subprocess import PIPE, STDOUT, CalledProcessError
# Find path to this script
SELF_PATH = os.path.dirname(os.path.abspath(__file__))
# Find path to Contiki-NG relative to this script
CONTIKI_PATH = os.path.dirname(os.path.dirname(SELF_PATH))
cooja_jar = os.path.normpath(os.path.join(CONTIKI_PATH, "tools", "cooja", "dist", "cooja.jar"))
# Fixed filenames Cooja writes into the current working directory.
cooja_output = 'COOJA.testlog'
cooja_log = 'COOJA.log'
#######################################################
# Run a child process and get its output
def _run_command(command):
try:
proc = subprocess.run(command, stdout=PIPE, stderr=STDOUT, shell=True, universal_newlines=True)
return proc.returncode, proc.stdout if proc.stdout else ''
except CalledProcessError as e:
print(f"Command failed: {e}", file=sys.stderr)
return e.returncode, e.stdout if e.stdout else ''
except (OSError, Exception) as e:
traceback.print_exc()
return -1, str(e)
def _remove_file(filename):
try:
os.remove(filename)
except FileNotFoundError:
pass
#############################################################
# Run a single instance of Cooja on a given simulation script
def run_simulation(cooja_file, output_path=None):
    """Run one Cooja simulation and archive its logs.

    Returns True when the simulation log contains a "TEST OK" line; on any
    failure the result directory is renamed with a '-fail' suffix and False
    is returned.
    """
    # Remove any old simulation logs
    _remove_file(cooja_output)
    _remove_file(cooja_log)
    # Strip the simulation file extension to build the result directory name.
    target_basename = cooja_file
    if target_basename.endswith('.csc.gz'):
        target_basename = target_basename[:-7]
    elif target_basename.endswith('.csc'):
        target_basename = target_basename[:-4]
    # Millisecond timestamp keeps repeated runs of the same file distinct.
    simulation_id = str(round(time.time() * 1000))
    if output_path is not None:
        target_basename = os.path.join(output_path, target_basename)
    target_basename += '-dt-' + simulation_id
    target_basename_fail = target_basename + '-fail'
    target_output = target_basename + '/cooja.testlog'
    target_log_output = target_basename + '/cooja.log'
    # filename = os.path.join(SELF_PATH, cooja_file)
    command = (f"java -Djava.awt.headless=true -jar {cooja_jar} -nogui={cooja_file} -contiki={CONTIKI_PATH}"
               f" -datatrace={target_basename}")
    sys.stdout.write(f" Running Cooja:\n {command}\n")
    start_time = time.perf_counter_ns()
    (return_code, output) = _run_command(command)
    end_time = time.perf_counter_ns()
    # Append the wall-clock duration to the Cooja log before archiving it.
    with open(cooja_log, 'a') as f:
        f.write(f'\nSimulation execution time: {end_time - start_time} ns.\n')
    if not os.path.isdir(target_basename):
        os.mkdir(target_basename)
    has_cooja_output = os.path.isfile(cooja_output)
    if has_cooja_output:
        os.rename(cooja_output, target_output)
    os.rename(cooja_log, target_log_output)
    if return_code != 0 or not has_cooja_output:
        print(f"Failed, ret code={return_code}, output:", file=sys.stderr)
        print("-----", file=sys.stderr)
        print(output, file=sys.stderr, end='')
        print("-----", file=sys.stderr)
        if not has_cooja_output:
            print("No Cooja simulation script output!", file=sys.stderr)
        # Keep the logs, but mark the directory as a failed run.
        os.rename(target_basename, target_basename_fail)
        return False
    print(" Checking for output...")
    # Scan the archived test log for the success marker.
    is_done = False
    with open(target_output, "r") as f:
        for line in f.readlines():
            line = line.strip()
            if line == "TEST OK":
                is_done = True
                continue
    if not is_done:
        print(" test failed.")
        os.rename(target_basename, target_basename_fail)
        return False
    print(f" test done in {round((end_time - start_time) / 1000000)} milliseconds.")
    return True
#######################################################
# Run the application
def main(parser=None):
    """Parse arguments and run every given simulation, aborting on failure.

    Parameters
    ----------
    parser: argparse.ArgumentParser, optional
        Allows a caller to supply a pre-configured parser.
    """
    if not os.access(cooja_jar, os.R_OK):
        sys.exit(f'The file "{cooja_jar}" does not exist, did you build Cooja?')
    if not parser:
        parser = argparse.ArgumentParser()
    parser.add_argument('-o', dest='output_path')
    parser.add_argument('input', nargs='+')
    try:
        conopts = parser.parse_args(sys.argv[1:])
    except Exception as e:
        # NOTE(review): argparse errors raise SystemExit, which is not an
        # Exception subclass, so this handler is unlikely to ever fire — confirm.
        sys.exit(f"Illegal arguments: {e}")
    if conopts.output_path and not os.path.isdir(conopts.output_path):
        os.mkdir(conopts.output_path)
    # Run the simulations in order; stop at the first failure.
    for simulation_file in conopts.input:
        if not os.access(simulation_file, os.R_OK):
            print(f'Can not read simulation script "{simulation_file}"', file=sys.stderr)
            sys.exit(1)
        print(f'Running simulation "{simulation_file}"')
        if not run_simulation(simulation_file, conopts.output_path):
            sys.exit(f'Failed to run simulation "{simulation_file}"')
    print('Done. No more simulation files specified.')
#######################################################
# Script entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| 4,888 | 1,622 |
#!/usr/bin/env python3
import json, operator
import mruservice, mruuserdata
# Identity of the Office app whose MRU (most-recently-used) list is emitted.
APP_NAME = 'Excel'
APP_BUNDLE_ID = 'com.microsoft.Excel'
# URL prefix that turns a document URL into an "open in Excel" link.
APP_URL_PREFIX = 'ms-excel:ofe|u|'
# Maps a file extension to its Office icon resource name; '_' is the fallback key.
EXTENSION_TO_ICON_NAME = dict(
    slk='XLS8', dif='XLS8', ods='ODS', xls='XLS8', xlsx='XLSX', xltx='XLTX', xlsm='XLSM',
    xltm='XLTM', xlsb='XLSB', xlam='XLAM', xlw='XLW8', xla='XLA8', xlb='XLB8', xlt='XLT',
    xld='XLD5', xlm='XLM4', xll='XLL', csv='CSV', txt='TEXT', xml='XMLS', tlb='OTLB', _='TEXT')

# Merge MRU entries from local user data and the MRU service, newest first,
# and emit them as JSON on stdout.
items = mruuserdata.items_for_app(APP_NAME)
items += mruservice.items_for_app(APP_NAME, APP_BUNDLE_ID, APP_URL_PREFIX, EXTENSION_TO_ICON_NAME)
items.sort(key=operator.itemgetter('Timestamp'), reverse=True)
print(json.dumps(items))
| 711 | 336 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 22 12:03:16 2021
@author: user
"""
'''
Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.
Notice that the solution set must not contain duplicate triplets.
Example 1:
Input: nums = [-1,0,1,2,-1,-4]
Output: [[-1,-1,2],[-1,0,1]]
Example 2:
Input: nums = []
Output: []
Example 3:
Input: nums = [0]
Output: []
Constraints:
0 <= nums.length <= 3000
-105 <= nums[i] <= 105
Accepted
1,304,501
Submissions
4,576,232
'''
'''
Slow brute-force approach: tries every triple of indices.
'''
class Solution:
    def threeSum(self, nums):
        """Return all unique triplets in `nums` summing to zero (brute force)."""
        if len(nums) < 3:
            return []
        import itertools
        triplets = []
        # Enumerate every index triple in lexicographic order.
        for combo in itertools.combinations(range(len(nums)), 3):
            values = [nums[idx] for idx in combo]
            if sum(values) == 0:
                ordered = sorted(values)
                # De-duplicate by value, keeping first-seen order.
                if ordered not in triplets:
                    triplets.append(ordered)
        return triplets
# Quick manual checks for the brute-force implementation.
y=Solution()
nums = [-1,0,1,2,-1,-4]
print(y.threeSum(nums))
nums = [0,0,0]
print(y.threeSum(nums))
# nums = [0]
# print(y.threeSum(nums))
'''
Faster approach: sort the numbers, then use a value-to-last-index map
to locate the third element of each candidate pair.
'''
class Solution:
    def threeSum(self, nums):
        """Return all unique zero-sum triplets using a value->last-index map."""
        if len(nums) < 3:
            return []
        nums = sorted(nums)
        # Later entries overwrite earlier ones, so each value maps to its
        # last position in the sorted list.
        last_index = {value: pos for pos, value in enumerate(nums)}
        triplets = []
        for i, a in enumerate(nums):
            if a > 0:
                # Sorted input: no zero-sum triplet can start with a positive.
                break
            for j, b in enumerate(nums[i + 1:]):
                needed = -(a + b)
                # Require the third element to sit strictly after `b`
                # (absolute index of `b` is i + j + 1).
                if needed in last_index and last_index[needed] > i + j + 1:
                    candidate = sorted([needed, a, b])
                    if candidate not in triplets:
                        triplets.append(candidate)
        return triplets
# Quick manual checks for the faster implementation.
y=Solution()
nums = [-1,0,1,2,-1,-4]
print(y.threeSum(nums))
# nums = [0,0,0]
# print(y.threeSum(nums))
# nums = [0]
# print(y.threeSum(nums))
| 2,458 | 849 |
class ResponseErrors:
    """Canonical user-facing error messages returned by the API."""

    # Generic / request-level errors.
    DEFAULT = 'An unexpected error occurred while processing this request'
    INVALID_JSON = 'Unable to parse JSON from the request body'
    INVALID_LOGIN = 'The username/password you specified is invalid'
    # User and account errors.
    USER_DNE = 'User does not exist'
    ACCOUNT_DNE = 'Account does not exist'
    ACCOUNT_NO_ACCESS = 'Invalid account'
    ACCOUNT_EXISTS = 'Account already exists for this user'
    ACCOUNT_INVALID_ACTION = 'Invalid action for account'
    ACCOUNT_INSUFFICIENTFUNDS = 'Insufficient funds'
    # Stock and trading errors.
    STOCK_DNE = 'Stock does not exist'
    STOCK_EXISTS = 'Stock already exists'
    STOCK_DATA_UNAVAILABLE = 'Stock data is currently unavailable'
    NOT_ENOUGH_FUNDS = 'Not enough funds to make this trade'
    TOO_MANY_SHARES = 'Shares passed is greater than what is owned'
from unittest import TestCase
from torch import Tensor
from abcde.loss import PairwiseRankingCrossEntropyLoss
class TestPairwiseRankingLoss(TestCase):
    """Regression test for PairwiseRankingCrossEntropyLoss on a fixed batch."""

    def test_simple_case(self):
        loss = PairwiseRankingCrossEntropyLoss()
        # src_ids/targ_ids index the node pairs whose predicted ranking is compared.
        res = loss(pred_betweenness=Tensor([[0.5], [0.7], [3]]), target_betweenness=Tensor([[0.2], [1], [2]]),
                   src_ids=Tensor([0, 1, 2, 2, 1, 0, 1, 2, 2, 1, 0, 1, 2, 2, 1, ]).long(),
                   targ_ids=Tensor([1, 0, 0, 1, 2, 1, 0, 0, 1, 2, 1, 0, 0, 1, 2, ]).long())
        # This number is taken from the tensorflow implementation
        # NOTE(review): `res` is presumably a 0-d tensor; assertAlmostEqual
        # relies on its implicit float conversion — confirm it compares as intended.
        self.assertAlmostEqual(res, 0.636405362070762)
| 651 | 286 |
#!/usr/bin/env
# -*- coding: utf-8 -*-
# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Victor M. Mendiola Lau <ryuzakyl@gmail.com>, January 2017
import os
import scipy.io as sio
import utils.datasets as utils
# ---------------------------------------------------------------
# data set paths
# Both paths are resolved relative to this module's own directory.
__data_set_path = "{}/data/peasraw-dataset.mat".format(os.path.split(__file__)[0])
__pickle_path = "{}/cache/mvda_peas_raw.pickle".format(os.path.split(__file__)[0])
# ---------------------------------------------------------------
# TODO: Add docstring with usage examples (see 'uv_fuel' data set)
@utils.load_data_from_pickle(__pickle_path)
def load_mvda_peas_raw():
    """Load the raw MVDA peas data set; results are cached via the decorator."""
    # loading matlab data set
    raw_data = sio.loadmat(__data_set_path)
    features_labels = raw_data['var_labels_all']
    data = raw_data['data_all']
    # Samples carry no names in the .mat file, so label them 1..N.
    samples_labels = list(range(1, data.shape[0] + 1))
    return utils.build_data_set(data, samples_labels, features_labels)
| 1,088 | 368 |
# selection sort
# Steps:
#   1. Find the smallest item in the unsorted tail of the list.
#   2. Swap it to the front of that tail; repeat for every position.
# BUG FIX: the original only located the global minimum and printed the
# unchanged list n times — it never swapped, so nothing was sorted.


def selection_sort(items):
    """Sort `items` in place with selection sort and return the list."""
    n = len(items)
    for i in range(n - 1):
        # Index of the smallest element in items[i:].
        smallest = i
        for j in range(i + 1, n):
            if items[j] < items[smallest]:
                smallest = j
        # Move that minimum into position i.
        items[i], items[smallest] = items[smallest], items[i]
    return items


lista = [7, 5, 1, 3, 8]
selection_sort(lista)
print(lista)
import argparse
import unittest
from ffwd.ffwd_send import tag_type
class TestFFWDSend(unittest.TestCase):
    """Unit tests for ffwd_send's tag_type argument parser."""

    def test_tag_type(self):
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual instead.
        self.assertEqual(('hello', 'world'), tag_type("hello:world"))
        # Only the first ':' splits; the remainder stays in the value.
        self.assertEqual(('hello', 'world:two'), tag_type("hello:world:two"))
        with self.assertRaises(argparse.ArgumentTypeError):
            tag_type('hello')
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 防篡改
Case Name : 验证历史记录数可配置
Description :
1.查询参数password_reuse_max值
2.修改password_reuse_max参数值为3
3.恢复默认值
Expect :
1.显示默认值0
2.显示设置后的值3
3.默认值恢复成功
History :
"""
import os
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
class ModifyCase(unittest.TestCase):
    """Verify that the password_reuse_max history length is configurable."""

    def setUp(self):
        self.logger = Logger()
        self.logger.info(f'-----{os.path.basename(__file__)} start-----')
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.Constant = Constant()
        self.common = Common()
        # Remember the current value so tearDown can restore it.
        self.default_value = self.common.show_param('password_reuse_max')

    def test_security(self):
        text = '----step1:查询参数password_reuse_max值; expect:默认值0----'
        self.logger.info(text)
        show_para = self.default_value
        self.logger.info(show_para)
        self.assertEqual("0", show_para, "执行失败:" + text)
        text = '----step2:修改password_reuse_max参数值为3 expect:成功----'
        self.logger.info(text)
        sql_cmd = self.primary_sh.execut_db_sql(f'''
            alter system set password_reuse_max to 3;
            select pg_sleep(2);
            show password_reuse_max;''')
        self.logger.info(sql_cmd)
        # The second-to-last line of the psql output holds the shown value.
        self.assertEqual("3", sql_cmd.split("\n")[-2].strip(),
                         "执行失败:" + text)

    def tearDown(self):
        text = '----step3:恢复默认值 expect:成功----'
        self.logger.info(text)
        sql_cmd = self.primary_sh.execut_db_sql(f'''
            alter system set password_reuse_max to {self.default_value};
            select pg_sleep(2);
            show password_reuse_max;''')
        self.logger.info(sql_cmd)
        # NOTE(review): asserts "0" rather than self.default_value — assumes
        # the saved default is always 0; confirm on non-default clusters.
        self.assertEqual("0", sql_cmd.split("\n")[-2].strip(),
                         "执行失败:" + text)
        self.logger.info(f'-----{os.path.basename(__file__)} end-----')
| 2,475 | 920 |
class Meme(object):
    """A meme with a display name, an image resource, and associated themes."""

    def __init__(self, name, image, themes):
        # Name-mangled attributes preserved for compatibility with callers
        # relying on the existing private names.
        self.__name = name
        self.__image = image
        self.__themes = themes
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for abandoning instances owned by a managed instance group."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
def _AddArgs(parser, multizonal):
  """Adds the group name, --instances flag and scope flags to the parser."""
  parser.add_argument('name',
                      help='The managed instance group name.')
  parser.add_argument(
      '--instances',
      type=arg_parsers.ArgList(min_length=1),
      action=arg_parsers.FloatingListValuesCatcher(),
      metavar='INSTANCE',
      required=True,
      help='Names of instances to abandon.')
  if multizonal:
    # Region and zone are mutually exclusive for multizonal groups.
    scope_parser = parser.add_mutually_exclusive_group()
    flags.AddRegionFlag(
        scope_parser,
        resource_type='instance group',
        operation_type='abandon instances',
        explanation=flags.REGION_PROPERTY_EXPLANATION_NO_DEFAULT)
    flags.AddZoneFlag(
        scope_parser,
        resource_type='instance group manager',
        operation_type='abandon instances',
        explanation=flags.ZONE_PROPERTY_EXPLANATION_NO_DEFAULT)
  else:
    flags.AddZoneFlag(
        parser,
        resource_type='instance group manager',
        operation_type='abandon instances')
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class AbandonInstances(base_classes.BaseAsyncMutator):
  """Abandon instances owned by a managed instance group."""

  @staticmethod
  def Args(parser):
    # GA/beta only supports zonal managed instance groups.
    _AddArgs(parser=parser, multizonal=False)

  @property
  def method(self):
    # Compute API verb invoked on the service.
    return 'AbandonInstances'

  @property
  def service(self):
    return self.compute.instanceGroupManagers

  @property
  def resource_type(self):
    return 'instanceGroupManagers'

  def CreateRequests(self, args):
    """Builds the zonal AbandonInstances request list."""
    zone_ref = self.CreateZonalReference(args.name, args.zone)
    instance_refs = self.CreateZonalReferences(
        args.instances,
        zone_ref.zone,
        resource_type='instances')
    # The API expects full self-link URLs for the instances.
    instances = [instance_ref.SelfLink() for instance_ref in instance_refs]
    return [(self.method,
             self.messages.ComputeInstanceGroupManagersAbandonInstancesRequest(
                 instanceGroupManager=zone_ref.Name(),
                 instanceGroupManagersAbandonInstancesRequest=(
                     self.messages.InstanceGroupManagersAbandonInstancesRequest(
                         instances=instances,
                     )
                 ),
                 project=self.project,
                 zone=zone_ref.zone,
             ),),]
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AbandonInstancesAlpha(base_classes.BaseAsyncMutator,
                            instance_groups_utils.InstancesReferenceMixin):
  """Abandon instances owned by a managed instance group."""

  @staticmethod
  def Args(parser):
    # Alpha additionally supports regional (multizonal) groups.
    _AddArgs(parser=parser, multizonal=True)

  @property
  def method(self):
    return 'AbandonInstances'

  @property
  def service(self):
    return self.compute.instanceGroupManagers

  @property
  def resource_type(self):
    return 'instanceGroupManagers'

  def CreateRequests(self, args):
    """Builds the zonal or regional AbandonInstances request."""
    errors = []
    group_ref = instance_groups_utils.CreateInstanceGroupReference(
        scope_prompter=self, compute=self.compute, resources=self.resources,
        name=args.name, region=args.region, zone=args.zone)
    instances = self.CreateInstanceReferences(
        group_ref, args.instances, errors)
    # Pick the zonal or regional service based on the resolved reference.
    if group_ref.Collection() == 'compute.instanceGroupManagers':
      service = self.compute.instanceGroupManagers
      request = (
          self.messages.
          ComputeInstanceGroupManagersAbandonInstancesRequest(
              instanceGroupManager=group_ref.Name(),
              instanceGroupManagersAbandonInstancesRequest=(
                  self.messages.InstanceGroupManagersAbandonInstancesRequest(
                      instances=instances,
                  )
              ),
              project=self.project,
              zone=group_ref.zone,
          ))
    else:
      service = self.compute.regionInstanceGroupManagers
      request = (
          self.messages.
          ComputeRegionInstanceGroupManagersAbandonInstancesRequest(
              instanceGroupManager=group_ref.Name(),
              regionInstanceGroupManagersAbandonInstancesRequest=(
                  self.messages.
                  RegionInstanceGroupManagersAbandonInstancesRequest(
                      instances=instances,
                  )
              ),
              project=self.project,
              region=group_ref.region,
          ))
    return [(service, self.method, request)]
# User-facing help text for the command (typo "the delete" fixed to "to delete").
AbandonInstances.detailed_help = {
    'brief': 'Abandon instances owned by a managed instance group.',
    'DESCRIPTION': """
*{command}* abandons one or more instances from a managed instance
group, thereby reducing the targetSize of the group. Once instances have been
abandoned, the currentSize of the group is automatically reduced as well to
reflect the change.
Abandoning an instance does not delete the underlying virtual machine instances,
but just removes the instances from the instance group. If you would like to
delete the underlying instances, use the delete-instances command instead.
""",
}
# The alpha command shares the same help text as the GA/beta command.
AbandonInstancesAlpha.detailed_help = AbandonInstances.detailed_help
| 5,981 | 1,610 |
class TreeNode:
    """Binary tree node (matches the LeetCode-provided definition)."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    """Check whether a binary tree is a mirror of itself (LeetCode 101)."""

    def isSymmetric(self, root) -> bool:
        """Return True if the tree rooted at `root` is symmetric.

        Iteratively compares mirrored node pairs, avoiding the per-level
        value lists of the original and short-circuiting on the first
        mismatch.
        """
        if root is None:
            return True
        pairs = [(root.left, root.right)]
        while pairs:
            left, right = pairs.pop()
            if left is None and right is None:
                continue
            if left is None or right is None or left.val != right.val:
                return False
            # Outer children mirror each other, as do inner children.
            pairs.append((left.left, right.right))
            pairs.append((left.right, right.left))
        return True

    def isPalindrome(self, values):
        """Return True if `values` reads the same forwards and backwards."""
        i, j = 0, len(values) - 1
        while i < j:
            if values[i] != values[j]:
                return False
            i += 1
            j -= 1
        return True