text stringlengths 957 885k |
|---|
# -*- coding: utf-8 -*-
"""model_training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zAyiq6cN_OEIjv9yaG6xJXbop2lcREhP
"""
#%%
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy
# from sklearn.linear_model import ElasticNet, Ridge, Lasso, LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
# from sklearn.ensemble import RandomForestRegressor
from xgboost.sklearn import XGBRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, DotProduct, Matern, RationalQuadratic, WhiteKernel
from sklearn.model_selection import GridSearchCV, KFold
# from sklearn.preprocessing import StandardScaler
import pickle
#%%
# Load dataset: one row per formulation; columns are ingredient fractions
# plus the measured target properties.
Dataset = pd.read_csv("data/UD_867_formulation_training.csv")
# Target properties to predict (one model is trained per target).
TARGETS = ['Water_Absorption_%', 'Hardness', 'Thermal_Conductivity_(mW/m.K)']
# Features considered non-relevant; dropped when TRAINING_OPTION == 'partial'.
NR_FEATURES = ['Clarifier_1', 'Clarifier_2', 'Clarifier_3',
               'Polymer_3', 'UV_absorber_1', 'UV_absorber_2',
               'Filler_2', 'Filler_3']
# 'partial' drops NR_FEATURES; any other value (e.g. 'full') keeps all features.
TRAINING_OPTION = 'partial'  # if remove 'partial'
#%%
"""# 1. Data preprocessing"""
# Reproducibility
SEED = 12345
# Select only features from dataset (drop the sample name and the targets).
X_train = Dataset.drop(columns=['name'] + TARGETS)
# Add the row-wise Shannon entropy of the composition as an extra feature.
X_train['entropy'] = scipy.stats.entropy(X_train, axis=1)
# Removing some features may help?
if TRAINING_OPTION == 'partial':
    X_train = X_train.drop(columns=NR_FEATURES)
# Select only targets from dataset
Y_train = Dataset[TARGETS]
# Standardization (z-score): mu/sigma are kept and pickled at the end so the
# exact same scaling can be applied at prediction time.
mu = X_train.mean(axis=0)
sigma = X_train.std(axis=0)
X_train = (X_train - mu) / sigma
# y_train = Y_train[TARGETS[0]]
#%%
"""# 2. Models definitions"""
# Hyper-parameter search grids, keyed by model class name.
# Keys must match the names accepted by set_model().
models = {
    # Similarity-based regressors
    'KNeighborsRegressor': {
        # 'n_neighbors': np.arange(1,3,2),
        'n_neighbors': np.arange(1, 15, 2),  # odd neighbor counts 1..13
        'weights': ['uniform', 'distance'],
        'p': [1, 2],  # Minkowski power (1 = Manhattan, 2 = Euclidean)
        'metric': ['minkowski', 'chebyshev'],
    },
    'GaussianProcessRegressor': {
        # Placeholder: filled with restart_kernels(...) right before the search.
        'kernel': None,
        'n_restarts_optimizer': [5],
        'random_state': [SEED]
    },
    # Tree-based regressors
    'XGBRegressor': {
        'learning_rate': np.arange(0.025, 0.150, 0.025),
        'gamma': np.arange(0.05, 0.45, 0.05),
        # 'max_depth':np.arange(2,14,2),
        # 'min_child_weight':np.arange(1,8,1),
        'n_estimators': np.arange(10, 80, 5),
        # 'subsample':np.arange(0.60,0.95,0.05),
        # 'colsample_bytree':np.arange(0.60,0.95,0.05),
        'reg_alpha': np.logspace(-3, 2, 6),   # alpha (L1 regularization)
        'reg_lambda': np.logspace(-3, 2, 6),  # lambda (L2 regularization)
    },
}
#%%
# Model instantiation
def set_model(name):
    """Return a fresh, unconfigured regressor for the given model name.

    :param name: one of 'KNeighborsRegressor', 'GaussianProcessRegressor'
        or 'XGBRegressor' (the keys of the ``models`` search-space dict).
    :return: an unfitted scikit-learn-compatible regressor instance.
    :raise ValueError: if ``name`` is not a recognized model name.
    """
    # Similarity-based regressors
    if name == 'KNeighborsRegressor':
        return KNeighborsRegressor()
    if name == 'GaussianProcessRegressor':
        return GaussianProcessRegressor()
    # Tree-based regressor
    if name == 'XGBRegressor':
        return XGBRegressor()
    # Fix: the original fell through here and raised UnboundLocalError on
    # `return model`; fail with an explicit, actionable error instead.
    raise ValueError(f"Unknown model name: {name!r}")
#%%
# Kernel initialization
def restart_kernels(init_length_scale=1.0):
    """Return a fresh list of candidate GP kernels, each paired with a
    WhiteKernel noise term. New instances are built on every call so that
    previously fitted hyper-parameters never leak between searches."""
    kernels = [
        1.0 * RBF(length_scale=init_length_scale) + 1.0 * WhiteKernel(),
        1.0 * DotProduct() + 1.0 * WhiteKernel(),
    ]
    # Matern kernels of increasing smoothness (nu = 0.5, 1.5, 2.5).
    for nu in (0.5, 1.5, 2.5):
        kernels.append(1.0 * Matern(length_scale=init_length_scale, nu=nu) + 1.0 * WhiteKernel())
    kernels.append(1.0 * RationalQuadratic() + 1.0 * WhiteKernel())
    return kernels
#%%
# Training loop for best models
# Regressor type chosen per target; order matches TARGETS.
if TRAINING_OPTION == 'full':
    best_models_names = ['GaussianProcessRegressor', 'KNeighborsRegressor', 'XGBRegressor']
elif TRAINING_OPTION == 'partial':
    best_models_names = ['GaussianProcessRegressor', 'GaussianProcessRegressor', 'XGBRegressor']
best_trained_models = []
for j, target in enumerate(TARGETS):
    y_train = Y_train[target]
    # type of regressor
    m_i = best_models_names[j]
    # define the model
    model = set_model(m_i)
    if m_i == 'GaussianProcessRegressor':
        # Anisotropic initial length scale: one value per feature column.
        models[m_i]['kernel'] = restart_kernels(np.ones(X_train.shape[1]))
    # define search space
    space = models[m_i]
    # configure the cross-validation procedure
    cv_inner = KFold(n_splits=10, shuffle=True, random_state=SEED)
    # define search (exhaustive grid, R^2 scoring, refit best on full data)
    search = GridSearchCV(model, space, scoring='r2', cv=cv_inner, refit=True, n_jobs=-1)
    # execute search
    result = search.fit(X_train, y_train)
    # get the best performing model fit on the whole training set
    best_model = result.best_estimator_
    best_trained_models.append(best_model)
#%%
"""# 3. Saving models"""
# Persist the per-target best models (same order as TARGETS).
models_name = TRAINING_OPTION + '_dataset' + '_models.dump'
with open(models_name, "wb") as f:
    pickle.dump(best_trained_models, f)
# Persist the standardization parameters so inference can apply the same scaling.
params_name = 'standardizer_' + TRAINING_OPTION + '_dataset' + '.dump'
# Open a file and use dump()
with open(params_name, 'wb') as file:
    # A new file will be created
    pickle.dump([mu, sigma], file)
|
import sys, os, re, json, pprint, traceback
from pathlib import Path
from aiohttp import web
routes = web.RouteTableDef()

async def app():
    """Build and return the aiohttp Application (async entry point for the runner)."""
    from web_chains_202105 import directories
    app = web.Application(middlewares=[exception_middleware])
    app.add_routes(routes)
    app.router.add_static("/js/", path="js", name="js")
    directories.load(app)
    app["charts"] = {}  # loaded charts: {ace-path: acmacs.Chart}, loading is done on demand
    sys.path[:0] = ["lib"]  # to be able to import acmacs module (acmacs_py) by web_chains_202105.chart
    return app
# ======================================================================
# https://docs.aiohttp.org/en/stable/web_advanced.html
@web.middleware
async def exception_middleware(request, handler):
    """Convert unhandled exceptions into cgitb tracebacks: JSON for /api paths,
    HTML for regular pages. Note: responses are returned with status 200."""
    try:
        return await handler(request)
    except Exception as err:
        import cgitb
        context = 10  # number of source-context lines shown in the traceback
        if "/api" in request.path:
            return web.json_response({"ERROR": str(err), "tb": cgitb.text(sys.exc_info(), context=context)})
        else:
            return web.Response(text=cgitb.html(sys.exc_info(), context=context), content_type='text/html')
# ======================================================================
# pages
# ======================================================================
@routes.get("/")
async def index(request):
from web_chains_202105.index_page import index_page
return index_page(request=request)
# ----------------------------------------------------------------------
@routes.get("/table")
async def table_data(request):
from web_chains_202105.table_page import table_page
return table_page(request=request, subtype_id=request.query["subtype_id"], table_date=request.query["date"])
@routes.get("/chain")
async def chain_data(request):
from web_chains_202105.chain_page import chain_page
return chain_page(request=request, subtype_id=request.query["subtype_id"], chain_id=request.query["chain_id"])
# ======================================================================
# images
# ======================================================================
def image(request, image_type):
    """Render a chart image ("map") or a procrustes comparison ("pc") and return
    it as an HTTP response.

    :param request: aiohttp request; query params are parsed via request_args().
    :param image_type: output format, "png" or "pdf" (keys of sMimeType).
    :return: web.Response with the rendered image, or a 418 response for an
        unsupported "type" query parameter.
    """
    from web_chains_202105.chart import get_map
    args = request_args(request)
    # Refactor: the two supported branches differed only in the suggested
    # download filename; compute it per type and share the response code.
    if args["type"] == "map":
        filename = Path(args["ace"]).with_suffix("." + image_type).name
    elif args["type"] == "pc":
        filename = f'pc-{Path(args["ace1"]).stem}-vs-{Path(args["ace2"]).stem}.{image_type}'
    else:
        print(f">> WARNING: unsupported {image_type}:", request.query)
        return web.Response(text=str(request.query), status=418, headers={"Error": f"unsupported {image_type}"})
    headers = {
        "pid": str(os.getpid()),
        "Content-Disposition": f'inline; filename="{filename}"',
    }
    return web.Response(body=get_map(request=request, image_type=image_type, **args), content_type=sMimeType[image_type], headers=headers)
@routes.get("/png")
async def png(request):
return image(request=request, image_type="png")
@routes.get("/pdf")
async def pdf(request):
return image(request=request, image_type="pdf")
# ======================================================================
# ace
# ======================================================================
@routes.get("/ace")
async def ace(request):
args = {kk: v for kk, v in ((k, t(request.query.get(k))) for k,t in [["ace", Path]]) if v is not None}
headers = {
"pid": str(os.getpid()),
"Content-Disposition": f'inline; filename="{args["ace"].name}"',
}
return web.Response(body=args["ace"].open("rb").read(), content_type=sMimeType["ace"], headers=headers)
# ======================================================================
# api
# ======================================================================
@routes.get("/api/subtype-data/")
async def subtype_data(request):
from web_chains_202105.index_page import collect_tables_of_subtype
subtype_id = request.query["subtype_id"]
return web.json_response({
"tables": collect_tables_of_subtype(subtype_id),
"subtype_id": subtype_id,
})
# ======================================================================
# utils
# ======================================================================
def bool_from_str(src):
    """Interpret a query-string value as a boolean.

    Falsy input (None, "") is returned unchanged — request_args relies on a
    None result to drop absent parameters; otherwise return whether the
    string is one of the accepted truthy spellings (case-insensitive)."""
    if not src:
        return src
    return src.lower() in ("true", "yes", "1")
# MIME types used by the image/download endpoints.
sMimeType = {
    "png": "image/png",
    "pdf": "application/pdf",
    "ace": "application/octet-stream",
}

# Recognized query parameters and the converter applied to each raw value
# (see request_args below).
sRequestArgTypes = [
    ["type", str],
    ["ace", str],
    ["ace1", str],  # pc
    ["ace2", str],  # pc
    ["coloring", str],
    ["size", int],
    ["save_chart", bool_from_str]
]
def request_args(request):
    """Extract and convert the recognized query parameters (sRequestArgTypes).

    :param request: aiohttp request whose .query is a mapping of raw strings.
    :return: dict of converted values; absent parameters are omitted.
    """
    args = {}
    for key, conv in sRequestArgTypes:
        raw = request.query.get(key)
        # Fix: skip absent parameters BEFORE converting. The original applied
        # the converter to None, so a missing "size" raised TypeError
        # (int(None)) and a missing string parameter was stored as the
        # literal string "None".
        if raw is None:
            continue
        value = conv(raw)
        if value is not None:
            args[key] = value
    return args
# ======================================================================
|
import sys
import json, flask
import os
import psycopg2
from sqlalchemy import create_engine
from app import app
from app import get_scanned
class Query:
    """Wrapper around a PostgreSQL connection that records system-scan results
    (debs, groups, shadow, users) and reads back changed rows, delegating all
    real work to server-side stored procedures (store_* / get_*_unique).

    Connection parameters are read from db_config.json at construction time.
    """

    def __init__(self):
        # Load DB credentials from the local JSON config file.
        with open('db_config.json', 'r') as db_file:
            db_info = json.load(db_file)
        self.db_name = db_info["database"]["database_name"]
        self.username = db_info["database"]["username"]
        self.password = db_info["database"]["password"]
        self.host = db_info["database"]["host"]
        # DSN in URL form; the port is fixed at 5432.
        self.engine_name = "postgresql://" + self.username + ":" + self.password + "@" + self.host + ":5432/" + self.db_name
        # One shared connection/cursor for the lifetime of this object.
        self.conn = psycopg2.connect(self.engine_name)
        self.cur = self.conn.cursor()

    def record_flavors(self):
        """Store the current scan snapshot via stored procedures.

        Reads the module-level lists collected by get_scanned (debs, groups,
        shadow, users) and commits them in one transaction.
        """
        self.cur.execute("select store_datetime()")
        self.cur.executemany("select store_debs(%s, %s ,%s, %s)", get_scanned.debs)
        self.cur.executemany("select store_groups(%s, %s, %s, %s)", get_scanned.groups)
        self.cur.executemany("select store_shadow(%s, %s, %s, %s, %s, %s, %s, %s, %s)", get_scanned.shadow)
        self.cur.executemany("select store_users(%s, %s, %s, %s,%s, %s, %s)", get_scanned.users)
        self.conn.commit()

    def record_knowledge(self, json_file, name, resource, action):
        """Register a knowledge entry and store any new rows from a JSON report.

        :param json_file: path (relative to app/) of a JSON report laid out as
            [debs, groups, shadow, users], each with a "New" list or the
            sentinel ["No changes"].
        :param name: knowledge entry name passed to store_knowledge.
        :param resource: resource identifier passed to store_knowledge.
        :param action: action identifier passed to store_knowledge.
        """
        self.cur.execute("select store_knowledge(%s, %s, %s)", (name, resource, action))
        with open("app/" + json_file, 'r') as json_res:
            self.res = json.load(json_res)
        # The report is a 4-element list in a fixed order.
        debs = self.res[0]
        new_debs = debs["Debs"]["New"]
        groups = self.res[1]
        new_groups = groups["Groups"]["New"]
        shadow = self.res[2]
        new_shadow = shadow["Shadow"]["New"]
        users = self.res[3]
        new_users = users["Users"]["New"]
        # NOTE(review): loop variables are stored on self (self.nd, self.ng, ...)
        # — presumably unintentional, but left unchanged here.
        if new_debs[0] != "No changes":
            for self.nd in new_debs:
                self.deb = self.cur.execute("select store_knowledge_debs(%s, %s ,%s, %s)", (self.nd["Stat"], self.nd["Name"], self.nd["Version"], self.nd["Architecture"]))
        if new_groups[0] != "No changes":
            for self.ng in new_groups:
                self.cur.execute("select store_knowledge_groups(%s, %s ,%s, %s)", (self.ng["Group Name"], self.ng["Password"], self.ng["Gid"], self.ng["Users"]))
        if new_shadow[0] != "No changes":
            for self.ns in new_shadow:
                self.cur.execute("select store_knowledge_shadow(%s, %s ,%s, %s, %s, %s ,%s, %s, %s)", (self.ns["Username"], self.ns["Password"], self.ns["Last Changed"], self.ns["Minimum"], self.ns["Maximum"], self.ns["Warn"], self.ns["Inactive"], self.ns["Expire"], self.ns["Reserve"]))
        if new_users[0] != "No changes":
            for self.nu in new_users:
                self.cur.execute("select store_knowledge_users(%s, %s ,%s, %s, %s, %s, %s )",(self.nu["Username"], self.nu["Password"], self.nu["UID"], self.nu["GID"], self.nu["Description"], self.nu["Path"], self.nu["Shell"]))
        self.conn.commit()

    def new_debs(self):
        """Return changed deb packages as a list of dicts, or ['No changes'].

        Each fetched row is a single string "(stat,name,version,arch)"; the
        surrounding parentheses are stripped from the first/last fields.
        """
        self.debs = self.cur.execute("select get_debs_unique()")
        self.debs = self.cur.fetchall()
        self.new_debs = []
        if len(self.debs) != 0:
            for self.deb in self.debs:
                d = str(self.deb[0])
                db = d.split(',')
                self.new_debs.append({"Stat" : db[0][1:],
                                      "Name" : db[1],
                                      "Version" : db[2],
                                      "Architecture" : db[3][:len(db[3]) - 1]})
        else:
            self.new_debs.append('No changes')
        self.conn.commit()
        return self.new_debs

    def new_groups(self):
        """Return changed groups as a list of dicts, or ['No changes']."""
        self.groups = self.cur.execute("select get_groups_unique()")
        self.groups = self.cur.fetchall()
        self.new_groups = []
        if len(self.groups) != 0:
            for self.group in self.groups:
                g = str(self.group[0])
                gr = g.split(',')
                self.new_groups.append({"Group Name" : gr[0][1:],
                                        "Password" : gr[1],
                                        "Gid" : gr[2],
                                        "Users" : gr[3][:len(gr[3]) - 1]})
        else:
            self.new_groups.append('No changes')
        self.conn.commit()
        return self.new_groups

    def new_shadow(self):
        """Return changed shadow entries as a list of dicts, or ['No changes']."""
        self.shadow = self.cur.execute("select get_shadow_unique()")
        self.shadow = self.cur.fetchall()
        self.new_shadow = []
        if len(self.shadow) != 0:
            for self.shad in self.shadow:
                s = str(self.shad[0])
                sh = s.split(',')
                self.new_shadow.append({"Username" : sh[0][1:],
                                        "Password" : sh[1],
                                        "Last Changed" : sh[2],
                                        "Minimum" : sh[3],
                                        "Maximum" : sh[4],
                                        "Warn" : sh[5],
                                        "Inactive" : sh[6],
                                        "Expire" : sh[7],
                                        "Reserve" : sh[8][:len(sh[8]) - 1]})
        else:
            self.new_shadow.append('No changes')
        self.conn.commit()
        return self.new_shadow

    def new_users(self):
        """Return changed users as a list of dicts, or ['No changes']."""
        self.users = self.cur.execute("select get_users_unique()")
        self.users = self.cur.fetchall()
        self.new_users = []
        if len(self.users) != 0:
            for self.user in self.users:
                u = str(self.user[0])
                us = u.split(',')
                self.new_users.append({"Username" : us[0][1:],
                                       "Password" : us[1],
                                       "UID" : us[2],
                                       "GID" : us[3],
                                       "Description" : us[4],
                                       "Path" : us[5],
                                       "Shell" : us[6][:len(us[6]) - 1]})
        else:
            self.new_users.append('No changes')
        self.conn.commit()
        return self.new_users

    def null_cases(self):
        """Sanity check: storing the same row with a NULL name twice must be
        deduplicated by the stored procedure (exactly one row remains)."""
        self.cur.execute("select store_debs(%s, %s ,%s, %s)", ('stat', None, 'test', 'test'))
        self.conn.commit()
        self.cur.execute("select store_debs(%s, %s ,%s, %s)", ('stat', None, 'test', 'test'))
        self.conn.commit()
        self.cur.execute("select count(*) from debs where stat=%s and name is null and version=%s and architecture=%s", ('stat', 'test', 'test'))
        count = self.cur.fetchone()
        assert count[0] == 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import unittest
import types
import solution
class TestFizzBuzz(unittest.TestCase):
    """Tests for solution.fizz_buzz.

    fizz_buzz is expected to return a generator yielding, for each number in
    [beginning, ending], 'Three' / 'Five' / 'ThreeFive' for multiples of
    3 / 5 / 15 and the number itself otherwise; the optional third argument
    ("key") starts iteration at that value and must be a valid in-range int.
    """

    def setUp(self):
        # Default range used by most tests.
        self.default_beginning_index = 1
        self.default_ending_index = 100

    def test_wrong_type_parameters(self):
        """String arguments must be rejected."""
        with self.assertRaises(Exception):
            solution.fizz_buzz('2', '100')

    def test_return_type(self):
        """The function must return a generator."""
        self.assertEqual(types.GeneratorType, type(solution.fizz_buzz(1, 100)))

    def test_length_return(self):
        """beginning > ending must raise."""
        with self.assertRaises(Exception):
            solution.fizz_buzz(100, 2)

    def test_negative_numbers(self):
        """Negative bounds must raise."""
        with self.assertRaises(Exception):
            solution.fizz_buzz(-1, -100)

    def test_output_wrong_type_for_key_raises_exception(self):
        """A non-int key must raise TypeError."""
        with self.assertRaises(TypeError):
            solution.fizz_buzz(1, 100, 'hello')

    def test_exception_raised_when_key_not_present(self):
        """An out-of-range key must raise ValueError when iterated."""
        with self.assertRaises(ValueError):
            result = solution.fizz_buzz(1, 100, 0)
            next(result)

    def test_output_3_returns_three(self):
        index = 3
        results = solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index, index)
        self.assertEqual(next(results), 'Three')

    def test_output_95_returns_five(self):
        index = 95
        results = solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index, index)
        self.assertEqual(next(results), 'Five')

    def test_output_90_returns_threefive(self):
        index = 90
        results = solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index, index)
        self.assertEqual(next(results), 'ThreeFive')

    def test_output_16_returns_number(self):
        # 16 is divisible by neither 3 nor 5: the number itself is yielded.
        index = 16
        expected_result = 16
        results = solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index, index)
        self.assertEqual(next(results), expected_result)

    def test_output_30_returns_threefive_string(self):
        index = 30
        expected_result = 'ThreeFive'
        results = solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index, index)
        self.assertEqual(next(results), expected_result)

    def test_output_fizz_buzz_from_one_to_hundred(self):
        """The full 1..100 sequence must match the reference list element-wise."""
        expected_results = [1, 2, 'Three', 4, 'Five', 'Three', 7, 8, 'Three', 'Five', 11,
                            'Three', 13, 14, 'ThreeFive', 16, 17, 'Three', 19,
                            'Five', 'Three', 22, 23, 'Three', 'Five', 26, 'Three', 28, 29,
                            'ThreeFive', 31, 32, 'Three', 34, 'Five', 'Three', 37, 38, 'Three', 'Five',
                            41, 'Three', 43, 44, 'ThreeFive', 46, 47, 'Three', 49, 'Five',
                            'Three', 52, 53, 'Three', 'Five', 56, 'Three', 58, 59,
                            'ThreeFive', 61, 62, 'Three', 64, 'Five', 'Three', 67, 68, 'Three',
                            'Five', 71, 'Three', 73, 74, 'ThreeFive', 76, 77, 'Three',
                            79, 'Five', 'Three', 82, 83, 'Three', 'Five', 86, 'Three', 88, 89,
                            'ThreeFive', 91, 92, 'Three', 94, 'Five', 'Three', 97, 98, 'Three', 'Five']
        results = list(solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index))
        for expected_result, result in zip(expected_results, results):
            self.assertEqual(expected_result, result)

    def test_output_empty_parameters_returns_type(self):
        """Calling without arguments must still return a generator (defaults)."""
        results = solution.fizz_buzz()
        self.assertEqual(types.GeneratorType, type(results))

    def test_output_length_fizz_buzz_from_one_to_hundred(self):
        expected_result = 100
        results = solution.fizz_buzz(
            self.default_beginning_index, self.default_ending_index)
        self.assertEqual(len(list(results)), expected_result)

    def test_output_135_returns_threefive_string(self):
        expected_result = 'ThreeFive'
        index = 135
        results = solution.fizz_buzz(self.default_beginning_index, 200, index)
        self.assertEqual(next(results), expected_result)

    def test_output_1440_returns_threefive_string(self):
        expected_result = 'ThreeFive'
        index = 1440
        # Fix: pass the local `index` instead of a duplicated literal 1440,
        # consistent with every other test in this class.
        results = solution.fizz_buzz(self.default_beginning_index, 2000, index)
        self.assertEqual(next(results), expected_result)

    def test_output_7368_returns_three_string(self):
        expected_result = 'Three'
        index = 7368
        results = solution.fizz_buzz(self.default_beginning_index, 8000, index)
        self.assertEqual(next(results), expected_result)

    def test_output_148140_not_returns_five_string(self):
        # 148140 is divisible by 15, so it must NOT be plain 'Three'.
        error_result = 'Three'
        index = 148140
        results = solution.fizz_buzz(1, 200000, index)
        self.assertNotEqual(next(results), error_result)

    def test_output_6_not_returns_integer_type(self):
        # 6 is a multiple of 3: a string, not an int, must be yielded.
        index = 6
        results = solution.fizz_buzz(key=index)
        self.assertNotEqual(type(next(results)), int)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
|
"""
Django settings for example_project project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import json
from . import PROJECT_NAME
def load_settings(file_name: str) -> dict:
    """Load and return the protected settings from a JSON file.

    :param file_name: path to the JSON settings file (e.g. settings.json
        containing SECRET_KEY, DEBUG, ALLOWED_HOSTS).
    :return: the parsed JSON document as a dict.
    :raise FileNotFoundError: if the file does not exist.
    :raise json.JSONDecodeError: if the file is not valid JSON.
    """
    # Fix: pin the encoding — the default is platform-dependent and JSON
    # settings files are conventionally UTF-8.
    with open(file_name, encoding="utf-8") as f:
        return json.load(f)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Secrets and deployment flags are kept out of version control in settings.json.
protected_settings = load_settings(f"{BASE_DIR}/{PROJECT_NAME}/settings.json")
SECRET_KEY = protected_settings["SECRET_KEY"]
DEBUG = protected_settings["DEBUG"]
ALLOWED_HOSTS = protected_settings["ALLOWED_HOSTS"]
# Email: SMTP over TLS via Gmail. User/password are blank here —
# presumably injected at deployment time; TODO confirm how they are set.
EMAIL_CONFIGURED = True
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.gmail.com"
EMAIL_PORT = 587
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "contact",  # local project app
]
# Standard Django middleware stack (order matters).
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = f"{PROJECT_NAME}.urls"
# Templates: project-level templates/ plus per-app template dirs.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [
            os.path.join(BASE_DIR, "templates"),
            os.path.join(BASE_DIR, f"{PROJECT_NAME}/templates"),
        ],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = f"{PROJECT_NAME}.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "US/Central"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# User-uploaded files.
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, f"{PROJECT_NAME}/static")]
|
# -*- coding: utf-8 -*-
import pytest
import torch
from eznlp.dataset import Dataset
from eznlp.model import EncoderConfig, BertLikeConfig, BoundarySelectionDecoderConfig, ExtractorConfig
from eznlp.model.decoder.boundary_selection import _spans_from_upper_triangular
from eznlp.training import Trainer
class TestModel(object):
    """End-to-end checks for the boundary-selection extractor: batch-order
    invariance, trainability, and prediction without gold labels.

    NOTE(review): indentation was reconstructed from semantics; the original
    source was whitespace-mangled.
    """

    def _assert_batch_consistency(self):
        # Forward two overlapping 3-item batches; entries shared between them
        # must yield (near-)identical hidden states, losses and predictions.
        self.model.eval()
        batch = [self.dataset[i] for i in range(4)]
        batch012 = self.dataset.collate(batch[:3]).to(self.device)
        batch123 = self.dataset.collate(batch[1:]).to(self.device)
        losses012, states012 = self.model(batch012, return_states=True)
        losses123, states123 = self.model(batch123, return_states=True)
        hidden012, hidden123 = states012['full_hidden'], states123['full_hidden']
        # Compare only the common time steps (padding lengths may differ).
        min_step = min(hidden012.size(1), hidden123.size(1))
        delta_hidden = hidden012[1:, :min_step] - hidden123[:-1, :min_step]
        assert delta_hidden.abs().max().item() < 1e-4
        delta_losses = losses012[1:] - losses123[:-1]
        assert delta_losses.abs().max().item() < 5e-4
        pred012 = self.model.decode(batch012, **states012)
        pred123 = self.model.decode(batch123, **states123)
        assert pred012[1:] == pred123[:-1]

    def _assert_trainable(self):
        # A single training epoch must complete without error.
        optimizer = torch.optim.AdamW(self.model.parameters())
        trainer = Trainer(self.model, optimizer=optimizer, device=self.device)
        dataloader = torch.utils.data.DataLoader(self.dataset,
                                                 batch_size=4,
                                                 shuffle=True,
                                                 collate_fn=self.dataset.collate)
        trainer.train_epoch(dataloader)

    def _setup_case(self, data, device):
        # Build dataset + model from self.config (set by the calling test).
        self.device = device
        self.dataset = Dataset(data, self.config)
        self.dataset.build_vocabs_and_dims()
        self.model = self.config.instantiate().to(self.device)
        assert isinstance(self.config.name, str) and len(self.config.name) > 0

    @pytest.mark.parametrize("use_biaffine", [True, False])
    @pytest.mark.parametrize("affine_arch", ['FFN', 'LSTM'])
    @pytest.mark.parametrize("size_emb_dim", [25, 0])
    @pytest.mark.parametrize("fl_gamma, sl_epsilon, sb_epsilon, sb_size", [(0.0, 0.0, 0.0, 1),
                                                                           (2.0, 0.0, 0.0, 1),
                                                                           (0.0, 0.1, 0.0, 1),
                                                                           (0.0, 0.0, 0.1, 1),
                                                                           (0.0, 0.0, 0.1, 2),
                                                                           (0.0, 0.0, 0.1, 3),
                                                                           (0.0, 0.1, 0.1, 1)])
    def test_model(self, use_biaffine, affine_arch, size_emb_dim, fl_gamma, sl_epsilon, sb_epsilon, sb_size, conll2004_demo, device):
        # Sweep decoder hyper-parameters (focal loss / label smoothing /
        # boundary smoothing) and check consistency + trainability.
        self.config = ExtractorConfig(decoder=BoundarySelectionDecoderConfig(use_biaffine=use_biaffine,
                                                                             affine=EncoderConfig(arch=affine_arch),
                                                                             size_emb_dim=size_emb_dim,
                                                                             fl_gamma=fl_gamma, sl_epsilon=sl_epsilon, sb_epsilon=sb_epsilon, sb_size=sb_size))
        self._setup_case(conll2004_demo, device)
        self._assert_batch_consistency()
        self._assert_trainable()

    def test_model_with_bert_like(self, conll2004_demo, bert_with_tokenizer, device):
        # Same checks with a BERT-like encoder instead of the default stack.
        bert, tokenizer = bert_with_tokenizer
        self.config = ExtractorConfig('boundary_selection',
                                      ohots=None,
                                      bert_like=BertLikeConfig(tokenizer=tokenizer, bert_like=bert),
                                      intermediate2=None)
        self._setup_case(conll2004_demo, device)
        self._assert_batch_consistency()
        self._assert_trainable()

    def test_prediction_without_gold(self, conll2004_demo, device):
        # Prediction must work on data that carries tokens only (no chunks).
        self.config = ExtractorConfig('boundary_selection')
        self._setup_case(conll2004_demo, device)
        data_wo_gold = [{'tokens': entry['tokens']} for entry in conll2004_demo]
        dataset_wo_gold = Dataset(data_wo_gold, self.config, training=False)
        trainer = Trainer(self.model, device=device)
        set_chunks_pred = trainer.predict(dataset_wo_gold)
        assert len(set_chunks_pred) == len(data_wo_gold)
@pytest.mark.parametrize("sb_epsilon", [0.0, 0.1])
@pytest.mark.parametrize("sl_epsilon", [0.0, 0.1])
def test_boundaries_obj(sb_epsilon, sl_epsilon, EAR_data_demo):
entry = EAR_data_demo[0]
tokens, chunks = entry['tokens'], entry['chunks']
config = ExtractorConfig(decoder=BoundarySelectionDecoderConfig(sb_epsilon=sb_epsilon, sl_epsilon=sl_epsilon))
dataset = Dataset(EAR_data_demo, config)
dataset.build_vocabs_and_dims()
boundaries_obj = dataset[0]['boundaries_obj']
num_tokens, num_chunks = len(tokens), len(chunks)
assert boundaries_obj.chunks == chunks
if sb_epsilon == 0 and sl_epsilon == 0:
assert all(boundaries_obj.boundary2label_id[start, end-1] == config.decoder.label2idx[label] for label, start, end in chunks)
labels_retr = [config.decoder.idx2label[i] for i in boundaries_obj.boundary2label_id[torch.arange(num_tokens) >= torch.arange(num_tokens).unsqueeze(-1)].tolist()]
else:
assert all(boundaries_obj.boundary2label_id[start, end-1].argmax() == config.decoder.label2idx[label] for label, start, end in chunks)
labels_retr = [config.decoder.idx2label[i] for i in boundaries_obj.boundary2label_id[torch.arange(num_tokens) >= torch.arange(num_tokens).unsqueeze(-1)].argmax(dim=-1).tolist()]
assert (boundaries_obj.boundary2label_id.sum(dim=-1) - 1).abs().max().item() < 1e-6
if sb_epsilon == 0:
assert (boundaries_obj.boundary2label_id[:, :, config.decoder.none_idx] < 1).sum().item() == num_chunks
else:
assert (boundaries_obj.boundary2label_id[:, :, config.decoder.none_idx] < 1).sum().item() > num_chunks
chunks_retr = [(label, start, end) for label, (start, end) in zip(labels_retr, _spans_from_upper_triangular(num_tokens)) if label != config.decoder.none_label]
assert set(chunks_retr) == set(chunks)
extractor = config.instantiate()
# \sum_{k=0}^N k(N-k), where N is `num_tokens`
assert extractor.decoder._span_size_ids.sum().item() == (num_tokens**3 - num_tokens) // 6
assert extractor.decoder._span_non_mask.sum().item() == (num_tokens**2 + num_tokens) // 2
@pytest.mark.parametrize("sb_epsilon", [0.1, 0.5])
@pytest.mark.parametrize("sb_size", [1, 2, 3])
def test_boundaries_obj_for_boundary_smoothing(sb_epsilon, sb_size):
entry = {'tokens': list("abcdef"),
'chunks': [('EntA', 0, 1), ('EntA', 0, 4), ('EntB', 0, 5), ('EntA', 3, 5), ('EntA', 4, 5)]}
config = BoundarySelectionDecoderConfig(sb_epsilon=sb_epsilon, sb_size=sb_size)
config.build_vocab([entry])
boundaries_obj = config.exemplify(entry)['boundaries_obj']
num_tokens, num_chunks = len(entry['tokens']), len(entry['chunks'])
span_sizes = torch.arange(num_tokens) - torch.arange(num_tokens).unsqueeze(-1) + 1
assert (boundaries_obj.boundary2label_id.sum(dim=-1) - 1).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[:, :, 1:].sum() - num_chunks).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[span_sizes<=0] - torch.tensor([1.0, 0.0, 0.0])).abs().max().item() < 1e-6
if sb_size == 1:
assert (boundaries_obj.boundary2label_id[0, 0] - torch.tensor([(1/4)*sb_epsilon, 1-(1/4)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[0, 3] - torch.tensor([(1/2)*sb_epsilon, 1-(3/4)*sb_epsilon, (1/4)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[0, 4] - torch.tensor([(1/2)*sb_epsilon, (1/4)*sb_epsilon, 1-(3/4)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[3, 4] - torch.tensor([(3/4)*sb_epsilon, 1-(3/4)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[4, 4] - torch.tensor([(1/4)*sb_epsilon, 1-(1/4)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
elif sb_size == 2:
assert (boundaries_obj.boundary2label_id[0, 0] - torch.tensor([(1/4)*sb_epsilon, 1-(1/4)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[0, 3] - torch.tensor([(9/16)*sb_epsilon, 1-(11/16)*sb_epsilon, (1/8)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[0, 4] - torch.tensor([(1/2)*sb_epsilon, (1/8)*sb_epsilon, 1-(5/8)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[3, 4] - torch.tensor([(5/8)*sb_epsilon, 1-(5/8)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[4, 4] - torch.tensor([(3/8)*sb_epsilon, 1-(3/8)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
elif sb_size == 3:
assert (boundaries_obj.boundary2label_id[0, 0] - torch.tensor([(7/36)*sb_epsilon, 1-(7/36)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[0, 3] - torch.tensor([(37/72)*sb_epsilon, 1-(43/72)*sb_epsilon, (1/12)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[0, 4] - torch.tensor([(4/9)*sb_epsilon, (1/9)*sb_epsilon, 1-(5/9)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[3, 4] - torch.tensor([(19/36)*sb_epsilon, 1-(5/9)*sb_epsilon, (1/36)*sb_epsilon])).abs().max().item() < 1e-6
assert (boundaries_obj.boundary2label_id[4, 4] - torch.tensor([(1/3)*sb_epsilon, 1-(1/3)*sb_epsilon, 0.0])).abs().max().item() < 1e-6
@pytest.mark.parametrize("neg_sampling_rate, hard_neg_sampling_rate", [(1.0, 1.0), (0.0, 1.0), (0.0, 0.0),
(0.3, 0.6), (0.1, 0.9), (0.2, 0.8)])
@pytest.mark.parametrize("training", [True, False])
def test_boundaries_obj_for_neg_sampling(neg_sampling_rate, hard_neg_sampling_rate, training):
entry = {'tokens': list("<KEY>"),
'chunks': [('EntA', 0, 1), ('EntA', 0, 4), ('EntB', 0, 5), ('EntA', 3, 5), ('EntA', 4, 5)]}
config = BoundarySelectionDecoderConfig(neg_sampling_rate=neg_sampling_rate,
hard_neg_sampling_rate=hard_neg_sampling_rate,
hard_neg_sampling_size=3)
config.build_vocab([entry])
boundaries_obj = config.exemplify(entry, training=training)['boundaries_obj']
if (not training) or (neg_sampling_rate >= 1):
assert not hasattr(boundaries_obj, 'non_mask')
elif neg_sampling_rate <=0 and hard_neg_sampling_rate <= 0:
assert boundaries_obj.non_mask.sum().item() == 5
elif neg_sampling_rate <=0 and hard_neg_sampling_rate >= 1:
assert boundaries_obj.non_mask.sum().item() == 30
else:
assert abs(boundaries_obj.non_mask.sum().item() - (25*neg_sampling_rate + 25*hard_neg_sampling_rate + 5)) < 5
@pytest.mark.parametrize("seq_len", [1, 5, 10, 100])
def test_spans_from_upper_triangular(seq_len):
    """An upper-triangular enumeration over `seq_len` positions yields n*(n+1)/2 spans."""
    spans = list(_spans_from_upper_triangular(seq_len))
    assert len(spans) == seq_len * (seq_len + 1) // 2
|
<reponame>diegojromerolopez/pystats-trello
# -*- coding: utf-8 -*-
import settings
# Abstraction of a Trello Board with all the fields needed for the stats initialized
class TrelloBoard(object):
    """Abstraction of a Trello board with all the fields needed for stats.

    Fetches the board, its members, lists, labels and cards on construction,
    and validates the supplied configuration against the fetched lists.
    """

    # Constructor based on credentials and a board name of the board it will compute the stats
    def __init__(self, trello_connector, configuration):
        """
        :param trello_connector: object exposing get_trello_client() for an authenticated py-trello client.
        :param configuration: stats configuration with board_name, list names and custom workflows.
        """
        self.client = trello_connector.get_trello_client()
        # Store the configuration before fetching: _fetch_board -> _fetch_lists
        # triggers _init_done_list/_init_cycle_lists, which read
        # self.configuration, so it must already exist at that point.
        self.configuration = configuration
        self._fetch_board(configuration.board_name)
        # Check that configuration (that lists name are right)
        self._init_configuration(configuration)

    # Fetches the board from Trello API
    # It also fetches and initializes its lists and its cards.
    def _fetch_board(self, board_name):
        """
        Connects to Trello and sets the board (py-trello Board object).
        :return: True if board with board_name was found, raises otherwise.
        """
        boards = self.client.list_boards()
        for board in boards:
            # NOTE(review): assumes board.name is bytes; py-trello returns str
            # on Python 3 — confirm the library version in use.
            if board.name.decode("utf-8") == board_name:
                self.board_name = board_name
                self.board = board
                self._fetch_members()
                self._fetch_lists()
                self._fetch_labels()
                self._init_cards()
                return True
        # Use the parameter here: self.board_name is only assigned when a
        # board is found, so reading it in this path would raise AttributeError
        # and mask the intended error message.
        raise RuntimeWarning(u"Board {0} was not found. Are your credentials correct?".format(board_name))

    # Fetching the members of this board
    def _fetch_members(self):
        self.members = self.board.all_members()
        # Dict for O(1) member lookup by id
        self.members_dict = {member.id: member for member in self.members}

    # Fetching of the board lists from Trello API
    def _fetch_lists(self):
        """
        Initialize lists and lists_dict attributes of this objects.
        These attributes contain a list of the board lists and a dict for fast access to a list given its id.
        """
        # List of the board
        self.lists = self.board.all_lists()
        # Compute list orders (1-based position of each list on the board)
        i = 1
        for list_ in self.lists:
            list_.order = i
            i += 1
        # List dict of the board used to avoid fetching list data more than once
        self.lists_dict = {list_.id: list_ for list_ in self.lists}
        # NOTE(review): assumes list names are bytes; see board.name note above.
        self.lists_dict_by_name = {list_.name.decode("utf-8"): list_ for list_ in self.lists}

        # Comparison function used to compute forward and backward movements
        # when computing card.stats_by_list. Returns 1 if list_b is after
        # list_a on the board, -1 if before, 0 if they are the same list.
        def list_cmp(list_a_id, list_b_id):
            if self.lists_dict[list_b_id].order > self.lists_dict[list_a_id].order:
                return 1
            if self.lists_dict[list_b_id].order < self.lists_dict[list_a_id].order:
                return -1
            return 0
        self.list_cmp = list_cmp

        # Done list initialization
        self._init_done_list()
        # Cycle lists initialization
        self._init_cycle_lists()

    # Done list initialization
    def _init_done_list(self):
        # List that will be considered the "done list"
        # It is used to check if the card is done.
        # By default, the last list is the "done list"
        self.done_list = self.lists[-1]
        # But we could have specified another one
        if self.configuration.done_list_name:
            self.done_list = self.lists_dict_by_name[self.configuration.done_list_name]

    # Initializes the cycle lists
    def _init_cycle_lists(self):
        """
        Initializes the lists that play a role when computing the cycle time.
        Cycle lists are stored in self.cycle_lists (list) and self.cycle_lists_dict (dict).
        """
        development_list = self.lists_dict_by_name[self.configuration.development_list_name]
        self.cycle_lists = []
        self.cycle_lists_dict = {}
        # Assumes from the development list to the end list, they all play a role in development
        add_to_cycle_list = False
        for _list in self.lists:
            if _list.id == development_list.id:
                add_to_cycle_list = True
            if add_to_cycle_list:
                self.cycle_lists.append(_list)
                self.cycle_lists_dict[_list.id] = _list
        # If there is no cycle lists, assume the configuration is wrong
        if len(self.cycle_lists) <= 1:
            raise EnvironmentError(
                u"Development list has not been configured for board {0}".format(self.board_name))

    # Fetch and initializes board card labels
    def _fetch_labels(self):
        self.labels = self.board.get_labels()
        self.labels_dict = {label.id: label for label in self.labels}

    # Initializes the cards
    def _init_cards(self):
        self.cards = self.board.all_cards()

    def _init_configuration(self, configuration):
        """
        Asserts configuration is right and initializes it lists
        :param configuration: stats configuration to validate and store.
        """
        self._assert_configuration(configuration)
        self._init_configuration_workflows(configuration)
        self.configuration = configuration

    # Asserts configuration looking for errors
    def _assert_configuration(self, configuration):
        # Check development list existence
        self._assert_list_existence(configuration.development_list_name)
        # Check done list existence
        self._assert_list_existence(configuration.done_list_name)
        # Check workflows: every list referenced by a workflow must exist
        for workflow in configuration.custom_workflows:
            for list_ in workflow.list_name_order:
                self._assert_list_existence(list_)
            for list_ in workflow.done_list_names:
                self._assert_list_existence(list_)

    # Check the existence of a list with a name in the board
    def _assert_list_existence(self, list_name):
        if self.lists_dict_by_name.get(list_name) is None:
            raise ValueError(u"Development list '{0}' does not exists in board {1}".format(list_name, self.board_name))

    # Initializes the configuration lists and done_lists attributes
    def _init_configuration_workflows(self, configuration):
        # Resolve the list names of each workflow to the fetched py-trello
        # List objects and hand them to the workflow.
        for workflow in configuration.custom_workflows:
            wf_configuration_lists = []
            wf_configuration_done_lists = []
            for list_ in workflow.list_name_order:
                wf_configuration_lists.append(self.lists_dict_by_name.get(list_))
            for list_ in workflow.done_list_names:
                wf_configuration_done_lists.append(self.lists_dict_by_name.get(list_))
            workflow.init_lists(wf_configuration_lists, wf_configuration_done_lists)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# DSA key recovery from nonce
#
# Step 1: Relocate so that you are out of easy travel distance of us.
# Step 2: Implement DSA, up to signing and verifying, including parameter
# generation.
#
# Hah-hah you're too far away to come punch us.
#
# Just kidding you can skip the parameter generation part if you want; if you
# do, use these params:
#
# p = 800000000000000089e1855218a0e7dac38136ffafa72eda7
# 859f2171e25e65eac698c1702578b07dc2a1076da241c76c6
# 2d374d8389ea5aeffd3226a0530cc565f3bf6b50929139ebe
# ac04f48c3c84afb796d61e5a4f9a8fda812ab59494232c7d2
# b4deb50aa18ee9e132bfa85ac4374d7f9091abc3d015efc87
# 1a584471bb1
#
# q = f4f47f05794b256174bba6e9b396a7707e563c5b
#
# g = <KEY>
# 458fef538b8fa4046c8db53039db620c094c9fa077ef389b5
# 322a559946a71903f990f1f7e0e025e2d7f7cf494aff1a047
# 0f5b64c36b625a097f1651fe775323556fe00b3608c887892
# 878480e99041be601a62166ca6894bdd41a7054ec89f756ba
# 9fc95302291
#
# ("But I want smaller params!" Then generate them yourself.)
#
# The DSA signing operation generates a random subkey "k". You know this
# because you implemented the DSA sign operation.
#
# This is the first and easier of two challenges regarding the DSA "k" subkey.
#
# Given a known "k", it's trivial to recover the DSA private key "x":
#
# (s * k) - H(msg)
# x = ---------------- mod q
# r
#
# Do this a couple times to prove to yourself that you grok it. Capture it in
# a function of some sort.
#
# Now then. I used the parameters above. I generated a keypair. My pubkey is:
#
# y = 84ad4719d044495496a3201c8ff484feb45b962e7302e56a392aee4
# abab3e4bdebf2955b4736012f21a08084056b19bcd7fee56048e004
# e44984e2f411788efdc837a0d2e5abb7b555039fd243ac01f0fb2ed
# 1dec568280ce678e931868d23eb095fde9d3779191b8c0299d6e07b
# bb283e6633451e535c45513b2d33c99ea17
#
# I signed:
#
# For those that envy a MC it can be hazardous to your health
# So be friendly, a matter of life and death, just like a etch-a-sketch
#
# (My SHA1 for this string was d2d0714f014a9784047eaeccf956520045c45265; I
# don't know what NIST wants you to do, but when I convert that hash to an
# integer I get: 0xd2d0714f014a9784047eaeccf956520045c45265).
#
# I get:
#
# r = 548099063082341131477253921760299949438196259240
# s = 857042759984254168557880549501802188789837994940
#
# I signed this string with a broken implemention of DSA that generated "k"
# values between 0 and 2^16. What's my private key?
#
# Its SHA-1 fingerprint (after being converted to hex) is:
#
# 0954edd5e0afe5542a4adf012611a91912a3ec16
#
# Obviously, it also generates the same signature for that string.
#
import inspect
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.dsa import G, P, Q, dsa_sign, dsa_verify, make_dsa_keys
from util.misc import invmod, modexp
from util.sha1 import SHA1
from util.text import from_bytes, print_indent, to_bytes, to_hexstring
def bruteforce_dsa_privkey(bs, sig, max_k=2 ** 16, p=P, q=Q, g=G):
    """Brute-force the DSA private key x from a small signing subkey k.

    For every candidate k, derive x = ((s*k - H(msg)) * r^-1) mod q and
    accept it once re-deriving (r, s) with that (x, k) reproduces the
    given signature. Returns None when no k in [1, max_k) matches.
    """
    r, s = sig
    digest_int = from_bytes(SHA1(bs).digest())
    r_inv = invmod(r, q)  # loop-invariant, hoist out of the search
    for k in range(1, max_k):
        k_inv = invmod(k, q)
        if k_inv is None:
            # k shares a factor with q; no modular inverse, skip it.
            continue
        x = ((((s * k) % q) - digest_int) % q) * r_inv % q
        s_check = (k_inv * (digest_int + x * r)) % q
        r_check = modexp(g, k, p) % q
        if (s_check, r_check) == (s, r):
            return x
def _test_dsa():
    """Smoke-test the DSA implementation: sign and verify with a fresh key pair."""
    plaintext = b"Hickory dickory dock"
    public_key, private_key = make_dsa_keys()
    signature = dsa_sign(plaintext, private_key)
    is_valid = dsa_verify(plaintext, signature, public_key)
    print(f"Signature validates for '{plaintext.decode()}':", is_valid)
def main():
    """Recover a DSA private key signed with a broken 16-bit subkey k."""
    print("First, we'll test our DSA implementation.")
    _test_dsa()
    print()
    print("Now, let's bruteforce a DSA private key from a 16-bit subkey.")
    print()

    ptext = (
        b"For those that envy a MC it can be hazardous to your health\n"
        b"So be friendly, a matter of life and death, just like a etch-a-sketch\n"
    )
    # (r, s) published in the challenge text.
    sig = (
        548099063082341131477253921760299949438196259240,
        857042759984254168557880549501802188789837994940,
    )

    print("Plaintext:")
    print_indent(*ptext.split(b"\n"), width=70, as_hex=False)
    print("Bruteforced private key:")
    privkey = bruteforce_dsa_privkey(ptext, sig)
    print_indent(privkey, as_hex=False)

    # This part is a little convoluted, but the SHA-1 hashes mentioned
    # in the challenge must've been mentioned ~for a reason~!!
    expected_hashes = [
        "d2d0714f014a9784047eaeccf956520045c45265",
        "0954edd5e0afe5542a4adf012611a91912a3ec16",
    ]
    actual_hashes = [SHA1(ptext).hexdigest(), SHA1(to_hexstring(to_bytes(privkey))).hexdigest()]
    print(
        "Calculated hashes match for plaintext and private key:",
        expected_hashes == actual_hashes,
    )
if __name__ == "__main__":
    # Allow a clean Ctrl-C exit without printing a traceback.
    try:
        main()
    except KeyboardInterrupt:
        pass
# Output:
#
# First, we'll test our DSA implementation.
# Signature validates for 'Hickory dickory dock': True
#
# Now, let's bruteforce a DSA private key from a 16-bit subkey.
#
# Plaintext:
#
# For those that envy a MC it can be hazardous to your health
# So be friendly, a matter of life and death, just like a etch-a-sketch
#
# Bruteforced private key:
#
# 125489817134406768603130881762531825565433175625
#
# Calculated hashes match for plaintext and private key: True
#
|
#!/usr/bin/python
# coding: utf8
import unittest
import logging
import os
from pytsdb.models import Item, BucketType
from pytsdb.storage import MemoryStorage, RedisStorage, CassandraStorage, SQLiteStorage
from pytsdb.errors import NotFoundError
class StorageTest(unittest.TestCase):
    """Exercise every pytsdb storage backend against one shared scenario.

    The insert/get/query/first/last/left/stats assertions are identical for
    all four backends, so they live in :meth:`_verify_storage`; each
    ``test_*store`` method only constructs (and, where needed, resets) its
    backend and delegates to the shared checks.
    """

    def setUp(self):
        # The scenario assumes dynamically sized buckets.
        Item.DEFAULT_BUCKETTYPE = BucketType.dynamic

    def tearDown(self):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    @classmethod
    def setUpClass(cls):
        pass

    def _verify_storage(self, storage):
        """Run the shared read/write scenario against *storage*.

        :param storage: an empty, ready-to-use storage backend instance.
        """
        self.assertTrue(storage)

        # Reads on an empty store must raise.
        with self.assertRaises(NotFoundError):
            storage.get(key="test.ph", range_key=1000)
        with self.assertRaises(NotFoundError):
            storage.last(key="test.ph")

        # Insert four points, deliberately out of timestamp order.
        for ts, value in [(1000, 1.0), (2000, 4.0), (1100, 2.0), (1200, 3.0)]:
            storage.insert(Item.new("test.ph", [(ts, value)]))

        # Point lookups by exact timestamp.
        for ts, value in [(1000, 1.0), (1100, 2.0), (1200, 3.0), (2000, 4.0)]:
            d = storage.get(key="test.ph", range_key=ts)
            self.assertEqual(d[0], (ts, value))

        def assert_query(range_min, range_max, expected):
            # One range query plus ordered comparison of the returned points.
            ds = storage.query(key="test.ph", range_min=range_min, range_max=range_max)
            self.assertEqual(len(ds), len(expected))
            for got, exp in zip(ds, expected):
                self.assertEqual(got[0], exp)

        assert_query(1000, 1000, [(1000, 1.0)])
        assert_query(-1000, 1000, [(1000, 1.0)])
        assert_query(-999, 999, [])
        assert_query(1000, 1200, [(1000, 1.0), (1100, 2.0), (1200, 3.0)])
        assert_query(99, 1350, [(1000, 1.0), (1100, 2.0), (1200, 3.0)])
        # NOTE(review): range_min=1101 is expected to still return the bucket
        # starting at 1100 — presumably bucket-granular query semantics;
        # confirm against the storage documentation.
        assert_query(1101, 1200, [(1100, 2.0), (1200, 3.0)])
        assert_query(99, 999999, [(1000, 1.0), (1100, 2.0), (1200, 3.0), (2000, 4.0)])

        # Ends of the series.
        self.assertEqual(storage.last(key="test.ph")[0], (2000, 4.0))
        self.assertEqual(storage.first(key="test.ph")[0], (1000, 1.0))
        # `left` of 1050: the point at or before the key — TODO confirm semantics.
        self.assertEqual(storage.left(key="test.ph", range_key=1050)[0], (1000, 1.0))

        # Aggregate stats over the whole key.
        s = storage.stats(key="test.ph")
        self.assertEqual(s["ts_min"], 1000)
        self.assertEqual(s["ts_max"], 2000)
        self.assertEqual(s["count"], 4)

    def test_sqlitestore(self):
        storage = SQLiteStorage("test.db3")
        # Reset the table so the scenario starts from an empty store.
        storage._dropTable()
        storage._createTable()
        self._verify_storage(storage)

    def test_cassandrastore(self):
        cassandra_host = os.getenv('CASSANDRA_HOST', 'localhost')
        cassandra_port = os.getenv('CASSANDRA_PORT', 9042)
        storage = CassandraStorage(contact_points=[cassandra_host], port=cassandra_port)
        # Reset the table so the scenario starts from an empty store.
        storage._dropTable()
        storage._createTable()
        self._verify_storage(storage)

    def test_redisstore(self):
        redis_host = os.getenv('REDIS_HOST', 'localhost')
        redis_port = os.getenv('REDIS_PORT', 6379)
        # Short expiry keeps test keys from accumulating between runs.
        storage = RedisStorage(host=redis_host, port=redis_port, db=0, expire=5)
        self._verify_storage(storage)

    def test_memorystore(self):
        storage = MemoryStorage()
        self._verify_storage(storage)
|
<filename>app/table_handler.py
from datetime import datetime
import pandas as pd
from utils import exchange_into_rub, PostgreSQLStarter, get_exchange_rates
from app import COUNTRIES_LIST, CURRENCIES_LIST
def drop_table(table_name):
    """
    Delete the entire table from the database.

    Relies on the module-level ``connection`` and ``cursor`` created in the
    ``__main__`` block, so it is only usable when this file runs as a script.
    :param table_name: name of the table subject for deletion
    :raises ValueError: if table_name is not a plain SQL identifier
    """
    # Identifiers cannot be bound as query parameters, so validate the name
    # instead of interpolating arbitrary input into the SQL text.
    if not table_name.isidentifier():
        raise ValueError(f'invalid table name: {table_name!r}')
    cursor.execute(f'drop table {table_name}')
    connection.commit()
    print('Table dropped')
def get_limits_table(cursor):
    """
    Return the entire limits table from the database.

    :param cursor: cursor instance
    :return: list of dicts with keys ID, COUNTRY, CUR, MAX_LIMIT, one per row
    """
    cursor.execute('''SELECT * FROM limits''')
    columns = ['ID', 'COUNTRY', 'CUR', 'MAX_LIMIT']
    return [dict(zip(columns, record)) for record in cursor.fetchall()]
def get_limit_by_id(id, cursor):
    """
    Return the row of the limits table with a certain ID.

    :param id: primary key of the limits row
    :param cursor: cursor instance
    :return: dict with keys ID, COUNTRY, CUR, MAX_LIMIT, or a ``failure``
        dict when no row with that ID exists
    """
    # Bind the id as a query parameter instead of interpolating it into the
    # SQL text; also check fetchone() explicitly rather than letting a
    # broad except mask real database errors.
    cursor.execute('''SELECT * FROM limits WHERE id = %s''', (id,))
    row = cursor.fetchone()
    if row is None:
        return {'failure': f'ID={id} does not exist'}
    return dict(zip(['ID', 'COUNTRY', 'CUR', 'MAX_LIMIT'], row))
def get_history_table(cursor):
    """
    Return the entire history table from the database.

    :param cursor: cursor instance
    :return: list of dicts with keys ID, DATE, AMOUNT, CUR, COUNTRY
    """
    cursor.execute('''SELECT * FROM history''')
    columns = ['ID', 'DATE', 'AMOUNT', 'CUR', 'COUNTRY']
    return [dict(zip(columns, record)) for record in cursor.fetchall()]
def insert_into_limits(id, country: str, currency: str, max_limit, connection, cursor):
    """
    Insert a new record into the limits table.

    :param id: new ID to insert
    :param country: new country to insert, possible options - 'RUS', 'AUS', 'ABH'
    :param currency: new currency to insert, possible options - 'RUB', 'USD', 'EUR'
    :param max_limit: maximum monthly limit in corresponding currency for the certain ID
    :param connection: connection instance
    :param cursor: cursor instance
    :return: the inserted row as a dict, or a ``failure`` dict on bad input
    """
    # NOTE(review): membership is tested on the raw input, but the uppercased
    # value is stored — callers apparently must pass uppercase codes; confirm.
    if country in COUNTRIES_LIST and currency in CURRENCIES_LIST:
        # Parameterized insert: values travel separately from the SQL text.
        query = '''INSERT INTO limits (ID, COUNTRY, CUR, MAX_LIMIT) VALUES (%s, %s, %s, %s)'''
        cursor.execute(query, (id, country.upper(), currency.upper(), max_limit))
        connection.commit()
        print(f'Line ({id}, {country.upper()}, {currency.upper()}, {max_limit}) successfully inserted')
        return get_limit_by_id(id, cursor)
    else:
        message = 'Failed to update limits. Please, check your input'
        print(message)
        return {'failure': message}
def insert_into_history(id, date, amount, currency: str, country: str, connection, cursor):
    """
    Insert a new record into the history table, enforcing the monthly limit.

    The sum of all history amounts within the calendar month of ``date``
    (converted to rubles) plus the new amount must not exceed the configured
    max limit for ``id``; otherwise the insert is rejected.
    :param id: new ID to insert
    :param date: date to insert in format '%Y-%m-%d %H:%M:%S'
        NOTE(review): the original docs also mention 'now', but strptime below
        would raise on that value — confirm intended behavior.
    :param amount: amount to insert in corresponding currency
    :param currency: new currency to insert, possible options - 'RUB', 'USD', 'EUR'
    :param country: new country to insert, possible options - 'RUS', 'AUS', 'ABH'
    :param connection: connection instance
    :param cursor: cursor instance
    :return: a status dict ('message' on success, 'failure' otherwise)
    """
    if country in COUNTRIES_LIST and currency in CURRENCIES_LIST:
        time_data = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
        # Compute the [first-of-this-month, first-of-next-month) window,
        # rolling the year over at December.
        cur_month, cur_year = time_data.month, time_data.year
        next_month = cur_month + 1 if cur_month != 12 else 1
        next_year = cur_year if cur_month != 12 else cur_year + 1
        # NOTE(review): SQL built via str.format — injection-prone; prefer
        # bound parameters.
        history_query = '''SELECT amount, cur FROM history WHERE date BETWEEN date '{}-{}-1' and date '{}-{}-1' '''
        cursor.execute(history_query.format(cur_year, cur_month, next_year, next_month))
        exchange_rates = get_exchange_rates()
        # Everything spent so far this month, in rubles.
        overall_to_date = exchange_into_rub(cursor.fetchall(), exchange_rates)
        rub_amount = exchange_into_rub([(amount, currency.upper())], exchange_rates)
        limits_query = '''SELECT max_limit, cur FROM limits WHERE id = {} '''
        cursor.execute(limits_query.format(id))
        max_limit = exchange_into_rub([cursor.fetchone()], exchange_rates)
        if overall_to_date + rub_amount <= max_limit:
            insert_query = '''INSERT INTO history (ID, DATE, AMOUNT, CUR, COUNTRY)
                              VALUES ({}, '{}', {}, '{}', '{}')'''
            cursor.execute(insert_query.format(id, date, amount, currency.upper(), country.upper()))
            connection.commit()
            print(f'History successfully updated')
            return {'message': 'history successfully updated'}
        else:
            # Over the monthly limit: reject without inserting.
            message = f'Sum within a current month ({overall_to_date + rub_amount} rub) ' \
                      f'exceeds max_limit ({max_limit} rub)'
            print(message)
            return {'failure': message}
    else:
        message = 'Failed to update history. Please, check your inputs'
        print(message)
        return {'failure': message}
def update_limits(id, connection, cursor, currency=None, country=None, max_limit=None):
    """
    Update an existing limits record; only the non-None fields are changed.

    :param id: ID to update
    :param connection: connection instance
    :param cursor: cursor instance
    :param currency: currency to update, possible options - 'RUB', 'USD', 'EUR'
    :param country: country to insert, possible options - 'RUS', 'AUS', 'ABH'
    :param max_limit: maximum monthly limit in corresponding currency for the certain ID
    :return: the updated row dict, ``None`` when no field was given, or a
        ``failure`` dict for an invalid country/currency
    """
    columns = ['CUR', 'COUNTRY', 'MAX_LIMIT']
    if country in COUNTRIES_LIST + [None] and currency in CURRENCIES_LIST + [None]:
        counter = 0
        for column, item in zip(columns, [currency, country, max_limit]):
            if item is not None:
                # The column name comes from the fixed whitelist above; the
                # value itself is passed as a bound parameter, so no quoting
                # distinction between text and numeric columns is needed.
                cursor.execute(f'''UPDATE limits SET {column} = %s WHERE ID = %s''', (item, id))
                connection.commit()
                print(f'Limits updated: id = {id}, {column} = {item}')
                counter += 1
        return get_limit_by_id(id, cursor) if counter else None
    else:
        # Previously this path fell through and silently returned None;
        # report the failure like the sibling functions do.
        message = 'Failed to update limits. Please, check your input'
        print(message)
        return {'failure': message}
def delete_from_limits_by_id(id, connection, cursor):
    """
    Delete the row with a certain ID from the limits table.

    :param id: ID to delete
    :param connection: connection instance
    :param cursor: cursor instance
    :return: a status dict on success, or a ``failure`` dict when the ID
        does not exist
    """
    # Verify the row exists first so we can report a precise failure.
    check_for_existence = get_limit_by_id(id, cursor)
    if check_for_existence.get('failure') is None:
        # Bound parameter instead of string interpolation.
        cursor.execute('''Delete from limits where id = %s''', (id,))
        connection.commit()
        print(f'Record with id={id} deleted')
        return {'status': 'success', 'message': f'Record with id={id} deleted'}
    else:
        print(f'Failed to delete, ID={id} does not exist')
        return {'failure': f'Failed to delete, ID={id} does not exist'}
def show_all_pretty_tables():
    """
    Show limits and history tables in a pretty pandas.DataFrame format.

    Relies on the module-level ``cursor`` created in the ``__main__`` block,
    so it is only usable when this file runs as a script.
    """
    print('Limits table')
    print(pd.DataFrame.from_records(get_limits_table(cursor)))
    print('\nHistory table')
    print(pd.DataFrame.from_records(get_history_table(cursor)))
if __name__ == '__main__':
    # Script entry point: the module-level connection/cursor relied on by
    # drop_table and show_all_pretty_tables are created only here.
    connection, cursor = PostgreSQLStarter().get_connection_and_cursor()
    show_all_pretty_tables()
|
<filename>tests/test_cmd_stat.py
"""
Тесты для команды "стата"
"""
from socket import timeout
from datetime import datetime, timedelta
from time import sleep
from discord.ext.test import message, get_embed
from mcstatus import MinecraftServer
from pytest import fixture, mark
from discord import Color
from mcstatus.pinger import PingResponse
class TestStatistic:
"""Класс для тестов и фикстур"""
@fixture(scope="class")
async def stat_online_not_added(self, event_loop, bot, database, monkeypatch_session):
"""Фикстура для проверки правильно ли бот сработает если сервер онлайн, но не добавлен"""
def fake_server_answer(class_self=None):
"""Эмулирует ответ сервера"""
return PingResponse(
{
"description": {"text": "A Minecraft Server"},
"players": {"max": 20, "online": 0},
"version": {"name": "1.17.1", "protocol": 756},
}
)
monkeypatch_session.setattr(MinecraftServer, "status", fake_server_answer)
await message("стата 127.0.0.3")
embed = get_embed()
while str(embed.color) == str(Color.orange()): # ждет пока бот не отошлет результаты вместо
sleep(0.01) # "ожидайте, в процессе"
embed = get_embed()
return embed
@fixture(scope="class")
async def stat_online(self, event_loop, bot, database, monkeypatch_session):
"""Основная фикстура для тестов, отсылает онлайн сервер"""
await database.add_server("127.0.0.4", 0, 25565)
await database.add_record("127.0.0.4", 25565, 33)
def fake_server_answer(class_self=None):
"""Эмулирует ответ сервера"""
return PingResponse(
{
"description": {"text": "A Minecraft Server"},
"players": {"max": 20, "online": 5},
"version": {"name": "1.17.1", "protocol": 756},
}
)
monkeypatch_session.setattr(MinecraftServer, "status", fake_server_answer)
await message("стата 127.0.0.4")
embed = get_embed()
while str(embed.color) == str(Color.orange()): # ждет пока бот не отошлет результаты вместо
sleep(0.01) # "ожидайте, в процессе"
embed = get_embed()
return embed
@fixture(scope="class")
async def stat_alias(self, event_loop, bot, database, monkeypatch_session):
"""Фикстура для тестов поддерживает ли команда алиасы"""
await database.add_server("127.0.0.5", 0, 25565)
await database.add_alias("тест_алиас", "127.0.0.5", 25565)
yesterday = datetime.now() - timedelta(hours=24)
await database.pool.execute("INSERT INTO sunpings VALUES ($1, $2, $3, $4);", "127.0.0.5", 25565, yesterday, 12)
# Генерирует 25 пингов
i = 0
args = []
while i <= 25:
time = datetime.now() - timedelta(minutes=i * 10)
args.append(("127.0.0.5", 25565, time, i))
i += 1
await database.pool.executemany("INSERT INTO sunpings VALUES ($1, $2, $3, $4);", args)
def fake_server_answer(class_self=None):
"""Эмулирует ответ сервера"""
return PingResponse(
{
"description": {"text": "A Minecraft Server"},
"players": {"max": 20, "online": 5},
"version": {"name": "1.17.1", "protocol": 756},
}
)
monkeypatch_session.setattr(MinecraftServer, "status", fake_server_answer)
await message("стата тест_алиас")
embed = get_embed()
while str(embed.color) == str(Color.orange()): # ждет пока бот не отошлет результаты вместо
sleep(0.01) # "ожидайте, в процессе"
embed = get_embed()
return embed
@fixture(scope="class")
async def stat_not_valid(self, event_loop, bot, database, monkeypatch_session):
"""Вызывает команду с не валидным айпи"""
monkeypatch_session.undo()
await message("стата www")
embed = get_embed()
while str(embed.color) == str(Color.orange()): # ждет пока бот не отошлет результаты вместо
sleep(0.01) # "ожидайте, в процессе"
embed = get_embed()
return embed
@fixture(scope="class")
async def stat_offline(self, event_loop, bot, database, monkeypatch_session):
"""Вызывает команду с пингом выключенного сервера"""
def fake_server_answer(class_self=None):
"""Когда сервер выключен, модуль вызывает exception socket.timeout"""
raise timeout
monkeypatch_session.setattr(MinecraftServer, "status", fake_server_answer)
await message("стата 127.0.0.6")
embed = get_embed()
while str(embed.color) == str(Color.orange()): # ждет пока бот не отошлет результаты вместо
sleep(0.01) # "ожидайте, в процессе"
embed = get_embed()
return embed
@mark.asyncio
async def test_server_not_added_color(self, event_loop, bot, database, stat_online_not_added):
"""Проверят цвет в ответе бота, если сервер не добавлен"""
await database.make_tables()
assert str(stat_online_not_added.color) == str(Color.red())
def test_online(self, bot, database, stat_online):
"""Проверяет правильно ли бот распознает текущий онлайн"""
online = stat_online.fields[0].value.split("/")
assert online[0] == "5"
def test_online_max(self, bot, database, stat_online):
    """The bot reports the maximum player count."""
    maximum = stat_online.fields[0].value.split("/")[1]
    assert maximum == "20"
def test_record(self, bot, database, stat_online):
    """The bot reports the stored online record."""
    record_field = stat_online.fields[2]
    assert record_field.value == "33"
def test_online_yest_null(self, bot, database, stat_online):
    """Without stored pings, yesterday's online shows the placeholder text."""
    yesterday = stat_online.fields[1].value
    assert yesterday == "Нету информации"
def test_color(self, bot, database, stat_online):
    """The reply embed is green for a successful ping."""
    expected = str(Color.green())
    assert str(stat_online.color) == expected
def test_alias_color(self, bot, stat_alias, database):
    """The reply embed is green when the server is referenced by alias."""
    expected = str(Color.green())
    assert str(stat_alias.color) == expected
def test_alias_numip(self, bot, stat_alias, database):
    """Resolving an alias still shows the numeric IP and port."""
    description = stat_alias.description
    assert "127.0.0.5" in description
    assert "25565" in description
@mark.skip(reason="фича еще не добавлена")
def test_alias_in(self, bot, stat_alias, database):
    """The alias itself should appear in the title instead of the numeric IP."""
    title = stat_alias.title
    assert "тест_алиас" in title
def test_plot(self, bot, stat_alias, database):
    """The bot attaches an online-history plot to the embed."""
    expected_url = "attachment://1172.16.58.3_25565.png"
    assert stat_alias.image.url == expected_url
def test_check_yesterday_online(self, bot, stat_alias, database):
    """The bot aggregates yesterday's pings correctly."""
    yesterday = stat_alias.fields[1].value
    assert yesterday == "12"
def test_ip_not_valid(self, bot, database, stat_not_valid):
    """The reply embed is red when the IP is not valid."""
    expected = str(Color.red())
    assert str(stat_not_valid.color) == expected
def test_offline_color(self, bot, database, stat_offline):
    """The reply embed is red when the pinged server is offline."""
    expected = str(Color.red())
    assert str(stat_offline.color) == expected
|
<filename>plugwise/stick.py
"""
Use of this source code is governed by the MIT license found in the LICENSE file.
Main stick object to control associated plugwise plugs
"""
import logging
import time
import serial
import sys
import threading
from datetime import datetime, timedelta
from plugwise.constants import (
ACK_ERROR,
ACK_TIMEOUT,
MAX_TIME_DRIFT,
MESSAGE_TIME_OUT,
MESSAGE_RETRY,
NODE_TYPE_STICK,
NODE_TYPE_CIRCLE_PLUS,
NODE_TYPE_CIRCLE,
NODE_TYPE_SWITCH,
NODE_TYPE_SENSE,
NODE_TYPE_SCAN,
NODE_TYPE_STEALTH,
SLEEP_TIME,
WATCHDOG_DEAMON,
)
from plugwise.connections.socket import SocketConnection
from plugwise.connections.serial import PlugwiseUSBConnection
from plugwise.exceptions import (
CirclePlusError,
NetworkDown,
PortError,
StickInitError,
TimeoutException,
)
from plugwise.message import PlugwiseMessage
from plugwise.messages.requests import (
CirclePlusScanRequest,
CircleCalibrationRequest,
CirclePlusRealTimeClockGetRequest,
CirclePlusRealTimeClockSetRequest,
CirclePowerUsageRequest,
CircleSwitchRequest,
NodeClockGetRequest,
NodeClockSetRequest,
NodeInfoRequest,
NodePingRequest,
NodeRequest,
StickInitRequest,
)
from plugwise.messages.responses import (
CircleScanResponse,
CircleCalibrationResponse,
CirclePlusRealTimeClockResponse,
CirclePowerUsageResponse,
CircleSwitchResponse,
NodeClockResponse,
NodeInfoResponse,
NodePingResponse,
NodeResponse,
StickInitResponse,
)
from plugwise.parser import PlugwiseParser
from plugwise.node import PlugwiseNode
from plugwise.nodes.circle import PlugwiseCircle
from plugwise.nodes.circle_plus import PlugwiseCirclePlus
from plugwise.nodes.stealth import PlugwiseStealth
from plugwise.util import inc_seq_id, validate_mac
from queue import Queue
class stick(object):
    """
    Plugwise connection stick.

    Owns the serial/socket connection, the protocol parser, the worker
    threads (send, receive-timeout, watchdog, update) and the registry of
    discovered Plugwise nodes.
    """
def __init__(self, port, callback=None, print_progress=False):
    """Create the stick controller.

    port: serial device path, or "host:port" for a socket connection.
    callback: when given, auto_initialize() runs immediately and the
        callback is forwarded to the network scan.
    print_progress: echo progress messages to stdout.
    """
    self.logger = logging.getLogger("plugwise")
    self._mac_stick = None  # mac of the stick itself (set from StickInitResponse)
    self.port = port
    self.network_online = False
    self.circle_plus_mac = None  # mac of the Circle+ network coordinator
    self.network_id = None
    self.parser = PlugwiseParser(self)
    self._plugwise_nodes = {}  # mac -> node object
    self._nodes_to_discover = []
    self._nodes_not_discovered = {}  # mac -> (first_request, last_request) timestamps
    self._stick_callbacks = {}  # callback_type -> list of callables
    self.last_ack_seq_id = None
    # seq_id -> [expected response, request, callback, retry counter, sent-at]
    self.expected_responses = {}
    self.print_progress = print_progress
    # Offset between local wall clock and UTC, at whole-hour resolution.
    self.timezone_delta = datetime.now().replace(
        minute=0, second=0, microsecond=0
    ) - datetime.utcnow().replace(minute=0, second=0, microsecond=0)
    self._run_receive_timeout_thread = False
    self._run_send_message_thread = False
    self._run_update_thread = False
    if callback:
        self.auto_initialize(callback)
def auto_initialize(self, callback=None):
    """Connect, initialize the stick and scan the network in one go.

    Errors are logged instead of raised, which makes this safe to call
    from the constructor.  `callback` is forwarded to scan() and fires
    once the network scan has finished.
    """

    def init_finished():
        """Runs after stick initialization; starts the network scan."""
        if not self.network_online:
            # BUGFIX: was `self.logger.Error(...)` which raises
            # AttributeError (Logger has no `Error` method).
            self.logger.error("plugwise Zigbee network down")
        else:
            if self.print_progress:
                print("Scan Plugwise network")
            self.scan(callback)

    try:
        if self.print_progress:
            print("Open port")
        self.connect()
        if self.print_progress:
            print("Initialize Plugwise USBstick")
        self.initialize_stick(init_finished)
    except PortError as e:
        self.logger.error("Failed to connect: '%s'", e)
    except StickInitError as e:
        self.logger.error("Failed to initialize USBstick: '%s'", e)
    except NetworkDown:
        self.logger.error("Failed to communicate: Plugwise Zigbee network")
    except TimeoutException:
        self.logger.error("Timeout exception while initializing USBstick")
    except Exception as e:
        # BUGFIX: was `self.logger.error("Unknown error : %s"), e` -- the
        # argument sat outside the call, so it was never logged.
        self.logger.error("Unknown error : %s", e)
def connect(self, callback=None):
    """Open the port to the stick and start the I/O worker threads.

    A port string containing ':' selects a socket connection
    (host:port); anything else is treated as a local USB serial device.
    """
    self.init_callback = callback
    # Open connection to USB Stick
    if ":" in self.port:
        self.logger.debug("Open socket connection to Plugwise Zigbee stick")
        self.connection = SocketConnection(self.port, self)
    else:
        self.logger.debug("Open USB serial connection to Plugwise Zigbee stick")
        self.connection = PlugwiseUSBConnection(self.port, self)
    self.connection.open_port()
    self.logger.debug("Starting threads...")
    # Receive-timeout daemon: expires stale queued requests.
    self._run_receive_timeout_thread = True
    self._receive_timeout_thread = threading.Thread(
        None, self._receive_timeout_loop, "receive_timeout_deamon", (), {}
    )
    self._receive_timeout_thread.daemon = True
    self._receive_timeout_thread.start()
    # Send daemon: serializes outgoing requests onto the stick.
    self._send_message_queue = Queue()
    self._run_send_message_thread = True
    self._send_message_thread = threading.Thread(
        None, self._send_message_loop, "send_messages_deamon", (), {}
    )
    self._send_message_thread.daemon = True
    self._send_message_thread.start()
    # Update daemon: created here but only started later by auto_update()
    # (or restarted by the watchdog) -- note no .start() call below.
    self._run_update_thread = False
    self._auto_update_timer = None
    self._update_thread = threading.Thread(
        None, self._update_loop, "update_daemon", (), {}
    )
    self._update_thread.daemon = True
    self.logger.debug("All threads started")
def initialize_stick(self, callback=None) -> bool:
    """Send StickInitRequest and block until the stick answered.

    Waits at most MESSAGE_TIME_OUT seconds.  Raises StickInitError when
    not connected or when no init response arrives in time; the internal
    callback raises NetworkDown when the stick reports the Zigbee
    network offline.  `callback` runs after successful initialization.

    NOTE(review): annotated `-> bool` but no value is ever returned;
    callers appear to rely on exceptions only -- confirm.
    """
    # Initialize USBstick
    if not self.connection.is_connected():
        raise StickInitError
    self.init_finished = False

    def cb_stick_initialized():
        """ Callback when initialization of Plugwise USBstick is finished """
        self.init_finished = True
        if not self.network_online:
            raise NetworkDown
        # Start watchdog daemon that keeps the worker threads alive.
        self._run_watchdog = True
        self._watchdog_thread = threading.Thread(
            None, self._watchdog_loop, "watchdog_daemon", (), {}
        )
        self._watchdog_thread.daemon = True
        self._watchdog_thread.start()
        if callback:
            callback()

    self.logger.debug("Send init request to Plugwise Zigbee stick")
    self.send(StickInitRequest(), cb_stick_initialized)
    # Poll once per second until the init response arrived or we time out.
    timeout = 0
    while not self.init_finished and (timeout < MESSAGE_TIME_OUT):
        timeout += 1
        time.sleep(1)
    if not self.init_finished:
        raise StickInitError
def disconnect(self):
    """Close the port to the USB stick, logging (not raising) failures."""
    try:
        self.connection.close_port()
    except Exception as err:
        self.logger.error("Error while disconnect port: %s", err)
def subscribe_stick_callback(self, callback, callback_type):
    """Register `callback` to run whenever `callback_type` events occur."""
    self._stick_callbacks.setdefault(callback_type, []).append(callback)
def unsubscribe_stick_callback(self, callback, callback_type):
    """Remove a previously subscribed callback for `callback_type`."""
    callbacks = self._stick_callbacks.get(callback_type)
    if callbacks is not None:
        callbacks.remove(callback)
def do_callback(self, callback_type, callback_arg=None):
""" Execute callbacks registered for specified callback type """
if callback_type in self._stick_callbacks:
for callback in self._stick_callbacks[callback_type]:
try:
callback(callback_arg)
except Exception as e:
self.stick.logger.error("Error while executing callback : %s", e)
def discover_after_scan(self):
    """Fire "NEW_NODE" callbacks for late-discovered nodes and forget them.

    BUGFIX: the original kept only the last matching mac in a scalar, so
    when several nodes were discovered in one pass only one of them was
    removed from the not-discovered registry.
    """
    discovered = [
        mac for mac in self._nodes_not_discovered if mac in self._plugwise_nodes
    ]
    for mac in discovered:
        self.do_callback("NEW_NODE", mac)
        del self._nodes_not_discovered[mac]
def nodes(self) -> list:
    """Mac addresses of all currently known Plugwise nodes."""
    return [*self._plugwise_nodes]
def node(self, mac) -> PlugwiseNode:
    """Return the node object registered for `mac`, or None when unknown."""
    assert isinstance(mac, str)
    return self._plugwise_nodes.get(mac)
def discover_node(self, mac, callback=None) -> bool:
    """Request node info for `mac`; return True when the request was sent.

    The strict `== True` comparison is kept deliberately: validate_mac's
    return type is defined elsewhere and may not be a plain bool.
    """
    assert isinstance(mac, str)
    if validate_mac(mac) != True:
        return False
    self.send(NodeInfoRequest(bytes(mac, "ascii")), callback)
    return True
def scan(self, callback=None):
    """Scan the network: ask the Circle+ for linked nodes, then discover each.

    `callback` fires once all linked nodes were discovered, or once the
    discovery timeout expired.
    """

    def scan_finished(nodes_to_discover):
        """ Callback when scan is finished """
        time.sleep(1)
        self.logger.debug("Scan plugwise network finished")
        self._nodes_discovered = 0
        self._nodes_to_discover = nodes_to_discover
        self._discovery_finished = False

        def node_discovered():
            # Progress callback fired after each node info response.
            self._nodes_discovered += 1
            self.logger.debug(
                "Discovered Plugwise node %s of %s",
                str(len(self._plugwise_nodes)),
                str(self._nodes_to_discover),
            )
            if (len(self._plugwise_nodes) - 1) >= len(self._nodes_to_discover):
                self._discovery_finished = True
                self._nodes_to_discover = None
                if callback:
                    callback()

        def timeout_expired():
            # Give up waiting; remember undiscovered macs so the update
            # loop can retry them later.
            if not self._discovery_finished:
                for (mac, address) in self._nodes_to_discover:
                    if mac not in self._plugwise_nodes.keys():
                        self.logger.warning(
                            "Failed to discover Plugwise node %s before timeout expired.",
                            str(mac),
                        )
                        # Add nodes to be discovered later at update loop
                        self._nodes_not_discovered[mac] = (None, None)
                if callback:
                    callback()

        # setup timeout for loading nodes
        discover_timeout = (
            10 + (len(nodes_to_discover) * 2) + (MESSAGE_TIME_OUT * MESSAGE_RETRY)
        )
        # NOTE(review): Timer(...).start() returns None, so this attribute
        # never holds the Timer object and the timeout cannot be
        # cancelled -- confirm whether cancellation was intended.
        self.discover_timeout = threading.Timer(
            discover_timeout, timeout_expired
        ).start()
        self.logger.debug("Start discovery of linked node types...")
        for (mac, address) in nodes_to_discover:
            self.discover_node(mac, node_discovered)

    def scan_circle_plus():
        """Callback when Circle+ is discovered"""
        if self.circle_plus_mac in self._plugwise_nodes:
            if self.print_progress:
                print("Scan Circle+ for linked nodes")
            self.logger.debug("Scan Circle+ for linked nodes...")
            self._plugwise_nodes[self.circle_plus_mac].scan_for_nodes(scan_finished)
        else:
            self.logger.error(
                "Circle+ is not discovered in %s", self._plugwise_nodes
            )

    # Discover Circle+ first; it is the coordinator that knows the others.
    if self.circle_plus_mac:
        if self.print_progress:
            print("Discover Circle+")
        self.logger.debug("Discover Circle+ at %s", self.circle_plus_mac)
        self.discover_node(self.circle_plus_mac, scan_circle_plus)
    else:
        self.logger.error(
            "Plugwise stick not properly initialized, Circle+ MAC is missing."
        )
def _append_node(self, mac, address, node_type):
    """Instantiate the node class matching `node_type` and register it."""
    self.logger.debug(
        "Add new node type (%s) with mac %s", str(node_type), mac,
    )
    known_types = {
        NODE_TYPE_CIRCLE: ("Circle", PlugwiseCircle),
        NODE_TYPE_CIRCLE_PLUS: ("Circle+", PlugwiseCirclePlus),
        NODE_TYPE_STEALTH: ("Stealth", PlugwiseStealth),
    }
    entry = known_types.get(node_type)
    if entry is None:
        self.logger.warning("Unsupported node type '%s'", str(node_type))
        return
    label, node_class = entry
    if self.print_progress:
        print(label + " node found using mac " + mac)
    self._plugwise_nodes[mac] = node_class(mac, address, self)
def _remove_node(self, mac):
    """
    Drop the node registered under `mac`, if any.
    :return: None
    """
    self._plugwise_nodes.pop(mac, None)
def feed_parser(self, data):
    """Forward freshly received raw serial bytes to the protocol parser."""
    assert isinstance(data, bytes)
    self.parser.feed(data)
def send(self, request, callback=None, retry_counter=0):
    """
    Submit request message into Plugwise Zigbee network and queue expected response

    `callback` runs once the matching response was processed;
    `retry_counter` counts how often this request has been resent.
    """
    assert isinstance(request, NodeRequest)
    # Pair each request type with the response type that will answer it.
    # NOTE: these are isinstance checks, so ordering matters for
    # subclassed request types.
    if isinstance(request, CirclePowerUsageRequest):
        response_message = CirclePowerUsageResponse()
    elif isinstance(request, NodeInfoRequest):
        response_message = NodeInfoResponse()
    elif isinstance(request, NodePingRequest):
        response_message = NodePingResponse()
    elif isinstance(request, CircleSwitchRequest):
        response_message = CircleSwitchResponse()
    elif isinstance(request, CircleCalibrationRequest):
        response_message = CircleCalibrationResponse()
    elif isinstance(request, CirclePlusScanRequest):
        response_message = CircleScanResponse()
    elif isinstance(request, CirclePlusRealTimeClockGetRequest):
        response_message = CirclePlusRealTimeClockResponse()
    elif isinstance(request, NodeClockGetRequest):
        response_message = NodeClockResponse()
    elif isinstance(request, StickInitRequest):
        response_message = StickInitResponse()
    else:
        response_message = None
    # Queue entry layout: [expected response, request, callback, retry
    # counter, sent-at timestamp (filled in by _send_message_loop)].
    self._send_message_queue.put(
        [response_message, request, callback, retry_counter, None,]
    )
def _send_message_loop(self):
    """Daemon loop: pop queued requests, send them, wait for the stick ack.

    Requests that are not acknowledged within ~1 second are re-queued up
    to MESSAGE_RETRY times, then dropped.
    """
    while self._run_send_message_thread:
        request_set = self._send_message_queue.get(block=True)
        if self.last_ack_seq_id != None:
            # Calc new seq_id based last received ack messsage
            seq_id = inc_seq_id(self.last_ack_seq_id)
        else:
            # first message, so use a fake seq_id
            seq_id = b"0000"
        self.expected_responses[seq_id] = request_set
        if not isinstance(request_set[1], StickInitRequest):
            mac = request_set[1].mac.decode("ascii")
            self.logger.debug(
                "send %s to %s using seq_id %s",
                request_set[1].__class__.__name__,
                mac,
                str(seq_id),
            )
            if mac in self._plugwise_nodes:
                self._plugwise_nodes[mac].last_request = datetime.now()
            if self.expected_responses[seq_id][3] > 0:
                self.logger.debug(
                    "Retry %s for message %s to %s",
                    str(self.expected_responses[seq_id][3]),
                    str(self.expected_responses[seq_id][1].__class__.__name__),
                    self.expected_responses[seq_id][1].mac.decode("ascii"),
                )
        else:
            self.logger.debug(
                "send StickInitRequest using seq_id %s", str(seq_id),
            )
        # Record the send time so the receive-timeout loop can expire it.
        self.expected_responses[seq_id][4] = datetime.now()
        self.connection.send(request_set[1])
        time.sleep(SLEEP_TIME)
        timeout_counter = 0
        # Wait max 1 second for acknowledge response
        while (
            self.last_ack_seq_id != seq_id
            and timeout_counter <= 10
            and seq_id != b"0000"
            and self.last_ack_seq_id != None
        ):
            time.sleep(0.1)
            timeout_counter += 1
        if timeout_counter > 10:
            if seq_id in self.expected_responses:
                if self.expected_responses[seq_id][3] <= MESSAGE_RETRY:
                    self.logger.warning(
                        "Resend %s for %s because stick did not acknowledge request (%s)",
                        str(self.expected_responses[seq_id][1].__class__.__name__),
                        self.expected_responses[seq_id][1].mac.decode("ascii"),
                        str(seq_id),
                    )
                    # Re-queue with an incremented retry counter; the
                    # resend gets a fresh seq_id, so drop this entry below.
                    self.send(
                        self.expected_responses[seq_id][1],
                        self.expected_responses[seq_id][2],
                        self.expected_responses[seq_id][3] + 1,
                    )
                else:
                    self.logger.warning(
                        "Drop %s request for mac %s because max (%s) retries reached",
                        self.expected_responses[seq_id][1].__class__.__name__,
                        self.expected_responses[seq_id][1].mac.decode("ascii"),
                        str(MESSAGE_RETRY),
                    )
                del self.expected_responses[seq_id]
def _receive_timeout_loop(self):
    """Daemon loop: expire queued requests whose response never arrived.

    Expired requests are resent up to MESSAGE_RETRY times, then dropped.
    """
    while self._run_receive_timeout_thread:
        for seq_id in list(self.expected_responses.keys()):
            if isinstance(self.expected_responses[seq_id][1], StickInitRequest):
                # NOTE(review): _cb_stick_initialized is not assigned
                # anywhere visible in this file -- a timed-out
                # StickInitRequest would raise AttributeError; confirm.
                if self._cb_stick_initialized:
                    self._cb_stick_initialized()
                del self.expected_responses[seq_id]
            elif isinstance(
                self.expected_responses[seq_id][1], NodeClockSetRequest
            ):
                # Clock set requests get no tracked response; just drop.
                del self.expected_responses[seq_id]
            elif isinstance(
                self.expected_responses[seq_id][1],
                CirclePlusRealTimeClockSetRequest,
            ):
                del self.expected_responses[seq_id]
            else:
                # Entry [4] is the sent-at timestamp (None until sent).
                if self.expected_responses[seq_id][4] != None:
                    if self.expected_responses[seq_id][4] < (
                        datetime.now() - timedelta(seconds=MESSAGE_TIME_OUT)
                    ):
                        self.logger.debug(
                            "Timeout expired for message with sequence ID %s",
                            str(seq_id),
                        )
                        if self.expected_responses[seq_id][3] <= MESSAGE_RETRY:
                            self.logger.debug(
                                "Resend request %s",
                                str(
                                    self.expected_responses[seq_id][
                                        1
                                    ].__class__.__name__
                                ),
                            )
                            # Re-queue under a fresh seq_id; old entry is
                            # deleted below.
                            self.send(
                                self.expected_responses[seq_id][1],
                                self.expected_responses[seq_id][2],
                                self.expected_responses[seq_id][3] + 1,
                            )
                        else:
                            self.logger.warning(
                                "Drop %s request for mac %s because max (%s) retries reached",
                                self.expected_responses[seq_id][
                                    1
                                ].__class__.__name__,
                                self.expected_responses[seq_id][1].mac.decode(
                                    "ascii"
                                ),
                                str(MESSAGE_RETRY),
                            )
                        del self.expected_responses[seq_id]
        time.sleep(MESSAGE_TIME_OUT)
def new_message(self, message):
    """ Received message from Plugwise Zigbee network """
    assert isinstance(message, NodeResponse)
    self.logger.debug(
        "New %s message with seq id %s for %s",
        message.__class__.__name__,
        str(message.seq_id),
        message.mac.decode("ascii"),
    )
    mac = message.mac.decode("ascii")
    if isinstance(message, StickInitResponse):
        # Init response carries the stick mac, network state and the
        # mac of the Circle+ coordinator.
        self._mac_stick = message.mac
        if message.network_is_online.value == 1:
            self.network_online = True
        else:
            self.network_online = False
        # Replace first 2 charactors by 00 for mac of circle+ node
        self.circle_plus_mac = "00" + message.circle_plus_mac.value[2:].decode(
            "ascii"
        )
        self.network_id = message.network_id.value
        # The first StickInitResponse gives the actual sequence ID
        if b"0000" in self.expected_responses:
            seq_id = b"0000"
        else:
            seq_id = message.seq_id
        self.message_processed(seq_id)
    elif isinstance(message, NodeInfoResponse):
        # First info response for an unknown mac registers the node.
        if not mac in self._plugwise_nodes:
            if message.node_type.value == NODE_TYPE_CIRCLE_PLUS:
                self._append_node(mac, 0, message.node_type.value)
            else:
                for (mac_to_discover, address) in self._nodes_to_discover:
                    if mac == mac_to_discover:
                        self._append_node(mac, address, message.node_type.value)
        self._plugwise_nodes[mac].on_message(message)
    else:
        # All other responses are forwarded to the owning node object.
        if mac in self._plugwise_nodes:
            self._plugwise_nodes[mac].on_message(message)
def message_processed(self, seq_id, ack_response=None):
    """Finish handling of a tracked request: run callbacks, retry or drop.

    ack_response: None for a normal response, or ACK_TIMEOUT / ACK_ERROR
    when the network reported a failure for this seq_id.
    """
    if seq_id in self.expected_responses:
        # excute callback at response of message
        self.logger.debug(
            "%s request with seq id %s processed",
            self.expected_responses[seq_id][0].__class__.__name__,
            str(seq_id),
        )
        if isinstance(self.expected_responses[seq_id][1], StickInitRequest):
            if self.expected_responses[seq_id][2]:
                self.expected_responses[seq_id][2]()
        else:
            if ack_response == ACK_TIMEOUT:
                # Network-level timeout: retry while the node is still
                # considered available, otherwise mark it unavailable.
                if self.expected_responses[seq_id][3] <= MESSAGE_RETRY:
                    mac = self.expected_responses[seq_id][1].mac.decode("ascii")
                    self.logger.debug(
                        "Network time out received for (%s of %s) of %s to %s, resend request",
                        str(self.expected_responses[seq_id][3] + 1),
                        str(MESSAGE_RETRY + 1),
                        str(self.expected_responses[seq_id][1].__class__.__name__),
                        mac,
                    )
                    if mac in self._plugwise_nodes:
                        if self._plugwise_nodes[mac].get_available():
                            self.send(
                                self.expected_responses[seq_id][1],
                                self.expected_responses[seq_id][2],
                                self.expected_responses[seq_id][3] + 1,
                            )
                else:
                    self.logger.debug(
                        "Max (%s) network time out messages received for %s to %s, drop request",
                        str(self.expected_responses[seq_id][3] + 1),
                        str(self.expected_responses[seq_id][1].__class__.__name__),
                        self.expected_responses[seq_id][1].mac.decode("ascii"),
                    )
                    # Mark node as unavailable
                    mac = self.expected_responses[seq_id][1].mac.decode("ascii")
                    if mac in self._plugwise_nodes:
                        if self._plugwise_nodes[mac].get_available():
                            self.logger.warning(
                                "Mark %s as unavailabe because %s time out responses reached",
                                mac,
                                str(MESSAGE_RETRY + 1),
                            )
                            self._plugwise_nodes[mac].set_available(False)
            elif ack_response == ACK_ERROR:
                # Stick reported an error for this request.
                mac = self.expected_responses[seq_id][1].mac.decode("ascii")
                if self.expected_responses[seq_id][3] <= MESSAGE_RETRY:
                    self.logger.debug(
                        "Error response received for (%s of %s) of %s to %s, resend request",
                        str(self.expected_responses[seq_id][3] + 1),
                        str(MESSAGE_RETRY + 1),
                        str(self.expected_responses[seq_id][1].__class__.__name__),
                        mac,
                    )
                    if mac in self._plugwise_nodes:
                        if self._plugwise_nodes[mac].get_available():
                            self.send(
                                self.expected_responses[seq_id][1],
                                self.expected_responses[seq_id][2],
                                self.expected_responses[seq_id][3] + 1,
                            )
                else:
                    self.logger.debug(
                        "Error response received for (%s of %s) of %s to %s, drop request",
                        str(self.expected_responses[seq_id][3] + 1),
                        str(MESSAGE_RETRY + 1),
                        str(self.expected_responses[seq_id][1].__class__.__name__),
                        mac,
                    )
            elif ack_response == None:
                # Normal response: run the caller-supplied callback.
                if self.expected_responses[seq_id][2]:
                    try:
                        self.expected_responses[seq_id][2]()
                    except Exception as e:
                        self.logger.error(
                            "Error while executing callback after processing message : %s",
                            e,
                        )
        del self.expected_responses[seq_id]
def stop(self):
    """
    Stop connection to Plugwise Zigbee network

    Signals every daemon loop to exit, then releases the port.
    """
    self._run_watchdog = False
    self._run_receive_timeout_thread = False
    self._run_send_message_thread = False
    self._auto_update_timer = None
    self.connection.close_port()
def _watchdog_loop(self):
    """
    Main worker loop to watch all other worker threads

    Logs when the connection reader/writer threads stopped and restarts
    the receive-timeout, send and update threads when they died.
    """
    time.sleep(5)  # give the other threads time to come up
    while self._run_watchdog:
        # Connection
        if self.connection.is_connected():
            # Connection reader daemon
            if not self.connection.read_thread_alive():
                self.logger.warning("Unexpected halt of connection reader thread")
            # Connection writer daemon
            if not self.connection.write_thread_alive():
                self.logger.warning("Unexpected halt of connection writer thread")
        # receive timeout daemon
        # BUGFIX: Thread.isAlive() was removed in Python 3.9 -- use
        # is_alive() (three occurrences below).
        if self._run_receive_timeout_thread:
            if not self._receive_timeout_thread.is_alive():
                self.logger.warning(
                    "Unexpected halt of receive thread, restart thread",
                )
                self._receive_timeout_thread = threading.Thread(
                    None,
                    self._receive_timeout_loop,
                    "receive_timeout_deamon",
                    (),
                    {},
                )
                self._receive_timeout_thread.daemon = True
                self._receive_timeout_thread.start()
        # send message daemon
        if self._run_send_message_thread:
            if not self._send_message_thread.is_alive():
                self.logger.warning(
                    "Unexpected halt of send thread, restart thread",
                )
                self._send_message_thread = threading.Thread(
                    None, self._send_message_loop, "send_messages_deamon", (), {}
                )
                self._send_message_thread.daemon = True
                self._send_message_thread.start()
        # Update daemon
        if self._run_update_thread:
            if not self._update_thread.is_alive():
                self.logger.warning(
                    "Unexpected halt of update thread, restart thread",
                )
                self._run_update_thread = True
                self._update_thread = threading.Thread(
                    None, self._update_loop, "update_daemon", (), {}
                )
                self._update_thread.daemon = True
                self._update_thread.start()
        time.sleep(WATCHDOG_DEAMON)
def _update_loop(self):
    """
    When node has not received any message during
    last 2 update polls, reset availability

    Daemon loop: pings every known node, requests power usage for
    Circle/Circle+ nodes, refreshes node info hourly and retries
    discovery of nodes missed at the initial scan.
    """
    self._run_update_thread = True
    self._auto_update_first_run = True
    try:
        while self._run_update_thread:
            for mac in self._plugwise_nodes:
                # Do ping request
                self.logger.debug(
                    "Send ping to node %s", mac,
                )
                self._plugwise_nodes[mac].ping()
                # Only power use updates for supported nodes
                if isinstance(
                    self._plugwise_nodes[mac], PlugwiseCircle
                ) or isinstance(self._plugwise_nodes[mac], PlugwiseCirclePlus):
                    # Don't check at first time
                    self.logger.debug(
                        "Request current power usage for node %s", mac
                    )
                    if not self._auto_update_first_run and self._run_update_thread:
                        # Only request update if node is available
                        if self._plugwise_nodes[mac].get_available():
                            self.logger.debug(
                                "Node '%s' is available for update request, last update (%s)",
                                mac,
                                str(self._plugwise_nodes[mac].get_last_update()),
                            )
                            # Skip update request if there is still an request expected to be received
                            open_requests_found = False
                            for seq_id in list(self.expected_responses.keys()):
                                if isinstance(
                                    self.expected_responses[seq_id][1],
                                    CirclePowerUsageRequest,
                                ):
                                    if mac == self.expected_responses[seq_id][
                                        1
                                    ].mac.decode("ascii"):
                                        open_requests_found = True
                                        break
                            if not open_requests_found:
                                self._plugwise_nodes[mac].update_power_usage()
                            # Refresh node info once per hour and request power use afterwards
                            if self._plugwise_nodes[mac]._last_info_message != None:
                                if self._plugwise_nodes[mac]._last_info_message < (
                                    datetime.now().replace(
                                        minute=1,
                                        second=MAX_TIME_DRIFT,
                                        microsecond=0,
                                    )
                                ):
                                    self._plugwise_nodes[mac]._request_info(
                                        self._plugwise_nodes[
                                            mac
                                        ]._request_power_buffer
                                    )
                            if not self._plugwise_nodes[mac]._last_log_collected:
                                self._plugwise_nodes[mac]._request_power_buffer()
                    else:
                        if self._run_update_thread:
                            self.logger.debug(
                                "First request for current power usage for node %s",
                                mac,
                            )
                            self._plugwise_nodes[mac].update_power_usage()
            self._auto_update_first_run = False
            # Try to rediscover node(s) which where not available at initial scan
            # Do this the first hour at every update, there after only once an hour
            for mac in self._nodes_not_discovered:
                (firstrequest, lastrequest) = self._nodes_not_discovered[mac]
                if firstrequest and lastrequest:
                    if (firstrequest + timedelta(hours=1)) > datetime.now():
                        # first hour, so do every update a request
                        self.discover_node(mac, self.discover_after_scan)
                        self._nodes_not_discovered[mac] = (
                            firstrequest,
                            datetime.now(),
                        )
                    else:
                        if (lastrequest + timedelta(hours=1)) < datetime.now():
                            self.discover_node(mac, self.discover_after_scan)
                            self._nodes_not_discovered[mac] = (
                                firstrequest,
                                datetime.now(),
                            )
                else:
                    self.discover_node(mac, self.discover_after_scan)
                    self._nodes_not_discovered[mac] = (
                        datetime.now(),
                        datetime.now(),
                    )
            # _auto_update_timer is the poll interval in seconds (see auto_update).
            if self._auto_update_timer:
                time.sleep(self._auto_update_timer)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        self.logger.error(
            "Error at line %s of _update_loop : %s", exc_tb.tb_lineno, e
        )
def auto_update(self, timer=None):
    """
    setup auto update polling for power usage.

    timer == 0    -> stop polling.
    timer is None -> auto interval: 3 seconds per known node.
    timer > 5     -> use the given interval in seconds.
    Any other value falls back to the 5-second minimum.
    """
    if timer == 0:
        # Disable polling.
        self._run_update_thread = False
        self._auto_update_timer = None
    else:
        self._auto_update_timer = 5  # minimum poll interval
        # BUGFIX(idiom): `timer == None` replaced by the identity test.
        if timer is None:
            # Timer based on number of nodes and 3 seconds per node
            self._auto_update_timer = len(self._plugwise_nodes) * 3
        elif timer > 5:
            self._auto_update_timer = timer
        if not self._run_update_thread:
            self._update_thread.start()
|
import pyodbc
from platform import system
from Constants import *
def sql_exec(
    p_server: str,
    p_database: str,
    p_user: str,
    p_password: str,
    p_port: int,
    p_sql: str,
    p_result: int = 1
):
    """Execute a statement on MSSQL and return (rows_or_flag, error).

    :param p_server: server host
    :param p_database: database name
    :param p_user: user name
    :param p_password: password
    :param p_port: port
    :param p_sql: SQL statement to execute
    :param p_result: 1 when the statement returns rows to fetch (default 1)
    :return: tuple (query_output, l_error); query_output is the fetched
        rows (or 1 for non-SELECT statements), l_error is the driver
        error text or None.
    """
    l_error = None
    query_output = None
    # BUGFIX: the source contained a redaction placeholder (`<PASSWORD>`)
    # where the password keyword belongs; pass p_password explicitly.
    connect_kwargs = dict(
        server=p_server,
        database=p_database,
        uid=p_user,
        pwd=p_password,
        port=p_port,
        timeout=10,
    )
    if system() == "Darwin":
        # On macOS the ODBC driver path and TDS version must be explicit.
        connect_kwargs["tds_version"] = C_TDS_VERSION
        connect_kwargs["driver"] = C_MSSQL_DRIVER_MACOS_PATH
    try:
        mssql_cnct = pyodbc.connect(**connect_kwargs)
    except pyodbc.Error as e:
        l_error = e.args[1]
        return query_output, l_error
    crsr = mssql_cnct.cursor()
    try:
        crsr.execute(p_sql)
        if p_result == 1:
            query_output = crsr.fetchall()
        else:
            query_output = 1
    except pyodbc.Error as e:
        l_error = e.args[1]
    finally:
        # Always release the cursor and connection.
        crsr.close()
        mssql_cnct.close()
    return query_output, l_error
def get_objects(
    p_server: str,
    p_database: str,
    p_user: str,
    p_password: str,
    p_port: int
):
    """Read table, column and primary-key metadata from information_schema.

    Returns the (rows, error) tuple produced by sql_exec.
    """
    # Last SELECT column is a 1/0 flag: 1 when the column belongs to a
    # primary key (constraint names starting with 'PK').
    l_query = """
    SELECT
        tab.table_catalog,
        tab.table_schema,
        tab.table_name,
        tab.table_type,
        col.column_name,
        col.data_type,
        COALESCE(
            col.character_maximum_length,
            CASE WHEN col.numeric_scale<>0 AND col.data_type='decimal'
                THEN col.numeric_precision
            END
        ) AS character_maximum_length,
        col.numeric_precision,
        col.numeric_scale,
        CASE WHEN ky.constraint_name IS NOT NULL THEN 1 ELSE 0 END
    FROM information_schema.tables tab
    LEFT JOIN information_schema.columns col
        ON 1=1
        AND tab.table_catalog=col.table_catalog
        AND tab.table_schema=col.table_schema
        AND tab.table_name=col.table_name
    LEFT JOIN information_schema.key_column_usage ky
        ON 1=1
        AND tab.table_catalog=ky.table_catalog
        AND tab.table_schema=ky.table_schema
        AND tab.table_name=ky.table_name
        AND col.column_name=ky.column_name
        AND substring(ky.constraint_name,1,2)='PK'
    """
    return sql_exec(
        p_server=p_server,
        p_database=p_database,
        p_user=p_user,
        p_password=p_password,
        p_port=p_port,
        p_sql=l_query,
        p_result=1
    )
# SQL expression yielding the current server timestamp on MSSQL.
C_CURRENT_TIMESTAMP_SQL="CURRENT_TIMESTAMP"
|
<gh_stars>1-10
import numpy as np
from scipy.interpolate import interpolate as interp
import astropy.units as u
from gwemlightcurves.KNModels.io.Me2017 import calc_lc_UV
class KN_lc(object):
    """
    Pre-compute a set of kilonova UV lightcurves (FUV/NUV bands).

    Parameters
    ----------
    mejs, vejs, betas, kappas : sequences of float
        Ejecta mass, velocity, beta and opacity per model; one lightcurve
        is generated per (mej, vej, beta, kappa) tuple via
        gwemlightcurves' calc_lc_UV.
    """

    def __init__(self, mejs, vejs, betas, kappas):
        filts = ["FUV", "NUV"]
        # Row index of each filter in the magnitude array from calc_lc_UV.
        magidxs = [0, 1]
        # Lightcurve sampling grid passed to calc_lc_UV (start, end, step).
        tini, tmax, dt = 0.05, 3.0, 0.1
        # Let's organize the data in to a list of dicts for easy lookup
        self.data = []
        for mej, vej, beta, kappa_r in zip(mejs, vejs, betas, kappas):
            t, lbol, mag_ds, Tobs = calc_lc_UV(tini, tmax, dt,
                                               mej, vej, beta, kappa_r)
            new_dict = {}
            for ii, (filt, magidx) in enumerate(zip(filts, magidxs)):
                # Keep only finite magnitudes, then interpolate back onto
                # the full time grid (extrapolating where needed).
                jj = np.where(np.isfinite(mag_ds[magidx, :]))[0]
                f = interp.interp1d(t[jj], mag_ds[magidx, jj],
                                    fill_value='extrapolate')
                new_dict[filt] = {'ph': t, 'mag': f(t)}
            self.data.append(new_dict)

    def interp(self, t, filtername, lc_indx=0):
        """
        t : array of floats
            The times to interpolate the light curve to.
            NOTE(review): `t.jd` is used below, so t is assumed to be an
            astropy Time-like object -- confirm with callers.
        filtername : str
            The filter; one of "FUV" or "NUV".
        lc_indx : int (0)
            Which pre-computed lightcurve to use.
        """
        # Outside the computed time range the magnitude is clamped to 99.
        result = np.interp(t.jd, self.data[lc_indx][filtername]['ph'],
                           self.data[lc_indx][filtername]['mag'],
                           left=99, right=99)
        return result
class KNePopMetric:
    """Detection metrics for a simulated kilonova population (FUV/NUV)."""

    def __init__(self, mejs, vejs, betas, kappas,
                 m5Col='limmag', filterCol='filter'):
        # Column names used to read the observation dataSlice.
        self.filterCol = filterCol
        self.m5Col = m5Col
        self.lightcurves = KN_lc(mejs, vejs, betas, kappas)
        # Per-band wavelengths (presumably effective wavelengths in nm --
        # confirm) and R_v; only stored here, not used in this class.
        waves = {'FUV': 160., 'NUV': 250.}
        self.waves = waves
        self.R_v = 3.1

    def _single_detect(self, dataSlice, slicePoint, mags, t):
        """
        Simple detection criteria: detect at least once
        """
        result = 1
        # Observations within 7 days after peak that beat the limiting mag.
        around_peak = np.where((t > 0) & (t < 7) &
                               (mags < dataSlice[self.m5Col]))[0]
        filters = dataSlice[self.filterCol][around_peak]
        if np.size(filters) < 1:
            return 0
        return result

    def _multi_detect(self, dataSlice, slicePoint, mags, t):
        """
        Simple detection criteria: detect at least twice
        """
        result = 1
        around_peak = np.where((t > 0) & (t < 7) &
                               (mags < dataSlice[self.m5Col]))[0]
        times = t[around_peak]
        # Require two distinct observation times.
        if np.size(np.unique(times)) < 2:
            return 0
        return result

    def _multi_color_detect(self, dataSlice, slicePoint, mags, t):
        """
        Color-based simple detection criteria:
        detect at least twice, with at least two color
        """
        result = 1
        around_peak = np.where((t > 0) & (t < 7) &
                               (mags < dataSlice[self.m5Col]))[0]
        # Require detections in at least two distinct filters.
        filters = np.unique(dataSlice[self.filterCol][around_peak])
        if np.size(filters) < 2:
            return 0
        return result

    def run(self, dataSlice, slicePoint=None, extinction=None):
        result = {}
        # Time relative to the KN peak.  NOTE(review): `t.jd` is used
        # below, so dataSlice["time"] is assumed astropy Time -- confirm.
        t = dataSlice["time"] - slicePoint['peak_time']
        mags = np.zeros(t.size, dtype=float)
        lc_indx = slicePoint['file_indx']
        for filtername in np.unique(dataSlice[self.filterCol]):
            infilt = np.where(dataSlice[self.filterCol] == filtername)
            mags[infilt] = self.lightcurves.interp(t[infilt],
                                                   filtername,
                                                   lc_indx=lc_indx)
            # Apply dust extinction on the light curve
            if extinction is not None:
                mags[infilt] += extinction[filtername]
            # Shift to apparent magnitude via the distance modulus
            # (distance is in Mpc, hence the 1e6 factor to pc).
            # NOTE(review): source indentation was ambiguous; applied
            # unconditionally here as physics requires -- confirm.
            distmod = 5*np.log10(slicePoint['distance']*1e6) - 5.0
            mags[infilt] += distmod
            mags[infilt] = mags[infilt]
        mags = mags * u.ABmag
        result['single_detect'] = self._single_detect(dataSlice, slicePoint,
                                                      mags, t.jd)
        result['multi_detect'] = self._multi_detect(dataSlice, slicePoint,
                                                    mags, t.jd)
        result['multi_color_detect'] = self._multi_color_detect(dataSlice,
                                                                slicePoint,
                                                                mags, t.jd)
        return result

    def reduce_single_detect(self, metric):
        # Extract the single-detection flag from a run() result dict.
        return metric['single_detect']

    def reduce_multi_detect(self, metric):
        return metric['multi_detect']

    def reduce_multi_color_detect(self, metric):
        return metric['multi_color_detect']
def generateKNPopSlicer(t_start=1, t_end=3652, n_events=10000,
                        seed=42, n_files=100):
    """ Generate a population of KNe events, and put the info
    about them into a UserPointSlicer object
    Parameters
    ----------
    t_start : float (1)
        The night to start KN events on (days)
    t_end : float (3652)
        The final night of KN events
    n_events : int (10000)
        The number of KN events to generate
    seed : float
        The seed passed to np.random
    n_files : int (100)
        The number of different KN lightcurves to use
    """
    # BUG FIX: `seed` was accepted but never used, so runs were not
    # reproducible. Use a private RandomState instead of the global RNG.
    rng = np.random.RandomState(seed)

    def rndm(a, b, g, size=1):
        """Power-law gen for pdf(x) \\propto x^{g-1} for a<=x<=b"""
        r = rng.random_sample(size=size)
        ag, bg = a**g, b**g
        return (ag + (bg - ag)*r)**(1./g)

    peak_times = rng.uniform(low=t_start, high=t_end, size=n_events)
    file_indx = np.floor(rng.uniform(low=0,
                                     high=n_files,
                                     size=n_events)).astype(int)
    # Distances drawn from a power law (uniform in volume out to 300 Mpc).
    distance = rndm(10, 300, 4, size=n_events)
    slicer = [{'peak_time': p, 'file_indx': f, 'distance': d}
              for p, f, d in zip(peak_times, file_indx, distance)]
    return slicer
|
# import BaseDatos
class ListaBaseDatos:
    """In-memory registry of database objects, looked up by name.

    Stored objects are expected to expose a ``Name`` attribute.
    NOTE(review): the original mixed ``Name`` and ``Nombre`` -- standardized
    on ``Name`` here; confirm against the (commented-out) BaseDatos class.
    """
    def __init__(self):
        self.lista_bases_datos = []

    def Buscar(self, database):
        """Return the stored database whose Name matches, or False."""
        # BUG FIX: the original returned False on the FIRST non-matching
        # element, so any match after the first slot was never found, and
        # an empty list fell through returning None.
        for base_datos in self.lista_bases_datos:
            if base_datos.Name == database:
                return base_datos
        return False

    def createDatabase(self, database):
        """Register a new database unless the name is empty or taken."""
        if not database:
            print("Se necesita un nombre para la base de datos")
            return
        # BUG FIX: the original looped printing "ya existente" per element
        # (using the inconsistent ``Nombre`` attribute) and never reached
        # the success branch correctly; delegate to Buscar instead.
        if self.Buscar(database):
            print("Base de datos '"+database+"' ya existente, no se pudo crear")
            return
        #self.lista_bases_datos.append(BaseDatos(database))
        print("Base de datos '"+database+"' creada con éxito")

    def showDatabases(self):
        """Print a banner listing the stored databases (listing is stubbed)."""
        print("//==============================//")
        print("   - - BD EN ALMACENAMIENTO - -")
        for base_datos in self.lista_bases_datos:
            #print(base_datos.Name)
            pass
        print("//==============================//")

    def alterDatabase(self, databaseNew, databaseOld):
        """Rename a database.

        NOTE(review): as in the original, this searches for *databaseNew*
        and would assign *databaseOld* -- the argument roles look swapped;
        semantics preserved, confirm intent before relying on it.
        """
        temp = self.Buscar(databaseNew)
        if temp:
            #temp.Name = databaseOld
            pass
        else:
            print("Base de datos '"+databaseNew+"' no encontrada")

    def dropDatabase(self, database):
        """Remove a database by name, reporting success or failure."""
        temp = self.Buscar(database)
        if temp:
            self.lista_bases_datos.remove(temp)
            # BUG FIX: the original referenced the undefined name
            # ``databaseNew`` here, raising NameError on every drop.
            print("Base de datos '"+database+"' eliminada con éxito")
        else:
            print("Base de datos '"+database+"' no encontrada")
#======= LLAMADA A FUNCIONES IMPORTADAS ========
# ~~> ESTAS FUNCIONES DEBERÍAN SER MOVIDAS A UN ARCHIVO MAIN <~~

# Shared registry used by every module-level helper below.
storage = ListaBaseDatos()

def _buscar_o_avisar(database, mensaje=None):
    """Look up *database* in storage.

    Returns the database object, or False after printing *mensaje* (or the
    standard not-found message) when it does not exist. Extracted because
    all twelve helpers below repeated this exact lookup/print boilerplate.
    """
    temp = storage.Buscar(database)
    if not temp:
        print(mensaje or "Base de datos '"+database+"' no encontrada")
    return temp

#==//== funciones con respecto a BaseDatos ==//==
def createTable(database, tableName, numberColumns):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.createTable(tableName, numberColumns)
        pass

def showTables(database):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.showTables()
        pass

def alterTable(database, tableOld, tableNew):
    # Keeps the original's distinct error message for this operation.
    temp = _buscar_o_avisar(database, "Base de datos '"+database+"' no contiene tablas")
    if temp:
        #temp.alterTable(tableOld, tableNew)
        pass

def dropTable(database, tableName):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.dropTable(tableName)
        pass

def alterAdd(database, tableName, columnName):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.alterAdd(tableName, columnName)
        pass

def alterDrop(database, tableName, columnName):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.alterDrop(tableName, columnName)
        pass

def extractTable(database, tableName):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.extractTable(tableName)
        pass

#==//== funciones con respecto a Tabla ==//==
def insert(database, table, columns):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.insert(table, columns)
        pass

def update(database, table, id, columnNumber, value):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.update(table, id, columnNumber, value)
        pass

def deleteTable(database, tableName, id):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.deleteTable(table, columns)
        pass

def truncate(database, tableName):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.truncate(tableName)
        pass

def extractRow(database, table, id):
    temp = _buscar_o_avisar(database)
    if temp:
        #temp.extractRow(table, id)
        pass
|
import random
class BaseCompose:
    """Common machinery for transform compositions.

    Holds the list of transforms, the apply-probability ``p``, the optional
    shuffle flag, and the frozen-parameters state shared by subclasses.
    """

    def __init__(self, transforms, p: float = 1.0, shuffle: bool = False):
        self.transforms = transforms
        self.p = p
        self.shuffle = shuffle
        self.are_parameters_frozen = False
        # Composite name such as "Gain_PitchShift", derived from the class
        # names of the contained transforms.
        self.__name__ = "_".join(type(t).__name__ for t in self.transforms)

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def randomize_parameters(self, *args, **kwargs):
        """
        Randomize and define parameters of every transform in composition.
        """
        # Pop the flag so it is not forwarded to the child transforms.
        if kwargs.pop("apply_to_children", True):
            for transform in self.transforms:
                transform.randomize_parameters(*args, **kwargs)

    def freeze_parameters(self, apply_to_children=True):
        """
        Mark all parameters as frozen, i.e. do not randomize them for each call. This can be
        useful if you want to apply an effect chain with the exact same parameters to multiple
        sounds.
        """
        self.are_parameters_frozen = True
        if apply_to_children:
            for transform in self.transforms:
                transform.freeze_parameters()

    def unfreeze_parameters(self, apply_to_children=True):
        """
        Unmark all parameters as frozen, i.e. let them be randomized for each call.
        """
        self.are_parameters_frozen = False
        if apply_to_children:
            for transform in self.transforms:
                transform.unfreeze_parameters()
class Compose(BaseCompose):
    """
    Compose applies the given sequence of transforms when called,
    optionally shuffling the sequence for every call.
    Example usage:
    ```
    augment = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
    ])
    # Generate 2 seconds of dummy audio for the sake of example
    samples = np.random.uniform(low=-0.2, high=0.2, size=(32000,)).astype(np.float32)
    # Augment/transform/perturb the audio data
    augmented_samples = augment(samples=samples, sample_rate=16000)
    ```
    """

    def __init__(self, transforms, p=1.0, shuffle=False):
        super().__init__(transforms, p, shuffle)

    def __call__(self, samples, sample_rate):
        # TODO: Adhere to self.are_parameters_frozen
        # https://github.com/iver56/audiomentations/issues/135
        if random.random() >= self.p:
            return samples
        pipeline = list(self.transforms)
        if self.shuffle:
            random.shuffle(pipeline)
        for transform in pipeline:
            samples = transform(samples, sample_rate)
        return samples
class SpecCompose(BaseCompose):
    """Compose variant for spectrogram transforms: each transform takes and
    returns a magnitude spectrogram."""

    def __init__(self, transforms, p=1.0, shuffle=False):
        super().__init__(transforms, p, shuffle)

    def __call__(self, magnitude_spectrogram):
        # TODO: Adhere to self.are_parameters_frozen
        # https://github.com/iver56/audiomentations/issues/135
        if random.random() >= self.p:
            return magnitude_spectrogram
        pipeline = list(self.transforms)
        if self.shuffle:
            random.shuffle(pipeline)
        for transform in pipeline:
            magnitude_spectrogram = transform(magnitude_spectrogram)
        return magnitude_spectrogram
class OneOf(BaseCompose):
    """
    OneOf randomly picks one of the given transforms when called, and applies that
    transform.
    Example usage:
    ```
    augment = OneOf([
        TimeStretch(min_rate=0.8, max_rate=1.25, p=1.0),
        PitchShift(min_semitones=-4, max_semitones=4, p=1.0),
    ])
    # Generate 2 seconds of dummy audio for the sake of example
    samples = np.random.uniform(low=-0.2, high=0.2, size=(32000,)).astype(np.float32)
    # Augment/transform/perturb the audio data
    augmented_samples = augment(samples=samples, sample_rate=16000)
    # Result: The audio was either time-stretched or pitch-shifted, but not both
    ```
    """

    def __init__(self, transforms, p: float = 1.0):
        super().__init__(transforms, p)
        self.transform_index = 0
        self.should_apply = True

    def randomize_parameters(self, *args, **kwargs):
        super().randomize_parameters(*args, **kwargs)
        self.should_apply = random.random() < self.p
        if self.should_apply:
            # Pick which single transform will run on the next call.
            self.transform_index = random.randint(0, len(self.transforms) - 1)

    def __call__(self, *args, **kwargs):
        if not self.are_parameters_frozen:
            kwargs["apply_to_children"] = False
            self.randomize_parameters(*args, **kwargs)
        if self.should_apply:
            kwargs.pop("apply_to_children", None)
            return self.transforms[self.transform_index](*args, **kwargs)
        # Not applying: return the input unchanged, wherever it was passed.
        for key in ("samples", "magnitude_spectrogram"):
            if key in kwargs:
                return kwargs[key]
        return args[0]
|
# <gh_stars>0  (scraper artifact; commented out so the module parses)
import collections
import glob
import os
from typing import Dict, Set
import matplotlib.pyplot as plt
import networkx as nx
import torch
from pyvis.network import Network
from sklearn.manifold import TSNE
from torch_geometric.data import Data
from tqdm import tqdm
import igraph
from dataset_v1 import PhishingDataset
from utils.utils import extract_domain_name, tensor_to_tuple_list
# Node colour palette for the pyvis graph visualization.
ROOT_COLOR = '#0096FF'        # the crawled root page (node id 0)
DOMAIN_COLOR = '#73FCD6'      # pages on the same domain as the root
OUT_DOMAIN_COLOR = '#FFD479'  # pages on an external domain
ERROR_COLOR = '#FF7E79'       # pages recorded as error pages
def visualize(
    data: Data,
    width: int=1000,
    height: int=800,
    html_save_file: str="graph.html",
    generate_svg: bool=False,
):
    """Create an html file with the corresponding graph
    plotted using the pyvis library.

    Parameters
    ----------
    data: graph example; ``data.pos`` must carry the viz helpers
        ('url_to_id' mapping and 'error_pages' set).
    width, height: pixel size of the generated pyvis canvas.
    html_save_file: output path; parent folders are created as needed.
    generate_svg: additionally render a simplified igraph SVG.
    """
    folder = os.path.dirname(html_save_file)
    if folder != '':
        os.makedirs(folder, exist_ok=True)
    edge_index = data.edge_index
    viz_utils = data.pos
    id_to_url = {v: k for k, v in viz_utils['url_to_id'].items()}
    edges = tensor_to_tuple_list(edge_index)
    # edges = [(x, y) for x, y in edges if x == 0]
    G = nx.MultiDiGraph()
    G.add_edges_from(edges)
    net = Network(width=width, height=height, directed=True)
    net.from_nx(G)
    root_url = id_to_url[0]
    domain = extract_domain_name(root_url)
    colors = []
    for node in net.nodes:
        node_url = id_to_url[node['id']]
        node['size'] = 15
        node['label'] = ''
        # Colour by relationship to the root: same-domain, external,
        # the root itself, or an error page (precedence increases downward).
        if domain in node_url:
            node['color'] = DOMAIN_COLOR
        else:
            node['color'] = OUT_DOMAIN_COLOR
        if node['id'] == 0:
            node['color'] = ROOT_COLOR
        if node_url in viz_utils['error_pages']:
            node['color'] = ERROR_COLOR
        colors.append(node['color'])
        node['title'] = f'<a href="{id_to_url[node["id"]]}">{id_to_url[node["id"]]}</a>'
    count_edges = dict(collections.Counter(edges))
    for e in net.edges:
        t = (e['from'], e['to'])
        # BUG FIX: when the edge was present in neither orientation the
        # original raised KeyError; fall back through .get with default 0.
        nb_occurences = count_edges.get(t, count_edges.get((e['to'], e['from']), 0))
        if nb_occurences > 1:
            e['label'] = nb_occurences
    if generate_svg:
        g2 = igraph.Graph().from_networkx(G)
        g2 = g2.simplify()
        layout = g2.layout_auto()
        visual_style = {}
        visual_style["vertex_size"] = 10
        visual_style["vertex_color"] = colors
        visual_style["vertex_label_dist"] = 1
        visual_style["vertex_label_size"] = 8
        visual_style["edge_color"] = "lightgrey"
        visual_style["edge_width"] = 1
        visual_style["edge_curved"] = 0.1
        visual_style["layout"] = layout
        visual_style["bbox"] = (500, 500)
        visual_style["margin"] = 40
        igraph.plot(g2, target=f"text{len(data.x)}.svg", **visual_style)
    net.save_graph(html_save_file)
    # Append a small metadata div so the HTML is self-describing.
    with open(html_save_file, 'a') as html_file:
        graph_data_html = f"""
        <div id="graph_data"
            is_phishing="{data.y == 1.}"
            url="{root_url}"
            nb_edges="{data.num_edges}"
            nb_nodes="{data.num_nodes}"
        >
        </div>
        """
        html_file.write(graph_data_html)
def generate_every_graphs():
    """Creates the visualization graphs as html files
    for every example in the dataset (based on the files
    in data/processed).
    """
    path = os.path.join(os.getcwd(), "data", "train")
    data_files = sorted(glob.glob(os.path.join(path, "processed", "data_viz*")))
    if not os.path.exists(path) or len(data_files) == 0:
        # BUG FIX: the original printed this warning but fell through and
        # instantiated the dataset anyway; bail out early instead.
        print(f"No csv raw files found in {path}")
        return
    dataset = PhishingDataset(
        root=path,
        use_process=False,
        visulization_mode=True,  # (sic) keyword spelling defined by PhishingDataset
    )
    dataset = dataset.shuffle()
    print("Start generating graphs...")
    for i, data in enumerate(tqdm(dataset, total=len(dataset))):
        visualize(data, html_save_file=f"visualization/graphs/graph{i}.html")
    print("Graphs successfully created.")
def plot_embeddings(
    model,
    loader,
):
    """Run the model over *loader*, project the collected graph embeddings
    to 2-D with t-SNE and save a scatter plot to embeddings.png
    (red/green coloured by label)."""
    palette = ["red", "green"]
    all_embeddings = []
    point_colors = []
    for batch in loader:
        # Forward pass populates model.embeddings as a side effect.
        model(batch.x, batch.edge_index, batch.batch)
        all_embeddings.append(model.embeddings)
        point_colors.extend(palette[int(label)] for label in batch.y)
    stacked = torch.cat(all_embeddings, dim=0)
    coords = TSNE().fit_transform(stacked.detach().numpy())
    xs, ys = zip(*coords)
    plt.scatter(xs, ys, color=point_colors)
    plt.savefig("embeddings.png")
if __name__ == "__main__":
    # Entry point: build an HTML visualization for every graph in the dataset.
    generate_every_graphs()
|
"""Create AnVIL workspaces, set up with auth domain, add workspace READER access to auth domain, and OWNER access to AnVIL admins.
Usage:
> python3 set_up_anvil_workspace.py -t TSV_FILE [-p BILLING-PROJECT] """
import argparse
import json
import pandas as pd
import requests
from utils import add_user_to_authorization_domain, \
check_workspace_exists, \
create_authorization_domain, \
get_access_token, \
write_output_report
# Group granted ADMIN on each auth domain and OWNER on each workspace.
# NOTE(review): the address was scrubbed to "<EMAIL>" in this copy; restore
# the real anvil-admins group email before running.
ADMIN_ANVIL_EMAIL = "<EMAIL>"
def add_members_to_workspace(workspace_name, auth_domain_name, project="anvil-datastorage"):
    """Add members to workspace permissions.

    Grants the auth-domain group READER and the AnVIL admins group OWNER on
    {project}/{workspace_name} via the FireCloud updateWorkspaceACL API.

    Returns (True, newline-joined emails) on success, or
    (False, response text) when the API call fails.
    """
    # BUG FIX: the default project was "anvil_datastorage" (underscore) while
    # every other function in this module defaults to "anvil-datastorage".
    # NOTE(review): the reader email below was scrubbed to "<EMAIL>" in the
    # original; reconstructed as the auth-domain Google group address
    # (groups are @firecloud.org) -- confirm. The OWNER email is still the
    # scrubbed placeholder and must be restored before running.
    acls = [
        # add auth domain as READER, anvil-admins as OWNER
        {'email': f'{auth_domain_name}@firecloud.org', 'accessLevel': 'READER',
         'canShare': False, 'canCompute': False},
        {'email': '<EMAIL>', 'accessLevel': 'OWNER',
         'canShare': True, 'canCompute': True},
    ]
    json_request = json.dumps(acls)
    # request URL for updateWorkspaceACL
    uri = f"https://api.firecloud.org/api/workspaces/{project}/{workspace_name}/acl?inviteUsersNotFound=false"
    # Get access token and add to headers for requests.
    headers = {"Authorization": "Bearer " + get_access_token(), "accept": "*/*", "Content-Type": "application/json"}
    # capture response from API and parse out status code
    response = requests.patch(uri, headers=headers, data=json_request)
    emails = [acl['email'] for acl in acls]
    # print success or fail message based on status code
    if response.status_code != 200:
        print(f"WARNING: Failed to update {project}/{workspace_name} with the following user(s)/group(s): {emails}.")
        print("Check output file for error details.")
        return False, response.text
    print(f"Successfully updated {project}/{workspace_name} with the following user(s)/group(s): {emails}.")
    emails_str = ("\n".join(emails))  # write list of emails as strings on new lines
    return True, emails_str
def create_workspace(workspace_name, auth_domain_name, project="anvil-datastorage"):
    """Create the Terra workspace with given authorization domain.

    Returns a (success, message) tuple:
      - (True, None) when a new workspace was created,
      - (False, error_text) when the existence check or creation failed,
      - (None, message) when the workspace exists and the user declined reuse,
      - (True, message) when the workspace exists and the user accepted reuse.
    """
    # check if workspace already exists
    ws_exists, ws_exists_response = check_workspace_exists(workspace_name, project)
    if ws_exists is None:
        # the existence check itself errored; propagate its message
        return False, ws_exists_response
    if not ws_exists: # workspace doesn't exist (404), create workspace
        # create request JSON
        create_ws_json = make_create_workspace_request(workspace_name, auth_domain_name, project) # json for API request
        # request URL for createWorkspace
        uri = f"https://api.firecloud.org/api/workspaces"
        # Get access token and add to headers for requests.
        # -H "accept: application/json" -H "Authorization: Bearer [token] -H "Content-Type: application/json"
        headers = {"Authorization": "Bearer " + get_access_token(), "accept": "application/json", "Content-Type": "application/json"}
        # capture response from API and parse out status code
        response = requests.post(uri, headers=headers, data=json.dumps(create_ws_json))
        status_code = response.status_code
        if status_code != 201: # ws creation fail
            print(f"WARNING: Failed to create workspace with name: {workspace_name}. Check output file for error details.")
            return False, response.text
        # workspace creation success
        print(f"Successfully created workspace with name: {workspace_name}.")
        return True, None
    # workspace already exists
    print(f"Workspace already exists with name: {project}/{workspace_name}.")
    print(f"Existing workspace details: {json.dumps(json.loads(ws_exists_response), indent=2)}")
    # make user decide if they want to update/overwrite existing workspace
    while True: # try until user inputs valid response
        update_existing_ws = input("Would you like to continue modifying the existing workspace? (Y/N)" + "\n")
        if update_existing_ws.upper() in ["Y", "N"]:
            break
        else:
            print("Not a valid option. Choose: Y/N")
    if update_existing_ws.upper() == "N": # don't overwrite existing workspace
        deny_overwrite_message = f"{project}/{workspace_name} already exists. User selected not to overwrite. Try again with unique workspace name."
        return None, deny_overwrite_message
    accept_overwrite_message = f"{project}/{workspace_name} already exists. User selected to overwrite."
    return True, accept_overwrite_message # overwrite existing workspace - 200 status code for "Y"
def make_create_workspace_request(workspace_name, auth_domain_name, project="anvil-datastorage"):
    """Make the json request to pass into create_workspace()."""
    # TODO: set noWorkspaceOwner = True for data delivery workspaces - picard svc is the only owner
    return {
        "namespace": project,
        "name": workspace_name,
        "authorizationDomain": [{"membersGroupName": f'{auth_domain_name}'}],
        "attributes": {},
        "noWorkspaceOwner": False,
    }
def setup_auth_domain(auth_domain_name):
    """Create authorization domain (google group) and add user."""
    # Step 1: create the AD (google group) with the given name.
    created, create_message = create_authorization_domain(auth_domain_name)
    if not created:
        return False, create_message
    # Step 2: add the AnVIL admins group as an ADMIN of the new AD.
    added, add_message = add_user_to_authorization_domain(
        auth_domain_name, ADMIN_ANVIL_EMAIL, "ADMIN")
    if not added:
        return False, add_message
    # Both steps succeeded.
    return True, None
def setup_single_workspace(workspace, project="anvil-datastorage"):
    """Create one workspace and set up with authorization domain and ACLs.

    *workspace* is a row-like mapping with 'workspace_name' and
    'auth_domain_name'. Returns a status dict (one output-report row);
    fields keep their default failure values for any step not reached.
    """
    # initialize workspace dictionary with default values assuming failure
    workspace_dict = {"input_workspace_name": "NA",
                      "input_auth_domain_name": "NA",
                      "auth_domain_email": "Incomplete",
                      "auth_domain_setup_error": "NA",
                      "email_added_to_AD": "Incomplete",
                      "workspace_link": "Incomplete",
                      "workspace_creation_error": "NA",
                      "workspace_ACLs": "Incomplete",
                      "workspace_ACLs_error": "NA",
                      "final_workspace_status": "Failed"}
    # start authorization domain
    auth_domain_name = workspace['auth_domain_name']
    workspace_dict["input_auth_domain_name"] = auth_domain_name
    setup_ad_success, setup_ad_message = setup_auth_domain(auth_domain_name)
    if not setup_ad_success:
        workspace_dict["auth_domain_setup_error"] = setup_ad_message
        return workspace_dict
    # AD creation and add member to AD success
    # BUG FIX: this line was scrubbed to the invalid f-string
    # f"{auth_<EMAIL>"; reconstructed as the AD group address
    # (NOTE(review): confirm the @firecloud.org domain).
    workspace_dict["auth_domain_email"] = f"{auth_domain_name}@firecloud.org"
    workspace_dict["email_added_to_AD"] = ADMIN_ANVIL_EMAIL  # member added to AD
    # workspace creation if AD set up succeeds
    workspace_name = workspace["workspace_name"]
    workspace_dict["input_workspace_name"] = workspace_name
    # create workspace
    create_ws_success, create_ws_message = create_workspace(workspace_name, auth_domain_name, project)
    workspace_dict["workspace_creation_error"] = create_ws_message
    if not create_ws_success:
        return workspace_dict
    # ws creation success
    workspace_dict["workspace_link"] = (f"https://app.terra.bio/#workspaces/{project}/{workspace_name}").replace(" ", "%20")
    # add ACLs to workspace if workspace creation success
    add_member_success, add_member_message = add_members_to_workspace(workspace_name, auth_domain_name, project)
    if not add_member_success:
        workspace_dict["workspace_ACLs_error"] = add_member_message
        return workspace_dict
    # adding ACLs to workspace success
    workspace_dict["workspace_ACLs"] = add_member_message  # update dict with ACL emails
    workspace_dict["final_workspace_status"] = "Success"   # final workspace setup step
    return workspace_dict
def setup_workspaces(tsv, project="anvil-datastorage"):
    """Get the workspace and associated auth domain from input tsv file.

    Runs setup_single_workspace for every row of the tsv and writes the
    combined status report.
    """
    # read full tsv into dataframe
    setup_info_df = pd.read_csv(tsv, sep="\t")
    # columns for the output report
    col_names = ["input_workspace_name", "input_auth_domain_name",
                 "auth_domain_email", "auth_domain_setup_error",
                 "email_added_to_AD",
                 "workspace_link", "workspace_creation_error",
                 "workspace_ACLs", "workspace_ACLs_error",
                 "final_workspace_status"]
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; collect the per-row dicts and build the frame once instead.
    rows = [setup_single_workspace(row, project)
            for _, row in setup_info_df.iterrows()]
    all_row_df = pd.DataFrame(rows, columns=col_names)
    write_output_report(all_row_df)
if __name__ == "__main__":
    # Command-line entry point: parse the tsv/project args and run setup.
    parser = argparse.ArgumentParser(description='Set-up AnVIL external data delivery workspaces.')
    parser.add_argument('-t', '--tsv', required=True, type=str, help='tsv file with workspace name and auth domains to create.')
    parser.add_argument('-p', '--workspace_project', type=str, default="anvil-datastorage", help='workspace project/namespace. default: anvil-datastorage')
    args = parser.parse_args()
    # call to create and set up external data delivery workspaces
    setup_workspaces(args.tsv, args.workspace_project)
|
#!/usr/bin/env python2
"""Display letters regularly to the subject. Subj must respond to a target letter
only if it's different from the previous target letter.
Based on (Garavan, 1999 PNAS) doi:10.1073/pnas.96.14.8301"""
# SerialLetterTask_d1.py
# Created 10/06/17 by DJ based on GoNoGoTask_d1.py
from psychopy import core, gui, data, event, sound, logging
# from psychopy import visual # visual causes a bug in the guis, so it's declared after all GUIs run.
from psychopy.tools.filetools import fromFile, toFile # saving and loading parameter files
import time as ts, numpy as np # for timing and array operations
import AppKit, os, glob # for monitor size detection, files
import BasicPromptTools # for loading/presenting prompts and questions
import random, math # for randomization of trials, math
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Save the parameters declared below?
saveParams = True;
newParamsFilename = 'SerialLetterParams.pickle'

# Declare primary task parameters.
params = {
    # Declare stimulus and response parameters
    'nTrials': 10,            # number of trials in this session
    'stimDur': 0.5,           # time when stimulus is presented (in seconds)
    'tStartup': 2,            # pause time before starting first stimulus
    'tCoolDown': 2,           # pause time after end of last stimulus before "the end" text
    'triggerKey': 't',        # key from scanner that says scan is starting
    'respKeys': ['r'],        # keys to be used for responses (mapped to 1,2,3,4)
    'nullLetters': ['A','B','C','D'], # null (non-target) letters
    # BUG FIX: key was 'GoLetters' but every lookup in RunTrial reads
    # params['goLetters'] -- renamed for consistency.
    'goLetters': ['X','Y'],   # letters signaling a go trial
    # BUG FIX: this entry was written "'minGoInterval', 30" (comma instead
    # of colon), a syntax error inside the dict literal.
    'minGoInterval': 30,      # in letters
    'goStimProb': 0.8,        # probability of a given trial being a 'go' trial
    'lureProb': 0.8,          # given a go stim, what's the probability that the next is the same letter (a lure)?
    # declare prompt and question files
    'skipPrompts': False,     # go right to the scanner-wait page
    'promptDir': 'Prompts/',  # directory containing prompts and questions files
    'promptFile': 'GoNoGoPrompts.txt', # Name of text file containing prompts
    # declare display parameters
    'fullScreen': True,       # run in full screen mode?
    'screenToShow': 0,        # display on primary screen (0) or secondary (1)?
    'fixCrossSize': 50,       # size of cross, in pixels
    'fixCrossPos': [0,0],     # (x,y) pos of fixation cross displayed before each stimulus (for gaze drift correction)
    'screenColor':(128,128,128) # in rgb255 space: (r,g,b) all between 0 and 255
}
# save parameters
# Opens a file-save dialog; cancelling the dialog returns None and the
# parameters are simply not saved (the script keeps running).
if saveParams:
    dlgResult = gui.fileSaveDlg(prompt='Save Params...',initFilePath = os.getcwd() + '/Params', initFileName = newParamsFilename,
        allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
    newParamsFilename = dlgResult
    if newParamsFilename is None: # keep going, but don't save
        saveParams = False
    else:
        toFile(newParamsFilename, params) # save it!
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
scriptName = os.path.basename(__file__)
try: # try to get a previous parameters file
    expInfo = fromFile('%s-lastExpInfo.pickle'%scriptName)
    expInfo['session'] +=1 # automatically increment session number
    expInfo['paramsFile'] = [expInfo['paramsFile'],'Load...']
except: # if not there then use a default set
    # NOTE(review): bare except deliberately swallows any load failure and
    # falls back to defaults (Python 2 script; kept as-is).
    expInfo = {
        'subject':'1',
        'session': 1,
        'skipPrompts':False,
        'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
    expInfo['paramsFile'] = [newParamsFilename,'Load...']

#present a dialogue to change select params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','skipPrompts','paramsFile'])
if not dlg.OK:
    core.quit() # the user hit cancel, so exit

# find parameter file
if expInfo['paramsFile'] == 'Load...':
    dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
        allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
    expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
    # load params file
    params = fromFile(expInfo['paramsFile'])
    # transfer skipPrompts from expInfo (gui input) to params (logged parameters)
    params['skipPrompts'] = expInfo['skipPrompts']

# print params to Output (Python 2 print statements -- script targets python2)
print 'params = {'
for key in sorted(params.keys()):
    print "   '%s': %s"%(key,params[key]) # print each value as-is (no quotes)
print '}'

# save experimental info
toFile('%s-lastExpInfo.pickle'%scriptName, expInfo)#save params to file for next time

#make a log file to save parameter/event data
dateStr = ts.strftime("%b_%d_%H%M", ts.localtime()) # add the current time
filename = '%s-%s-%d-%s'%(scriptName,expInfo['subject'], expInfo['session'], dateStr) # log filename
logging.LogFile((filename+'.log'), level=logging.INFO)#, mode='w') # w=overwrite
logging.log(level=logging.INFO, msg='---START PARAMETERS---')
logging.log(level=logging.INFO, msg='filename: %s'%filename)
logging.log(level=logging.INFO, msg='subject: %s'%expInfo['subject'])
logging.log(level=logging.INFO, msg='session: %s'%expInfo['session'])
logging.log(level=logging.INFO, msg='date: %s'%dateStr)
# log everything in the params struct
for key in sorted(params.keys()): # in alphabetical order
    logging.log(level=logging.INFO, msg='%s: %s'%(key,params[key])) # log each parameter
logging.log(level=logging.INFO, msg='---END PARAMETERS---')
# ========================== #
# ===== GET SCREEN RES ===== #
# ========================== #
# kluge for secondary monitor
# Queries macOS (AppKit) for the pixel size of the chosen display; full
# screen is disabled on a secondary display.
if params['fullScreen']:
    screens = AppKit.NSScreen.screens()
    screenRes = (int(screens[params['screenToShow']].frame().size.width), int(screens[params['screenToShow']].frame().size.height))
    # screenRes = [1920, 1200]
    if params['screenToShow']>0:
        params['fullScreen'] = False
else:
    screenRes = [800,600]

print "screenRes = [%d,%d]"%screenRes
# ========================== #
# ===== SET UP STIMULI ===== #
# ========================== #
# visual is imported here (not at top) to avoid a GUI bug -- see the note
# at the top of the file.
from psychopy import visual
# Initialize deadline for displaying next frame
tNextFlip = [0.0] # put in a list to make it mutable (weird quirk of python variables)
#create clocks and window
globalClock = core.Clock()#to keep track of time
trialClock = core.Clock()#to keep track of time
win = visual.Window(screenRes, fullscr=params['fullScreen'], allowGUI=False, monitor='testMonitor', screen=params['screenToShow'], units='deg', name='win',color=params['screenColor'],colorSpace='rgb255')
# create fixation cross
fCS = params['fixCrossSize'] # size (for brevity)
fCP = params['fixCrossPos'] # position (for brevity)
fixation = visual.ShapeStim(win,lineColor='#000000',lineWidth=3.0,vertices=((fCP[0]-fCS/2,fCP[1]),(fCP[0]+fCS/2,fCP[1]),(fCP[0],fCP[1]),(fCP[0],fCP[1]+fCS/2),(fCP[0],fCP[1]-fCS/2)),units='pix',closeShape=False,name='fixCross');
# create text stimuli
message1 = visual.TextStim(win, pos=[0,+.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='topMsg', text="aaa",units='norm')
message2 = visual.TextStim(win, pos=[0,-.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='bottomMsg', text="bbb",units='norm')
# draw stimuli
mainText = visual.TextStim(win, pos=[0,0], wrapWidth=1.5, color='#000000', alignHoriz='center', name='mainText', text="ccc",units='norm')
# read questions and answers from text files
[topPrompts,bottomPrompts] = BasicPromptTools.ParsePromptFile(params['promptDir']+params['promptFile'])
print('%d prompts loaded from %s'%(len(topPrompts),params['promptFile']))
# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #

# increment time of next window flip
def AddToFlipTime(tIncrement=1.0):
    """Push the next scheduled window flip later by tIncrement seconds."""
    tNextFlip[0] += tIncrement

# flip window as soon as possible
def SetFlipTimeToNow():
    """Schedule the next window flip for the current time."""
    tNextFlip[0] = globalClock.getTime()
def RunTrial(iTrial,iLastGoTrial,lastGoLetter):
    """Run a single serial-letter trial.

    Chooses a go or null letter (enforcing the minimum spacing between go
    trials and the lure probability), displays it, and handles responses.
    Returns the updated (iLastGoTrial, lastGoLetter).
    """
    # Decide Trial Params: force a no-go trial if the last go trial is too
    # recent, otherwise draw go/no-go with probability goStimProb.
    if iTrial-iLastGoTrial<params['minGoInterval']:
        isGoTrial = False
    else:
        isGoTrial = random.random()<params['goStimProb']
    isLureTrial = False
    if isGoTrial:
        isLureTrial = random.random()<params['lureProb']
        if isLureTrial and lastGoLetter:
            # lure: repeat the previous go letter
            thisLetter = lastGoLetter
        else:
            # BUG FIX: original computed set(params['goLetters'])-lastGoLetter,
            # which raises TypeError (set minus non-set); also guards the
            # initial sentinel [] which is unhashable.
            prior = set([lastGoLetter]) if lastGoLetter else set()
            otherGoLetters = list(set(params['goLetters'])-prior)
            thisLetter = random.choice(otherGoLetters)
        lastGoLetter = thisLetter
        iLastGoTrial = iTrial
    else:
        thisLetter = random.choice(params['nullLetters'])
    # display info to experimenter
    # BUG FIX: original referenced undefined 'cueDur' here (NameError).
    print('Running Trial %d: isGo = %d, isLure = %d'%(iTrial,isGoTrial,isLureTrial))
    # Draw stim
    # BUG FIX: original had an extra bracket (syntax error) and logged the
    # nonexistent params keys 'goStim'/'noGoStim'; show the chosen letter.
    mainText.setText(thisLetter)
    if isGoTrial:
        win.logOnFlip(level=logging.EXP, msg='Display go stim (%s)'%thisLetter)
    else:
        win.logOnFlip(level=logging.EXP, msg='Display no-go stim (%s)'%thisLetter)
    mainText.draw()
    # Wait until it's time to display
    while (globalClock.getTime()<tNextFlip[0]):
        pass
    # log & flip window to display image
    win.flip()
    tStimStart = globalClock.getTime() # record time when window flipped
    # set up next win flip time after this one
    AddToFlipTime(params['stimDur']) # add to tNextFlip[0]
    # Wait for relevant key press or 'stimDur' seconds
    newKeys = event.getKeys(keyList=params['respKeys']+['q','escape'],timeStamped=globalClock)
    # check each keypress for escape or response keys
    for thisKey in newKeys:
        if thisKey[0] in ['q','escape']: # escape keys
            CoolDown() # exit gracefully
    # Flush the key buffer and mouse movements
    event.clearEvents()
    return(iLastGoTrial,lastGoLetter)
# Handle end of a session
def CoolDown():
    """Show the end-of-session screen, wait for q/escape, then quit."""
    # display cool-down message
    message1.setText("That's the end! ")
    message2.setText("Press 'q' or 'escape' to end the session.")
    win.logOnFlip(level=logging.EXP, msg='Display TheEnd')
    message1.draw()
    message2.draw()
    win.flip()
    thisKey = event.waitKeys(keyList=['q','escape'])
    # exit
    core.quit()
# =========================== #
# ======= RUN PROMPTS ======= #
# =========================== #
# display prompts
if not params['skipPrompts']:
    BasicPromptTools.RunPrompts(topPrompts,bottomPrompts,win,message1,message2)

# wait for scanner
message1.setText("Waiting for scanner to start...")
message2.setText("(Press '%c' to override.)"%params['triggerKey'].upper())
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Display WaitingForScanner')
win.flip()
event.waitKeys(keyList=params['triggerKey'])
tStartSession = globalClock.getTime()
# Schedule the first stimulus tStartup seconds after the trigger
# (tNextFlip starts at 0, so this sets an absolute deadline).
AddToFlipTime(tStartSession+params['tStartup'])

# wait before first stimulus
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.flip()

# =========================== #
# ===== MAIN EXPERIMENT ===== #
# =========================== #
# log experiment start and set up
logging.log(level=logging.EXP, msg='---START EXPERIMENT---')

# run trials
iLastGoTrial = 0
lastGoLetter = [] # sentinel: no go letter shown yet
for iTrial in range(0,params['nTrials']):
    # display text
    [iLastGoTrial,lastGoLetter] = RunTrial(iTrial,iLastGoTrial,lastGoLetter)

# wait before 'the end' text
fixation.draw()
win.flip()
AddToFlipTime(params['tCoolDown'])
# busy-wait until the cool-down period has elapsed
while (globalClock.getTime()<tNextFlip[0]):
    pass

# Log end of experiment
logging.log(level=logging.EXP, msg='--- END EXPERIMENT ---')

# exit experiment
CoolDown()
# <reponame>tlvu/raven <filename>raven/processes/wps_generic_zonal_stats.py <gh_stars>0
# (scraper artifact; commented out so the module parses)
import logging
import json
import tempfile
from pywps import LiteralInput, ComplexInput
from pywps import ComplexOutput
from pywps import Process, FORMATS
from pywps.app.Common import Metadata
from rasterstats import zonal_stats
from raven.utils import archive_sniffer, crs_sniffer, single_file_check, generic_vector_reproject
from raven.utilities import gis
# Module-level logger (PyWPS convention) and the default set of summary
# statistics requested from rasterstats.zonal_stats.
LOGGER = logging.getLogger("PYWPS")
SUMMARY_ZONAL_STATS = ['count', 'min', 'max', 'mean', 'median', 'sum', 'nodata']
class ZonalStatisticsProcess(Process):
    """Given files containing vector data and raster data, perform zonal statistics of the overlapping regions"""
    def __init__(self):
        """Declare the WPS inputs/outputs and register the handler."""
        # Inputs: a vector shape, an optional raster (falls back to an
        # EarthEnv-DEM90 WCS fetch), the raster band, and two boolean
        # switches forwarded to rasterstats.zonal_stats.
        inputs = [
            ComplexInput('shape', 'Vector Shape',
                         abstract='An ESRI Shapefile, GML, JSON, GeoJSON, or single layer GeoPackage.'
                                  ' The ESRI Shapefile must be zipped and contain the .shp, .shx, and .dbf.'
                                  ' The shape and raster should have a matching CRS.',
                         min_occurs=1, max_occurs=1,
                         supported_formats=[FORMATS.GEOJSON, FORMATS.GML, FORMATS.JSON, FORMATS.SHP]),
            ComplexInput('raster', 'Gridded raster data set',
                         abstract='The DEM to be queried. Defaults to the EarthEnv-DEM90 product.',
                         metadata=[Metadata('EarthEnv-DEM90', 'https://www.earthenv.org/DEM'),
                                   Metadata(
                                       '<NAME>, <NAME>, and <NAME> (2014). '
                                       'EarthEnv-DEM90: A Nearly-Global, Void-Free, Multi-Scale Smoothed, 90m Digital '
                                       'Elevation Model from Fused ASTER and SRTM Data. ISPRS Journal of '
                                       'Photogrammetry and Remote Sensing 87: 57–67.',
                                       'https://doi.org/10.1016/j.isprsjprs.2013.11.002')],
                         min_occurs=0, max_occurs=1, supported_formats=[FORMATS.GEOTIFF]),
            LiteralInput('band', 'Raster band',
                         data_type='integer', default=1,
                         abstract='Band of raster examined to perform zonal statistics. Default: 1',
                         min_occurs=1, max_occurs=1),
            LiteralInput('categorical', 'Return distinct pixel categories',
                         data_type='boolean', default='false',
                         min_occurs=1, max_occurs=1),
            LiteralInput('select_all_touching', 'Additionally select boundary pixels that are touched by shape',
                         data_type='boolean', default='false',
                         min_occurs=1, max_occurs=1)
        ]
        outputs = [
            ComplexOutput('statistics', 'DEM properties within the region defined by `shape`.',
                          abstract='Elevation statistics: min, max, mean, median, sum, nodata',
                          supported_formats=[FORMATS.JSON, FORMATS.GEOJSON]),
        ]
        super(ZonalStatisticsProcess, self).__init__(
            self._handler,
            identifier="zonal-stats",
            title="Raster Zonal Statistics",
            version="1.0",
            abstract="Return zonal statistics based on the boundaries of a vector file.",
            metadata=[],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
    def _handler(self, request, response):
        """Run zonal statistics and attach a GeoJSON FeatureCollection to the response.

        Raises a wrapped Exception (with the original as __cause__) when
        rasterstats fails.
        """
        shape_url = request.inputs['shape'][0].file
        band = request.inputs['band'][0].data
        categorical = request.inputs['categorical'][0].data
        touches = request.inputs['select_all_touching'][0].data
        # locate the single vector file inside a possible archive
        vectors = ['.gml', '.shp', '.gpkg', '.geojson', '.json']
        vector_file = single_file_check(archive_sniffer(shape_url, working_dir=self.workdir, extensions=vectors))
        rasters = ['.tiff', '.tif']
        if 'raster' in request.inputs:
            raster_url = request.inputs['raster'][0].file
            raster_file = single_file_check(archive_sniffer(raster_url, working_dir=self.workdir, extensions=rasters))
        else:
            # no raster supplied: fetch the DEM covering the vector's bbox
            # from the project's WCS service and spill it to a temp file
            bbox = gis.get_bbox(vector_file)
            raster_url = 'public:EarthEnv_DEM90_NorthAmerica'
            raster_bytes = gis.get_raster_wcs(bbox, geographic=True, layer=raster_url)
            raster_file = tempfile.NamedTemporaryFile(prefix='wcs_', suffix='.tiff', delete=False,
                                                      dir=self.workdir).name
            with open(raster_file, 'wb') as f:
                f.write(raster_bytes)
        # if the CRSs differ, reproject the vector onto the raster's CRS
        vec_crs, ras_crs = crs_sniffer(vector_file), crs_sniffer(raster_file)
        if ras_crs != vec_crs:
            msg = 'CRS for files {} and {} are not the same. Reprojecting vector...'.format(vector_file, raster_file)
            LOGGER.warning(msg)
            # Reproject full vector to preserve feature attributes
            projected = tempfile.NamedTemporaryFile(prefix='reprojected_', suffix='.json', delete=False,
                                                    dir=self.workdir).name
            generic_vector_reproject(vector_file, projected, source_crs=vec_crs, target_crs=ras_crs)
            vector_file = projected
        summary_stats = SUMMARY_ZONAL_STATS
        try:
            # geojson_out=True returns features with stats merged into properties
            stats = zonal_stats(
                vector_file, raster_file, stats=summary_stats, band=band, categorical=categorical,
                all_touched=touches, geojson_out=True, raster_out=False)
            feature_collect = {'type': 'FeatureCollection', 'features': stats}
            response.outputs['statistics'].data = json.dumps(feature_collect)
        except Exception as e:
            msg = 'Failed to perform zonal statistics using {} and {}: {}'.format(shape_url, raster_url, e)
            LOGGER.error(msg)
            raise Exception(msg) from e
        return response
|
#!/usr/bin/env python
#coding:utf8
#++++++++++++description++++++++++++#
"""
@author:ying
@contact:<EMAIL>
@site:
@software: PyCharm
@time: 2019/5/14 上午7:52
@该文件没什么用,仅供参考
"""
#+++++++++++++++++++++++++++++++++++#
import yaml,nmap,os,time,json,paramiko,hmac
os.environ['DJANGO_SETTINGS_MODULE'] = 'YingOps.settings'
import django
django.setup()
from assets import models
from pysnmp.entity.rfc3413.oneliner import cmdgen
PROJECT_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import sys
# print PROJECT_ROOT
"""加载配置文件"""
s_conf=yaml.load(open('{0}/conf/scanhosts.yaml'.format(PROJECT_ROOT) ))
'''服务器参数'''
net = s_conf['hostinfo']['nets'][0]
nets = s_conf['hostinfo']['nets'][0]+'.0/24'
ssh_pass = s_conf['hostinfo']['ssh_pass']
ssh_user = s_conf['hostinfo']['ssh_user']
syscmd_list = s_conf['hostinfo']['syscmd_list']
ports = s_conf['hostinfo']['ports']
black_list = s_conf['hostinfo']['black_list']
email_list = s_conf['hostinfo']['email_list']
ssh_key_file = s_conf['hostinfo']['ssh_key_file']
'''交换机参数'''
cpu_oids =s_conf['netinfo']['cpu_oids']
mem_oids =s_conf['netinfo']['mem_oids']
temp_oids =s_conf['netinfo']['temp_oids']
modle_oids =s_conf['netinfo']['modle_oids']
sysname_oid =s_conf['netinfo']['sysname_oid']
community = s_conf['netinfo']['community']
# s_conf['hostinfo']['nets'][0] = net = 'nihao'
# print net,s_conf['hostinfo']['nets'][0]
"""开始扫描"""
"""开始扫描"""  # start scanning
class ScanHostMethod(object):
    """Scan a network range with nmap and classify hosts by OS.

    Classification heuristic: port 22 open -> Linux, else port 3389 open
    -> Windows, otherwise unknown (candidate switch, handled elsewhere).
    """
    """初始化数据"""  # initialise data
    def __init__(self,nets):
        # CIDR range to scan, e.g. "192.168.1.0/24"
        self.nets=nets
    """扫描出所有的ip"""  # discover all live IPs
    def allHost(self):
        """Ping-scan the range, dump the host list to all_host.txt, return it."""
        nm=nmap.PortScanner()
        # NOTE(review): '-n sP PE' looks like it was meant to be '-n -sP -PE'
        # (the dashes are missing) -- confirm against a working run.
        nm.scan(self.nets,arguments='-n sP PE')
        all_host=nm.all_hosts()
        with open('all_host.txt','w') as f:
            f.write(json.dumps(all_host))
        # print('-'*20,all_host)
        return all_host
    """对扫描出来的所有IP进行分类"""  # classify the discovered IPs
    def hostItems(self):
        """Re-scan and split hosts into Linux / Windows / unknown buckets.

        Returns (all_host, linux_dic, windows_dic, unknow_list) where the
        dicts map ip -> open ports; also persists each bucket as JSON under
        conf/txtfile/.
        """
        unknow_list=[]
        linux_dic={}
        windows_dic={}
        nm = nmap.PortScanner()
        nm.scan(self.nets, arguments='-n sP PE')
        all_host = nm.all_hosts()
        for host in all_host:
            try:
                # port 22 open => assume Linux
                if nm[host]['tcp'][22]['state'] == 'open':
                    ports=nm[host]['tcp'].keys()
                    print('{0} is linux system...There are some ports opening --> {1}'.format(host,ports))
                    linux_dic[host]=ports
                else:
                    try:
                        # port 3389 (RDP) open => assume Windows
                        if nm[host]['tcp'][3389]['state'] == 'open':
                            ports = nm[host]['tcp'].keys()
                            print('%s is windows system..... There are some ports opening --> %s' %(host,ports))
                            windows_dic[host] = ports
                        else:
                            unknow_list.append(host)
                    except KeyError:
                        unknow_list.append(host)
                        continue
            except KeyError:
                # no entry for port 22 at all: fall back to the RDP check
                try:
                    if nm[host]['tcp'][3389]['state'] == 'open':
                        ports = nm[host]['tcp'].keys()
                        print('%s is windows system.....!!!!!!! There are some ports opening --> %s' %(host,ports))
                        windows_dic[host]=ports
                    else:
                        unknow_list.append(host)
                except KeyError:
                    unknow_list.append(host)
                # print('--------> %s KeyError' %host)
        # persist each bucket for later pipeline stages
        with open('conf/txtfile/all_host.txt','w') as f:
            f.write(json.dumps(all_host))
        with open('conf/txtfile/linux_host.txt','w') as f:
            f.write(json.dumps(linux_dic))
        with open('conf/txtfile/windows_host.txt','w') as f:
            f.write(json.dumps(windows_dic))
        with open('conf/txtfile/unknow_host.txt','w') as f:
            f.write(json.dumps(unknow_list))
        return all_host,linux_dic,windows_dic,unknow_list
class SwitchMethod(object):
    """Collect health metrics (CPU, memory, temperature, model, sysname)
    from candidate switches via SNMP and persist them to the database."""

    def __init__(self,unknow_li,cpu_oids,mem_oids,temp_oids,modle_oids,sysname_oid,community):
        # candidate switch IPs (hosts classified as neither Linux nor Windows)
        self.sw_li=unknow_li
        # Several OIDs are tried per metric because vendors expose the same
        # value under different OIDs.
        self.cpu_oids=cpu_oids
        self.mem_oids=mem_oids
        self.temp_oids=temp_oids
        self.modle_oids=modle_oids
        self.sysname_oid=sysname_oid
        # SNMP community string for the GET requests
        self.community=community

    def swMethod(self,sw_ip,oids):
        """Perform one SNMP GET of `oids` against `sw_ip`.

        Returns the value as a string, "" when the agent returned an empty
        value, or None when the host is unreachable / not SNMP-enabled.
        """
        try:
            cg = cmdgen.CommandGenerator()
            errorIndication,errorStatus,errorIndex,varBinds = cg.getCmd(
                cmdgen.CommunityData('server',self.community,1),
                cmdgen.UdpTransportTarget((sw_ip,161)),
                oids
            )
            result = str(varBinds[0][1]) if varBinds[0][1] else ""
        # BUG FIX: IndexError must be caught *before* the generic Exception
        # handler -- in the original order the IndexError branch was
        # unreachable (IndexError is a subclass of Exception).
        except IndexError:
            result = None
            print(sw_ip + ' is not switch or snmp not enable!')
        except Exception:
            result = None
            print(sw_ip + ' is not switch or snmp not enable!')
        return result

    def cpuInfo(self,swip):
        """Return CPU usage from the first responding CPU OID, else None."""
        for oid in self.cpu_oids:
            cpu_usage=self.swMethod(swip,oid)
            if cpu_usage:
                print('--------cpu_usage',cpu_usage)
                return cpu_usage
            else:
                print('------++++++++++++--',cpu_usage)

    def memInfo(self,swip):
        """Return memory usage from the first responding memory OID, else None."""
        for oid in self.mem_oids:
            mem_usage=self.swMethod(swip,oid)
            if mem_usage:
                return mem_usage

    def sysnameInfo(self,swip):
        """Return the switch's system name, or None when it does not respond."""
        for oid in self.sysname_oid:
            sysname = self.swMethod(swip,oid)
            if sysname:
                return sysname

    def modleInfo(self,swip):
        """Return the switch model from the first responding OID, else None."""
        for oid in self.modle_oids:
            modle_usage=self.swMethod(swip,oid)
            if modle_usage:
                return modle_usage

    def tempInfo(self,swip):
        """Return the temperature from the first responding OID, else None."""
        for oid in self.temp_oids:
            temp_usage=self.swMethod(swip,oid)
            if temp_usage:
                return temp_usage

    def run(self):
        """Poll every candidate IP; create a SwitchInfo row per responsive device."""
        obj = models.SwitchInfo.objects.all()
        for swip in self.sw_li:
            # BUG FIX: build a fresh dict per device -- the original reused a
            # single dict across iterations, so a metric that failed on one
            # switch silently inherited the previous switch's value.
            dic={}
            # the sysname probe doubles as a "is this an SNMP switch?" check
            sysname = self.sysnameInfo(swip)
            if sysname:
                cpu = self.cpuInfo(swip)
                mem = self.memInfo(swip)
                temp = self.tempInfo(swip)
                modle = self.modleInfo(swip)
                dic['cpu']=cpu
                dic['ip']=swip
                dic['mem']=mem
                dic['temp']=temp
                dic['sysname']=sysname
                dic['modle']=modle
                if cpu or mem or temp or sysname or modle:
                    print('{0} switch ---> cpu:{1},mem:{2},temp:{3},sysname:{4},modle:{5}'.format(swip,cpu,mem,temp,sysname,modle))
                # print temp,mem,sysname
                obj.create(**dic)
class LinuxMethod(object):
    """SSH into discovered Linux hosts, collect system facts and persist them."""

    def __init__(self, linux_dic, ssh_user, ssh_pass):
        # mapping of host ip -> list of open ports (from the nmap scan)
        self.linux_dic=linux_dic
        # candidate usernames / passwords, tried in order
        self.ssh_user=ssh_user
        self.ssh_pass=ssh_pass

    def enhmac(self, data):
        """Return an HMAC-MD5 object over `data` keyed with the app key.

        Accepts str or bytes.  BUG FIX: Python 3's hmac.new requires a
        bytes key/message and an explicit digestmod, both of which the
        original call was missing (it raised TypeError).
        """
        import hashlib
        if isinstance(data, str):
            data = data.encode('utf-8')
        return hmac.new(b'yingzi', data, hashlib.md5)

    def try_ssh_login(self):
        """Try every user/password pair on each host; on success run the
        configured commands, then store the parsed results in LinuxInfo."""
        obj2 = models.LinuxInfo.objects.all()
        # SSH client that auto-accepts hosts missing from known_hosts
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        infos = {}
        for host in self.linux_dic.keys():
            for user in self.ssh_user:
                for pas in self.ssh_pass:
                    info = []
                    try:
                        # BUG FIX: the password argument was a broken
                        # "<PASSWORD>" placeholder; use the current candidate.
                        ssh.connect(hostname=host, port=22, username=user, password=pas)
                        info.append(user)
                        info.append(pas)
                        # run each collection command and keep cleaned output
                        for cmd in syscmd_list:
                            stdin, stdout, stderr = ssh.exec_command(cmd)
                            # BUG FIX: exec_command returns bytes on Python 3;
                            # decode before the str .replace() chain below.
                            result = stdout.read().decode('utf-8', 'ignore')
                            res=str(result.replace('\\n','').replace('\\l','').replace('\S','').replace('\\','').strip().replace('Kernel r on an m',''))
                            info.append(res)
                        infos[host] = info
                        # credentials found for this host: stop trying passwords
                        break
                    except paramiko.ssh_exception.AuthenticationException:
                        print(host,user,pas,'用户名密码错误....')
                    except paramiko.ssh_exception.SSHException:
                        print(host, user, pas, '用户名密码错误....')
                    except EOFError:
                        print('EOFError')
            print('-------------------->>>>>>', infos)
            ssh.close()
        for host_ip, j in infos.items():
            # j layout: [user, password, os..., hostname, mac, sn, vendor,
            #            model, disk, mem, load] -- see indices used below
            print ("ip地址:",host_ip,'操作系统:',j[2],j[3],'主机名:',j[4],'MAC地址:',j[5],'SN序列号:',j[6].replace(' ',''),'制造商:',j[7].replace(' ',''),'型号:',j[8].replace(' ',''),'根磁盘使用率:',j[9],'内存使用率:',j[10],'G','负载:',j[11])
            obj2.create(
                ip=host_ip,
                hostname=j[4],
                system_ver=j[2]+j[3],
                ssh_port=22,
                ssh_user=j[0],
                ssh_passwd=j[1],
                mac_address=j[5],
                sn=j[6],
                manufacturer=j[7],
                cpu_cores=j[11],
                mem_total=j[10],
                disk_total=j[9]
            )
class WindowsMethod(object):
    """Persist scanned Windows hosts (ip -> open ports) to the database."""

    def __init__(self,windows_dic):
        # mapping of host ip -> list of open ports
        self.windows_dic=windows_dic

    def windowsInfo(self):
        """Create one WindowsInfo row per discovered Windows host."""
        obj3=models.WindowsInfo.objects.all()
        for ip, port in self.windows_dic.items():
            print(ip,port)
            obj3.create(ip=ip, port=str(port))
|
from izi_pygame import *
import os
from math import cos
from math import sin
from random import randint
from random import seed
# Border/padding thickness (pixels) used throughout the diagram widgets.
FILL = 2
# Main application window and the shared fonts used by entity widgets.
window = Window(wtitle="MCD", wwidth=1400, wheight=800)
entity_font = Fontstring(size=20, bold=1, window=window.get_canva())
property_font = Fontstring(size=20, window=window.get_canva())
cardinalite_font = Fontstring(size=20, bold=True, window=window.get_canva())
id_font = Fontstring(size=20, window=window.get_canva(), underline=True)
# 1x1 block used as the mouse-cursor collision probe.
cursor = Block(width=1, height=1)
class HeadEntity(Drawblock):
    """Title bar of an MCD entity: a box showing the (auto-numbered) name."""
    # class-wide counter used to generate unique default names and ids
    number = 0
    def __init__(self, window, name="ENTITY", x=0, y=0, text_list=None):
        # NOTE(review): seed(10) re-seeds the global RNG on every
        # construction -- presumably for reproducible layouts; confirm.
        seed(10)
        self.name = name.upper()+str(HeadEntity.number+1)
        Drawblock.__init__(self, x=x, y=y, color=(100,100,100), fill=FILL, window=window, speed=1)
        # rendered name string, anchored at the block's top-left corner
        self.print_name = Printstring(main_font=entity_font, string=self.name, color=(0,0,0), x=self.xbegin, y=self.ybegin)
        # size the box to fit the rendered text
        self.set_dimension(self.print_name.string.get_width(), self.print_name.string.get_height())
        HeadEntity.number+=1
        text_list.append(self.print_name)
    def set_position(self, x, y):
        # move the block, then keep the text glued to its top-left corner
        Drawblock.set_position(self, x, y)
        self.print_name.x = self.xbegin
        self.print_name.y = self.ybegin
    def print(self):
        """Draw the box and its name text."""
        self.draw()
        self.print_name.write()
    def __str__(self):
        # serialized as: x y width height
        return f"{self.xbegin} {self.ybegin} {self.width} {self.height}\n"
class FootEntity(Drawblock):
    """Body of an MCD entity: holds the underlined id line and the properties."""
    def __init__(self, window, x, y, width, height, head, text_list):
        Drawblock.__init__(self, x=x, y=y, width=width, height=height, color=(100,100,100), fill=FILL, window=window, speed=1)
        # identifier line, derived from the head's name (e.g. "identity1")
        self.idEntity = Printstring(main_font=id_font, string="id"+head.print_name.get_text().lower(), color=(0,0,150),
                                    x=head.print_name.x+FILL, y=head.yend+FILL)
        text_list.append(self.idEntity)
        # Printstring lines for the entity's properties, in display order
        self.property = list()
        head.set_dimension(height=head.height)
        self.set_dimension(height=self.height+FILL*3)
    def add_property(self, head, text_list):
        """Append an auto-named property line and grow the box to fit it."""
        self.property.append(Printstring(main_font=property_font, string=f"property{len(self.property)+1}",
        color=(0,0,0), x=head.print_name.x+FILL, y=head.yend+(len(self.property)+1)*head.print_name.string.get_height()))
        text_list.append(self.property[len(self.property)-1])
        self.set_dimension(self.width, (len(self.property)+1)*head.print_name.string.get_height())
    def set_position(self, x, y):
        # move the block, then re-stack the id and property lines inside it
        Drawblock.set_position(self, x, y)
        i = 1
        self.idEntity.x = self.xbegin+FILL
        self.idEntity.y = self.ybegin
        for p in self.property:
            p.x = self.xbegin+FILL
            p.y = self.ybegin+i*p.string.get_height()
            i+=1
    def print(self):
        """Draw the box, then the id line and each property line."""
        self.draw()
        self.idEntity.write()
        for p in self.property:
            p.write()
    def __str__(self):
        # serialized as: x y width height
        info = f"{self.xbegin} {self.ybegin} {self.width} {self.height}\n"
        return info
class FocusEntity(Drawblock):
    """Small orange square drawn at an entity's corner to mark focus."""
    def __init__(self, window, x, y, width=10, height=10, focus=0):
        # `focus` is forwarded as the Drawblock `fill` style
        Drawblock.__init__(self, x=x, y=y, width=width, height=height, speed=0, color=(255,153,51), fill=focus, window=window)
class Entity:
    """A complete MCD entity: head (name bar), foot (id + properties),
    focus marker, background block and the points where links attach."""
    # all entities ever created, in creation order
    LIST = list()
    # the entity currently holding focus
    focus = None
    # global toggle: whether focus markers are drawn at all
    focus_view = True

    def __init__(self, window, x=0, y=0, focus=0):
        # Printstrings belonging to this entity (name, id, properties);
        # shared with head/foot so ajust_dimension can measure them all.
        self.text_list = list()
        self.head = HeadEntity(window, x=x, y=y, text_list=self.text_list)
        self.foot = FootEntity(window, self.head.xbegin, self.head.yend, self.head.width, self.head.height, self.head, self.text_list)
        self.focus = FocusEntity(window, self.foot.xend, self.foot.yend-5, focus=focus)
        self.bg = Drawblock(x=x,y=y,color=(220,220,220),window=window,width=self.head.width, height=self.head.height+self.foot.height)
        Entity.LIST.append(self)
        if not focus:
            Entity.focus = self
        # link attachment points centered on this entity
        self.list_point = list()
        self.id = str(HeadEntity.number)+"e"

    def print(self):
        """Draw background, head, foot and (when enabled) the focus marker."""
        # NOTE: the original declared `global focus_view`, but the flag lives
        # on the class (Entity.focus_view) -- the statement was a no-op and
        # has been removed.
        self.bg.draw()
        self.head.print()
        self.foot.print()
        if Entity.focus_view:
            self.focus.draw()

    def set_name(self, new_name):
        """Rename the entity and its derived id line, then refit the boxes."""
        self.head.print_name << new_name.upper()
        self.foot.idEntity << self.foot.idEntity.get_text()[0:2]+new_name.lower()
        self.ajust_dimension()

    def set_property(self, idp, new_name):
        """Rename property #idp (1-based) and refit the boxes."""
        self.foot.property[idp-1] << new_name
        self.ajust_dimension()

    def del_property(self, idp):
        """Delete property #idp (1-based) and refit the boxes."""
        del self.foot.property[idp-1]
        self.ajust_dimension()

    def add_property(self):
        """Append a new auto-named property line."""
        self.foot.add_property(self.head, self.text_list)
        self.focus.set_position(self.foot.xend, self.foot.yend)
        self.ajust_dimension()

    def set_position(self, x, y):
        """Center the whole entity on (x, y) and move its link points there."""
        self.head.set_position(x-self.head.width//2, y-(self.head.height+self.foot.height)//2)
        self.foot.set_position(x-self.head.width//2, self.head.yend)
        self.focus.set_position(self.foot.xend, self.foot.yend)
        Drawblock.set_position(self.bg,self.head.xbegin,self.head.ybegin)
        for p in self.list_point:
            p.x = x
            p.y = y

    def change_position(self, x, y):
        """Place the entity with its top-left corner at (x, y)."""
        self.head.set_position(x, y)
        self.foot.set_position(x, self.head.yend)
        self.focus.set_position(self.foot.xend, self.foot.yend)
        Drawblock.set_position(self.bg,self.head.xbegin,self.head.ybegin)
        for p in self.list_point:
            p.x = x+self.head.width//2
            p.y = y+(self.head.height+self.foot.height)//2

    def cursor_collide(self, cursor):
        """True when the cursor overlaps the head or the foot."""
        if self.head.cursor_collide(cursor) or self.foot.cursor_collide(cursor):
            return True
        return False

    #no use
    def focus_cursor_collide(self, cursor):
        """True when the cursor overlaps the focus marker (currently unused)."""
        if self.focus.cursor_collide(cursor):
            return True
        return False

    def ajust_dimension(self):
        """Resize head/foot/background to fit the widest text line, and
        re-center the link points."""
        maxi = self.text_list[0].string.get_width()
        for t in self.text_list:
            if maxi < t.string.get_width():
                maxi = t.string.get_width()
        self.head.set_dimension(width=maxi+FILL)
        self.foot.set_dimension(width=maxi+FILL, height=(len(self.foot.property)+1)*self.head.print_name.string.get_height())
        self.focus.set_position(self.foot.xend, self.foot.yend)
        self.bg.set_dimension(width=self.head.width, height=self.head.height+self.foot.height)
        for p in self.list_point:
            p.x = self.foot.xbegin + self.foot.width//2
            p.y = self.foot.ybegin + self.foot.height//2

    def add_point(self, point):
        """Attach a link point; when this completes a reciprocal link pair,
        mark the newest link as a rectangle and drop the older reverse link."""
        if len(Link.LIST):
            link = Link.LIST[len(Link.LIST)-1]
            """link.cardinalite.x = (point.x + self.foot.width)
            link.cardinalite.y = (point.y + self.foot.height)"""
            # BUG FIX: iterate over a copy -- the loop body removes elements
            # from Link.LIST, and mutating a list while iterating it skips
            # entries.
            for l in list(Link.LIST):
                if link.p1.parent == l.p2.parent and link.p2.parent == l.p1.parent:
                    link.rect = True
                    try:
                        self.list_point.remove(l.p1)
                        l.p2.parent.list_point.remove(l.p2)
                        Link.LIST.remove(l)
                    # BUG FIX: narrowed from a bare `except:` -- only the
                    # expected failures are swallowed (point not in list, or
                    # a point with no parent entity).
                    except (ValueError, AttributeError):
                        pass
        self.list_point.append(point)
        self.ajust_dimension()

    def __str__(self):
        # head + foot geometry followed by one line per text entry
        info = str(self.head)+str(self.foot)
        for t in self.text_list:
            info += f"{t.get_text()}\n"
        return info
class Point:
    """A 2-D coordinate optionally attached to a parent entity.

    When a parent is supplied, the new point registers itself with that
    parent via `parent.add_point(self)`.
    """
    def __init__(self, x, y, parent):
        self.x, self.y = x, y
        self.parent = parent
        if parent:
            parent.add_point(self)
class Link:
    """A connection between two Points (hence two entities), drawn as a
    line or -- for reciprocal link pairs -- as a rectangle."""
    # all links ever created, in creation order
    LIST = list()
    # the link currently being placed by the user (not drawn by draw())
    focus = None
    def __init__(self, x, y, window, parent, no_focus=False, x2=None, y2=None, parent2=None):
        self.p1 = Point(x, y, parent)
        self.p2 = Point(x2, y2, parent2)
        """self.cardinalite = Printstring(main_font=cardinalite_font, string="0 , N", color=(150,0,200), x=-100,y=-100)
        if type(parent) == Entity:
            self.cardinalite.x = (self.p1.x + parent.foot.width)
            self.cardinalite.y = (self.p1.y + parent.foot.height)"""
        # True when this link pairs with a reverse link (rectangle rendering)
        self.rect = False
        self.window = window
        Link.LIST.append(self)
        if not no_focus:
            Link.focus = self
        self.parent = parent
    def update_drawing(self, x, y):
        """Follow the cursor with the free endpoint while placing the link."""
        self.p2.x = x
        self.p2.y = y
        pygame.draw.aaline(self.window, (0,100,100), (self.p1.x, self.p1.y), (x, y), 1)
    def draw(self):
        """Draw the link unless it is the one currently being placed."""
        if self != Link.focus:
            if self.rect:
                # pygame rects are addressed by their top-left corner, so
                # pick the corner from the relative position of p1 and p2
                if self.p2.x < self.p1.x:
                    if self.p2.y > self.p1.y:
                        pygame.draw.rect(self.window, (0,100,100), (self.p2.x, self.p1.y, abs(self.p2.x-self.p1.x), abs(self.p2.y-self.p1.y)), FILL)
                    else:
                        pygame.draw.rect(self.window, (0,100,100), (self.p2.x, self.p2.y, abs(self.p2.x-self.p1.x), abs(self.p2.y-self.p1.y)), FILL)
                elif self.p2.x > self.p1.x:
                    if self.p2.y > self.p1.y:
                        pygame.draw.rect(self.window, (0,100,100), (self.p1.x, self.p1.y, abs(self.p2.x-self.p1.x), abs(self.p2.y-self.p1.y)), FILL)
                    else:
                        pygame.draw.rect(self.window, (0,100,100), (self.p1.x, self.p2.y, abs(self.p2.x-self.p1.x), abs(self.p2.y-self.p1.y)), FILL)
            else:
                pygame.draw.aaline(self.window, (0,100,100), (self.p1.x, self.p1.y), (self.p2.x, self.p2.y), 1)
        #self.cardinalite.write()
    def __str__(self):
        # serialized as: x1 y1 parent1-id x2 y2 parent2-id rect-flag
        return f"{self.p1.x} {self.p1.y} {self.p1.parent.id} {self.p2.x} {self.p2.y} {self.p2.parent.id} {self.rect}\n"
class Cardinalite(Block):
    """Draggable "0,N"-style cardinality label placed next to a link."""
    # class-wide counter used to build unique ids
    number = 0
    # the label most recently created / selected
    focus = None
    # all labels ever created, in creation order
    LIST = list()
    def __init__(self, x, y, txt="0,N"):
        self.text = Printstring(main_font=cardinalite_font, string=txt, color=(0,150,150), x=x, y=y)
        # center the block on (x, y), sized to the rendered text
        Block.__init__(self, x=x-self.text.string.get_width()//2, y=y-self.text.string.get_height()//2,
                       width=self.text.string.get_width(), height=self.text.string.get_height(), speed=0)
        self.text.x = self.xbegin
        self.text.y = self.ybegin
        Cardinalite.number += 1
        self.id = str(Cardinalite.number)+"c"
        Cardinalite.LIST.append(self)
        Cardinalite.focus = self
    def print(self):
        """Draw the label text."""
        self.text.write()
    def set_position(self, x, y, mouse=False):
        # mouse=True positions by center (dragging); otherwise by top-left
        if not mouse:
            Block.set_position(self, x,y)
        else:
            Block.set_position(self, x-self.width//2,y-self.height//2)
        self.text.x = self.xbegin
        self.text.y = self.ybegin
    def change_position(self, x, y):
        # position by top-left corner; keep the text glued to the block
        Block.set_position(self, x,y)
        self.text.x = self.xbegin
        self.text.y = self.ybegin
    def __str__(self):
        # serialized as: x y text
        return f"{self.xbegin} {self.ybegin} {self.text.get_text()}\n"
|
<filename>pydpiper/itk/tools.py
import copy
import os
from typing import Optional, Sequence
from configargparse import Namespace
from pydpiper.minc.conversion import generic_converter
from pydpiper.minc.files import ToMinc
from pydpiper.minc.nlin import Algorithms
from pydpiper.core.stages import Result, CmdStage, Stages
from pydpiper.core.files import ImgAtom, FileAtom
# TODO delete ITK prefix?
class ITKXfmAtom(FileAtom):
    """FileAtom subtype marking ITK transform files."""
    pass
class ITKImgAtom(ImgAtom):
    """ImgAtom subtype marking ITK image files."""
    pass
def convert(infile : ImgAtom, out_ext : str) -> Result[ImgAtom]:
    """Convert an image to another format with c3d, converting any attached
    mask and labels along with it.

    Returns a Result whose output is `infile` renamed to `out_ext`, with
    mask/labels replaced by recursively-converted versions.
    """
    s = Stages()
    outfile = infile.newext(ext=out_ext)
    if infile.mask is not None:
        outfile.mask = s.defer(convert(infile.mask, out_ext=out_ext))
    if infile.labels is not None:
        # BUG FIX: the converted labels were previously assigned to
        # `outfile.mask`, clobbering the converted mask and leaving
        # `outfile.labels` stale.
        outfile.labels = s.defer(convert(infile.labels, out_ext=out_ext))
    s.add(CmdStage(inputs=(infile,), outputs=(outfile,),
                   cmd = ['c3d', infile.path, '-o', outfile.path]))
    return Result(stages=s, output=outfile)
def itk_convert_xfm(xfm : ITKXfmAtom, out_ext : str) -> Result[ITKXfmAtom]:
    """Convert a transform file to `out_ext`; no-op when already in that format."""
    if xfm.ext == out_ext:
        return Result(stages=Stages(), output=xfm)
    converted = xfm.newext(out_ext)
    stage = CmdStage(inputs=(xfm,), outputs=(converted,),
                     cmd=["itk_convert_xfm", "--clobber", xfm.path, converted.path])
    return Result(stages=Stages((stage,)), output=converted)
# TODO 'ITK' seems like a weird place for these; probably belong in minc;
# also, 'generic_converter' is - did I mention? - generic
# BUG FIX (mnc2nii): the bash -c argument previously embedded literal single
# quotes (so bash tried to execute one giant quoted word) and removed
# "%s.path" -- a path with a literal ".path" suffix -- instead of the output
# file itself.
mnc2nii = generic_converter(renamer = lambda img: img.newext(".nii"),
                            mk_cmd = lambda i, o: ["bash", "-c", "rm -f %s; mnc2nii %s %s" % (o, i, o)])
# BUG FIX (nii2mnc): the "{i} {o}" placeholders were never substituted, so
# the literal strings "{i}"/"{o}" were passed to nii2mnc.
nii2mnc = generic_converter(renamer = lambda img: img.newext(".mnc"),
                            mk_cmd = lambda i, o: "nii2mnc -clobber {i} {o}".format(i=i, o=o).split())
class Interpolation(object):
    """Base class for antsApplyTransforms interpolation modes.

    `render()` yields the command-line spelling of the mode, which by
    default is just the class name."""
    def render(self):
        return self.__class__.__name__
class Linear(Interpolation): pass
class NearestNeighbor(Interpolation): pass
class MultiLabel(Interpolation): """TODO: add options"""
class Gaussian(Interpolation): """TODO: add options"""
class BSpline(Interpolation):
    """B-spline interpolation with an optional spline order."""
    def __init__(self, order=None):
        self.order = order
    def render(self):
        base = self.__class__.__name__
        if self.order is None:
            return base
        return base + "[order=%d]" % self.order
class CosineWindowsSinc(Interpolation): pass
class WelchWindowedSinc(Interpolation): pass
class HammingWindowedSinc(Interpolation): pass
class LanczosWindowedSinc(Interpolation): pass
def as_deformation(transform, reference_image, interpolation: Interpolation = None,
                   invert: bool = None, dimensionality: int = None,
                   default_voxel_value: float = None, new_name_wo_ext: str = None,
                   subdir: str = None, ext: str = None) -> Result[ITKImgAtom]:
    """Convert an arbitrary ITK transformation to a deformation field representation.
    Consider this an image rather than a transformation since, e.g., AverageImages can be used.

    The "[<path>,1]" --output form asks antsApplyTransforms to emit a
    displacement field instead of a resampled image.
    """
    if not subdir:
        subdir = 'tmp'
    ext = ext or ".nii.gz"
    if not new_name_wo_ext:
        out_xfm = xfmToImage(transform.newname(name=transform.filename_wo_ext + '_def', subdir=subdir, ext=ext))
    else:
        out_xfm = xfmToImage(transform.newname(name=new_name_wo_ext, subdir=subdir, ext=ext))
    # TODO add rest of --output options
    cmd = (["antsApplyTransforms",
            "--reference-image", reference_image.path,
            "--output", "[%s,1]" % out_xfm.path]
           + (["--transform", transform.path] if invert is None
              else ["-t", "[%s,%d]" % (transform.path, invert)])
           # BUG FIX: dimensionality is an int; stringify it so the command
           # list contains only strings (cf. str(default_voxel_value) below).
           + (["--dimensionality", str(dimensionality)] if dimensionality is not None else [])
           + (["--interpolation", interpolation.render()] if interpolation is not None else [])
           + (["--default-voxel-value", str(default_voxel_value)] if default_voxel_value is not None else []))
    s = CmdStage(cmd=cmd,
                 inputs=(transform, reference_image),
                 outputs=(out_xfm,))
    return Result(stages=Stages([s]), output=out_xfm)
# TODO: generalize to multiple transforms (see program name)
def antsApplyTransforms(img,
                        transform,
                        reference_image,
                        #outfile: str = None,
                        interpolation: Interpolation = None,
                        invert: bool = None,
                        dimensionality: int = None,
                        input_image_type = None,
                        output_warped_file: bool = None,
                        default_voxel_value: float = None,
                        #static_case_for_R: bool = None,
                        #float: bool = None
                        new_name_wo_ext: str = None,
                        subdir: str = None):
    """Resample `img` through `transform` onto `reference_image`'s grid
    using ANTs' antsApplyTransforms.

    Returns a Result whose output is the resampled image atom (placed in
    `subdir`, default 'resampled').  `input_image_type` and
    `output_warped_file` are currently accepted but unused.
    """
    if not subdir:
        subdir = 'resampled'
    if not new_name_wo_ext:
        out_img = img.newname(name=transform.filename_wo_ext + '-resampled', subdir=subdir)
    else:
        out_img = img.newname(name=new_name_wo_ext, subdir=subdir)
    # TODO add rest of --output options
    cmd = (["antsApplyTransforms",
            "--input", img.path,
            "--reference-image", reference_image.path,
            "--output", out_img.path]
           + (["--transform", transform.path] if invert is None
              else ["-t", "[%s,%d]" % (transform.path, invert)])
           # BUG FIX: dimensionality is an int; stringify it so the command
           # list contains only strings (cf. str(default_voxel_value) below).
           + (["--dimensionality", str(dimensionality)] if dimensionality is not None else [])
           + (["--interpolation", interpolation.render()] if interpolation is not None else [])
           + (["--default-voxel-value", str(default_voxel_value)] if default_voxel_value is not None else []))
    s = CmdStage(cmd=cmd,
                 inputs=(img, transform, reference_image),
                 outputs=(out_img,))
    return Result(stages=Stages([s]), output=out_img)
def resample_simple(img, xfm, like,
                    invert = False,
                    use_nn_interpolation = None,
                    new_name_wo_ext = None,
                    subdir = None,
                    postfix = None):
    """Resample `img` through `xfm` onto `like`'s grid.

    Thin wrapper over antsApplyTransforms; `use_nn_interpolation` selects
    MultiLabel interpolation.  `postfix` is accepted for interface
    compatibility but unused.
    """
    interp = MultiLabel() if use_nn_interpolation else None
    return antsApplyTransforms(img=img, transform=xfm, reference_image=like,
                               interpolation=interp,
                               invert=invert, new_name_wo_ext=new_name_wo_ext, subdir=subdir)
def resample(img,
             xfm,  # TODO: update to handler?
             like,
             invert = False,
             use_nn_interpolation = None,
             new_name_wo_ext: str = None,
             subdir: str = None,
             postfix: str = None):
    """Resample `img` (and any attached mask/labels) through `xfm` onto
    `like`'s grid.  Mask and labels are resampled with nearest-neighbour
    (MultiLabel) interpolation and named "<base>_mask"/"<base>_labels".
    """
    s = Stages()
    if not subdir:
        subdir = 'resampled'
    # we need to get the filename without extension here in case we have
    # masks/labels associated with the input file. When that's the case,
    # we supply its name with "_mask" and "_labels" for which we need
    # to know what the main file will be resampled as
    if not new_name_wo_ext:
        # FIXME this is wrong when invert=True
        new_name_wo_ext = xfm.filename_wo_ext + '-resampled'
    new_img = s.defer(resample_simple(img=img, xfm=xfm, like=like,
                                      invert=invert,
                                      use_nn_interpolation=use_nn_interpolation,
                                      new_name_wo_ext=new_name_wo_ext,
                                      subdir=subdir))
    new_img.mask = s.defer(resample_simple(img=img.mask, xfm=xfm, like=like,
                                           use_nn_interpolation=True,
                                           invert=invert,
                                           new_name_wo_ext=new_name_wo_ext + "_mask",
                                           subdir=subdir)) if img.mask is not None else None
    new_img.labels = s.defer(resample_simple(img=img.labels, xfm=xfm, like=like,
                                             use_nn_interpolation=True,
                                             invert=invert,
                                             new_name_wo_ext=new_name_wo_ext + "_labels",
                                             subdir=subdir)) if img.labels is not None else None
    # Note that new_img can't be used for anything until the mask/label files are also resampled.
    # This shouldn't create a problem with stage dependencies as long as masks/labels appear in inputs/outputs of CmdStages.
    # (If this isn't automatic, a relevant helper function would be trivial.)
    # TODO: can/should this be done semi-automatically? probably ...
    return Result(stages=s, output=new_img)
# is c3d or ImageMath better for this (memory)?
def max(imgs : Sequence[ImgAtom], out_img : ImgAtom):
    """Voxelwise maximum of `imgs`, written to `out_img` via c3d's
    accumulator mode.

    NOTE: deliberately shadows the builtin `max`; callers in this module
    use it by this name (e.g. for combining masks in average_images).
    """
    cmd = CmdStage(inputs = imgs, outputs = (out_img,),
                   cmd = (['c3d'] + [img.path for img in imgs]
                          + ['-accum', '-max', '-endaccum', '-o', out_img.path]))
    return Result(stages=Stages((cmd,)), output=out_img)
def average_images(imgs : Sequence[ImgAtom],
                   dimensions : int = 3,
                   normalize : bool = False,
                   output_dir : str = '.',
                   name_wo_ext : str = "average",
                   out_ext : Optional[str] = None,
                   avg_file : Optional[ITKImgAtom] = None) -> Result[ITKImgAtom]:
    """Average `imgs` with ANTs' AverageImages.

    When every input carries a mask, the masks' voxelwise maximum becomes
    the mask of the average.  Raises ValueError on an empty `imgs`.
    """
    s = Stages()
    if len(imgs) == 0:
        raise ValueError("`AverageImages` arg `imgs` is empty (can't average zero files)")
    ext = out_ext or imgs[0].ext
    # the output_dir basically gives us the equivalent of the pipeline_sub_dir for
    # regular input files to a pipeline, so use that here
    # ('.todo' is a placeholder extension, overwritten just below)
    avg = avg_file or ImgAtom(name=os.path.join(output_dir, '%s.todo' % name_wo_ext),
                              orig_name=None,
                              pipeline_sub_dir=output_dir)
    avg.ext = ext
    # if all input files have masks associated with them, add the combined mask to
    # the average:
    # TODO what if avg_file has a mask ... should that be used instead? (then rename avg -> avg_file above)
    all_inputs_have_masks = all((img.mask for img in imgs))
    if all_inputs_have_masks:
        # name the combined mask after avg_file when one was supplied,
        # otherwise after the requested output name
        combined_mask = (ImgAtom(name=os.path.join(avg_file.dir, '%s_mask.todo' % avg_file.filename_wo_ext),
                                 orig_name=None,
                                 pipeline_sub_dir=avg_file.pipeline_sub_dir)
                         if avg_file is not None else
                         ImgAtom(name=os.path.join(output_dir, '%s_mask.todo' % name_wo_ext),
                                 orig_name=None,
                                 pipeline_sub_dir=output_dir))
        combined_mask.ext = ext
        # voxelwise max of the (deduplicated) masks -> union of coverage
        s.defer(max(imgs=sorted({img_inst.mask for img_inst in imgs}),
                    out_img=combined_mask))
        avg.mask = combined_mask
    s.add(CmdStage(inputs = imgs,
                   outputs = (avg,),
                   cmd = ["AverageImages", str(dimensions), avg.path, "%d" % normalize]
                         + [img.path for img in imgs]))
    return Result(stages=s, output=avg)
class ToMinc(ToMinc):
    """ITK-side implementation of the MINC conversion interface.

    NOTE: deliberately shadows the imported `ToMinc` base class (same
    pattern as the `Algorithms` class below)."""
    @staticmethod
    def to_mnc(img): return convert(img, out_ext=".mnc")
    @staticmethod
    def from_mnc(img): return convert(img, out_ext=".nii.gz")
    @staticmethod
    def to_mni_xfm(xfm): return itk_convert_xfm(xfm, out_ext=".mnc")
    @staticmethod
    def from_mni_xfm(xfm): return itk_convert_xfm(xfm, out_ext=".nii.gz")
def imageToXfm(i : ITKImgAtom) -> ITKXfmAtom:
    """Reinterpret an image atom as a transform atom.

    Deep-copies the input, rebrands its class and drops the image-only
    mask/labels attributes."""
    xfm = copy.deepcopy(i)
    xfm.__class__ = ITKXfmAtom
    del xfm.mask
    del xfm.labels
    return xfm
def xfmToImage(x : ITKXfmAtom):
    """Reinterpret a transform atom as an image atom.

    Deep-copies the input, rebrands its class and initializes the
    image-only mask/labels attributes to None."""
    img = copy.deepcopy(x)
    img.__class__ = ITKImgAtom
    img.mask = None
    img.labels = None
    return img
# TODO move this
class Algorithms(Algorithms):
    """ITK/ANTs-backed implementations of the generic Algorithms interface.

    NOTE: deliberately shadows the imported `Algorithms` base class,
    matching this module's `ToMinc(ToMinc)` pattern."""
    average = average_images
    @staticmethod
    def blur(img, fwhm, gradient=True, subdir='tmp'):
        """Gaussian-blur `img` with c3d; optionally also emit its gradient.

        Returns Namespace(img=...) or Namespace(img=..., gradient=...).
        A non-positive/None fwhm is a no-op (and raises ValueError if a
        gradient was requested, since there is nothing to differentiate).
        """
        # note c3d can take voxel rather than fwhm specification, but the Algorithms interface
        # currently doesn't allow this to be used ... maybe an argument from switching from mincblur
        if fwhm in (-1, 0, None):
            if gradient:
                raise ValueError("can't compute gradient without a positive FWHM")
            return Result(stages=Stages(), output=Namespace(img=img))
        if gradient:
            out_gradient = img.newname_with("_blur%s_grad" % fwhm)
        else:
            out_gradient = None
        out_img = img.newname_with("_blurred%s" % fwhm)
        # BUG FIX: `inputs=(img)` was a bare parenthesized value, not a
        # one-element tuple.
        cmd = CmdStage(cmd=['c3d', '-smooth', "%smm" % fwhm, '-o', out_img.path, img.path]
                           + (['-gradient', '-o', out_gradient.path] if gradient else []),
                       inputs=(img,), outputs=(out_img, out_gradient) if gradient else (out_img,))
        return Result(stages=Stages((cmd,)),
                      output=Namespace(img=out_img, gradient=out_gradient)
                             if gradient else Namespace(img=out_img))
    resample = resample
    @staticmethod
    def scale_transform(xfm, scale, newname_wo_ext):
        """Scale a transform by converting it to a deformation field and
        multiplying voxelwise by `scale` with c3d."""
        s = Stages()
        # BUG FIX: `as_deformation` takes `reference_image`, not `reference`
        # (cf. average_transforms below) -- the old keyword raised TypeError.
        defs = s.defer(as_deformation(transform=xfm.xfm, reference_image=xfm.source))
        scaled_defs = (defs.xfm.newname(newname_wo_ext) if newname_wo_ext else
                       defs.xfm.newname_with_suffix("_scaled_%s" % scale))
        s.defer(CmdStage(cmd=['c3d', '-scale', str(scale), defs.path, "-o", scaled_defs.path],
                         inputs=(defs,), outputs=(scaled_defs,)))
        return Result(stages=s, output=scaled_defs)
    @staticmethod
    def average_transforms(xfms, avg_xfm):
        """Average transforms by converting each to a deformation field and
        averaging the fields voxelwise into `avg_xfm`."""
        s = Stages()
        defs = [s.defer(as_deformation(transform=xfm.xfm, reference_image=xfm.source)) for xfm in xfms]
        #avg_img = NotImplemented
        avg = imageToXfm(s.defer(average_images(defs,
                                                avg_file=xfmToImage(avg_xfm),
                                                #output_dir=os.path.join(defs[0].pipeline_sub_dir,
                                                #                        defs[0].output_sub_dir,
                                                #                        "transforms")
                                                )))
        return Result(stages=s, output=avg)
from __future__ import annotations
import email.message
import pprint
import smtplib
import workflows.recipe
from workflows.services.common_service import CommonService
import zocalo.configuration
class _SafeDict(dict):
    """A dictionary that returns undefined keys as {keyname}.

    This can be used to selectively replace variables in datastructures:
    `str.format_map` will leave unknown placeholders intact instead of
    raising KeyError.
    """

    def __missing__(self, key):
        # Reproduce the original "{key}" placeholder for unknown keys.
        return "{%s}" % key
class Mailer(CommonService):
    """A service that generates emails from messages."""

    # Human readable service name
    _service_name = "Mail Notifications"

    # Logger name
    _logger_name = "zocalo.services.mailer"

    def initializing(self):
        """Subscribe to the Mail notification queue.
        Received messages must be acknowledged."""
        self.log.debug("Mail notifications starting")
        # Fail fast: without configuration/SMTP settings nothing could ever
        # be delivered.
        if not self.config:
            raise zocalo.ConfigurationError("No Zocalo configuration loaded")
        if not self.config.smtp:
            raise zocalo.ConfigurationError(
                "There are no SMTP settings configured in your environment"
            )
        workflows.recipe.wrap_subscribe(
            self._transport,
            "mailnotification",
            self.receive_msg,
            acknowledgement=True,
            log_extender=self.extend_log,
            allow_non_recipe_messages=True,
        )

    @staticmethod
    def listify(recipients):
        """Normalize a single address, tuple, or list into a list."""
        if isinstance(recipients, list):
            return recipients
        elif isinstance(recipients, tuple):
            return list(recipients)
        else:
            return [recipients]

    def receive_msg(self, rw, header, message):
        """Do some mail notification.

        :param rw: RecipeWrapper, or None for a plain (non-recipe) message
        :param header: transport header, used for ack/nack
        :param message: message body; for simple messages a dict with
            'parameters' and 'content' keys
        """
        self.log.info(f"{message=}")
        # Parameters come either from the recipe step (recipe messages) or
        # from the message body itself (simple messages).
        if rw:
            parameters = rw.recipe_step["parameters"]
            content = None
        else:
            # Incoming message is not a recipe message. Simple messages can be valid
            if (
                not isinstance(message, dict)
                or not message.get("parameters")
                or not message.get("content")
            ):
                self.log.warning("Rejected invalid simple message")
                self._transport.nack(header)
                return
            parameters = message["parameters"]
            content = message["content"]

        # 'recipients' may be a single address, a list/tuple, or a dict that
        # selects a named group via its 'select' key (optionally merged with
        # an 'all' group).
        recipients = parameters.get("recipients", parameters.get("recipient"))
        if not recipients:
            self.log.warning("No recipients set for message")
            self._transport.nack(header)
            return
        if isinstance(recipients, dict):
            if "select" not in recipients:
                self.log.warning(
                    "Recipients dictionary must have key 'select' to select relevant group"
                )
                self._transport.nack(header)
                return
            selected_recipients = self.listify(recipients.get(recipients["select"], []))
            # bugfix: previously 'all_recipients' only existed when the
            # optional "all" key was present; default to an empty list so the
            # merge below is always well-defined.
            all_recipients = self.listify(recipients["all"]) if recipients.get("all") else []
            recipients = sorted(set(selected_recipients) | set(all_recipients))
            if not recipients:
                self.log.warning("No selected recipients for message")
                self._transport.nack(header)
                return
        else:
            recipients = self.listify(recipients)

        sender = parameters.get("from", self.config.smtp["from"])
        subject = parameters.get("subject", "mail notification via zocalo")
        content = parameters.get("content", content)
        if not content:
            self.log.warning("Message has no content")
            self._transport.nack(header)
            return
        if isinstance(content, list):
            content = "".join(content)

        # Make the raw payload available to templates as {payload} and a
        # pretty-printed version as {pprint_payload}; unknown placeholders
        # survive thanks to _SafeDict.
        if isinstance(message, list):
            pprint_message = "\n".join(message)
        else:
            pprint_message = pprint.pformat(message)
        content = content.format_map(
            _SafeDict(payload=message, pprint_payload=pprint_message)
        )

        self.log.info("Sending mail notification %r to %r", subject, recipients)
        # Accept message before sending mail. While this means we do not guarantee
        # message delivery it also means if the service crashes after delivery we
        # will not re-deliver the message infinitely many times.
        self._transport.ack(header)
        try:
            msg = email.message.EmailMessage()
            msg["Subject"] = subject
            msg["To"] = recipients
            msg["From"] = sender
            msg.set_content(content)
            with smtplib.SMTP(
                host=self.config.smtp["host"], port=self.config.smtp["port"], timeout=60
            ) as s:
                s.send_message(msg)
        except TimeoutError as e:
            self.log.error(
                f"Message delivery failed with timeout: {e}",
            )
        except Exception as e:
            self.log.error(
                f"Message delivery failed with error {e}",
            )
        else:
            self.log.debug("Message sent successfully")
|
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
import hashlib
import json
import logging
from typing import TYPE_CHECKING, Any, Dict
from Crypto.Cipher import AES
from feishu.dt_callback import (EventAppOpen, EventApproval, EventAppTicket, EventContactDepartment, EventContactScope,
EventContactUser, EventLeaveApproval, EventMessage, EventP2PCreateChat,
EventRemedyApproval, EventRemoveAddBot, EventShiftApproval, EventTripApproval,
EventUserInAndOutChat, EventWorkApproval)
from feishu.dt_enum import EventType
from feishu.dt_help import make_datatype
from feishu.exception import LarkInvalidArguments, LarkInvalidCallback
from feishu.helper import pop_or_none
if TYPE_CHECKING:
from feishu.api import OpenLark
from six import string_types
logger = logging.getLogger('feishu')
class _AESCipher(object):
    """AES-CBC decryptor for encrypted callback payloads.

    The AES key is the SHA-256 digest of the configured encrypt key; the
    IV is the first block of the ciphertext.
    """

    def __init__(self, key):
        self.bs = AES.block_size
        self.key = hashlib.sha256(_AESCipher.str_to_bytes(key)).digest()

    @staticmethod
    def str_to_bytes(data):
        # Resolve the text type in a py2/py3-compatible way.
        text_type = type(b"".decode('utf8'))
        if isinstance(data, text_type):
            return data.encode('utf8')
        return data  # pragma: no cover

    @staticmethod
    def _unpad(s):
        # Strip PKCS#7 padding: the last byte encodes the pad length.
        pad_len = ord(s[len(s) - 1:])
        return s[:-pad_len]

    def decrypt(self, enc):
        """Decrypt raw ciphertext bytes (IV-prefixed) and strip padding."""
        iv, payload = enc[:AES.block_size], enc[AES.block_size:]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(payload))

    def decrypt_string(self, enc):
        """Decrypt a base64-encoded ciphertext into a UTF-8 string."""
        raw = base64.b64decode(enc)
        return self.decrypt(raw).decode('utf8')
def get_event_type(body):
    """Resolve the EventType of a raw callback body.

    :param body: decrypted callback payload
    :type body: Dict[string_types, typing.Union[Any, Dict[string_types, Any]]]
    :return: the matching EventType member, or EventType.unknown

    Fix: the url_verification lookup was unreachable (a second `return`
    after an unconditional one), so the verification handshake could never
    be recognized; the event map is now consulted only for 'event_callback'
    envelopes.
    """
    t = body.get('type')
    if t == 'event_callback':
        # Real events are wrapped: the concrete type lives in body['event'].
        t = body.get('event', {}).get('type')
        return {
            'app_ticket': EventType.app_ticket,  # DONE
            'app_open': EventType.app_open,
            'message': EventType.message,  # DONE
            'user_add': EventType.user_add,
            'user_update': EventType.user_update,
            'user_leave': EventType.user_leave,
            'dept_add': EventType.dept_add,
            'dept_update': EventType.dept_update,
            'dept_delete': EventType.dept_delete,
            'contact_scope_change': EventType.contact_scope_change,
            'approval': EventType.approval,
            'leave_approval': EventType.leave_approval,
            'work_approval': EventType.work_approval,
            'shift_approval': EventType.shift_approval,
            'remedy_approval': EventType.remedy_approval,
            'trip_approval': EventType.trip_approval,
            'remove_bot': EventType.remove_bot,
            'add_bot': EventType.add_bot,
            'p2p_chat_create': EventType.p2p_chat_create,
        }.get(t, EventType.unknown)
    # Anything else is a top-level control message (URL verification handshake).
    return {
        'url_verification': EventType.url_verification,
    }.get(t, EventType.unknown)
class APICallbackMixin(object):
    """Event subscription handling.

    Feishu emits events for many operations; an app can subscribe to them in
    the developer console to integrate tightly with Feishu.

    Event types currently covered include:

    - approval passed
    - message received (direct chat, or the bot was @-mentioned)
    - app_ticket push

    https://open.feishu.cn/document/uYjL24iN/uUTNz4SN1MjL1UzM
    """

    def handle_callback(
        self,
        body,
        handle_message=None,
        handle_app_ticket=None,
        handle_approval=None,
        handle_leave_approval=None,
        handle_work_approval=None,
        handle_shift_approval=None,
        handle_remedy_approval=None,
        handle_trip_approval=None,
        handle_app_open=None,
        handle_contact_user=None,
        handle_contact_department=None,
        handle_contact_scope=None,
        handle_remove_add_bot=None,
        handle_p2p_chat_create=None,
        handle_user_in_out_chat=None,
    ):
        """Handle a bot callback by dispatching to the matching handler.

        :type self: OpenLark
        :param body: the callback message body
        :type body: Dict[string_types, Any]
        :param handle_message: message event handler
        :type handle_message: Callable[[str, str, 'EventMessage', Dict[str, Any]], Any]
        :param handle_app_ticket: app_ticket event handler
        :type handle_app_ticket: Callable[[str, str, 'EventAppTicket', Dict[str, Any]], Any]
        :param handle_approval: approval event handler
        :type handle_approval: Callable[[str, str, 'EventApproval', Dict[str, Any]], Any]
        :param handle_leave_approval: leave approval handler
        :type handle_leave_approval: Callable[[str, str, 'EventLeaveApproval', Dict[str, Any]], Any]
        :param handle_work_approval: overtime approval handler
        :type handle_work_approval: Callable[[str, str, 'EventWorkApproval', Dict[str, Any]], Any]
        :param handle_shift_approval: shift-change approval handler
        :type handle_shift_approval: Callable[[str, str, 'EventShiftApproval', Dict[str, Any]], Any]
        :param handle_remedy_approval: attendance-remedy approval handler
        :type handle_remedy_approval: Callable[[str, str, 'EventRemedyApproval', Dict[str, Any]], Any]
        :param handle_trip_approval: business-trip approval handler
        :type handle_trip_approval: Callable[[str, str, 'EventTripApproval', Dict[str, Any]], Any]
        :param handle_app_open: app-opened handler
        :type handle_app_open: Callable[[str, str, 'EventAppOpen', Dict[str, Any]], Any]
        :param handle_contact_user: contact user add/update/leave handler
        :type handle_contact_user: Callable[[str, str, 'EventContactUser', Dict[str, Any]], Any]
        :param handle_contact_department: contact department add/update/delete handler
        :type handle_contact_department: Callable[[str, str, 'EventContactDepartment', Dict[str, Any]], Any]
        :param handle_contact_scope: contact scope change handler
        :type handle_contact_scope: Callable[[str, str, 'EventContactScope', Dict[str, Any]], Any]
        :param handle_remove_add_bot: bot added to / removed from chat handler
        :type handle_remove_add_bot: Callable[[str, str, 'EventRemoveAddBot', Dict[str, Any]], Any]
        :param handle_p2p_chat_create: first p2p chat created handler
        :type handle_p2p_chat_create: Callable[[str, str, 'EventP2PCreateChat', Dict[str, Any]], Any]
        :param handle_user_in_out_chat: user joined/left chat handler
        :type handle_user_in_out_chat: Callable[[str, str, 'EventUserInAndOutChat', Dict[str, Any]], Any]
        """
        if not isinstance(body, dict):
            raise LarkInvalidArguments(msg='回调参数需要是字典')
        # Encrypted payloads carry the real body in the 'encrypt' field.
        if 'encrypt' in body:
            body = json.loads(self.decrypt_string(body['encrypt']))
        if not self.verification_token:
            raise LarkInvalidArguments(msg='回调需要 verification_token 参数')
        token = body.get('token')
        if token != self.verification_token:
            raise LarkInvalidCallback(msg='token: {} 不合法'.format(token))
        event_type = get_event_type(body)
        if event_type == EventType.url_verification:
            # URL verification handshake: echo back the challenge.
            return {'challenge': body.get('challenge')}
        msg_uuid = body.get('uuid', '')  # type: str
        msg_timestamp = body.get('ts', '')  # type: str
        json_event = body.get('event', {})  # type: Dict[str, Any]
        logger.info('[callback] uuid=%s, ts=%s, event=%s', msg_uuid, msg_timestamp, json_event)

        # app_ticket is special: the ticket is always stored via
        # update_app_ticket, even when no user handler was provided.
        if event_type == EventType.app_ticket:
            event_app_ticket = make_datatype(EventAppTicket, json_event)
            self.update_app_ticket(event_app_ticket.app_ticket)
            if handle_app_ticket:
                return handle_app_ticket(msg_uuid, msg_timestamp, event_app_ticket, json_event)
            return

        # Every other event type follows the same pattern: build the typed
        # event object and delegate to the matching user handler (if any).
        dispatch = {
            EventType.approval: (handle_approval, EventApproval),
            EventType.leave_approval: (handle_leave_approval, EventLeaveApproval),
            EventType.work_approval: (handle_work_approval, EventWorkApproval),
            EventType.shift_approval: (handle_shift_approval, EventShiftApproval),
            EventType.remedy_approval: (handle_remedy_approval, EventRemedyApproval),
            EventType.trip_approval: (handle_trip_approval, EventTripApproval),
            EventType.app_open: (handle_app_open, EventAppOpen),
            # user add/update/leave all map to the contact-user handler
            EventType.user_add: (handle_contact_user, EventContactUser),
            EventType.user_update: (handle_contact_user, EventContactUser),
            EventType.user_leave: (handle_contact_user, EventContactUser),
            # department add/update/delete all map to the department handler
            EventType.dept_add: (handle_contact_department, EventContactDepartment),
            EventType.dept_update: (handle_contact_department, EventContactDepartment),
            EventType.dept_delete: (handle_contact_department, EventContactDepartment),
            EventType.contact_scope_change: (handle_contact_scope, EventContactScope),
            EventType.message: (handle_message, EventMessage),
            EventType.remove_bot: (handle_remove_add_bot, EventRemoveAddBot),
            EventType.add_bot: (handle_remove_add_bot, EventRemoveAddBot),
            EventType.p2p_chat_create: (handle_p2p_chat_create, EventP2PCreateChat),
            EventType.add_user_to_chat: (handle_user_in_out_chat, EventUserInAndOutChat),
            EventType.remove_user_from_chat: (handle_user_in_out_chat, EventUserInAndOutChat),
            EventType.revoke_add_user_from_chat: (handle_user_in_out_chat, EventUserInAndOutChat),
        }
        if event_type in dispatch:
            handler, datatype = dispatch[event_type]
            if handler:
                event = make_datatype(datatype, json_event)
                return handler(msg_uuid, msg_timestamp, event, json_event)
            # Matched event type without a registered handler: swallow it,
            # mirroring the original behavior.
            return

        logger.warning('[callback][unknown event] uuid=%s, ts=%s, event=%s', msg_uuid, msg_timestamp, event_type)
        return {
            'message': 'event: {} not handle'.format(event_type),
            'msg_uuid': msg_uuid,
            'msg_timestamp': msg_timestamp,
            'json_event': json_event,
        }

    def handle_card_message_callback(self, body, handle=None):
        """Handle a card message callback.

        :type self: OpenLark
        :type body: Dict[string_types, Any]
        :type handle: Callable[[str, str, str, str, str, Dict[str, Any]], Any]
        """
        if not isinstance(body, dict):
            raise LarkInvalidArguments(msg='回调参数需要是字典')
        if 'encrypt' in body:
            body = json.loads(self.decrypt_string(body['encrypt']))
        event_type = get_event_type(body)
        if event_type == EventType.url_verification:
            if not self.verification_token:
                raise LarkInvalidArguments(msg='回调需要 verification_token 参数')
            token = body.get('token')
            if token != self.verification_token:
                raise LarkInvalidCallback(msg='token: {} 不合法'.format(token))
            return {'challenge': body.get('challenge')}
        open_id = pop_or_none(body, 'open_id')
        employee_id = pop_or_none(body, 'employee_id')
        open_message_id = pop_or_none(body, 'open_message_id')
        tenant_key = pop_or_none(body, 'tenant_key')
        tag = pop_or_none(body, 'tag')
        # NOTE(review): `handle` defaults to None but is called
        # unconditionally here — callers must always pass a handler; confirm
        # this is intended.
        return handle(tenant_key, open_id, employee_id, open_message_id, tag, body)

    def decrypt_string(self, s):
        """Decrypt an 'encrypt' payload using the configured encrypt_key.

        :type self: OpenLark
        :param s: base64-encoded AES-CBC ciphertext
        :return: decrypted plaintext string
        """
        if not self.encrypt_key:
            raise LarkInvalidArguments(msg='需要 encrypt_key 参数')
        return _AESCipher(self.encrypt_key).decrypt_string(s)
|
import numpy as np
import trimesh
import os
import glob
import scipy.io as sio
import torch
import torch.utils.data as data
import vgtk.pc as pctk
import vgtk.point3d as p3dtk
import vgtk.so3conv.functional as L
from vgtk.functional import rotation_distance_np, label_relative_rotation_np
from scipy.spatial.transform import Rotation as sciR
import random
import pickle
class Dataloader_Oxford(data.Dataset):
    """Oxford RobotCar point-cloud dataset yielding (anchor, positives,
    negatives, other_neg) tuples for metric / quadruplet-loss training.

    Queries come from a pickled dict; only entries with at least
    `opt.pos_per_query` positives are kept.
    """

    def __init__(self, opt, mode=None):
        super(Dataloader_Oxford, self).__init__()
        self.opt = opt
        # 'train' or 'eval'
        self.mode = opt.mode if mode is None else mode
        # Load data dictionaries from the pickle files
        if self.mode == 'train':
            self.pickle_file = self.opt.train_file
        elif self.mode == 'eval':
            self.pickle_file = self.opt.val_file
        self.raw_queries = self.get_queries_dict(self.pickle_file)
        # only keep training queries that have enough positive data
        # TODO: only keep queries that have other neg
        # NOTE(review): this assumes raw_queries keys are contiguous ints
        # 0..N-1 — confirm against the pickle format.
        self.queries = {}
        self.queries_key = []
        for i in range(len(self.raw_queries.keys())):
            if (len(self.raw_queries[i]["positives"]) >= self.opt.pos_per_query):
                self.queries[i] = self.raw_queries[i]
                self.queries_key.append(i)
        print("[Dataloader] : Training dataset size:", len(self.queries.keys()))
        if self.opt.no_augmentation:
            print("[Dataloader]: USING ALIGNED OXFORD LOADER!")
        else:
            print("[Dataloader]: USING ROTATED OXFORD LOADER!")

    def load_pc_file(self, filename):
        #returns Nx3 matrix
        # Raw file is a flat float64 array of length num_points*3; on a
        # shape mismatch an empty array is returned (callers must check).
        pc = np.fromfile(os.path.join(self.opt.dataset_path, filename), dtype=np.float64)
        if (pc.shape[0] != self.opt.num_points * 3):
            print("Error in pointcloud shape")
            return np.array([])
        pc = np.reshape(pc, (pc.shape[0] // 3, 3))
        return pc

    def load_pc_files(self, filenames):
        # Load a batch of clouds, silently skipping malformed ones.
        pcs = []
        for filename in filenames:
            #print(filename)
            pc = self.load_pc_file(filename)
            if (pc.shape[0] != self.opt.num_points):
                continue
            pcs.append(pc)
        pcs = np.array(pcs)
        return pcs

    def get_queries_dict(self, filename):
        #key:{'query':file,'positives':[files],'negatives:[files], 'neighbors':[keys]}
        with open(filename, 'rb') as handle:
            queries = pickle.load(handle)
            print("Queries Loaded.")
            return queries

    def get_query_tuple(self, dict_value, num_pos, num_neg, QUERY_DICT, hard_neg=[], other_neg=False):
        #get query tuple for dictionary entry
        #return list [query,positives,negatives]
        # NOTE: shuffles dict_value's lists in place as a side effect.
        query = self.load_pc_file(dict_value["query"])  #Nx3
        random.shuffle(dict_value["positives"])
        pos_files = []
        for i in range(num_pos):
            pos_files.append(QUERY_DICT[dict_value["positives"][i]]["query"])
        positives = self.load_pc_files(pos_files)
        neg_files = []
        neg_indices = []
        if (len(hard_neg) == 0):
            # No hard negatives requested: sample num_neg random negatives.
            random.shuffle(dict_value["negatives"])
            for i in range(num_neg):
                neg_files.append(QUERY_DICT[dict_value["negatives"][i]]["query"])
                neg_indices.append(dict_value["negatives"][i])
        else:
            # Use the supplied hard negatives first, then top up with random
            # negatives not already in hard_neg.
            random.shuffle(dict_value["negatives"])
            for i in hard_neg:
                neg_files.append(QUERY_DICT[i]["query"])
                neg_indices.append(i)
            j = 0
            while (len(neg_files) < num_neg):
                if not dict_value["negatives"][j] in hard_neg:
                    neg_files.append(QUERY_DICT[dict_value["negatives"][j]]["query"])
                    neg_indices.append(dict_value["negatives"][j])
                j += 1
        negatives = self.load_pc_files(neg_files)
        if (other_neg == False):
            return [query, positives, negatives]
        #For Quadruplet Loss
        else:
            #get neighbors of negatives and query
            neighbors = []
            for pos in dict_value["positives"]:
                neighbors.append(pos)
            for neg in neg_indices:
                for pos in QUERY_DICT[neg]["positives"]:
                    neighbors.append(pos)
            # other_neg candidates: anything that is not a neighbor of the
            # query or of the chosen negatives.
            possible_negs = list(set(QUERY_DICT.keys()) - set(neighbors))
            random.shuffle(possible_negs)
            if (len(possible_negs) == 0):
                return [query, positives, negatives, np.array([])]
            neg2 = self.load_pc_file(QUERY_DICT[possible_negs[0]]["query"])
            return [query, positives, negatives, neg2]

    def __len__(self):
        return len(self.queries.keys())

    def __getitem__(self, index):
        current_key = self.queries_key[index]
        anchor_pcd, positive_pcd, negative_pcd, other_neg_pcd = self.get_query_tuple(self.queries[current_key], \
                                                                                    self.opt.pos_per_query, \
                                                                                    self.opt.neg_per_query, \
                                                                                    self.raw_queries, \
                                                                                    other_neg=True)
        # reshape to have same sizes
        anchor_pcd = anchor_pcd.reshape(1, anchor_pcd.shape[0], anchor_pcd.shape[1])
        other_neg_pcd = other_neg_pcd.reshape(1, other_neg_pcd.shape[0], other_neg_pcd.shape[1])
        # TODO: rotate points if requires augmentation
        # for if not self.opt.no_augmentation:
        # pctk.rotate_point_cloud(pc)
        return {'anchor': anchor_pcd,
                'positive': positive_pcd,
                'negative': negative_pcd,
                'other_neg': other_neg_pcd,}
<reponame>TokisakiKurumi2001/transformer-based
import random
# Split each row of the parallel corpus into sub-sentences (default: single
# sentences). Sentences are separated by a period ".".
def splitSentences(num_sentences_split=1):
    """Append period-split sub-sentences of each corpus row to both the
    Vietnamese and Bahnar files and write the results to `corpus_out`.

    :param num_sentences_split: number of sentences per emitted chunk
    """
    inputsDir = 'corpus'
    output_dir = 'corpus_out'
    txtVi = "data_vi_sentences.txt"
    txtBana = "data_bana_sentences.txt"
    # Read both parallel corpora (previously the file handles were leaked).
    with open(inputsDir + '/' + txtVi, encoding='utf-8', errors='ignore') as f:
        txtViFile = f.read()
    with open(inputsDir + '/' + txtBana, encoding='utf-8', errors='ignore') as f:
        txtBanaFile = f.read()
    splitsVi = txtViFile.split('\n')
    splitsBana = txtBanaFile.split('\n')
    num_rows = len(splitsVi)
    for i in range(num_rows):
        print('row is ', i)
        # Skip empty rows and rows containing "..".
        if len(splitsVi[i].strip()) == 0 or splitsVi[i].__contains__(".."):
            continue
        # NOTE(review): assumes the Bahnar row splits into at least as many
        # sentences as the Vietnamese row — an IndexError is possible
        # otherwise; confirm the corpora are sentence-aligned.
        sentences_arr_vi = splitsVi[i].split('. ')
        sentences_arr_bana = splitsBana[i].split('. ')
        num_sentences = len(sentences_arr_vi)
        if num_sentences > 0 and num_sentences > num_sentences_split:
            index = 1
            split_sentence_bana = ""
            split_sentence_vi = ""
            while index <= num_sentences:
                if index % num_sentences_split == 0:
                    # Chunk boundary: flush the accumulated sentences.
                    vi_each_sentence = sentences_arr_vi[index - 1].strip()
                    if len(vi_each_sentence) > 0 and vi_each_sentence != '\n':
                        split_sentence_vi += vi_each_sentence + '\n'
                        split_sentence_vi = split_sentence_vi.replace('\n', '. ').strip()
                        txtViFile += '\n' + split_sentence_vi
                        split_sentence_vi = ""
                    bana_each_sentence = sentences_arr_bana[index - 1].strip()
                    if len(bana_each_sentence) > 0 and bana_each_sentence != '\n':
                        split_sentence_bana += bana_each_sentence
                        split_sentence_bana = split_sentence_bana.replace('\n', '. ').strip()
                        txtBanaFile += '\n' + split_sentence_bana
                        split_sentence_bana = ""
                else:
                    # Mid-chunk: keep accumulating sentences.
                    vi_each_sentence = sentences_arr_vi[index - 1].strip()
                    if len(vi_each_sentence) > 0 and vi_each_sentence != '\n':
                        split_sentence_vi += vi_each_sentence + '\n'
                    bana_each_sentence = sentences_arr_bana[index - 1].strip()
                    if len(bana_each_sentence) > 0 and bana_each_sentence != '\n':
                        bana_each_sentence = bana_each_sentence.replace('.\n', '\n')
                        split_sentence_bana += bana_each_sentence
                index += 1
            # Flush any trailing partial chunk.
            if len(split_sentence_vi) > 0:
                split_sentence_vi = split_sentence_vi.replace('\n', '. ').strip()
                txtViFile += '\n' + split_sentence_vi
            if len(split_sentence_bana) > 0:
                split_sentence_bana = split_sentence_bana.replace('\n', '. ').strip()
                txtBanaFile += '\n' + split_sentence_bana
    # Clean up doubled punctuation.
    # NOTE(review): ':.' maps to ':' for Vietnamese but to '.' for Bahnar —
    # confirm this asymmetry is intentional.
    txtViFile = txtViFile.replace(':.', ':').replace('..', '.')
    txtBanaFile = txtBanaFile.replace(':.', '.').replace('..', '.')
    with open(output_dir + '/lang-vi-spr.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFile)
    with open(output_dir + '/lang-bana-spr.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFile)
# Multiply the data: append `times` copies of every row (default 10).
# Input:  "Tôi đi học", times=2
# Output: "Tôi đi học" (original row)
#         "Tôi đi học"
#         "Tôi đi học"
def timesMultipleData(times=10, inputsDir="output", output_dir="output", txtVi="lang-vi-spr.txt", txtBana="lang-bana-spr.txt", txtOutVi="lang-vi-spr", txtOutBana="lang-bana-spr"):
    """Append `times` copies of every non-empty row to both parallel files
    and write the results as `<name>-x<times>.txt`."""
    # inputsDir = 'output'
    # txtVi = "lang-vi-spr.txt"
    # txtBana = "lang-bana-spr.txt"
    # Read both corpora (previously the file handles were leaked).
    with open(inputsDir + '/' + txtVi, encoding='utf-8', errors='ignore') as f:
        txtViFile = f.read()
    with open(inputsDir + '/' + txtBana, encoding='utf-8', errors='ignore') as f:
        txtBanaFile = f.read()
    splitsVi = txtViFile.split('\n')
    splitsBana = txtBanaFile.split('\n')
    # NOTE(review): assumes both files have the same number of rows —
    # splitsBana[i] raises IndexError otherwise.
    num_rows = len(splitsVi)
    txtViFile += '\n'
    txtBanaFile += '\n'
    for i in range(num_rows):
        if len(splitsVi[i].strip()) == 0:
            continue
        for _ in range(times):
            txtViFile += '\n' + splitsVi[i].strip()
            txtBanaFile += '\n' + splitsBana[i].strip()
    numStr = str(times)
    with open(output_dir + '/' + txtOutVi + '-x' + numStr + '.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFile)
    with open(output_dir + '/' + txtOutBana + '-x' + numStr + '.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFile)
# Normalize curly double and single quotation marks to ASCII quotes.
def handleQuoutes():
    """Replace curly quotes with ASCII quotes in both corpus files and
    write the results to `new_output_new`."""
    inputsDir = 'new_output'
    output_dir = 'new_output_new'
    txtVi = "vi_2903.txt"
    txtBana = "bana_2903.txt"
    # Read both corpora (previously the file handles were leaked).
    with open(inputsDir + '/' + txtVi, encoding='utf-8', errors='ignore') as f:
        txtViFile = f.read()
    with open(inputsDir + '/' + txtBana, encoding='utf-8', errors='ignore') as f:
        txtBanaFile = f.read()
    # NOTE(review): only the LEFT single quote "‘" is replaced; the right
    # single quote "’" is left untouched — confirm whether that is intended.
    txtViFile = txtViFile.replace("“", "\"").replace("”", "\"").replace("‘", "'")
    txtBanaFile = txtBanaFile.replace("“", "\"").replace("”", "\"").replace("‘", "'")
    with open(output_dir + '/vi_2903.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFile)
    with open(output_dir + '/bana_2903.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFile)
# Shuffle the order of sentences in the dataset while keeping the source
# and target languages aligned: after shuffling, each Vietnamese line is
# still at the same position as its corresponding Bahnar line.
def shuffleRandomSentences(inputDir="new_output", outputDir="new_output", txtVi="vi_2903_no_x.txt", txtBana="bana_2903_no_x.txt", txtViOut="vi_2903_no_x_shuffle", txtBanaOut="bana_2903_no_x_shuffle"):
    """Shuffle both parallel files with the same permutation and write the
    results as `<name>_shuffle.txt`."""
    # inputsDir = 'new_output'
    # txtVi = "vi_2903_no_x.txt"
    # txtBana = "bana_2903_no_x.txt"
    # Read both corpora (previously the file handles were leaked).
    with open(inputDir + '/' + txtVi, encoding='utf-8', errors='ignore') as f:
        txtViFile = f.read()
    with open(inputDir + '/' + txtBana, encoding='utf-8', errors='ignore') as f:
        txtBanaFile = f.read()
    splitsVi = txtViFile.split('\n')
    splitsBana = txtBanaFile.split('\n')
    # Pair the lines so a single shuffle permutes both languages identically.
    generalArr = list(zip(splitsVi, splitsBana))
    random.shuffle(generalArr)
    splitsVi, splitsBana = zip(*generalArr)
    # bugfix: row count is taken AFTER pairing; zip truncates to the shorter
    # file, and the old pre-zip count could index out of range.
    num_rows = len(generalArr)
    txtViFileNew = ""
    txtBanaFileNew = ""
    splitsVi = list(splitsVi)
    splitsBana = list(splitsBana)
    for i in range(num_rows):
        txtViFileNew += splitsVi[i].strip() + "\n"
        txtBanaFileNew += splitsBana[i].strip() + "\n"
    # NOTE(review): the default txtViOut/txtBanaOut already end in
    # "_shuffle", so output names become "..._shuffle_shuffle.txt" — kept
    # as-is to avoid changing existing file names.
    with open(outputDir + '/' + txtViOut + '_shuffle.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFileNew)
    with open(outputDir + '/' + txtBanaOut + '_shuffle.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFileNew)
# Normalize trailing periods: if the Vietnamese sentence ends with "." and
# the Bahnar one does not (or vice versa), add the missing period.
# If this normalization was already done, this function can be skipped.
def handleEndSentence(inputsDir='corpus', outputDir='new_output', txtVi="vi-2903.txt", txtBana="bana-2903.txt", outVi="vi_2903", outBana="bana_2903"):
    """Align sentence-final periods between the two parallel files and drop
    rows that are empty in either language."""
    # inputsDir = 'corpus'
    # outputDir = 'new_output'
    # txtVi = "vi-2903.txt"
    # txtBana = "bana-2903.txt"
    # Read both corpora (previously the file handles were leaked).
    with open(inputsDir + '/' + txtVi, encoding='utf-8', errors='ignore') as f:
        txtViFile = f.read()
    with open(inputsDir + '/' + txtBana, encoding='utf-8', errors='ignore') as f:
        txtBanaFile = f.read()
    splitsVi = txtViFile.split('\n')
    splitsBana = txtBanaFile.split('\n')
    txtViFileNew = ""
    txtBanaFileNew = ""
    # robustness: iterate paired rows; previously indexing splitsBana[i]
    # raised IndexError when the Bahnar file had fewer rows.
    for row_vi, row_bana in zip(splitsVi, splitsBana):
        if row_vi.endswith(".") and (not row_bana.endswith(".")):
            txtViFileNew += row_vi + "\n"
            txtBanaFileNew += row_bana + "." + "\n"
        elif row_bana.endswith(".") and (not row_vi.endswith(".")):
            txtBanaFileNew += row_bana + "\n"
            txtViFileNew += row_vi + "." + "\n"
        elif row_vi.strip() != "" and row_bana.strip() != "":
            txtViFileNew += row_vi + "\n"
            txtBanaFileNew += row_bana + "\n"
    with open(outputDir + '/' + outVi + '.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFileNew)
    with open(outputDir + '/' + outBana + '.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFileNew)
# Split the dataset into train and test sets (default ratio 7:3).
def train_test_split(trainSplitRate=0.7, inputDir="new_output_1", outputDir="new_output_1", txtVi="vi_0904_full_shuffle", txtBana="bana_0904_full_shuffle"):
    """Write `<name>_train.txt` / `<name>_test.txt` for both languages,
    taking the first `trainSplitRate` fraction of rows as training data."""
    # Read both corpora (previously the file handles were leaked).
    with open(inputDir + '/' + txtVi + '.txt', encoding='utf-8', errors='ignore') as f:
        txtViFile = f.read()
    # bugfix: the Bahnar INPUT was read from outputDir — latent only because
    # the default inputDir and outputDir are identical.
    with open(inputDir + '/' + txtBana + '.txt', encoding='utf-8', errors='ignore') as f:
        txtBanaFile = f.read()
    splitsVi = txtViFile.split('\n')
    splitsBana = txtBanaFile.split('\n')
    num_rows = len(splitsVi)
    train_rows = round(num_rows * trainSplitRate)
    test_rows = num_rows - train_rows
    # Sanity check on the split sizes; silently abort on nonsense ratios
    # (preserved from the original behavior).
    if train_rows < 0 or train_rows > num_rows or test_rows < 0 or test_rows > num_rows:
        return
    txtViFileTrain = ""
    txtBanaFileTrain = ""
    txtViFileTest = ""
    txtBanaFileTest = ""
    for i in range(num_rows):
        if i < train_rows:
            txtViFileTrain += splitsVi[i].strip() + "\n"
            txtBanaFileTrain += splitsBana[i].strip() + "\n"
        else:
            txtViFileTest += splitsVi[i].strip() + "\n"
            txtBanaFileTest += splitsBana[i].strip() + "\n"
    with open(outputDir + '/' + txtVi + '_train.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFileTrain)
    with open(outputDir + '/' + txtBana + '_train.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFileTrain)
    with open(outputDir + '/' + txtVi + '_test.txt', 'w', encoding='utf-8') as f:
        f.write(txtViFileTest)
    with open(outputDir + '/' + txtBana + '_test.txt', 'w', encoding='utf-8') as f:
        f.write(txtBanaFileTest)
|
<reponame>yoagauthier/deep_learning_course
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plt
# Loading the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

NUM_DIGITS = 784  # 28 x 28 = 784 (size of the input image)
NUM_HIDDEN = 256  # width of each hidden layer
NUM_CLASSES = 10  # digits 0-9
DISPLAY_IMGS = False  # set True to preview some training images

if DISPLAY_IMGS:
    # >>>> Displaying some images and their labels from the MNIST dataset
    for i in range(1,10):
        print("Label: " + str(mnist.train.labels[i]))  # label of i-th element of training data
        img = mnist.train.images[i].reshape((28, 28))  # saving in 'img', the reshaped i-th element of the training dataset
        plt.imshow(img, cmap='gray')  # displaying the image
        plt.show()
# >>>> Define input and ground-truth variables
X = tf.placeholder(tf.float32, [None, NUM_DIGITS])  # input data
Y = tf.placeholder(tf.float32, [None, NUM_CLASSES])  # ground-truth

# >>>> Randomly intialize the variables
def init_weights(shape):
    """Return a TF variable of the given shape with small random weights."""
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

w_h1 = init_weights([NUM_DIGITS, NUM_HIDDEN])   # input -> hidden 1
w_h2 = init_weights([NUM_HIDDEN, NUM_HIDDEN])   # hidden 1 -> hidden 2
w_o = init_weights([NUM_HIDDEN, NUM_CLASSES])   # hidden 2 -> output logits
# >>>> Define the network model
def model(X, w_h1, w_h2, w_o):
    """Two-hidden-layer ReLU MLP; returns the (unnormalized) output logits."""
    h1 = tf.nn.relu(tf.matmul(X, w_h1))
    h2 = tf.nn.relu(tf.matmul(h1, w_h2))
    return tf.matmul(h2, w_o)
# > Compute the predicted Y_p for an imput vector X
Y_p = model(X, w_h1, w_h2, w_o)

# > Define the cost function and the optimizer
# using softmax guarantees that the output probabilities (probability that
# an element belongs to each class) sum to 1
cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Y_p, labels=Y))
optimization_algorithm = tf.train.GradientDescentOptimizer(0.5).minimize(cost_function)

# >>>> Launch an interactive tensorflow session
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# >>>> For accuracy
# the output vector has the form [0.1, 0.4, 0.9, ..., 0.0], where each index
# gives the output probability over the set of classes 0, 1, 2, ...
# which are the classes we have to predict
# tf.argmax(Y_p, 1) gives the predicted class index
correct_prediction = tf.equal(tf.argmax(Y_p, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# >>>> Train the network
# 1 epoch = one full pass over the dataset
# (a shuffle is needed each time)
# iteration = the computation for one batch
# here we run 20000 * 50 / 55000 = 18 epochs
for iteration in range(20000):
    batch = mnist.train.next_batch(50)  # every batch of 50 images
    if iteration % 100 == 0:
        # batch[0] = image, batch[1] = label
        train_accuracy = accuracy.eval(feed_dict={X: batch[0], Y: batch[1]})
        print("iteration: %d, training accuracy: %g" % (iteration, train_accuracy))
    optimization_algorithm.run(feed_dict={X: batch[0], Y: batch[1]})

# >>>> Save the learned model
# > Add ops to save and restore all the variables.
saver = tf.train.Saver()
# > Variables to save
tf.add_to_collection('vars', w_h1)
tf.add_to_collection('vars', w_h2)
tf.add_to_collection('vars', w_o)
# > Save the variables to disk
save_path = saver.save(sess, "./tensorflow_model.ckpt")
print("Model saved in file: %s" % save_path)

# >>>> Restore variables saved in learned model
new_saver = tf.train.import_meta_graph('tensorflow_model.ckpt.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./'))
all_vars = tf.get_collection('vars')
# Variables come back in insertion order: w_h1, w_h2, w_o.
i = 0
for v in all_vars:
    v_ = sess.run(v)
    if i == 0:
        w_h1 = v_  # restore w_h1
    if i == 1:
        w_h2 = v_  # restore w_h2
    if i == 2:
        w_o = v_  # restore w_o
    i = i + 1
print("Model restored correctly!")

# >>>> Test the trained model
print("\n\nTest accuracy: %g" % accuracy.eval(feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
<gh_stars>0
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Describe operational constraints on generation, storage, and DR projects.
This module contains the defaults for the operational type module methods (
the standard methods used by the operational type modules to interact with
the rest of the model).
If an operational type module method is not specified in an operational type
module, these defaults are used.
"""
import csv
import os.path
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.auxiliary import get_required_subtype_modules_from_projects_file
from gridpath.project.operations.common_functions import load_operational_type_modules
from gridpath.auxiliary.db_interface import setup_results_import
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """Delegate Pyomo component creation to each required operational-type
    module.

    :param m: the Pyomo abstract model
    :param d: the DynamicComponents class object
    :param scenario_directory: the scenario directory
    :param subproblem: the subproblem ID
    :param stage: the stage ID
    """
    # Determine which operational-type modules this scenario needs, then
    # import them.
    op_types = get_required_subtype_modules_from_projects_file(
        scenario_directory=scenario_directory,
        subproblem=subproblem,
        stage=stage,
        which_type="operational_type",
    )
    op_type_modules = load_operational_type_modules(op_types)

    # Let every module that implements the hook add its own components.
    for op_type in op_types:
        module = op_type_modules[op_type]
        if hasattr(module, "add_model_components"):
            module.add_model_components(m, d, scenario_directory, subproblem, stage)
def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage):
    """Delegate input loading to each required operational-type module.

    :param m: the Pyomo abstract model
    :param d: the DynamicComponents class object
    :param data_portal: the Pyomo DataPortal
    :param scenario_directory: the scenario directory
    :param subproblem: the subproblem ID
    :param stage: the stage ID
    """
    op_types = get_required_subtype_modules_from_projects_file(
        scenario_directory=scenario_directory,
        subproblem=subproblem,
        stage=stage,
        which_type="operational_type",
    )
    op_type_modules = load_operational_type_modules(op_types)

    # Modules that do not define the hook simply have nothing to load.
    for op_type in op_types:
        module = op_type_modules[op_type]
        if hasattr(module, "load_model_data"):
            module.load_model_data(
                m, d, data_portal, scenario_directory, subproblem, stage
            )
def export_results(scenario_directory, subproblem, stage, m, d):
    """Export operations results via each required operational-type module.

    :param scenario_directory: the scenario directory
    :param subproblem: the subproblem ID
    :param stage: the stage ID
    :param m: the Pyomo abstract model
    :param d: the DynamicComponents class object
    :return: nothing
    """
    op_types = get_required_subtype_modules_from_projects_file(
        scenario_directory=scenario_directory,
        subproblem=subproblem,
        stage=stage,
        which_type="operational_type",
    )
    op_type_modules = load_operational_type_modules(op_types)

    # Modules without an export hook have no results of their own.
    for op_type in op_types:
        module = op_type_modules[op_type]
        if hasattr(module, "export_results"):
            module.export_results(m, d, scenario_directory, subproblem, stage)
# TODO: move this into SubScenarios class?
def get_required_opchar_modules(scenario_id, c):
    """
    Get the required operational type submodules based on the database inputs
    for the specified scenario_id. Required modules are the unique set of
    generator operational types in the scenario's portfolio. Get the list based
    on the project_operational_chars_scenario_id of the scenario_id.

    This list will be used to know for which operational type submodules we
    should validate inputs, get inputs from database, or save results to
    database.

    Note: once we have determined the dynamic components, this information
    will also be stored in the DynamicComponents class object.

    :param scenario_id: user-specified scenario ID
    :param c: database cursor
    :return: List of the required operational type submodules
    """
    # Use parameterized queries ("?" placeholders) instead of str.format so
    # the IDs cannot break or inject into the SQL statements.
    project_portfolio_scenario_id = c.execute(
        """SELECT project_portfolio_scenario_id
        FROM scenarios
        WHERE scenario_id = ?;""",
        (scenario_id,),
    ).fetchone()[0]

    project_opchars_scenario_id = c.execute(
        """SELECT project_operational_chars_scenario_id
        FROM scenarios
        WHERE scenario_id = ?;""",
        (scenario_id,),
    ).fetchone()[0]

    # The required modules are the distinct operational types of the
    # projects in the scenario's portfolio.
    required_opchar_modules = [
        p[0]
        for p in c.execute(
            """SELECT DISTINCT operational_type
            FROM
            (SELECT project FROM inputs_project_portfolios
            WHERE project_portfolio_scenario_id = ?) as prj_tbl
            INNER JOIN
            (SELECT project, operational_type
            FROM inputs_project_operational_chars
            WHERE project_operational_chars_scenario_id = ?) as op_type_tbl
            USING (project);""",
            (project_portfolio_scenario_id, project_opchars_scenario_id),
        ).fetchall()
    ]

    return required_opchar_modules
def validate_inputs(scenario_id, subscenarios, subproblem, stage, conn):
    """Validate module-specific database inputs for each required
    operational type.

    :param scenario_id: user-specified scenario ID
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem: the subproblem ID
    :param stage: the stage ID
    :param conn: database connection
    """
    cursor = conn.cursor()
    op_types = get_required_opchar_modules(scenario_id, cursor)
    op_type_modules = load_operational_type_modules(op_types)

    # Only modules that define the hook have anything to validate.
    for op_type in op_types:
        module = op_type_modules[op_type]
        if hasattr(module, "validate_inputs"):
            module.validate_inputs(scenario_id, subscenarios, subproblem, stage, conn)
def write_model_inputs(
    scenario_directory, scenario_id, subscenarios, subproblem, stage, conn
):
    """Write module-specific model input .tab files for each required
    operational type.

    :param scenario_directory: string, the scenario directory
    :param scenario_id: user-specified scenario ID
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem: the subproblem ID
    :param stage: the stage ID
    :param conn: database connection
    """
    cursor = conn.cursor()
    op_types = get_required_opchar_modules(scenario_id, cursor)
    op_type_modules = load_operational_type_modules(op_types)

    # Only modules that define the hook write input files.
    for op_type in op_types:
        module = op_type_modules[op_type]
        if hasattr(module, "write_model_inputs"):
            module.write_model_inputs(
                scenario_directory, scenario_id, subscenarios, subproblem, stage, conn
            )
def import_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    Import dispatch_all.csv into the results_project_dispatch table, then
    delegate to the operational-type modules for module-specific results.

    :param scenario_id:
    :param subproblem:
    :param stage:
    :param c: database cursor
    :param db: database connection
    :param results_directory: directory containing dispatch_all.csv
    :param quiet: if True, suppress progress printing
    :return:
    """
    if not quiet:
        print("project dispatch all")
    # dispatch_all.csv
    # Delete prior results and create temporary import table for ordering
    setup_results_import(
        conn=db,
        cursor=c,
        table="results_project_dispatch",
        scenario_id=scenario_id,
        subproblem=subproblem,
        stage=stage,
    )

    # Load results into the temporary table
    results = []
    with open(
        os.path.join(results_directory, "dispatch_all.csv"), "r"
    ) as dispatch_file:
        reader = csv.reader(dispatch_file)

        next(reader)  # skip header
        # CSV column order: project, period, horizon, timepoint,
        # operational_type, balancing_type, timepoint_weight,
        # number_of_hours_in_timepoint, load_zone, technology, power_mw.
        for row in reader:
            project = row[0]
            period = row[1]
            horizon = row[2]
            timepoint = row[3]
            operational_type = row[4]
            balancing_type = row[5]
            timepoint_weight = row[6]
            number_of_hours_in_timepoint = row[7]
            load_zone = row[8]
            technology = row[9]
            power_mw = row[10]

            # NOTE: the tuple order differs from the CSV order — it must
            # match the column list of the INSERT statement below (horizon
            # comes after balancing_type there).
            results.append(
                (
                    scenario_id,
                    project,
                    period,
                    subproblem,
                    stage,
                    timepoint,
                    operational_type,
                    balancing_type,
                    horizon,
                    timepoint_weight,
                    number_of_hours_in_timepoint,
                    load_zone,
                    technology,
                    power_mw,
                )
            )

    insert_temp_sql = """
        INSERT INTO temp_results_project_dispatch{}
        (scenario_id, project, period, subproblem_id, stage_id, timepoint,
        operational_type, balancing_type,
        horizon, timepoint_weight,
        number_of_hours_in_timepoint,
        load_zone, technology, power_mw)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
        """.format(
        scenario_id
    )
    spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql, data=results)

    # Insert sorted results into permanent results table
    insert_sql = """
        INSERT INTO results_project_dispatch
        (scenario_id, project, period, subproblem_id, stage_id, timepoint,
        operational_type, balancing_type,
        horizon, timepoint_weight, number_of_hours_in_timepoint,
        load_zone, technology, power_mw)
        SELECT
        scenario_id, project, period, subproblem_id, stage_id, timepoint,
        operational_type, balancing_type,
        horizon, timepoint_weight, number_of_hours_in_timepoint,
        load_zone, technology, power_mw
        FROM temp_results_project_dispatch{}
        ORDER BY scenario_id, project, subproblem_id, stage_id, timepoint;
        """.format(
        scenario_id
    )
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(), many=False)

    # Load in the required operational modules
    required_opchar_modules = get_required_opchar_modules(scenario_id, c)
    imported_operational_modules = load_operational_type_modules(
        required_opchar_modules
    )

    # Import module-specific results
    for op_m in required_opchar_modules:
        if hasattr(
            imported_operational_modules[op_m], "import_model_results_to_database"
        ):
            imported_operational_modules[op_m].import_model_results_to_database(
                scenario_id, subproblem, stage, c, db, results_directory, quiet
            )
        else:
            pass
def process_results(db, c, scenario_id, subscenarios, quiet):
    """Run module-specific results processing for each required
    operational type.

    :param db: database connection
    :param c: database cursor
    :param scenario_id: user-specified scenario ID
    :param subscenarios: SubScenarios object with all subscenario info
    :param quiet: if True, suppress progress printing
    """
    op_types = get_required_opchar_modules(scenario_id, c)
    op_type_modules = load_operational_type_modules(op_types)

    # Only modules that define the hook post-process their results.
    for op_type in op_types:
        module = op_type_modules[op_type]
        if hasattr(module, "process_model_results"):
            module.process_model_results(db, c, scenario_id, subscenarios, quiet)
# Operational Type Module Method Defaults
###############################################################################
# Each function below is the fallback used when an operational-type module
# does not implement the corresponding method. Signatures follow the Pyomo
# rule convention: (model, project, timepoint) — plus a trailing segment
# index for the "by_ll" / piecewise rules.
def power_provision_rule(mod, prj, tmp):
    """
    If no power_provision_rule is specified in an operational type module, the
    default power provision for load-balance purposes is 0.
    """
    return 0


def online_capacity_rule(mod, g, tmp):
    """
    The default online capacity is the available capacity.
    """
    # Capacity in the timepoint's period, scaled by the availability derate.
    return mod.Capacity_MW[g, mod.period[tmp]] * mod.Availability_Derate[g, tmp]


def variable_om_cost_rule(mod, prj, tmp):
    """
    By default the variable cost is the power provision (for load balancing
    purposes) times the variable cost. Projects of operational type that
    produce power not used for load balancing (e.g. curtailed power or
    auxiliary power) should not use this default rule.
    """
    return mod.Power_Provision_MW[prj, tmp] * mod.variable_om_cost_per_mwh[prj]


def variable_om_cost_by_ll_rule(mod, prj, tmp, s):
    """
    By default the VOM curve cost needs to be greater than or equal to 0.
    """
    return 0


def fuel_burn_rule(mod, prj, tmp):
    """
    If no fuel_burn_rule is specified in an operational type module, the
    default fuel burn is 0.
    """
    return 0


def fuel_burn_by_ll_rule(mod, prj, tmp, s):
    """
    If no fuel_burn_by_ll_rule is specified in an operational type module, the
    default fuel burn needs to be greater than or equal to 0.
    """
    return 0


def startup_cost_simple_rule(mod, prj, tmp):
    """
    If no startup_cost_simple_rule is specified in an operational type module,
    the default startup cost is 0.
    """
    return 0


def startup_cost_by_st_rule(mod, prj, tmp):
    """
    If no startup_cost_rule is specified in an operational type module, the
    default startup fuel cost is 0.
    """
    return 0


def shutdown_cost_rule(mod, prj, tmp):
    """
    If no shutdown_cost_rule is specified in an operational type module, the
    default shutdown fuel cost is 0.
    """
    return 0


def startup_fuel_burn_rule(mod, prj, tmp):
    """
    If no startup_fuel_burn_rule is specified in an operational type module, the
    default startup fuel burn is 0.
    """
    return 0


def rec_provision_rule(mod, prj, tmp):
    """
    If no rec_provision_rule is specified in an operational type module,
    the default REC provisions is the power provision for load-balancing
    purposes.
    """
    return mod.Power_Provision_MW[prj, tmp]


def scheduled_curtailment_rule(mod, prj, tmp):
    """
    If no scheduled_curtailment_rule is specified in an operational type
    module, the default scheduled curtailment is 0.
    """
    return 0


def subhourly_curtailment_rule(mod, prj, tmp):
    """
    If no subhourly_curtailment_rule is specified in an operational type
    module, the default subhourly curtailment is 0.
    """
    return 0


def subhourly_energy_delivered_rule(mod, prj, tmp):
    """
    If no subhourly_energy_delivered_rule is specified in an operational type
    module, the default subhourly energy delivered is 0.
    """
    return 0


def operational_violation_cost_rule(mod, prj, tmp):
    """
    If no operational_violation_cost_rule is specified, the default
    operational violation cost is 0.
    """
    return 0


def curtailment_cost_rule(mod, prj, tmp):
    """
    If no curtailment_cost_rule is specified, the default curtailment cost
    is 0.
    """
    return 0
|
<reponame>Crossroadsman/python-notes<filename>libraries/pillow/pillow_tut.py
# Pillow tutorial script: opening, rotating, converting, resizing, cropping
# and saving images. Every .show() call opens the OS default image viewer.
# imports for Pillow still use PIL as the import name
from PIL import Image

# Pillow lazy-loads the file so this operation is fast
image_file = Image.open("img/IMG_1.jpg")

# These details can be obtained just by looking at the file header so these
# operations are fast and not filesize dependent
print(image_file.format)  # JPEG
print(image_file.size)  # (3003, 3003) x then y
print(image_file.mode)  # RGB

# Creates a temporary file and loads it in the default image editor
image_file.show()

# rotate(d) rotates the file counter-clockwise by the specified amount in
# degrees
# optional args:
# - resample : (default=Image.NEAREST) the resampling filter to use
# - expand : (default=False) expand the image box to contain the whole image
#   after rotation. Otherwise crops the image to preserve the original
#   image dimensions.
rotate_image = Image.open("img/IMG_2.jpg")
rotated = rotate_image.rotate(45)
rotated.show()
rotated_expand = rotate_image.rotate(45,
                                     resample=Image.BICUBIC,
                                     expand=True)
rotated_expand.show()

# Mode conversion
# ===============
# the following are some of the most common modes:
# 1     : 1-bit pixels
# L     : 8-bit black and white
# P     : 8-bit using a palette
# RGB   : 24-bit
# RGBA  : 32-bit (24-bit plus 8-bit transparency mask)
# CMYK
# YCbCr
# LAB
# HSV
bw = image_file.convert(mode='1')  # hard 1-bit threshold (dithered)
bw.show()
bw = image_file.convert(mode='L')  # 8-bit greyscale
bw.show()

# Resizing
# ========
# parameters:
# - size : 2-tuple ints width, height in px for output
# - resample (optional) : the filter to use. Default is nearest-neighbour
#   - PIL.Image.NEAREST (default) : nearest-neighbour. Worst quality, best
#     performance
#   - PIL.Image.BICUBIC : Bicupic filter. Best quality filter available for
#     geometry transforms other than resize/thumbnail
#   - PIL.Image.LANCZOS : Lanczos filter. Best quality, worst performance
# - box (optional) : 4-tuple describing the co-ordinates of the image to resize
# NOTE: resize() does not preserve aspect ratio — the next call distorts.
small = image_file.resize((256, 256))
small.show()
large = image_file.resize((4000,1000))
large.show()
small_input = Image.open("img/IMG_3.jpg")
downsized = small_input.resize((256,256), Image.LANCZOS)
downsized.show()
# Upscale the same image with both the worst and the best filter to
# compare quality.
x_multiplier = 8
y_multiplier = 8
up_bad = downsized.resize(
    (downsized.width * x_multiplier, downsized.height * y_multiplier),
    Image.NEAREST
)
up_bad.show()
up_good = downsized.resize(
    (downsized.width * x_multiplier, downsized.height * y_multiplier),
    Image.LANCZOS
)
up_good.show()

# Thumbnail
# like resize except modifies in-place rather than returning a new instance
# note that the size arguments describe the maximum value in that direction,
# preserving aspect ratio
small_input_copy = small_input.copy()
small_input_copy.thumbnail((256, 256))
small_input_copy.show()

# Crop
# the box is in the form (x0, y0, x1, y1)
box = (500, 1100, 900, 1700)  # look for jQuery widget that gets a box
cropping_image = Image.open("img/IMG_4.jpg")
cropping_image.show()
cropped_image = cropping_image.crop(box)
cropped_image.show()
# Blow the crop back up for a closer look.
re_up_multiplier = 3
cropped_image.resize(
    (cropped_image.width*re_up_multiplier, cropped_image.height*re_up_multiplier),
    Image.LANCZOS
).show()

# Saving
# Crop, scale, then write the result back to disk (format inferred from
# the .jpg extension).
to_save_file = Image.open("img/IMG_5.jpg")
box = (1250, 800, 3850, 2020)
transformed = to_save_file.crop(box)
scale_factor = 1800/1220
transformed = transformed.resize(
    (int(transformed.width * scale_factor), int(transformed.height * scale_factor)),
    Image.LANCZOS
)
transformed.save("img/transformed.jpg")

# See also the fourth video in the Treehouse Image Manipulation with Python
# workshop for examples of using ImageEnhance (brightness, desaturation etc)
# and ImageFilter (sharpen, gaussian blur, etc)
# https://teamtreehouse.com/library/enhance-and-filter
|
<gh_stars>0
"""Fill docstrings to avoid redundant docstrings in multiple files.
Inspired from mne: https://mne.tools/stable/index.html
Inspired from mne.utils.docs.py by <NAME> <<EMAIL>>
"""
import sys
from typing import Callable, List
# ------------------------- Documentation dictionary -------------------------
# Mapping from placeholder name to the docstring fragment that fill_doc()
# substitutes into %(name)s slots.
docdict = dict()

# -------------------------------- general -----------------------------------
docdict[
    "folder_data"
] = """
folder : path-like
    Path to the directory containing raw data with recordings, logs and
    models."""
# FIX: entry describes a single participant; heading was "participants : int".
docdict[
    "participant"
] = """
participant : int
    ID of the participant to include."""
docdict[
    "participants"
] = """
participants : list | tuple
    List of participant IDx to include."""
docdict[
    "session"
] = """
session : int
    ID of the session to include (between 1 and 15)."""
docdict[
    "raw"
] = """
raw : Raw"""
docdict[
    "copy"
] = """
copy : bool
    If True, operates and return a copy. Default to False to operate
    in-place."""

# --------------------------------- evamed -----------------------------------
docdict[
    "df_raw_evamed"
] = """
df : DataFrame
    DataFrame loaded by neurotin.io.read_csv_evamed()."""
# FIX: closed the unbalanced quote around 'participant' in the column list.
docdict[
    "df_clinical"
] = """
df: DataFrame
    DataFrame containing the columns 'participant', 'visit', 'date', and
    'result'."""

# ------------------------------ preprocessing -------------------------------
docdict[
    "bandpass"
] = """
bandpass : tuple
    A 2-length tuple (highpass, lowpass), e.g. (1., 40.).
    The lowpass or highpass filter can be disabled by using None."""
docdict[
    "notch"
] = """
notch : bool
    If True, a notch filter at (50, 100, 150) Hz is applied, removing EU
    powerline activity."""
docdict[
    "ica"
] = """
ica : ICA
    ICA decomposition using the Preconditioned ICA for Real Data algorithm
    (PICARD)."""

# ------------------------------------ psd -----------------------------------
docdict[
    "df_psd"
] = """
df : DataFrame
    PSD in frequency band (fmin, fmax) averaged across bins. The columns are:
        participant : int - Participant ID
        session : int - Session ID (1 to 15)
        run : int - Run ID
        phase : str - 'regulation' or 'non-regulation'
        idx : ID of the phase within the run (0 to 9)
        ch : float - Averaged PSD (1 per channel)"""
docdict[
    "psd_duration"
] = """
duration : float
    Duration of an epoch in seconds."""
# FIX: missing space in "overlap :float".
docdict[
    "psd_overlap"
] = """
overlap : float
    Duration of epoch overlap in seconds."""
docdict[
    "psd_reject"
] = """
reject : dict | 'auto' | None
    MNE-compatible rejection dictionary or 'auto' to compute it with
    autoreject. If set to None, rejection is skipped."""

# -------------------------------- externals ---------------------------------
docdict[
    "plt.figsize"
] = """
figsize : tuple
    2-sequence tuple defining the matplotlib figure size: (width, height) in
    inches."""

# ------------------------- Documentation functions --------------------------
# Cache of docdict re-indented per indentation level, filled lazily by
# fill_doc().
docdict_indented = dict()
def fill_doc(f: Callable) -> Callable:
    """Fill a docstring with docdict entries.

    Parameters
    ----------
    f : callable
        The function to fill the docstring of (modified in place).

    Returns
    -------
    f : callable
        The function, potentially with an updated __doc__.
    """
    docstring = f.__doc__
    if not docstring:
        return f
    lines = docstring.splitlines()
    indent_count = _indentcount_lines(lines)
    try:
        indented = docdict_indented[indent_count]
    except KeyError:
        # Lazily build (and cache) a copy of docdict re-indented to this
        # docstring's indentation level.
        indent = " " * indent_count
        docdict_indented[indent_count] = indented = dict()
        for name, docstr in docdict.items():
            lines = [
                indent + line if k != 0 else line
                for k, line in enumerate(docstr.strip().splitlines())
            ]
            indented[name] = "\n".join(lines)
    try:
        f.__doc__ = docstring % indented
    except (TypeError, ValueError, KeyError) as exp:
        funcname = f.__name__
        funcname = docstring.split("\n")[0] if funcname is None else funcname
        # FIX: chain the original exception (raise ... from ...) so the
        # root cause stays visible in the traceback (was swallowed before).
        raise RuntimeError(f"Error documenting {funcname}:\n{str(exp)}") from exp
    return f
def _indentcount_lines(lines: List[str]) -> int:
    """Return the minimum indentation of all lines but the first.

    The first line is skipped because the summary line of a docstring
    carries no leading indentation. Blank (whitespace-only) lines are
    ignored. Returns 0 when no indented content line exists.
    """
    minimum = sys.maxsize
    for index, line in enumerate(lines):
        if index == 0:
            continue
        stripped = line.lstrip()
        if stripped:
            minimum = min(minimum, len(line) - len(stripped))
    return 0 if minimum == sys.maxsize else minimum
def copy_doc(source: Callable) -> Callable:
    """Copy the docstring from another function (decorator).

    The docstring of *source* is prepended to the docstring of the
    decorated function. Useful when overriding an inherited method: the
    override keeps the parent documentation plus its own addendum.

    Parameters
    ----------
    source : callable
        The function to copy the docstring from.

    Returns
    -------
    wrapper : callable
        The decorated function.

    Examples
    --------
    >>> class A:
    ...     def m1():
    ...         '''Docstring for m1'''
    ...         pass
    >>> class B(A):
    ...     @copy_doc(A.m1)
    ...     def m1():
    ...         ''' this gets appended'''
    ...         pass
    >>> print(B.m1.__doc__)
    Docstring for m1 this gets appended
    """

    def wrapper(func):
        src_doc = source.__doc__
        if not src_doc:
            raise RuntimeError(
                f"The docstring from {source.__name__} could not be copied "
                "because it was empty."
            )
        combined = src_doc if func.__doc__ is None else src_doc + func.__doc__
        func.__doc__ = combined
        return func

    return wrapper
|
<reponame>frzfrsfra4/phylanx
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# significant re-working of the algorithm implementation found on this site:
#
# https://machinelearningmastery.com/implement-random-forest-scratch-python/
#
from numpy import floor, argsort, sum, sqrt
from numpy import float64, int64, zeros
from numpy import argmax, inf, genfromtxt
from numpy import vstack, iinfo, finfo, unique
from numpy.random import randint, rand
def test_split(idx, val, dataset):
left, right = list(), list()
for i in range(dataset.shape[0]):
row = dataset[i, :]
if row[idx] < val:
left.append(row)
else:
right.append(row)
if len(left) < 1 and len(right) > 0:
return (zeros((0,)), vstack(right))
elif len(left) > 0 and len(right) < 0:
return (vstack(left), zeros((0,)))
return (vstack(left), vstack(right))
def gini_index(groups, classes):
    """Weighted Gini impurity of a candidate split.

    :param groups: iterable of 2-D arrays (one per branch); the last
        column of each row holds the class label
    :param classes: indexable mapping from class label to histogram slot
    :return: impurity in [0, 1); 0.0 means a perfect split
    """
    sizes = [len(g) for g in groups]
    total = float64(sum(sizes))
    counts = zeros(len(classes), dtype=float64)
    impurity = 0.0
    for group, size in zip(groups, sizes):
        if size <= 0:
            # Empty branches contribute nothing.
            continue
        for row in group:
            counts[classes[int64(row[-1])]] += 1.0
        purity = sum(((counts / float64(size)) ** 2.0))
        impurity += (1.0 - purity) * float64(size / total)
        counts[:] = 0.0
    return impurity
def get_split(dataset, n_features, classes):
    """Find the best (feature, value) split of *dataset* over a random
    subset of n_features features, by exhaustive Gini search.

    Returns a node dict: 'index'/'value' define the split, 'groups' holds
    the (left, right) partitions, and 'lw'/'rw' start at inf meaning
    "not a terminal" (split() fills them in later).
    """
    # Invert the label->slot dict into an array usable by gini_index.
    # NOTE(review): `classes[i]` assumes the class labels are exactly
    # 0..n-1; other label sets would raise KeyError — confirm intended.
    cls_values = zeros(len(classes), dtype=int64)
    for i in range(len(classes)):
        cls_values[classes[i]] = i
    b_idx = iinfo(int64).max
    b_val = finfo(float64).max
    b_score = finfo(float64).max
    b_groups = (list(), list())
    # Pick a random subset of feature columns by sorting random keys
    # (the last column is the label and is excluded).
    idx_w = randint(0, dataset.shape[1] - 1, size=dataset.shape[1] - 1)
    idx = zeros(dataset.shape[1] - 1, dtype=int64)
    for i in range(dataset.shape[1] - 1):
        idx[i] = i
    features = idx[argsort(idx_w)][:n_features]
    # Try every row's value of every candidate feature as a threshold and
    # keep the split with the lowest Gini impurity.
    for feature in features:
        for r in range(dataset.shape[0]):
            groups = test_split(feature, dataset[r, feature], dataset)
            gini = gini_index(groups, cls_values)
            if gini < b_score:
                b_idx = feature
                b_val = dataset[r, feature]
                b_score = gini
                b_groups = groups
    return {'index': b_idx,
            'value': b_val,
            'groups': b_groups,
            'lw': inf,
            'rw': inf}
def to_terminal(group, classes):
    """Return the histogram slot of the majority class among *group* rows.

    :param group: 2-D array; the last column of each row is the class label
    :param classes: mapping from class label to histogram slot
    :return: slot index of the most frequent class
    """
    histogram = zeros(len(classes), dtype=int64)
    for row in group:
        label = int64(row[-1])
        histogram[classes[label]] += 1
    return argmax(histogram)
def split(node, max_depth, min_sz, n_features, depth, classes):
    """Recursively grow the tree below *node* (in place).

    Terminals are stored in the node's 'lw'/'rw' keys (majority-class
    slot); non-terminal children are stored under 'left'/'right'.
    """
    GRP, LFT, RHT, LW, RW = 'groups', 'left', 'right', 'lw', 'rw'
    (left, right) = node[GRP]
    # The raw row groups are no longer needed once consumed.
    del(node[GRP])
    # One side empty: both children become the same terminal.
    if left.shape == (0,) or right.shape == (0,):
        if left.shape == (0,):
            term = to_terminal(right, classes)
        else:
            term = to_terminal(left, classes)
        node[LW] = term
        node[RW] = term
        return
    # Depth limit reached: force both children terminal.
    if depth >= max_depth:
        lterm = to_terminal(left, classes)
        rterm = to_terminal(right, classes)
        node[LW] = lterm
        node[RW] = rterm
        return
    # Left child: terminal if too small, otherwise recurse.
    if len(left) <= min_sz:
        node[LW] = to_terminal(left, classes)
    else:
        node[LFT] = get_split(left, n_features, classes)
        split(node[LFT], max_depth, min_sz, n_features, depth + 1, classes)
    # Right child: same policy.
    if len(right) <= min_sz:
        node[RW] = to_terminal(right, classes)
    else:
        node[RHT] = get_split(right, n_features, classes)
        split(node[RHT], max_depth, min_sz, n_features, depth + 1, classes)
def build_tree(train, max_depth, min_sz, n_features, classes):
    """Grow a single decision tree on *train* and return its root node."""
    tree_root = get_split(train, n_features, classes)
    split(tree_root, max_depth, min_sz, n_features, 1, classes)
    return tree_root
def node_predict(node, r):
    """Follow the tree from *node* down to a leaf for sample *r*.

    A weight of inf in 'lw'/'rw' marks a non-terminal branch to descend
    into; any other value is the leaf's class slot.
    """
    if r[node['index']] < node['value']:
        branch, weight = 'left', 'lw'
    else:
        branch, weight = 'right', 'rw'
    if node[weight] == inf:
        return node_predict(node[branch], r)
    return node[weight]
def subsample(dataset, ratio):
    """Draw a random sample of floor(ratio * rows) rows from *dataset*."""
    n_sample = int64(floor(len(dataset) * ratio))
    # Random permutation of row indices, produced by sorting uniform keys.
    idx_w = list(map(lambda x: rand(), range(dataset.shape[0])))
    idx_s = argsort(idx_w)
    # NOTE(review): this samples *without* replacement, unlike the classic
    # bootstrap (with replacement) — confirm this is intended.
    sample = vstack(map(lambda x: dataset[idx_s[x], :], range(n_sample)))
    return sample
def bagging_predict(trees, row, classes):
    """Majority-vote class label for *row* over the tree ensemble.

    :param trees: list of tree root nodes
    :param row: one sample (feature vector)
    :param classes: dict mapping class label -> histogram slot
    :return: the winning class label, or inf if no label matches
    """
    votes = zeros(len(classes), dtype=int64)
    for tree in trees:
        votes[classes[node_predict(tree, row)]] += 1
    winner = argmax(votes)
    # Map the winning slot back to its class label.
    for label, slot in classes.items():
        if slot == winner:
            return label
    return inf
def random_forest(train, max_depth, min_sz, sample_sz, n_trees):
    """Train a random forest classifier on *train*.

    :param train: 2-D numpy array; the last column holds the class label
    :param max_depth: maximum tree depth
    :param min_sz: minimum node size before forcing a terminal node
    :param sample_sz: subsample ratio passed to subsample()
    :param n_trees: number of trees to grow
    :return: dict with 'trees' (list of root nodes) and 'classes'
        (mapping from class label to histogram slot)
    """
    cls = unique(train[:, -1])
    classes = dict()
    for c in range(cls.shape[0]):
        classes[int64(cls[c])] = c
    # BUG FIX: the original read the module-level global `dataset` (NameError
    # when imported as a library) and used its *row* count. The conventional
    # choice is sqrt of the number of features of the data passed in — all
    # columns except the trailing label column.
    n_features = int64(floor(sqrt(train.shape[1] - 1)))
    trees = list(
        map(lambda i:
            build_tree(
                subsample(train, sample_sz),
                max_depth,
                min_sz,
                n_features,
                classes
            ),
            range(n_trees))
    )
    # parallel
    #
    # trees =
    #     list(map(lambda i:
    #         build_tree(subsample(train, sample_sz)
    #             , max_depth, min_sz, n_features)
    #             , prange(n_trees)))
    #
    return {'trees': trees, 'classes': classes}
def predict(randomforest, test):
    """Return the ensemble's predicted class label for every row of *test*.

    :param randomforest: dict with 'trees' and 'classes' as returned by
        random_forest()
    :param test: 2-D numpy array of samples, one per row
    :return: list of predicted class labels
    """
    trees = randomforest['trees']
    classes = randomforest['classes']
    return [
        bagging_predict(trees, test[i, :], classes)
        for i in range(len(test))
    ]
if __name__ == "__main__":
    # Train on the first half of the breast cancer dataset and evaluate
    # predictions on the second half.
    file_name = "../datasets/breast_cancer.csv"
    dataset = genfromtxt(file_name, skip_header=1, delimiter=",")
    max_depth = 10
    min_size = 1
    sample_size = 1.0
    n_trees = [1, 5, 10]
    train = int64(dataset.shape[0] / 2)
    trees = random_forest(
        dataset[:train, :],
        max_depth,
        min_size,
        sample_size,
        n_trees[1]
    )
    print('predict')
    # BUG FIX: the result used to be bound to the name `predict`, shadowing
    # (and destroying) the predict() function itself.
    predictions = predict(trees, dataset[train:, :])
    print(predictions)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

# FIX: removed doubled word "name name" in the `group` option description.
DOCUMENTATION = '''
---
module: iam_user_info
short_description: Gather IAM user(s) facts in AWS
description:
  - This module can be used to gather IAM user(s) facts in AWS.
author:
  - <NAME> (@Constantin07)
  - <NAME> (@Akasurde)
options:
  name:
    description:
     - The name of the IAM user to look for.
    required: false
    type: str
  group:
    description:
     - The group name of the IAM user to look for. Mutually exclusive with C(path).
    required: false
    type: str
  path:
    description:
     - The path to the IAM user. Mutually exclusive with C(group).
     - If specified, then would get all user names whose path starts with user provided value.
    required: false
    default: '/'
    type: str
requirements:
  - botocore
  - boto3
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Gather facts about "test" user.
- name: Get IAM user facts
  iam_user_info:
    name: "test"

# Gather facts about all users in the "dev" group.
- name: Get IAM user facts
  iam_user_info:
    group: "dev"

# Gather facts about all users with "/division_abc/subdivision_xyz/" path.
- name: Get IAM user facts
  iam_user_info:
    path: "/division_abc/subdivision_xyz/"
'''

# FIX: corrected "maching" -> "matching" in the returned-value description.
RETURN = r'''
iam_users:
    description: list of matching iam users
    returned: success
    type: complex
    contains:
        arn:
            description: the ARN of the user
            returned: if user exists
            type: str
            sample: "arn:aws:iam::156360693172:user/dev/test_user"
        create_date:
            description: the datetime user was created
            returned: if user exists
            type: str
            sample: "2016-05-24T12:24:59+00:00"
        password_last_used:
            description: the last datetime the password was used by user
            returned: if password was used at least once
            type: str
            sample: "2016-05-25T13:39:11+00:00"
        path:
            description: the path to user
            returned: if user exists
            type: str
            sample: "/dev/"
        user_id:
            description: the unique user id
            returned: if user exists
            type: str
            sample: "AIDUIOOCQKTUGI6QJLGH2"
        user_name:
            description: the user name
            returned: if user exists
            type: str
            sample: "test_user"
'''
from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
@AWSRetry.exponential_backoff()
def list_iam_users_with_backoff(client, operation, **kwargs):
    """Run a paginated IAM *operation* with exponential-backoff retries.

    :param client: boto3 IAM client
    :param operation: name of a paginated client operation
        (here 'get_group' or 'list_users')
    :param kwargs: forwarded to paginator.paginate()
    :return: fully aggregated (de-paginated) result dict
    """
    paginator = client.get_paginator(operation)
    return paginator.paginate(**kwargs).build_full_result()
def list_iam_users(connection, module):
    """Look up IAM users per the module parameters and exit the module.

    Lookup strategy: by name only (direct get_user), by group, or by path;
    'group' and 'path' are mutually exclusive (enforced in main()). When
    'name' is combined with group/path, results are filtered to that user.
    Always ends with module.exit_json (or fail_json on AWS errors).
    """
    name = module.params.get('name')
    group = module.params.get('group')
    path = module.params.get('path')

    params = dict()
    iam_users = []

    if not group and not path:
        # Direct single-user lookup. NOTE(review): with no name, params is
        # empty and get_user() presumably returns the calling identity —
        # confirm against the boto3 IAM API.
        if name:
            params['UserName'] = name
        try:
            iam_users.append(connection.get_user(**params)['User'])
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)

    if group:
        params['GroupName'] = group
        try:
            iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
        if name:
            # Narrow the group's members down to the requested user name.
            iam_users = [user for user in iam_users if user['UserName'] == name]

    if path and not group:
        params['PathPrefix'] = path
        try:
            iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path)
        if name:
            # Narrow the path matches down to the requested user name.
            iam_users = [user for user in iam_users if user['UserName'] == name]

    module.exit_json(iam_users=[camel_dict_to_snake_dict(user) for user in iam_users])
def main():
    """Ansible module entry point: parse arguments and run the lookup."""
    argument_spec = dict(
        name=dict(),
        group=dict(),
        path=dict(default='/')
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['group', 'path']
        ],
        supports_check_mode=True
    )

    connection = module.client('iam')

    list_iam_users(connection, module)


if __name__ == '__main__':
    main()
|
__author__ = 'leif'
from django.db import models
from django.contrib.auth.models import User
from django.forms import ModelForm
from django import forms
from django.forms.widgets import RadioSelect, Textarea
from import_export import resources
from survey.forms import clean_to_zero
# Choice tuples for the survey model/form fields below; each entry is
# (stored value, human-readable label).
SEX_CHOICES = \
    (
        ('N', 'Not Indicated'),
        ('M', 'Male'), ('F', 'Female'), ('O', 'Other')
    )

YES_CHOICES = \
    (
        ('', 'Not Specified'),
        ('Y', 'Yes'), ('N', 'No')
    )

# Like YES_CHOICES but without an unspecified option.
YES_NO_CHOICES = \
    (
        ('Y', 'Yes'), ('N', 'No')
    )

STANDING_CHOICES = \
    (
        ('', 'Not Specified'),
        ('Freshman', 'Freshman'),
        ('Sophomore', 'Sophomore'),
        ('Junior', 'Junior'),
        ('Senior', 'Senior')
    )

YEAR_CHOICES = \
    (
        ('', 'Not Specified'),
        ('1', 'First Year'), ('2', 'Second Year'),
        ('3', 'Third Year'), ('4', 'Fourth Year'),
        ('5', 'Fifth Year'), ('6', 'Completed')
    )

# Self-rated ability on a 1-7 Likert scale ('0' = not specified).
ABILITY_CHOICES = \
    (
        ('0', 'Not Specified'),
        ('1', '1 - Novice'),
        ('2', '2'), ('3', '3'),
        ('4', '4'), ('5', '5'),
        ('6', '6'), ('7', '7 - Expert')
    )
class AnitaConsent(models.Model):
    """Records whether a participant agreed to take part in the study."""
    user = models.ForeignKey(User)
    agreed = models.BooleanField(default=False)
    def __unicode__(self):
        # Python 2 string representation: the participant's username.
        return self.user.username
class AnitaConsentForm(ModelForm):
    """Consent form; submission is rejected unless consent is given."""
    agreed = forms.BooleanField(label="Do you consent to participate in the study?", required=True)
    def clean(self):
        cleaned_data = super(AnitaConsentForm, self).clean()
        agreed = cleaned_data.get("agreed")
        if not agreed:
            # Hard stop: the study may not proceed without explicit consent.
            raise forms.ValidationError("Consent not given.")
        return cleaned_data
    class Meta:
        model = AnitaConsent
        # 'user' is attached by the view, never entered by the participant.
        exclude = ('user',)
class AnitaDemographicsSurvey(models.Model):
    """Participant demographics: age, status, occupation, education, search habits."""
    user = models.ForeignKey(User)
    age = models.IntegerField(
        default=0,
        help_text="Please provide your age (in years).")
    # sex = models.CharField(max_length=1, choices = SEX_CHOICES, help_text="Please indicate your sex.")
    status = models.CharField(max_length=100, default="")
    work = models.CharField(max_length=100, default="")
    # level holds a 3-char ED_CHOICES code (e.g. 'GED', 'PHD').
    level = models.CharField(max_length=3, default="")
    search_freq = models.IntegerField(
        default=0,
        help_text="How many times per week do you "
                  "conduct searches for information "
                  "(please enter a whole number)?")
    # search_ability holds a 1-char ABILITY_CHOICES code.
    search_ability = models.CharField(default="", max_length=1)
    def __unicode__(self):
        return self.user.username
# Highest degree earned; keys are 3-char codes stored in CharField(max_length=3).
ED_CHOICES = (
    ('', 'Not Specified'),
    ('GED', 'High School or GED'),
    ('ASS', "Associate's"),
    ('BCA', "Bachelor's"),
    ('MAS', "Master's"),
    ('PHD', "Doctorate"),
)

# Participant's standing at the university.
STATUS_CHOICES = (
    ('', 'Not Specified'),
    ('staff', 'Staff'),
    ('undergrad', 'Undergraduate Student'),
    ('postgrad', 'Graduate Student'),
)
class AnitaDemographicsSurveyForm(ModelForm):
    """Optional-answer demographics form; unanswered numeric fields default to 0."""
    age = forms.IntegerField(
        label="Please provide your age (in years).",
        max_value=100,
        min_value=0,
        required=False)
    # sex = forms.CharField(
    #     max_length=1,
    #     widget=forms.Select(
    #         choices=SEX_CHOICES),
    #     label="Please indicate your sex.",
    #     required=False)
    status = forms.CharField(
        widget=forms.Select(choices=STATUS_CHOICES),
        label="What is your status at University of Glasgow?",
        required=False)
    work = forms.CharField(
        widget=forms.TextInput(attrs={'size': '60', 'class': 'inputText'}),
        label="Please provide your occupation/major:", required=False)
    level = forms.CharField(
        max_length=3, widget=forms.Select(choices=ED_CHOICES),
        label="Please indicate the highest degree you've earned:", required=False)
    search_freq = forms.IntegerField(
        label="How many times per week do you conduct "
              "searches for information (please enter a whole number)?",
        max_value=10000, min_value=0, required=False)
    search_ability = forms.CharField(
        max_length=1, widget=forms.Select(choices=ABILITY_CHOICES),
        label="How would you rate your online search ability?", required=False)
    def clean(self):
        # All fields are optional; coerce blank numeric answers to 0 so the
        # model's IntegerFields always receive a value.
        cleaned_data = self.cleaned_data
        if not cleaned_data.get("age"):
            cleaned_data["age"] = 0
        if not cleaned_data.get("search_freq"):
            cleaned_data["search_freq"] = 0
        return cleaned_data
    class Meta:
        model = AnitaDemographicsSurvey
        # 'user' is set by the view.
        exclude = ('user',)
# Standard 7-point Likert agreement scale; only the endpoints carry labels.
LIKERT_CHOICES = (
    ((1, 'Strongly Disagree'),)
    + tuple((point, '') for point in range(2, 7))
    + ((7, 'Strongly Agree'),)
)
class AnitaPreTaskSurvey(models.Model):
    """Pre-task questionnaire: Likert responses (1-7, 0 = unanswered) per task/topic."""
    user = models.ForeignKey(User)
    task_id = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)
    apt_interested = models.IntegerField(default=0)
    apt_know = models.IntegerField(default=0)
    apt_clear_what = models.IntegerField(default=0)
    apt_info_diff = models.IntegerField(default=0)
    apt_sys_diff = models.IntegerField(default=0)
    apt_clear_how = models.IntegerField(default=0)
    apt_clear_steps = models.IntegerField(default=0)
    apt_difficult_finish = models.IntegerField(default=0)
    apt_task_diff = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class AnitaPreTaskSurveyForm(ModelForm):
    """Pre-task Likert form; unanswered items are zeroed by clean_to_zero."""
    apt_interested = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I am interested in this topic.", required=False)
    apt_know = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES, label="I know a lot about this topic.",
        required=False)
    apt_clear_what = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="It is clear what information I need to complete the task.",
        required=False)
    apt_info_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I think it will be difficult to find relevant items for this task.",
        required=False)
    apt_sys_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I think it will be difficult to search for information using this system.",
        required=False)
    apt_clear_how = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="It is clear how much information I need to complete the task.",
        required=False)
    apt_clear_steps = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="It is clear which steps I need to take to complete this task.",
        required=False)
    apt_difficult_finish = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I think it will be difficult to determine when I have enough information to finish the task.",
        required=False)
    apt_task_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="Overall, I think this will be a difficult task.", required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaPreTaskSurvey
        # NOTE(review): 'condition' is excluded here but AnitaPreTaskSurvey has
        # no such field — presumably copied from a sibling form; harmless but
        # worth confirming.
        exclude = ('user', 'task_id', 'topic_num', 'condition')
class AnitaPostTask0Survey(models.Model):
    """Post-task questionnaire (page 0): satisfaction and time-pressure items."""
    user = models.ForeignKey(User)
    task_id = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)
    # No default: the experiment condition must be supplied by the caller.
    condition = models.IntegerField()
    apt_satisfied_amount = models.IntegerField(default=0)
    apt_satisfied_steps = models.IntegerField(default=0)
    apt_work_fast = models.IntegerField(default=0)
    apt_difficult_enough = models.IntegerField(default=0)
    apt_time_pressure = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class AnitaPostTask0SurveyForm(ModelForm):
    """Post-task (page 0) Likert form; unanswered items are zeroed by clean_to_zero."""
    apt_satisfied_amount = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I am satisfied with the amount of information I found for the search topic.",
        required=False)
    apt_satisfied_steps = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I am satisfied with the steps I took to find information about the search topic.",
        required=False)
    apt_work_fast = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I needed to work fast to complete this task.", required=False)
    apt_difficult_enough = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought it was difficult to determine when I had enough information to finish the task.",
        required=False)
    apt_time_pressure = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I felt time pressure when completing this task.", required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaPostTask0Survey
        # Fix: 'condition' was missing from exclude, so the ModelForm
        # auto-generated a required, user-editable "condition" input for a
        # field that is experiment metadata set by the view (the sibling
        # AnitaPostTask1SurveyForm already excludes it).
        exclude = ('user', 'task_id', 'topic_num', 'condition')
class AnitaPostTask1Survey(models.Model):
    """Post-task questionnaire (page 1): difficulty, hurry and system satisfaction."""
    user = models.ForeignKey(User)
    task_id = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)
    # No default: the experiment condition must be supplied by the caller.
    condition = models.IntegerField()
    apt_search_diff = models.IntegerField(default=0)
    apt_hurried = models.IntegerField(default=0)
    apt_satisfied_systems = models.IntegerField(default=0)
    apt_doing_well = models.IntegerField(default=0)
    apt_found_enough = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class AnitaPostTask1SurveyForm(ModelForm):
    """Post-task (page 1) Likert form; unanswered items are zeroed by clean_to_zero."""
    apt_search_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought it was difficult to search for information on this topic.",
        required=False)
    apt_hurried = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I felt hurried or rushed when completing this task.", required=False)
    apt_satisfied_systems = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I am satisfied with how the system performed for this task.",
        required=False)
    apt_doing_well = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="While I was working on this task, I thought about how well I was doing on the task.",
        required=False)
    apt_found_enough = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I found enough information about the search topic.", required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaPostTask1Survey
        # Metadata fields are set by the view, not by the participant.
        exclude = ('user', 'task_id', 'topic_num','condition')
#
class AnitaPostTask2Survey(models.Model):
    """Post-task questionnaire (page 2): accuracy/speed priorities and monitoring."""
    user = models.ForeignKey(User)
    task_id = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)
    apt_accurate = models.IntegerField(default=0)
    apt_quick_results = models.IntegerField(default=0)
    apt_more_info = models.IntegerField(default=0)
    apt_time_left = models.IntegerField(default=0)
    apt_quick_task = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class AnitaPostTask2SurveyForm(ModelForm):
    """Post-task (page 2) Likert form; unanswered items are zeroed by clean_to_zero."""
    apt_accurate = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="It was important to me to complete this task accurately.",
        required=False)
    apt_quick_results = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The system retrieved and displayed search results pages quickly.",
        required=False)
    apt_more_info = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I thought about how much information I had already "
              "found and how much more I still needed.",
        required=False)
    apt_time_left = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I thought about how much time I had left on the task. ",
        required=False)
    apt_quick_task = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="It was important to me to complete this task quickly.",
        required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaPostTask2Survey
        exclude = ('user', 'task_id', 'topic_num')
#
class AnitaPostTask3Survey(models.Model):
    """Post-task questionnaire (page 3): system relevance/speed and task difficulty."""
    user = models.ForeignKey(User)
    task_id = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)
    apt_system_relevance = models.IntegerField(default=0)
    apt_system_download = models.IntegerField(default=0)
    apt_finding_diff = models.IntegerField(default=0)
    apt_all_info = models.IntegerField(default=0)
    apt_task_diff = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class AnitaPostTask3SurveyForm(ModelForm):
    """Post-task (page 3) Likert form; unanswered items are zeroed by clean_to_zero."""
    apt_system_relevance = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="This system provided me with a great deal of relevant information.",
        required=False)
    apt_system_download = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="The system displayed the individual news articles quickly.",
        required=False)
    apt_finding_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought it was difficult to find relevant information on this topic.",
        required=False)
    apt_all_info = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I found all of the information about the search topic in the search system.",
        required=False)
    apt_task_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="Overall, I thought this was a difficult task.", required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaPostTask3Survey
        exclude = ('user', 'task_id', 'topic_num')
class AnitaExit1Survey(models.Model):
    """Exit questionnaire (page 1): SUS-style system usability items."""
    user = models.ForeignKey(User)
    ae_use_freq = models.IntegerField(default=0)
    ae_complex = models.IntegerField(default=0)
    ae_easy = models.IntegerField(default=0)
    ae_integrated = models.IntegerField(default=0)
    ae_inconsistent = models.IntegerField(default=0)
    ae_learn_quickly = models.IntegerField(default=0)
    ae_cumbersome = models.IntegerField(default=0)
    ae_confident = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class AnitaExit1SurveyForm(ModelForm):
    """Exit (page 1) usability form; unanswered items are zeroed by clean_to_zero."""
    ae_use_freq = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I think that I would like to use this system frequently.", required=False)
    ae_complex = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I found the system unnecessarily complex.", required=False)
    ae_easy = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought the system was easy to use.", required=False)
    ae_integrated = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I found the various functions in the system to be well integrated.",
        required=False)
    ae_inconsistent = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought this system was too inconsistent.", required=False)
    ae_learn_quickly = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I would imagine that most people would learn to use this system very quickly.",
        required=False)
    ae_cumbersome = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I found the system very cumbersome to use.", required=False)
    ae_confident = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I felt very confident using the system.", required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaExit1Survey
        exclude = ('user',)
# 7-point "extent" scale; only the endpoints carry labels.
EXTENT_CHOICES = (
    ((1, 'Not at all'),)
    + tuple((point, '') for point in range(2, 7))
    + ((7, 'Very much'),)
)
class AnitaExit2Survey(models.Model):
    """Exit questionnaire (page 2): time-pressure scale plus free-text answers."""
    user = models.ForeignKey(User)
    # 7-point extent rating; 0 means unanswered.
    ae_time_extent = models.IntegerField(default=0)
    ae_time_reasonable = models.TextField(default="")
    ae_time_process = models.TextField(default="")
    ae_time_amount_found = models.TextField(default="")
    ae_time_amount_read = models.TextField(default="")
    ae_time_pressure_points = models.TextField(default="")
    def __unicode__(self):
        return self.user.username
class AnitaExit2SurveyForm(ModelForm):
    """Exit (page 2) form about time pressure; unanswered items zeroed by clean_to_zero."""
    ae_time_extent = forms.ChoiceField(
        widget=RadioSelect, choices=EXTENT_CHOICES,
        label="To what extent did the amount of time you "
              "had to complete these task influence your performance?",
        required=False)
    ae_time_reasonable = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Do you think the time you had to complete these"
              " tasks was reasonable? Please explain.",
        required=False)
    ae_time_process = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Did the time you had to complete the tasks impact"
              " the process you used to complete the tasks "
              "(e.g., steps, thought process)? Please explain.",
        required=False)
    ae_time_amount_found = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Did the time you had to complete the tasks impact"
              " the amount of information you found? Please explain.",
        required=False)
    ae_time_amount_read = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Did the time you had to complete the tasks impact"
              " the extent to which you read the information that you found? Please explain.",
        required=False)
    ae_time_pressure_points = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="At what point(s) during the search tasks did you"
              " feel time pressure, if any? Please explain.",
        required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaExit2Survey
        exclude = ('user',)
class AnitaExit3Survey(models.Model):
    """Exit questionnaire (page 3): free-text answers about system speed."""
    user = models.ForeignKey(User)
    ae_speed_compare = models.TextField(default="")
    ae_speed_process = models.TextField(default="")
    ae_speed_amount_found = models.TextField(default="")
    ae_speed_amount_read = models.TextField(default="")
    def __unicode__(self):
        return self.user.username
class AnitaExit3SurveyForm(ModelForm):
    """Exit (page 3) free-text form about system speed."""
    ae_speed_compare = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="How did the speed of this system compare to others "
              "you have used? Please explain.",
        required=False)
    ae_speed_process = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Did the system speed impact the "
              "process you used to complete the tasks "
              "(e.g., steps, thought process)? Please explain.",
        required=False)
    ae_speed_amount_found = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Did the system speed impact the amount of "
              "information you found for the tasks? "
              "Please explain.",
        required=False)
    ae_speed_amount_read = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 100, 'rows': 6}),
        label="Did the system speed impact the extent "
              "to which you read the information "
              "that you found? Please explain.",
        required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = AnitaExit3Survey
        exclude = ('user',)
class MickeyPostTaskSurvey(models.Model):
    """Post-task questionnaire about result-snippet quality (Mickey study)."""
    user = models.ForeignKey(User)
    task_id = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)
    condition = models.IntegerField(default=0)
    interface = models.IntegerField(default=0)
    snip_readable = models.IntegerField(default=0)
    snip_confidence = models.IntegerField(default=0)
    snip_informativeness = models.IntegerField(default=0)
    snip_relevance = models.IntegerField(default=0)
    snip_clarity = models.IntegerField(default=0)
    snip_size = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class MickeyPostTaskSurveyForm(ModelForm):
    """Snippet-quality Likert form (Mickey study); unanswered items zeroed by clean_to_zero."""
    snip_readable = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets (title, link and description) were not readable.",
        required=False)
    snip_confidence = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets increased my confidence in my decisions.",
        required=False)
    snip_informativeness = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets were not informative.",
        required=False)
    snip_relevance = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The results snippets help me judge the relevance of the document.",
        required=False)
    snip_clarity = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets were clear and concise.",
        required=False)
    snip_size = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets were not an appropriate size.",
        required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = MickeyPostTaskSurvey
        # Fix: 'interface' was missing from exclude, so the ModelForm
        # auto-generated a user-editable "interface" input for a field that is
        # experiment metadata set by the view (SnippetPostTaskSurveyForm, the
        # parallel form, already excludes it).
        exclude = ('user', 'task_id', 'topic_num', 'condition', 'interface')
class AnitaPreTaskResource(resources.ModelResource):
    """django-import-export resource for exporting AnitaPreTaskSurvey rows."""
    class Meta:
        model = AnitaPreTaskSurvey
        exclude = ('id',)
class MickeyPostTaskResource(resources.ModelResource):
    """django-import-export resource for exporting MickeyPostTaskSurvey rows."""
    class Meta:
        model = MickeyPostTaskSurvey
        exclude = ('id',)
class SnippetPostTaskSurvey(models.Model):
    """Post-task questionnaire about snippet quality (snippet-length study)."""
    user = models.ForeignKey(User)
    # Metadata fields have no defaults: callers must supply them.
    task_id = models.IntegerField()
    topic_num = models.IntegerField()
    condition = models.IntegerField()
    interface = models.IntegerField()
    #snip_helpfulness = models.IntegerField(default=0)
    snip_clarity = models.IntegerField(default=0)
    snip_confidence = models.IntegerField(default=0)
    snip_informativeness = models.IntegerField(default=0)
    snip_relevance = models.IntegerField(default=0)
    snip_readable = models.IntegerField(default=0)
    snip_size = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class SnippetPostTaskSurveyForm(ModelForm):
    """Snippet-quality Likert form; unanswered items are zeroed by clean_to_zero."""
    snip_clarity = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets (title, link and description) were clear and concise.",
        required=False)
    snip_confidence = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets increased my confidence in my decisions.",
        required=False)
    snip_informativeness = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets were not informative.",
        required=False)
    snip_relevance = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The results snippets help me judge the relevance of the document.",
        required=False)
    snip_readable = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets were not readable.",
        required=False)
    snip_size = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The result snippets were an appropriate size and length.",
        required=False)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = SnippetPostTaskSurvey
        # Metadata fields are set by the view, not by the participant.
        exclude = ('user', 'task_id', 'topic_num','condition','interface')
class SnippetPostTaskResource(resources.ModelResource):
    """django-import-export resource for exporting SnippetPostTaskSurvey rows."""
    class Meta:
        model = SnippetPostTaskSurvey
        exclude = ('id',)
class SystemSnippetPostTaskSurvey(models.Model):
    """Post-task questionnaire about system experience (snippet-length study)."""
    user = models.ForeignKey(User)
    # Metadata and answers have no defaults: callers must supply all values.
    task_id = models.IntegerField()
    topic_num = models.IntegerField()
    condition = models.IntegerField()
    interface = models.IntegerField()
    apt_accurate = models.IntegerField()
    apt_quick_results = models.IntegerField()
    apt_search_diff = models.IntegerField()
    apt_hurried = models.IntegerField()
    apt_satisfied_systems = models.IntegerField()
    ae_cumbersome = models.IntegerField()
    ae_confident = models.IntegerField()
    def __unicode__(self):
        return self.user.username
class SystemSnippetPostTaskSurveyForm(ModelForm):
    """System-experience Likert form; all items mandatory (required=True)."""
    apt_accurate = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="It was important to me to complete this task accurately.",
        required=True)
    apt_quick_results = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The system retrieved and displayed search results pages quickly.",
        required=True)
    apt_search_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought it was difficult to search for information on this topic.",
        required=True)
    apt_hurried = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I felt rushed when completing this task.", required=True)
    apt_satisfied_systems = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I am satisfied with how the system performed for this task.",
        required=True)
    ae_cumbersome = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I found the system very cumbersome to use.", required=True)
    ae_confident = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I felt very confident using the system.", required=True)
    def clean(self):
        # Shared helper: maps unanswered choices to 0 (defensive; all fields
        # are required here).
        return clean_to_zero(self)
    class Meta:
        model = SystemSnippetPostTaskSurvey
        # Metadata fields are set by the view, not by the participant.
        exclude = ('user', 'task_id', 'topic_num','condition','interface')
class SystemSnippetPostTaskResource(resources.ModelResource):
    """django-import-export resource for exporting SystemSnippetPostTaskSurvey rows."""
    class Meta:
        model = SystemSnippetPostTaskSurvey
        exclude = ('id',)
# Web-search frequency scale. NOTE: the blank "Not Specified" key is a string
# while the scored keys are ints — preserved exactly from the original.
SEARCH_FREQ = (
    ('', 'Not Specified'),
    (6, 'Many times a day'),
    (5, '1-2 times a day'),
    (4, 'Many times a week'),
    (3, 'A few times a week'),
    (2, 'Sometimes'),
    (1, 'Rarely'),
    (0, 'Never'),
)

# Pointing devices a participant may report using.
DEVICES = (
    ('', 'Not Specified'),
    ('MS', 'Mouse with Scroll Wheel/Gesture'),
    ('M', 'Mouse'),
    ('TS', 'Trackpad with Scroll/Gesture'),
    ('T', 'Trackpad'),
    ('O', 'Other'),
)

# Search engines a participant may report typically using.
ENGINES = (
    ('', 'Not Specified'),
    ('AOL', 'AOL'),
    ('BAI', 'Baidu'),
    ('BIN', 'Bing'),
    ('GOO', 'Google'),
    ('YAH', 'Yahoo!'),
    ('OTH', 'Other'),
)
class SnippetDemographicsSurvey(models.Model):
    """Demographics for the snippet-length study (all fields mandatory in the form)."""
    user = models.ForeignKey(User)
    age = models.IntegerField(
        help_text="Please provide your age (in years).")
    sex = models.CharField(max_length=1, choices = SEX_CHOICES, help_text="Please indicate your sex.")
    work = models.CharField(max_length=100)
    # level holds a 3-char ED_CHOICES code.
    level = models.CharField(max_length=3)
    search_freq = models.IntegerField()
    news_search_freq = models.IntegerField()
    # input_device holds a DEVICES code; search_engine holds an ENGINES code.
    input_device = models.CharField(max_length=2)
    search_engine = models.CharField(max_length=3)
    def __unicode__(self):
        return self.user.username
class SnippetDemographicsSurveyForm(ModelForm):
    """Mandatory demographics form for the snippet-length study (adults only, 18+)."""
    age = forms.IntegerField(
        label="Please provide your age (in years).",
        max_value=100,
        min_value=18,
        required=True)
    sex = forms.CharField(
        max_length=1,
        widget=forms.Select(
            choices=SEX_CHOICES),
        label="Please indicate your gender.",
        required=True)
    work = forms.CharField(
        widget=forms.TextInput(attrs={'size': '60', 'class': 'inputText'}),
        label="Please provide your occupation:", required=True)
    level = forms.CharField(
        max_length=3, widget=forms.Select(choices=ED_CHOICES),
        label="Please indicate the highest degree you've been awarded:", required=True)
    # min_value=-1 admits the blank '' "Not Specified" option in SEARCH_FREQ;
    # scored values run 0-6.
    search_freq = forms.IntegerField( widget=forms.Select(choices=SEARCH_FREQ),
        label="How often do you search the web?",
        max_value=7, min_value=-1, required=True)
    news_search_freq = forms.IntegerField( widget=forms.Select(choices=SEARCH_FREQ),
        label="How often do you search the web for news articles?",
        max_value=7, min_value=-1, required=True)
    search_engine = forms.CharField( widget=forms.Select(choices=ENGINES),
        label="What search engine do you typically use?",
        max_length=3, required=True)
    # NOTE(review): label reads "What kinds of pointing device" — likely meant
    # "What kind"; user-facing wording, confirm before changing.
    input_device = forms.CharField( widget=forms.Select(choices=DEVICES),
        label="What kinds of pointing device are you using?",
        max_length=2, required=True)
    def clean(self):
        # Defensive: coerce a missing age to 0 (age is required, so this
        # normally never fires).
        cleaned_data = self.cleaned_data
        if not cleaned_data.get("age"):
            cleaned_data["age"] = 0
        return cleaned_data
    class Meta:
        model = SnippetDemographicsSurvey
        exclude = ('user',)
# Snippet-length conditions shown in the exit survey.
# Fix: labels 1 and 2 previously carried a stray trailing ')' that was
# displayed to participants (e.g. 'Title + 1 line summary)').
SNIP_CHOICES = (
    (0, 'Title Only'),
    (1, 'Title + 1 line summary'),
    (2, 'Title + 1-2 lines summary'),
    (3, 'Title + 2-3 line summary'),
)
class SnippetExitSurvey(models.Model):
    """Exit questionnaire comparing the snippet-length conditions."""
    user = models.ForeignKey(User)
    # Each field stores a SNIP_CHOICES index (0-3).
    snip_info = models.IntegerField()
    snip_easy = models.IntegerField()
    snip_help = models.IntegerField()
    snip_useful = models.IntegerField()
    snip_prefer = models.IntegerField()
    snip_why = models.TextField()
    snip_improve = models.TextField()
    def __unicode__(self):
        return self.user.username
class SnippetExitSurveyForm(ModelForm):
    """Exit form: participant ranks the snippet-length conditions.

    NOTE(review): the labels on snip_easy ("unhelpful") and snip_help
    ("easiest to use") appear swapped relative to the field names — confirm
    against the analysis code before relying on either column.
    """
    snip_info = forms.ChoiceField(
        widget=RadioSelect, choices=SNIP_CHOICES,
        label="The most informative result summaries were:", required=True)
    snip_easy = forms.ChoiceField(
        widget=RadioSelect, choices=SNIP_CHOICES,
        label="The unhelpful result summaries were:", required=True)
    snip_help = forms.ChoiceField(
        widget=RadioSelect, choices=SNIP_CHOICES,
        label="The easiest result summaries to use were:", required=True)
    snip_useful = forms.ChoiceField(
        widget=RadioSelect, choices=SNIP_CHOICES,
        label="The least useful result summaries were:",
        required=True)
    snip_prefer = forms.ChoiceField(
        widget=RadioSelect, choices=SNIP_CHOICES,
        label="The most preferable type of result summaries for such tasks were:",
        required=True)
    snip_why = forms.CharField(widget=Textarea,
        label="Given your last answer, explain why you prefer result summaries of this length.",
        required=True)
    snip_improve = forms.CharField(widget=Textarea,
        label="Please provide suggestions on how this study could be improved.",
        required=True)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = SnippetExitSurvey
        exclude = ('user',)
class SnippetPreTaskTopicKnowledgeSurvey(models.Model):
    """Pre-task topic familiarity questionnaire (5-point TOPIC_* scales)."""
    user = models.ForeignKey(User)
    # Metadata fields have no defaults: callers must supply them.
    task_id = models.IntegerField()
    topic_num = models.IntegerField()
    condition = models.IntegerField()
    interface = models.IntegerField()
    topic_knowledge = models.IntegerField()
    topic_relevance = models.IntegerField()
    topic_interest = models.IntegerField()
    topic_searched = models.IntegerField()
    topic_difficulty = models.IntegerField()
    def __unicode__(self):
        return self.user.username
def _five_point(low_label, high_label):
    """Build a 5-point scale with labels only at the endpoints."""
    return ((1, low_label), (2, ''), (3, ''), (4, ''), (5, high_label))

# Topic-familiarity scales used by the pre-task knowledge survey.
TOPIC_NOTHING_CHOICES = _five_point('Nothing', 'I Know Details')
TOPIC_NOTATALL_CHOICES = _five_point('Not at all', 'Very Much')
TOPIC_NEVER_CHOICES = _five_point('Never', 'Very Often')
TOPIC_EASY_CHOICES = _five_point('Very Easy', 'Very Difficult')
TOPIC_NOTGOOD_CHOICES = _five_point('Not Good', 'Very Good')
TOPIC_UNSUCCESSFUL_CHOICES = _five_point('Unsuccessful', 'Successful')
TOPIC_FEW_CHOICES = _five_point('A few of them', 'All of them')
class SnippetPreTaskTopicKnowledgeSurveyForm(ModelForm):
    """Pre-task topic familiarity form; all items mandatory."""
    topic_knowledge = forms.ChoiceField(widget=RadioSelect,
        choices=TOPIC_NOTHING_CHOICES,
        label="How much do you know about this topic?",
        required=True)
    topic_relevance = forms.ChoiceField(widget=RadioSelect,
        choices=TOPIC_NOTATALL_CHOICES,
        label="How relevant is this topic to your life?",
        required=True)
    topic_interest = forms.ChoiceField(widget=RadioSelect,
        choices=TOPIC_NOTATALL_CHOICES,
        label="How interested are you to learn more about this topic?",
        required=True)
    topic_searched = forms.ChoiceField(widget=RadioSelect, choices=TOPIC_NEVER_CHOICES,
        label="Have you ever searched for information related to this topic?",
        required=True)
    topic_difficulty = forms.ChoiceField(widget=RadioSelect, choices=TOPIC_EASY_CHOICES,
        label="How difficult do you think it will be to search for information about this topic?",
        required=True)
    def clean(self):
        # Shared helper: maps unanswered choices to 0 (defensive; all fields
        # are required here).
        return clean_to_zero(self)
    class Meta:
        model = SnippetPreTaskTopicKnowledgeSurvey
        # Metadata fields are set by the view, not by the participant.
        exclude = ('user', 'task_id', 'topic_num','condition','interface')
########################################
# DIVERSITY POST TASK SURVEYS
#
########################################
class BehaveDiversityPostTaskSurvey(models.Model):
    """Post-task questionnaire on search behaviour (diversity study)."""
    user = models.ForeignKey(User)
    # Metadata fields have no defaults: callers must supply them.
    task_id = models.IntegerField()
    topic_num = models.IntegerField()
    condition = models.IntegerField()
    interface = models.IntegerField()
    diversity = models.IntegerField()
    beh_div_success = models.IntegerField(default=0)
    beh_div_speed = models.IntegerField(default=0)
    beh_div_queries = models.IntegerField(default=0)
    beh_div_documents = models.IntegerField(default=0)
    beh_div_time = models.IntegerField(default=0)
    beh_div_marked = models.IntegerField(default=0)
    def __unicode__(self):
        return self.user.username
class BehaveDiversityPostTaskSurveyForm(ModelForm):
    """Search-behaviour Likert form (diversity study); all items mandatory."""
    beh_div_success = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I was able to complete the search task successfully.",
        required=True)
    beh_div_speed = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        # Fix: label previously read "I was able the complete ..." — a
        # grammatical error shown verbatim to participants.
        label="I was able to complete the search task quickly.",
        required=True)
    beh_div_queries = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I tried to explore the topic with different queries.",
        required=True)
    beh_div_documents= forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I only examined a few documents per query.",
        required=True)
    beh_div_time = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I checked each document carefully before saving.",
        required=True)
    beh_div_marked = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="I saved more documents than I needed.",
        required=True)
    def clean(self):
        # Shared helper: maps unanswered choices to 0.
        return clean_to_zero(self)
    class Meta:
        model = BehaveDiversityPostTaskSurvey
        # Metadata fields are set by the view, not by the participant.
        exclude = ('user', 'task_id', 'topic_num','condition','interface','diversity')
class SystemDiversityPostTaskSurvey(models.Model):
    """Post-task questionnaire on system experience (diversity study)."""
    user = models.ForeignKey(User)
    # Metadata and answers have no defaults: callers must supply all values.
    task_id = models.IntegerField()
    topic_num = models.IntegerField()
    condition = models.IntegerField()
    interface = models.IntegerField()
    diversity = models.IntegerField()
    apt_quick_results = models.IntegerField()
    apt_search_diff = models.IntegerField()
    apt_time = models.IntegerField()
    apt_satisfied_systems = models.IntegerField()
    ae_cumbersome = models.IntegerField()
    ae_confident = models.IntegerField()
    def __unicode__(self):
        return self.user.username
class SystemDiversityPostTaskSurveyForm(ModelForm):
    """System-experience Likert form (diversity study); all items mandatory."""
    apt_quick_results = forms.ChoiceField(
        widget=RadioSelect,
        choices=LIKERT_CHOICES,
        label="The system helped me complete my task quickly.",
        required=True)
    apt_search_diff = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I thought the system made it difficult to find useful information.",
        required=True)
    apt_time = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="The system helped me to complete my task easily.", required=True)
    apt_satisfied_systems = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I am happy with how the system performed for this task.",
        required=True)
    ae_cumbersome = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="The system was very cumbersome to use.", required=True)
    ae_confident = forms.ChoiceField(
        widget=RadioSelect, choices=LIKERT_CHOICES,
        label="I felt confident in my decisions.", required=True)
    def clean(self):
        # Shared helper: maps unanswered choices to 0 (defensive; all fields
        # are required here).
        return clean_to_zero(self)
    class Meta:
        model = SystemDiversityPostTaskSurvey
        # Metadata fields are set by the view, not by the participant.
        exclude = ('user', 'task_id', 'topic_num','condition','interface','diversity')
########################################
# DIVERSITY POST EXPERIMENT SURVEYS
#
########################################
# Forced-choice scale comparing the two systems ("YoYo" vs "Hula").
# NOTE(review): codes 2 and 5 are unused -- presumably to leave a gap between
# the "Definitely"/"Slightly" anchors; confirm downstream analysis expects
# these exact integer codes.
DIVERSITY_CHOICES = ((1, 'Definitely YoYo'), (3, 'Slightly YoYo'),
                     (4,'Slightly Hula'), (6,'Definitely Hula') )
class DiversityExitSurvey(models.Model):
    """End-of-experiment survey comparing the two systems, one row per user."""
    user = models.ForeignKey(User)
    # Forced-choice answers (DIVERSITY_CHOICES codes).
    div_info = models.IntegerField()
    div_easy = models.IntegerField()
    div_help = models.IntegerField()
    div_useful = models.IntegerField()
    div_prefer = models.IntegerField()
    div_relevance_prefer = models.IntegerField()
    div_diversity_prefer = models.IntegerField()
    # Free-text answers.
    div_why = models.TextField()
    div_improve = models.TextField()
    def __unicode__(self):
        # Python 2-style display name (used e.g. by the Django admin).
        return self.user.username
class DiversityExitSurveyForm(ModelForm):
    """Exit survey form comparing the two systems ("YoYo" vs "Hula").
    BUG FIX: the labels for ``div_easy`` and ``div_help`` were swapped --
    ``div_easy`` read "The most unhelpful system was:" and ``div_help`` read
    "The easiest system to use was:". They now match the field names
    (mirroring ``div_useful``, which asks for the *least* useful system).
    Confirm against any already-collected responses before deploying.
    """
    div_info = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The most informative system was:", required=True)
    div_easy = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The easiest system to use was:", required=True)
    div_help = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The most unhelpful system was:", required=True)
    div_useful = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The least useful system was:",
        required=True)
    div_relevance_prefer = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The system that returned the most relevant information was:",
        required=True)
    div_diversity_prefer = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The system that returned the most diverse information was:",
        required=True)
    div_prefer = forms.ChoiceField(
        widget=RadioSelect, choices=DIVERSITY_CHOICES,
        label="The most preferable system overall was:",
        required=True)
    div_why = forms.CharField(widget=Textarea,
        label="Given your last answer, explain why you prefer the selected system.",
        required=True)
    div_improve = forms.CharField(widget=Textarea,
        label="Please provide suggestions on how this study could be improved.",
        required=True)
    def clean(self):
        # Delegate to the module's shared clean_to_zero helper.
        return clean_to_zero(self)
    class Meta:
        model = DiversityExitSurvey
        exclude = ('user',)
        # Explicit field order for rendering.
        fields = ['div_info', 'div_easy', 'div_help', 'div_useful', 'div_relevance_prefer', 'div_diversity_prefer', 'div_prefer', 'div_why', 'div_improve']
|
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
import util
from errors import ValidationError
from view.widgets.field import Field
BLUE = (.2, .2, 1, 1)
GRAY = (.5, .5, .5, 1)
RED = (.5, 0, 0, 1)
WHITE = (1, 1, 1, 1)
class Board(BoxLayout):
    """Kivy widget holding the player's own grid and the enemy grid.
    ``actor`` is the game-logic object the view delegates to; from this code
    it must expose ``board`` (with ``SIZE``, ``place()``, ``validate()``),
    ``try_hit()`` and ``finish_setup()``.
    """
    def __init__(self, actor):
        # Store the controlling actor before the widget tree initialises.
        self.actor = actor
        super(Board, self).__init__()
    def generate(self):
        """Fill both grids ('board_own', 'board_enemy') with header labels
        and pressable Field cells."""
        for board in ('board_own', 'board_enemy'):
            self.cols, self.rows = self.actor.board.SIZE
            for i in range(self.rows):
                for j in range(self.cols):
                    coord = util.to_coord(j, i)
                    if i == 0 and j == 0:
                        # Top-left corner: empty spacer cell.
                        label = Label(size_hint=(None, None), size=(50, 50))
                        self.ids[board].add_widget(label)
                    elif i == 0:
                        # Column header -- assumes to_coord yields a 2-part
                        # coordinate, first part = column name (TODO confirm).
                        label = Label(size_hint=(None, None), size=(50, 50), text=coord[0])
                        self.ids[board].add_widget(label)
                    elif j == 0:
                        # Row header (second part of the coordinate).
                        label = Label(size_hint=(None, None), size=(50, 50), text=coord[1])
                        self.ids[board].add_widget(label)
                    else:
                        field = Field(background_color=BLUE)
                        field.coord = coord
                        # NOTE(review): background_color was already passed to
                        # the constructor; this re-assignment looks redundant.
                        field.background_color = BLUE
                        if board == 'board_enemy':
                            # None = unknown cell (not yet fired at).
                            field.is_ship = None
                            field.disabled = True
                            field.bind(on_press=self.try_hit)
                        elif board == 'board_own':
                            field.is_ship = False
                            field.bind(on_press=self.place_ship)
                        self.ids[board].add_widget(field)
    def toggle_enemy_board(self, disable=True):
        """Enable/disable every enemy cell whose state is still unknown."""
        for cell in self.ids.board_enemy.children:
            if hasattr(cell, 'coord'):
                if cell.is_ship is None:
                    cell.disabled = disable
    def finish_setup(self):
        """Validate ship placement, freeze the own board, hide the setup
        controls, and hand control to the actor."""
        try:
            self.actor.board.validate()
        except ValidationError as e:
            # Invalid placement: report and stay in setup mode.
            print(e)
            return
        board = {}  # ToDo: Investigate why it works without this being used
        for cell in self.ids.board_own.children:
            if hasattr(cell, 'coord'):
                board[cell.coord] = cell.is_ship
                cell.disabled = True
        # self.ids.board_own.disabled = True
        self.ids.setup_controls.disabled = True
        self.ids.setup_controls.width = 0
        self.actor.finish_setup()
    def do_turn(self):
        # self.ids.board_enemy.disabled = False
        # Our turn: allow clicking unknown enemy cells again.
        self.toggle_enemy_board(disable=False)
    def place_ship(self, instance):
        """Mark the pressed own-board cell as carrying a ship."""
        self.actor.board.place(instance.coord)
        instance.is_ship = True
        instance.background_color = GRAY
    def try_hit(self, instance):
        """Fire at the pressed enemy cell and color it by the outcome."""
        assert self.actor
        coord = instance.coord
        has_hit = self.actor.try_hit(coord)
        if has_hit:
            instance.background_color = RED
            instance.is_ship = True
            instance.disabled = True
        else:
            instance.background_color = WHITE
            instance.is_ship = False
            instance.disabled = True
        # self.ids.board_enemy.disabled = True
        # Turn is over: lock the enemy board until do_turn() runs again.
        self.toggle_enemy_board()
    def enemy_hit(self, coord):
        """Color our cell red when the enemy hits it (no-op if not found)."""
        for cell in self.ids.board_own.children:
            if hasattr(cell, 'coord') and cell.coord == coord:
                break
        else:
            return
        cell.background_color = RED
    def enemy_missed(self, coord):
        """Color our cell white when the enemy misses (no-op if not found)."""
        for cell in self.ids.board_own.children:
            if hasattr(cell, 'coord') and cell.coord == coord:
                break
        else:
            return
        cell.background_color = WHITE
    def inform_win(self, actor):
        # Display the winner in the message label from the kv layout.
        self.ids.message.text = f"Player {actor.name} has won!"
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from django.conf import settings
from django.shortcuts import get_object_or_404
from ratelimit.decorators import ratelimit
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from spackmon.apps.main.tasks import (
update_build_status,
update_build_phase,
get_build,
import_configuration,
)
from spackmon.apps.main.utils import BUILD_STATUS
from spackmon.apps.main.models import Build
from rest_framework.response import Response
from rest_framework.views import APIView
from ..auth import is_authenticated
import json
# Flat list of valid status identifiers (first element of each BUILD_STATUS entry).
BUILD_STATUSES = [x[0] for x in BUILD_STATUS]
def get_build_environment(data):
    """Given a request, get the build environment. Return None if we are missing
    something. Since we require a spec to always get a Build, for now it makes
    sense to also include the full_hash and spack_version. If in the future
    we just need a build environment, this can be removed.

    Args:
        data: dict-like request payload.

    Returns:
        dict with all required build-environment fields, or None when any
        required field is missing or empty.
    """
    build_environment = {}
    # Ensure we have required fields
    for field in (
        "host_os",
        "platform",
        "host_target",
        "hostname",
        "kernel_version",
        "spack_version",
        "full_hash",
    ):
        value = data.get(field)
        if not value:
            # Missing or empty field: explicit None (was an implicit return).
            return None
        # Reuse the already-fetched value instead of a second lookup.
        build_environment[field] = value
    return build_environment
class UpdateBuildStatus(APIView):
    """Update the status of the BuildTask identified by a build id."""
    permission_classes = []
    allowed_methods = ("POST",)
    @never_cache
    @method_decorator(
        ratelimit(
            key="ip",
            rate=settings.VIEW_RATE_LIMIT,
            method="POST",
            block=settings.VIEW_RATE_LIMIT_BLOCK,
        )
    )
    def post(self, request, *args, **kwargs):
        """POST /ms1/builds/update/ to update one or more tasks"""
        allow_continue, response, user = is_authenticated(request)
        if not allow_continue:
            # Authentication failed: hand back the prepared response.
            return response
        payload = json.loads(request.body)
        status = payload.get("status")
        build_id = payload.get("build_id")
        # A build id is mandatory -- reject early.
        if not build_id:
            return Response(status=400, data={"message": "Missing required build id."})
        # The status must be one of the known build statuses.
        if status not in BUILD_STATUSES:
            choices = ",".join(BUILD_STATUSES)
            return Response(
                status=400,
                data={"message": "Invalid status. Choices are %s" % choices},
            )
        build = get_object_or_404(Build, pk=build_id)
        # Only the owner of the build may modify it.
        if build.owner != user:
            return Response(
                status=400,
                data={"message": "You do not own the build and cannot update it."},
            )
        result = update_build_status(build, status)
        return Response(status=result["code"], data=result)
class NewBuild(APIView):
    """Given a spec and environment information, create a new Build.
    If the build already exists, we return the build_id with it.
    """
    permission_classes = []
    allowed_methods = ("POST",)
    @never_cache
    @method_decorator(
        ratelimit(
            key="ip",
            rate=settings.VIEW_RATE_LIMIT,
            method="POST",
            block=settings.VIEW_RATE_LIMIT_BLOCK,
        )
    )
    def post(self, request, *args, **kwargs):
        """POST /ms1/builds/new/ to start a new build"""
        # If allow_continue False, return response
        allow_continue, response, user = is_authenticated(request)
        if not allow_continue:
            return response
        # Get the complete build environment
        data = json.loads(request.body)
        tags = data.get("tags")
        build_environment = get_build_environment(data)
        if not build_environment:
            return Response(
                status=400, data={"message": "Missing required build environment data."}
            )
        # Create the new build
        result = get_build(**build_environment, tags=tags, owner=user)
        # If a spec is included in the build, the requester is okay to create
        # it given that it does not exist.
        if "spec" in data and "spack_version" in data:
            spack_version = data.get("spack_version")
            # NOTE(review): the first get_build() result is discarded and the
            # build is fetched again after importing the configuration --
            # presumably so it picks up the newly imported spec; confirm the
            # first call is still needed.
            result = import_configuration(data["spec"], spack_version)
            result = get_build(**build_environment, tags=tags, owner=user)
        # Return the result payload with its status code.
        return Response(status=result["code"], data=result)
class UpdatePhaseStatus(APIView):
    """Given a phase for a spec, update the BuildPhase.
    Requires build_id, phase_name, and status in the JSON body; output is
    optional. The requesting user must own the referenced build.
    """
    permission_classes = []
    allowed_methods = ("POST",)
    @never_cache
    @method_decorator(
        ratelimit(
            key="ip",
            rate=settings.VIEW_RATE_LIMIT,
            method="POST",
            block=settings.VIEW_RATE_LIMIT_BLOCK,
        )
    )
    def post(self, request, *args, **kwargs):
        """POST /ms1/phases/metadata/ to update one or more tasks"""
        # FIX: removed the stray debug print which also advertised a path
        # ("/ms1/builds/phases/update/") that disagreed with the docstring.
        # If allow_continue False, return response
        allow_continue, response, user = is_authenticated(request)
        if not allow_continue:
            return response
        # Extra data here includes output, phase_name, and status
        data = json.loads(request.body)
        build_id = data.get("build_id")
        if not build_id:
            return Response(status=400, data={"message": "Missing required build_id."})
        output = data.get("output")
        phase_name = data.get("phase_name")
        status = data.get("status")
        # phase_name and status are required (output may be empty).
        if not phase_name or not status:
            return Response(
                status=400,
                # FIX: grammar in user-facing message ("phase_name, and status").
                data={"message": "phase_name and status are required."},
            )
        build = get_object_or_404(Build, pk=build_id)
        # The requesting user must own the build
        if build.owner != user:
            return Response(
                status=400,
                data={"message": "You do not own the build and cannot update it."},
            )
        # Update the phase
        data = update_build_phase(build, phase_name, status, output)
        return Response(status=data["code"], data=data)
|
# <gh_stars>0
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import constants
import plots
from processing.ranking import rank_results_pandas
def scores_baselines():
    """Bar plot of mean accuracy (95% CI) for the baseline models on the
    100-location path dataset; saves the figure as an EPS file."""
    dataset_name = 'path_set_100_no_singles'
    # One array of per-fold accuracies per baseline model.
    modular_results = rank_results_pandas(dataset_name, 'modular_features_0', 0)
    markov_results = rank_results_pandas(dataset_name, 'markov', 0)
    pseudo_markov_results = rank_results_pandas(dataset_name, 'pseudo_markov', 0)
    proximity_results = rank_results_pandas(dataset_name, 'proximity', 0)
    proximity_ordered_results = rank_results_pandas(dataset_name, 'proximity_ordered', 0)
    results_column = np.concatenate((modular_results, markov_results, pseudo_markov_results, proximity_results, proximity_ordered_results))
    model_column = np.repeat(['Log-modular', 'Markov', 'Heuristic Markov', 'Proximity', 'Proximity Ordered'], constants.N_FOLDS)
    dataset = pd.DataFrame({
        'scores': 100 * results_column,
        'model': model_column
    })
    ax = sns.barplot(x='model', y='scores', data=dataset, ci=95,
                     palette=sns.color_palette('Set1'))
    ax.set_xlabel('Model')
    # FIX: raw string -- '\%' is an invalid escape sequence in a normal
    # string literal (DeprecationWarning); the backslash is needed literally,
    # presumably for LaTeX axis rendering (TODO confirm plots.setup()).
    ax.set_ylabel(r'Accuracy (\%)')
    ax.set_title('Accuracy of baseline models')
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'baseline_models_100.eps'),
        bbox_inches='tight')
    plt.show()
def accuracy_fldc():
    """Heatmap of mean FLDC accuracy over the (L, K) dimension grid on the
    100-location path dataset; saves the figure as an EPS file."""
    dataset_name = 'path_set_100_no_singles'
    model_tpl = 'submod_f_0_l_{}_k_{}_iter_1000_noise_5_eta_0.1_adagrad_1'
    l_range = np.arange(5, 35, 5)
    k_range = np.arange(5, 35, 5)
    # Collect the per-fold scores of every (L, K) model.
    score_column = []
    l_column = []
    k_column = []
    for l_dim in l_range:
        for k_dim in k_range:
            fold_scores = rank_results_pandas(
                dataset_name, model_tpl.format(l_dim, k_dim), 0)
            score_column.extend(fold_scores)
            l_column.extend([l_dim] * len(fold_scores))
            k_column.extend([k_dim] * len(fold_scores))
    dataset = pd.DataFrame({
        'score': 100 * np.array(score_column),
        'l': l_column,
        'k': k_column
    })
    # Mean score per (L, K) cell, with K along the columns.
    dataset = dataset.groupby(['l', 'k'])['score'].mean().unstack(1)
    ax = sns.heatmap(dataset, vmin=10, vmax=20, annot=True, fmt='.1f',
                     linewidths=.5)
    ax.set_xlabel('$K$')
    ax.set_ylabel('$L$')
    ax.set_title(r'Accuracy (\%)')
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'large_fldc_dims.eps'),
        bbox_inches='tight')
    plt.show()
def comparison_large():
    """Bar plot comparing selected models on the 100-location path dataset.
    Only Markov, Proximity Ordered, Heuristic Markov and FLDC(10,10) are
    plotted; the other baselines are still computed so the commented-out
    full comparison can be re-enabled easily (NOTE(review): they are unused
    otherwise and cost extra result loading)."""
    dataset_name = 'path_set_100_no_singles'
    modular_results = rank_results_pandas(dataset_name, 'modular_features_0', 0)
    markov_results = rank_results_pandas(dataset_name, 'markov', 0)
    pseudo_markov_results = rank_results_pandas(dataset_name, 'pseudo_markov', 0)
    proximity_results = rank_results_pandas(dataset_name, 'proximity', 0)
    proximity_ordered_results = rank_results_pandas(dataset_name, 'proximity_ordered', 0)
    flid_results = rank_results_pandas(dataset_name, 'submod_f_0_l_10_k_0_iter_1000_noise_5_eta_0.1_adagrad_1', 0)
    fldc_results = rank_results_pandas(dataset_name, 'submod_f_0_l_10_k_10_iter_1000_noise_5_eta_0.1_adagrad_1', 0)
    results_column = np.concatenate((markov_results, proximity_ordered_results, pseudo_markov_results, fldc_results))
    # modular_results, proximity_results, flid_results,
    model_column = np.repeat(['Markov', 'Prox. Order', 'Heuristic Markov', 'FLDC (10,10)'], constants.N_FOLDS)
    #'Modular', 'Proximity', 'FLID (10)',
    dataset = pd.DataFrame({
        'scores': 100 * results_column,
        'model': model_column,
    })
    ax = sns.barplot(x='model', y='scores', data=dataset, ci=95,
                     palette=sns.color_palette('Set1'))
    ax.set_xlabel('Model')
    # FIX: raw string -- '\%' is an invalid escape sequence in a normal
    # string literal (DeprecationWarning); backslash needed literally.
    ax.set_ylabel(r'Accuracy (\%)')
    ax.set_title('Accuracy for dataset of top 100 locations')
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'all_models_100_presentation_b.eps'),
        bbox_inches='tight')
    plt.show()
if __name__ == '__main__':
    # Configure plotting style before drawing anything.
    plots.setup()
    sns.set_palette(sns.color_palette('Set1', 4))
    comparison_large()
|
"""scrapli_netconf.response"""
import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
from lxml import etree
from lxml.etree import Element
from scrapli.response import Response
from scrapli_netconf.constants import NetconfVersion
from scrapli_netconf.helper import remove_namespaces
LOG = logging.getLogger("response")
# "chunk match" matches two groups per section returned from the netconf server, first the length of
# the response, and second the response itself. we use the length of the response to validate the
# response is in fact X length. this regex is basically "start at line feed, and match "#123" where
# "123" is obviously any length of digits... then we don't capture zero or more newlines because we
# dont care about them. Next we have the main capture group -- this starts with a negative lookahead
# that says we want to stop matching as soon as we hit another "#123" *or* a "##" (end of message),
# after that we match anything "." and that is the "body" of the response
CHUNK_MATCH_1_1 = re.compile(pattern=rb"^#(\d+)(?:\n*)(((?!#\d+\n+|##).)*)", flags=re.M | re.S)
# recover=True lets lxml tolerate slightly malformed XML; remove_blank_text
# drops ignorable whitespace so pretty_print output stays clean.
PARSER = etree.XMLParser(remove_blank_text=True, recover=True)
class NetconfResponse(Response):
    """Scrapli Response specialised for netconf: stores the lxml input/result
    trees, validates netconf 1.1 chunk framing, and collects rpc-error
    messages from the reply."""
    def __init__(
        self,
        netconf_version: NetconfVersion,
        xml_input: Element,
        strip_namespaces: bool = True,
        failed_when_contains: Optional[Union[bytes, List[bytes]]] = None,
        **kwargs: Any,
    ):
        """
        Scrapli Netconf NetconfResponse
        Store channel_input, resulting output, and start/end/elapsed time information. Attempt to
        determine if command was successful or not and reflect that in a failed attribute.
        Args:
            netconf_version: string of netconf version; `1.0`|`1.1`
            xml_input: lxml Element of input to be sent to device
            strip_namespaces: strip out all namespaces if True, otherwise ignore them
            failed_when_contains: list of bytes that, if present in final output, represent a
                failed command/interaction -- should generally be left alone for netconf. Note that
                this differs from the base scrapli Response object as we want to be parsing/checking
                for these strings in raw byte strings we get back from the device
            kwargs: kwargs for instantiation of scrapli Response object supertype
        Returns:
            N/A # noqa: DAR202
        Raises:
            ValueError: if invalid netconf_version string
        """
        if netconf_version not in (NetconfVersion.VERSION_1_0, NetconfVersion.VERSION_1_1):
            raise ValueError(f"`netconf_version` should be one of 1.0|1.1, got `{netconf_version}`")
        self.netconf_version = netconf_version
        self.xml_input = xml_input
        self.strip_namespaces = strip_namespaces
        # Populated by record_response(); declared here for type checkers.
        self.xml_result: Element
        super().__init__(**kwargs)
        if failed_when_contains is None:
            # match on both opening and closing tags too so we never have to think about/compare
            # things with namespaces (the closing tags wont have namespaces)
            failed_when_contains = [
                b"</rpc-error>",
                b"</rpc-errors>",
                b"<rpc-error>",
                b"<rpc-errors>",
            ]
        if isinstance(failed_when_contains, bytes):
            failed_when_contains = [failed_when_contains]
        self.failed_when_contains = failed_when_contains
        self.error_messages: List[str] = []
    def record_response(self, result: bytes) -> None:
        """
        Record channel_input results and elapsed time of channel input/reading output
        Args:
            result: bytes result of channel_input
        Returns:
            N/A # noqa: DAR202
        Raises:
            N/A
        """
        self.finish_time = datetime.now()
        self.elapsed_time = (self.finish_time - self.start_time).total_seconds()
        self.raw_result = result
        # NOTE: self.failed is presumed True by default (set by the base
        # Response -- TODO confirm); it is only cleared here when no failure
        # indicator appears in the raw output.
        if not self.failed_when_contains:
            self.failed = False
        elif not any(err in self.raw_result for err in self.failed_when_contains):
            self.failed = False
        if self.netconf_version == NetconfVersion.VERSION_1_0:
            self._record_response_netconf_1_0()
        else:
            self._record_response_netconf_1_1()
        if self.failed:
            self._fetch_error_messages()
    def _record_response_netconf_1_0(self) -> None:
        """
        Record response for netconf version 1.0
        Args:
            N/A
        Returns:
            N/A # noqa: DAR202
        Raises:
            N/A
        """
        # remove the message end characters and xml document header see:
        # https://github.com/scrapli/scrapli_netconf/issues/1
        self.xml_result = etree.fromstring(
            self.raw_result.replace(b"]]>]]>", b"").replace(
                b'<?xml version="1.0" encoding="UTF-8"?>', b""
            ),
            parser=PARSER,
        )
        if self.strip_namespaces:
            self.xml_result = remove_namespaces(self.xml_result)
            self.result = etree.tostring(self.xml_result, pretty_print=True).decode()
        else:
            self.result = etree.tostring(self.xml_result, pretty_print=True).decode()
    def _validate_chunk_size_netconf_1_1(self, result: Tuple[str, bytes]) -> None:
        """
        Validate individual chunk size; handle parsing trailing new lines for chunk sizes
        It seems that some platforms behave slightly differently than others (looking at you IOSXE)
        in the way they count chunk sizes with respect to trailing whitespace. Per my reading of the
        RFC, the response for a netconf 1.1 response should look like this:
        ```
        ##XYZ
        <somexml>
        ##
        ```
        Where "XYZ" is an integer number of the count of chars in the following chunk (the chars up
        to the next "##" symbols), then the actual XML response, then a new line(!!!!) and a pair of
        hash symbols to indicate the chunk is complete.
        IOSXE seems to *not* want to see the newline between the XML payload and the double hash
        symbols... instead when it sees that newline it immediately returns the response. This
        breaks the core behavior of scrapli in that scrapli always writes the input, then reads the
        written inputs off the channel *before* sending a return character. This ensures that we
        never have to deal with stripping out the inputs and such because it has already been read.
        With IOSXE Behaving this way, we have to instead use `send_input` with the `eager` flag set
        -- this means that we do *not* read the inputs, we simply send a return. We then have to do
        a little extra parsing to strip out the inputs, but thats no big deal...
        Where this finally gets to "spacing" -- IOSXE seems to include trailing newlines *sometimes*
        but not other times, whereas IOSXR (for example) *always* counts a single trailing newline
        (after the XML). SO.... long story long... (the above chunk stuff doesn't necessarily matter
        for this, but felt like as good a place to document it as any...) this method deals w/
        newline counts -- we check the expected chunk length against the actual char count, the char
        count with all trailing whitespace stripped, and the count of the chunk + a *single*
        trailing newline character...
        FIN
        Args:
            result: Tuple from re.findall parsing the full response object
        Returns:
            N/A # noqa: DAR202
        Raises:
            N/A
        """
        expected_len = int(result[0])
        result_value = result[1]
        actual_len = len(result_value)
        rstripped_len = len(result_value.rstrip())
        trailing_newline_count = actual_len - rstripped_len
        # NOTE(review): when there are zero trailing newlines this still sets
        # extraneous_trailing_newline_count = 1, so trimmed_newline_len is
        # actual_len - 1; the expected == actual check below normally fires
        # first, but confirm the fall-through arithmetic is intentional.
        if trailing_newline_count > 1:
            extraneous_trailing_newline_count = trailing_newline_count - 1
        else:
            extraneous_trailing_newline_count = 1
        trimmed_newline_len = actual_len - extraneous_trailing_newline_count
        if rstripped_len == 0:
            # at least nokia tends to have itty bitty chunks of one element, and/or chunks that have
            # *only* whitespace and our regex ignores this, so if there was/is nothing in the result
            # section we can assume it was just whitespace and move on w/our lives
            actual_len = expected_len
        if expected_len == actual_len:
            return
        if expected_len == rstripped_len:
            return
        if expected_len == trimmed_newline_len:
            return
        LOG.critical(
            f"Return element length invalid, expected {expected_len} got {actual_len} for "
            f"element: {repr(result_value)}"
        )
        self.failed = True
    def _record_response_netconf_1_1(self) -> None:
        """
        Record response for netconf version 1.1
        Args:
            N/A
        Returns:
            N/A # noqa: DAR202
        Raises:
            N/A
        """
        result_sections = re.findall(pattern=CHUNK_MATCH_1_1, string=self.raw_result)
        # validate all received data
        for result in result_sections:
            self._validate_chunk_size_netconf_1_1(result=result)
        self.xml_result = etree.fromstring(
            b"\n".join(
                [
                    # remove the message end characters and xml document header see:
                    # https://github.com/scrapli/scrapli_netconf/issues/1
                    result[1].replace(b'<?xml version="1.0" encoding="UTF-8"?>', b"")
                    for result in result_sections
                ]
            ),
            parser=PARSER,
        )
        if self.strip_namespaces:
            self.xml_result = remove_namespaces(self.xml_result)
            self.result = etree.tostring(self.xml_result, pretty_print=True).decode()
        else:
            self.result = etree.tostring(self.xml_result, pretty_print=True).decode()
    def _fetch_error_messages(self) -> None:
        """
        Fetch all error messages (if any)
        RFC states that there MAY be more than one rpc-error so we just xpath for all
        "error-message" tags and pull out the text of those elements. The strip is just to remove
        leading/trailing white space to make things look a bit nicer.
        Args:
            N/A
        Returns:
            N/A # noqa: DAR202
        Raises:
            N/A
        """
        err_messages = self.xml_result.xpath("//rpc-error/error-message")
        self.error_messages = [err.text.strip() for err in err_messages]
    def get_xml_elements(self) -> Dict[str, Element]:
        """
        Parse each section under "data" into a dict of {tag: Element} for easy viewing/parsing
        Args:
            N/A
        Returns:
            xml_elements: dictionary of tag: Element
        Raises:
            N/A
        """
        xml_elements = {}
        data_element = self.xml_result.find("data", namespaces=self.xml_result.nsmap)
        # juniper doesn't return data in a "data" element for bare rpc calls, guard against that
        # breaking the iterchildren()
        if data_element is not None:
            for child in data_element.iterchildren():
                _tag = etree.QName(child.tag).localname
                xml_elements[_tag] = child
        return xml_elements
    def textfsm_parse_output(self, to_dict: bool = True) -> Union[Dict[str, Any], List[Any]]:
        """
        Override scrapli Response `textfsm_parse_output` method; not applicable for netconf
        Args:
            to_dict: ignore, only here to ensure compliance with supertype method
        Returns:
            N/A # noqa: DAR202
        Raises:
            NotImplementedError: always
        """
        raise NotImplementedError("No textfsm parsing for netconf output!")
    def genie_parse_output(self) -> Union[Dict[str, Any], List[Any]]:
        """
        Override scrapli Response `genie_parse_output` method; not applicable for netconf
        Args:
            N/A
        Returns:
            N/A # noqa: DAR202
        Raises:
            NotImplementedError: always
        """
        raise NotImplementedError("No genie parsing for netconf output!")
|
"""Command line interface to mr.bob"""
import pkg_resources
import sys
import os
import shutil
import six
import argparse
from .configurator import Configurator
from .configurator import maybe_bool
from .bobexceptions import ConfigurationError
from .bobexceptions import TemplateConfigurationError
from .parsing import parse_config, update_config, pretty_format_config
# http://docs.python.org/library/argparse.html
# Module-level parser shared by main() below.
parser = argparse.ArgumentParser(description='Filesystem template renderer')
parser.add_argument('template',
                    nargs="?",
                    help="""Template name to use for rendering. See
http://mrbob.readthedocs.org/en/latest/userguide.html#usage
for a guide to template syntax
""")
parser.add_argument('-O', '--target-directory',
                    default=".",
                    dest="target_directory",
                    help='Where to output rendered structure. Defaults to current directory')
parser.add_argument('-v', '--verbose',
                    action="store_true",
                    default=False,
                    help='Print more output for debugging')
parser.add_argument('-c', '--config',
                    action="store",
                    help='Configuration file to specify either [mr.bob] or [variables] sections')
parser.add_argument('-V', '--version',
                    action="store_true",
                    default=False,
                    help='Display version number')
parser.add_argument('-l', '--list-questions',
                    action="store_true",
                    default=False,
                    help='List all questions needed for the template')
parser.add_argument('-w', '--remember-answers',
                    action="store_true",
                    default=False,
                    help='Remember answers to .mrbob.ini file inside output directory')
parser.add_argument('-n', '--non-interactive',
                    dest='non_interactive',
                    action='store_true',
                    default=False,
                    help="Don't prompt for input. Fail if questions are required but not answered")
parser.add_argument('-q', '--quiet',
                    action="store_true",
                    default=False,
                    help='Suppress all but necessary output')
def main(args=None):
    """Main function called by `mrbob` command.
    Args:
        args: list of command line arguments; defaults to ``sys.argv[1:]``
            evaluated at call time. (FIX: the old default ``args=sys.argv[1:]``
            was evaluated once at import time, so later changes to
            ``sys.argv`` were silently ignored.)
    Returns:
        The version string for ``-V``, the questions listing for ``-l``,
        otherwise ``None``.
    """
    if args is None:
        args = sys.argv[1:]
    options = parser.parse_args(args=args)
    if options.version:
        version = pkg_resources.get_distribution('mr.bob').version
        return version
    if not options.template:
        parser.error('You must specify what template to use.')
    # Layered configuration: ~/.mrbob first, then --config file, then CLI.
    userconfig = os.path.expanduser('~/.mrbob')
    if os.path.exists(userconfig):
        global_config = parse_config(userconfig)
        global_bobconfig = global_config['mr.bob']
        global_variables = global_config['variables']
        global_defaults = global_config['defaults']
    else:
        global_bobconfig = {}
        global_variables = {}
        global_defaults = {}
    # Keep copies for the verbose dump below (update_config merges in place).
    original_global_bobconfig = dict(global_bobconfig)
    original_global_variables = dict(global_variables)
    original_global_defaults = dict(global_defaults)
    if options.config:
        try:
            file_config = parse_config(options.config)
        except ConfigurationError as e:
            parser.error(e)
        file_bobconfig = file_config['mr.bob']
        file_variables = file_config['variables']
        file_defaults = file_config['defaults']
    else:
        file_bobconfig = {}
        file_variables = {}
        file_defaults = {}
    cli_variables = {}  # TODO: implement variables on cli
    cli_defaults = {}  # TODO: implement defaults on cli
    cli_bobconfig = {
        'verbose': options.verbose,
        'quiet': options.quiet,
        'remember_answers': options.remember_answers,
        'non_interactive': options.non_interactive,
    }
    bobconfig = update_config(update_config(global_bobconfig, file_bobconfig), cli_bobconfig)
    variables = update_config(update_config(global_variables, file_variables), cli_variables)
    defaults = update_config(update_config(global_defaults, file_defaults), cli_defaults)
    c = None
    if bobconfig['verbose']:
        # Dump every configuration layer for debugging.
        print('')
        print('Configuration provided:')
        print('')
        print('[variables] from ~/.mrbob')
        for line in pretty_format_config(original_global_variables):
            print(line)
        print('[variables] from --config file')
        for line in pretty_format_config(file_variables):
            print(line)
        # TODO: implement variables on cli
        # print('[variables] from command line interface')
        # for line in pretty_format_config(file_variables):
        #     print(line)
        print('[defaults] from ~/.mrbob')
        for line in pretty_format_config(original_global_defaults):
            print(line)
        print('[defaults] from --config file')
        for line in pretty_format_config(file_defaults):
            print(line)
        # TODO: implement defaults on cli
        # print('[defaults] from command line interface')
        # for line in pretty_format_config(file_defaults):
        #     print(line)
        print('[mr.bob] from ~/.mrbob')
        for line in pretty_format_config(original_global_bobconfig):
            print(line)
        print('[mr.bob] from --config file')
        for line in pretty_format_config(file_bobconfig):
            print(line)
        print('[mr.bob] from command line interface')
        for line in pretty_format_config(cli_bobconfig):
            print(line)
    try:
        c = Configurator(template=options.template,
                         target_directory=options.target_directory,
                         bobconfig=bobconfig,
                         variables=variables,
                         defaults=defaults)
        if options.list_questions:
            return c.print_questions()
        if c.questions and not maybe_bool(bobconfig['quiet']):
            if options.non_interactive:
                print('')
                print('Welcome to mr.bob non-interactive mode. Questions will be answered by default values or hooks.')
                print('')
            else:
                print('')
                print('Welcome to mr.bob interactive mode. Before we generate directory structure, some questions need to be answered.')
                print('')
                print('Answer with a question mark to display help.')
                print('Values in square brackets at the end of the questions show the default value if there is no answer.')
                print('\n')
        c.ask_questions()
        if not options.non_interactive:
            print('')
        c.render()
        if not maybe_bool(bobconfig['quiet']):
            print("Generated file structure at %s" % os.path.realpath(options.target_directory))
            print('')
        return
    except TemplateConfigurationError as e:
        parser.error(six.u('TemplateConfigurationError: %s') % e.args[0])
    except ConfigurationError as e:
        parser.error(six.u('ConfigurationError: %s') % e.args[0])
    finally:
        # Clean up the extracted template when it lives in a temp dir.
        if c and c.is_tempdir:
            shutil.rmtree(c.template_dir)
# Script entry point: print main()'s result (version / questions / None).
if __name__ == '__main__':  # pragma: nocover
    print(main())
|
# <gh_stars>10-100
# -*- coding: utf-8 -*-
# Olympe sphinx-doc extension configuration file
from sphinx.ext.autodoc import FunctionDocumenter, ModuleLevelDocumenter
from sphinx.ext.autodoc import ClassDocumenter
from sphinx.domains.python import PyFunction, PyClasslike
from sphinx.util.logging import getLogger
from docutils.parsers.rst import Directive, directives
# olympe is an optional dependency at doc-build time; olympe_available
# records whether its arsdk message/enum types could be imported.
try:
    from olympe.arsdkng.messages import ArsdkMessage, ArsdkMessageType
    from olympe.arsdkng.enums import ArsdkEnum, ArsdkBitfield
    olympe_available = True
except Exception:
    olympe_available = False
logger = getLogger(__name__)
class ArsdkMessageDocumenter(FunctionDocumenter):
    """autodoc documenter for olympe ArsdkMessage objects.
    Command messages are rendered with the call-control keyword arguments
    (``_timeout``, ``_no_expect``), event messages with ``_policy``; both
    get ``_float_tol``.
    """
    directivetype = 'function'
    objtype = 'arsdk_message'
    # Documenter selection priority (higher wins over stock documenters).
    priority = 100
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        return isinstance(member, ArsdkMessage)
    def add_directive_header(self, sig):
        # Pick the directive according to the message type (command/event).
        self.directivetype = (
            "arsdk_cmd_message" if self.object.message_type is ArsdkMessageType.CMD else
            "arsdk_event_message"
        )
        ModuleLevelDocumenter.add_directive_header(self, sig)
    def format_args(self):
        """Build the rendered "(arg, arg=default, ...)" signature string."""
        args = self.object.args_name[:]
        args_default = self.object.args_default.copy()
        if self.object.message_type is ArsdkMessageType.CMD:
            args += ["_timeout"]
            args += ["_no_expect"]
            args_default.update(_timeout=self.object.timeout)
            args_default.update(_no_expect=False)
        else:
            args += ["_policy"]
            args_default.update(_policy="'check_wait'")
        args += ["_float_tol"]
        args_default.update(_float_tol=self.object.float_tol)
        # Render "name=default" when a default exists, else just "name".
        # FIX: generator expression replaces the old map/lambda plus a
        # redundant '"{}".format(...)' wrapper around the join.
        rendered = ", ".join(
            "{}={}".format(arg, args_default[arg]) if arg in args_default else arg
            for arg in args
        )
        return "({})".format(rendered)
    def format_signature(self):
        return self.format_args()
class ArsdkEnumDocumenter(ClassDocumenter):
    """Autodoc documenter for ArsdkEnum classes and enum values."""
    directivetype = "class"
    objtype = 'arsdk_enum'
    priority = 100  # outrank the stock ClassDocumenter for these objects

    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # Accept both the enum class itself (its metaclass) and enum values.
        return isinstance(member, (ArsdkEnum.__class__, ArsdkEnum))

    def add_directive_header(self, sig):
        self.directivetype = "arsdk_enum"
        ModuleLevelDocumenter.add_directive_header(self, sig)

    def format_signature(self):
        # Enums are documented without an argument list.
        return ""

    def document_members(self, all_members=False):
        sourcename = self.get_sourcename()
        if isinstance(self.object, ArsdkEnum.__class__):
            # Documenting the enum class: emit one ':name: doc (value)' field
            # per enum member instead of recursing with autodoc.
            for value in self.object:
                self.add_line(" :{}: {} ({})".format(
                    value._name_, value.__doc__, value._value_), sourcename)
        else:
            super(ArsdkEnumDocumenter, self).document_members(all_members)
class DummyOptionSpec(object):
    """Permissive option_spec: any option name maps to an identity converter."""

    def __getitem__(self, key):
        # type: (Any) -> Any
        def identity(value):
            return value
        return identity
class ArsdkFeatureDirective(Directive):
    """'auto_arsdkfeature' directive.

    Expands into two automodule runs over the olympe messages and enums
    modules of the feature name given as the single required argument.
    """
    option_spec = DummyOptionSpec()
    has_content = True
    required_arguments = 1  # the arsdk feature name (e.g. 'ardrone3')
    optional_arguments = 0
    final_argument_whitespace = True

    def __init__(self, name, arguments, options, *args):
        # Keep the trailing positional directive args to forward to automodule.
        self.directive_args = args
        super(ArsdkFeatureDirective, self).__init__(name, arguments, options, *args)

    def run(self):
        # NOTE(review): reaches into docutils' private registry and relies on
        # 'automodule' being registered — confirm on docutils/sphinx upgrades.
        automodule = directives._directives["automodule"]
        options = dict.fromkeys(('inherited-members', 'members', 'undoc-members'))
        message_module = "olympe.messages." + self.arguments[0]
        enum_module = "olympe.enums." + self.arguments[0]
        result = automodule("automodule", [message_module], options, *self.directive_args).run()
        result += automodule("automodule", [enum_module], options, *self.directive_args).run()
        return result
class PyArsdkMessageDirectiveBase(PyFunction):
    """Common base for the arsdk message py-domain directives."""
    pass
class PyArsdkCmdMessageDirective(PyArsdkMessageDirectiveBase):
    """
    Description of an arsdk command message directive
    """
    allow_nesting = True

    def needs_arglist(self):
        # type: () -> bool
        # Command messages are rendered with their argument list.
        return True

    def get_signature_prefix(self, sig):
        # type: (unicode) -> unicode
        # Prefix shown before the signature in the rendered docs.
        return "command message"

    def get_index_text(self, modname, name_cls):
        # type: (unicode, unicode) -> unicode
        # Label used in the general index.
        return "command_message"
class PyArsdkEventMessageDirective(PyArsdkMessageDirectiveBase):
    """
    Description of an arsdk event message directive
    """
    allow_nesting = True

    def needs_arglist(self):
        # type: () -> bool
        # Event messages are rendered with their argument list.
        return True

    def get_signature_prefix(self, sig):
        # type: (unicode) -> unicode
        # Prefix shown before the signature in the rendered docs.
        return "event message"

    def get_index_text(self, modname, name_cls):
        # type: (unicode, unicode) -> unicode
        # Label used in the general index.
        return "event_message"
class PyArsdkMessageDirective(PyArsdkMessageDirectiveBase):
    """
    Description of an arsdk message directive

    NOTE(review): unlike the cmd/event variants, this directive is never
    registered in setup() — confirm whether 'arsdk_message' is still used.
    """
    allow_nesting = True

    def needs_arglist(self) -> bool:
        # Messages are rendered with their argument list.
        return True

    def get_signature_prefix(self, sig: str) -> str:
        return "message"

    def get_index_text(self, modname: str, name_cls: str) -> str:
        return "message"
class PyArsdkEnumDirective(PyClasslike):
    """
    Description of an arsdk enum directive
    """
    allow_nesting = True

    def get_signature_prefix(self, sig):
        # type: (unicode) -> unicode
        # Prefix shown before the signature in the rendered docs.
        return "enum"

    def get_index_text(self, modname, name_cls):
        # type: (unicode, unicode) -> unicode
        # Label used in the general index.
        return "enum"
def arsdk_skip_member(app, what, name, obj, skip, options):
    """autodoc-skip-member callback: hide ArsdkBitfield classes from the docs."""
    return isinstance(obj, ArsdkBitfield.__class__)
def setup(app):
    """Sphinx extension entry point.

    Registers the arsdk documenters, directives and the skip-member filter.
    The autodocumenters and the skip callback dereference olympe classes
    (ArsdkMessage, ArsdkBitfield) at documentation time, so they are only
    registered when the olympe import succeeded; the original registered
    them unconditionally, which raised NameError during autodoc runs.
    """
    if not olympe_available:
        logger.warning("Olympe ARSDK message documentation will not be available")
    else:
        app.add_autodocumenter(ArsdkMessageDocumenter)
        app.add_autodocumenter(ArsdkEnumDocumenter)
        app.connect("autodoc-skip-member", arsdk_skip_member)
    # Directive registrations below do not depend on olympe.
    app.add_directive("auto_arsdkfeature", ArsdkFeatureDirective)
    app.add_directive_to_domain("py", "arsdk_cmd_message", PyArsdkCmdMessageDirective)
    app.add_directive_to_domain("py", "arsdk_event_message", PyArsdkEventMessageDirective)
    app.add_directive_to_domain("py", "arsdk_enum", PyArsdkEnumDirective)
|
<reponame>deavid/pineboo
"""options module."""
from optparse import OptionParser
from optparse import Values
from typing import Optional, List
def parse_options(custom_argv: Optional[List] = None) -> Values:
    """Load and parse options.

    :param custom_argv: argument list to parse instead of sys.argv (tests).
    :return: optparse Values; the derived ``loglevel`` and ``debug_level``
        attributes are added during post-processing below.
    """
    parser = OptionParser()
    # --- project / connection selection ---
    parser.add_option(
        "-l",
        "--load",
        dest="project",
        help="load projects/PROJECT.xml and run it",
        metavar="PROJECT",
    )
    parser.add_option(
        "-c",
        "--connect",
        dest="connection",
        help="connect to database with user and password.",
        metavar="user:passwd:driver_alias@host:port/database",
    )
    # --- logging / diagnostics ---
    parser.add_option(
        "-v", "--verbose", action="count", default=0, help="increase verbosity level"
    )  # default to 2 to see logger.info output; 1 hides it
    parser.add_option("-q", "--quiet", action="count", default=0, help="decrease verbosity level")
    parser.add_option(
        "--profile-time",
        action="store_true",
        dest="enable_profiler",
        default=False,
        help="Write profile information about CPU load after running",
    )
    parser.add_option(
        "--trace-debug",
        action="store_true",
        dest="trace_debug",
        default=False,
        help="Write lots of trace information to stdout",
    )
    parser.add_option(
        "--trace-loggers",
        dest="trace_loggers",
        default="",
        help="Comma separated list of modules to enable TRACE output",
    )
    parser.add_option(
        "--log-time",
        action="store_true",
        dest="log_time",
        default=False,
        help="Add timestamp to logs",
    )
    parser.add_option(
        "--trace-signals",
        action="store_true",
        dest="trace_signals",
        default=False,
        help="Wrap up every signal, connect and emit, and give useful tracebacks",
    )
    parser.add_option("-a", "--action", dest="action", help="load action", metavar="ACTION")
    # --- caching / preloading ---
    parser.add_option(
        "--no-python-cache",
        action="store_true",
        dest="no_python_cache",
        default=False,
        help="Always translate QS to Python",
    )
    parser.add_option(
        "--preload",
        action="store_true",
        dest="preload",
        default=False,
        help="Load everything. Then exit. (Populates Pineboo cache)",
    )
    parser.add_option(
        "--force-load",
        dest="forceload",
        default=None,
        metavar="ACTION",
        help="Preload actions containing string ACTION without caching. Useful to debug pythonyzer",
    )
    # --- DGI (interface backend) / modes ---
    parser.add_option(
        "--dgi", dest="dgi", default="qt", help="Change the gdi mode by default", metavar="DGI"
    )
    parser.add_option(
        "--dgi_parameter",
        dest="dgi_parameter",
        help="Change the gdi mode by default",
        metavar="DGIPARAMETER",
    )
    parser.add_option(
        "--test", action="store_true", dest="test", default=False, help="Launch all test"
    )
    parser.add_option(
        "--dbadmin",
        action="store_true",
        dest="enable_dbadmin",
        default=False,
        help="Enables DBAdmin mode",
    )
    parser.add_option(
        "--quick",
        action="store_true",
        dest="enable_quick",
        default=False,
        help="Enables Quick mode",
    )
    parser.add_option(
        "--no-x",
        action="store_false",
        dest="enable_gui",
        default=True,
        help="Disables graphical interface",
    )
    if custom_argv is None:
        (options, args) = parser.parse_args()
    else:
        (options, args) = parser.parse_args(custom_argv)
    # ---- OPTIONS POST PROCESSING -----
    # Forced preload implies bypassing the python cache.
    if options.forceload:
        options.no_python_cache = True
        options.preload = True
    # 30 = WARNING baseline; each -v lowers the level by 5, each -q raises it.
    options.loglevel = 30 + (options.quiet - options.verbose) * 5
    options.debug_level = 200  # 50 - (options.quiet - options.verbose) * 25
    return options
|
from torch_geometric.nn import MessagePassing
from torch_geometric.nn.inits import glorot, uniform
import torch
from torch.nn import Linear, ReLU, Sequential, LayerNorm
from torch.nn import Parameter
import numpy as np
from codes.abstract.abstract_gnn import AbstractGNN
class RGCNLayer(MessagePassing):
    """
    A simple implementation of R-GCN message passing and aggregation used for the synthetic task.
    Modified slightly from regular pytorch-geometric to allow scaling and replacement of messages.
    """
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_relations):
        # 'add' aggregation: incoming messages to a node are summed.
        super(RGCNLayer, self).__init__('add')
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_relations = num_relations
        # One (in_dim x out_dim) transform per relation, stored flattened.
        self.basis = Parameter(torch.Tensor(in_dim, out_dim * num_relations))
        # Per-relation additive bias.
        self.bias = Parameter(torch.Tensor(num_relations, out_dim))
        # Residual combiner used in update(): concat(aggregated, input) -> out_dim.
        self.residual = Sequential(Linear(in_dim + out_dim, out_dim),
                                   LayerNorm(out_dim),
                                   ReLU())
        self.process_message = Sequential(LayerNorm(out_dim),
                                          ReLU())
        self.reset_parameters()

    def reset_parameters(self):
        # Glorot init for the shared basis, uniform init for the biases.
        size = self.num_relations * self.in_dim
        glorot(self.basis)
        uniform(size, self.bias)

    def forward(self, x, edge_index, edge_type, message_scale=None, message_replacement=None):
        """Run one round of relational message passing over the graph.

        message_scale / message_replacement are optional per-edge gating and
        substitute messages (used for message attribution/ablation).
        """
        # Source and target node sets are the same graph.
        size = [x.shape[0], x.shape[0]]
        res = self.propagate(
            edge_index, x=x, edge_type=edge_type, size=size, message_scale=message_scale,
            message_replacement=message_replacement)
        return res

    def message(self, x_j, x_i, edge_type, message_scale, message_replacement):
        # Per-edge relation bias.
        b = torch.index_select(self.bias, 0, edge_type)
        # Project each source embedding with every relation transform, then
        # select the transform matching each edge's relation type.
        basis_messages = torch.matmul(x_j, self.basis).view(-1, self.bias.shape[0], self.out_dim)
        count = torch.arange(edge_type.shape[0])
        basis_messages = basis_messages[count, edge_type, :] + b
        basis_messages = self.process_message(basis_messages)
        if message_scale is not None:
            # Per-edge gating of the computed messages.
            basis_messages = basis_messages * message_scale.unsqueeze(-1)
        if message_replacement is not None:
            # Blend in replacement messages, weighted by (1 - scale).
            if basis_messages.shape == message_replacement.shape:
                basis_messages = basis_messages + (1 - message_scale).unsqueeze(-1) * message_replacement
            else:
                basis_messages = basis_messages + (1 - message_scale).unsqueeze(-1) * message_replacement.unsqueeze(
                    0)
        # Stash the tensors so attribution code can inspect the last pass.
        self.latest_messages = basis_messages
        self.latest_source_embeddings = x_j
        self.latest_target_embeddings = x_i
        return basis_messages

    def get_latest_source_embeddings(self):
        return self.latest_source_embeddings

    def get_latest_target_embeddings(self):
        return self.latest_target_embeddings

    def get_latest_messages(self):
        return self.latest_messages

    def update(self, aggr_out, x):
        # Residual update: combine aggregated messages with the node input.
        repr = torch.cat((aggr_out, x), 1)
        return self.residual(repr)
class RGCN(AbstractGNN):
    """R-GCN: an input projection followed by a stack of RGCNLayer blocks."""

    def __init__(self, input_dim, output_dim, n_relations, n_layers,
                 inverse_edges=False, separate_relation_types_for_inverse=False):
        AbstractGNN.__init__(self)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.n_relations = n_relations
        self.n_layers = n_layers
        self.inverse_edges = inverse_edges
        self.separate_relation_types_for_inverse = separate_relation_types_for_inverse
        self.define_weights_and_layers()

    def is_adj_mat(self):
        # This model consumes edge lists, not dense adjacency matrices.
        return False

    def define_weights_and_layers(self):
        """Instantiate the input projection and the message-passing layers."""
        relation_count = self.n_relations
        if self.inverse_edges and self.separate_relation_types_for_inverse:
            # Inverse edges get their own relation ids.
            relation_count *= 2
        self.gnn_layers = torch.nn.ModuleList(
            [RGCNLayer(self.output_dim, self.output_dim, relation_count)
             for _ in range(self.n_layers)]
        )
        self.W_input = torch.nn.Sequential(
            Linear(self.input_dim, self.output_dim),
            LayerNorm(self.output_dim),
            ReLU(),
        )

    def set_device(self, device):
        pass

    def get_initial_layer_input(self, vertex_embeddings):
        # Project raw vertex features into the hidden dimension.
        return self.W_input(vertex_embeddings)

    def process_layer(self, vertex_embeddings, edges, edge_types, gnn_layer,
                      message_scale, message_replacement, edge_direction_cutoff=None):
        """Apply one message-passing layer (edge_direction_cutoff is unused here)."""
        return gnn_layer(vertex_embeddings,
                         edges,
                         edge_types,
                         message_scale=message_scale,
                         message_replacement=message_replacement)
|
<reponame>valmikkpatel/whotracks.me
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import concurrent.futures
from pathlib import Path
from whotracksme.data.loader import DataSource
from whotracksme.website.build.home import build_home, build_privacy_policy
from whotracksme.website.build.blog import (
build_blogpost_list,
build_blogpost_pages,
build_rss_feeds,
load_blog_posts
)
from whotracksme.website.build.websites import (
build_website_list,
build_website_pages_batch,
)
from whotracksme.website.build.trackers import (
build_trackers_list,
tracker_page_data,
tracker_page,
build_tracker_page_batch
)
from whotracksme.website.templates import (
create_site_structure,
copy_custom_error_pages,
generate_sitemap,
)
# from whotracksme.website.build.companies import build_company_pages
from whotracksme.website.build.companies import build_company_reach_chart_page
from whotracksme.website.build.data import build_tracker_db, build_api_batch
from whotracksme.website.build.explorer import build_explorer
from whotracksme.website.utils import print_progress
DATA_DIRECTORY = "data"
STATIC_PATH = "static"
DATA_FOLDER = 1 << 0
STATIC_FOLDER = 1 << 1
TEMPLATES_FOLDER = 1 << 2
BLOG_FOLDER = 1 << 3
ALL = (
DATA_FOLDER |
STATIC_FOLDER |
TEMPLATES_FOLDER |
BLOG_FOLDER
)
class Builder:
    """Incremental static-site builder.

    `feed_event` receives a bitmask of *_FOLDER flags describing which
    source folders changed and rebuilds only the dependent parts of the
    site, fanning independent jobs out to a process pool.
    """

    # Bit flag for explorer-only rebuilds. The module-level flag set only
    # defines DATA/STATIC/TEMPLATES/BLOG; the original code referenced an
    # undefined global EXPLORER_FOLDER name (NameError at call time).
    EXPLORER_FOLDER = 1 << 4

    def __init__(self):
        self.data_source = None  # cached DataSource, refreshed on DATA_FOLDER events
        self.blog_posts = None   # cached blog posts, refreshed on BLOG_FOLDER events

    def build(self):
        """Rebuild everything."""
        self.feed_event(ALL)

    def on_explorer_folder_change(self):
        self.feed_event(Builder.EXPLORER_FOLDER)

    def on_data_folder_change(self):
        self.feed_event(DATA_FOLDER)

    def on_templates_folder_change(self):
        self.feed_event(TEMPLATES_FOLDER)

    def on_static_folder_change(self):
        self.feed_event(STATIC_FOLDER)

    def on_blog_folder_change(self):
        self.feed_event(BLOG_FOLDER)

    def feed_event(self, event):
        """Rebuild the site parts that depend on the folders flagged in `event`."""
        futures = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
            ###################################################################
            # This needs to be first, as other tasks will need to write in    #
            # the resulting folders.                                          #
            ###################################################################
            # Depends on folder: 'static/'
            if event & STATIC_FOLDER:
                create_site_structure(static_path=STATIC_PATH)
                print_progress(text='Create _site')

            ###################################################################
            # We then reload data in memory, before generating the site       #
            ###################################################################
            # Depends on folder: 'data/'
            if self.data_source is None or event & DATA_FOLDER:
                # Cache on the instance so unrelated events reuse loaded data.
                # (The original assigned a local, so the cache never filled.)
                self.data_source = DataSource()
                print_progress(text='Load data sources')
            data_source = self.data_source

            # Depends on: 'blog/'
            if self.blog_posts is None or event & BLOG_FOLDER:
                self.blog_posts = load_blog_posts()
                print_progress(text='Load blog posts')

            ###################################################################
            # Once site structure has been created and data is refreshed, we  #
            # can build all parts of the site in parallel, since there is no  #
            # dependencies between them.                                      #
            ###################################################################
            # Depends on: 'templates/', 'data/'
            if event & DATA_FOLDER or event & TEMPLATES_FOLDER:
                print_progress(text='Generate error pages')
                copy_custom_error_pages(data=data_source)

            def batched_job(inp, batch_fn, batch_size, message):
                """Submit `batch_fn` over `inp` in chunks, reporting progress."""
                batches = []
                input_size = len(inp)
                for batch in [inp[i:i + batch_size] for i in range(0, input_size, batch_size)]:
                    submission = executor.submit(batch_fn, batch=batch)
                    batches.append(submission)
                    futures.append(submission)
                for i, f in enumerate(concurrent.futures.as_completed(batches)):
                    print_progress(text=f"{message} {min((i+1) * batch_size, input_size)}/{input_size}")
                return batches

            # Explorer: depends on 'data/' and 'static/'
            if event & DATA_FOLDER or event & STATIC_FOLDER or event & Builder.EXPLORER_FOLDER:
                futures.append(executor.submit(
                    build_explorer,
                ))

            # Depends on: 'data/', 'blog/', 'templates/'
            if event & DATA_FOLDER or event & BLOG_FOLDER or event & TEMPLATES_FOLDER:
                futures.append(executor.submit(
                    generate_sitemap,
                    blog_posts=self.blog_posts
                ))

            # Depends on: 'data/', 'templates/'
            if event & DATA_FOLDER or event & TEMPLATES_FOLDER:
                # Home
                build_home(data=data_source)
                build_privacy_policy(data=data_source)
                # Trackers
                trackers = [id for id, _ in data_source.trackers.iter()]
                batched_job(trackers, build_tracker_page_batch, 150, "Generate tracker pages")
                build_trackers_list(data=data_source)
                # Websites
                websites = list(enumerate([id for id, _ in data_source.sites.iter()]))
                batched_job(websites, build_website_pages_batch, 400, "Generate website pages")
                build_website_list(data=data_source)
                # Companies
                build_company_reach_chart_page(data=data_source)

            # Depends on: 'data/', 'blog/', 'templates/'
            if event & DATA_FOLDER or event & BLOG_FOLDER or event & TEMPLATES_FOLDER:
                futures.append(executor.submit(
                    build_blogpost_pages,
                    blog_posts=self.blog_posts
                ))
                futures.append(executor.submit(
                    build_rss_feeds,
                    blog_posts=self.blog_posts
                ))
                build_blogpost_list(
                    data=data_source,
                    blog_posts=self.blog_posts
                )

            if event & DATA_FOLDER:
                futures.append(executor.submit(
                    build_tracker_db
                ))
                trackers = [id for id, _ in data_source.trackers.iter()]
                data_dir = Path('_site/data/trackers/global')
                if not data_dir.exists():
                    data_dir.mkdir(parents=True)
                batched_job(trackers, build_api_batch, 150, "Generate API pages")

            # TODO: uncomment when company profiles are ready
            # if args['site'] or args['companies']:
            #     company_process = Process(target=build_company_pages, args=(data_source,))
            #     company_process.start()

        # Wait for all jobs to finish
        concurrent.futures.wait(futures)

        # Getting the `result` of each promise (although none is expected)
        # allows to re-raise exception happening in children processes. If
        # we don't do it, exceptions will be silently ignored.
        for future in futures:
            future.result()
        print('Done')
|
import codecs
import csv
import datetime
import os
import pathlib
import re
import sqlite3
import sys
import simplekml
import magic
import shutil
from pathlib import Path
from bs4 import BeautifulSoup
class OutputParameters:
    '''Defines the parameters that are common for '''
    # static parameters
    nl = '\n'
    screen_output_file_path = ''

    def __init__(self, output_folder):
        """Create the timestamped report tree under `output_folder`."""
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%A_%H%M%S')
        # aleapp , aleappGUI, ileap_artifacts, report.py
        self.report_folder_base = os.path.join(
            output_folder, 'WLEAPP_Reports_' + timestamp)
        self.temp_folder = os.path.join(self.report_folder_base, 'temp')
        log_folder = os.path.join(self.report_folder_base, 'Script Logs')
        # Shared log locations are published as class attributes so the
        # module-level log helpers can reach them.
        OutputParameters.screen_output_file_path = os.path.join(log_folder, 'Screen Output.html')
        OutputParameters.screen_output_file_path_devinfo = os.path.join(log_folder, 'DeviceInfo.html')
        os.makedirs(log_folder)
        os.makedirs(self.temp_folder)
def is_platform_windows():
    """Return True when the interpreter is running on Windows."""
    return 'nt' == os.name
def sanitize_file_path(filename, replacement_char='_'):
    '''
    Removes illegal characters (for windows) from the string passed. Does not replace \ or /
    '''
    # Path separators are deliberately left alone so folder structure survives.
    illegal = r'[*?:"<>|\'\r\n]'
    return re.sub(illegal, replacement_char, filename)
def sanitize_file_name(filename, replacement_char='_'):
    '''
    Removes illegal characters (for windows) from the string passed.
    '''
    # Unlike sanitize_file_path, path separators are also replaced here.
    illegal = r'[\\/*?:"<>|\'\r\n]'
    return re.sub(illegal, replacement_char, filename)
def get_next_unused_name(path):
    '''Checks if path exists, if it does, finds an unused name by appending -xx
    where xx=00-99. Return value is new path.
    If it is a file like abc.txt, then abc-01.txt will be the next
    '''
    folder, candidate = os.path.split(path)
    stem, suffix = candidate, ''
    # Only a '.' after the first character counts as an extension separator,
    # so dotfiles like '.config' keep their whole name as the stem.
    if candidate.find('.') > 0:
        stem, suffix = os.path.splitext(candidate)
    counter = 1
    while os.path.exists(os.path.join(folder, candidate)):
        candidate = '{}-{:02}{}'.format(stem, counter, suffix)
        counter += 1
    return os.path.join(folder, candidate)
def open_sqlite_db_readonly(path):
    '''Opens an sqlite db in read-only mode, so original db (and -wal/journal are intact)'''
    if is_platform_windows():
        # Rewrite Windows long-path/UNC prefixes so they survive sqlite's
        # file: URI parsing; '%5C%5C%3F%5C' is the percent-encoding of '\\?\'.
        # NOTE(review): the UNC branch mixes encoded and raw backslashes —
        # confirm behaviour against a real UNC share.
        if path.startswith('\\\\?\\UNC\\'): # UNC long path
            path = "%5C%5C%3F%5C" + path[4:]
        elif path.startswith('\\\\?\\'): # normal long path
            path = "%5C%5C%3F%5C" + path[4:]
        elif path.startswith('\\\\'): # UNC path
            path = "%5C%5C%3F%5C\\UNC" + path[1:]
        else: # normal path
            path = "%5C%5C%3F%5C" + path
    # uri=True + mode=ro prevents any writes (and -wal/-journal creation).
    return sqlite3.connect (f"file:{path}?mode=ro", uri=True)
def does_column_exist_in_db(db, table_name, col_name):
    '''Checks if a specific col exists'''
    wanted = col_name.lower()
    try:
        db.row_factory = sqlite3.Row  # For fetching columns by name
        query = f"pragma table_info('{table_name}');"
        cursor = db.cursor()
        cursor.execute(query)
        # Column names are compared case-insensitively.
        for row in cursor.fetchall():
            if row['name'].lower() == wanted:
                return True
    except sqlite3.Error as ex:
        print(f"Query error, query={query} Error={str(ex)}")
    return False
def does_table_exist(db, table_name):
    '''Return True if a table named `table_name` exists in the sqlite db.'''
    # Parameterized query: the original interpolated table_name into the SQL
    # string (injection-prone for attacker-controlled names).
    query = "SELECT name FROM sqlite_master WHERE type='table' AND name=?"
    try:
        cursor = db.execute(query, (table_name,))
        return cursor.fetchone() is not None
    except sqlite3.Error as ex:
        # Fixed: the original caught the undefined name `sqlite3Error`
        # (missing dot), which raised NameError instead of logging.
        logfunc(f"Query error, query={query} Error={str(ex)}")
    return False
class GuiWindow:
    '''This only exists to hold window handle if script is run from GUI'''
    window_handle = None  # static variable, set by the GUI front-end
    progress_bar_total = 0
    progress_bar_handle = None

    @staticmethod
    def SetProgressBar(n):
        """Advance the GUI progress bar to `n` when a GUI is attached."""
        bar = GuiWindow.progress_bar_handle
        if bar:
            bar.UpdateBar(n)
def logfunc(message=""):
    """Echo `message` to stdout and append it to the HTML screen-output log."""
    with open(OutputParameters.screen_output_file_path, 'a', encoding='utf8') as log:
        print(message)
        log.write(message + '<br>' + OutputParameters.nl)
    if GuiWindow.window_handle:
        GuiWindow.window_handle.refresh()
def logdevinfo(message=""):
    """Append `message` to the device-info HTML log."""
    with open(OutputParameters.screen_output_file_path_devinfo, 'a', encoding='utf8') as log:
        log.write(message + '<br>' + OutputParameters.nl)
""" def deviceinfoin(ordes, kas, vas, sources): # unused function
sources = str(sources)
db = sqlite3.connect(reportfolderbase+'Device Info/di.db')
cursor = db.cursor()
datainsert = (ordes, kas, vas, sources,)
cursor.execute('INSERT INTO devinf (ord, ka, va, source) VALUES(?,?,?,?)', datainsert)
db.commit() """
def html2csv(reportfolderbase):
    """Convert every HTML report table under `reportfolderbase` into CSVs
    in '<base>/_CSV Exports' (one CSV per HTML file; tables appended).
    """
    # List of items that take too long to convert or that shouldn't be converted
    itemstoignore = ['index.html',
                     'Distribution Keys.html',
                     'StrucMetadata.html',
                     'StrucMetadataCombined.html']
    export_folder = os.path.join(reportfolderbase, '_CSV Exports')
    os.makedirs(export_folder, exist_ok=True)
    for root, dirs, files in sorted(os.walk(reportfolderbase)):
        for file in files:
            if not file.endswith('.html') or file in itemstoignore:
                continue
            fullpath = os.path.join(root, file)
            # `with` guarantees the handle is closed even if parsing raises
            # (the original closed it manually).
            with open(fullpath, 'r', encoding='utf8') as data:
                soup = BeautifulSoup(data, 'html.parser')
            csv_name = os.path.splitext(file)[0] + '.csv'
            for table in soup.find_all('table'):
                output_rows = [
                    [column.text for column in table_row.findAll('td')]
                    for table_row in table.findAll('tr')
                ]
                # Append mode: multiple tables from one HTML land in one CSV.
                with codecs.open(os.path.join(export_folder, csv_name), 'a', 'utf-8-sig') as csvfile:
                    writer = csv.writer(csvfile, quotechar='"', quoting=csv.QUOTE_ALL)
                    writer.writerows(output_rows)
def tsv(report_folder, data_headers, data_list, tsvname, source_file=None):
    """Append `data_list` rows to '<base>/_TSV Exports/<tsvname>.tsv'.

    A header row (plus a trailing 'source file' column when `source_file`
    is given) is written only when the file is created; the BOM-bearing
    'utf-8-sig' encoding is likewise applied only on first creation, matching
    the original duplicated branches.
    """
    report_folder = report_folder.rstrip('/').rstrip('\\')
    report_folder_base, _tail = os.path.split(report_folder)
    tsv_report_folder = os.path.join(report_folder_base, '_TSV Exports')
    os.makedirs(tsv_report_folder, exist_ok=True)
    tsv_path = os.path.join(tsv_report_folder, tsvname + '.tsv')
    is_new_file = not os.path.exists(tsv_path)
    # codecs.open(..., encoding=None) degrades to builtins.open, which is
    # exactly what the original existing-file branch did.
    encoding = 'utf-8-sig' if is_new_file else None
    with codecs.open(tsv_path, 'a', encoding) as tsvfile:
        tsv_writer = csv.writer(tsvfile, delimiter='\t')
        if is_new_file:
            if source_file is None:
                tsv_writer.writerow(data_headers)
            else:
                data_hdr = list(data_headers)
                data_hdr.append("source file")
                tsv_writer.writerow(tuple(data_hdr))
        for i in data_list:
            if source_file is None:
                tsv_writer.writerow(i)
            else:
                row_data = list(i)
                row_data.append(source_file)
                tsv_writer.writerow(tuple(row_data))
def timeline(report_folder, tlactivity, data_list, data_headers):
    """Append `data_list` rows to the shared timeline db '<base>/_Timeline/tl.db'.

    Each row is stored as (key, activity, datalist) where key is str(row[0])
    and datalist is the str() of a "header: value" list.
    """
    report_folder = report_folder.rstrip('/').rstrip('\\')
    report_folder_base, _tail = os.path.split(report_folder)
    tl_report_folder = os.path.join(report_folder_base, '_Timeline')
    tldb = os.path.join(tl_report_folder, 'tl.db')
    if os.path.isdir(tl_report_folder):
        db = sqlite3.connect(tldb)
        cursor = db.cursor()
        cursor.execute('''PRAGMA synchronous = EXTRA''')
        cursor.execute('''PRAGMA journal_mode = WAL''')
    else:
        os.makedirs(tl_report_folder)
        # create database
        db = sqlite3.connect(tldb, isolation_level='exclusive')
        cursor = db.cursor()
        cursor.execute(
            """
            CREATE TABLE data(key TEXT, activity TEXT, datalist TEXT)
            """
        )
        db.commit()
    # Fixed idiom: the original walked data_list with a manual while/counter
    # and used executemany() on single-row lists.
    for row in data_list:
        modified_list = [f'{header}: {value}' for header, value in zip(data_headers, row)]
        cursor.execute("INSERT INTO data VALUES(?,?,?)",
                       (str(row[0]), tlactivity, str(modified_list)))
    db.commit()
    db.close()
def kmlgen(report_folder, kmlactivity, data_list, data_headers):
    """Export rows carrying Timestamp/Latitude/Longitude columns as a KML file
    '<kmlactivity>.kml' plus entries in the shared '_latlong.db' index.

    Rows whose Latitude is falsy (empty/None/0) are skipped.
    """
    report_folder = report_folder.rstrip('/').rstrip('\\')
    report_folder_base, _tail = os.path.split(report_folder)
    kml_report_folder = os.path.join(report_folder_base, '_KML Exports')
    latlongdb = os.path.join(kml_report_folder, '_latlong.db')
    if os.path.isdir(kml_report_folder):
        db = sqlite3.connect(latlongdb)
        cursor = db.cursor()
        cursor.execute('''PRAGMA synchronous = EXTRA''')
        cursor.execute('''PRAGMA journal_mode = WAL''')
        db.commit()
    else:
        os.makedirs(kml_report_folder)
        db = sqlite3.connect(latlongdb)
        cursor = db.cursor()
        cursor.execute(
            """
            CREATE TABLE data(key TEXT, latitude TEXT, longitude TEXT, activity TEXT)
            """
        )
        db.commit()
    kml = simplekml.Kml(open=1)
    # Fixed idiom: manual while/counter replaced with direct iteration.
    for row in data_list:
        record = dict(zip(data_headers, row))
        times = record['Timestamp']
        lon = record['Longitude']
        lat = record['Latitude']
        if lat:
            pnt = kml.newpoint()
            pnt.name = times
            pnt.description = f"Timestamp: {times} - {kmlactivity}"
            pnt.coords = [(lon, lat)]
            cursor.execute("INSERT INTO data VALUES(?,?,?,?)", (times, lat, lon, kmlactivity))
    db.commit()
    db.close()
    kml.save(os.path.join(kml_report_folder, f'{kmlactivity}.kml'))
"""
Copyright 2021, CCL Forensics
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def utf8_in_extended_ascii(input_string, *, raise_on_unexpected=False):
    """Returns a tuple of bool (whether mis-encoded utf-8 is present) and str (the converted string)

    Scans `input_string` for character runs that look like UTF-8 byte
    sequences read as extended ASCII (one char per byte) and re-decodes
    them as UTF-8. With raise_on_unexpected=True, malformed sequences
    raise ValueError instead of being passed through unchanged.
    """
    output = []  # individual characters, join at the end
    is_in_multibyte = False  # True if we're currently inside a utf-8 multibyte character
    multibytes_expected = 0
    multibyte_buffer = []
    mis_encoded_utf8_present = False

    def handle_bad_data(index, character):
        # Flush the pending partial sequence as-is and reset the state machine.
        if not raise_on_unexpected: # not raising, so we dump the buffer into output and append this character
            output.extend(multibyte_buffer)
            multibyte_buffer.clear()
            output.append(character)
            nonlocal is_in_multibyte
            is_in_multibyte = False
            nonlocal multibytes_expected
            multibytes_expected = 0
        else:
            raise ValueError(f"Expected multibyte continuation at index: {index}")

    for idx, c in enumerate(input_string):
        code_point = ord(c)
        if code_point <= 0x7f or code_point > 0xf4: # ASCII Range data or higher than you get for mis-encoded utf-8:
            if not is_in_multibyte:
                output.append(c) # not in a multibyte, valid ascii-range data, so we append
            else:
                handle_bad_data(idx, c)
        else: # potentially utf-8
            if (code_point & 0xc0) == 0x80: # continuation byte
                if is_in_multibyte:
                    multibyte_buffer.append(c)
                else:
                    handle_bad_data(idx, c)
            else: # start-byte
                if not is_in_multibyte:
                    assert multibytes_expected == 0
                    assert len(multibyte_buffer) == 0
                    # Count leading 1-bits to learn the sequence length.
                    while (code_point & 0x80) != 0:
                        multibytes_expected += 1
                        code_point <<= 1
                    multibyte_buffer.append(c)
                    is_in_multibyte = True
                else:
                    handle_bad_data(idx, c)

        if is_in_multibyte and len(multibyte_buffer) == multibytes_expected: # output utf-8 character if complete
            utf_8_character = bytes(ord(x) for x in multibyte_buffer).decode("utf-8")
            output.append(utf_8_character)
            multibyte_buffer.clear()
            is_in_multibyte = False
            multibytes_expected = 0
            mis_encoded_utf8_present = True

    if multibyte_buffer: # if we have left-over data
        handle_bad_data(len(input_string), "")

    return mis_encoded_utf8_present, "".join(output)
def media_to_html(media_path, files_found, report_folder):
    """Return an HTML snippet (img/video/link) for entries of `files_found`
    matching `media_path`, copying the file under the report folder when
    needed so the emitted reference is relative.

    Falls back to returning `media_path` itself when nothing matches.
    """

    def relative_paths(source, splitter):
        """Rewrite `source` (str) as './...' relative to the LEAPP report folder."""
        splitted_a = source.split(splitter)
        for x in splitted_a:
            if 'LEAPP_Reports_' in x:
                report_folder = x
        splitted_b = source.split(report_folder)
        return '.' + splitted_b[1]

    platform = is_platform_windows()
    if platform:
        media_path = media_path.replace('/', '\\')
        splitter = '\\'
    else:
        splitter = '/'

    thumb = media_path
    for match in files_found:
        filename = os.path.basename(match)
        # Skip lock/metadata artifacts.
        if filename.startswith('~') or filename.startswith('._'):
            continue
        if media_path in match:
            dirs = os.path.dirname(report_folder)
            dirs = os.path.dirname(dirs)
            env_path = os.path.join(dirs, 'temp')
            if env_path in match:
                # Already inside the extraction temp tree: reference in place.
                source = relative_paths(match, splitter)
            else:
                # Copy the media under the report folder so the link stays valid.
                dirname = os.path.basename(os.path.dirname(match))
                locationfiles = Path(report_folder).joinpath(dirname)
                Path(f'{locationfiles}').mkdir(parents=True, exist_ok=True)
                shutil.copy2(match, locationfiles)
                # Fixed: pass a str — the original passed a Path object, and
                # relative_paths calls .split(), which Path does not have.
                source = relative_paths(str(Path(locationfiles, Path(match).name)), splitter)
            mimetype = magic.from_file(match, mime=True)
            if 'video' in mimetype:
                thumb = f'<video width="320" height="240" controls="controls"><source src="{source}" type="video/mp4">Your browser does not support the video tag.</video>'
            elif 'image' in mimetype:
                # Fixed: original emitted '..."width="300"' without a space
                # between the attributes.
                thumb = f'<img src="{source}" width="300"></img>'
            else:
                # Fixed: original closed the anchor with the invalid tag '</>'.
                thumb = f'<a href="{source}"> Link to {mimetype} </a>'
    return thumb
def usergen(report_folder, data_list_usernames):
    """Append username records to the shared '_Usernames DB/_usernames.db'.

    Each record supplies at least 5 fields in order:
    (username, appname, artifactname, html_report, data).
    """
    report_folder = report_folder.rstrip('/').rstrip('\\')
    report_folder_base, _tail = os.path.split(report_folder)
    udb_report_folder = os.path.join(report_folder_base, '_Usernames DB')
    usernames = os.path.join(udb_report_folder, '_usernames.db')
    if os.path.isdir(udb_report_folder):
        db = sqlite3.connect(usernames)
        cursor = db.cursor()
        cursor.execute('''PRAGMA synchronous = EXTRA''')
        cursor.execute('''PRAGMA journal_mode = WAL''')
        db.commit()
    else:
        os.makedirs(udb_report_folder)
        db = sqlite3.connect(usernames)
        cursor = db.cursor()
        cursor.execute(
            """
            CREATE TABLE data(username TEXT, appname TEXT, artifactname text, html_report text, data TEXT)
            """
        )
        db.commit()
    # Fixed idiom: manual while/counter and per-field indexing replaced with
    # a single executemany over the first five fields of each record.
    cursor.executemany(
        "INSERT INTO data VALUES(?,?,?,?,?)",
        ((record[0], record[1], record[2], record[3], record[4])
         for record in data_list_usernames))
    db.commit()
    db.close()
def ipgen(report_folder, data_list_ipaddress):
    """Append IP-address records to the shared '_ipaddresses.db' SQLite database.

    The database lives in an '_IPAddress DB' folder that is a sibling of
    *report_folder*; the folder, database and ``data`` table are created on
    first use and reused afterwards.

    :param report_folder: Path to the current report folder; only its parent
        directory is used to locate/create the IP-address database.
    :param data_list_ipaddress: Iterable of 5-item rows:
        (ipaddress, appname, artifactname, html_report, data).
    :return: None
    """
    # Strip a trailing path separator so os.path.split() yields the true parent.
    report_folder = report_folder.rstrip('/')
    report_folder = report_folder.rstrip('\\')
    report_folder_base, tail = os.path.split(report_folder)
    udb_report_folder = os.path.join(report_folder_base, '_IPAddress DB')
    ipaddress = os.path.join(udb_report_folder, '_ipaddresses.db')
    if os.path.isdir(udb_report_folder):
        # Database already exists: reuse it, keeping the original PRAGMAs.
        db = sqlite3.connect(ipaddress)
        cursor = db.cursor()
        cursor.execute('''PRAGMA synchronous = EXTRA''')
        cursor.execute('''PRAGMA journal_mode = WAL''')
        db.commit()
    else:
        # First writer: create the folder, the database and the data table.
        os.makedirs(udb_report_folder)
        db = sqlite3.connect(ipaddress)
        cursor = db.cursor()
        cursor.execute(
            """
            CREATE TABLE data(ipaddress TEXT, appname TEXT, artifactname text, html_report text, data TEXT)
            """
        )
        db.commit()
    # Bulk-insert every row in a single executemany() call instead of the
    # previous index-based while loop with one execute() per row.
    cursor.executemany(
        "INSERT INTO data VALUES(?,?,?,?,?)",
        ((row[0], row[1], row[2], row[3], row[4]) for row in data_list_ipaddress),
    )
    db.commit()
    db.close()
|
import json
import os.path
import requests
import socket
import sys
from base64 import b64decode
from bs4 import BeautifulSoup as bs
from httpobs.conf import (SCANNER_ALLOW_LOCALHOST,
SCANNER_PINNED_DOMAINS)
from requests.structures import CaseInsensitiveDict
HSTS_URL = ('https://chromium.googlesource.com/chromium'
'/src/net/+/master/http/transport_security_state_static.json?format=TEXT')
def parse_http_equiv_headers(html: str) -> CaseInsensitiveDict:
    """Extract pseudo-headers from ``<meta http-equiv>`` tags in an HTML page.

    :param html: raw HTML text of the page
    :return: case-insensitive mapping of header name -> content; empty when
        the HTML cannot be parsed
    """
    http_equiv_headers = CaseInsensitiveDict()
    # Try to parse the HTML; on any parser failure return an empty mapping.
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
    try:
        soup = bs(html, 'html.parser')
    except Exception:
        return http_equiv_headers
    # Find all the meta tags
    metas = soup.find_all('meta')
    for meta in metas:
        if meta.has_attr('http-equiv') and meta.has_attr('content'):
            # Add support for multiple CSP policies specified via http-equiv
            # See issue: https://github.com/mozilla/http-observatory/issues/266
            # Note that this is so far only done for CSP and not for other types
            # of http-equiv
            if (meta.get('http-equiv', '').lower().strip() == 'content-security-policy' and
                    'Content-Security-Policy' in http_equiv_headers):
                # Concatenate multiple CSP policies with '; ' as a separator
                http_equiv_headers['Content-Security-Policy'] += '; ' + meta.get('content')
            else:
                http_equiv_headers[meta.get('http-equiv')] = meta.get('content')
        # Technically not HTTP Equiv, but I'm treating it that way
        elif meta.get('name', '').lower().strip() == 'referrer' and meta.has_attr('content'):
            http_equiv_headers['Referrer-Policy'] = meta.get('content')
    return http_equiv_headers
def retrieve_store_hsts_preload_list():
    """Download the Chromium HSTS preload list and cache it as JSON on disk.

    Writes ``../conf/hsts-preload.json`` relative to this module; on any
    failure an error is printed to stderr and the existing file is untouched.
    """
    # Download the Google HSTS Preload List
    try:
        r = b64decode(requests.get(HSTS_URL).text).decode('utf-8').split('\n')
        # Remove all the comments (the upstream file is JSON with //-style comments)
        r = ''.join([line.split('// ')[0] for line in r if line.strip() != '//'])
        r = json.loads(r)
        # Mapping of site -> whether it includes subdomains
        hsts = {site['name']: {
            'includeSubDomains': site.get('include_subdomains', False),
            'includeSubDomainsForPinning':
                site.get('include_subdomains', False) or site.get('include_subdomains_for_pinning', False),
            'mode': site.get('mode'),
            'pinned': True if 'pins' in site else False,
        } for site in r['entries']}
        # Add in the manually pinned domains
        for pinned_domain in SCANNER_PINNED_DOMAINS:
            hsts[pinned_domain] = {
                'includeSubDomains': True,
                'includeSubDomainsForPinning': True,
                'mode': 'force-https',
                'pinned': True
            }
        # Write json file to disk
        __dirname = os.path.abspath(os.path.dirname(__file__))
        __filename = os.path.join(__dirname, '..', 'conf', 'hsts-preload.json')
        with open(__filename, 'w') as f:
            json.dump(hsts, f, indent=2, sort_keys=True)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        print('Unable to download the Chromium HSTS preload list.', file=sys.stderr)
def sanitize_headers(headers: dict) -> dict:
    """
    :param headers: raw headers object from a request's response
    :return: a plain dict copy of the headers, or None if they are oversized
        or cannot be converted
    """
    try:
        # Cap the serialized size at 16 kB to avoid storing huge header blobs.
        if len(str(headers)) <= 16384:
            return dict(headers)
        else:
            return None
    except Exception:
        # Narrowed from a bare except; anything unconvertible yields None.
        return None
def valid_hostname(hostname: str):
    """
    :param hostname: The hostname requested in the scan
    :return: Hostname if it's valid, None if it's an IP address, otherwise False
    """
    # Block attempts to scan things like 'localhost' if not allowed
    if ('.' not in hostname or 'localhost' in hostname) and not SCANNER_ALLOW_LOCALHOST:
        return False
    # First, let's try to see if it's an IPv4 address
    try:
        socket.inet_aton(hostname)  # inet_aton() raises OSError if hostname is not a valid IP address
        return None  # If we get this far, it's an IP address and therefore not a valid fqdn
    except OSError:
        pass
    # And IPv6
    try:
        socket.inet_pton(socket.AF_INET6, hostname)  # same as inet_aton(), but for IPv6
        return None
    except OSError:
        pass
    # Then, try to do a lookup on the hostname; this should return at least one entry and should be the first time
    # that the validator is making a network connection -- the same that requests would make.
    try:
        hostname_ips = socket.getaddrinfo(hostname, 443)
        # This shouldn't trigger, since getaddrinfo should generate gaierror if there's no A records. Nevertheless,
        # I want to be careful in case of edge cases. This does make it hard to test.
        if len(hostname_ips) < 1:
            return False
    # getaddrinfo raises gaierror (an OSError) on lookup failure and
    # UnicodeError for malformed/overlong labels; both mean "not valid".
    except (OSError, UnicodeError):
        return False
    # If we've made it this far, then everything is good to go! Woohoo!
    return hostname
|
<gh_stars>0
import torch
from torch import nn
from base import BaseGanCriterion
# Adapted from https://github.com/meyerscetbon/LinearSinkhorn/blob/01943c72b03ea4b56df12ca5a9d0c1da2516d1ae/EXP_GAN/torch_lin_sinkhorn.py
class Lin_Sinkhorn_AD(torch.autograd.Function):
@staticmethod
def forward(ctx, x_emb, y_emb, reg, niter_sin, lam=1e-6, tau=1e-9):
phi_x = x_emb.squeeze().double().to(x_emb.device)
phi_y = y_emb.squeeze().double().to(y_emb.device)
n = phi_x.size()[0]
m = phi_y.size()[0]
a = (1.0 / n) * torch.ones(n)
a = a.double().to(phi_x.device)
b = (1.0 / m) * torch.ones(m)
b = b.double().to(phi_y.device)
actual_nits = 0
u = 1.0 * torch.ones(n).double().to(phi_x.device)
v = 1.0 * torch.ones(m).double().to(phi_y.device)
err = 0.0
u_trans = torch.matmul(phi_x, torch.matmul(phi_y.t(), v)) + lam
v_trans = torch.matmul(phi_y, torch.matmul(phi_x.t(), u)) + lam
for k in range(niter_sin):
u = a / u_trans
v_trans = torch.matmul(phi_y, torch.matmul(phi_x.t(), u)) + lam
v = b / v_trans
u_trans = torch.matmul(phi_x, torch.matmul(phi_y.t(), v)) + lam
err = torch.sum(torch.abs(u * u_trans - a)) + torch.sum(
torch.abs(v * v_trans - b)
)
actual_nits += 1
if err < tau:
break
if k % 10 == 0:
### Stpping Criteria ###s
with torch.no_grad():
err = torch.sum(torch.abs(u * u_trans - a)) + torch.sum(
torch.abs(v * v_trans - b)
)
if err < tau:
break
ctx.u = u
ctx.v = v
ctx.reg = reg
ctx.phi_x = phi_x
ctx.phi_y = phi_y
cost = reg * (torch.sum(a * torch.log(u)) + torch.sum(b * torch.log(v)) - 1)
return cost
@staticmethod
def backward(ctx, grad_output):
u = ctx.u
v = ctx.v
reg = ctx.reg
phi_x = ctx.phi_x
phi_y = ctx.phi_y
grad_input = grad_output.clone()
grad_phi_x = (
grad_input
* torch.matmul(u.view(-1, 1), torch.matmul(phi_y.t(), v).view(1, -1))
* (-reg)
)
grad_phi_y = (
grad_input
* torch.matmul(v.view(-1, 1), torch.matmul(phi_x.t(), u).view(1, -1))
* (-reg)
)
return grad_phi_x, grad_phi_y, None, None, None, None, None
class Critic(BaseGanCriterion):
    """Critic-side Sinkhorn-divergence GAN loss.

    Minimizes the negated Sinkhorn divergence
    -(2*W(real, fake) - W(real, real) - W(fake, fake)) between critic
    embeddings of real and generated samples.
    """
    def __init__(self, parent, regularization=None, **kwargs):
        super().__init__(parent=parent, regularization=regularization, **kwargs)
        # Linearized Sinkhorn distance with a custom autograd backward pass.
        self.fn_loss = Lin_Sinkhorn_AD.apply
    def calculate(self, z, x, y):
        """Compute the critic loss for noise z and real batch x.

        :param z: latent noise batch fed to the generator
        :param x: real data batch
        :param y: unused (kept for the criterion interface)
        :return: (loss tensor, dict of regularization parameters)
        """
        # Check D with real data
        critic_real = self.model["critic"](x)
        # Check D with fake data generated by G (detached: no generator grads)
        gen_x = self.model["generator"](z).detach()
        critic_fake = self.model["critic"](gen_x)
        # Sinkhorn divergence terms: cross, real/real and fake/fake.
        loss_real_fake = self.fn_loss(critic_real, critic_fake, self.configs['eps'], self.configs['n_iter'])
        loss_real_realv = self.fn_loss(critic_real, critic_real, self.configs['eps'], self.configs['n_iter'])
        loss_fake_fakev = self.fn_loss(critic_fake, critic_fake, self.configs['eps'], self.configs['n_iter'])
        # MoneFloat: constant -1 tensor (presumably "minus one") flips the sign
        # so the critic maximizes the divergence.
        loss = self.Tensor.MoneFloat.double() * (2 * loss_real_fake - loss_real_realv - loss_fake_fakev)
        # Return dict for regularization parameters
        reg_params = dict(
            network=self.model['critic'],
            real_data=x,
            fake_data=gen_x,
            critic_real=critic_real,
            critic_fake=critic_fake,
        )
        loss_scalar = {
            "critic_loss_xy": loss_real_fake,
            "critic_loss_xx": loss_real_realv,
            # BUG FIX: previously logged loss_real_realv under the 'yy' key.
            "critic_loss_yy": loss_fake_fakev
        }
        self.logger(Scalar=loss_scalar)
        return loss, reg_params
class Generator(BaseGanCriterion):
    """Generator-side Sinkhorn-divergence GAN loss.

    Minimizes the Sinkhorn divergence
    2*W(real, fake) - W(real, real) - W(fake, fake) between critic
    embeddings of real and generated samples.
    """
    def __init__(self, parent, regularization=None, **kwargs):
        super().__init__(parent=parent, regularization=regularization, **kwargs)
        # Linearized Sinkhorn distance with a custom autograd backward pass.
        self.fn_loss = Lin_Sinkhorn_AD.apply
    def calculate(self, z, x, y):
        """Compute the generator loss for noise z and real batch x.

        :param z: latent noise batch fed to the generator
        :param x: real data batch
        :param y: unused (kept for the criterion interface)
        :return: (loss tensor, dict of regularization parameters)
        """
        # Check D with real data
        critic_real = self.model["critic"](x)
        # Check D with fake data generated by G (NOT detached: generator trains)
        gen_x = self.model["generator"](z)
        critic_fake = self.model["critic"](gen_x)
        # Sinkhorn divergence terms: cross, real/real and fake/fake.
        loss_real_fake = self.fn_loss(critic_real, critic_fake, self.configs['eps'], self.configs['n_iter'])
        loss_real_realv = self.fn_loss(critic_real, critic_real, self.configs['eps'], self.configs['n_iter'])
        loss_fake_fakev = self.fn_loss(critic_fake, critic_fake, self.configs['eps'], self.configs['n_iter'])
        # OneFloat: constant +1 tensor; the generator minimizes the divergence.
        loss = self.Tensor.OneFloat.double() * (2 * loss_real_fake - loss_real_realv - loss_fake_fakev)
        # Return dict for regularization parameters
        reg_params = dict(
            network=self.model['generator'],
            real_data=x,
            fake_data=gen_x,
            critic_real=critic_real,
            critic_fake=critic_fake,
        )
        loss_scalar = {
            "generator_loss_xy": loss_real_fake,
            "generator_loss_xx": loss_real_realv,
            # BUG FIX: previously logged loss_real_realv under the 'yy' key.
            "generator_loss_yy": loss_fake_fakev
        }
        self.logger(Scalar=loss_scalar)
        return loss, reg_params
|
<gh_stars>1-10
# Copyright 2019 Shin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os.path
import numpy as np
import tensorflow as tf
def _bilinear_initializer(n_channels, kernel_size, cross_channel=False):
    """
    Creates a weight matrix that performs bilinear interpolation.
    :param n_channels: The number of channels, one per semantic class.
    :param kernel_size: The filter size, which is 2x the up-sampling factor,
    eg. a kernel/filter size of 4 up-samples 2x.
    :param cross_channel: Add contribution from all other channels to each channel.
    Defaults to False, meaning that each channel is up-sampled separately without
    contribution from the other channels.
    :return: A tf.constant_initializer with the weight initialized to bilinear interpolation.
    """
    # Build the 2D bilinear kernel: a tent function centered on the filter,
    # whose center depends on whether the kernel size is odd or even.
    upscale_factor = (kernel_size + 1) // 2
    center = upscale_factor - 1 if kernel_size % 2 == 1 else upscale_factor - 0.5
    rows, cols = np.ogrid[:kernel_size, :kernel_size]
    bilinear = ((1 - abs(rows - center) / upscale_factor)
                * (1 - abs(cols - center) / upscale_factor))
    # Filter shape expected by conv ops: [height, width, in_channels, num_filters].
    weights = np.zeros([kernel_size, kernel_size, n_channels, n_channels])
    if cross_channel:
        # Every (in, out) channel pair receives the bilinear kernel.
        weights[:, :, :, :] = bilinear[:, :, np.newaxis, np.newaxis]
    else:
        # Each channel is up-sampled independently: only diagonal (i, i) slots.
        diag = np.arange(n_channels)
        weights[:, :, diag, diag] = bilinear[:, :, np.newaxis]
    return tf.constant_initializer(value=weights, dtype=tf.float32)
class Model(object):
    """Base class for building the FCN (FCN32/FCN16/FCN8) semantic-segmentation model."""
    def __init__(self, image_shape, n_classes, vgg16_weights_path):
        """
        Creates a FCN model for semantic segmentation of images.
        :param image_shape: The images' input shape of the model, including padding.
        Both width and height must be divisible by 32.
        :param n_classes: The number of semantic classes, excluding the void/ignore class.
        :param vgg16_weights_path: The filename path to the pre-trained VGG16 numpy weights.
        """
        if len(image_shape) != 2:
            raise ValueError('Parameter image_shape must be 2D. Got {} dimensions.'.format(len(image_shape)))
        if not os.path.isfile(vgg16_weights_path):
            raise ValueError('VGG16 weights file not found. Check path {}'.format(vgg16_weights_path))
        self.image_shape = image_shape
        self.n_classes = n_classes
        self.vgg16_weights_path = vgg16_weights_path
        self.saver = None  # Cannot instantiate a saver before any variables are created
    def save_protobuf(self, model_path, tags):
        """
        Saves the model's meta graph and variables to the specified folder with the specified tags.
        All operations are saved including the optimizer's state for resuming training.
        :param model_path: The directory of the model graph and variables.
        :param tags: A list of model qualifiers used to describe the model, such as ['FCN8', 'VGG16'].
        :return: None
        """
        builder = tf.saved_model.Builder(model_path)
        builder.add_meta_graph_and_variables(tf.get_default_session(), tags)
        builder.save()
    def load_protobuf(self, model_path, tags):
        """
        Loads the model from a SavedModel as specified by tags.
        The tags must match with the ones used at time of saving the model.
        :param model_path: The directory of the model graph and variables.
        :param tags: The list of model qualifiers used to describe the model at saving time,
        such as ['FCN8', 'VGG16'].
        :return: The MetaGraphDef protocol buffer loaded in the current session.
        """
        if not os.path.exists(model_path):
            raise ValueError('Folder not found: {}'.format(model_path))
        model = tf.saved_model.loader.load(tf.get_default_session(), tags, model_path)
        return model
    def save_variables(self, variables_filename, global_step):
        """
        Saves the model's variables to the specified filename.
        :param variables_filename: The location of the filename to save.
        :param global_step: The number of training iterations. Appended to the filename.
        :return: None
        """
        if self.saver is None:
            self.saver = tf.train.Saver(max_to_keep=3)
        # Note that the meta graph could also be saved if we wanted to
        self.saver.save(tf.get_default_session(), variables_filename, global_step=global_step, write_meta_graph=False)
    def load_variables(self, variables_filename):
        """
        Loads the model's variables from the specified filename.
        The corresponding meta graph must be created before calling this method.
        :param variables_filename: The location of the filename to load.
        :return: None
        """
        if not os.path.exists(variables_filename + '.data-00000-of-00001'):
            raise ValueError('File not found: {}'.format(variables_filename + '.data-00000-of-00001'))
        # Do not use the same saver object for both loading and saving.
        # Any new variables added to the graph after loading won't be saved properly.
        # Therefore the class saver is reserved for saving actions only.
        saver = tf.train.Saver(max_to_keep=3)
        saver.restore(tf.get_default_session(), variables_filename)
    def __call__(self, model_name, saved_model=None, saved_variables=None):
        """
        :param model_name: The name of the model to create, one of FCN32, FCN16 or FCN8.
        :param saved_model: Optional 2-keys dictionary to restore a model variables and operations:
            1. 'model_path' is the path to the model's protobuf file.
            2. 'tags' is a list of model qualifiers.
            used to continue staged training, or for serving purposes.
        :param saved_variables: Optional filename path to restore pre-trained FCN weights.
        :return: The output layer of the chosen model.
        """
        if model_name not in ('FCN32', 'FCN16', 'FCN8'):
            raise ValueError('{} is an invalid model'.format(model_name))
        # BUG FIX: use logical 'and' instead of bitwise '&' for the None checks.
        if (saved_model is not None) and (saved_variables is not None):
            raise ValueError('Only one of \'saved_model\' or \'saved_variables\' parameters can be different from None')
        # Create a FCN model, restoring VGG16 pre-trained weights or FCN32/FCN16
        # pre-trained weights in case of staged training of respectively FCN16/FCN8.
        if saved_model is None:
            print('Building {} model...'.format(model_name))
            self.inputs = tf.placeholder(tf.float32, [None, *self.image_shape, 3], name='inputs')
            self.labels = tf.placeholder(tf.float32, [None, *self.image_shape], name='labels')
            self.keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')
            self._fcn_base()
            if model_name == 'FCN32':
                self.outputs = self._fcn_32()
            elif model_name == 'FCN16':
                self._fcn_32()
                if saved_variables is not None:
                    print('Restoring FCN32 pre-trained weights...')
                    self.load_variables(saved_variables)
                self.outputs = self._fcn_16()
            else:
                self._fcn_32()
                self._fcn_16()
                if saved_variables is not None:
                    print('Restoring FCN16 pre-trained weights...')
                    self.load_variables(saved_variables)
                self.outputs = self._fcn_8()
        # Load a pre-trained model graph and its weights to resume training or for inference.
        else:
            print('Loading {} model...'.format(model_name))
            if model_name not in saved_model['tags']:  # Ensure the model being loaded is the one expected
                raise ValueError(
                    'Invalid model tags. Expected {}, but got {}.'.format(model_name, str(saved_model['tags'])))
            self.load_protobuf(**saved_model)
            # Retrieve key tensors from the graph.
            graph = tf.get_default_graph()
            self.inputs = graph.get_tensor_by_name('inputs:0')
            self.labels = graph.get_tensor_by_name('labels:0')
            self.keep_prob = graph.get_tensor_by_name('keep_prob:0')
            self.fcn32_out = graph.get_tensor_by_name('fcn32_out:0')
            # NOTE(review): 'logits:0' is not created by the build path above, so
            # it must come from the saved graph -- confirm with the training code.
            self.logits = tf.get_default_graph().get_tensor_by_name('logits:0')
            if 'FCN32' in saved_model['tags']:
                self.outputs = self.fcn32_out
            elif 'FCN16' in saved_model['tags']:
                self.fcn16_out = graph.get_tensor_by_name('fcn16_out:0')
                self.outputs = self.fcn16_out
            elif 'FCN8' in saved_model['tags']:
                self.fcn16_out = graph.get_tensor_by_name('fcn16_out:0')
                self.fcn8_out = graph.get_tensor_by_name('fcn8_out:0')
                self.outputs = self.fcn8_out
            else:
                raise ValueError(
                    'Invalid model tags. Expected FCN32, FCN16, or FCN8 but got {}.'.format(
                        str(saved_model['tags'])))
        return self.outputs
    def _fcn_base(self):
        """
        Builds the base FCN layers using VGG16. This base has the following differences with VGG16:
        1. The last layer (ie. the classifier) of VGG16 is removed.
        2. The remaining fully connected layers of VGG16 (`fc6` and `fc7`) are convolutionized.
        All layers are initialised with VGG16 pre-trained weights.
        The RGB mean of the ImageNet training set is subtracted to the input images batch.
        :return: The last layer of the base network, ie. the convolutionized version of `fc7`.
        """
        weights = np.load(self.vgg16_weights_path)
        # Subtract the mean RGB value, computed on the VGG16 training set, from each pixel
        with tf.name_scope('preprocess') as scope:
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            images = self.inputs - mean
        # Block 1
        with tf.name_scope('conv1_1') as scope:
            kernel = tf.Variable(weights['conv1_1_W'], name='weights', dtype=tf.float32, expected_shape=[3, 3, 3, 64])
            conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv1_1_b'], name='biases', dtype=tf.float32, expected_shape=[64])
            out = tf.nn.bias_add(conv, bias)
            self.conv1_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv1_2') as scope:
            kernel = tf.Variable(weights['conv1_2_W'], name='weights', dtype=tf.float32, expected_shape=[3, 3, 64, 64])
            conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv1_2_b'], name='biases', dtype=tf.float32, expected_shape=[64])
            out = tf.nn.bias_add(conv, bias)
            self.conv1_2 = tf.nn.relu(out, name=scope)
        self.pool1 = tf.nn.max_pool(self.conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                    padding='SAME', name='pool1')
        # Block 2
        with tf.name_scope('conv2_1') as scope:
            kernel = tf.Variable(weights['conv2_1_W'], name='weights', dtype=tf.float32, expected_shape=[3, 3, 64, 128])
            conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv2_1_b'], name='biases', dtype=tf.float32, expected_shape=[128])
            out = tf.nn.bias_add(conv, bias)
            self.conv2_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv2_2') as scope:
            kernel = tf.Variable(weights['conv2_2_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 128, 128])
            conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv2_2_b'], name='biases', dtype=tf.float32, expected_shape=[128])
            out = tf.nn.bias_add(conv, bias)
            self.conv2_2 = tf.nn.relu(out, name=scope)
        self.pool2 = tf.nn.max_pool(self.conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                    padding='SAME', name='pool2')
        # Block 3
        with tf.name_scope('conv3_1') as scope:
            kernel = tf.Variable(weights['conv3_1_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 128, 256])
            conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv3_1_b'], name='biases', dtype=tf.float32, expected_shape=[256])
            out = tf.nn.bias_add(conv, bias)
            self.conv3_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv3_2') as scope:
            kernel = tf.Variable(weights['conv3_2_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 256, 256])
            conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv3_2_b'], name='biases', dtype=tf.float32, expected_shape=[256])
            out = tf.nn.bias_add(conv, bias)
            self.conv3_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv3_3') as scope:
            kernel = tf.Variable(weights['conv3_3_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 256, 256])
            conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv3_3_b'], name='biases', dtype=tf.float32, expected_shape=[256])
            out = tf.nn.bias_add(conv, bias)
            self.conv3_3 = tf.nn.relu(out, name=scope)
        self.pool3 = tf.nn.max_pool(self.conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                    padding='SAME', name='pool3')
        # Expose pool3 under a stable name for the FCN8 skip connection.
        self.layer3_out = tf.identity(self.pool3, name='layer3_out')
        # Block 4
        with tf.name_scope('conv4_1') as scope:
            kernel = tf.Variable(weights['conv4_1_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 256, 512])
            conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv4_1_b'], name='biases', dtype=tf.float32, expected_shape=[512])
            out = tf.nn.bias_add(conv, bias)
            self.conv4_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv4_2') as scope:
            kernel = tf.Variable(weights['conv4_2_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 512, 512])
            conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv4_2_b'], name='biases', dtype=tf.float32, expected_shape=[512])
            out = tf.nn.bias_add(conv, bias)
            self.conv4_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv4_3') as scope:
            kernel = tf.Variable(weights['conv4_3_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 512, 512])
            conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv4_3_b'], name='biases', dtype=tf.float32, expected_shape=[512])
            out = tf.nn.bias_add(conv, bias)
            self.conv4_3 = tf.nn.relu(out, name=scope)
        self.pool4 = tf.nn.max_pool(self.conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                    padding='SAME', name='pool4')
        # Expose pool4 under a stable name for the FCN16 skip connection.
        self.layer4_out = tf.identity(self.pool4, name='layer4_out')
        # Block 5
        with tf.name_scope('conv5_1') as scope:
            kernel = tf.Variable(weights['conv5_1_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 512, 512])
            conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv5_1_b'], name='biases', dtype=tf.float32, expected_shape=[512])
            out = tf.nn.bias_add(conv, bias)
            self.conv5_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv5_2') as scope:
            kernel = tf.Variable(weights['conv5_2_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 512, 512])
            conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv5_2_b'], name='biases', dtype=tf.float32, expected_shape=[512])
            out = tf.nn.bias_add(conv, bias)
            self.conv5_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv5_3') as scope:
            kernel = tf.Variable(weights['conv5_3_W'], name='weights', dtype=tf.float32,
                                 expected_shape=[3, 3, 512, 512])
            conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['conv5_3_b'], name='biases', dtype=tf.float32, expected_shape=[512])
            out = tf.nn.bias_add(conv, bias)
            self.conv5_3 = tf.nn.relu(out, name=scope)
        self.pool5 = tf.nn.max_pool(self.conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                    padding='SAME', name='pool5')
        # Block 6 is a convolutionized version of fc6 with kernel shape (25088, 4096)
        with tf.name_scope('conv6') as scope:
            kernel = tf.Variable(weights['fc6_W'].reshape(7, 7, 512, 4096), name='weights',
                                 dtype=tf.float32, expected_shape=[7, 7, 512, 4096])
            conv = tf.nn.conv2d(self.pool5, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['fc6_b'], name='biases', dtype=tf.float32, expected_shape=[4096])
            out = tf.nn.bias_add(conv, bias)
            self.conv6 = tf.nn.relu(out, name=scope)
        self.drop1 = tf.nn.dropout(self.conv6, self.keep_prob, name='dropout1')
        # Block 7 is a convolutionized version of fc7 with kernel shape (4096, 4096)
        with tf.name_scope('conv7') as scope:
            kernel = tf.Variable(weights['fc7_W'].reshape(1, 1, 4096, 4096), name='weights',
                                 dtype=tf.float32, expected_shape=[1, 1, 4096, 4096])
            conv = tf.nn.conv2d(self.drop1, kernel, [1, 1, 1, 1], padding='SAME')
            bias = tf.Variable(weights['fc7_b'], name='biases', dtype=tf.float32, expected_shape=[4096])
            out = tf.nn.bias_add(conv, bias)
            self.conv7 = tf.nn.relu(out, name=scope)
        self.drop2 = tf.nn.dropout(self.conv7, self.keep_prob, name='dropout2')
        self.layer7_out = tf.identity(self.drop2, name='layer7_out')
    def _fcn_32(self):
        """
        Builds the FCN32 network specific layers on top of the base FCN layers.
        This function must be called after calling the `_fcn_base` function.
        It performs:
        1. Class prediction at stride 32 of the last layer of the FCN base.
        2. Bilinear interpolation back to the input image shape.
        :return: The output layer of FCN32.
        """
        # Apply 1x1 convolution to predict classes of layer 7 at stride 32
        self.conv7_classes = tf.layers.conv2d(self.layer7_out, filters=self.n_classes+1, kernel_size=1,
                                              kernel_initializer=tf.zeros_initializer(), name="conv7_classes")
        # 32x bilinear interpolation
        self.fcn32_out = tf.image.resize_bilinear(self.conv7_classes, self.image_shape, name="fcn32_out")
        return self.fcn32_out
    def _fcn_16(self):
        """
        Builds the FCN16 network specific layers on top of the FCN32 layers.
        This function must be called after calling the `_fcn_base` and `_fcn_32` functions.
        It performs:
        1. 2x up-sampling of the ouptput layer of FCN32. The weights are trainable and
        initialized to bilinear interpolation.
        2. Class prediction of layer 4 of VGG16 at stride 16.
        3. Element-wise sum of the above tensors, thereby implementing the first skip connection.
        4. Bilinear interpolation back to the input image shape.
        :return: The output layer of FCN16.
        """
        # Apply 1x1 convolution to predict classes of layer 4 at stride 16
        self.pool4_classes = tf.layers.conv2d(self.layer4_out, filters=self.n_classes+1, kernel_size=1,
                                              kernel_initializer=tf.zeros_initializer(), name="pool4_classes")
        # Up-sample (2x) conv7 class predictions to match the size of layer 4
        self.fcn32_upsampled = tf.layers.conv2d_transpose(self.conv7_classes, filters=self.n_classes+1,
                                                          kernel_size=4, strides=2, padding='SAME', use_bias=False,
                                                          kernel_initializer=_bilinear_initializer(self.n_classes+1, 4),
                                                          name="fcn32_upsampled")
        # Add a skip connection between class predictions of layer 4 and up-sampled class predictions of layer 7
        self.skip_1 = tf.add(self.pool4_classes, self.fcn32_upsampled, name="skip_cnx_1")
        # 16x bilinear interpolation
        self.fcn16_out = tf.image.resize_bilinear(self.skip_1, self.image_shape, name="fcn16_out")
        return self.fcn16_out
    def _fcn_8(self):
        """
        Builds the FCN8 network specific layers on top of the FCN16 layers.
        This function must be called after calling the `_fcn_base`, `_fcn_32` and `_fcn_16` functions.
        It performs:
        1. 2x up-sampling of the output layer of FCN16. The weights are trainable and
        initialized to bilinear interpolation.
        2. Class prediction of layer 3 of VGG16 at stride 8.
        3. Element-wise sum of the above tensors, thereby implementing the second skip connection.
        4. Bilinear interpolation back to the input image shape.
        :return: The output layer of FCN8.
        """
        # Apply 1x1 convolution to predict classes of layer 3 at stride 8
        self.pool3_classes = tf.layers.conv2d(self.layer3_out, filters=self.n_classes+1, kernel_size=1,
                                              kernel_initializer=tf.zeros_initializer(), name="pool3_classes")
        # Up-sample (2x) skip_1 class predictions to match the size of layer 3
        self.fcn16_upsampled = tf.layers.conv2d_transpose(self.skip_1, filters=self.n_classes+1,
                                                          kernel_size=4, strides=2, padding='SAME', use_bias=False,
                                                          kernel_initializer=_bilinear_initializer(self.n_classes+1, 4),
                                                          name="fcn16_upsampled")
        # Add a skip connection between class predictions of layer 3 and up-sampled skip_1 predictions
        self.skip_2 = tf.add(self.pool3_classes, self.fcn16_upsampled, name="skip_cnx_2")
        # 8x bilinear interpolation
        self.fcn8_out = tf.image.resize_bilinear(self.skip_2, self.image_shape, name="fcn8_out")
        return self.fcn8_out
|
from os import stat
import time
import os
import atexit
from datetime import datetime
from rpi_ws281x import *
from prices import getPrices
from config import LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_BRIGHTNESS, LED_INVERT
from config import testing, nightmode, beginSleep, stopSleep, static, staticColor, interval
import logging
# Append to a fixed log file so history survives service restarts.
logging.basicConfig(filename='/home/pi/BitcoinPriceLED/led.log', filemode='a', level=logging.INFO)
# Consecutive-failure counter; note main() shadows this with its own local.
errorCount = 0
# LED strip handle built from config.py settings; strip.begin() is called by
# the entry points (exit_handler, staticLight, main) before pixel operations.
strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
def priceUpdater(currentPrice):
    """Fetch the latest BTC/USD price and compare it with the previous one.

    :param currentPrice: the previously fetched price
    :return: tuple (new_price, previous_price, trend_sign, change_percentage)
        where trend_sign is '+' for an increase and '-' otherwise
    """
    previousPrice = currentPrice
    latestPrice = getPrices('BTCUSD')
    delta = float(latestPrice) - float(previousPrice)
    changePercentage = abs(delta) / float(previousPrice) * 100
    trend = '+' if delta > 0 else '-'
    return latestPrice, previousPrice, trend, changePercentage
def exit_handler():
    """Turn every LED off; registered to run when the process exits."""
    strip.begin()
    for pixel in range(strip.numPixels()):
        strip.setPixelColor(pixel, Color(0, 0, 0))
    strip.show()
def colorPicker(changePercentage):
    """Map the magnitude of a price change to a color-channel value.

    Bigger moves yield a lower secondary-channel value (more saturated color).

    :param changePercentage: absolute percentage change (expected >= 0)
    :return: 0 for moves above 2.5%, 90 above 0.5%, otherwise 180
    """
    if changePercentage > 2.5:
        return 0
    if changePercentage > 0.5:
        return 90
    # Final fallback also covers unexpected negative input; previously a
    # negative value fell through and returned None, which would crash
    # Color(None, ...) in the callers.
    return 180
def staticLight():
    """Show a constant color forever, dimming the strip during night hours."""
    strip.begin()
    while True:
        # Night mode check: dim to minimum brightness between the configured
        # beginSleep and stopSleep hours.
        current_hour = int(datetime.now().strftime('%H'))
        is_night = nightmode == True and beginSleep <= current_hour < stopSleep
        strip.setBrightness(1 if is_night else LED_BRIGHTNESS)
        for pixel in range(strip.numPixels()):
            strip.setPixelColor(pixel, Color(staticColor[0], staticColor[1], staticColor[2]))
        strip.show()
        # Re-evaluate brightness every 15 minutes.
        time.sleep(900)
def main():
    """Poll BTCUSD forever and colour the strip by the latest move.

    Greenish for a rise, reddish for a fall; the secondary channel is
    scaled by the size of the move via colorPicker().  After repeated
    failures the strip shows an error colour and the systemd services
    are restarted/stopped.
    """
    # Seed with a real price; fall back to 0 if the API is down.
    # NOTE(review): a 0 seed makes priceUpdater divide by zero on the
    # next poll — confirm getPrices/priceUpdater tolerate this.
    try:
        currentPrice = getPrices('BTCUSD')
    except:
        currentPrice = 0
    errorCount = 0
    strip.begin()
    while True:
        now = datetime.now()
        # Checks nightmode and adjusts brightness accordingly
        # NOTE(review): an overnight window (beginSleep > stopSleep,
        # e.g. 22 -> 6) can never satisfy this condition — confirm the
        # configured hours are same-day.
        hour = int(now.strftime('%H'))
        if nightmode == True and hour >= beginSleep and hour < stopSleep:
            strip.setBrightness(1)
            nightmodeActive = True
        else:
            strip.setBrightness(LED_BRIGHTNESS)
            nightmodeActive = False
        brightness = strip.getBrightness()
        try:
            # (currentPrice, oldPrice, trend, changePercentage)
            prices = priceUpdater(currentPrice)
            oldPrice = prices[1]
            currentPrice = prices[0]
            changePercentage = prices[3]
            trend = prices[2]
            if trend == '+':
                logging.info(f'Time: {now.strftime("%H:%M:%S")} - old price: {oldPrice}, current price: {currentPrice}, trend: +{changePercentage}% - Nightmode: {nightmodeActive}, Brightness: {brightness}')
                r = colorPicker(changePercentage)
                g = 255
                b = 0
                errorCount= 0
            elif trend == '-':
                logging.info(f'Time: {now.strftime("%H:%M:%S")} - old price: {oldPrice}, current price: {currentPrice}, trend: -{changePercentage}% - Nightmode: {nightmodeActive}, Brightness: {brightness}')
                r = 255
                g = colorPicker(changePercentage)
                b = 0
                errorCount = 0
            for i in range(0, strip.numPixels()):
                strip.setPixelColor(i, Color(r,g,b))
            strip.show()
            time.sleep(interval)
        except Exception as e:
            errorCount += 1
            now = datetime.now()
            logging.error(f'{now.strftime("%H:%M:%S")} - ERROR: {e}')
            # After 5 consecutive failures: show the error colour and
            # restart the led service via systemd.
            if errorCount > 5:
                logging.error(f'{now.strftime("%H:%M:%S")} - ERROR-Mode activated - Currently {errorCount} failures in a row: {e}')
                for i in range(0, strip.numPixels()):
                    strip.setPixelColor(i, Color(230,0,125))
                strip.show()
                logging.error(f'{now.strftime("%H:%M:%S")} - Restarting led.service now')
                os.system('sudo systemctl restart led')
                time.sleep(60)
            # After 10: give up and stop both services.
            if errorCount > 10:
                logging.error(f'{now.strftime("%H:%M:%S")} - System seems to be stuck in an error-loop...')
                logging.error(f'{now.strftime("%H:%M:%S")} - Stoping led.service and ledServer.service now...')
                os.system('sudo systemctl stop ledServer')
                os.system('sudo systemctl stop led')
                time.sleep(10)
if __name__ == "__main__":
    # Make sure the strip is blanked however the process exits
    # (normal exit, Ctrl-C, or an uncaught exception) — previously this
    # was only registered after a KeyboardInterrupt had already fired.
    atexit.register(exit_handler)
    try:
        if static != True:
            main()
        else:
            staticLight()
    except KeyboardInterrupt:
        # BUG FIX: `exit_handler` was referenced without calling it, so
        # Ctrl-C left the LEDs lit.  Call it explicitly.
        exit_handler()
|
<reponame>kwinkunks/modelr_app<gh_stars>0
"""
Handler classes that deal with data, and not webpages. These
functions can be called as web apis
"""
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext import blobstore
from google.appengine.ext import webapp as webapp2
from google.appengine.api import images
# For image serving
import cloudstorage as gcs
from PIL import Image
import time
import logging
import stripe
import json
import StringIO
from constants import admin_id, PRICE, tax_dict
from lib_auth import verify, authenticate, send_message
from lib_db import Rock, Scenario, User, ModelrParent,\
ActivityLog, ModelServedCount,\
ImageModel, EarthModel, Fluid, Item, Server, \
get_items_by_name_and_user, get_all_items_user, deep_delete,\
get_by_id
from lib_util import posterize, RGBToString
class ModelrAPI(webapp2.RequestHandler):
    """
    Base class for modelr apis. Mostly a skeleton right now
    """

    def verify(self):
        """
        Verify that the current user is a legitimate user.  Returns the
        user object from the database when the cookie checks out,
        otherwise returns None.

        TODO: This shouldn't be done with browser cookies
        """
        raw_cookie = self.request.cookies.get('user')
        if raw_cookie is None:
            return
        # Cookie format is "<user>|<password>"; anything else is invalid.
        parts = raw_cookie.split('|')
        if len(parts) != 2:
            return
        username, password = parts
        return verify(username, password, ModelrParent.all().get())
class dbAPI(ModelrAPI):
entity = Item
def get(self):
"""
Get the requested item(s) from the user's database
"""
self.response.headers['Content-Type'] = 'application/json'
user = self.verify()
if ("keys" in self.request.arguments()):
keys = self.request.get("keys")
items = self.entity.get(keys)
if type(items) is list:
output = json.dumps([item.json
for item in items])
else:
output = json.dumps(items.json)
elif ("all" in self.request.arguments()):
output = json.dumps([item.json
for item in
get_all_items_user(self.entity, user)])
elif ("ls" in self.request.arguments()):
output = json.dumps([item.simple_json
for item in get_all_items_user(self.entity,
user)])
elif ("name" in self.request.arguments()):
name = self.request.get("name")
item = get_items_by_name_and_user(self.entity, name, user)
output = json.dumps(item[0].json)
elif ("id" in self.request.arguments()):
item_id = int(self.request.get("id"))
item = get_by_id(self.entity, item_id, user)
output = json.dumps(item.json)
else:
self.error(502)
self.response.out.write(output)
@authenticate
def delete(self, user):
try:
key = self.request.get('key')
item = self.entity.get(key)
if item.user != user.user_id:
raise Exception
# delete item and all its children
deep_delete(item)
activity = "removed_item"
ActivityLog(user_id=user.user_id,
activity=activity,
parent=ModelrParent.all().get()).put()
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
except Exception as e:
print e
self.error(502)
class ScenarioHandler(ModelrAPI):
    """
    API to the scenario database
    """

    def get(self):
        """Return the named scenario as JSON: the user's own copy when it
        exists, otherwise one of the admin's default scenarios."""
        self.response.headers['Content-Type'] = 'application/json'
        # Get the user but don't redirect. Anyone can play with public
        # scenarios.
        user = self.verify()
        name = self.request.get('name')
        if user:
            scenarios = Scenario.all()
            scenarios.ancestor(user)
            scenarios.filter("user =", user.user_id)
            scenarios.filter("name =", name)
            scenarios = scenarios.fetch(1)
        else:
            scenarios = []
        # Get Evan's default scenarios (created with the admin)
        scen = Scenario.all()\
                       .ancestor(ModelrParent.all().get())\
                       .filter("user_id =", admin_id)
        # BUG FIX: the name filter used to rebuild the query from
        # scratch (Scenario.all().filter(...)), discarding the
        # ancestor/admin filters above and matching scenarios from any
        # user.  Chain it onto the filtered query instead.
        scen = scen.filter("name =", name).fetch(100)
        if scen:
            scenarios += scen
        if scenarios:
            logging.info(scenarios[0])
            logging.info(scenarios[0].data)
            scenario = scenarios[0]
            self.response.out.write(scenario.data)
        else:
            self.response.out.write('null')
        activity = "fetched_scenario"
        if user:
            ActivityLog(user_id=user.user_id,
                        activity=activity,
                        parent=ModelrParent.all().get()).put()
        return

    @authenticate
    def delete(self, user):
        """Delete every scenario of this user matching the given name."""
        name = self.request.get('name')
        scenarios = Scenario.all()
        scenarios.ancestor(user)
        scenarios.filter("user =", user.user_id)
        scenarios.filter("name =", name)
        scenarios = scenarios.fetch(100)
        for scenario in scenarios:
            scenario.delete()
        activity = "removed_scenario"
        ActivityLog(user_id=user.user_id,
                    activity=activity,
                    parent=ModelrParent.all().get()).put()
        # Output for successful post reception
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write('All OK!!')

    @authenticate
    def post(self, user):
        """Create or overwrite the named scenario for this user."""
        # Output for successful post reception
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write('All OK!!')
        name = self.request.get('name')
        group = self.request.get('group')
        logging.info(('name', name))
        data = self.request.get('json')
        logging.info(data)
        scenarios = Scenario.all()
        scenarios.ancestor(user)
        scenarios.filter("user =", user.user_id)
        scenarios.filter("name =", name)
        scenarios = scenarios.fetch(1)
        # Rewrite if the name exists, create new one if it doesn't
        if scenarios:
            scenario = scenarios[0]
        else:
            scenario = Scenario(parent=user)
            scenario.user = user.user_id
            scenario.name = name
            scenario.group = group
        # Save in Db
        scenario.data = data.encode()
        scenario.put()
        activity = "modified_scenario"
        ActivityLog(user_id=user.user_id,
                    activity=activity,
                    parent=ModelrParent.all().get()).put()
class RockHandler(dbAPI):
entity = Rock
@authenticate
def post(self, user):
# Adds a rock to the database, will throw an error
# if the rock name already exists
try:
name = self.request.get("name")
rocks = get_items_by_name_and_user(self.entity, name, user)
# Rewrite if the rock exists
if rocks:
# write out error message
raise
else:
rock = Rock(parent=user)
rock.user = user.user_id
# Populate the object
rock.vp = float(self.request.get('vp'))
rock.vs = float(self.request.get('vs'))
rock.rho = float(self.request.get('rho'))
rock.vp_std = float(self.request.get('vp_std'))
rock.vs_std = float(self.request.get('vs_std'))
rock.rho_std = float(self.request.get('rho_std'))
rock.porosity = float(self.request.get('porosity'))
rock.vclay = float(self.request.get('vclay'))
rock.description = self.request.get('description')
rock.name = self.request.get('name')
rock.group = self.request.get('group')
fluid_key = self.request.get("rock-fluid")
if fluid_key != "None":
rock.fluid_key = Fluid.get(str(fluid_key)).key()
# Save in the database
rock.put()
activity = "added_rock"
ActivityLog(user_id=user.user_id,
activity=activity,
parent=ModelrParent.all().get()).put()
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
except Exception as e:
# send error
print e
self.error(502)
@authenticate
def put(self, user):
# Updates a rock database object
# Get the database key from the request
try:
key = self.request.get("db_key")
rock = Rock.get(key)
# Update the rock
rock.vp = float(self.request.get('vp'))
rock.vs = float(self.request.get('vs'))
rock.rho = float(self.request.get('rho'))
rock.vp_std = float(self.request.get('vp_std'))
rock.vs_std = float(self.request.get('vs_std'))
rock.rho_std = float(self.request.get('rho_std'))
rock.porosity = float(self.request.get('porosity'))
rock.vclay = float(self.request.get('vclay'))
rock.description = self.request.get('description')
rock.name = self.request.get('name')
rock.group = self.request.get('group')
fluid_key = self.request.get("rock-fluid")
# This makes sure the fluid exists
try:
rock.fluid_key = Fluid.get(fluid_key).key()
except:
rock.fluid_key = None
rock.name = self.request.get('name')
rock.put()
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
except Exception as e:
# Write out error message
print e
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
return
class ImageModelHandler(dbAPI):
    """Read and delete the user's uploaded image models."""

    def get(self):
        """Return every image model with its palette, serving URL and
        attached earth models as JSON (requires the 'all' argument)."""
        user = self.verify()
        if "all" in self.request.arguments():
            models = get_all_items_user(ImageModel, user)
            data = [{"colours": [RGBToString(j[1]) for j in
                                 Image.open(
                                     blobstore.BlobReader(i.image.key()))
                                 .convert('RGB').getcolors()],
                     "image": images.get_serving_url(i.image),
                     "key": str(i.key()),
                     "earth_models": [j.json for j in
                                      EarthModel.all().ancestor(i)
                                      .filter("user =",
                                              user.user_id if user else None)
                                      .fetch(1000)]}
                    for i in models]
        else:
            # BUG FIX: without this return, `data` was referenced while
            # undefined below, raising a NameError on top of the 502.
            self.error(502)
            return
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))

    @authenticate
    def delete(self, user):
        """Deep-delete an image model and everything attached to it."""
        image_key = self.request.get("image_key")
        image = ImageModel.get(image_key)
        deep_delete(image)
class FluidHandler(dbAPI):
entity = Fluid
@authenticate
def post(self, user):
try:
name = self.request.get("name")
fluids = get_items_by_name_and_user(self.entity, name, user)
# Rewrite if the rock exists
if fluids:
raise Exception
else:
fluid = Fluid(parent=user)
fluid.user = user.user_id
fluid.rho_w = float(self.request.get('rho_w'))
fluid.rho_hc = float(self.request.get('rho_hc'))
fluid.kw = float(self.request.get('kw'))
fluid.khc = float(self.request.get('khc'))
fluid.sw = float(self.request.get('sw'))
fluid.name = name
fluid.description = self.request.get("description")
fluid.group = self.request.get("group")
fluid.put()
activity = "added_fluid"
ActivityLog(user_id=user.user_id,
activity=activity,
parent=ModelrParent.all().get()).put()
except Exception as e:
# Handle error
print e
self.error(502)
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
@authenticate
def put(self, user):
# Get the database key from the request
try:
key = self.request.get("db_key")
fluid = Fluid.get(key)
# Update the fluid
fluid.rho_w = float(self.request.get('rho_w'))
fluid.rho_hc = float(self.request.get('rho_hc'))
fluid.kw = float(self.request.get('kw'))
fluid.khc = float(self.request.get('khc'))
fluid.sw = float(self.request.get('sw'))
fluid.description = self.request.get('description')
fluid.name = self.request.get('name')
fluid.group = self.request.get('group')
fluid.name = self.request.get('name')
fluid.put()
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
except Exception as e:
# Write out error message
print e
pass
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('All OK!!')
return
class EarthModelHandler(dbAPI):
entity = EarthModel
@authenticate
def post(self, user):
try:
data = json.loads(self.request.body)
name = data["name"]
image_key = data["image_key"]
image_model = ImageModel.get(image_key)
# See if we are overwriting
earth_model = EarthModel.all().ancestor(image_model)
earth_model = earth_model.filter('user =', user.user_id)
earth_model = earth_model.filter('name =', name).get()
if earth_model:
earth_model.data = json.dumps(data)
else:
earth_model = EarthModel(user=user.user_id,
data=json.dumps(data),
name=name,
parent=image_model)
earth_model.put()
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(earth_model.json))
except Exception as e:
# TODO Handle failure
print "KLASJFDAJLKFSDA", e
self.error(502)
@authenticate
def delete(self, user):
try:
# Get the root of the model
input_model_key = self.request.get('input_image_id')
name = self.request.get('name')
image_model = ImageModel.get(input_model_key)
model = EarthModel.all().ancestor(image_model)
model = model.filter("user =", user.user_id)
model = model.filter("name =", name).get()
if model:
model.delete()
self.response.out.write(json.dumps({'success': True}))
except Exception as e:
print e
self.response.out.write(json.dumps({'success': False}))
class StripeHandler(ModelrAPI):
    '''
    Handle webhook POSTs from Stripe
    '''

    def post(self):
        # Parse the webhook event straight from the request body.
        # NOTE(review): the event is trusted as-is; re-fetching it from
        # Stripe by id (commented out below) would be more secure.
        event = json.loads(self.request.body)
        # Get the event id and retrieve it from Stripe
        # anybody can post, doing it this way is more secure
        # event_id = event_json["id"]
        # event = stripe.Event.retrieve(event_id)
        if event["type"] == "invoice.payment_succeeded":
            # For testing, change it to a known user in stripe
            # and use the webhooks testings
            # event["data"]["object"]["customer"] = \
            #     "cus_3ZL6yHJqE8DfTx"
            # event["data"]["object"]["total"] = price
            stripe_id = event["data"]["object"]["customer"]
            amount = PRICE
            event_id = event["data"]["object"]["id"]
            user = User.all().ancestor(ModelrParent.all().get())
            user = user.filter("stripe_id =", stripe_id).fetch(1)
            # Serious issue here, we need to deal with this in a
            # a clever way
            if not user:
                message = ("Failed to find modelr user for stripe " +
                           "user %s, but was invoiced by stripe "
                           % (stripe_id))
                send_message(subject="Non-existent user invoiced",
                             message=message)
                self.response.write("ALL OK")
                return
            # Look up the tax rate for the user's province; no rate
            # means nothing extra to invoice.
            tax = tax_dict.get(user[0].tax_code, None)
            if not tax:
                self.response.write("ALL OK")
                return
            # Tax them up
            stripe.InvoiceItem.create(customer=stripe_id,
                                      amount=int(amount * tax),
                                      currency="usd",
                                      description="Canadian Taxes")
            self.response.write("ALL OK")
        elif (event["type"] == 'customer.subscription.deleted'):
            # for stripe
            self.response.write("ALL OK")
            stripe_id = event["data"]["object"]["customer"]
            user = User.all().ancestor(ModelrParent.all().get())
            user = user.filter("stripe_id =", stripe_id).get()
            # This should never ever happen
            if not user:
                message = ("Failed to find modelr user for stripe " +
                           "user %s, but was invoiced by stripe "
                           % (stripe_id))
                send_message(subject="Non-existent user canceled",
                             message=message)
                return
            user.delete()
            self.response.write("ALL OK")
        elif (event["type"] == 'customer.subscription.created'):
            # New subscription: just notify the admins by email.
            message = str(event)
            send_message(subject=event["type"],
                         message=message)
            self.response.write("All OK")
        # Send an email otherwise. We can trim this down to ones we
        # actually care about.
        else:
            # Too many hooks, too much noise. commented out
            #message = str(event)
            #send_message(subject=event["type"],
            #             message=message)
            self.response.write("ALL OK")
class Upload(blobstore_handlers.BlobstoreUploadHandler,
             ModelrAPI):
    """
    Handles image uploads from users. Allows them to upload images to
    the blobstore
    """

    @authenticate
    def post(self, user):
        # Get the blob files
        upload_files = self.get_uploads()
        blob_info = upload_files[0]
        # All this is in a try incase the image format isn't accepted
        try:
            # Read the image file
            reader = blobstore.BlobReader(blob_info.key())
            im = Image.open(reader, 'r')
            # Normalize: force RGB, fixed 350x350 size, reduced palette.
            im = im.convert('RGB').resize((350, 350))
            im = posterize(im)
            output = StringIO.StringIO()
            im.save(output, format='PNG')
            # Write the processed PNG into cloud storage under a
            # per-user, timestamped filename.
            bucket = '/modelr_live_bucket/'
            output_filename = (bucket + str(user.user_id) + '/2' +
                               str(time.time()))
            gcsfile = gcs.open(output_filename, 'w')
            gcsfile.write(output.getvalue())
            output.close()
            gcsfile.close()
            # Make a blob reference
            bs_file = '/gs' + output_filename
            output_blob_key = blobstore.create_gs_key(bs_file)
            ImageModel(parent=user,
                       user=user.user_id,
                       image=output_blob_key).put()
            self.redirect('/model')
        except Exception as e:
            print e
            self.redirect('/model?error=True')
class UpdateCreditCardHandler(ModelrAPI):
    """Swap the user's default Stripe payment source for a new card."""

    @authenticate
    def post(self, user):
        try:
            token = self.request.get("card_token")
            # Look up the Stripe customer tied to this modelr user.
            customer = stripe.Customer.retrieve(user.stripe_id)
            # Attach the tokenized card and promote it to the default
            # payment source.
            new_card = customer.sources.create(source=token)
            customer.default_source = new_card
            customer.save()
            reply = {"message": "successfully updated card"}
            self.response.write(json.dumps(reply))
        except stripe.InvalidRequestError as e:
            self.response.write(json.dumps({"message": e.msg}))
class ModelServed(ModelrAPI):
    """Increment the global served-model counter (one per POST)."""

    def post(self):
        # NOTE(review): assumes a ModelServedCount entity already exists
        # (a missing row would raise AttributeError) and the
        # read-modify-write is not transactional — confirm acceptable.
        models_served = ModelServedCount.all().get()
        models_served.count += 1
        models_served.put()
class BackendServerHandler(ModelrAPI):
    """Expose the configured backend server's hostname as JSON."""

    def get(self):
        server = Server.all().get()
        payload = {'hostname': server.host}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(payload))
|
import pylab
import nest
import math as math
'''
Create objects to run experiment with
Method of connecting populations:
http://www.nest-simulator.org/introduction-to-pynest/part-2-populations-of-neurons/
'''
# Recording devices: one multimeter per pop1 neuron, one for pop2, and a
# spike detector for pop1's spike times.
multimeter = nest.Create("multimeter",10)
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
multimeter2 = nest.Create("multimeter")
nest.SetStatus(multimeter2, {"withtime":True, "record_from":["V_m"]})
spikedetector = nest.Create("spike_detector",
                params={"withgid": True, "withtime": True})
'''noise = nest.Create("poisson_generator", 2)
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])'''
# values for neurons taken from http://neuralensemble.org/docs/PyNN/examples/Izhikevich.html?highlight=izhikevich
# pop1: 10 driven neurons (I_e=18); pop2: a single weakly-driven neuron.
pop1 = nest.Create("izhikevich",10,{'V_m':-70.0,'I_e':18.0,'a':0.005,'b':0.2,'c':-65.0,'d':6.0})
pop2 = nest.Create("izhikevich",1,{'V_m':-70.0,'I_e':4.0,'a':0.02,'b':0.2,'c':-65.0,'d':6.0})
#pop1 = nest.Create("izhikevich",{'a':0.02,'b':0.2,'d':6.0})
#pop2 = nest.Create("izhikevich",{'a':0.02,'b':0.2,'d':6.0})
#pop1 = nest.Create("izhikevich")
#pop2 = nest.Create("izhikevich")
'''
Form connections between objects and run sim
'''
'''syn_dict_ex = {"weight": 1.2}
syn_dict_in = {"weight": -2.0}
nest.Connect([noise[0]], pop1, syn_spec=syn_dict_ex)
nest.Connect([noise[1]], pop1, syn_spec=syn_dict_in)'''
#nest.SetStatus(pop1, {"I_e": 376.0})
#nest.SetStatus(pop1, {"I_e": 10.0})
#nest.Connect(pop1, pop2, syn_spec = {"weight":-10.0})
# find number of neurons in layer
print('len pop1:')
print(len(pop1))
# NOTE(review): these module-level values are shadowed by locals inside
# createSyn() and appear otherwise unused — confirm before removing.
perc_ex = 0
perc_inh = 0
def createSyn(input_layer, output_layer, fire_rate_ratio):
    '''
    Connect input_layer to output_layer with a mix of excitatory and
    inhibitory synapses whose proportions follow fire_rate_ratio.

    Note: later uneven numbers of neurons in layers
    could be added but for now using even.

    Ratio of 1.0 creates 50% ex and 50% inh
             2.0 creates 66% ex and 33% inh
             0.5 creates 33% ex and 66% inh
    TODO: check if ratio calc works exactly right

    "fixed_total_number. Here n connections are created ... from the
    populations pre and ... post."

    TODO: for now synapses are one-to-one to control ratio of responses.
    In the future more e.g. one-to-many should be made while controlling
    activity between layers
    '''
    len_in_layer = len(input_layer)
    # Split the total connection budget into excitatory/inhibitory
    # fractions according to the requested ratio.
    if fire_rate_ratio >= 1.0:
        perc_ex = fire_rate_ratio / (fire_rate_ratio+1)
        perc_inh = 1 - perc_ex
    else:
        perc_inh = (1/fire_rate_ratio) / ((1/fire_rate_ratio)+1)
        perc_ex = 1 - perc_inh
    print (perc_ex)
    print (perc_inh)
    conn_ex = int(math.floor(len_in_layer*perc_ex))
    conn_inh = len_in_layer - conn_ex
    print (conn_ex)
    print (conn_inh)
    Je = 2.0
    Ji = -4.0
    # BUG FIX: the excitatory dict was closed with ')' instead of '}',
    # a syntax error that prevented the whole script from running.
    conn_dict_ex = {"rule": "fixed_total_number", "N": conn_ex,
                    "autapses": False}  # , "multapses": False}
    conn_dict_in = {"rule": "fixed_total_number", "N": conn_inh,
                    "autapses": False}  # , "multapses": False}
    syn_dict_ex = {"weight": Je}
    syn_dict_in = {"weight": Ji}
    # BUG FIX: previously connected the module globals pop1/pop2,
    # silently ignoring the layers passed in as arguments.
    nest.Connect(input_layer, output_layer, conn_dict_ex, syn_dict_ex)
    nest.Connect(input_layer, output_layer, conn_dict_in, syn_dict_in)
# Wire pop1 -> pop2 with ~48% excitatory / 52% inhibitory synapses,
# attach the recorders, and run the simulation for 350 ms.
createSyn(pop1,pop2,0.928)
nest.Connect(multimeter, pop1)
nest.Connect(multimeter2, pop2)
nest.Connect(pop1, spikedetector)
nest.Simulate(350.0)
'''
Record activity
'''
# Pull membrane-potential traces and their timestamps for both layers.
dmm = nest.GetStatus(multimeter)[0]
Vms = dmm["events"]["V_m"]
ts = dmm["events"]["times"]
dmm2 = nest.GetStatus(multimeter2)[0]
Vms2 = dmm2["events"]["V_m"]
ts2 = dmm2["events"]["times"]
'''
Plot results
'''
pylab.figure(1)
pylab.plot(ts, Vms)
pylab.figure(2)
pylab.plot(ts2, Vms2)
'''dSD = nest.GetStatus(spikedetector,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
pylab.figure(3)
pylab.plot(ts, evs, ".")'''
pylab.show() |
""" This module is the high-level wrapper for running a test on a list of input
datasets in order to collect statistics on alignment of each dataset to an
astrometric catalog."""
import datetime
import os
import shutil
import numpy as np
from astropy.table import Table
import pytest
from drizzlepac import align as alignimages
def pytest_generate_tests(metafunc):
    """Get the command line option.

    Parametrizes test_randomlist with a [start_row, start_row+num_rows)
    slice of the dataset names from the master CSV, copying the default
    master list out of the install directory when it is not in the CWD.
    """
    start_row = metafunc.config.option.start_row
    num_rows = metafunc.config.option.num_rows
    master_list = metafunc.config.option.master_list
    # Check to see if the specified file exists in the current working directory
    if not os.path.exists(master_list):
        # If not, copy the default file from the installation directory
        # Find where this module has been installed.
        install_dir = os.path.dirname(__file__)
        default_file = os.path.join(install_dir, master_list)
        if os.path.exists(default_file):
            # Copy the file
            shutil.copy2(default_file, os.getcwd())
    # Read a randomized table
    data_table = Table.read(master_list, format='ascii.csv')
    data_list = get_dataset_list(data_table)
    # Extract the subset rows
    start_row = int(start_row)
    end_row = start_row + int(num_rows)
    print("\nTEST_RANDOM. Start row: {} Number of rows to process: {}.".format(start_row, num_rows))
    print("MASTER_TABLE: {}".format(master_list))
    random_candidate_table = data_list[start_row:end_row]
    print(random_candidate_table)
    metafunc.parametrize('dataset', random_candidate_table)
@pytest.mark.bigdata
@pytest.mark.slow
@pytest.mark.unit
def test_randomlist(tmpdir, dataset):
    """ Tests which validate whether mosaics can be aligned to an astrometric standard.
        Characteristics of these tests:
          * A reference WCS is generated based upon all the input images for
            the field.
          * A source astrometric catalog is created using the Photutils
            package to detect explicitly sources in the images.
          * An astrometric catalog is created to extract astrometric positions
            for the found sources in the input images' field-of-view using
            GAIADR2 (preferred) or GAIADR1.
          * Cross matching/fitting is done between found sources and catalog
            coordinates with the Tweakwcs package.
          * The quality of the fit is evaluated against a minimum threshold and
            potentially another fit algorithm is invoked or an alternative
            catalog is used in an effort to obtain a better quality fit.
          * If the option is set, the WCS information is updated for the
            input exposures. The default is False.
          * No mosaic is generated.
          * An output table containing characterizations of the process and
            associated fit is generated.

        Success Criteria:
          * Success criterion hard-coded for this test represents whether a
            statistical sample (70%) of ACS and WFC3 datasets were able to be
            aligned to within 10mas RMS.
              * RMS values are extracted from the table output from `perform_align`
              * This criterion will need to be determined by the user after the test has been
                run based on how many datasets were run and skipped.

        The input master_list CSV file is
        output from a database and lists associations and singletons for ACS
        and WFC3 instruments randomly sorted.  The actual data files are
        downloaded from MAST via astroquery.

        This test file can be executed in the following manner:
            $ pytest -n # -s --basetemp=/internal/hladata/yourUniqueDirectoryHere --bigdata --slow
              --master_list ACSWFC3ListDefault50.csv --start_row 0 --num_rows 50 test_randomlist.py >&
              test_random_output.txt &
            $ tail -f test_random_output.txt
          * The `-n #` option can be used to run tests in parallel if `pytest-xdist` has
            been installed where `#` is the number of cpus to use.
          * Note: When running this test, the `--basetemp` directory should be set to a unique
            existing directory to avoid deleting previous test output.
          * The default master list exists in the tests/hap directory and contains 50 datasets.  The
            full master list of thousands of datasets resides in Artifactory as ACSWFC3List.csv
            (https://bytesalad.stsci.edu/artifactory/hst-hla-pipeline/dev/master_lists).
    """
    print("TEST_RANDOM. Dataset: ", dataset)
    output_name = dataset + '.ecsv'
    current_dt = datetime.datetime.now()
    print(str(current_dt))
    subdir = ""
    prevdir = os.getcwd()
    # create working directory specified for the test
    if not tmpdir.ensure(subdir, dir=True):
        curdir = tmpdir.mkdir(subdir).strpath
    else:
        curdir = tmpdir.join(subdir).strpath
    os.chdir(curdir)
    try:
        dataset_table = alignimages.perform_align([dataset], archive=False,
                                                  clobber=True, debug=False,
                                                  update_hdr_wcs=False,
                                                  print_fit_parameters=True,
                                                  print_git_info=False,
                                                  output=False)
        # Filtered datasets
        if dataset_table['doProcess'].sum() == 0:
            pytest.skip("TEST_RANDOM. Filtered Dataset: {}.".format(dataset))
        # Datasets to process
        elif dataset_table['doProcess'].sum() > 0:
            # Determine images in dataset to be processed and the number of images
            # This is in case an image was filtered out (e.g., expotime = 0)
            index = np.where(dataset_table['doProcess'] == 1)[0]
            fit_qual = dataset_table['fit_qual'][index[0]]
            # Update the table with the dataset_key which is really just a counter
            dataset_table['completed'][:] = True
            dataset_table.write(output_name, format='ascii.ecsv')
            if fit_qual > 4:
                pytest.fail("TEST_RANDOM. Unsuccessful Dataset (fit_qual = 5): {}.".format(dataset))
            else:
                assert 0 < fit_qual <= 4
    # Catch anything that happens as this dataset will be considered a failure, but
    # the processing of datasets should continue.  This is meant to catch
    # unexpected errors and generate sufficient output exception
    # information so algorithmic problems can be addressed.
    except Exception as except_details:
        print(except_details)
        # BUG FIX: the dataset name was previously passed as pytest.fail's
        # second positional argument (`pytrace`) instead of being
        # formatted into the message, so the message never showed it.
        pytest.fail("TEST_RANDOM. Exception Dataset: {}\n".format(dataset))
    finally:
        # Perform some clean up
        if os.path.isfile('ref_cat.ecsv'):
            os.remove('ref_cat.ecsv')
        if os.path.isfile('refcatalog.cat'):
            os.remove('refcatalog.cat')
        for filename in os.listdir():
            if filename.endswith('flt.fits') or filename.endswith('flc.fits'):
                os.remove(filename)
        # Return to original directory
        os.chdir(prevdir)
def get_dataset_list(table_name):
    """ Standalone function to read the Astropy table and get the dataset names

    Parameters
    ==========
    table_name : str
        Filename of the input master CSV file containing individual
        images or association names, as well as observational
        information regarding the images

    Returns
    =======
    dataset_names: list
        List of individual image or association base (IPPSSOOT) names
    """
    collected = []
    for img_id, asn_id in zip(table_name['observationID'], table_name['asnID']):
        # Incomplete rows (e.g. "(236319 rows affected)" artifacts) show
        # up as masked values — skip them entirely.
        if isinstance(asn_id, np.ma.core.MaskedConstant):
            continue
        # An asnID of "NONE" marks an individual image: keep the image
        # name.  Anything else is an association: keep the asnID.
        collected.append(img_id if asn_id.upper() == "NONE" else asn_id)
    # De-duplicate association names while preserving first-seen order.
    seen = set()
    unique_names = []
    for candidate in collected:
        if candidate not in seen:
            seen.add(candidate)
            unique_names.append(candidate)
    return unique_names
|
#!/usr/bin/env python3
import asyncio, contextvars, functools, json, re, shutil, sys, urllib.request
from bs4 import BeautifulSoup
PREAMBLE = r"""# Unofficial “odd-jobbed rankings”
This “rankings” is almost certainly riddled with errors, inaccuracies, and
missing information, and should be treated as such. This is just for informal
use, so please don’t take it too seriously. The levels of the characters listed
here are fetched directly from [the official MapleLegends
rankings](https://maplelegends.com/ranking/all) via [a shitty Python
script](https://codeberg.org/oddjobs/odd-jobbed_rankings/src/branch/master/update-async.py).
To make the “rankings” actually maintainable, off-island characters who have
not yet achieved level 45, islanders who have not yet achieved level 40, and
campers who have not yet achieved level 10 are not represented here.
“IGN” stands for “in-game name”. The “name” entries are mostly for discerning
when two or more characters are controlled by the same player. The names, I’ve
done on a best-effort basis, and some of them are just Discord identifiers
(which, it should be noted, can be changed at more or less any time, for any
reason).
Unknown or uncertain information is denoted by a question mark (“?”).
\*Not a member of <b>Suboptimal</b>.
| IGN | name | level | job(s) | guild |
| :--------- | :----------- | ----: | :--------------------- | ------------- |
"""
# Guilds whose members do NOT get the footnote asterisk in the table.
SUBOPTIMAL = {"Flow", "Oddjobs", "Southperry", "Victoria", "Newbology"}
# Characters that must be backslash-escaped so names/jobs render literally.
SPECIAL_MARKDOWN_RE = re.compile(r"_|\*|\[|\]|<|>|#")
async def to_thread(func, /, *args, **kwargs):
    """
    Run *func* in the default executor, preserving the caller's context.

    Polyfill because Python 3.8 (see: Ubuntu 20.04) doesn't have this function
    HAHAHAH

    https://github.com/python/cpython/blob/main/Lib/asyncio/threads.py
    """
    loop = asyncio.get_running_loop()
    bound = functools.partial(
        contextvars.copy_context().run, func, *args, **kwargs
    )
    return await loop.run_in_executor(None, bound)
def markdown_esc(s):
    """Backslash-escape Markdown/HTML special characters in *s*."""
    return SPECIAL_MARKDOWN_RE.sub(lambda match: "\\" + match.group(0), s)
# Load the character roster; each entry carries ign/name/job/guild fields
# and gets a "level" filled in by fetch_lvl below.
with open("./chars.json", "r", encoding="UTF-8") as chars_json:
    chars = json.load(chars_json)["chars"]
def fetch_lvl(i):
    """Fetch the level for chars[i] from MapleLegends and store it in place.

    Runs inside a worker thread.  Writes chars[i]["level"]; a value of 0
    means the level could not be determined from the page.
    """
    try:
        ign = chars[i]["ign"]
        url = f"https://maplelegends.com/levels?name={ign}"
        with urllib.request.urlopen(url) as res:
            html = res.read()
        soup = BeautifulSoup(html, "lxml")
        level = 0
        # The level is the first parseable <td> inside the page's table.
        for table_child in soup.table.children:
            if table_child.name == "tr":
                for tr_child in table_child.children:
                    if tr_child.name == "td":
                        level = int(tr_child.string)
                        break
                if level > 0:
                    break
    except BaseException as e:
        print(
            f"Exception ocurred while fetching level for IGN {ign}",
            file=sys.stderr,
        )
        raise e
    if level < 1:
        print(f"Could not get level for IGN {ign}", file=sys.stderr)
    # We aren't using a mutex or really any synchronisation primitives, but
    # that's okay because the GIL will save us. The only reason that this
    # script even runs faster than the synchronous version is because the GIL
    # is released when performing I/O...
    chars[i]["level"] = level
# Fan the per-character level fetches out to threads and wait for all.
asyncio.run(
    asyncio.wait(
        [to_thread(fetch_lvl, i) for i in range(len(chars))],
        return_when=asyncio.ALL_COMPLETED,
    )
)
# Rebuild the README: preamble first, then one table row per character,
# highest level first.  Written to a temp file and moved into place so a
# crash mid-write cannot truncate README.md.
with open("./README.md.temp", "w", encoding="UTF-8") as readme:
    readme.write(PREAMBLE)
    for char in sorted(chars, key=lambda c: c["level"], reverse=True):
        readme.write(
            f"| {char['ign']} | {markdown_esc(char['name']) if char['name'] else '?'} | {char['level'] if char['level'] else '?'} | {markdown_esc(char['job'])} | {char['guild'] if char['guild'] else markdown_esc('[none]')}{'' if char['guild'] in SUBOPTIMAL else markdown_esc('*')} |\n"
        )
shutil.move("./README.md.temp", "./README.md")
|
# Copyright 2017 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import uuid
import mock
import pytest
from taskflow.patterns import linear_flow
from artman.pipelines import code_generation
from artman.pipelines import gapic_generation
from artman.pipelines import pipeline_base
from artman.tasks import io_tasks
from artman.utils import pipeline_util
class CodeGenerationPipelineBaseTests(unittest.TestCase):
    """Unit tests for ``code_generation.CodeGenerationPipelineBase``."""

    @mock.patch.object(pipeline_base.PipelineBase, '__init__')
    @mock.patch.object(uuid, 'uuid4')
    def test_constructor(self, uuid4, super_init):
        uuid4.return_value = '00000000'
        pipeline = code_generation.CodeGenerationPipelineBase(
            None, remote_mode=True)
        # The superclass constructor must have run exactly once.
        super_init.assert_called_once()
        # Inspect the keyword arguments the superclass received.
        _, _, kwargs = super_init.mock_calls[0]
        expected_exact = {
            'tarfile': '00000000.tar.gz',
            'bucket_name': 'pipeline',
            'src_path': '00000000.tar.gz',
        }
        for key, value in expected_exact.items():
            assert kwargs[key] == value
        assert kwargs['dest_path'].endswith('00000000.tar.gz')
        assert kwargs['remote_mode'] is True
        assert len(kwargs) == 5

    @mock.patch.object(pipeline_base.PipelineBase, '__init__')
    def test_constructor_not_remote_mode(self, super_init):
        pipeline = code_generation.CodeGenerationPipelineBase(
            None, remote_mode=False)
        super_init.assert_called_once()
        # Only remote_mode itself should be forwarded in this case.
        _, _, kwargs = super_init.mock_calls[0]
        assert len(kwargs) == 1
        assert kwargs['remote_mode'] is False

    def test_do_build_flow(self):
        factory_cls = code_generation.CodeGenerationPipelineBase
        with mock.patch.object(factory_cls, 'validate_kwargs') as validate:
            pipeline = factory_cls(
                gapic_generation.GapicTaskFactory(),
                language='python', publish='noop')
            validate.assert_called_once()
        flow = pipeline.do_build_flow(
            language='python', publish='noop', gapic_code_dir='output')
        assert isinstance(flow, linear_flow.Flow)
        assert len(flow) == 9

    def test_do_build_flow_no_gapic(self):
        factory_cls = code_generation.CodeGenerationPipelineBase
        with mock.patch.object(factory_cls, 'validate_kwargs') as validate:
            pipeline = factory_cls(
                gapic_generation.GapicTaskFactory(),
                language='python', publish='noop')
            validate.assert_called_once()
        # Without a gapic_code_dir the flow has two fewer tasks.
        flow = pipeline.do_build_flow(language='python', publish='noop')
        assert isinstance(flow, linear_flow.Flow)
        assert len(flow) == 7

    @mock.patch.object(pipeline_util, 'validate_exists')
    @mock.patch.object(pipeline_util, 'validate_does_not_exist')
    def test_validation(self, does_not_exist, does_exist):
        task_factory = gapic_generation.GapicTaskFactory()
        code_generation.CodeGenerationPipelineBase(
            task_factory, language='python', publish='noop')
        # Construction must trigger each validator exactly once.
        does_exist.assert_called_once()
        does_not_exist.assert_called_once()

    def test_additional_remote_tasks(self):
        factory_cls = code_generation.CodeGenerationPipelineBase
        with mock.patch.object(factory_cls, 'validate_kwargs') as validate:
            pipeline = factory_cls(
                gapic_generation.GapicTaskFactory(),
                language='python', publish='noop')
            validate.assert_called_once()
        remote_tasks = pipeline.additional_tasks_for_remote_execution()
        assert len(remote_tasks) == 3
        # Check that we got the actual tasks that we expect, in order.
        expected = (
            io_tasks.PrepareUploadDirTask,
            io_tasks.BlobUploadTask,
            io_tasks.CleanupTempDirsTask,
        )
        for task, class_ in zip(remote_tasks, expected):
            assert isinstance(task, class_)
class TaskFactoryBaseTests(unittest.TestCase):
    """TaskFactoryBase is abstract: every hook must raise NotImplementedError."""

    @staticmethod
    def _factory():
        # Fresh instance per test; the base class takes no arguments.
        return code_generation.TaskFactoryBase()

    def test_get_tasks_nie(self):
        with pytest.raises(NotImplementedError):
            self._factory().get_tasks()

    def test_get_validate_kwargs_nie(self):
        with pytest.raises(NotImplementedError):
            self._factory().get_validate_kwargs()

    def test_get_invalid_kwargs_nie(self):
        with pytest.raises(NotImplementedError):
            self._factory().get_invalid_kwargs()
|
<gh_stars>1-10
"""
raspi_camera.py: Provide a PEPI-compatible Camera backed by a connected Raspberry Pi Camera Module.
"""
import threading
import time
import atexit
import logging
from picamera import *
from picamera.array import PiRGBArray
from server import AbstractCamera
# Module metadata. '<NAME>'/'<EMAIL>' appear to be redaction placeholders
# left by the public release of this source, not literal values.
__author__ = '<NAME>'
__copyright__ = 'Copyright 2017, <NAME>'
__version__ = '3.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
class RaspPiCamera(AbstractCamera):
    """
    RaspPiCamera is a concrete ``AbstractCamera`` that uses the Raspberry Pi Camera Module
    v1/v2 to obtain imagery. It is capable of taking pictures in various
    resolutions, but defaults to the maximum resolution of 2592x1944.
    It essentially serves as a convenient wrapper around PiCamera, but
    in the PEPI format.
    """
    SUPPORTS_STREAMING = True
    MAX_RESOLUTION = (2592, 1944)

    def __init__(self, resolution=MAX_RESOLUTION):
        # type: ((int, int)) -> None
        super(RaspPiCamera, self).__init__()
        self.camera = PiCamera()
        self.req_resolution = resolution
        self.camera.resolution = self.req_resolution
        self.camera.start_preview()
        self.camera.framerate = 30
        # PiCamera is not thread-safe; this lock serialises all camera access.
        self.lock = threading.RLock()
        # BUG FIX: the original registered the exit handler in the class body
        # as ``atexit.register(cleanup, self)``, where ``self`` is undefined
        # at class-definition time. Register per instance here instead so the
        # camera is always closed on interpreter exit.
        atexit.register(self.cleanup)

    def cleanup(self):
        """
        Cleans up the camera by closing the connection to the camera.

        :return: None
        """
        logging.info('Closing camera from RaspPiCamera..')
        self.camera.close()
        logging.info('Camera closed')

    def still(self):
        # type: () -> [[(int, int, int)]]
        """
        Captures a full-resolution still from PiCamera with its current setup.

        :return: the captured frame as an RGB array
        """
        with self.lock:
            # Lock is necessary on the camera because this method can be
            # called from different threads with reference to this Imager,
            # but the camera is not thread-safe.
            with PiRGBArray(self.camera) as stream:
                start = time.time()
                self.camera.capture(stream, format='rgb', use_video_port=False)
                logging.debug('Full-res capture took: {}'.format(time.time() - start))
                return stream.array

    def low_res_still(self):
        # type: () -> [[(int, int, int)]]
        """
        Captures a 640x480 still from PiCamera natively, using the video port
        for speed. The previously configured resolution is restored before
        returning.

        :return: the captured frame as an RGB array
        """
        with self.lock:
            old_resolution = self.camera.resolution
            self.camera.resolution = (640, 480)
            with PiRGBArray(self.camera) as stream:
                start = time.time()
                self.camera.capture(stream, format='rgb', use_video_port=True)
                logging.debug('Low-res capture took : {}'.format(time.time() - start))
                self.camera.resolution = old_resolution
                return stream.array

    def get_current_resolution(self):
        # type: () -> (int, int)
        """Gets the resolution of this PiCamera."""
        return self.camera.resolution

    def get_max_resolution(self):
        # type: () -> (int, int)
        """Gets the maximum supported resolution of this PiCamera."""
        return self.MAX_RESOLUTION

    def set_resolution(self, x, y):
        # type: (int, int) -> None
        """
        Sets the resolution of this camera for all future captures from it,
        if the provided resolution is valid.

        :param x: the x-dimension of the desired resolution
        :param y: the y-dimension of the desired resolution
        """
        with self.lock:
            self.camera.resolution = [x, y]
|
<reponame>code-review-doctor/project-application
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse
from django.views.generic import TemplateView, UpdateView, FormView
from project_core.forms.call_question import CallQuestionForm, CallQuestionFromTemplateQuestionForm
from project_core.models import CallQuestion, CallPart
class CallPartQuestionView(TemplateView):
    """Read-only detail page for a single question of a call part."""
    template_name = 'logged/call_part-question_answer-detail.tmpl'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        question = CallQuestion.objects.get(pk=self.kwargs['call_question_pk'])
        part = question.call_part
        call = part.call
        context['call_question'] = question
        # Sidebar/navigation state shared by the call-related views.
        context['active_section'] = 'calls'
        context['active_subsection'] = 'call-list'
        context['sidebar_template'] = 'logged/_sidebar-calls.tmpl'
        parts_url = reverse('logged-call-detail', kwargs={'pk': call.pk}) + '#parts'
        context['breadcrumb'] = [
            {'name': 'Calls', 'url': reverse('logged-call-list')},
            {'name': f'Details ({call.short_name})', 'url': parts_url},
            {'name': 'List Call Parts',
             'url': reverse('logged-call-part-list', kwargs={'call_pk': call.pk})},
            {'name': f'Call Part ({part.title_rendered()})',
             'url': reverse('logged-call-part-detail',
                            kwargs={'call_pk': call.pk, 'call_part_pk': part.pk})},
            {'name': f'Question: {question.question_text}'},
        ]
        return context
class CallPartQuestionUpdate(SuccessMessageMixin, UpdateView):
    """Edit form for a single CallQuestion belonging to a call part."""
    model = CallQuestion
    template_name = 'logged/call_part-question_answer-form.tmpl'
    form_class = CallQuestionForm
    pk_url_kwarg = 'call_question_pk'
    context_object_name = 'call_question'
    success_message = 'Call question updated'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        question = context['call_question']
        part = question.call_part
        call = part.call
        context['call'] = call
        context['callpart'] = part
        # Sidebar/navigation state shared by the call-related views.
        context['active_section'] = 'calls'
        context['active_subsection'] = 'call-list'
        context['sidebar_template'] = 'logged/_sidebar-calls.tmpl'
        parts_url = reverse('logged-call-detail', kwargs={'pk': call.pk}) + '#parts'
        context['breadcrumb'] = [
            {'name': 'Calls', 'url': reverse('logged-call-list')},
            {'name': f'Details ({call.short_name})', 'url': parts_url},
            {'name': 'List Call Parts',
             'url': reverse('logged-call-part-list', kwargs={'call_pk': call.pk})},
            {'name': f'Call Part ({part.title_rendered()})',
             'url': reverse('logged-call-part-detail',
                            kwargs={'call_pk': call.pk, 'call_part_pk': part.pk})},
            {'name': f'Question: {question.question_text}'},
        ]
        return context

    def get_success_url(self):
        # Back to the read-only detail page of the edited question.
        return reverse('logged-call-part-question-detail',
                       kwargs={'call_pk': self.object.call_part.call.pk,
                               'call_question_pk': self.object.pk})
class CallPartQuestionTemplateQuestionUpdate(SuccessMessageMixin, FormView):
    """Form view that adds questions to a call part from template questions."""
    template_name = 'logged/call_part-question_answer-form.tmpl'
    form_class = CallQuestionFromTemplateQuestionForm
    success_message = 'Question(s) added'

    def get_context_data(self, **kwargs):
        call_part: CallPart = CallPart.objects.get(pk=self.kwargs['call_part_pk'])
        call = call_part.call
        context = super().get_context_data(**kwargs)
        context['call'] = call
        context['callpart'] = call_part
        context.update({'active_section': 'calls',
                        'active_subsection': 'call-list',
                        'sidebar_template': 'logged/_sidebar-calls.tmpl'})
        # Consistency fix: the sibling views in this module build the same
        # breadcrumb with reverse('logged-call-list') and call.short_name,
        # while this view used reverse('logged-calls') and call.little_name(),
        # neither of which appears anywhere else in this file. Aligned with
        # the siblings. NOTE(review): confirm 'logged-calls' and little_name()
        # are not valid alternates defined elsewhere in the project.
        context['breadcrumb'] = [
            {'name': 'Calls', 'url': reverse('logged-call-list')},
            {'name': f'Details ({call.short_name})',
             'url': reverse('logged-call-detail', kwargs={'pk': call.pk})},
            {'name': f'Call Part ({call_part.title_rendered()})',
             'url': reverse('logged-call-part-detail',
                            kwargs={'call_pk': call.pk,
                                    'call_part_pk': call_part.pk})},
            {'name': 'View Call Question'},
        ]
        return context

    def form_valid(self, form):
        # FormView.form_valid returns the success redirect; persist the new
        # questions only on the success path.
        response = super().form_valid(form)
        if response:
            form.save()
        return response

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # The form needs to know which call part the questions attach to.
        kwargs['call_part_pk'] = self.kwargs['call_part_pk']
        return kwargs

    def get_success_url(self):
        call_pk = self.kwargs['call_pk']
        return reverse('logged-call-update', kwargs={'pk': call_pk}) + '#parts'
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import importlib
import json
from collections import OrderedDict
from paddlenlp.transformers import *
from paddlenlp.utils.downloader import COMMUNITY_MODEL_PREFIX, get_path_from_url
from paddlenlp.utils.env import MODEL_HOME
from paddlenlp.utils.log import logger
# Public API of this module: the Auto* factory classes, one per task head.
__all__ = [
    "AutoModel", "AutoModelForPretraining",
    "AutoModelForSequenceClassification", "AutoModelForTokenClassification",
    "AutoModelForQuestionAnswering", "AutoModelForMultipleChoice",
    "AutoModelForMaskedLM", "AutoModelForCausalLM", "AutoEncoder",
    "AutoDecoder", "AutoGenerator", "AutoDiscriminator",
    "AutoModelForConditionalGeneration"
]
# Maps a model family's class-name prefix (e.g. "Bert") to its
# paddlenlp.transformers sub-module name (e.g. "bert").
# NOTE: insertion order matters. _BaseAutoModelClass._from_pretrained scans
# these entries in order and breaks on the first *substring* match (against
# the config's init_class or the lowercased model name/path), so more
# specific prefixes must precede prefixes they contain: "BlenderbotSmall"
# before "Blenderbot", "ErnieCtm"/"ErnieDoc"/"ErnieGram"/"ErnieGen" before
# "Ernie", "LayoutXLM"/"LayoutLMv2" before "LayoutLM".
MAPPING_NAMES = OrderedDict([
    # Base model mapping
    ("Albert", "albert"),
    ("BigBird", "bigbird"),
    ("BlenderbotSmall", "blenderbot_small"),
    ("Blenderbot", "blenderbot"),
    ("ConvBert", "convbert"),
    ("MobileBert", "mobilebert"),
    ("ChineseBert", "chinesebert"),
    ("CTRL", "ctrl"),
    ("DistilBert", "distilbert"),
    ("Electra", "electra"),
    ("Skep", "skep"),
    ("ErnieCtm", "ernie_ctm"),
    ("ErnieDoc", "ernie_doc"),
    ("ErnieGram", "ernie_gram"),
    ("ErnieGen", "ernie_gen"),
    ("Ernie", "ernie"),
    ("ErnieM", "ernie_m"),
    ("GPT", "gpt"),
    ("LayoutXLM", "layoutxlm"),
    ("LayoutLMv2", "layoutlmv2"),
    ("LayoutLM", "layoutlm"),
    ("MBart", "mbart"),
    ("MPNet", "mpnet"),
    ("NeZha", "nezha"),
    ("Roberta", "roberta"),
    ("RoFormer", "roformer"),
    ("Reformer", "reformer"),
    ("SqueezeBert", "squeezebert"),
    ("T5", "t5"),
    ("TinyBert", "tinybert"),
    ("Bert", "bert"),
    ("Bart", "bart"),
    ("UNIMO", "unimo"),
    ("UnifiedTransformer", "unified_transformer"),
    ("XLNet", "xlnet"),
])
def get_name_mapping(task='Model'):
    '''
    Build the lookup table used by the Auto* classes for the given task.

    Task can be 'Model', 'ForPretraining', 'ForSequenceClassification',
    'ForTokenClassification', 'ForQuestionAnswering', 'ForMultipleChoice',
    'ForMaskedLM', 'ForCausalLM', 'Encoder', 'Decoder', 'Generator',
    'Discriminator', 'ForConditionalGeneration'.

    For every family prefix in MAPPING_NAMES two entries are added:
    '<prefix>Model_Import_Class' -> '<prefix><task>' (concrete class name),
    and '<prefix><task>' -> sub-module name.
    '''
    mapping = OrderedDict()
    for prefix, module_name in MAPPING_NAMES.items():
        concrete_class = prefix + task
        mapping[prefix + 'Model_Import_Class'] = concrete_class
        mapping[concrete_class] = module_name
    return mapping
def get_init_configurations():
    '''
    Build the mapping used as _pretrained_model_dict: for every model family,
    a tuple of its built-in pretrained weight names -> '<prefix>Model'.
    Imports each family's modeling sub-module to read its configuration keys.
    '''
    config_map = OrderedDict()
    for prefix, module_name in MAPPING_NAMES.items():
        modeling = importlib.import_module(
            f"paddlenlp.transformers.{module_name}.modeling")
        base_cls = getattr(modeling, prefix + 'Model')
        # ErnieGen stores its configurations under a differently named attribute.
        if prefix == 'ErnieGen':
            weight_names = tuple(
                base_cls.ernie_gen_pretrained_init_configuration.keys())
        else:
            weight_names = tuple(base_cls.pretrained_init_configuration.keys())
        config_map[weight_names] = prefix + 'Model'
    return config_map
class _BaseAutoModelClass:
    # Base class for auto models.
    #
    # Subclasses configure:
    #   _pretrained_model_dict: tuples of pretrained weight names -> base
    #       model class name (built by get_init_configurations()).
    #   _name_mapping: '<X>Model_Import_Class' -> concrete class name for the
    #       task, and concrete class name -> sub-module name (built by
    #       get_name_mapping()).
    #   _task_choice: True only where a `task` argument is supported.
    _pretrained_model_dict = None
    _name_mapping = None
    _task_choice = False
    # File name of the model configuration inside a model directory.
    model_config_file = "model_config.json"

    def __init__(self, *args, **kwargs):
        # Auto classes are pure factories; direct instantiation is an error.
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path).`"
        )

    @classmethod
    def _from_pretrained(cls,
                         pretrained_model_name_or_path,
                         task=None,
                         *model_args,
                         **kwargs):
        """Resolve the concrete model class for the given name/path and
        delegate to its ``from_pretrained``.

        Resolution order: built-in pretrained name -> local directory ->
        community-contributed model (config downloaded from the hub).
        """
        if task:
            if cls._task_choice == True:
                # Rebind the class-level mapping to the requested task.
                # NOTE(review): this mutates class state, so a task choice
                # persists into later calls on the same class.
                cls._name_mapping = get_name_mapping(task)
            else:
                print('We only support task choice for AutoModel.')
        # Flatten every built-in pretrained weight name into one list.
        all_model_names = []
        for pretrained_model_names, model_name in cls._pretrained_model_dict.items(
        ):
            for name in pretrained_model_names:
                all_model_names.append(name)
        # From built-in pretrained models
        if pretrained_model_name_or_path in all_model_names:
            for pretrained_model_names, model_name in cls._pretrained_model_dict.items(
            ):
                # From built-in pretrained models
                for pattern in pretrained_model_names:
                    if pattern == pretrained_model_name_or_path:
                        # Translate base model name -> concrete task class,
                        # import its modeling sub-module, and delegate.
                        init_class = cls._name_mapping[model_name +
                                                       '_Import_Class']
                        class_name = cls._name_mapping[init_class]
                        import_class = importlib.import_module(
                            f"paddlenlp.transformers.{class_name}.modeling")
                        model_class = getattr(import_class, init_class)
                        return model_class.from_pretrained(
                            pretrained_model_name_or_path, *model_args,
                            **kwargs)
        # From local dir path
        elif os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path,
                                       cls.model_config_file)
            if os.path.exists(config_file):
                with io.open(config_file, encoding="utf-8") as f:
                    init_kwargs = json.load(f)
                # class name corresponds to this configuration
                init_class = init_kwargs.pop("init_class", None)
                if init_class:
                    # Match a model family by its class-name prefix inside
                    # the configured init_class (first match wins).
                    for model_flag, name in MAPPING_NAMES.items():
                        if model_flag in init_class:
                            model_name = model_flag + 'Model'
                            break
                else:
                    # From pretrained_model_name_or_path
                    # NOTE(review): if no family matches, model_name stays
                    # unbound and the lookup below raises UnboundLocalError
                    # instead of a helpful message.
                    for model_flag, name in MAPPING_NAMES.items():
                        if name in pretrained_model_name_or_path.lower():
                            model_name = model_flag + 'Model'
                            break
                init_class = cls._name_mapping[model_name + '_Import_Class']
                class_name = cls._name_mapping[init_class]
                import_class = importlib.import_module(
                    f"paddlenlp.transformers.{class_name}.modeling")
                # NOTE: model_name is reused here to hold the class object.
                model_name = getattr(import_class, init_class)
                return model_name.from_pretrained(pretrained_model_name_or_path,
                                                  *model_args, **kwargs)
        # Assuming from community-contributed pretrained models
        else:
            community_config_path = os.path.join(COMMUNITY_MODEL_PREFIX,
                                                 pretrained_model_name_or_path,
                                                 cls.model_config_file)
            default_root = os.path.join(MODEL_HOME,
                                        pretrained_model_name_or_path)
            try:
                # Download the community config into the local model cache.
                resolved_vocab_file = get_path_from_url(community_config_path,
                                                        default_root)
            except RuntimeError as err:
                logger.error(err)
                raise RuntimeError(
                    f"Can't load weights for '{pretrained_model_name_or_path}'.\n"
                    f"Please make sure that '{pretrained_model_name_or_path}' is:\n"
                    "- a correct model-identifier of built-in pretrained models,\n"
                    "- or a correct model-identifier of community-contributed pretrained models,\n"
                    "- or the correct path to a directory containing relevant modeling files(model_weights and model_config).\n"
                )
            if os.path.exists(resolved_vocab_file):
                with io.open(resolved_vocab_file, encoding="utf-8") as f:
                    init_kwargs = json.load(f)
                # class name corresponds to this configuration
                init_class = init_kwargs.pop("init_class", None)
                if init_class:
                    # Same family-resolution logic as the local-directory
                    # branch above (duplicated in the original).
                    for model_flag, name in MAPPING_NAMES.items():
                        if model_flag in init_class:
                            model_name = model_flag + 'Model'
                            break
                else:
                    # From pretrained_model_name_or_path
                    for model_flag, name in MAPPING_NAMES.items():
                        if name in pretrained_model_name_or_path.lower():
                            model_name = model_flag + 'Model'
                            break
                init_class = cls._name_mapping[model_name + '_Import_Class']
                class_name = cls._name_mapping[init_class]
                import_class = importlib.import_module(
                    f"paddlenlp.transformers.{class_name}.modeling")
                model_name = getattr(import_class, init_class)
                return model_name.from_pretrained(pretrained_model_name_or_path,
                                                  *model_args, **kwargs)
class AutoModel(_BaseAutoModelClass):
    """
    Generic base-model factory. ``from_pretrained`` resolves the concrete
    base model class from the provided pretrained weights/vocabulary and
    instantiates it; AutoModel itself is never instantiated directly.
    """
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('Model')
    _task_choice = True

    @classmethod
    def from_pretrained(cls,
                        pretrained_model_name_or_path,
                        task=None,
                        *model_args,
                        **kwargs):
        '''
        Create an instance of the model matching the given weights.

        Args:
            pretrained_model_name_or_path (str): Name of a built-in
                pretrained model, name of a community-contributed pretrained
                model, or a local directory containing model weights
                ("model_state.pdparams") and config ("model_config.json").
            task (str): Optional downstream-task suffix ('ForPretraining',
                'ForSequenceClassification', 'ForTokenClassification',
                'ForQuestionAnswering', 'ForMultipleChoice', 'ForMaskedLM',
                'ForCausalLM', 'Encoder', 'Decoder', 'Generator',
                'Discriminator', 'ForConditionalGeneration'). Only AutoModel
                supports this. Defaults to ``None``.
            *model_args: Positional arguments forwarded to the resolved
                model's ``__init__``.
            **kwargs: Keyword arguments forwarded to the resolved model's
                ``__init__``.

        Returns:
            PretrainedModel: An instance of the resolved model class, e.g.
            ``AutoModel.from_pretrained('bert-base-uncased')`` yields a
            ``paddlenlp.transformers.bert.modeling.BertModel``.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, task,
                                    *model_args, **kwargs)
class AutoModelForPretraining(_BaseAutoModelClass):
    """
    Factory for pretraining-head models: ``from_pretrained`` resolves the
    matching ``<Family>ForPretraining`` class and instantiates it.
    """
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForPretraining')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the pretraining model matching the given
        weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved pretraining model
            class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForSequenceClassification(_BaseAutoModelClass):
    '''
    Factory for sequence-classification models: ``from_pretrained`` resolves
    the matching ``<Family>ForSequenceClassification`` class and
    instantiates it.
    '''
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForSequenceClassification')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the sequence-classification model matching the
        given weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved
            sequence-classification model class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForTokenClassification(_BaseAutoModelClass):
    '''
    Factory for token-classification models: ``from_pretrained`` resolves
    the matching ``<Family>ForTokenClassification`` class and instantiates
    it.
    '''
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForTokenClassification')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the token-classification model matching the
        given weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved
            token-classification model class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForQuestionAnswering(_BaseAutoModelClass):
    '''
    Factory for question-answering models: ``from_pretrained`` resolves the
    matching ``<Family>ForQuestionAnswering`` class and instantiates it.
    '''
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForQuestionAnswering')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the question-answering model matching the
        given weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved question-answering
            model class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForMultipleChoice(_BaseAutoModelClass):
    '''
    Factory for multiple-choice models: ``from_pretrained`` resolves the
    matching ``<Family>ForMultipleChoice`` class and instantiates it.
    '''
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForMultipleChoice')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the multiple-choice model matching the given
        weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved multiple-choice
            model class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForMaskedLM(_BaseAutoModelClass):
    '''
    Factory for masked-language-model heads: ``from_pretrained`` resolves
    the matching ``<Family>ForMaskedLM`` class and instantiates it.
    '''
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForMaskedLM')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the masked-LM model matching the given
        weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved masked-LM model
            class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForCausalLM(_BaseAutoModelClass):
    '''
    Factory for causal-language-model heads: ``from_pretrained`` resolves
    the matching ``<Family>ForCausalLM`` class (e.g. ``GPTLMHeadModel`` for
    'gpt2-en') and instantiates it.
    '''
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    _name_mapping = get_name_mapping('ForCausalLM')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        '''
        Create an instance of the causal-LM model matching the given
        weights.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args: See :class:`AutoModel`.
            **kwargs: See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of the resolved causal-LM model
            class.
        '''
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoEncoder(_BaseAutoModelClass):
    """
    AutoEncoder.

    Factory class that resolves a pretrained model name or path to the
    matching ``*Encoder`` model class and instantiates it.
    """
    # Built-in configurations used to resolve the concrete model class.
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    # Maps architecture names to their 'Encoder' classes.
    _name_mapping = get_name_mapping('Encoder')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """
        Creates an instance of `AutoEncoder`. Model weights are loaded
        by specifying name of a built-in pretrained model, or a community
        contributed model, or a local file directory path.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args (tuple): See :class:`AutoModel`.
            **kwargs (dict): See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of `AutoEncoder`.

        Example:
            .. code-block::

                from paddlenlp.transformers import AutoEncoder

                # Name of built-in pretrained model
                model = AutoEncoder.from_pretrained('bart-base',vocab_size=20000)
                print(type(model))
                # <class 'paddlenlp.transformers.bart.modeling.BartEncoder'>

                # Load from local directory path
                model = AutoEncoder.from_pretrained('./my_bart/')
                print(type(model))
                # <class 'paddlenlp.transformers.bart.modeling.BartEncoder'>
        """
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoDecoder(_BaseAutoModelClass):
    """
    AutoDecoder.

    Factory class that resolves a pretrained model name or path to the
    matching ``*Decoder`` model class and instantiates it.
    """
    # Built-in configurations used to resolve the concrete model class.
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    # Maps architecture names to their 'Decoder' classes.
    _name_mapping = get_name_mapping('Decoder')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """
        Creates an instance of `AutoDecoder`. Model weights are loaded
        by specifying name of a built-in pretrained model, or a community
        contributed model, or a local file directory path.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args (tuple): See :class:`AutoModel`.
            **kwargs (dict): See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of `AutoDecoder`.

        Example:
            .. code-block::

                from paddlenlp.transformers import AutoDecoder

                # Name of built-in pretrained model
                model = AutoDecoder.from_pretrained('bart-base', vocab_size=20000)
                print(type(model))
                # <class 'paddlenlp.transformers.bart.modeling.BartDecoder'>

                # Load from local directory path
                model = AutoDecoder.from_pretrained('./my_bart/')
                print(type(model))
                # <class 'paddlenlp.transformers.bart.modeling.BartDecoder'>
        """
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoGenerator(_BaseAutoModelClass):
    """
    AutoGenerator.

    Factory class that resolves a pretrained model name or path to the
    matching ``*Generator`` model class and instantiates it.
    """
    # Built-in configurations used to resolve the concrete model class.
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    # Maps architecture names to their 'Generator' classes.
    _name_mapping = get_name_mapping('Generator')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """
        Creates an instance of `AutoGenerator`. Model weights are loaded
        by specifying name of a built-in pretrained model, or a community
        contributed model, or a local file directory path.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args (tuple): See :class:`AutoModel`.
            **kwargs (dict): See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of `AutoGenerator`.

        Example:
            .. code-block::

                from paddlenlp.transformers import AutoGenerator

                # Name of built-in pretrained model
                model = AutoGenerator.from_pretrained('electra-small')
                print(type(model))
                # <class 'paddlenlp.transformers.electra.modeling.ElectraGenerator'>

                # Name of community-contributed pretrained model
                model = AutoGenerator.from_pretrained('junnyu/hfl-chinese-legal-electra-small-generator')
                print(type(model))
                # <class 'paddlenlp.transformers.electra.modeling.ElectraGenerator'>

                # Load from local directory path
                model = AutoGenerator.from_pretrained('./my_electra/')
                print(type(model))
                # <class 'paddlenlp.transformers.electra.modeling.ElectraGenerator'>
        """
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoDiscriminator(_BaseAutoModelClass):
    """
    AutoDiscriminator.

    Factory class that resolves a pretrained model name or path to the
    matching ``*Discriminator`` model class and instantiates it.
    """
    # Built-in configurations used to resolve the concrete model class.
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    # Maps architecture names to their 'Discriminator' classes.
    _name_mapping = get_name_mapping('Discriminator')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """
        Creates an instance of `AutoDiscriminator`. Model weights are loaded
        by specifying name of a built-in pretrained model, or a community
        contributed model, or a local file directory path.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args (tuple): See :class:`AutoModel`.
            **kwargs (dict): See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of `AutoDiscriminator`.

        Example:
            .. code-block::

                from paddlenlp.transformers import AutoDiscriminator

                # Name of built-in pretrained model
                model = AutoDiscriminator.from_pretrained('electra-small')
                print(type(model))
                # <class 'paddlenlp.transformers.electra.modeling.ElectraDiscriminator'>

                # Name of community-contributed pretrained model
                model = AutoDiscriminator.from_pretrained('junnyu/hfl-chinese-legal-electra-small-generator')
                print(type(model))
                # <class 'paddlenlp.transformers.electra.modeling.ElectraDiscriminator'>

                # Load from local directory path
                model = AutoDiscriminator.from_pretrained('./my_electra/')
                print(type(model))
                # <class 'paddlenlp.transformers.electra.modeling.ElectraDiscriminator'>
        """
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
class AutoModelForConditionalGeneration(_BaseAutoModelClass):
    """
    AutoModelForConditionalGeneration.

    Factory class that resolves a pretrained model name or path to the
    matching ``*ForConditionalGeneration`` model class and instantiates it.
    """
    # Built-in configurations used to resolve the concrete model class.
    CONFIGURATION_MODEL_MAPPING = get_init_configurations()
    _pretrained_model_dict = CONFIGURATION_MODEL_MAPPING
    # Maps architecture names to their 'ForConditionalGeneration' heads.
    _name_mapping = get_name_mapping('ForConditionalGeneration')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
                        **kwargs):
        """
        Creates an instance of `AutoModelForConditionalGeneration`. Model
        weights are loaded by specifying name of a built-in pretrained model,
        or a community contributed model, or a local file directory path.

        Args:
            pretrained_model_name_or_path (str): See :class:`AutoModel`.
            *model_args (tuple): See :class:`AutoModel`.
            **kwargs (dict): See :class:`AutoModel`.

        Returns:
            PretrainedModel: An instance of `AutoModelForConditionalGeneration`.

        Example:
            .. code-block::

                from paddlenlp.transformers import AutoModelForConditionalGeneration

                # Name of built-in pretrained model
                model = AutoModelForConditionalGeneration.from_pretrained('bart-base')
                print(type(model))
                # <class 'paddlenlp.transformers.bart.modeling.BartForConditionalGeneration'>

                # Load from local directory path
                model = AutoModelForConditionalGeneration.from_pretrained('./my_bart/')
                print(type(model))
                # <class 'paddlenlp.transformers.bart.modeling.BartForConditionalGeneration'>
        """
        return cls._from_pretrained(pretrained_model_name_or_path, *model_args,
                                    **kwargs)
|
# encoding=utf-8
import unittest
from lightcycle.arena import LightCycleArena
from lightcycle.basebot import LightCycleBaseBot, LightCycleRandomBot
from lightcycle.player import Player
from lightcycle.security import blacklisted_modules
class TestArena(unittest.TestCase):
    """End-to-end tests for LightCycleArena match resolution.

    NOTE: this module is Python 2 (print statement, ``dict.values()``
    compared against lists, old-style division error messages).
    """

    def setUp(self):
        # Two well-behaved random bots on a 10x10 arena.
        self.player1 = Player('Player 1', LightCycleRandomBot)
        self.player2 = Player('Player 2', LightCycleRandomBot)
        self.width = 10
        self.height = 10

    def test_regular_match(self):
        """A normal match ends with a winner and a crashed loser."""
        match = LightCycleArena((self.player1, self.player2), self.width, self.height).start()
        # NOTE(review): if both bots crash, this test fails when it should
        # not (a tie).  (Translated from the original Spanish comment.)
        print match
        self.assertIn('winner', match['result'], 'There should be a winner')
        self.assertEqual(match['result']['lost'].values(), ['Crashed'], 'The loser should have crashed')

    def test_string_bot_class(self):
        """Bots may be supplied as source-code strings instead of classes."""
        botsrc = ('''class LightCycleRandomBot(LightCycleBaseBot):\n'''
                  '''    def get_next_step(self, *args, **kwargs):\n'''
                  '''        return "N"''')
        player3 = Player('Player 3', botsrc)
        player4 = Player('Player 4', botsrc)
        match = LightCycleArena((player3, player4), self.width, self.height).start()
        self.assertEqual(match['result']['lost'].values(), ['Crashed'], 'The loser should have crashed')

    def test_invalid_bot_inheritance(self):
        """A bot class not deriving from LightCycleBaseBot loses by exception."""
        class InvalidBot(object):
            pass
        player3 = Player('Player 3', InvalidBot)
        match = LightCycleArena((self.player1, player3), self.width, self.height).start()
        self.assertEqual(match['result']['winner'], self.player1.username)
        self.assertEqual(match['result']['lost'][player3.username].startswith('Exception'), True, 'Player 3 should raise an exception')

    def test_bots_crashing_on_each_other(self):
        """Head-on collision on a 1-row arena: both players lose, no winner."""
        class EastBot(LightCycleBaseBot):
            def get_next_step(self, *args, **kwargs):
                return 'E'
        class WestBot(LightCycleBaseBot):
            def get_next_step(self, *args, **kwargs):
                return 'W'
        player3 = Player('Player 3', EastBot)
        player4 = Player('Player 4', WestBot)
        match = LightCycleArena((player3, player4), self.width, 1).start()
        self.assertEqual(len(match['result']['lost']), 2)
        self.assertNotIn('winner', match['result'])

    def test_invalid_move(self):
        """A bot returning an unknown direction loses with 'Invalid output'."""
        class InvalidMoveBot(LightCycleBaseBot):
            def get_next_step(self, *args, **kwargs):
                return 'The 3rd dimension!'
        player3 = Player('Player 3', InvalidMoveBot)
        match = LightCycleArena((self.player1, player3), self.width, self.height).start()
        self.assertEqual(match['result']['winner'], self.player1.username)
        self.assertEqual(match['result']['lost'], {player3.username: 'Invalid output'}, 'Player 3 should return invalid output')

    def test_timeout_on_instantiation(self):
        """A bot sleeping through its constructor loses with 'Timeout'."""
        import time
        class LightCycleDelay(LightCycleRandomBot):
            def __init__(self, *args, **kwargs):
                time.sleep(10)
                super(LightCycleDelay, self).__init__(*args, **kwargs)
        player3 = Player('Player 3', LightCycleDelay)
        match = LightCycleArena((self.player1, player3), self.width, self.height).start()
        self.assertEqual(match['result']['winner'], self.player1.username)
        self.assertEqual(match['result']['lost'], {player3.username: 'Timeout'}, 'Player 3 should timeout on instantiation')

    def test_timeout_on_move(self):
        """A bot sleeping inside get_next_step loses with 'Timeout'."""
        import time
        class LightCycleDelay(LightCycleRandomBot):
            def get_next_step(self, *args, **kwargs):
                time.sleep(10)
                return super(LightCycleDelay, self).get_next_step(*args, **kwargs)
        player3 = Player('Player 3', LightCycleDelay)
        match = LightCycleArena((self.player1, player3), self.width, self.height).start()
        self.assertEqual(match['result']['winner'], self.player1.username)
        self.assertEqual(match['result']['lost'], {player3.username: 'Timeout'}, 'Player 3 should timeout on move')

    def test_bot_crash_on_init(self):
        """An exception in the constructor is reported with its message."""
        class BrokenLightCycle(LightCycleRandomBot):
            def __init__(self, *args, **kwargs):
                return 1/0
        player3 = Player('Player 3', BrokenLightCycle)
        match = LightCycleArena((self.player1, player3), self.width, self.height).start()
        self.assertEqual(match['result']['winner'], self.player1.username)
        self.assertEqual(match['result']['lost'][player3.username], 'Exception (integer division or modulo by zero)', 'Player 3 should timeout due to a crash')

    def test_bot_crash_on_move(self):
        """An exception while moving is reported with its message."""
        class BrokenLightCycle(LightCycleRandomBot):
            def get_next_step(self, *args, **kwargs):
                return 1/0
        player3 = Player('Player 3', BrokenLightCycle)
        match = LightCycleArena((self.player1, player3), self.width, self.height).start()
        self.assertEqual(match['result']['winner'], self.player1.username)
        self.assertEqual(match['result']['lost'][player3.username], 'Exception (integer division or modulo by zero)', 'Player 3 should timeout due to a crash')

    def test_tie(self):
        """Both bots crashing simultaneously yields no winner."""
        class BrokenLightCycle(LightCycleRandomBot):
            def get_next_step(self, *args, **kwargs):
                return 1/0
        player3 = Player('Player 3', BrokenLightCycle)
        player4 = Player('Player 4', BrokenLightCycle)
        match = LightCycleArena((player3, player4), self.width, self.height).start()
        self.assertNotIn('winner', match['result'])
        self.assertEqual(match['result']['lost'],
                         {player3.username: 'Exception (integer division or modulo by zero)',
                          player4.username: 'Exception (integer division or modulo by zero)'},
                         'Players 3 and 4 should both timeout simultaneously due to a crash (it was a tie)')

    def test_attacks(self):
        """Importing a blacklisted module from bot code must be blocked."""
        import random
        m = random.choice(blacklisted_modules)
        botsrc = ('''class LightCycleRandomBot(LightCycleBaseBot):\n'''
                  '''    def get_next_step(self, *args, **kwargs):\n'''
                  '''        import %s;return "N"''' % m)
        player3 = Player('Player 3', botsrc)
        player4 = Player('Player 4', botsrc)
        match = LightCycleArena((player3, player4), self.width, self.height).start()
        self.assertEqual(match['result']['lost'],
                         {player3.username: 'Exception (No module named %s)' % m,
                          player4.username: 'Exception (No module named %s)' % m},
                         'Players 3 and 4 should both timeout simultaneously due to an invalid import')
|
<filename>coherence/log.py
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2013, <NAME> <<EMAIL>>
# Copyright 2018, <NAME> <<EMAIL>>
import io
import logging
import os
import sys
import traceback
# If you want to debug cohen,
# set the below variable to: logging.DEBUG
DEFAULT_COHEN_LOG_LEVEL = logging.WARN

# Format handed to logging; $BOLD/$RESET are placeholders later expanded
# (or stripped) by formatter_message().
LOG_FORMAT = ('[%(levelname)-18s][$BOLD%(name)-15s$RESET] '
              '%(message)s ($BOLD%(filename)s$RESET:%(lineno)d)')

# Environment variable that, when set, overrides the default log level.
ENV_VAR_NAME = 'COHEN_DEBUG'

# ANSI color indices.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# The background is set with 40 plus the number of the color,
# and the foreground with 30
# These are the sequences need to get colored output
RESET_SEQ = '\033[0m'
COLOR_SEQ = '\033[1;%dm'
BOLD_SEQ = '\033[1m'

# Per-level foreground colors used by ColoredFormatter.
COLORS = {
    'WARNING': YELLOW,
    'INFO': WHITE,
    'DEBUG': BLUE,
    'CRITICAL': YELLOW,
    'ERROR': RED
}

# This is taken from std.-module logging, see Logger.findCaller below.
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'):  # support for py2exe
    _srcfile = f'coherence{os.sep}log{__file__[-4:]}'
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# Frames originating from these files are skipped when locating the caller.
# NOTE(review): logging._srcfile is a private stdlib attribute -- confirm
# it exists on the targeted Python versions.
_srcfiles = (_srcfile, logging._srcfile)

# Registry of loggers created so far, keyed by log category.
loggers = {}
def get_main_log_level():
    """Return the log level for the main 'coherence' logger.

    The COHEN_DEBUG environment variable, when set, takes precedence;
    otherwise the level of the registered 'coherence' logger is used,
    falling back to the module default when none exists yet.
    """
    env_level = os.environ.get(ENV_VAR_NAME)
    if env_level is not None:
        return env_level
    root = loggers.get('coherence')
    return DEFAULT_COHEN_LOG_LEVEL if root is None else root.level
def formatter_message(message, use_color=True):
    """Expand the $RESET/$BOLD placeholders in *message*.

    When *use_color* is true they become ANSI escape sequences,
    otherwise they are stripped.
    """
    reset, bold = (RESET_SEQ, BOLD_SEQ) if use_color else ('', '')
    return message.replace('$RESET', reset).replace('$BOLD', bold)
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps the record's levelname in ANSI color codes."""

    def __init__(self, msg, use_color=True):
        super(ColoredFormatter, self).__init__(msg)
        self.use_color = use_color

    def format(self, record):
        name = record.levelname
        if self.use_color and name in COLORS:
            # Foreground color = 30 + color index (see module constants).
            record.levelname = COLOR_SEQ % (30 + COLORS[name]) + name + RESET_SEQ
        return super(ColoredFormatter, self).format(record)
class ColoredLogger(logging.Logger):
    """Logger subclass that installs a colorized console handler on creation."""
    FORMAT = LOG_FORMAT
    COLOR_FORMAT = formatter_message(FORMAT, True)

    def __init__(self, name):
        # Initial level comes from the environment / logger registry.
        logging.Logger.__init__(self, name, get_main_log_level())
        color_formatter = ColoredFormatter(self.COLOR_FORMAT)
        console = logging.StreamHandler()
        console.setFormatter(color_formatter)
        # Guard against installing the same console handler twice.
        if console not in self.handlers:
            self.addHandler(console)
        return

    def findCaller(self, stack_info=False, use_color=True):
        '''
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.

        NOTE(review): on Python >= 3.8 the second parameter of
        Logger.findCaller is ``stacklevel``; ``use_color`` is unused here
        and occupies that slot -- confirm against the targeted version.
        '''
        f = logging.currentframe()
        # On some versions of IronPython, currentframe() returns None if
        # IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = '(unknown file)', 0, '(unknown function)', None
        # Walk outward, skipping frames that belong to this module or to
        # the stdlib logging module (see _srcfiles above).
        while hasattr(f, 'f_code'):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            if filename in _srcfiles:
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                # Render the remaining stack into the optional sinfo slot.
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv


# Make all subsequently created loggers use the colorized class.
logging.setLoggerClass(ColoredLogger)
class LogAble(object):
    '''
    Base class for objects that want to be able to log messages with
    different level of severity. The levels are, in order from least
    to most: log, debug, info, warning, error.
    '''
    logCategory = 'default'
    '''Implementors can provide a category to log their messages under.'''

    # NOTE(review): leftover name-mangled attribute from a former
    # 'Loggable' class name; nothing in this class reads it.
    _Loggable__logger = None
    FORMAT = LOG_FORMAT
    COLOR_FORMAT = formatter_message(FORMAT, True)

    def __init__(self):
        global loggers
        # Reuse the shared logger for this category, registering it in the
        # module-level cache on first use.
        if loggers.get(self.logCategory):
            self._logger = loggers.get(self.logCategory)
            self._logger.propagate = False
        else:
            self._logger = logging.getLogger(self.logCategory)
            self._logger.propagate = False
            loggers[self.logCategory] = self._logger
        self.debug(f'Added logger with logCategory: {self.logCategory}')
        return

    def log(self, message, *args, **kwargs):
        # NOTE(review): Logger.log expects a numeric level as its first
        # argument; passing the message there looks wrong -- confirm
        # whether this should delegate to debug() instead.
        self._logger.log(message, *args, **kwargs)

    def warning(self, message, *args, **kwargs):
        self._logger.warning(message, *args, **kwargs)

    def info(self, message, *args, **kwargs):
        self._logger.info(message, *args, **kwargs)

    def critical(self, message, *args, **kwargs):
        self._logger.critical(message, *args, **kwargs)

    def debug(self, message, *args, **kwargs):
        self._logger.debug(message, *args, **kwargs)

    def error(self, message, *args, **kwargs):
        self._logger.error(message, *args, **kwargs)

    def exception(self, message, *args, **kwargs):
        self._logger.exception(message, *args, **kwargs)

    # Convenience aliases.
    fatal = critical
    warn = warning
    msg = info
def get_logger(log_category):
    """Return the logger registered for *log_category*, creating it on demand.

    Bug fix: a newly created logger was never stored in the module-level
    ``loggers`` registry, so every call re-fetched it and re-applied the
    level.  It is now cached, mirroring LogAble.__init__.
    """
    log = loggers.get(log_category)
    if log is None:
        log = logging.getLogger(log_category)
        log.setLevel(get_main_log_level())
        log.propagate = False
        loggers[log_category] = log
    return log
def init(logfilename=None, loglevel=logging.WARN):
    """Configure the root 'coherence' logger.

    On the first call this sets up basicConfig (optionally writing to
    *logfilename*) and registers the root logger; later calls only
    re-apply *loglevel* to the already-registered logger.
    """
    existing = loggers.get('coherence')
    if existing:
        existing.setLevel(loglevel)
        return existing
    # First-time setup.
    logging.addLevelName(100, 'NONE')
    logging.basicConfig(
        filename=logfilename,
        level=loglevel,
        format=LOG_FORMAT)
    root = logging.getLogger()
    root.setLevel(loglevel)
    root.propagate = False
    loggers['coherence'] = root
    root.debug(f'Added logger with logCategory: {"coherence"}')
|
<gh_stars>0
# Python Profiler v3
# Copyright (c) 2015-2017 <NAME>
# TODO:
# [x] Record only functions in StackLines
# [ ] Handle per-line hotspots as separate structure (not nested) - ?
# [ ] Handle timeline as separate structure
# [x] Use unique stack IDs to dedupe stack tuples
# [ ] Merge profile data method
# [ ] add custom metadata values to profile data (e.g. url, op, user id) for filtering / grouping
# [ ] filter/merge profile data by metadata
# [x] Expose randomize parameter for stochastic sampling
# [x] Add rate control (remove interval)
# - is this more or less misleading if we don't adjust for profiler overhead to achieve rate?
# - not adjusting for drift might be handy for estimating profiler performance/overheads
# [x] Finish linux platform driver (get thread CPU times seems to be unfinished!!)
# [ ] Windows platform driver
# [ ] Tidy up platform drivers and make a nice platform choosing function
# [ ] Convert into proper Python module + split into submodules
# [ ] Basic (temp) dump function (flat) - replace with proper collated version from stack tree
# [ ] Filter out long tail option (collate items with low ticks as 'Other') to remove noise
# [ ] Post process to build stack/call graph (have exporters work from this graph instead of raw data) - ?
# [ ] Record process ID in addition to thread?
# [ ] Option to merge processes
# [ ] Option to merge threads
# [ ] Test performance / optimize on various platforms
# [ ] Serialize (+append?) to file (lock file?)
# [ ] Load from file
# [ ] HTML5 exporter with drill-down
# [ ] Import/exporter framework
# [ ] Export to standard profiler formats (e.g. python, callgrind, firefox ThreadProfile json)
# [ ] Make Python 3 compatible
# [ ] Decorator to wrap a function with profiler
# [ ] Function to watch a function in profiler? (e.g. store code object in dict and check)
# [ ] Option to filter out standard (and custom) libraries? (path prefixes?)
# [ ] Figure out how to play nicely with time.sleep(), etc. - do we need to patch it?
# - EINTR / silent signal interrupts
# - breaks sleep/timeout behaviour in programs - provide optional monkey patches?
# - or just accept that signals break waits, and is fixed eventually by PEP475
# ('serious' code should be handling EINTR anyway?)
# [ ] Figure out how to avoid having to patch thread, wherever possible
# - maybe spawn a test thread on module import to detect if thread IDs match ?
# [x] Make interval private on profiler (or don't store)
# [x] Move all running time stats etc. into _profile_data - already done
import os
import time
import random
from contextlib import contextmanager
# - Scheduler ------------------------------------------------------------------
# Base class for repeated periodic function call
class IntervalScheduler(object):
    """Invoke *interval_func* roughly every *interval* seconds.

    Sub-classes supply the timing mechanism by overriding _init/_start/_stop.
    When *stochastic* is true, each interval is drawn uniformly from
    [0, 2*interval), so samples are not phase-locked with the profiled code
    (the mean interval is unchanged).
    """
    # Default samples-per-second used when no explicit rate is given.
    default_rate = 1

    def __init__(self, interval_func, interval=0.01, stochastic=False,
                 func_args=(), func_kwargs=None):
        """*func_args*/*func_kwargs* are forwarded to *interval_func* on
        every tick.

        Bug fix: *func_kwargs* previously defaulted to a shared mutable
        dict; it now defaults to None and a fresh dict is substituted.
        """
        self.interval = interval
        self._random = None
        if stochastic:
            # Our own Random to avoid side effects on shared PRNG
            self._random = random.Random()
        self._running = False
        self._interval_func = interval_func
        self._func_args = func_args
        self._func_kwargs = {} if func_kwargs is None else func_kwargs
        self._init()

    def start(self):
        if not self.is_running():
            self._start()
            self._running = True

    def stop(self):
        if self.is_running():
            self._stop()
            self._running = False

    def is_running(self):
        return self._running

    def get_next_interval(self):
        """Return the delay before the next tick (randomized if stochastic)."""
        if self._random:
            return (2.0 * self._random.random() * self.interval)
        else:
            return self.interval

    def tick(self, frame):
        """Invoke the scheduled function, passing the interrupted frame (or None)."""
        self._interval_func(*self._func_args, _interrupted_frame=frame, **self._func_kwargs)

    # Sub-classes should override the following methods to implement a scheduler
    # that will call self.tick() every self.interval seconds.
    # If the scheduler interrupts a Python frame, it should pass the frame that
    # was interrupted to tick(), otherwise it should pass in None.
    def _init(self):
        pass

    def _start(self):
        raise NotImplementedError()

    def _stop(self):
        raise NotImplementedError()
# Uses a separate sleeping thread, which wakes periodically and calls self.tick()
class ThreadIntervalScheduler(IntervalScheduler):
    """Scheduler driven by a daemon thread that sleeps between ticks."""
    default_rate = 100

    def _init(self):
        import threading
        self._thread = None
        self._stopping = False
        self._event = threading.Event()

    def _start(self):
        import threading

        self._event.clear()

        def _loop():
            # The event wait doubles as the inter-tick sleep; stop() sets
            # the event to break the loop.
            while not self._event.is_set():
                self._event.wait(timeout=self.get_next_interval())
                self.tick(None)

        worker = threading.Thread(target=_loop, name='profiler')
        worker.daemon = True
        self._thread = worker
        worker.start()

    def _stop(self):
        self._event.set()
        self._thread.join()
        self._stopping = False
import signal
# Signals the main thread every interval, which calls the tick() method when
# the timer event is triggered.
# Note that signal handlers are blocked during system calls, library calls, etc.
# in the main thread.
# We compensate for this by keeping track of real, user cpu, and system cpu
# usage between ticks on each thread.
# We prefer ITIMER_REAL, because that will be triggered immediately upon
# returning from a long-blocking system call, so we can add the ticks to the
# most appropriate function.
# However, if the main thread is blocked for a significant period, this will
# reduce the accuracy of samples in other threads, because only the main
# thread handles signals. In such situations, the ThreadIntervalScheduler might
# be more accurate.
# We don't specify an interval and reschedule the next tick ourselves. This
# allows us to dynamically change the sample interval to avoid aliasing, and
# prevents the signal interrupting itself, which can lead to stack errors,
# some strange behaviour when threads are being join()ed, and polluting the
# profile data with stack data from the profiler.
class SignalIntervalScheduler(IntervalScheduler):
    """Scheduler driven by ITIMER_REAL/SIGALRM delivered to the main thread.

    The timer is armed one-shot and re-armed from inside the handler, so a
    tick can never interrupt itself (see the module comments above).
    """
    default_rate = 1000
    # Which OS timer and signal drive the ticks.
    timer = signal.ITIMER_REAL
    signal = signal.SIGALRM

    def _start(self):
        def signal_handler(signum, frame):
            self.tick(frame)
            # Re-arm only while running; _stop() clears the flag first so
            # a final pending tick does not restart the timer.
            if self._run:
                signal.setitimer(self.timer, self.get_next_interval(), 0)
        # NOTE(review): the handler is installed before self._run is
        # assigned; a stray pending SIGALRM could reach the handler while
        # _run does not exist yet -- confirm this cannot happen in practice.
        signal.signal(self.signal, signal_handler)
        signal.siginterrupt(self.signal, False)
        self._run = True
        signal.setitimer(self.timer, self.get_next_interval(), 0)

    def _stop(self):
        self._run = False
        # An interval of 0 disarms the timer.
        signal.setitimer(self.timer, 0, 0)
# - Platform-specific stuff ----------------------------------------------------
import thread
import threading
class ThreadPlatform(object):
    """Abstract helper mapping Python thread idents to OS thread ids and
    reading per-thread CPU times.

    Concrete subclasses implement the platform primitives; this base class
    provides machinery for patching ``thread.start_new_thread`` so every
    new thread registers its OS-level id.
    """

    def __init__(self):
        self.name = ''
        self.lock = threading.Lock()
        # Maps threading.Thread.ident -> platform-specific thread id.
        self._registered_threads = {}
        # Keep the unpatched entry point so _unpatch_thread can restore it.
        self._original_start_new_thread = thread.start_new_thread
        self.platform_init()

    def _patch_thread(self):
        """Wrap thread creation so each new thread records its OS thread id."""
        assert threading.current_thread().name == 'MainThread'
        with self.lock:
            self._registered_threads[threading.current_thread().ident] = self.get_current_thread_id()

        def start_new_thread_wrapper(func, args, kwargs={}):
            def thread_func(func, args, kwargs):
                # Runs inside the new thread: register, then delegate.
                system_tid = self.get_current_thread_id()
                with self.lock:
                    self._registered_threads[threading.current_thread().ident] = system_tid
                return func(*args, **kwargs)
            return self._original_start_new_thread(thread_func, (func, args, kwargs))
        thread.start_new_thread = start_new_thread_wrapper
        threading._start_new_thread = start_new_thread_wrapper

    def _unpatch_thread(self):
        """Restore the original thread entry points and drop registrations."""
        with self.lock:
            self._registered_threads = {}
        # Bug fix: these previously referenced the bare name
        # `_original_start_new_thread` (a NameError); the saved originals
        # live on the instance.
        thread.start_new_thread = self._original_start_new_thread
        threading._start_new_thread = self._original_start_new_thread

    def _get_patched_thread_id(self, python_ident):
        # Lock-free read; dict.get is atomic under the GIL.
        return self._registered_threads.get(python_ident)

    def platform_init(self):
        raise NotImplementedError()

    def get_thread_id_from_python_ident(self, python_ident):
        raise NotImplementedError()

    def get_current_thread_id(self):
        raise NotImplementedError()

    def get_thread_cpu_time(self, thread_id=None):
        raise NotImplementedError()
# Single-threaded CPU times using os.times(),
# which actually gives CPU times for the whole
# process.
# Will give bad results if there are actually
# other threads running!
class SingleThreadedPlatform(ThreadPlatform):
    """Fallback platform: reports whole-process CPU time as the thread time."""

    def platform_init(self):
        pass

    def get_thread_id_from_python_ident(self, python_ident=None):
        # Bug fix: accept the python_ident argument declared by the base
        # class; the override previously took no argument and raised
        # TypeError when called through the base interface.
        return 0

    def get_current_thread_id(self):
        return 0

    def get_thread_cpu_time(self, thread_id=None):
        # user + system CPU time of the whole process.
        time_info = os.times()
        return time_info[0] + time_info[1]
class MacPThreadPlatform(ThreadPlatform):
    """macOS implementation using mach_thread_self() + thread_info()."""

    def platform_init(self):
        import ctypes
        import ctypes.util
        # NOTE(review): find_library is usually called with 'c', not
        # 'libc', on macOS -- confirm this actually resolves the library.
        libc = ctypes.CDLL(ctypes.util.find_library('libc'))
        self._mach_thread_self = libc.mach_thread_self
        self._mach_thread_self.restype = ctypes.c_uint

        # TODO: check these field definitions
        class time_value_t(ctypes.Structure):
            _fields_ = [
                ("seconds", ctypes.c_int),
                ("microseconds",ctypes.c_int)
            ]

        class thread_basic_info(ctypes.Structure):
            _fields_ = [
                ("user_time", time_value_t),
                ("system_time",time_value_t),
                ("cpu_usage",ctypes.c_int),
                ("policy",ctypes.c_int),
                ("run_state",ctypes.c_int),
                ("flags",ctypes.c_int),
                ("suspend_count",ctypes.c_int),
                ("sleep_time",ctypes.c_int)
            ]

        # kern_return_t thread_info(thread, flavor, info_out, count_inout)
        thread_info = libc.thread_info
        thread_info.restype = ctypes.c_int
        thread_info.argtypes = [
            ctypes.c_uint,
            ctypes.c_int,
            ctypes.POINTER(thread_basic_info),
            ctypes.POINTER(ctypes.c_uint)
        ]
        self._thread_info = thread_info
        self._THREAD_BASIC_INFO = 3
        # Reused output buffer to avoid per-tick allocations.
        self._out_info = thread_basic_info()
        # NOTE(review): '/' here is Python 2 integer division; under
        # Python 3 it would produce a float and c_uint would reject it.
        self._count = ctypes.c_uint(ctypes.sizeof(self._out_info) / ctypes.sizeof(ctypes.c_uint))
        self._patch_thread()

    def get_thread_id_from_python_ident(self, python_ident):
        return self._get_patched_thread_id(python_ident)

    def get_current_thread_id(self):
        return self._mach_thread_self()

    def get_thread_cpu_time(self, python_ident=None):
        """Return user+system CPU seconds for the given Python thread ident
        (the current thread when None), or 0.0 on failure."""
        import ctypes
        # TODO: Optimize with shared structs, sizes, to minimize allocs per tick
        if python_ident is None:
            thread_id = self.get_current_thread_id()
        else:
            thread_id = self.get_thread_id_from_python_ident(python_ident)
        out_info = self._out_info
        result = self._thread_info(
            thread_id,
            self._THREAD_BASIC_INFO,
            ctypes.byref(out_info),
            ctypes.byref(self._count),
        )
        if result != 0:
            return 0.0
        user_time = out_info.user_time.seconds + out_info.user_time.microseconds / 1000000.0
        system_time = out_info.system_time.seconds + out_info.system_time.microseconds / 1000000.0
        return user_time + system_time
class LinuxPThreadPlatform(ThreadPlatform):
    """Linux implementation using pthread_getcpuclockid() + clock_gettime()."""

    def platform_init(self):
        import ctypes
        import ctypes.util
        pthread = ctypes.CDLL(ctypes.util.find_library('pthread'))
        libc = ctypes.CDLL(ctypes.util.find_library('c'))
        pthread_t = ctypes.c_ulong
        clockid_t = ctypes.c_long
        time_t = ctypes.c_long
        NANOSEC = 1.0 / 1e9
        CLOCK_THREAD_CPUTIME_ID = 3  # from linux/time.h

        class timespec(ctypes.Structure):
            _fields_ = [
                ('tv_sec', time_t),
                ('tv_nsec', ctypes.c_long),
            ]

        # wrap pthread_self()
        pthread_self = pthread.pthread_self
        # Bug fix: this previously set `pthread.argtypes` (an attribute on
        # the CDLL object) instead of the function's argtypes.
        pthread_self.argtypes = []
        pthread_self.restype = pthread_t
        # wrap pthread_getcpuclockid()
        pthread_getcpuclockid = pthread.pthread_getcpuclockid
        pthread_getcpuclockid.argtypes = [pthread_t, ctypes.POINTER(clockid_t)]
        pthread_getcpuclockid.restype = clockid_t
        # wrap clock_gettime()
        clock_gettime = libc.clock_gettime
        clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
        clock_gettime.restype = ctypes.c_int

        def get_current_thread_id():
            return pthread_self()

        def get_thread_cpu_time(thread_id=None):
            """Return CPU seconds for *thread_id* (current thread when
            None), or None when either libc call fails."""
            if thread_id is None:
                thread_id = pthread_self()
            # First, get the thread's CPU clock ID
            clock_id = clockid_t()
            error = pthread_getcpuclockid(thread_id, ctypes.pointer(clock_id))
            if error:
                return None
            # Now get time from clock...
            result = timespec()
            error = clock_gettime(clock_id, ctypes.pointer(result))
            if error:
                return None
            cpu_time = result.tv_sec + result.tv_nsec * NANOSEC
            return cpu_time

        self._get_current_thread_id = get_current_thread_id
        self._get_thread_cpu_time = get_thread_cpu_time

    def get_current_thread_id(self):
        return self._get_current_thread_id()

    def get_thread_cpu_time(self, thread_id=None):
        # Bug fix: this method previously lacked `self`, so instance calls
        # passed the instance itself as thread_id.
        return self._get_thread_cpu_time(thread_id)
import sys
# Pick the platform driver for per-thread CPU times.
if sys.platform == 'darwin':
    thread_platform = MacPThreadPlatform()
elif sys.platform == 'linux':
    # NOTE(review): Python 2 reports 'linux2', so this branch is never
    # taken there; sys.platform.startswith('linux') would cover both.
    thread_platform = LinuxPThreadPlatform()
# TODO: Windows support
else:
    try:
        import thread
    except ImportError:
        pass
    else:
        import warnings
        warnings.warn('Multi-threaded CPU times not supported on this platform!')
        # NOTE(review): thread_platform is only assigned when the py2
        # 'thread' module imports; otherwise the name stays undefined --
        # confirm whether this assignment should be unconditional.
        thread_platform = SingleThreadedPlatform()
# - Sample data ----------------------------------------------------------------
import collections
# One entry in a sampled stack: what kind of entry it is, where it lives
# in the source, and an optional per-entry payload.
StackLine = collections.namedtuple('StackLine', ['type', 'name', 'file', 'line', 'data'])


def stack_line_from_frame(frame, stype='func', data=None):
    """Build a StackLine describing the function that owns *frame*."""
    code_obj = frame.f_code
    return StackLine(
        type=stype,
        name=code_obj.co_name,
        file=code_obj.co_filename,
        line=code_obj.co_firstlineno,
        data=data,
    )
class SampleData(object):
    """Accumulated sample statistics for a single unique stack."""
    __slots__ = ['rtime', 'cputime', 'ticks']

    def __init__(self):
        self.rtime = 0.0    # real / wall-clock seconds
        self.cputime = 0.0  # user CPU seconds (single thread)
        self.ticks = 0      # number of samples recorded

    def __str__(self):
        return 'SampleData<r=%.3f, cpu=%.3f, t=%d>' % (self.rtime,
                                                       self.cputime,
                                                       self.ticks)

    def __repr__(self):
        return self.__str__()
class RawProfileData(object):
    """Accumulated profile samples, deduplicated via integer IDs.

    StackLines and stack tuples are interned into integer IDs so repeated
    stacks share storage; per-stack statistics live in ``stack_data``.
    NOTE: ``dump`` uses Python 2 print statements -- this module is py2.
    """

    def __init__(self):
        self.stack_line_id_map = {}   # Maps StackLines to IDs
        self.stack_tuple_id_map = {}  # Map tuples of StackLine IDs to IDs
        self.stack_data = {}          # Maps stack ID tuples to SampleData
        self.time_running = 0.0       # Total amount of time sampling has been active
        self.total_ticks = 0          # Total number of samples we've taken

    def add_sample_data(self, stack_list, rtime, cputime, ticks):
        """Record one sample for *stack_list* (a list of StackLines)."""
        # Intern each StackLine to an ID (next free ID on first sight),
        # yielding a hashable tuple of IDs for the whole stack.
        sm = self.stack_line_id_map
        sd = self.stack_line_id_map.setdefault
        stack_tuple = tuple(
            sd(stack_line, len(sm))
            for stack_line in stack_list
        )
        # Intern the stack tuple itself the same way.
        stack_tuple_id = self.stack_tuple_id_map.setdefault(
            stack_tuple,
            len(self.stack_tuple_id_map),
        )
        if stack_tuple_id in self.stack_data:
            sample_data = self.stack_data[stack_tuple_id]
        else:
            sample_data = self.stack_data[stack_tuple_id] = SampleData()
        sample_data.rtime += rtime
        sample_data.cputime += cputime
        sample_data.ticks += ticks
        self.total_ticks += ticks

    def dump(self, sort='rtime'):
        """Print a flat summary of all sampled stacks, sorted by *sort*
        (one of SampleData.__slots__), highest first."""
        assert sort in SampleData.__slots__
        # Quick util function to dump raw data in a vaguely-useful format
        # TODO: replace with proper text exporter with sort parameters, etc.
        print '%s:\n\n %d samples taken in %.3fs:\n' % (
            self.__class__.__name__,
            self.total_ticks,
            self.time_running,
        )
        print ' Ordered by: %s\n' % sort
        # Invert stack -> ID map
        stack_line_map = dict([
            (v, k)
            for k, v
            in self.stack_line_id_map.items()
        ])
        stack_map = dict([
            (v, k)
            for k, v
            in self.stack_tuple_id_map.items()
        ])
        # Sort by the chosen metric, descending.
        lines = [
            (getattr(sample_data, sort), stack_id, sample_data)
            for stack_id, sample_data
            in self.stack_data.items()
        ]
        lines.sort()
        lines.reverse()
        print ' ticks rtime cputime filename:lineno(function)'
        for _, stack_id, sample_data in lines:
            stack = stack_map[stack_id]
            # Leaf-most entry of the stack identifies the line printed.
            stack_line = stack_line_map[stack[0]]
            print ' %7d % 8.3f % 8.3f %s:%d(%s) : %r' % (
                sample_data.ticks,
                sample_data.rtime,
                sample_data.cputime,
                os.path.basename(stack_line.file),
                stack_line.line,
                stack_line.name,
                stack,
            )
        print
class ThreadClock(object):
    """Last-seen wall-clock and CPU-clock readings for a single thread."""

    __slots__ = ['rtime', 'cputime']

    def __init__(self):
        # Both clocks start at zero and are updated as samples are taken.
        self.rtime = 0.0
        self.cputime = 0.0
class Profiler(object):
    """Statistical (sampling) profiler recording wall and per-thread CPU time.

    A scheduler periodically invokes sample(), which records the call stack
    of every live thread into a RawProfileData instance.
    """

    _scheduler_map = {
        'signal': SignalIntervalScheduler,
        'thread': ThreadIntervalScheduler,
    }

    def __init__(
        self,
        scheduler_type='signal',  # Which scheduler to use
        collect_stacks=True,      # Collect full call-tree data?
        rate=None,                # Samples/second; scheduler default if None
        stochastic=False,         # Jitter the sampling interval?
    ):
        self.collect_stacks = collect_stacks
        assert (
            scheduler_type in self._scheduler_map
            or isinstance(scheduler_type, IntervalScheduler)
        ), 'Unknown scheduler type'
        self.scheduler_type = scheduler_type
        if isinstance(scheduler_type, str):
            scheduler_type = self._scheduler_map[scheduler_type]
        if rate is None:
            rate = scheduler_type.default_rate
        self._scheduler = scheduler_type(
            self.sample,
            interval=1.0/rate,
            stochastic=stochastic,
        )
        self.reset()

    def reset(self):
        """Discard all collected data and zero every counter."""
        self._profile_data = RawProfileData()
        self._thread_clocks = {}  # Maps from thread ID to ThreadClock
        # BUG FIX: this was previously assigned to `self._last_tick`, but
        # sample() and start() read/write `self.last_tick`, so a sample
        # firing before start() raised AttributeError.
        self.last_tick = 0.0
        self.total_samples = 0
        self.sampling_time = 0.0
        # Placeholder stack recorded when a thread has no usable frame.
        self._empty_stack = [StackLine(None, 'null', '', 0, None)]
        self._start_time = 0.0

    def sample(self, _interrupted_frame=None):
        """Record one sample of every live thread's stack.

        Called by the scheduler. *_interrupted_frame* replaces the sampling
        thread's own frame so the profiler does not profile itself.
        """
        sample_time = time.time()
        current_frames = sys._current_frames()
        current_thread = thread.get_ident()
        for thread_ident, frame in current_frames.items():
            if thread_ident == current_thread:
                frame = _interrupted_frame
            if frame is not None:
                # 1.7 %
                stack = [stack_line_from_frame(frame)]
                if self.collect_stacks:
                    frame = frame.f_back
                    while frame is not None:
                        stack.append(stack_line_from_frame(frame))
                        frame = frame.f_back
                stack.append(StackLine('thread', str(thread_ident), '', 0, None))  # todo: include thread name?
                # todo: include PID?
                # todo: include custom metadata/labels?
                # 2.0 %
                # Fetch (or lazily create) this thread's clock; the CPU time
                # read was previously duplicated in both branches.
                if thread_ident in self._thread_clocks:
                    thread_clock = self._thread_clocks[thread_ident]
                else:
                    thread_clock = self._thread_clocks[thread_ident] = ThreadClock()
                cputime = thread_platform.get_thread_cpu_time(thread_ident)
                # ~5.5%
                self._profile_data.add_sample_data(
                    stack,
                    sample_time - self.last_tick,
                    cputime - thread_clock.cputime,
                    1
                )
                thread_clock.cputime = cputime
            else:
                # No frame available: attribute the elapsed wall time to a
                # null stack with zero CPU time.
                self._profile_data.add_sample_data(
                    self._empty_stack, sample_time - self.last_tick, 0.0, 1
                )
        self.last_tick = sample_time
        self.total_samples += 1
        self.sampling_time += time.time() - sample_time

    def start(self):
        """Snapshot per-thread CPU clocks and start the scheduler."""
        import threading
        # Reset thread clocks so CPU deltas are measured from "now".
        # (Loop variable renamed from `thread` to avoid shadowing the
        # `thread` module used by sample().)
        self._thread_clocks = {}
        for th in threading.enumerate():
            thread_clock = ThreadClock()
            self._thread_clocks[th.ident] = thread_clock
            cputime = thread_platform.get_thread_cpu_time(th.ident)
            thread_clock.cputime = cputime
        self._start_time = self.last_tick = time.time()
        self._scheduler.start()

    @contextmanager
    def activated(self):
        """Context manager: start sampling on entry, always stop on exit."""
        try:
            self.start()
            yield self
        finally:
            self.stop()

    def stop(self):
        """Stop the scheduler and account the elapsed running time."""
        self._scheduler.stop()
        self._profile_data.time_running += time.time() - self._start_time
        self._start_time = 0.0
def busy(rate=100):
    """Profile an idle sleep loop until Ctrl-C, then return the Profiler."""
    import time
    prof = Profiler(rate=rate)
    with prof.activated():
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            # Ctrl-C ends the sampling session; the context manager stops it.
            pass
    return prof
|
<gh_stars>0
from django.test import TestCase
from mock import MagicMock, patch
from django.db import IntegrityError
import time
from hud_api_replace.geocode import (
GoogleGeocode, _geocode_compute_key, _extract_zip_code, _geocode_cached_data, _convert_data, geocode_get_data)
from hud_api_replace.models import CachedGeodata
class TestGoogleGeocode(TestCase):
    """Tests for hud_api_replace.geocode: the GoogleGeocode class and the
    module-level helper functions (_geocode_compute_key, _extract_zip_code,
    _geocode_cached_data, _convert_data, geocode_get_data)."""

    def setUp(self):
        self.gc = GoogleGeocode(20005)

    def test_init(self):
        """ Testing __init__ """
        self.assertEqual(self.gc.zipcode, 20005)
        self.assertEqual(type(self.gc.invisible_zipcodes), dict)
        self.assertTrue(len(self.gc.invisible_zipcodes) > 0)
        self.assertTrue(self.gc.privateKey is not None)
        self.assertTrue(self.gc.clientID is not None)

    def test_is_usa_or_territory__correct(self):
        """ Testing is_usa_or_territory, GUAM, USVI, ASamoa, PRico, Cnmi, Rmi, Usa """
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, GUAM something else"))
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, UsVi"))
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, American Samoa else"))
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, Puerto Rico something else"))
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, Cnmi"))
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, rmi"))
        self.assertTrue(self.gc.is_usa_or_territory("123 Address, Usa"))

    def test_is_usa_or_territory__wrong(self):
        """ Testing is_usa_or_territory, Not USA address """
        self.assertFalse(self.gc.is_usa_or_territory("123 Address, Mexico City, Mexico"))

    def test_signed_url__correct(self):
        """ Testing signed_url, correct url """
        url = "http://maps.googleapis.com/maps/api/geocode/json?address=New+York&sensor=false&client=clientID"
        expected = "http://maps.googleapis.com/maps/api/geocode/json?address=New+York&sensor=false&client=clientID&signature=KrU1TzVQM7Ur0i8i7K3huiw3MsA="
        # BUG FIX: this literal was unterminated (missing closing quote).
        # NOTE(review): "<KEY>" is a redacted placeholder; the expected
        # signature above was produced with the real key, so this assertion
        # needs the original secret to pass — restore it from a secure store.
        self.gc.privateKey = "<KEY>"
        self.gc.clientID = "clientID"
        signed_url = self.gc.signed_url(url)
        self.assertEqual(signed_url, expected)

    def test_signed_url__empty(self):
        """ Testing signed_url, empty url """
        url = ''
        signed_url = self.gc.signed_url(url)
        self.assertEqual(signed_url, None)

    @patch.object(GoogleGeocode, 'signed_url')
    @patch('urllib2.urlopen')
    def test_request_google_maps(self, mock_urlopen, mock_signed_url):
        """ Testing request_google_maps """
        def urlopen_check_param(param):
            # Capture the URL passed to urlopen for the assertion below.
            self.param = param
            mm = MagicMock()
            mm.read.return_value = '{"Success":"success"}'
            return mm
        mock_urlopen.side_effect = urlopen_check_param
        expected = "http://maps.googleapis.com/maps/api/geocode/json?address=20005&sensor=false&client=" + self.gc.clientID
        mock_signed_url.return_value = expected
        response = self.gc.request_google_maps(20005)
        self.assertTrue(expected in self.param)
        self.assertEqual(response['Success'], 'success')

    def test_geocode_compute_key__empty(self):
        """Testing _geocode_compute_key, with empty argument."""
        result = _geocode_compute_key('')
        self.assertEqual(result, '')

    def test_geocode_compute_key__zipcode(self):
        """Testing _geocode_compute_key, zipcode as argument."""
        result = _geocode_compute_key('20005')
        self.assertEqual(result, '20005')
        result = _geocode_compute_key('20005-1999')
        self.assertEqual(result, '20005-1999')

    def test_geocode_compute_key__address(self):
        """Testing _geocode_compute_key, with an address."""
        result = _geocode_compute_key('123 Some str, Washington DC, 20005')
        self.assertEqual(result, '123 NONE SOME ST.|WASHINGTON,DC|20005')

    def test_extract_zip_code__empty(self):
        """Testing _extract_zip_code, with empty argument."""
        result = _extract_zip_code('')
        self.assertEqual(result, '')

    def test_extract_zip_code__zipcode(self):
        """Testing _extract_zip_code, with zip code."""
        result = _extract_zip_code('20005')
        self.assertEqual(result, '20005')
        result = _extract_zip_code('20005-1999')
        self.assertEqual(result, '20005-1999')

    def test_extract_zip_code__address(self):
        """Testing _extract_zip_code, with a key generated from an address."""
        result = _extract_zip_code('123 NONE SOME ST.|WASHINGTON,DC|20005')
        self.assertEqual(result, '20005')

    def test_geocode_cached_data__new(self):
        """Testing _geocode_cached_data, with an arg that is not in caches."""
        self.assertRaises(Exception, _geocode_cached_data, 'DEFINITELY-NOT-CACHED')

    def test_geocode_cached_data__existent(self):
        """Testing _geocode_cached_data, with a cached, unexpired record."""
        cg = CachedGeodata(key='NEW-RECORD', lat=1, lon=2, expires=time.time() + 10000)
        cg.save()
        result = _geocode_cached_data('NEW-RECORD')
        self.assertTrue('result' in result)
        self.assertEqual(result['result'][0], 'NEW-RECORD')
        self.assertEqual(result['result'][1], 1)
        self.assertEqual(result['result'][2], 2)

    def test_geocode_cached_data__expired(self):
        """Testing _geocode_cached_data, with an expired record."""
        cg = CachedGeodata(key='EXPIRED-RECORD', lat=1, lon=2, expires=time.time() - 10)
        cg.save()
        self.assertRaises(Exception, _geocode_cached_data, 'EXPIRED-RECORD')

    def test_convert_data__good_data(self):
        """Testing _convert_data, with an expected data structure."""
        data = {'result': ['20005', 'LAT', 'LON']}
        result = _convert_data(data)
        self.assertTrue('zip' in result)
        self.assertEqual(result['zip']['zipcode'], '20005')
        self.assertEqual(result['zip']['lat'], 'LAT')
        self.assertEqual(result['zip']['lng'], 'LON')

    def test_convert_data__bad_data(self):
        """Testing _convert_data, with bad data structure."""
        # Fixed typo in the method name (was test_conver_data__bad_data).
        data = {'esult': ['20005', 'LAT', 'LON']}
        result = _convert_data(data)
        self.assertEqual(result, data)

    @patch('hud_api_replace.geocode.GoogleGeocode.google_maps_api')
    def test_geocode_get_data__new(self, mock_geocode):
        """Testing geocode_get_data, with new argument."""
        mock_geocode.return_value = {'result': ['20005', 11, 22]}
        result = geocode_get_data('DEFINITELY-NOT_CACHED')
        self.assertTrue('zip' in result)
        self.assertEqual(result['zip']['zipcode'], '20005')
        self.assertEqual(result['zip']['lat'], 11)
        self.assertEqual(result['zip']['lng'], 22)

    def test_geocode_get_data__existent(self):
        """Testing geocode_get_data, with cached argument."""
        cg = CachedGeodata(key='20005', lat=111, lon=222, expires=time.time() + 10000)
        cg.save()
        result = geocode_get_data('20005')
        self.assertTrue('zip' in result)
        self.assertEqual(result['zip']['zipcode'], '20005')
        self.assertEqual(result['zip']['lat'], 111)
        self.assertEqual(result['zip']['lng'], 222)

    @patch('hud_api_replace.geocode.GoogleGeocode.google_maps_api')
    def test_geocode_get_data__expired(self, mock_geocode):
        """Testing geocode_get_data, with an expired cached record."""
        mock_geocode.return_value = {'result': ['20006', 11, 22]}
        cg = CachedGeodata(key='20006', lat=111, lon=222, expires=time.time() - 10000)
        cg.save()
        result = geocode_get_data('20006')
        self.assertEqual(result['zip']['zipcode'], '20006')
        self.assertEqual(result['zip']['lat'], 11)
        self.assertEqual(result['zip']['lng'], 22)
|
# -*- coding: utf-8 -*-
"""
CoCart API Class
"""
__title__ = "cocart-api"
__version__ = "1.0.0"
__author__ = "<NAME> @ CoCart"
__license__ = "MIT"
from requests import request
from json import dumps as jsonencode
from time import time
from cocart.oauth import CoCartOAuth
from requests.auth import HTTPBasicAuth
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
class CoCartAPI(object):
    """ CoCart API Class

    Thin HTTP client for the CoCart WordPress REST API. Supports HTTP Basic
    auth over SSL, query-string auth, and OAuth1.0a for plain HTTP.
    """

    def __init__(self, url, consumer_key, consumer_secret, **kwargs):
        self.url = url
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.wp_api = kwargs.get("wp_api", "wp-json")
        self.version = kwargs.get("version", "cocart/v1")
        self.is_ssl = self.__is_ssl()
        self.timeout = kwargs.get("timeout", 5)
        self.verify_ssl = kwargs.get("verify_ssl", True)
        self.query_string_auth = kwargs.get("query_string_auth", False)
        self.userAgent = kwargs.get("user_agent", self.__defaultUserAgent())

    def __defaultUserAgent(self):
        """ Build the default User-Agent header value.

        BUG FIX: the parameter was previously named ``__version__``, which
        captured ``self`` and made the header render the instance's repr
        (e.g. "CoCart API <CoCartAPI object at 0x...>") instead of the
        module-level package version.
        """
        return f"CoCart API {__version__}"

    def __is_ssl(self):
        """ Check if url use HTTPS """
        return self.url.startswith("https")

    def __get_url(self, endpoint):
        """ Get URL for requests """
        url = self.url
        api = self.wp_api
        if url.endswith("/") is False:
            url = f"{url}/"
        return f"{url}{api}/{self.version}/{endpoint}"

    def __get_oauth_url(self, url, method, **kwargs):
        """ Generate oAuth1.0a URL """
        oauth = CoCartOAuth(
            url=url,
            consumer_key=self.consumer_key,
            consumer_secret=self.consumer_secret,
            version=self.version,
            method=method,
            oauth_timestamp=kwargs.get("oauth_timestamp", int(time()))
        )
        return oauth.get_oauth_url()

    def __request(self, method, endpoint, data, params=None, **kwargs):
        """ Do requests

        Chooses the auth scheme from is_ssl/query_string_auth, JSON-encodes
        *data* when present, and returns the raw requests.Response.
        """
        if params is None:
            params = {}
        url = self.__get_url(endpoint)
        auth = None
        headers = {
            "user-agent": self.userAgent,
            "accept": "application/json"
        }

        if self.is_ssl is True and self.query_string_auth is False:
            auth = HTTPBasicAuth(self.consumer_key, self.consumer_secret)
        elif self.is_ssl is True and self.query_string_auth is True:
            params.update({
                "consumer_key": self.consumer_key,
                "consumer_secret": self.consumer_secret
            })
        else:
            # NOTE(review): params are embedded into the OAuth URL here AND
            # still passed as params= below, so requests may append them a
            # second time on plain-HTTP calls — confirm against CoCartOAuth.
            encoded_params = urlencode(params)
            url = f"{url}?{encoded_params}"
            url = self.__get_oauth_url(url, method, **kwargs)

        if data is not None:
            data = jsonencode(data, ensure_ascii=False).encode('utf-8')
            headers["content-type"] = "application/json;charset=utf-8"

        return request(
            method=method,
            url=url,
            verify=self.verify_ssl,
            auth=auth,
            params=params,
            data=data,
            timeout=self.timeout,
            headers=headers,
            **kwargs
        )

    def get(self, endpoint, **kwargs):
        """ Get requests """
        return self.__request("GET", endpoint, None, **kwargs)

    def post(self, endpoint, data, **kwargs):
        """ POST requests """
        return self.__request("POST", endpoint, data, **kwargs)

    def put(self, endpoint, data, **kwargs):
        """ PUT requests """
        return self.__request("PUT", endpoint, data, **kwargs)

    def delete(self, endpoint, **kwargs):
        """ DELETE requests """
        return self.__request("DELETE", endpoint, None, **kwargs)

    def options(self, endpoint, **kwargs):
        """ OPTIONS requests """
        return self.__request("OPTIONS", endpoint, None, **kwargs)
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME>. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Napalm driver for Cisco S350 devices.
Read https://napalm.readthedocs.io for more information.
"""
from __future__ import print_function
from __future__ import unicode_literals
import netaddr
import re
import socket
from netmiko import ConnectHandler
from napalm.base import NetworkDriver
from napalm.base.exceptions import (
CommandErrorException,
ConnectionClosedException,
)
from napalm.base.helpers import canonical_interface_name
import napalm.base.constants as C
import napalm.base.canonical_map
# Make my own base_interfaces map for the S350: extend napalm's canonical
# interface-name map with the short prefixes these switches emit.
s350_base_interfaces = {
    **napalm.base.canonical_map.base_interfaces,
    "fa": "FastEthernet",
    "gi": "GigabitEthernet",
    "te": "TengigabitEthernet",
}
class S350Driver(NetworkDriver):
    """Napalm driver for S350."""

    def __init__(self, hostname, username, password, timeout=60, optional_args=None):
        """Constructor."""
        self.device = None
        self.hostname = hostname
        self.username = username
        self.password = password
        self.timeout = timeout

        if optional_args is None:
            optional_args = {}

        self._dest_file_system = optional_args.get("dest_file_system", None)

        # Netmiko possible arguments
        netmiko_argument_map = {
            "port": None,
            "secret": "",
            "verbose": False,
            "keepalive": 30,
            "global_delay_factor": 1,
            "use_keys": False,
            "key_file": None,
            "ssh_strict": False,
            "system_host_keys": False,
            "alt_host_keys": False,
            "alt_key_file": "",
            "ssh_config_file": None,
            "allow_agent": False,
        }

        # Allow for passing additional Netmiko arguments
        self.netmiko_optional_args = {}
        for k, v in netmiko_argument_map.items():
            try:
                self.netmiko_optional_args[k] = optional_args[k]
            except KeyError:
                pass

        self.port = optional_args.get("port", 22)
        self.device = None
        self.force_no_enable = optional_args.get("force_no_enable", False)

    def open(self):
        """Open a connection to the device."""
        self.device = ConnectHandler(
            device_type="cisco_s300",
            host=self.hostname,
            username=self.username,
            # BUG FIX: this argument was a redacted placeholder (`<PASSWORD>`),
            # which is a syntax error; the credential stored in __init__ is used.
            password=self.password,
            **self.netmiko_optional_args,
        )
        if not self.force_no_enable:
            self.device.enable()

    def _discover_file_system(self):
        """Ask Netmiko to autodetect the device's file system name."""
        try:
            return self.device._autodetect_fs()
        except Exception:
            msg = (
                "Netmiko _autodetect_fs failed (to work around specify "
                "dest_file_system in optional_args)."
            )
            raise CommandErrorException(msg)

    def close(self):
        """Close the connection to the device."""
        self.device.disconnect()

    def cli(self, commands):
        """Run a list of commands and return {command: output}."""
        output = {}
        try:
            for cmd in commands:
                output[cmd] = self.device.send_command(cmd)
            return output
        except (socket.error, EOFError) as e:
            raise ConnectionClosedException(str(e))

    def _send_command(self, command):
        """Wrapper for self.device.send.command().

        If command is a list will iterate through commands until valid command.
        """
        try:
            if isinstance(command, list):
                for cmd in command:
                    output = self.device.send_command(cmd)
                    if "% Invalid" not in output:
                        break
            else:
                output = self.device.send_command(command)
            return output.strip()
        except (socket.error, EOFError) as e:
            raise ConnectionClosedException(str(e))

    def _parse_uptime(self, uptime_str):
        """Parse an uptime string into number of seconds"""
        # NOTE(review): assumes the format "<days>,<HH>:<MM>:<SS>" where the
        # days field is a bare integer — confirm against actual device output.
        uptime_str = uptime_str.strip()
        days, timespec = uptime_str.split(",")

        hours, minutes, seconds = timespec.split(":")

        uptime_sec = (int(days) * 86400) + (int(hours) * 3600) + (int(minutes) * 60) + int(seconds)
        return uptime_sec

    def get_arp_table(self, vrf=""):
        """
        Get the ARP table, the age isn't readily available so we leave that out for now.

        vrf is needed for test - no support on s350
        """
        arp_table = []

        output = self._send_command("show arp")

        for line in output.splitlines():
            # A VLAN may not be set for the entry
            if "vlan" not in line:
                continue
            if len(line.split()) == 4:
                interface, ip, mac, _ = line.split()
            elif len(line.split()) == 5:
                if1, if2, ip, mac, _ = line.split()
                interface = "{} {}".format(if1, if2)
            elif len(line.split()) == 6:
                _, _, interface, ip, mac, _ = line.split()
            else:
                raise ValueError("Unexpected output: {}".format(line.split()))

            interface = canonical_interface_name(interface, s350_base_interfaces)

            entry = {
                "interface": interface,
                "mac": napalm.base.helpers.mac(mac),
                "ip": ip,
                "age": 0.0,
            }

            arp_table.append(entry)

        return arp_table

    def get_config(self, retrieve="all", full=False, sanitized=False):
        """
        get_config for S350. Since this firmware doesn't support a candidate
        configuration we leave it empty.
        """
        configs = {
            "startup": "",
            "running": "",
            "candidate": "",
        }

        if retrieve in ("all", "startup"):
            startup = self._send_command("show startup-config")
            configs["startup"] = self._get_config_filter(startup)

        if retrieve in ("all", "running"):
            # IOS supports "full" only on "show running-config"
            run_full = " detailed" if full else ""
            running = self._send_command("show running-config" + run_full)
            configs["running"] = self._get_config_filter(running)

        if sanitized:
            configs = self._get_config_sanitized(configs)

        return configs

    def _get_config_filter(self, config):
        # The output of get_config should be directly usable by load_replace_candidate()
        # remove header
        filter_strings = [
            r"(?sm)^config-file-header.*^@$",
        ]

        for ft in filter_strings:
            config = re.sub(ft, "", config)

        return config

    def _get_config_sanitized(self, configs):
        # Do not output sensitive information
        # use Cisco IOS filters
        configs = napalm.base.helpers.sanitize_configs(configs, C.CISCO_SANITIZE_FILTERS)
        # define my own filters
        s350_filters = {
            r"^(.* password) (\S+) (\S+) (.*)$": r"\1 \2 <removed> \4",
            r"^(snmp-server location) (\S+).*$": r"\1 <removed>",
        }
        configs = napalm.base.helpers.sanitize_configs(configs, s350_filters)

        return configs

    def get_facts(self):
        """Return a set of facts from the device."""
        serial_number, fqdn, os_version, hostname, domainname = ("Unknown",) * 5

        # Submit commands to the device.
        show_ver = self._send_command("show version")
        show_sys = self._send_command("show system")
        show_inv = self._send_command("show inventory")
        show_hosts = self._send_command("show hosts")
        show_int_st = self._send_command("show interfaces status")

        os_version = self._get_facts_parse_os_version(show_ver)

        # hostname
        hostname = self._get_facts_hostname(show_sys)
        # special case for SG500 fw v1.4.x
        if hostname == "Unknown":
            hostname = self._get_facts_hostname_from_config(
                self._send_command("show running-config")
            )

        # uptime
        uptime_str = self._get_facts_uptime(show_sys)
        uptime = self._parse_uptime(uptime_str)

        # serial_number and model
        inventory = self._get_facts_parse_inventory(show_inv)["1"]
        serial_number = inventory["sn"]
        model = inventory["pid"]

        # fqdn
        domainname = napalm.base.helpers.textfsm_extractor(self, "hosts", show_hosts)[0]
        domainname = domainname["domain_name"]
        if domainname == "Domain":
            domainname = "Unknown"
        if domainname != "Unknown" and hostname != "Unknown":
            fqdn = "{0}.{1}".format(hostname, domainname)

        # interface_list
        interfaces = []
        show_int_st = show_int_st.strip()
        # remove the header information
        show_int_st = re.sub(
            r"(^-.*$|^Port .*$|^Ch .*$)|^\s.*$|^.*Flow.*$", "", show_int_st, flags=re.M
        )
        for line in show_int_st.splitlines():
            if not line:
                continue
            interface = line.split()[0]
            interface = canonical_interface_name(interface, s350_base_interfaces)
            interfaces.append(str(interface))

        return {
            "fqdn": str(fqdn),
            "hostname": str(hostname),
            "interface_list": interfaces,
            "model": str(model),
            "os_version": str(os_version),
            "serial_number": str(serial_number),
            "uptime": uptime,
            "vendor": "Cisco",
        }

    def _get_facts_hostname_from_config(self, show_running):
        # special case for SG500 fw v1.4.x
        hostname = "Unknown"
        for line in show_running.splitlines():
            if line.startswith("hostname "):
                _, hostname = line.split("hostname")
                hostname = hostname.strip()
                break

        return hostname

    def _get_facts_hostname(self, show_sys):
        hostname = "Unknown"
        for line in show_sys.splitlines():
            if line.startswith("System Name:"):
                _, hostname = line.split("System Name:")
                hostname = hostname.strip()
                break

        return hostname

    def _get_facts_uptime(self, show_sys):
        i = 0
        syslines = []
        fields = []
        uptime_header_lineNo = None
        uptime_str = None

        for line in show_sys.splitlines():
            # All models except SG500 fw 1.4.x
            if line.startswith("System Up Time (days,hour:min:sec):"):
                _, uptime_str = line.split("System Up Time (days,hour:min:sec):")
                break
            # NOTE(review): re.M is passed as the positional *count* argument
            # here (count=8), not as flags — and r" *" also matches empty
            # strings. Likely intended: re.sub(r" +", " ", line). Confirm
            # against real device output before changing.
            line = re.sub(r" *", " ", line, re.M)
            line = line.strip()
            fields = line.split(" ")
            syslines.append(fields)
            if "Unit" in syslines[i] and "time" in syslines[i]:
                uptime_header_lineNo = i
            i += 1

        # SG500 fw 1.4.x
        if not uptime_str:
            uptime_str = syslines[uptime_header_lineNo + 2][1]

        return uptime_str

    def _get_facts_parse_inventory(self, show_inventory):
        """ inventory can list more modules/devices """
        # NOTE(review): in the re.sub calls below re.M is passed as the
        # positional *count* argument (count=8), not as flags — confirm the
        # intended behavior before changing.
        # make 1 module 1 line
        show_inventory = re.sub(r"\nPID", " PID", show_inventory, re.M)
        # delete empty lines
        show_inventory = re.sub(r"^\n", "", show_inventory, re.M)
        show_inventory = re.sub(r"\n\n", "", show_inventory, re.M)
        show_inventory = re.sub(r"\n\s*\n", r"\n", show_inventory, re.M)

        lines = show_inventory.splitlines()

        modules = {}
        for line in lines:
            match = re.search(
                r"""
                ^
                NAME:\s"(?P<name>\S+)"\s*
                DESCR:\s"(?P<descr>[^"]+)"\s*
                PID:\s(?P<pid>\S+)\s*
                VID:\s(?P<vid>.+\S)\s*
                SN:\s(?P<sn>\S+)\s*
                """,
                line,
                re.X,
            )
            module = match.groupdict()
            modules[module["name"]] = module

        if modules:
            return modules

    def _get_facts_parse_os_version(self, show_ver):
        # os_version
        # detect os ver > 2
        if re.search(r"^Active-image", show_ver):
            for line in show_ver.splitlines():
                # First version line is the active version
                if re.search(r"Version:", line):
                    _, os_version = line.split("Version: ")
                    break
        elif re.search(r"^SW version", show_ver):
            for line in show_ver.splitlines():
                if re.search(r"^SW version", line):
                    _, ver = line.split(" ")
                    os_version, _ = ver.split(" (")
                    break
        else:
            # show_ver = re.sub(r'^\n', '', show_ver, re.M)
            for line in show_ver.splitlines():
                # NOTE(review): same flags-as-count issue as in
                # _get_facts_uptime above.
                line = re.sub(r" *", " ", line, re.M)
                line = line.strip()
                line_comps = line.split(" ")
                if line_comps[0] == "1":
                    os_version = line_comps[1]
                    break

        return os_version

    def get_interfaces(self):
        """
        get_interfaces() implementation for S350
        """
        interfaces = {}

        show_status_output = self._send_command("show interfaces status")
        show_description_output = self._send_command("show interfaces description")

        # by documentation SG350
        show_jumbo_frame = self._send_command("show ports jumbo-frame")
        match = re.search(r"Jumbo frames are enabled", show_jumbo_frame, re.M)
        if match:
            mtu = 9000
        else:
            mtu = 1518

        mac = "0"
        for status_line in show_status_output.splitlines():
            if "Up" in status_line or "Down" in status_line:
                if "Po" in status_line:
                    interface, _, _, speed, _, _, link_state = status_line.split()
                else:
                    interface, _, _, speed, _, _, link_state, _, _ = status_line.split()

                # Since the MAC address for all the local ports are equal, get the address
                # from the first port and use it everywhere.
                if mac == "0":
                    show_system_output = self._send_command("show lldp local " + interface)
                    mac = show_system_output.splitlines()[0].split(":", maxsplit=1)[1].strip()

                if speed == "--":
                    is_enabled = False
                    speed = 0
                else:
                    is_enabled = True
                    speed = int(speed)

                is_up = link_state == "Up"

                # NOTE(review): description defaults to integer 0 (not "")
                # when no matching description line exists — confirm callers
                # tolerate the int before changing.
                for descr_line in show_description_output.splitlines():
                    description = 0
                    if descr_line.startswith(interface):
                        description = " ".join(descr_line.split()[1:])
                        break

                # last_flapped can not be get - setting to default
                entry = {
                    "is_up": is_up,
                    "is_enabled": is_enabled,
                    "speed": speed,
                    "mtu": mtu,
                    "last_flapped": -1.0,
                    "description": description,
                    "mac_address": napalm.base.helpers.mac(mac),
                }

                interface = canonical_interface_name(interface, s350_base_interfaces)
                interfaces[interface] = entry

        return interfaces

    def get_interfaces_ip(self):
        """Returns all configured interface IP addresses."""
        interfaces = {}
        show_ip_int = self._send_command("show ip interface")

        header = True  # cycle through header
        for line in show_ip_int.splitlines():
            if header:
                # last line of first header
                match = re.match(r"^---+ -+ .*$", line)
                if match:
                    header = False
                    fields_end = self._get_ip_int_fields_end(line)
                continue

            # next header, stop processing text
            if re.match(r"^---+ -+ .*$", line):
                break

            line_elems = self._get_ip_int_line_to_fields(line, fields_end)

            # only valid interfaces
            # in different firmwares the 'Status' field is always in last place
            if line_elems[len(line_elems) - 1] != "Valid":
                continue

            cidr = line_elems[0]
            interface = line_elems[1]

            ip = netaddr.IPNetwork(cidr)
            family = "ipv{0}".format(ip.version)
            interface = canonical_interface_name(interface, s350_base_interfaces)
            interfaces[interface] = {family: {str(ip.ip): {"prefix_length": ip.prefixlen}}}

        return interfaces

    def _get_ip_int_line_to_fields(self, line, fields_end):
        """ dynamic fields lengths """
        line_elems = {}
        index = 0
        f_start = 0
        for f_end in fields_end:
            line_elems[index] = line[f_start:f_end].strip()
            index += 1
            f_start = f_end
        return line_elems

    def _get_ip_int_fields_end(self, dashline):
        """ field lengths differ device to device; detect them on the horizontal line """
        fields_end = [m.start() for m in re.finditer(" ", dashline.strip())]
        # fields_position.insert(0,0)
        fields_end.append(len(dashline))

        return fields_end

    def get_lldp_neighbors(self):
        """get_lldp_neighbors implementation for s350"""
        neighbors = {}
        output = self._send_command("show lldp neighbors")

        header = True  # cycle through header
        local_port = ""  # keep previous context - multiline sysname
        remote_port = ""
        remote_name = ""
        for line in output.splitlines():
            if header:
                # last line of header
                match = re.match(r"^--------- -+ .*$", line)
                if match:
                    header = False
                    fields_end = self._get_lldp_neighbors_fields_end(line)
                continue

            line_elems = self._get_lldp_neighbors_line_to_fields(line, fields_end)

            # info overflows to the next line
            if line_elems[0] == "" or line_elems[4] == "" or line_elems[5] == "":
                # complete overflown fields
                local_port = local_port + line_elems[0]
                remote_port = remote_port + line_elems[2]
                remote_name = remote_name + line_elems[3]
            # then reuse old values and rewrite the previous entry
            else:
                local_port = line_elems[0]
                remote_port = line_elems[2]
                remote_name = line_elems[3]

            local_port = canonical_interface_name(local_port, s350_base_interfaces)

            neighbor = {
                "hostname": remote_name,
                "port": remote_port,
            }
            neighbor_list = [
                neighbor,
            ]
            neighbors[local_port] = neighbor_list

        return neighbors

    def _get_lldp_neighbors_line_to_fields(self, line, fields_end):
        """ dynamic fields lengths """
        line_elems = {}
        index = 0
        f_start = 0
        for f_end in fields_end:
            line_elems[index] = line[f_start:f_end].strip()
            index += 1
            f_start = f_end
        return line_elems

    def _get_lldp_neighbors_fields_end(self, dashline):
        """ field lengths differ device to device; detect them on the horizontal line """
        fields_end = [m.start() for m in re.finditer(" ", dashline)]
        fields_end.append(len(dashline))

        return fields_end

    def _get_lldp_line_value(self, line):
        """
        Safe-ish method to get the value from an 'lldp neighbors $IF' line.
        """
        try:
            value = line.split(":")[1:][0].strip()
        # BUG FIX: indexing an empty list raises IndexError, not KeyError,
        # so the "N/A" fallback could never trigger.
        except IndexError:
            value = "N/A"

        return value

    def get_lldp_neighbors_detail(self, interface=""):
        """
        get_lldp_neighbors_detail() implementation for s350
        """
        details = {}

        # First determine all interfaces with valid LLDP neighbors
        for local_port in self.get_lldp_neighbors().keys():
            if interface:
                if interface == local_port:
                    entry = self._get_lldp_neighbors_detail_parse(local_port)
                    local_port = canonical_interface_name(local_port, s350_base_interfaces)
                    details[local_port] = [
                        entry,
                    ]
            else:
                entry = self._get_lldp_neighbors_detail_parse(local_port)
                local_port = canonical_interface_name(local_port, s350_base_interfaces)
                details[local_port] = [
                    entry,
                ]

        return details

    def _get_lldp_neighbors_detail_parse(self, local_port):
        # Set defaults, just in case the remote fails to provide a field.
        (
            remote_port_id,
            remote_port_description,
            remote_chassis_id,
            remote_system_name,
            remote_system_description,
            remote_system_capab,
            remote_system_enable_capab,
        ) = ("N/A",) * 7

        output = self._send_command("show lldp neighbors {}".format(local_port))

        for line in output.splitlines():
            if line.startswith("Port ID"):
                remote_port_id = line.split()[-1]
            elif line.startswith("Device ID"):
                remote_chassis_id = line.split()[-1]
            elif line.startswith("Port description"):
                remote_port_description = self._get_lldp_line_value(line)
            elif line.startswith("System Name"):
                remote_system_name = self._get_lldp_line_value(line)
            elif line.startswith("System description"):
                remote_system_description = self._get_lldp_line_value(line)
            elif line.startswith("Capabilities"):
                caps = self._get_lldp_neighbors_detail_capabilities_parse(line)

        remote_port_id = canonical_interface_name(remote_port_id, s350_base_interfaces)

        entry = {
            "parent_interface": "N/A",
            "remote_port": remote_port_id,
            "remote_port_description": remote_port_description,
            "remote_chassis_id": remote_chassis_id,
            "remote_system_name": remote_system_name,
            "remote_system_description": remote_system_description,
            "remote_system_capab": caps,
            "remote_system_enable_capab": caps,
        }

        return entry

    def _get_lldp_neighbors_detail_capabilities_parse(self, line):
        # Only the enabled capabilities are displayed.
        try:
            # Split a line like 'Capabilities: Bridge, Router, Wlan-Access-Point'
            capabilities = line.split(":")[1:][0].split(",")
        # BUG FIX: indexing an empty list raises IndexError, not KeyError.
        except IndexError:
            capabilities = []

        caps = []
        # For all capabilities, except 'Repeater', the shorthand
        # is the first character.
        for cap in capabilities:
            cap = cap.strip()
            if cap == "Repeater":
                caps.append("r")
            else:
                caps.append(cap[0])

        return caps

    def get_ntp_servers(self):
        """Returns NTP servers."""
        ntp_servers = {}
        output = self._send_command("show sntp status")

        servers = re.findall(r"^Server\s*:\s*(\S+)\s*.*$", output, re.M)
        for server in servers:
            ntp_servers[server] = {}

        return ntp_servers

    def is_alive(self):
        """Returns an indication of the state of the connection."""
        null = chr(0)

        if self.device is None:
            return {"is_alive": False}

        # Send a NUL byte to keep the connection alive.
        try:
            self.device.write_channel(null)
            return {"is_alive": self.device.remote_conn.transport.is_active()}
        except (socket.error, EOFError):
            # If we couldn't send it, the connection is not available.
            return {"is_alive": False}

        # If we made it here, assume the worst.
        return {"is_alive": False}

    @property
    def dest_file_system(self):
        # First ensure we have an open connection.
        if self.device and self._dest_file_system is None:
            self._dest_file_system = self._discover_file_system()
        return self._dest_file_system
|
import pytest
import numpy as np
from qcodes.dataset.param_spec import ParamSpec
from qcodes.dataset.measurements import Measurement
from qcodes.tests.instrument_mocks import ArraySetPointParam, Multi2DSetPointParam
from qcodes.instrument.parameter import Parameter
# pylint: disable=unused-import
from qcodes.tests.dataset.temporary_databases import dataset, experiment
# pylint: enable=unused-import
@pytest.fixture
def scalar_dataset(dataset):
    """Dataset with three independent numeric params and one dependent param,
    filled with 1000 deterministic integer rows."""
    n_params = 3
    n_rows = 10**3
    params_indep = [ParamSpec(f'param_{i}',
                              'numeric',
                              label=f'param_{i}',
                              unit='V')
                    for i in range(n_params)]
    params = params_indep + [ParamSpec(f'param_{n_params}',
                                       'numeric',
                                       label=f'param_{n_params}',
                                       unit='Ohm',
                                       depends_on=params_indep)]
    for p in params:
        dataset.add_parameter(p)
    dataset.mark_started()
    # BUG FIX: np.int is a deprecated alias of the builtin int (removed in
    # NumPy 1.24); the builtin produces identical values.
    dataset.add_results([{p.name: int(n_rows*10*pn+i)
                          for pn, p in enumerate(params)}
                         for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset
@pytest.fixture
def scalar_dataset_with_nulls(dataset):
    """
    A very simple dataset. A scalar is varied, and two parameters are measured
    one by one
    """
    setpoint = ParamSpec('setpoint', 'numeric')
    first = ParamSpec('first_value', 'numeric', depends_on=(setpoint,))
    second = ParamSpec('second_value', 'numeric', depends_on=(setpoint,))
    for spec in (setpoint, first, second):
        dataset.add_parameter(spec)
    dataset.mark_started()
    # Each result row holds only one of the two measured params, leaving
    # NULLs for the other.
    dataset.add_results([{setpoint.name: 0, first.name: 1},
                         {setpoint.name: 0, second.name: 2}])
    dataset.mark_completed()
    yield dataset
@pytest.fixture(scope="function",
                params=["array", "numeric"])
def array_dataset(experiment, request):
    """Dataset holding one ArraySetPointParam result, stored either as
    'array' or 'numeric' depending on the fixture parametrization."""
    measurement = Measurement()
    array_param = ArraySetPointParam()
    measurement.register_parameter(array_param, paramtype=request.param)
    with measurement.run() as saver:
        saver.add_result((array_param, array_param.get()))
    try:
        yield saver.dataset
    finally:
        # Close the DB connection even if the consuming test fails.
        saver.dataset.conn.close()
@pytest.fixture(scope="function",
                params=["array", "numeric"])
def array_dataset_with_nulls(experiment, request):
    """
    A dataset where two arrays are measured, one as a function
    of two other (setpoint) arrays, the other as a function of just one
    of them
    """
    measurement = Measurement()
    ptype = request.param
    measurement.register_custom_parameter('sp1', paramtype=ptype)
    measurement.register_custom_parameter('sp2', paramtype=ptype)
    measurement.register_custom_parameter('val1', paramtype=ptype,
                                          setpoints=('sp1', 'sp2'))
    measurement.register_custom_parameter('val2', paramtype=ptype,
                                          setpoints=('sp1',))
    with measurement.run() as saver:
        first_setpoints = np.arange(0, 5)
        second_setpoints = np.arange(5, 10)
        # val2 is only recorded against sp1, leaving NULLs for sp2.
        saver.add_result(('sp1', first_setpoints),
                         ('sp2', second_setpoints),
                         ('val1', np.ones(5)))
        saver.add_result(('sp1', first_setpoints),
                         ('val2', np.zeros(5)))
    try:
        yield saver.dataset
    finally:
        saver.dataset.conn.close()
@pytest.fixture(scope="function",
                params=["array", "numeric"])
def multi_dataset(experiment, request):
    """Dataset holding one Multi2DSetPointParam result, stored either as
    'array' or 'numeric' depending on the fixture parametrization."""
    measurement = Measurement()
    multi_param = Multi2DSetPointParam()
    measurement.register_parameter(multi_param, paramtype=request.param)
    with measurement.run() as saver:
        saver.add_result((multi_param, multi_param.get()))
    try:
        yield saver.dataset
    finally:
        saver.dataset.conn.close()
@pytest.fixture(scope="function")
def array_in_scalar_dataset(experiment):
    """Array parameter measured as a function of a scalar setpoint,
    stored with the 'array' paramtype."""
    measurement = Measurement()
    sweep_param = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(sweep_param)
    measurement.register_parameter(array_param, setpoints=(sweep_param,),
                                   paramtype='array')
    with measurement.run() as saver:
        for value in range(1, 10):
            sweep_param.set(value)
            saver.add_result((sweep_param, sweep_param.get()),
                             (array_param, array_param.get()))
    try:
        yield saver.dataset
    finally:
        saver.dataset.conn.close()
@pytest.fixture(scope="function")
def varlen_array_in_scalar_dataset(experiment):
    """Array parameter whose setpoint length varies with the scalar value."""
    measurement = Measurement()
    sweep_param = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(sweep_param)
    measurement.register_parameter(array_param, setpoints=(sweep_param,),
                                   paramtype='array')
    # Fixed seed keeps the random payloads reproducible across runs.
    np.random.seed(0)
    with measurement.run() as saver:
        for value in range(1, 10):
            sweep_param.set(value)
            array_param.setpoints = (np.arange(value),)
            saver.add_result((sweep_param, sweep_param.get()),
                             (array_param, np.random.rand(value)))
    try:
        yield saver.dataset
    finally:
        saver.dataset.conn.close()
@pytest.fixture(scope="function")
def array_in_scalar_dataset_unrolled(experiment):
    """Array parameter swept against a scalar setpoint, but stored with the
    'numeric' paramtype so each array is unrolled into scalar rows."""
    measurement = Measurement()
    sweep_param = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(sweep_param)
    measurement.register_parameter(array_param, setpoints=(sweep_param,),
                                   paramtype='numeric')
    with measurement.run() as saver:
        for value in range(1, 10):
            sweep_param.set(value)
            saver.add_result((sweep_param, sweep_param.get()),
                             (array_param, array_param.get()))
    try:
        yield saver.dataset
    finally:
        saver.dataset.conn.close()
@pytest.fixture(scope="function",
                params=["array", "numeric"])
def array_in_str_dataset(experiment, request):
    """Array parameter swept against a text-typed scalar setpoint."""
    measurement = Measurement()
    text_param = Parameter('textparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(text_param, paramtype='text')
    measurement.register_parameter(array_param, setpoints=(text_param,),
                                   paramtype=request.param)
    with measurement.run() as saver:
        for label in ('A', 'B', 'C'):
            text_param.set(label)
            saver.add_result((text_param, text_param.get()),
                             (array_param, array_param.get()))
    try:
        yield saver.dataset
    finally:
        saver.dataset.conn.close()
@pytest.fixture
def standalone_parameters_dataset(dataset):
    """Like scalar_dataset, but the dependent parameter depends on only the
    first independent one, leaving the other two standalone."""
    n_params = 3
    n_rows = 10**3
    params_indep = [ParamSpec(f'param_{i}',
                              'numeric',
                              label=f'param_{i}',
                              unit='V')
                    for i in range(n_params)]
    params = params_indep + [ParamSpec(f'param_{n_params}',
                                       'numeric',
                                       label=f'param_{n_params}',
                                       unit='Ohm',
                                       depends_on=params_indep[0:1])]
    for p in params:
        dataset.add_parameter(p)
    dataset.mark_started()
    # BUG FIX: np.int is a deprecated alias of the builtin int (removed in
    # NumPy 1.24); the builtin produces identical values.
    dataset.add_results([{p.name: int(n_rows*10*pn+i)
                          for pn, p in enumerate(params)}
                         for i in range(n_rows)])
    dataset.mark_completed()
    yield dataset
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: batch_norm"""
import akg
import akg.tvm
import akg.utils as utils
from akg.utils.format_transform import get_shape
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor,
                        akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor,
                        akg.tvm.tensor.Tensor,
                        float, (bool, type(None)), (dict, type(None)), (str, type(None)))
def BatchNorm(data, mean, var, gamma, beta, eps, polyhedral=True, attrs=None, target=utils.CCE):
    """
    Batch normalization.

    Computes gamma * (data - mean) / sqrt(var + eps) + beta elementwise,
    broadcasting the per-channel statistics over the batch/spatial axes.

    Args:
        data (tvm.tensor.Tensor): Tensor of type float16, float32.
        mean (tvm.tensor.Tensor): Tensor of type float16, float32 as mean.
        var (tvm.tensor.Tensor): Tensor of type float16, float32 as variance.
        gamma (tvm.tensor.Tensor): Tensor of type float16, float32 for scaling.
        beta (tvm.tensor.Tensor): Tensor of type float16, float32 for bias.
        eps (float): A small float added to variance to avoid dividing by zero.
        polyhedral (bool): Whether to schedule with polyhedral.
        attrs (dict): Schedule attributes for op. When polyhedral is False,
            attrs["tile"] supplies the split factor per output axis.

    Returns:
        outs (tvm.tensor.Tensor): Tensor for normalized, scaled, shifted data.
        When polyhedral is False, returns (outs, comp_func) where comp_func
        builds the manual schedule.

    Supported Platforms:
        'Ascend'
    """
    # Validate shapes and require a floating-point dtype on every input.
    for tensor in (data, mean, var, gamma, beta):
        utils.check_shape(get_shape(tensor))
        utils.ops_dtype_check(tensor.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    shape = get_shape(data)
    dtype = data.dtype
    if len(shape) != 4 and len(shape) != 5:
        raise RuntimeError("Only support 4-dim OR 5-dim batch norm!")
    inp_eps = akg.tvm.const(eps, dtype=dtype)
    # var + eps
    veps = akg.lang.ascend.vadds(var, inp_eps)
    # sqrt(var + eps) is realized below as exp(0.5 * log(var + eps)).
    power_num = akg.tvm.const(0.5, dtype=data.dtype)
    if len(shape) == 5:
        # 5-D path: indexing below assumes data is (N, C1, H, W, C0) with the
        # per-channel tensors shaped (1, C1, 1, 1, C0) — presumably the
        # Ascend NC1HWC0 layout; TODO confirm against callers.
        _, channel_1, _, _, channel_0 = data.shape
        new_shape = (channel_1, channel_0)
        # log(var + eps)
        vlog_t = akg.tvm.compute(new_shape,
                                 lambda c1, c0:
                                 akg.tvm.log(veps[0, c1, 0, 0, c0]),
                                 name="vlog_t")
        # 0.5 * log(var + eps)
        vmuls_t = akg.tvm.compute(
            new_shape, lambda c1, c0: vlog_t[c1, c0] * power_num, name="vmuls_t")
        # exp(0.5 * log(var + eps)) == sqrt(var + eps)
        sveps = akg.tvm.compute(new_shape,
                                lambda c1, c0: akg.tvm.exp(vmuls_t[c1, c0]),
                                name="sveps")
        # -mean, so subtraction can be expressed as an addition below.
        mean2 = akg.lang.ascend.vmuls(mean, akg.tvm.const(-1, data.dtype))
        # data - mean
        dmean = akg.tvm.compute(
            shape,
            lambda b, c1, h, w, c0:
            data[b, c1, h, w, c0] + mean2[0, c1, 0, 0, c0],
            name="dmean")
        # 1 / sqrt(var + eps)
        rsveps = akg.tvm.compute(
            new_shape,
            lambda c1, c0: akg.tvm.const(1, data.dtype) / sveps[c1, c0],
            name="rsveps")
        # (data - mean) / sqrt(var + eps)
        dmsve = akg.tvm.compute(
            shape,
            lambda b, c1, h, w, c0: dmean[b, c1, h, w, c0] * rsveps[c1, c0],
            name="dmsve")
        # ... * gamma
        dmsveg = akg.tvm.compute(
            shape,
            lambda b, c1, h, w, c0:
            dmsve[b, c1, h, w, c0] * gamma[0, c1, 0, 0, c0],
            name="dmsveg")
        # ... + beta
        outs = akg.tvm.compute(
            shape,
            lambda b, c1, h, w, c0:
            dmsveg[b, c1, h, w, c0] + beta[0, c1, 0, 0, c0],
            name="output")
    else:
        # 4-D path: same pipeline with (N, C, H, W) data and per-channel
        # tensors indexed as (1, C, 1, 1).
        _, channel, _, _ = data.shape
        vlog_t = akg.tvm.compute(
            (channel,), lambda c: akg.tvm.log(veps[0, c, 0, 0]), name="vlog_t")
        vmuls_t = akg.tvm.compute(
            (channel,), lambda c: vlog_t[c] * power_num, name="vmuls_t")
        # sqrt(var + eps) via exp(0.5 * log(...))
        sveps = akg.tvm.compute(
            (channel,), lambda c: akg.tvm.exp(vmuls_t[c]), name="sveps")
        mean2 = akg.lang.ascend.vmuls(mean, akg.tvm.const(-1, data.dtype))
        dmean = akg.tvm.compute(shape,
                                lambda b, c, h, w:
                                data[b, c, h, w] + mean2[0, c, 0, 0],
                                name="dmean")
        rsveps = akg.tvm.compute((channel,),
                                 lambda c:
                                 akg.tvm.const(1, data.dtype) / sveps[c],
                                 name="rsveps")
        dmsve = akg.tvm.compute(shape,
                                lambda b, c, h, w:
                                dmean[b, c, h, w] * rsveps[c], name="dmsve")
        dmsveg = akg.tvm.compute(shape,
                                 lambda b, c, h, w:
                                 dmsve[b, c, h, w] * gamma[0, c, 0, 0],
                                 name="dmsveg")
        outs = akg.tvm.compute(shape,
                               lambda b, c, h, w:
                               dmsveg[b, c, h, w] + beta[0, c, 0, 0],
                               name="output")
    if polyhedral:
        # Polyhedral scheduling: return the compute graph only.
        return outs

    def comp_func(s):
        """schedule function: manual tiling + UB staging for all stages."""
        # Stage all inputs into the on-chip unified buffer (UB).
        data_ub = s.cache_read(data, "local.UB", [dmean])
        mean_ub = s.cache_read(mean, "local.UB", [mean2])
        gamma_ub = s.cache_read(gamma, "local.UB", [dmsveg])
        var_ub = s.cache_read(var, "local.UB", [veps])
        beta_ub = s.cache_read(beta, "local.UB", [outs])
        outs_ub = s.cache_write(outs, "local.UB")
        # Split each output axis by the caller-provided tile sizes.
        split_axis = {}
        for i in range(len(attrs["tile"])):
            split_axis["axis" + str(i)] = s[outs].split(outs.op.axis[i], attrs["tile"][i])
        split_axis_sorted = sorted(split_axis.items())
        # Anchor every intermediate at the innermost outer-loop axis of the
        # final split, so the whole pipeline executes per tile.
        s[data_ub].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[mean_ub].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[var_ub].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[gamma_ub].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[beta_ub].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[dmsveg].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[dmsve].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[rsveps].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[dmean].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[mean2].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[sveps].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[vmuls_t].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[vlog_t].compute_at(s[outs], split_axis_sorted[-1][1][0])
        s[veps].compute_at(s[outs], split_axis_sorted[-1][1][0])
        # Keep every intermediate in UB scope.
        s[veps].set_scope("local.UB")
        s[vlog_t].set_scope("local.UB")
        s[vmuls_t].set_scope("local.UB")
        s[sveps].set_scope("local.UB")
        s[mean2].set_scope("local.UB")
        s[dmean].set_scope("local.UB")
        s[rsveps].set_scope("local.UB")
        s[dmsve].set_scope("local.UB")
        s[dmsveg].set_scope("local.UB")
        s[outs_ub].compute_at(s[outs], split_axis_sorted[-1][1][0])
    return outs, comp_func
|
<gh_stars>1-10
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError
from django.shortcuts import render, get_object_or_404, redirect
from math import ceil
from .forms import TextForm, CasefileForm, UploadCase, AddCasesForm
from .models import Case, CaseFile, RawFile
from .permissions import check_ownership, check_casefile_ownership, owned_cases, owned_casefiles
from .searchtools import basic_search
from .imports import convert_docx
def index(request):
    """Render the site landing page."""
    return render(request, 'cases/index.html', {})
# shows the user a singular case via its case id
# offers the option to edit the case
def case(request, case_id):
    """Display a single case; expose the edit option when the user owns it."""
    return render(request, 'cases/case.html', {
        'specific_case': Case.objects.get(pk=case_id),
        'can_edit': check_ownership(request, case_id),
    })
def casefile(request, casefile_id):
    """Display a single casefile by primary key."""
    context = {'specific_casefile': CaseFile.objects.get(pk=casefile_id)}
    return render(request, 'cases/casefile.html', context)
# basic search function of site, displays cases matching query
# replaces %20 (the stand-in for a space in a url) with an actual space
# for the database query to avoid querying the database with nonsense
def search(request, search_term, page_number):
    """Show cases matching *search_term* plus an add-to-casefile form.

    A valid POST adds the selected cases to the chosen casefile and
    redirects back to the same search page.
    """
    users_casefiles = owned_casefiles(request)
    context = basic_search(search_term, page_number)
    context['users_casefiles'] = users_casefiles
    if request.method == 'POST':
        form = AddCasesForm(request.POST)
        if form.is_valid():
            target = CaseFile.objects.get(title=form.cleaned_data['title'])
            for selected in form.cleaned_data['cases']:
                target.cases.add(selected.id)
            return redirect('search', search_term=search_term, page_number=page_number)
    else:
        form = AddCasesForm()
    context['form'] = form
    return render(request, 'cases/display_cases.html', context)
# shows the currently logged-in user all of their cases
# displays them ten pages at a time - eventually will add ability to
# have the user set how many results per page they want globally
@login_required
def my_cases(request, page_number):
    """Show the logged-in user's cases, paginated *results_per_page* at a time.

    A valid POST adds selected cases to the chosen casefile and redirects
    back to the current page.
    """
    results_per_page = 20
    users_cases = owned_cases(request)
    users_casefiles = owned_casefiles(request)
    number_of_cases = len(users_cases)
    number_of_pages = ceil(number_of_cases / results_per_page)
    if page_number != 0:
        result_number = page_number * results_per_page
        lower_bound_results = (page_number - 1) * results_per_page
    else:
        result_number = results_per_page
        # BUG FIX: lower_bound_results was left undefined on page 0, raising
        # NameError when slicing the cases below; page 0 starts at index 0.
        lower_bound_results = 0
    next_page = page_number + 1
    previous_page = page_number - 1
    if request.method == 'POST':
        form = AddCasesForm(request.POST)
        if form.is_valid():
            casefile = CaseFile.objects.get(title=form.cleaned_data['title'])
            for cases in form.cleaned_data['cases']:
                casefile.cases.add(cases.id)
            return redirect('my_cases', page_number=page_number)
    else:
        form = AddCasesForm()
    context = {
        'latest_cases': users_cases[lower_bound_results:result_number],
        'total_pages': number_of_pages,
        'current_page': page_number,
        'next_page': next_page,
        'previous_page': previous_page,
        'users_casefiles': users_casefiles,
        'form': form,
    }
    return render(request, 'cases/display_cases.html', context)
# shows a list of all the user's owned casefiles
# contains link to allow user to add details to or edit fields
@login_required
def my_casefiles(request):
    """List every casefile owned by the logged-in user, with edit links."""
    return render(request, 'cases/display_casefiles.html',
                  {'users_casefiles': owned_casefiles(request)})
# creates a new case using similar code to edit, using the
# ckeditor plugin, and saves it as a new case to the database
@login_required
def create_case(request):
    """Create a new case from the rich-text editor form and show it.

    GET renders an empty form; a valid POST stores the case and redirects
    to its detail view.
    """
    if request.method == 'POST':
        form = TextForm(request.POST)
        if form.is_valid():
            title = form.cleaned_data['title']
            text = form.cleaned_data['text']
            author = request.user
            new_case = Case.objects.create_case(title, text, author)
            # BUG FIX: the 'case' URL takes 'case_id' (see case() and edit());
            # passing 'casefile_id' made this redirect fail to reverse.
            return redirect('case', case_id=new_case.pk)
    else:
        form = TextForm()
    return render(request, 'cases/edit_cases.html', {'form': form})
# creates an empty casefile and description for a casefile
# user must edit in additional data
@login_required
def create_casefile(request):
    """Create an empty casefile (title + description only).

    Additional details are edited in afterwards by the user.
    """
    if request.method == 'POST':
        form = CasefileForm(request.POST)
        if form.is_valid():
            new_casefile = CaseFile.objects.create_casefile(
                form.cleaned_data['title'],
                form.cleaned_data['description'],
                request.user,
            )
            return redirect('casefile', casefile_id=new_casefile.pk)
    else:
        form = CasefileForm()
    return render(request, 'cases/create_casefile.html', {'form': form})
# takes and uploads a file, then calls the process_case url to complete the file
@login_required
def upload_case(request):
    """Accept a file upload, store it, then hand off to process_case."""
    if request.method == 'POST':
        form = UploadCase(request.POST, request.FILES)
        if form.is_valid():
            new_file = RawFile.objects.upload_file(
                form.cleaned_data['title'],
                form.cleaned_data['file'],
                request.user,
            )
            return redirect('process_case', file_id=new_file.id)
    else:
        form = UploadCase()
    return render(request, 'cases/upload_case.html', {'form': form})
# uses the mammoth library to convert an uploaded docx file into html
# deletes the file after attempting to convert it, to conserve disk space
@login_required
def process_case(request, file_id):
    """Convert an uploaded docx into a new case, then open it for editing.

    The raw upload is deleted after conversion to conserve disk space.
    """
    raw = RawFile.objects.get(id=file_id)
    html_text = convert_docx(raw.file.url)[0]
    new_case = Case.objects.create_case(raw.title, html_text, request.user)
    raw.delete()
    return redirect('edit', case_id=new_case.id)
# basic case text editor which uses the TinyMCE plugin within the form
# form will be prefilled with the previous case text, allowing user to edit
@login_required
def edit(request, case_id):
    """Edit a case's title and rich text; only the owner may edit.

    GET renders the form pre-filled with the current content; a valid POST
    saves the changes and redirects to the case view; an invalid POST falls
    through and re-renders the bound form with its errors. Non-owners get
    PermissionDenied.
    """
    can_edit = check_ownership(request, case_id)
    specific_case = Case.objects.get(pk=case_id)
    default_text = specific_case.text
    default_title = specific_case.title
    if can_edit:
        if request.method == 'POST':
            form = TextForm(request.POST)
            if form.is_valid():
                new_title = form.cleaned_data['title']
                new_text = form.cleaned_data['text']
                # Re-fetch and overwrite the stored fields.
                old_case = Case.objects.get(pk=case_id)
                old_case.text = new_text
                old_case.title = new_title
                old_case.save()
                return redirect('case', case_id=specific_case.pk)
        else:
            # Pre-fill the editor with the existing content.
            form = TextForm(initial={'title': default_title,
                                     'text': default_text})
        context = {
            'form': form,
            'case_id': specific_case.pk
        }
        return render(request, 'cases/edit_cases.html', context)
    else:
        raise PermissionDenied
|
# coding: utf-8
import os, time, pickle, random, sys, math
from datetime import datetime
import numpy as np
from time import localtime, strftime
import logging, scipy
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
import matplotlib.pyplot as plt
import hickle as hkl
from skimage.measure import compare_mse
from skimage.measure import compare_ssim
#GPU setting and Global parameters
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[1]  # GPU id to run on
#checkpoint = "checkpoint"
checkpoint = sys.argv[2]  # directory for model checkpoints
graph_dir = sys.argv[3]   # directory for TensorBoard summaries
# BUG FIX: sys.argv entries are strings; block_size feeds the tf.placeholder
# shapes below, which need integer dimensions (batch_size was already cast).
block_size = int(sys.argv[4])
tl.global_flag['mode']='DFHiC'
tl.files.exists_or_mkdir(checkpoint)
tl.files.exists_or_mkdir(graph_dir)
batch_size = int(sys.argv[5]) #128
lr_init = 1e-4
beta1 = 0.9
#n_epoch_init = 100
n_epoch_init = 1
n_epoch = 500
lr_decay = 0.1
decay_every = int(n_epoch / 2)
ni = int(np.sqrt(batch_size))
def calculate_psnr(mat1,mat2):
    """Peak signal-to-noise ratio (dB) of mat2 against reference mat1,
    using mat1's value range as the signal peak."""
    value_range = np.max(mat1) - np.min(mat1)
    mse = compare_mse(mat1, mat2)
    return 10 * np.log10((value_range ** 2) / mse)
def calculate_ssim(mat1,mat2):
    """Structural similarity index between the two matrices, using mat1's
    value range."""
    value_range = np.max(mat1) - np.min(mat1)
    return compare_ssim(mat1, mat2, data_range=value_range)
# Training data: paired low-resolution / high-resolution Hi-C matrices.
train_data=np.load("preprocess/data/GM12878/train_data_raw_ratio16.npz")
lr_mats_full=train_data['train_lr']
hr_mats_full=train_data['train_hr']
# First 95% of samples for training, remaining 5% held out for validation.
lr_mats_train = lr_mats_full[:int(0.95*len(lr_mats_full))]
hr_mats_train = hr_mats_full[:int(0.95*len(hr_mats_full))]
lr_mats_valid = lr_mats_full[int(0.95*len(lr_mats_full)):]
hr_mats_valid = hr_mats_full[int(0.95*len(hr_mats_full)):]
# zssr
def DFHiC(t_matrix, is_train=False, reuse=False):
    """Build the DFHiC network: stacked dilated convolutions with residual
    (elementwise-add) connections, plus a global residual from the input.

    Args:
        t_matrix: input placeholder, shape [batch, block_size, block_size, 1].
        is_train: accepted for API symmetry but unused by this architecture.
        reuse: reuse variables of an existing 'DFHiC' scope (for the
            inference copy of the graph).

    Returns:
        The final tensorlayer layer; its .outputs is input + learned residual.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None # tf.constant_initializer(value=0.0)
    # g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("DFHiC", reuse=reuse) as vs:
        x = InputLayer(t_matrix, name='in')
        ################## multi_dialted_cnn ##########################
        n_0 = Conv2d(x, 32, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n32s1/c1/0')
        n_1 = Conv2d(n_0, 32, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n32s1/c1/1')
        n_2 = Conv2d(n_1, 32, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n32s1/c1/2')
        # Residual add around the two dilated 32-channel convs.
        n_3 = ElementwiseLayer([n_0, n_2], tf.add, name='add')
        n_4 = Conv2d(n_3, 64, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c1/3')
        n_5 = Conv2d(n_4, 64, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c1/4')
        n_6 = ElementwiseLayer([n_4, n_5], tf.add, name='add')
        n_7 = Conv2d(n_6, 128, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n128s1/c1/5')
        n_8 = Conv2d(n_7, 128, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n128s1/c1/6')
        n_9 = ElementwiseLayer([n_7, n_8], tf.add, name='add')
        n_10 = Conv2d(n_9, 256, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n256s1/c1/7')
        n_11 = Conv2d(n_10, 256, (3, 3), (1, 1), dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='n256s1/c1/8')
        # 1x1 conv projects back to a single channel (the residual image).
        n = Conv2d(n_11, 1, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n1s1/c/m')
        # Global residual: output = input + predicted residual.
        n = ElementwiseLayer([n, x], tf.add, name='add')
        return n
# Input/target placeholders for low-res and high-res Hi-C blocks.
t_matrix = tf.placeholder('float32', [None, block_size, block_size, 1], name='input_hic_matrix')
t_target_matrix = tf.placeholder('float32', [None, block_size, block_size, 1], name='t_target_hic_matrix')
# Two graph instances sharing weights: one for training, one for inference.
net = DFHiC(t_matrix, is_train=True, reuse=False)
net_test = DFHiC(t_matrix, is_train=False, reuse=True)
# Mean absolute (L1) reconstruction loss.
l1_loss = tl.cost.absolute_difference_error(net.outputs, t_target_matrix, is_mean=True)
g_vars = tl.layers.get_variables_with_name('DFHiC', True, True)
with tf.variable_scope('learning_rate'):
    # Non-trainable variable so the LR can be reassigned during training.
    lr_v = tf.Variable(lr_init, trainable=False)
g_optim = tf.train.AdamOptimizer(lr_v, beta1=beta1).minimize(l1_loss, var_list=g_vars)
#summary variables
merged_summary = tf.summary.scalar("l1_loss", l1_loss)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
#record variables for TensorBoard visualization
summary_writer=tf.summary.FileWriter('%s'%graph_dir,graph=tf.get_default_graph())
# Early-stopping bookkeeping (best validation MSE seen so far).
wait=0
patience=20
best_mse_val = np.inf
best_epoch=0
for epoch in range(0, n_epoch + 1):
    ## update learning rate
    if epoch != 0 and (epoch % decay_every == 0):
        #new_lr_decay = lr_decay**(epoch // decay_every)
        # NOTE(review): decay factor is pinned to 1, so the LR never actually
        # decays; the original schedule is commented out above.
        new_lr_decay=1
        sess.run(tf.assign(lr_v, lr_init * new_lr_decay))
        log = " ** new learning rate: %f (for DFHiC)" % (lr_init * new_lr_decay)
        print(log)
    elif epoch == 0:
        sess.run(tf.assign(lr_v, lr_init))
        log = " ** init lr: %f decay_every_init: %d, lr_decay: %f (for DFHiC)" % (lr_init, decay_every, lr_decay)
        print(log)
    epoch_time = time.time()
    # NOTE(review): total_loss is never accumulated or read afterwards.
    total_loss = 0
    # Mini-batch pass over the training split.
    for idx in range(0, len(hr_mats_train)-batch_size, batch_size):
        b_mats_input = lr_mats_train[idx:idx + batch_size]
        b_mats_target = hr_mats_train[idx:idx + batch_size]
        errM, _ = sess.run([l1_loss, g_optim], {t_matrix: b_mats_input, t_target_matrix: b_mats_target})
        print("Epoch [%2d/%2d] time: %4.4fs, mse: %.6f" %
              (epoch, n_epoch, time.time() - epoch_time, errM))
    #validation
    hr_mats_pre = np.zeros(hr_mats_valid.shape)
    for i in range(hr_mats_pre.shape[0]//batch_size):
        hr_mats_pre[batch_size*i:batch_size*(i+1)] = sess.run(net_test.outputs, {t_matrix: lr_mats_valid[batch_size*i:batch_size*(i+1)]})
        # NOTE(review): as written this tail assignment runs on every
        # iteration; only the final pass matters, so it could be hoisted
        # out of the loop.
        hr_mats_pre[batch_size*(i+1):] = sess.run(net_test.outputs, {t_matrix: lr_mats_valid[batch_size*(i+1):]})
    # Median per-sample MSE over the validation split.
    mse_val=np.median(list(map(compare_mse,hr_mats_pre[:,:,:,0],hr_mats_valid[:,:,:,0])))
    if mse_val < best_mse_val:
        wait=0
        best_mse_val = mse_val
        #save the model with minimal MSE in validation samples
        tl.files.save_npz(net.all_params, name=checkpoint + '/{}_best.npz'.format(tl.global_flag['mode']), sess=sess)
        best_epoch=epoch
        # np.savetxt(checkpoint + 'best_epoch.txt',np.array(best_epoch))
    else:
        wait+=1
        if wait >= patience:
            # NOTE(review): early stopping only logs here — sys.exit() is
            # commented out below, so training continues to n_epoch.
            print("Early stopping! The validation median mse is %.6f\n"%best_mse_val)
            #sys.exit()
    log = "[*] Epoch: [%2d/%2d] time: %4.4fs, valid_mse:%.8f\n" % (epoch, n_epoch, time.time() - epoch_time,mse_val)
    print(log)
    #record variables for TensorBoard visualization
    # Uses the last training mini-batch of this epoch for the summary.
    summary=sess.run(merged_summary,{t_matrix: b_mats_input, t_target_matrix: b_mats_target})
    summary_writer.add_summary(summary, epoch)
print("epoch")
print(best_epoch) |
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client-side fork interop tests as a unit test."""
import six
import subprocess
import sys
import threading
import unittest
from grpc._cython import cygrpc
from tests.fork import methods
# New instance of multiprocessing.Process using fork without exec can and will
# hang if the Python process has any other threads running. This includes the
# additional thread spawned by our _runner.py class. So in order to test our
# compatibility with multiprocessing, we first fork+exec a new process to ensure
# we don't have any conflicting background threads.
_CLIENT_FORK_SCRIPT_TEMPLATE = """if True:
import os
import sys
from grpc._cython import cygrpc
from tests.fork import methods
cygrpc._GRPC_ENABLE_FORK_SUPPORT = True
os.environ['GRPC_POLL_STRATEGY'] = 'epoll1'
methods.TestCase.%s.run_test({
'server_host': 'localhost',
'server_port': %d,
'use_tls': False
})
"""
_SUBPROCESS_TIMEOUT_S = 30
@unittest.skipUnless(
    sys.platform.startswith("linux"),
    "not supported on windows, and fork+exec networking blocked on mac")
@unittest.skipUnless(six.PY2, "https://github.com/grpc/grpc/issues/18075")
class ForkInteropTest(unittest.TestCase):
    """Runs each fork test case in a freshly fork+exec'd client subprocess
    against an interop server started in setUp (see module comment)."""

    def setUp(self):
        """Start the interop server in a subprocess and read back its port."""
        start_server_script = """if True:
            import sys
            import time

            import grpc
            from src.proto.grpc.testing import test_pb2_grpc
            from tests.interop import service as interop_service
            from tests.unit import test_common

            server = test_common.test_server()
            test_pb2_grpc.add_TestServiceServicer_to_server(
                interop_service.TestService(), server)
            port = server.add_insecure_port('[::]:0')
            server.start()
            print(port)
            sys.stdout.flush()
            while True:
                time.sleep(1)
        """
        self._server_process = subprocess.Popen(
            [sys.executable, '-c', start_server_script],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Kill the server if it fails to report a port within the budget.
        timer = threading.Timer(_SUBPROCESS_TIMEOUT_S,
                                self._server_process.kill)
        try:
            timer.start()
            self._port = int(self._server_process.stdout.readline())
        except ValueError:
            # readline() returned something non-numeric (e.g. empty after kill).
            raise Exception('Failed to get port from server')
        finally:
            timer.cancel()

    def testConnectivityWatch(self):
        self._verifyTestCase(methods.TestCase.CONNECTIVITY_WATCH)

    def testCloseChannelBeforeFork(self):
        self._verifyTestCase(methods.TestCase.CLOSE_CHANNEL_BEFORE_FORK)

    def testAsyncUnarySameChannel(self):
        self._verifyTestCase(methods.TestCase.ASYNC_UNARY_SAME_CHANNEL)

    def testAsyncUnaryNewChannel(self):
        self._verifyTestCase(methods.TestCase.ASYNC_UNARY_NEW_CHANNEL)

    def testBlockingUnarySameChannel(self):
        self._verifyTestCase(methods.TestCase.BLOCKING_UNARY_SAME_CHANNEL)

    def testBlockingUnaryNewChannel(self):
        self._verifyTestCase(methods.TestCase.BLOCKING_UNARY_NEW_CHANNEL)

    def testInProgressBidiContinueCall(self):
        self._verifyTestCase(methods.TestCase.IN_PROGRESS_BIDI_CONTINUE_CALL)

    def testInProgressBidiSameChannelAsyncCall(self):
        self._verifyTestCase(
            methods.TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL)

    def testInProgressBidiSameChannelBlockingCall(self):
        self._verifyTestCase(
            methods.TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL)

    def testInProgressBidiNewChannelAsyncCall(self):
        self._verifyTestCase(
            methods.TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL)

    def testInProgressBidiNewChannelBlockingCall(self):
        self._verifyTestCase(
            methods.TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL)

    def tearDown(self):
        self._server_process.kill()

    def _verifyTestCase(self, test_case):
        """Run one fork test case in a client subprocess; assert exit code 0."""
        script = _CLIENT_FORK_SCRIPT_TEMPLATE % (test_case.name, self._port)
        process = subprocess.Popen(
            [sys.executable, '-c', script],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Backstop timer in case communicate() has no timeout support.
        timer = threading.Timer(_SUBPROCESS_TIMEOUT_S, process.kill)
        try:
            timer.start()
            try:
                out, err = process.communicate(timeout=_SUBPROCESS_TIMEOUT_S)
            except TypeError:
                # The timeout parameter was added in Python 3.3.
                out, err = process.communicate()
            except subprocess.TimeoutExpired:
                # NOTE(review): subprocess.TimeoutExpired does not exist on
                # Python 2 (this class is six.PY2-only) — confirm this clause
                # is reachable on the intended interpreter.
                process.kill()
                raise RuntimeError('Process failed to terminate')
        finally:
            timer.cancel()
        self.assertEqual(
            0, process.returncode,
            'process failed with exit code %d (stdout: %s, stderr: %s)' %
            (process.returncode, out, err))
# Run the suite directly; verbosity=2 prints each test name as it runs.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
<gh_stars>0
#!/usr/bin/env python
"""
models.py
"""
from __future__ import division
from __future__ import print_function
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from lr import LRSchedule
# --
# Model
class GSSupervised(nn.Module):
    """Supervised GraphSAGE-style model.

    Samples multi-hop neighborhoods, applies a stack of aggregator layers,
    and classifies each node with a linear head. Also owns its optimizer and
    learning-rate schedule.
    """

    def __init__(self,
                 input_dim,
                 n_nodes,
                 n_classes,
                 layer_specs,
                 aggregator_class,
                 prep_class,
                 sampler_class, adj, train_adj,
                 lr_init=0.01,
                 weight_decay=0.0,
                 lr_schedule='constant',
                 epochs=10):
        super(GSSupervised, self).__init__()

        # --
        # Define network

        # Samplers: training uses the train-only adjacency, validation the full one.
        self.train_sampler = sampler_class(adj=train_adj)
        self.val_sampler = sampler_class(adj=adj)
        # One sampling function per layer, with per-layer sample counts.
        self.train_sample_fns = [partial(self.train_sampler, n_samples=s['n_train_samples']) for s in layer_specs]
        self.val_sample_fns = [partial(self.val_sampler, n_samples=s['n_val_samples']) for s in layer_specs]

        # Prep: maps raw node features to the aggregators' input space.
        self.prep = prep_class(input_dim=input_dim, n_nodes=n_nodes)
        input_dim = self.prep.output_dim

        # Network
        agg_layers = []
        for spec in layer_specs:
            agg = aggregator_class(
                input_dim=input_dim,
                output_dim=spec['output_dim'],
                activation=spec['activation'],
            )
            agg_layers.append(agg)
            input_dim = agg.output_dim  # May not be the same as spec['output_dim']
        # use sequential when there is no need to use external input between layer.
        self.agg_layers = nn.Sequential(*agg_layers)
        self.fc = nn.Linear(input_dim, n_classes, bias=True)

        # --
        # Define optimizer

        self.lr_scheduler = partial(getattr(LRSchedule, lr_schedule), lr_init=lr_init)
        self.lr = self.lr_scheduler(0.0)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr, weight_decay=weight_decay)

    def forward(self, ids, feats, train=True):
        """Return class logits for the nodes in *ids*.

        Args:
            ids: node index tensor for the mini-batch.
            feats: full node-feature matrix, or None when the prep layer
                supplies features itself.
            train: selects the training vs validation neighbor samplers.
        """
        # Sample neighbors
        sample_fns = self.train_sample_fns if train else self.val_sample_fns

        has_feats = feats is not None
        tmp_feats = feats[ids] if has_feats else None
        all_feats = [self.prep(ids, tmp_feats, layer_idx=0)]
        for layer_idx, sampler_fn in enumerate(sample_fns):
            ids = sampler_fn(ids=ids).contiguous().view(-1)
            tmp_feats = feats[ids] if has_feats else None
            all_feats.append(self.prep(ids, tmp_feats, layer_idx=layer_idx + 1))

        # Sequentially apply layers, per original (little weird, IMO)
        # Each iteration reduces length of array by one
        for agg_layer in self.agg_layers.children():
            all_feats = [agg_layer(all_feats[k], all_feats[k + 1]) for k in range(len(all_feats) - 1)]

        assert len(all_feats) == 1, "len(all_feats) != 1"
        out = F.normalize(all_feats[0], dim=1)  # ?? Do we actually want this? ... Sometimes ...
        return self.fc(out)

    def set_progress(self, progress):
        """Update the learning rate according to training progress in [0, 1]."""
        self.lr = self.lr_scheduler(progress)
        LRSchedule.set_lr(self.optimizer, self.lr)

    def train_step(self, ids, feats, targets, loss_fn):
        """One optimization step on a mini-batch; returns the predictions."""
        self.optimizer.zero_grad()
        preds = self(ids, feats, train=True)
        loss = loss_fn(preds, targets.squeeze())
        loss.backward()
        # BUG FIX: torch.nn.utils.clip_grad_norm (no underscore) has been
        # deprecated since PyTorch 0.4 and removed in later releases; the
        # in-place clip_grad_norm_ is the supported, behaviorally identical API.
        torch.nn.utils.clip_grad_norm_(self.parameters(), 5)
        self.optimizer.step()
        return preds
|
<filename>ocd_backend/transformers/gedeputeerdestaten.py<gh_stars>10-100
from datetime import datetime
from urllib.parse import urljoin
import requests
from ocd_backend import settings
from ocd_backend.app import celery_app
from ocd_backend.log import get_source_logger
from ocd_backend.models import *
from ocd_backend.transformers import BaseTransformer
from ocd_backend.utils.misc import strip_scheme
log = get_source_logger('gedeputeerdestaten')
class GedeputeerdeStatenTransformer(BaseTransformer):
    """Transformer for 'gedeputeerde staten' (provincial executive) items."""

    def __init__(self, *args, **kwargs):
        # Dutch month name -> zero-padded month number, used by get_meeting_date.
        months = ('januari', 'februari', 'maart', 'april', 'mei', 'juni',
                  'juli', 'augustus', 'september', 'oktober', 'november', 'december')
        self.date_mapping = {name: '%02d' % (index + 1) for index, name in enumerate(months)}

    def get_meeting_date(self, nl_date_str):
        """Parse a Dutch date string such as '3 januari 2020' into a datetime."""
        text = nl_date_str
        for month_name, month_number in self.date_mapping.items():
            text = text.replace(month_name, month_number)
        text = text.strip().replace(' ', '-')
        # Zero-pad a single-digit day so strptime's fixed '%d-%m-%Y' format matches.
        if len(text) < 10:
            text = '0' + text
        return datetime.strptime(text, '%d-%m-%Y')

    def _get_documents_as_media_urls(self, details):
        """Collect note/URL dicts for every document link on the detail page."""
        page_url = details.xpath('//meta[contains(@property, "og:url")]/@content')[0]
        return [
            {
                'note': ''.join(anchor.xpath('.//text()')),
                'original_url': urljoin(page_url, anchor.xpath('./@href')[0])
            }
            for anchor in details.xpath('//a[contains(@class, "importLink")]')
        ]
@celery_app.task(bind=True, base=GedeputeerdeStatenTransformer, autoretry_for=settings.AUTORETRY_EXCEPTIONS, retry_backoff=True)
def gs_meeting_item(self, content_type, raw_item, canonical_iri, cached_path, **kwargs):
    """Transform a raw 'gedeputeerde staten' item into a Meeting with attachments."""
    original_item = self.deserialize_item(content_type, raw_item)
    self.source_definition = kwargs['source_definition']

    source_defaults = {
        'source': self.source_definition['key'],
        'supplier': 'greenvalley',
        'collection': 'meeting',
        'canonical_iri': canonical_iri,
        'cached_path': cached_path,
    }

    # The item id is itself the URL of the detail page.
    original_id = str(original_item.xpath(self.source_definition['item_id_xpath'])[0])
    try:
        content = requests.get(original_id).content
    except Exception as e:
        log.error(str(e))
        content = ''

    # NOTE(review): requests returns bytes, so a successful-but-empty response
    # (b'') does NOT trigger this guard; only the except path above does. Confirm.
    if content == '':
        # BUG FIX: was log.erorr (typo), which raised AttributeError on this path.
        log.error('Could not get detailed gedeputeerde staten page')
        return

    province = TopLevelOrganization(self.source_definition['allmanak_id'],
                                    source=self.source_definition['key'],
                                    supplier='allmanak',
                                    collection='province')

    details = self.deserialize_item('application/html', content)

    event = Meeting(original_id, **source_defaults)
    event.has_organization_name = province
    raw_datum_str = ''.join(
        details.xpath('//div[contains(@class, "type-meta")]//text()')).split(':')[-1]
    clean_date_str = self.get_meeting_date(raw_datum_str)
    event.start_date = event.end_date = clean_date_str
    event.last_discussed_at = event.start_date
    event.name = ''.join(details.xpath('//h1//text()'))
    event.classification = ['GS-Besluit']
    event.description = ''.join(details.xpath('//div[contains(@class, "type-inhoud")]//text()'))
    event.organization = province
    event.status = EventConfirmed

    event.attachment = []
    for doc in self._get_documents_as_media_urls(details):
        # It seems that there is no shorter identifier available than the URL
        attachment = MediaObject(strip_scheme(doc['original_url']),
                                 source=self.source_definition['key'],
                                 supplier=self.source_definition.get('supplier', self.source_definition['key']),
                                 collection='attachment')
        attachment.canonical_iri = doc['original_url']
        attachment.has_organization_name = province
        attachment.identifier_url = doc['original_url']  # Trick to use the self url for enrichment
        attachment.original_url = doc['original_url']
        attachment.name = doc['note']
        attachment.is_referenced_by = event
        attachment.last_discussed_at = event.start_date
        event.attachment.append(attachment)

    event.save()
    return event
|
<reponame>MatthewGerber/rl
import logging
from typing import Dict, Optional
from rlai.actions import Action
from rlai.agents.mdp import MdpAgent
from rlai.environments.mdp import ModelBasedMdpEnvironment
from rlai.gpi.dynamic_programming.evaluation import evaluate_v_pi, evaluate_q_pi
from rlai.gpi.dynamic_programming.improvement import improve_policy_with_v_pi
from rlai.gpi.improvement import improve_policy_with_q_pi
from rlai.meta import rl_text
from rlai.states.mdp import MdpState
@rl_text(chapter=4, page=80)
def iterate_policy_v_pi(
        agent: MdpAgent,
        environment: ModelBasedMdpEnvironment,
        theta: float,
        update_in_place: bool
) -> Dict[MdpState, float]:
    """
    Run policy iteration on an agent using state-value estimates.

    :param agent: MDP agent. Must contain a policy `pi` that has been fully initialized with instances of
    `rlai.states.mdp.ModelBasedMdpState`.
    :param environment: Model-based MDP environment to evaluate.
    :param theta: See `evaluate_v_pi`.
    :param update_in_place: See `evaluate_v_pi`.
    :return: Final state-value estimates.
    """

    v_pi: Optional[Dict[MdpState, float]] = None
    iteration_num = 0

    # Alternate full evaluation and greedy improvement until the improvement
    # step leaves the policy unchanged in every state.
    while True:

        logging.info(f'Policy iteration {iteration_num + 1}')

        # Evaluate the current policy, warm-starting from the previous estimates.
        v_pi, _ = evaluate_v_pi(
            agent=agent,
            environment=environment,
            theta=theta,
            num_iterations=None,
            update_in_place=update_in_place,
            initial_v_S=v_pi
        )

        num_states_improved = improve_policy_with_v_pi(
            agent=agent,
            environment=environment,
            v_pi=v_pi
        )

        iteration_num += 1

        if num_states_improved <= 0:
            break

    logging.info(f'Policy iteration terminated after {iteration_num} iteration(s).')

    return v_pi
@rl_text(chapter=4, page=80)
def iterate_policy_q_pi(
        agent: MdpAgent,
        environment: ModelBasedMdpEnvironment,
        theta: float,
        update_in_place: bool
) -> Dict[MdpState, Dict[Action, float]]:
    """
    Run policy iteration on an agent using state-action value estimates.

    :param agent: MDP agent. Must contain a policy `pi` that has been fully initialized with instances of
    `rlai.states.mdp.ModelBasedMdpState`.
    :param environment: Model-based MDP environment to evaluate.
    :param theta: See `evaluate_q_pi`.
    :param update_in_place: See `evaluate_q_pi`.
    :return: Final state-action value estimates.
    """

    q_pi: Optional[Dict[MdpState, Dict[Action, float]]] = None
    iteration_num = 0

    # Alternate full evaluation and greedy improvement until the improvement
    # step leaves the policy unchanged in every state.
    while True:

        logging.info(f'Policy iteration {iteration_num + 1}')

        # Evaluate the current policy, warm-starting from the previous estimates.
        q_pi, _ = evaluate_q_pi(
            agent=agent,
            environment=environment,
            theta=theta,
            num_iterations=None,
            update_in_place=update_in_place,
            initial_q_S_A=q_pi
        )

        num_states_improved = improve_policy_with_q_pi(
            agent=agent,
            q_pi=q_pi
        )

        iteration_num += 1

        if num_states_improved <= 0:
            break

    logging.info(f'Policy iteration terminated after {iteration_num} iteration(s).')

    return q_pi
@rl_text(chapter=4, page=82)
def iterate_value_v_pi(
        agent: MdpAgent,
        environment: ModelBasedMdpEnvironment,
        theta: float,
        evaluation_iterations_per_improvement: int,
        update_in_place: bool
) -> Dict[MdpState, float]:
    """
    Run dynamic programming value iteration on an agent using state-value estimates.

    :param agent: MDP agent. Must contain a policy `pi` that has been fully initialized with instances of
    `rlai.states.mdp.ModelBasedMdpState`.
    :param environment: Model-based MDP environment to evaluate.
    :param theta: See `evaluate_v_pi`.
    :param evaluation_iterations_per_improvement: Number of policy evaluation iterations to execute for each iteration
    of improvement (e.g., passing 1 results in Equation 4.10).
    :param update_in_place: See `evaluate_v_pi`.
    :return: Final state-value estimates.
    """

    v_pi: Optional[Dict[MdpState, float]] = None
    iteration_num = 0

    while True:

        logging.info(f'Value iteration {iteration_num + 1}')

        # Truncated evaluation: theta=None disables the inner convergence check,
        # so exactly the requested number of sweeps runs per improvement.
        v_pi, delta = evaluate_v_pi(
            agent=agent,
            environment=environment,
            theta=None,
            num_iterations=evaluation_iterations_per_improvement,
            update_in_place=update_in_place,
            initial_v_S=v_pi
        )

        improve_policy_with_v_pi(
            agent=agent,
            environment=environment,
            v_pi=v_pi
        )

        iteration_num += 1

        # Terminate once the evaluation sweep changed no value by more than theta.
        if delta < theta:
            break

    logging.info(f'Value iteration of v_pi terminated after {iteration_num} iteration(s).')

    return v_pi
@rl_text(chapter=4, page=84)
def iterate_value_q_pi(
        agent: MdpAgent,
        environment: ModelBasedMdpEnvironment,
        theta: float,
        evaluation_iterations_per_improvement: int,
        update_in_place: bool
) -> Dict[MdpState, Dict[Action, float]]:
    """
    Run value iteration on an agent using state-action value estimates.

    :param agent: MDP agent. Must contain a policy `pi` that has been fully initialized with instances of
    `rlai.states.mdp.ModelBasedMdpState`.
    :param environment: Model-based MDP environment to evaluate.
    :param theta: See `evaluate_q_pi`.
    :param evaluation_iterations_per_improvement: Number of policy evaluation iterations to execute for each iteration
    of improvement.
    :param update_in_place: See `evaluate_q_pi`.
    :return: Final state-action value estimates.
    """

    q_pi: Optional[Dict[MdpState, Dict[Action, float]]] = None
    iteration_num = 0

    while True:

        logging.info(f'Value iteration {iteration_num + 1}')

        # Truncated evaluation: theta=None disables the inner convergence check,
        # so exactly the requested number of sweeps runs per improvement.
        q_pi, delta = evaluate_q_pi(
            agent=agent,
            environment=environment,
            theta=None,
            num_iterations=evaluation_iterations_per_improvement,
            update_in_place=update_in_place,
            initial_q_S_A=q_pi
        )

        improve_policy_with_q_pi(
            agent=agent,
            q_pi=q_pi
        )

        iteration_num += 1

        # Terminate once the evaluation sweep changed no value by more than theta.
        if delta < theta:
            break

    logging.info(f'Value iteration of q_pi terminated after {iteration_num} iteration(s).')

    return q_pi
|
import random
import requests
try:
import simplejson as json
except ImportError:
import json
import urllib3
import logging
from urllib.parse import urljoin
from pin_py.utils.settings import HEADERS_DEFAULT, PROXIES_LIST
log = logging.getLogger('HTTP')
def get_instance_for_request():
    """Return a requests Session whose HTTP(S) connection pools are enlarged."""
    session = requests.Session()
    big_pool_adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000)
    for scheme in ('http://', 'https://'):
        session.mount(scheme, big_pool_adapter)
    return session
def check_status_code(response, status_code):
    """Return the response's status code when available, else the fallback.

    :param response: response object (or None).
    :param status_code: fallback value returned when no status can be read.
    """
    # BUG FIX: the original used `if response:`. requests.Response.__bool__ is
    # False for 4xx/5xx responses, so the status code was never extracted for
    # exactly the error responses this helper exists to report.
    if response is not None:
        try:
            status_code = response.status_code
        except AttributeError:
            # Object without a status_code attribute: keep the fallback.
            pass
    return status_code
def store_http_information(url, error_info, status_code=None):
    """Record an HTTP failure for monitoring.

    Currently a no-op stub: the Elasticsearch-backed implementation below is
    commented out. Callers may safely invoke it on every request failure.

    :param url: URL that failed.
    :param error_info: exception class name (callers pass type(ex).__name__).
    :param status_code: optional HTTP status code of the failed response.
    """
    pass
    # from contentstudio.models.elastic.monitor import HttpMonitor
    # domain_url = urlparse(url).netloc
    # http_item = HttpMonitor(url=url,
    #                         domain=domain_url,
    #                         error_info=error_info,
    #                         created_at=pendulum.now('UTC'))
    # if status_code:
    #     http_item.status_code = status_code
    # http_item.save()
def get_random_public_proxy():
    """Pick a random public proxy URL.

    NOTE(review): the selection logic below is commented out, so this function
    currently only logs and implicitly returns None — callers that expect a
    proxy URL string (e.g. Requests.set_proxy) will receive None. Confirm
    whether the Mongo-backed implementation should be restored.
    """
    log.info('Loading random public proxy')
    # from contentstudio.models.mongo.model import Proxies
    # proxy_address = Proxies.random_proxy()
    # try:
    #     proxy = 'http://{0}'.format(proxy_address[0].ip_address)
    # except TypeError:
    #     proxy = 'http://{0}'.format(list(proxy_address)[0].ip_address)
    # return proxy
def retry_without_proxy(url, timeout, headers):
    """Fetch *url* directly (no proxy); on SSL errors retry once without
    certificate verification. Failures are recorded and None is returned."""
    response = None
    failure = None
    try:
        response = requests.get(url, timeout=timeout, headers=headers)
    except requests.exceptions.SSLError:
        # Certificate problem: retry once with verification disabled.
        try:
            response = requests.get(url, timeout=timeout, headers=headers, verify=False)
        except Exception as exc:
            failure = exc
    except Exception as exc:
        failure = exc
    if failure is not None:
        store_http_information(url, type(failure).__name__, check_status_code(response, None))
    return response
class Requests():
    """
    Thin wrapper around `requests` that mirrors the last response's attributes
    (status_code, content, text, url, headers) onto the instance.
    """

    def __init__(self, fine_proxy=None, public_proxy=False):
        """
        :param fine_proxy: used for the social analyzer
        :param public_proxy: when True, route proxied requests through a random
            public proxy.
        """
        self.set_proxy(public_proxy)
        self.text = None
        self.status_code = 0
        self.content = None
        self.url = None
        self.headers = {}
        self.fine_proxy = fine_proxy
        self.public_proxy = public_proxy

    def set_proxy(self, public_proxy):
        """Define the proxy mapping used by subsequent get()/post() calls.

        BUG FIX: the original never assigned self.proxy in either branch, so
        any call with use_proxy=True crashed with AttributeError. self.proxy is
        now always defined; None means "no proxy".
        """
        proxy = get_random_public_proxy() if public_proxy else None
        # NOTE(review): get_random_public_proxy currently returns None (its body
        # is commented out), so self.proxy stays None until that is restored.
        if proxy:
            self.proxy = {
                "http": proxy,
                "https": proxy.replace('http://', 'https://')
            }
        else:
            self.proxy = None

    def get(self, url, use_proxy=False, headers=HEADERS_DEFAULT, timeout=60, backconnect=False):
        """GET *url*, normalizing scheme-less URLs and optionally using the proxy."""
        # url validation
        if not url.startswith('http://') and not url.startswith('https://'):
            if url.startswith('//'):
                url = 'http:' + url
            elif url.startswith('/'):
                # NOTE(review): yields 'http:/foo' (single slash) — looks wrong
                # but preserved as-is; confirm with callers before changing.
                url = 'http:/' + url
            else:
                url = 'http://' + url

        # changing the use_proxy to fine_proxy meanwhile we get the issue resolve from the d4networks.
        if use_proxy:
            if self.public_proxy:
                response = get_instance_for_request().get(url, timeout=timeout, proxies=self.proxy,
                                                          headers=headers)
            else:
                try:
                    response = get_instance_for_request().get(url, timeout=timeout, proxies=self.proxy,
                                                              headers=headers)
                except (urllib3.exceptions.ProtocolError,
                        requests.exceptions.SSLError,
                        urllib3.exceptions.ReadTimeoutError,
                        requests.exceptions.ProxyError,
                        requests.exceptions.ConnectionError,
                        requests.exceptions.ReadTimeout,
                        requests.exceptions.TooManyRedirects):
                    # On any transport error, retry once without the proxy.
                    response = retry_without_proxy(url, timeout, headers)
        else:
            response = None
            try:
                response = get_instance_for_request().get(url, timeout=timeout,
                                                          headers=headers)
            except requests.exceptions.SSLError:
                # Certificate problem: retry once with verification disabled.
                try:
                    response = requests.get(url, timeout=timeout, headers=headers, verify=False)
                except Exception as ex:
                    store_http_information(url, type(ex).__name__)
            except Exception as ex:
                store_http_information(url, type(ex).__name__)

        self.set_response(response)
        return response

    def post(self, url, body=None, use_proxy=False, headers=HEADERS_DEFAULT):
        """POST *body* to *url*; returns self with response attrs mirrored."""
        if use_proxy:
            response = get_instance_for_request().post(url, data=body, proxies=self.proxy, headers=headers)
        else:
            response = get_instance_for_request().post(url, data=body, headers=headers)
        self.set_response(response)
        return self

    def set_response(self, response):
        """Mirror the response's main attributes onto this instance.

        NOTE(review): `if response:` is False for 4xx/5xx (requests' Response
        truthiness), so error responses are not mirrored — confirm intent.
        """
        if response:
            self.status_code = response.status_code
            self.content = response.content
            self.text = response.text
            self.url = response.url
            self.headers = response.headers
class SocialRequests(object):
    """Small helper for calling the Facebook Graph API with a stored token."""

    def __init__(self, access_token):
        self.access_token = access_token

    def send(self, path):
        """GET a Graph API v2.8 path (must already contain a query string)."""
        tokenized_path = path + '&access_token=%s' % self.access_token
        full_url = urljoin('https://graph.facebook.com/v2.8/', tokenized_path)
        print(full_url)
        response = Requests().get(full_url)
        return json.loads(response.text)

    def fb_pageid_request(self, path):
        """Fetch basic page metadata for the page id/name in *path*."""
        page_url = ("https://graph.facebook.com/v2.7/" + path +
                    "?fields=link,name,description,fan_count,category,username,picture&access_token=" +
                    self.access_token + "&format=json")
        response = Requests().get(page_url)
        return json.loads(response.text)
class RequestConnection():
    """requests.Session wrapper with an enlarged pool and proxy/method options."""

    def __init__(self):
        self._pool_size()

    def _pool_size(self):
        """Increase default pool size."""
        session = requests.Session()
        pooled_adapter = requests.adapters.HTTPAdapter(pool_connections=1000, pool_maxsize=1000)
        for scheme in ('http://', 'https://'):
            session.mount(scheme, pooled_adapter)
        self.requests = session

    def perform_request(self, url, **kwargs):
        """Perform a request and return the response.

        kwargs may include `proxy=True` (use a random proxy) and
        `method='POST'` (otherwise GET).
        """
        url = self._url_join(url)
        if kwargs.get('proxy') == True:
            kwargs['proxies'] = self._random_proxy()
            kwargs.pop('proxy')  # remove proxy if present
        if kwargs.get('method') == 'POST':
            kwargs.pop('method')
            return self.requests.post(url, **kwargs)
        try:
            return self.requests.get(url, **kwargs)
        except (requests.exceptions.SSLError,
                urllib3.exceptions.ReadTimeoutError,
                requests.exceptions.ProxyError,
                requests.exceptions.ConnectionError,
                requests.exceptions.ReadTimeout,
                requests.exceptions.TooManyRedirects):
            # On transport errors, retry once directly without the proxy.
            return retry_without_proxy(url, 60, HEADERS_DEFAULT)

    def _random_proxy(self):
        """Random proxy dict"""
        chosen = random.choice(PROXIES_LIST)
        return {
            "http": chosen,
            "https": chosen.replace('http://', 'https://')
        }

    def _url_join(self, url):
        """Prefix scheme-less URLs with http://."""
        if url.startswith(('http://', 'https://')):
            return url
        return 'http://' + url
|
# -*- coding: utf-8 -*-
# Module for loading .nxs files from SIXS beamline @ SOLEIL
# To be used together with the dictionnary 'alias_dict.txt'
# Code from <NAME> @ Soleil Synchrotron
# Modified 02052019 by <NAME> @ CNRS IM2NP: PEP8 + removed unused functions
"""
nxsReady.
This module contains Classes and functions to load data at SIXS beamline.
"""
import tables
import numpy
import pickle
# Informational banner emitted at import time: users must pass the path of the
# 'alias_dict.txt' dictionary where a spec file name would normally be given.
print("importing nxsReady")
print("'alias_dict.txt' path expected in 'specfile_name' parameter")
print("You can copy it from /Lib/site-packages/bcdi/preprocessing/")
class PrefParameters:
    """Display-preference parameters for naming dataset nodes."""

    def __init__(self):
        # Index of the selected naming strategy (defaults to "my suggestion").
        self.inamedisplay = 0
        # Human-readable naming strategies, indexed by ``inamedisplay``.
        self.namedisplays = [
            "my suggestion",
            "short name",
            "short and family name",
            "full name",
            "nxs name",
        ]
class DataSet:
    """
    Class dealing with datasets.

    It reads the file and stores it in an object, from this object we can retrieve
    the data to use it: MyFileObject=nxsRead.DataSet(path/filename, filename)
    """

    # Defines a class holding the .nxs file-related parameters:
    # - longname is the full path name
    # - shortname should be the file name
    # - alias_dict: path to the pickled alias dictionary, which should be located
    #   in the root directory of the experiment
    def __init__(
        self, longname, shortname, alias_dict, datafilter=None, pref=None, scan="FLY"
    ):
        # The alias_dict argument is a path; load the pickled mapping from it.
        alias_dict = pickle.load(open(alias_dict, "rb"))
        if pref is None:
            pref = PrefParameters()
        self.shortname = shortname
        self.THRESHOLD = 0
        # shift: number of leading points dropped from each aliased column
        # (FLY and HCS scans produce one spurious first point).
        shift = 0
        if scan == "FLY":
            shift = 1
        if scan == "SBS":
            shift = 0
        if scan == "HCS":
            shift = 1
        try:
            fichier = tables.open_file(longname, "r")
            self.nodedatasizes = []  # list of data array lengths
            for leaf in fichier.list_nodes("/")[0].scan_data:
                self.nodedatasizes.append(leaf.shape[0])
            self.npts = max(self.nodedatasizes)
            # we select only nodes of the same size, smaller arrays (e.g. of size 1)
            # are let aside here it generate the attributes of the DataSet class by
            # defining their type
            self.nodenames = []  # node names (ex data_01)
            self.nodelongnames = []  # node comprehensible name AKA complete
            # (ex: i14-c-cx2/ex/diff-uhv-k/position)
            self.nodenicknames = []
            # shortening of the long name AKA the last part of longname
            self.alias = []
            self.data = numpy.empty(0)  # empty table creation
            self.waveL = fichier.list_nodes("/")[0].SIXS.Monochromator.wavelength[0]
            self.energymono = fichier.list_nodes("/")[0].SIXS.Monochromator.energy[0]
            # end_time may be stored as a scalar or a 1-element array depending
            # on the file version.
            # NOTE(review): .tostring() is deprecated in numpy (use .tobytes()) —
            # confirm before changing, as downstream code may expect bytes here.
            if fichier.list_nodes("/")[0].end_time.shape == ():
                self.end_time = fichier.list_nodes("/")[0].end_time.read().tostring()
            if fichier.list_nodes("/")[0].end_time.shape == (1,):
                self.end_time = fichier.list_nodes("/")[0].end_time[0]
            # here we assign the values to the attributes previously generated
            for leaf in fichier.list_nodes("/")[0].scan_data:
                nodelongname = ""
                nodenickname = ""
                if len(leaf.shape) == 1:
                    if leaf.shape[0] == self.npts:
                        self.nodenames.append(leaf.name)
                        try:
                            nodelongname = leaf.attrs.long_name.decode("UTF-8")
                        except tables.exceptions.NoSuchNodeError:
                            # No long_name attribute: derive a name from the
                            # leaf's repr (last path component, lowercased).
                            nodelongname = (
                                str(leaf)
                                .split()[0]
                                .split("/")[-1]
                                .split("_")[-1]
                                .lower()
                            )
                        if len(nodelongname) == 0:
                            nodelongname = leaf.name  # if no name keep nxs file name
                        self.nodelongnames.append(nodelongname)
                        self.data = numpy.concatenate((self.data, leaf.read()[1:]))
                        # add data to numpy array and remove the first point
                        if pref.inamedisplay <= 1:
                            nodenickname = nodelongname.split("/")[-1]
                            # take just the last part of the longname
                            self.nodenicknames.append(nodenickname)
                        elif pref.inamedisplay == 2:
                            try:
                                namesplit = nodelongname.split("/")
                                nodenickname = namesplit[-2] + "/" + namesplit[-1]
                                # take the two last if possible
                                self.nodenicknames.append(nodenickname)
                            except IndexError:
                                self.nodenicknames.append(nodelongname)
                        elif pref.inamedisplay == 3:
                            self.nodenicknames.append(nodelongname)
                            # take the full long name
                        elif pref.inamedisplay == 4:
                            self.nodenicknames.append(leaf.name)  # take nxs file name
                        if alias_dict:
                            # Expose each column as an instance attribute named
                            # after its alias (or nickname when no alias exists);
                            # '#' disambiguates duplicate aliases.
                            try:
                                alias = alias_dict[nodelongname.lower()]
                                if alias in self.alias:
                                    alias += "#"
                                self.alias.append(alias)
                                self.__dict__[alias] = leaf.read()[shift:]
                            except KeyError:
                                self.alias.append(nodenickname)
                                self.__dict__[nodenickname] = leaf.read()[shift:]
                elif len(leaf.shape) == 3:
                    # 3D leaves are detector image stacks, identified by their
                    # second dimension; shift trims trailing frames for FLY/HCS.
                    if leaf.shape[1] == 1065:
                        if shift:
                            self.efilm = leaf[:-shift]
                        else:
                            self.efilm = leaf[:]
                    # Careful: process is different for HCS, SBS and FLY
                    if leaf.shape[1] == 240:
                        if shift:
                            self.xfilm = leaf[:-shift]
                        else:
                            self.xfilm = leaf[:]
                    if leaf.shape[1] == 516:
                        if shift:
                            self.mfilm = leaf[:-shift]
                        else:
                            self.mfilm = leaf[:]
        except ValueError:
            # Corrupted file: reset every attribute to empty and bail out.
            print("probleme le fichier ", longname, "est corrompu")
            self.npts = 0
            self.nmotors = 0
            self.mins = numpy.empty(0)
            self.maxs = numpy.empty(0)
            self.data = numpy.empty(0)
            fichier.close()
            return
        except tables.exceptions.NoSuchNodeError:
            # Missing expected node: treat the file as corrupted as well.
            print("probleme le fichier ", longname, "est corrompu")
            self.npts = 0
            self.nmotors = 0
            self.mins = numpy.empty(0)
            self.maxs = numpy.empty(0)
            self.data = numpy.empty(0)
            fichier.close()
            return
        else:
            fichier.close()
        self.npts = self.npts - 1  # remove the 1st point that is uncorrect due to
        # the operation strategy of simplescan
        self.nmotors = len(self.nodenames)  # number of columns kept
        # if display preferences are "my suggestion",
        # we look for a name appearing several times
        # in this case we choose the longest name
        if pref.inamedisplay == 0:
            for i in range(self.nmotors - 1):  # no need to process the last point
                nickname = self.nodenicknames[i]
                if nickname in self.nodenicknames[i + 1 :]:  # item in double
                    nodelongname = self.nodelongnames[i]
                    namesplit = nodelongname.split("/")
                    try:
                        nodenickname = namesplit[-2] + "/" + namesplit[-1]
                        # take the two last
                    except IndexError:
                        nodenickname = nodelongname  # take the two last
                    self.nodenicknames[i] = nodenickname
                    j = i
                    try:
                        # NOTE(review): index(j + 1) searches for the INTEGER
                        # j + 1 in a list of strings, so ValueError fires
                        # immediately and this loop never renames duplicates.
                        # It probably meant index(nickname, j + 1) — confirm.
                        while 1:
                            j = self.nodenicknames.index(j + 1)
                            self.nodenicknames[j] = nodenickname
                            # careful, it is not garanteed that nodenickname!=nickname
                    except ValueError:
                        pass
        # Reassemble the flat concatenated data into (nmotors, npts) and drop
        # columns that are entirely zero.
        self.data = self.data.reshape((self.nmotors, self.npts))
        test = numpy.any(self.data != 0, axis=0)
        # if non-zero value, the condition is verified
        self.data = numpy.compress(test, self.data, axis=1)
        if datafilter is not None:
            # filter values while looking at the condition on the filter
            if datafilter.role != "none" and datafilter.ifil > -1:
                if datafilter.irole == 1:
                    self.data = numpy.compress(
                        self.data[datafilter.ifil] > datafilter.value, self.data, axis=1
                    )
                elif datafilter.irole == 2:
                    self.data = numpy.compress(
                        self.data[datafilter.ifil] < datafilter.value, self.data, axis=1
                    )
                elif datafilter.irole == 3:
                    self.data = numpy.compress(
                        self.data[datafilter.ifil] != datafilter.value,
                        self.data,
                        axis=1,
                    )
        self.npts = self.data.shape[1]  # number of points not totally null
        if self.npts == 0:  # no more points after filtering
            self.mins = numpy.zeros(self.nmotors)
            self.maxs = numpy.ones(self.nmotors)
        else:
            self.mins = numpy.amin(self.data, axis=1)  # boundary for each parameter
            self.maxs = numpy.amax(self.data, axis=1)
        self.namelist = self.alias
|
from pyrates.utility.visualization import plot_timeseries, create_cmap
from pyrates.ir import CircuitIR
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d
import matplotlib as mpl
# Plot styling: ggplot theme with bold black labels at 12 pt.
plt.style.reload_library()
plt.style.use('ggplot')
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['axes.titlesize'] = 12
mpl.rcParams['axes.titleweight'] = 'bold'
mpl.rcParams['axes.labelsize'] = 12
mpl.rcParams['axes.labelcolor'] = 'black'
mpl.rcParams['axes.labelweight'] = 'bold'
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['xtick.color'] = 'black'
mpl.rcParams['ytick.color'] = 'black'
mpl.rcParams['legend.fontsize'] = 12

# parameters
dt = 1e-2      # integration step size (ms)
T = 1000.0     # total simulation time (ms)
dts = 1e-1     # sampling step size for stored output (ms)
freq = 14.0    # frequency of the (currently unused) sinusoidal drive
amp = 1.2      # amplitude of the sinusoidal drive
sim_steps = int(np.round(T/dt, decimals=0))
# ctx = np.zeros((sim_steps, 1))
# ctx[50000, 0] = 600.0
# ctx = gaussian_filter1d(ctx, 100, axis=0)
# Sinusoidal cortical drive; time is in ms, hence the 1e-3 factor.
# NOTE(review): ctx is built but not passed to eic.run (inputs commented out).
time = np.linspace(0., T, sim_steps)
ctx = np.sin(2.0*np.pi*freq*time*1e-3)*amp

#plt.plot(ctx)
#plt.show()

# Build and compile the STN-GPe-striatum circuit, then simulate it, recording
# the firing-rate variable R of each population.
eic = CircuitIR.from_yaml("config/stn_gpe_str/stn_gpe_str").compile(backend='numpy', solver='scipy', step_size=dt)
results, t = eic.run(simulation_time=T, sampling_step_size=dts, profile=True,
                     outputs={'stn': 'stn/stn_op/R',
                              'gpe-p': 'gpe_p/stn_op/R',
                              'gpe-a': 'gpe_a/stn_op/R',
                              'msn-d1': 'msn_d1/stn_op/R',
                              'msn-d2': 'msn_d2/stn_op/R',
                              'fsi-d1': 'fsi_d1/fsi_op/R',
                              'fsi-d2': 'fsi_d2/fsi_op/R'
                              },
                     # inputs={'stn/stn_op/ctx': amp + ctx,
                     #         'msn/str_msn_op/ctx': amp + ctx,
                     #         'fsi/str_fsi_op/ctx': amp + ctx}
                     )
# Convert rates to spikes/s for plotting.
results = results * 1e3

fig, axes = plt.subplots(nrows=2, dpi=200, figsize=(10, 5))
ax = plot_timeseries(results, cmap=create_cmap('cubehelix', as_cmap=False, n_colors=7), ax=axes[0])
#plt.legend(['STN', 'GPe_p', 'GPe_a', 'STR'])
ax.set_title('Healthy Firing Rates')
ax.set_ylabel('firing rate (spikes/s)')
ax.set_xlabel('time (ms)')
#ax.set_ylim(0.0, 100.0)
#ax.set_xlim(20.0, 240.0)

# av_signal = results.loc[:, ('STN', 'stn')] - results.loc[:, ('GPe_proto', 'gpe_p')] - results.loc[:, ('MSN', 'msn')]
# ax = plot_timeseries(av_signal, cmap=create_cmap('cubehelix', as_cmap=False, n_colors=1), ax=axes[1])
# ax.set_title('GPi input (STN - GPe_p - MSN)')
# ax.set_ylabel('firing rate (spikes/s)')
# ax.set_xlabel('time (ms)')

plt.tight_layout()
plt.show()
|
# -*- coding: utf-8 -*-
"""
File: fantasypremierleagueapi.py
Path: fantasypremierleague/
Author: <NAME>
"""
# Python Imports
from functools import wraps
from pprint import pprint
# Third Party Imports
import requests
# Local Imports
# Base REST endpoint of the Fantasy Premier League "drf" API.
BASE_URL = 'https://fantasy.premierleague.com/drf'
# Endpoint returning all bootstrap data (players, teams, game weeks).
DATA_ENDPOINT = 'bootstrap-static'
# Endpoint template for a single player's detailed summary.
PLAYER_ENDPOINT = 'element-summary/{player_id}'
def ensure_one_item(f):
    """Decorator guaranteeing a search returns exactly one item.

    The wrapped function must return a sequence; the decorator unwraps the
    single element, raising TypeError for zero or multiple results.
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        results = f(*args, **kwargs)
        if not results:
            raise TypeError('No data found with the given parameters')
        if len(results) > 1:
            raise TypeError('Too much data found with the given parameters.')
        return results[0]
    return wrapped
def data():
    """Fetch the full bootstrap-static payload from the API as a dict."""
    endpoint = '{}/{}'.format(BASE_URL, DATA_ENDPOINT)
    response = requests.get(endpoint)
    return response.json()
def player(name=None, id=None, info_only=False):
    """Return statistics for a specific player, looked up by name or id.

    id - player id
    name - name should be a str "LastName,FirstName"
    info_only - bool, True if we only want basics (Name, current stats, etc)
    """
    info = _player_info(name=name, id=id)
    if info_only:
        return info
    detail_url = '{}/{}'.format(BASE_URL, PLAYER_ENDPOINT).format(player_id=info['id'])
    payload = requests.get(detail_url).json()
    payload['information'] = info
    return payload
@ensure_one_item
def team(name=None, id=None):
    """Return data on a given team name or team id."""
    if not name and not id:
        raise TypeError("You must provide either a team name "
                        "or a team id.")
    all_teams = data()['teams']
    # Id is more specific, so try that first
    if id:
        return [t for t in all_teams if t['id'] == id]
    wanted = name.lower()
    return [t for t in all_teams if t['name'].lower() == wanted]
@ensure_one_item
def _player_info(name=None, id=None):
    """Return the basic information on a player.

    Player Name should be a str "LastName,FirstName"
    """
    if not name and not id:
        raise TypeError("You must provide either a player's name "
                        "or a player's id.")
    elements = data()['elements']
    if id:
        return [p for p in elements if p['id'] == id]
    parts = name.split(',')
    try:
        first_name = parts[1].strip()
    except IndexError:
        raise TypeError('Please enter a name in format '
                        '"LastName,FirstName"')
    second_name = parts[0].strip()
    return [p for p in elements
            if p['first_name'] == first_name
            and p['second_name'] == second_name]
def dream_team():
    """Return every player currently selected in the dream team."""
    all_players = data()['elements']
    return [p for p in all_players if p['in_dreamteam']]
def top(num_results=10, position=None, sort_key='total_points', reverse=False):
    """Return the top given number of players overall, or within a position.

    positions = 1 - 2 - 3 - 4 (goalie - def - mid - fwd)

    NOTE(review): with the default reverse=False the sort is ascending, so the
    LOWEST-scoring players come first — pass reverse=True for the actual top.
    Confirm whether the default should be flipped.
    """
    ranked = sorted(data()['elements'],
                    key=lambda player: player[sort_key],
                    reverse=reverse)
    if position:
        ranked = [p for p in ranked if p['element_type'] == position]
    return ranked[:num_results]
|
"""
Module for camera and video recording:
Depends on :
- opencv2 (conda install --channel conda-forge opencv)
- hdf5, pyqtgraph, progressbar2 (normal conda channel)
"""
import sys
import signal
import os
import gzip
import numpy as np
try:
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from PyQt5.QtWidgets import (
QMainWindow,
QWidget,
QPushButton,
QVBoxLayout,
QHBoxLayout,
QApplication,
QLabel,
)
has_qtgraph = True
except ModuleNotFoundError:
has_qtgraph = False
print("pyqtgraph is not installed.")
import cv2
import h5py
import time
from progressbar import ProgressBar
import asyncio
from pymanip.asynctools import synchronize_function
class CameraTimeout(Exception):
    """Raised when the camera fails to deliver a frame in time."""
def save_image(im, ii, basename, zerofill, file_format, compression, compression_level):
    """Persist frame *im* (0-based index *ii*) to disk in the requested format.

    Filenames are '{basename}-{ii+1 zero-padded to zerofill}.{ext}'. Supported
    formats: raw (int16 dump), npy, npy.gz, hdf/hdf5 (with counter/timestamp
    metadata), and any extension OpenCV's imwrite understands (e.g. png).
    """
    numbered = ("{:}-{:0" + str(zerofill) + "d}").format(basename, ii + 1)
    if file_format == "raw":
        im.tofile(numbered + ".li16")
    elif file_format == "npy":
        np.save(numbered + ".npy", im)
    elif file_format == "npy.gz":
        with gzip.open(numbered + ".npy.gz", "wb") as f:
            np.save(f, im)
    elif file_format in ("hdf", "hdf5"):
        with h5py.File(numbered + ".hdf5", "w") as f:
            f.attrs["counter"] = im.metadata["counter"]
            f.attrs["timestamp"] = im.metadata["timestamp"].timestamp()
            # compression='gzip' is too slow for 30 fps;
            # compression='lzf' is almost OK but a bit slow at 30 fps.
            f.create_dataset("image", data=im, compression=compression)
    else:
        filename = numbered + "." + file_format
        if file_format == "png":
            params = (cv2.IMWRITE_PNG_COMPRESSION, compression_level)
        else:
            # NOTE(review): some OpenCV builds reject params=None — confirm.
            params = None
        cv2.imwrite(filename, im, params)
class MetadataArray(np.ndarray):
    """ndarray subclass carrying an arbitrary ``metadata`` attribute.

    The attribute is propagated to views, slices and ufunc results through
    ``__array_finalize__`` (None when the source had no metadata).
    """

    def __new__(cls, input_array, metadata=None):
        view = np.asarray(input_array).view(cls)
        view.metadata = metadata
        return view

    def __array_finalize__(self, obj):
        if obj is not None:
            self.metadata = getattr(obj, "metadata", None)
class Camera:
    """
    Base class for camera drivers.

    Subclasses must implement:
    - acquisition_oneshot method
    - acquisition generator
    - resolution, name, bitdepth properties
    """
    def __init__(self):
        super(Camera, self).__init__()
    def __enter__(self):
        # Context-manager entry: no resource acquisition at this level.
        return self
    def __exit__(self, type_, value, cb):
        # Drop any live preview generator so the acquisition loop can stop.
        if hasattr(self, "preview_generator"):
            self.preview_generator = None
    def preview(self, backend="cv", slice_=None, zoom=0.5, rotate=0):
        """Live preview of the camera stream.

        backend: "cv" (OpenCV window) or "qt" (pyqtgraph window).
        slice_: optional (row_start, row_stop, col_start, col_stop) crop.
        zoom: display scaling factor.
        rotate: rotation angle; only 90.0 (counter-clockwise) is handled.
        """
        if backend == "cv":
            self.preview_cv(slice_, zoom, rotate)
        elif backend == "qt":
            self.preview_qt(slice_, zoom, None, rotate)
        else:
            raise RuntimeError('Unknown backend "' + backend + '"')
    async def preview_async_cv(self, slice_, zoom, name, rotate=0):
        """Async OpenCV preview loop; ESC or the "s" key stops it cleanly."""
        minimum = None
        maximum = None
        cv2.namedWindow(name)
        try:
            preview_generator = self.acquisition_async()
            async for im in preview_generator:
                # if minimum is None:
                if True:
                    # Rescale every frame to the full range of its dtype
                    # (min/max are recomputed each frame).
                    minimum = np.min(im)
                    maximum = np.max(im)
                    # print('min, max:', minimum, maximum)
                maxint = np.iinfo(im.dtype).max
                if rotate == 90.0:
                    im = cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
                if slice_:
                    img = (maxint // (maximum - minimum)) * (
                        im[slice_[0] : slice_[1], slice_[2] : slice_[3]] - minimum
                    )
                else:
                    img = (maxint // (maximum - minimum)) * (im - minimum)
                l, c = img.shape
                cv2.imshow(name, cv2.resize(img, (int(c * zoom), int(l * zoom))))
                k = cv2.waitKey(1)
                if k in (0x1B, ord("s")):
                    # Ask the acquisition generator to stop cleanly.
                    clean = await preview_generator.asend(True)
                    if not clean:
                        print("Generator not cleaned")
                    break
                await asyncio.sleep(0.001)
        except KeyboardInterrupt:
            pass
        finally:
            cv2.destroyAllWindows()
    def preview_cv(self, slice_, zoom, rotate=0):
        """Synchronous wrapper around preview_async_cv."""
        return synchronize_function(
            self.preview_async_cv, slice_, zoom, name="Preview", rotate=rotate
        )
    def preview_exitHandler(self):
        """
        This method sends a stop signal to the camera acquisition generator
        """
        clean = self.preview_generator.send(True)
        if not clean:
            print("Generator not cleaned")
    def display_crosshair(self):
        """Toggle a centered crosshair overlay on the Qt preview window."""
        # add a centered crosshair for self-reflection
        # NOTE(review): reads self.camera.Width/Height — presumably provided
        # by a subclass or wrapper; not defined in this base class.
        if self.crosshair_chkbox.isChecked():
            self.vLine = pg.InfiniteLine(
                pos=(self.camera.Width / 2, 0), angle=90, movable=False
            )
            self.hLine = pg.InfiniteLine(
                pos=(0, self.camera.Height / 2), angle=0, movable=False
            )
            self.image_view.addItem(self.vLine, ignoreBounds=True)
            self.image_view.addItem(self.hLine, ignoreBounds=True)
        else:
            self.image_view.removeItem(self.vLine)
            self.image_view.removeItem(self.hLine)
    def preview_qt(self, slice, zoom, app=None, rotate=0):
        """Qt/pyqtgraph preview.

        Re-schedules itself every 10 ms through QTimer while the Qt event
        loop runs; the first call creates the QApplication, the window and
        the control widgets (crosshair, background subtraction, exposure).
        NOTE(review): the parameter name "slice" shadows the builtin.
        """
        if app:
            self.app = app
            just_started = False
        elif not hasattr(self, "app"):
            self.app = QtGui.QApplication([])
            self.app.aboutToQuit.connect(self.preview_exitHandler)
            just_started = True
        else:
            just_started = False
        # create window if it does not already exists
        if not hasattr(self, "window"):
            self.window = QtGui.QMainWindow()
            # self.window.resize(*self.resolution)
            self.window.resize(800, 600)
            self.window.setWindowTitle(self.name)
            self.image_view = pg.ImageView()
            self.window.setCentralWidget(self.image_view)
            self.range_set = False
            # adding widget for controlling the background subtraction
            # and a crosshair overlay
            self.central_widget = QWidget()
            self.tools_widget = QWidget()
            self.central_layout = QVBoxLayout(self.central_widget)
            self.tools_layout = QHBoxLayout(self.tools_widget)
            self.crosshair_chkbox = QtGui.QCheckBox("Crosshair", self.tools_widget)
            self.subtraction_chkbox = QtGui.QCheckBox(
                "Background subtraction", self.tools_widget
            )
            self.learning_label = QLabel(
                "Learning rate [0, 1] :", parent=self.tools_widget
            )
            self.spnbx_learning = QtGui.QDoubleSpinBox(
                parent=self.tools_widget, value=0.05
            )
            self.spnbx_learning.setRange(0, 1)
            self.spnbx_learning.setSingleStep(0.01)
            self.spnbx_learning.setDecimals(3)
            self.acq_btn = QtGui.QPushButton("Acquisition", self.tools_widget)
            self.exposure_label = QLabel(
                "Exposure time (s) :", parent=self.tools_widget
            )
            self.spnbox_exposure = QtGui.QDoubleSpinBox(
                parent=self.tools_widget, value=0.001
            )
            self.spnbox_exposure.setRange(0.000033, 67.108895)
            self.spnbox_exposure.setSingleStep(0.0001)
            self.spnbox_exposure.setDecimals(4)
            self.tools_layout.addWidget(self.crosshair_chkbox)
            self.tools_layout.addWidget(self.subtraction_chkbox)
            self.tools_layout.addWidget(self.learning_label)
            self.tools_layout.addWidget(self.spnbx_learning)
            self.tools_layout.addWidget(self.exposure_label)
            self.tools_layout.addWidget(self.spnbox_exposure)
            self.tools_layout.addWidget(self.acq_btn)
            self.central_layout.addWidget(self.image_view)
            self.central_layout.addWidget(self.tools_widget)
            self.window.setCentralWidget(self.central_widget)
            self.crosshair_chkbox.stateChanged.connect(self.display_crosshair)
            # hide useless buttons
            self.image_view.ui.roiBtn.hide()
            self.image_view.ui.menuBtn.hide()
            self.window.show()
        # instantiate generator
        if not hasattr(self, "preview_generator"):
            self.preview_generator = self.acquisition(timeout=5, raise_on_timeout=False)
        if just_started:
            self.bkgrd = None
        # update view with latest image if it is ready
        # do nothing otherwise (to allow GUI interaction while waiting
        # for camera reading)
        img = next(self.preview_generator)
        if img is not None:
            if rotate == 90.0:
                img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
            if self.subtraction_chkbox.isChecked():
                if self.bkgrd is None:
                    self.bkgrd = img
                    self.range_set = False
                # Running-average background: bkgrd <- (1-lr)*bkgrd + lr*img
                learning_rate = self.spnbx_learning.value()
                self.bkgrd = (1 - learning_rate) * self.bkgrd + learning_rate * img
                self.bkgrd = self.bkgrd.astype(np.int32)
                img = img - self.bkgrd  # self.bkgrd - img
                img[img < 0] = 0
                img = img.astype(np.uint16)
            self.image_view.setImage(
                img.T, autoRange=False, autoLevels=False, autoHistogramRange=False
            )
            if not self.range_set:
                self.image_view.autoRange()
                self.image_view.autoLevels()
                self.range_set = True
        # set timer for refreshing in 10 ms
        QtCore.QTimer.singleShot(
            10, lambda: self.preview_qt(slice, zoom, self.app, rotate)
        )
        if just_started:
            QtGui.QApplication.instance().exec_()
    def acquire_to_files(self, *args, **kwargs):
        """Synchronous wrapper around acquire_to_files_async."""
        return synchronize_function(self.acquire_to_files_async, *args, **kwargs)
    def acquire_signalHandler(self, *args, **kwargs):
        """
        This method sends a stop signal to the camera acquisition generator
        """
        self.acqinterrupted = True
    async def acquire_to_files_async(
        self,
        num,
        basename,
        zerofill=4,
        dryrun=False,
        file_format="png",
        compression=None,
        compression_level=3,
        verbose=True,
        delay_save=False,
        progressbar=True,
        initialising_cams=None,
        **kwargs
    ):
        """
        Acquire num images and saves to disk
        - basename, zerofill: filename parameters
        - dryrun = True: acquire but don't actually save [default: False]
        - file_format:
            'raw' -> li16 (binary little-endian 16 bits integers)
            'npy' -> numpy npy file
            'npy.gz' -> gzip compressed numpy file
            'hdf5' -> hdf5, with optional compression [default None]
            'png', 'jpg', 'tif' -> image format with opencv imwrite
                                   with optional compression level for PNG
                                   [default: 3]
        - compression (optional) for HDF5
        - compression_level (optional) for PNG
        - delay_save: records in RAM and save at this end
        returns: image_counter, frame_datetime as lists
        """
        # signal handling
        # Windows has no loop.add_signal_handler, so fall back to signal.signal.
        if sys.platform == "win32":
            signal.signal(signal.SIGINT, self.acquire_signalHandler)
        else:
            loop = asyncio.get_event_loop()
            for signame in ("SIGINT", "SIGTERM"):
                loop.add_signal_handler(
                    getattr(signal, signame), self.acquire_signalHandler
                )
        dirname = os.path.dirname(basename)
        if len(dirname):
            try:
                os.makedirs(dirname)
            except FileExistsError:
                pass
        count = []
        dt = []
        if verbose:
            dateformat = "%A %d %B %Y - %X"
            starttime = time.time()
            starttime_str = time.strftime(dateformat, time.localtime(starttime))
            print("Camera acquisition started: " + starttime_str)
        if progressbar:
            bar = ProgressBar(max_value=num)
        computation_time = 0.0
        images = list()
        ii = 0
        acqgen = self.acquisition_async(
            num, initialising_cams=initialising_cams, **kwargs
        )
        self.acqinterrupted = False
        async for im in acqgen:
            if dryrun:
                continue
            if ii == 0:
                print(im.dtype)
            if delay_save:
                # Keep a copy in RAM; everything is written after the loop.
                images.append(im.copy())
            else:
                start_time = time.process_time()
                save_image(
                    im,
                    ii,
                    basename,
                    zerofill,
                    file_format,
                    compression,
                    compression_level,
                )
                computation_time += time.process_time() - start_time
            if hasattr(im, "metadata"):
                count.append(im.metadata["counter"])
                ts = im.metadata["timestamp"]
                try:
                    # datetime -> POSIX timestamp; kept as-is otherwise
                    ts = ts.timestamp()
                except AttributeError:
                    pass
                dt.append(ts)
            ii += 1
            if progressbar:
                try:
                    bar.update(ii)
                except Exception:
                    print(ii)
            await asyncio.sleep(0.001)
            if self.acqinterrupted:
                print("")
                print("Signal caught... Stopping camera acquisition...")
                clean = await acqgen.asend(True)
                if not clean:
                    print("Camera was not successfully interrupted")
                break
        if progressbar:
            print("")
        if delay_save and not dryrun:
            # Deferred write phase for delay_save mode.
            print("Acquisition complete. Saving to disk...")
            if progressbar:
                bar = ProgressBar(max_value=ii)
            for ii, im in enumerate(images):
                start_time = time.process_time()
                save_image(
                    im,
                    ii,
                    basename,
                    zerofill,
                    file_format,
                    compression,
                    compression_level,
                )
                computation_time += time.process_time() - start_time
                if progressbar:
                    try:
                        bar.update(ii + 1)
                    except Exception:
                        print(ii)
                await asyncio.sleep(0.0001)
        if progressbar:
            print("")
        dt = np.array(dt)
        if verbose:
            print(
                "Average saving time per image:",
                1000 * computation_time / (ii + 1),
                "ms",
            )
            print("average fps =", 1 / np.mean(dt[1:] - dt[:-1]))
            if images:
                print("image size:", images[0].shape)
                print("image dtype:", images[0].dtype)
        return count, dt
|
<reponame>vivekparasharr/Challenges-and-Competitions
import pandas as pd
import numpy as np
# TidyTuesday 2021-03-30 makeup-shades datasets, loaded straight from GitHub.
sephora = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-30/sephora.csv')
ulta = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-30/ulta.csv')
allCategories = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-30/allCategories.csv')
allShades = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-30/allShades.csv')
allNumbers = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-30/allNumbers.csv')
# summary of a database in terms of number of unique elements in each column
def vp_summ(df):
    """Print a quick summary of *df*: shape, then each column's unique-value
    count with its first element as an example."""
    n_rows, n_cols = df.shape
    print('#columns:', n_cols)
    print('#rows:', n_rows)
    for col in df.columns:
        unique_count = df[col].unique().shape[0]
        example = df[col][0]
        print(col, ':', unique_count, '| example:', example)
vp_summ(allShades)
# Per-brand counts: number of distinct products and distinct shades (hex).
p = allShades.groupby(['brand']).nunique()[['product']].reset_index().sort_values(by='product', ascending=False)
h = allShades.groupby(['brand']).nunique()[['hex']].reset_index().sort_values(by='hex', ascending=False)
m = pd.merge(left=p, right=h, left_on='brand', right_on='brand', how='inner')
# NOTE(review): .loc[:10] is label-based and inclusive, so this keeps 11
# rows (labels 0..10), not 10 — .head(10) may have been intended.
m = m.loc[:10]
# Negated shade count so its bars hang downward in the mirrored subplot.
m['hex2']=m.hex * (-1)
# Plotting
from plotly.subplots import make_subplots
import plotly.graph_objects as go
# Two stacked bar charts sharing one x-axis (brands).
fig = make_subplots(rows=2, cols=1,
                    shared_xaxes=True, vertical_spacing=0.02,)
                    #subplot_titles=("Plot 1", "Plot 2"))
# Top: shade counts plotted downward (hex2 is negative) but labelled with
# the true positive counts.
fig.add_trace(
    go.Bar(x=list(m['brand']), y=list(m['hex2']),
    text=list(m['hex']),textposition='auto'),
    row=1, col=1
)
# Bottom: product counts.
fig.add_trace(
    go.Bar(x=list(m['brand']), y=list(m['product']),
    text=list(m['product']),textposition='auto'),
    row=2, col=1
)
# Set the visibility ON
# fig.update_yaxes(title='y', visible=True, showticklabels=False)
# Set the visibility OFF
fig.update_yaxes(title='y', visible=False, showticklabels=False)
# Title
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=1.25,xanchor='center',yanchor='top',
                        font=dict(family='Arial',size=24,color='grey'),showarrow=False,
                        text="Makeup Products vs Shades"))
# Subtitle
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=1.13,xanchor='center',yanchor='top',
                        font=dict(family='Arial',size=14,color='grey'),showarrow=False,
                        text="Plot showing number of products vs number of shades for top 10 brands"))
# Notes
fig.add_annotation(dict(xref='paper',yref='paper',x=-0.005,y=0.75,xanchor='right',yanchor='top',
                        font=dict(family='Arial',size=14,color='grey'),showarrow=False,
                        text="Shades ->"))
fig.add_annotation(dict(xref='paper',yref='paper',x=-0.005,y=0.25,xanchor='right',yanchor='top',
                        font=dict(family='Arial',size=14,color='grey'),showarrow=False,
                        text="Products ->"))
# Footer
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.32,xanchor='center',yanchor='top',
                        font=dict(family='Arial', size=12, color='grey'),showarrow=False,
                        text='#TidyTuesday - 2021/03/30 | twitter.com/vivekparasharr | github.com/vivekparasharr'))
fig.show()
|
<reponame>Tanevski3/python-caa-algorithm<filename>index.py
import matplotlib.pyplot as plt
import networkx as nx
from graph import Graph
# Start-up banner for the maze game.
print("Welcome to CAA!- You are a lion trying to escape the jungle maze...")
def convert_to_networkx_graph(graph):
    """Build a networkx.Graph mirroring the nodes and edges of *graph*.

    :rtype: object
    """
    nx_graph = nx.Graph(name = "CAA implementation")
    for node in graph.get_graph():
        nx_graph.add_node(node.get_name())
    for source, neighbours in graph.get_graph().items():
        for neighbour in neighbours:
            nx_graph.add_edge(source.get_name(), neighbour.get_name())
    return nx_graph
while True:
    # full_name = input("Enter your name king: ")
    # if not full_name:
    # continue
    # print("Let the games begin `" + full_name + "`")
    # Grid maze: 0 = free, 1 = wall, 2 = goal. NOTE(review): currently
    # unused below — only the adjacency-list graph is exercised.
    maze = [[0, 0, 0, 0, 0, 0],
            [1, 1, 0, 0, 0, 1],
            [0, 0, 0, 1, 0, 0],
            [0, 1, 1, 0, 0, 1],
            [0, 1, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 2]]
    # Adjacency list of the maze graph.
    # NOTE(review): the name "dict" shadows the builtin.
    dict = {
        'A': ['B', 'F'],
        'B': ['C'],
        'C': ['D', 'H'],
        'D': ['E'],
        'E': ['J'],
        'F': ['G', 'K'],
        'G': ['L'],
        'H': ['I'],
        'I': ['N', 'J'],
        'J': ['K'],
        'K': ['L','P'],
        'L': ['M'],
        'M': ['R'],
        'N': ['S', 'O'],
        'O': ['P'],
        'P': ['Q'],
        'Q': ['R'],
        'R': ['S'],
        'S': ['T'],
        'T': []
    }
    g = Graph(dict, start='A', directed=False, whos_happy=['O'], whos_sad=['J', 'H', 'T'])
    # Demonstrate connectivity / path-finding queries.
    print("W connected to N: " + str(g.is_connected('W', 'N')))
    print("A connected to B: " + str(g.is_connected('A', 'B')))
    print("Path from 'A' to 'B': " + str(g.find_path('A', 'B')))
    print("Path from 'A' to 'D': " + str(g.find_path('A', 'D')))
    print("Path from 'A' to 'E': " + str(g.find_path('A', 'E')))
    print("Path from 'A' to 'T': " + str(g.find_path('A', 'T')))
    # NOTE(review): the 'О' below is Cyrillic U+041E, not Latin 'O', so this
    # query will not match the graph's node 'O'.
    print("Path from 'A' to 'О': " + str(g.find_path('A', 'О')))
    # Walk the graph cursor around: current/next/previous/go_to.
    print("You start at: " + str(g.current()))
    print("Next: " + str(g.next()))
    print("Previous: " + str(g.previous()))
    print("Go to: " + str(g.go_to('B')))
    print("Current: " + str(g.current()))
    print("Next: " + str(g.next()))
    print("Previous: " + str(g.previous()))
    print("Go to: " + str(g.go_to('C')))
    print("Current: " + str(g.current()))
    print("Next: " + str(g.next()))
    print("Previous: " + str(g.previous()))
    print("Go to: " + str(g.go_to('B')))
    print("Current: " + str(g.current()))
    print("Next: " + str(g.next()))
    print("Previous: " + str(g.previous()))
    print("Output: " + g.__str__())
    # Visualise the graph with networkx + matplotlib, then exit (the loop
    # body terminates the process, so it runs only once).
    visual_graph = convert_to_networkx_graph(g)
    pos=nx.spring_layout(visual_graph,iterations=100)
    filename = nx.draw(visual_graph, pos, with_labels=True, node_size=500, font_size=16)
    plt.show()
exit(1) |
<reponame>HA1907/MSc-Dissertation
import json, pickle, os
# cd drive/MyDrive/'Colab Notebooks'/Thesis/PeerRead/code/accept_classify/
# Train Data and Target
# Paired directories: parsed-paper JSONs and their review JSONs.
p_train_path = "../../data/iclr_2017/train/parsed_pdfs/"
p_train_file = lambda f: p_train_path+f
p_train_file_names = os.listdir(p_train_path)
r_train_path = "../../data/iclr_2017/train/reviews/"
r_train_file = lambda f: r_train_path+f
r_train_file_names = os.listdir(r_train_path)
def getPaper(name):
    """Return the concatenated section texts of a parsed-pdf JSON file,
    or None when the file has no parsed sections."""
    with open(p_train_file(name)) as jsonFile:
        paper = json.load(jsonFile)
    sections = paper['metadata']['sections']
    if sections is not None:
        return ' '.join(section['text'] for section in sections)
def getReviewFinal(name):
    """Return the 'accepted' flag stored in a review JSON file."""
    with open(r_train_file(name)) as jsonFile:
        review = json.load(jsonFile)
    return review['accepted']
# Build the training corpus: paper text -> accepted/rejected label.
# NOTE(review): tqdm is used here but never imported in this file.
train_data = []
train_target = []
for id, name in enumerate(tqdm(p_train_file_names)):
    # Parse each paper JSON only once (the original called getPaper twice,
    # reading and parsing every file from disk twice).
    paper_text = getPaper(name)
    if paper_text:
        train_data.append(paper_text)
        train_target.append(getReviewFinal(r_train_file_names[id]))
# Validation Data and Target
# Same layout as the train split, under the "dev" directory.
p_valid_path = "../../data/iclr_2017/dev/parsed_pdfs/"
p_valid_file = lambda f: p_valid_path+f
p_valid_file_names = os.listdir(p_valid_path)
r_valid_path = "../../data/iclr_2017/dev/reviews/"
r_valid_file = lambda f: r_valid_path+f
r_valid_file_names = os.listdir(r_valid_path)
def getPaper(name):
    """Return the concatenated section texts of a dev-split parsed-pdf JSON
    file, or None when the file has no parsed sections.

    NOTE: redefines the train-split getPaper above, pointing at the dev dir.
    """
    with open(p_valid_file(name)) as jsonFile:
        paper = json.load(jsonFile)
    sections = paper['metadata']['sections']
    if sections is not None:
        return ' '.join(section['text'] for section in sections)
def getReviewFinal(name):
    """Return the 'accepted' flag from a dev-split review JSON file."""
    with open(r_valid_file(name)) as jsonFile:
        review = json.load(jsonFile)
    return review['accepted']
# Build the validation corpus (dev split); parse each paper only once.
val_data = []
val_target = []
for id, name in enumerate(tqdm(p_valid_file_names)):
    paper_text = getPaper(name)
    if paper_text:
        val_data.append(paper_text)
        val_target.append(getReviewFinal(r_valid_file_names[id]))
# Test Data
# Same layout again, under the "test" directory.
p_test_path = "../../data/iclr_2017/test/parsed_pdfs/"
p_test_file = lambda f: p_test_path+f
p_test_file_names = os.listdir(p_test_path)
r_test_path = "../../data/iclr_2017/test/reviews/"
r_test_file = lambda f: r_test_path+f
r_test_file_names = os.listdir(r_test_path)
def getPaper(name):
    """Return the concatenated section texts of a test-split parsed-pdf JSON
    file, or None when the file has no parsed sections.

    NOTE: redefines the earlier getPaper, pointing at the test dir.
    """
    with open(p_test_file(name)) as jsonFile:
        paper = json.load(jsonFile)
    sections = paper['metadata']['sections']
    if sections is not None:
        return ' '.join(section['text'] for section in sections)
def getReviewFinal(name):
    """Return the 'accepted' flag from a test-split review JSON file."""
    with open(r_test_file(name)) as jsonFile:
        review = json.load(jsonFile)
    return review['accepted']
# Build the test corpus; parse each paper only once.
test_data = []
test_target = []
for id, name in enumerate(tqdm(p_test_file_names)):
    paper_text = getPaper(name)
    if paper_text:
        test_data.append(paper_text)
        test_target.append(getReviewFinal(r_test_file_names[id]))
# ONCE ONLY
# Persist all six splits in a single pickle for later reuse.
all_data = [train_data, train_target, val_data, val_target, test_data, test_target]
with open('../../my_data/01-Paper-Acceptance/paper_review', 'wb') as f:
    pickle.dump(all_data, f)
# with open('../../my_data/01-Paper-Acceptance/paper_review', "rb") as f:
# all_data=pickle.load(f)
# train_data, train_target, val_data, val_target, test_data, test_target = all_data
|
<reponame>RRBuilder/RRBot
import requests
from functools import lru_cache, wraps
from datetime import datetime, timedelta
def timed_lru_cache(seconds: int, maxsize: int = 128):
    """Decorator factory: an lru_cache whose whole cache is flushed once
    *seconds* have elapsed since it was created or last cleared."""
    lifetime = timedelta(seconds=seconds)

    def wrapper_cache(func):
        func = lru_cache(maxsize=maxsize)(func)
        func.lifetime = lifetime
        func.expiration = datetime.utcnow() + func.lifetime

        @wraps(func)
        def wrapped_func(*args, **kwargs):
            # Past the deadline: drop everything and start a new window.
            if datetime.utcnow() >= func.expiration:
                func.cache_clear()
                func.expiration = datetime.utcnow() + func.lifetime
            return func(*args, **kwargs)

        return wrapped_func

    return wrapper_cache
def TimeSnip(timewhen):
    """Convert a millisecond epoch timestamp to whole seconds.

    Uses integer division instead of the original string slicing
    (``int(str(t)[0:-3])``), which raised ValueError for values below
    1000 ms; for normal millisecond timestamps the result is identical.
    """
    return int(timewhen) // 1000
@lru_cache(maxsize = 250)
def UUIDFetch(username):
    """Look up a Minecraft account UUID via the Mojang API.

    Returns a (uuid, success) tuple; uuid is "" when the lookup failed.
    Results are memoized with lru_cache to limit API traffic.
    """
    uuid = ""
    uuidurl = f'https://api.mojang.com/users/profiles/minecraft/{username}?'
    uuidget = requests.get(uuidurl)
    # Reuse the GET response's status instead of issuing a second (HEAD)
    # request for the same URL, as the original did — halves API calls.
    if uuidget.status_code != 200:
        success = False
    else:
        uuid = uuidget.json()['id']
        success = True
    return uuid, success
def DateDisplay(timevar):
    """Millisecond epoch timestamp -> local datetime; 0 means no data."""
    if timevar == 0:
        return "Player online or data not found."
    return datetime.fromtimestamp(int(timevar / 1000))
def LengthProcess(Start, End):
    """Format the span between two millisecond timestamps as h/m/s text."""
    elapsed_ms = End - Start
    if elapsed_ms <= 0:
        # Negative or zero length means the session is still running.
        return "Player online/Game ongoing."
    total = int(elapsed_ms)
    seconds = int((total / 1000) % 60)
    minutes = int((total / (1000 * 60)) % 60)
    hours = int((total / (1000 * 60 * 60)) % 24)
    if hours == 0:
        return "%dm:%ds" % (minutes, seconds)
    return "%dh:%dm:%ds" % (hours, minutes, seconds)
def GameReadable(gameType):
    """Translate a Hypixel API gameType code into a display name.

    Codes "UHC", "No data" and "N/A" are returned unchanged; any other
    unknown code yields "" — the same contract as the original 90-line
    if/elif chain this lookup table replaces.
    """
    if gameType in ("UHC", "No data", "N/A"):
        return gameType
    names = {
        "BEDWARS": "Bedwars",
        "SKYWARS": "Skywars",
        "BUILD_BATTLE": "Build battle",
        "DUELS": "Duels",
        "PROTOTYPE": "Prototype",
        "HOUSING": "Housing",
        "PIT": "Pit",
        "MURDER_MYSTERY": "Murder mystery",
        "MCGO": "Cops and crims",
        "BATTLEGROUND": "Warlords",
        "GINGERBREAD": "Turbo cart racers",
        "LEGACY": "Classic games",
        "SMP": "Smp",
        "REPLAY": "Replay",
        "SKYBLOCK": "Skyblock",
        "SUPER_SMASH": "Smash heroes",
        "SPEED_UHC": "Speed UHC",
        "WALLS3": "Megawalls",
        "ARENA": "Arena",
        # NOTE(review): "Aracade" looks like a typo for "Arcade"; kept
        # byte-identical for output compatibility.
        "ARCADE": "Aracade",
        "VAMPIREZ": "VampireZ",
        "TNTGAMES": "TNT Games",
        "SURVIVAL_GAMES": "Blitz SG",
        "PAINTBALL": "Paintball",
        "WALLS": "The walls",
        "QUAKECRAFT": "Quake",
        "SKYCLASH": "Skyclash",
        "TRUE_COMBAT": "Crazy walls",
    }
    return names.get(gameType, "")
|
import os
import numpy as np
import pandas as pd
from tensorflow.keras.callbacks import EarlyStopping
from kerastuner.tuners import Hyperband
from modules.models import LanguageModel
from modules.utils.data_utils import DataGenerator
def run_model_fitting(PROJECT_NAME):
    """Hyperband-tune a LanguageModel on a sample of batches, retrain the
    best configuration on a full 80/20 split, and save the result.

    NOTE(review): all file paths use Windows-style backslashes.
    """
    SENTENCE_DECODER = pd.read_pickle(
        f'results\\objects\\{PROJECT_NAME}\\sentence_decoder.pkl'
    )
    TARGET_DECODER = pd.read_pickle(
        f'results\\objects\\{PROJECT_NAME}\\target_decoder.pkl'
    )
    # Fraction of the batch files used during hyper-parameter search.
    TUNING_FRACTION = 0.1
    BTCH_LIST = os.listdir(f'data\\preprocessed\\{PROJECT_NAME}\\inputs')
    BTCH = [i for i in range(len(BTCH_LIST))]
    BTCH = np.random.choice(
        BTCH, int(len(BTCH) * TUNING_FRACTION), replace=False)
    # 80/20 train/validation split of the tuning sample.
    TR_BTCH = BTCH[: int(len(BTCH) * 0.8)]
    TS_BTCH = BTCH[int(len(BTCH) * 0.8):]
    tr_generator = DataGenerator(
        list_batches=TR_BTCH,
        project_name=PROJECT_NAME,
        shuffle=True,
        multi_target=True
    )
    ts_generator = DataGenerator(
        list_batches=TS_BTCH,
        project_name=PROJECT_NAME,
        shuffle=True,
        multi_target=True
    )
    # Vocabulary sizes; the +1 presumably reserves an extra index
    # (padding/OOV) — TODO confirm against LanguageModel.
    model = LanguageModel(
        max_vocab=len(SENTENCE_DECODER) + 1,
        multi_target=True,
        max_target_1=len(SENTENCE_DECODER) + 1,
        max_target_2=len(TARGET_DECODER)
    )
    ES = EarlyStopping(
        monitor='val_loss',
        min_delta=0.0001,
        patience=5,
        verbose=1,
        mode='auto',
        restore_best_weights=True
    )
    tuner_obj = Hyperband(
        hypermodel=model,
        max_epochs=30,
        hyperband_iterations=1,
        objective='val_loss',
        directory='o',
        project_name=f'{PROJECT_NAME}'
    )
    tuner_obj.search(
        tr_generator,
        epochs=30,
        callbacks=[ES],
        verbose=2,
        validation_data=ts_generator
    )
    # Second phase: rebuild generators over ALL batches (choice() here with
    # the full size just produces a random permutation).
    BTCH = [i for i in range(len(BTCH_LIST))]
    BTCH = np.random.choice(BTCH, int(len(BTCH)), replace=False)
    TR_BTCH = BTCH[: int(len(BTCH) * 0.8)]
    TS_BTCH = BTCH[int(len(BTCH) * 0.8):]
    tr_generator = DataGenerator(
        list_batches=TR_BTCH,
        project_name=PROJECT_NAME,
        shuffle=True,
        multi_target=True
    )
    ts_generator = DataGenerator(
        list_batches=TS_BTCH,
        project_name=PROJECT_NAME,
        shuffle=True,
        multi_target=True
    )
    # Retrain the best model found by the tuner on the full data.
    model = tuner_obj.get_best_models(1)[0]
    ES = EarlyStopping(
        monitor='val_loss',
        min_delta=0.0001,
        patience=5,
        verbose=1,
        mode='auto',
        restore_best_weights=True
    )
    model.fit(
        tr_generator,
        epochs=50,
        verbose=2,
        callbacks=[ES],
        validation_data=ts_generator
    )
    model.save(f'results\\models\\{PROJECT_NAME}')
if __name__ == '__main__':
    # Entry point: ask for a project name and run the full pipeline.
    PROJECT_NAME = input('Provide project name: ')
    run_model_fitting(PROJECT_NAME=PROJECT_NAME)
|
<gh_stars>0
"""
"""
from fusion_review.itrace import IntensityTrace
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
class IntensityTraceFigurePanel:
    """Paginated matplotlib display of intensity traces.

    Splits the traces of an intensity database across ``figs`` figure
    panels, each a ``rows`` x ``cols`` grid of subplots, or one trace per
    full-size figure when rows == cols == 1.
    """
    def __init__(self, num_traces, num_rows, num_columns, intensity_database):
        #GET RID OF NUM_TRACES AND JUST USE ID.NUM_TRACES
        self.id = intensity_database
        # First trace index, and the running cursor over traces.
        self.stidx = 0
        self.curridx = self.stidx
        self.rows = num_rows
        self.cols = num_columns
        self.isSingle = False
        self.inverted = False
        self.disp = True
        # Number of figure panels needed for the remaining traces.
        self.figs = ((num_traces - self.stidx) // (self.rows * self.cols)) + 1
        if self.rows == self.cols == 1:
            self.isSingle = True
            self.figs -= 1
    def invert_colors(self):
        """Switch matplotlib rc colors between dark and light themes."""
        if self.inverted:
            mpl.rc("axes", edgecolor="white", facecolor="gray", labelcolor="white")
            mpl.rc("text", color="white")
            mpl.rc("figure", facecolor="black")
            mpl.rc("xtick", color="white")
            mpl.rc("ytick", color="white")
        else:
            mpl.rc("axes", edgecolor="black", facecolor="white", labelcolor="black")
            mpl.rc("text", color="black")
            mpl.rc("figure", facecolor="white")
            mpl.rc("xtick", color="black")
            mpl.rc("ytick", color="black")
    def handle_multiple_plots(self, axes):
        """Fill one figure's grid of subplots with consecutive traces."""
        for row_idx in range(0, self.rows):
            for col_idx in range(0, self.cols):
                # plt.subplots returns a 2-D axes array for multiple rows,
                # a 1-D array for a single row.
                if self.rows > 1:
                    coords = (row_idx, col_idx)
                else:
                    coords = (col_idx,)
                axes[coords].label_outer()
                if self.curridx >= self.id.num_traces:
                    break
                self.setup(axes[coords])
                axes[coords].set_title("Trace {} of {}".format(self.curridx+1, self.id.num_traces), fontsize=8)
                # Orange start line marks traces flagged as fusion events.
                start_line_color = "orange" if self.id.df["isFusion"][self.curridx] else "tab:blue"
                axes[coords].axvline(x=self.id.start, color=start_line_color, linestyle="dashed")
                self.curridx += 1
            if self.curridx >= self.id.num_traces:
                break
        if self.disp:
            plt.show(block=False)
    def handle_single_plot(self, panel_count):
        """Draw the current trace on its own full-size figure."""
        # Passes the pyplot module itself as setup()'s "axes" argument;
        # this works because plt exposes plot()/axvline() at module level.
        self.setup(plt)
        plt.axvline(x=self.id.start, color="b", linestyle="dashed", zorder=0)
        plt.title("Trace {} of {}".format(panel_count, self.figs), fontsize=16)
        fig = plt.gcf()
        fig.set_size_inches(12, 5)
        plt.xticks(ticks=[200*i for i in range(0, len(self.id.full_time) // 200)])
        if self.disp:
            plt.show(block=False)
        self.curridx += 1
    def setup(self, axes):
        """Plot the current trace's data (and fusion markers) onto *axes*."""
        it = IntensityTrace(self.curridx+1, self.id)
        it.set_raw_norm_data()
        for key in it.datad:
            curr_color = it.datad[key]["c"]
            curr_z = it.datad[key]["z"]
            if key == "TruncDataNorm":
                # For this key, plot the normalised raw data series.
                axes.plot(self.id.full_time, np.asarray(self.id.df["RawDataNorm"][self.curridx]), zorder=curr_z, color=curr_color)
                continue
            if it.isFusion:
                # Mark the fusion interval: dashed lines at points [0] and
                # [2], a solid line at point [1].
                fusion_interval_points = it.get_fusion_data()
                axes.axvline(x=fusion_interval_points[0], color="r", linestyle="dashed", zorder=0)
                axes.axvline(x=fusion_interval_points[1], color="r", zorder=0)
                axes.axvline(x=fusion_interval_points[2], color="r", linestyle="dashed", zorder=0)
        return axes
    def form_plot(self):
        """Create every panel and populate it with traces."""
        for panel in range(self.stidx, self.figs):
            fig, axes = self.form_panel(panel + 1)
            if self.isSingle:
                self.handle_single_plot(axes)
            else:
                self.handle_multiple_plots(axes)
    def form_panel(self, panel_count):
        """Create one rows x cols figure and return (fig, axes).

        NOTE(review): returns None when isSingle is True, so form_plot's
        tuple unpacking would raise in single-trace mode — confirm whether
        that path is ever exercised.
        """
        if not self.isSingle:
            fig, ax = plt.subplots(self.rows, self.cols)
            plt.subplots_adjust(hspace=0.4)
            fig.suptitle("Figure Panel {} of {}".format(panel_count, self.figs), fontsize=16)
            fig.set_size_inches(12, 5)
            return fig, ax
|
<gh_stars>0
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table.types import DataTypes
from pyflink.table.udf import udaf, udf, AggregateFunction
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase
class BatchPandasUDAFITTests(PyFlinkBlinkBatchTableTestCase):
    """Integration tests for pandas UDAFs on the blink batch planner."""
    def test_group_aggregate_function(self):
        # Mixes a general udf, a pandas udf and pandas udafs in one query.
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT()])
        self.t_env.register_table_sink("Results", table_sink)
        # general udf
        add = udf(lambda a: a + 1, result_type=DataTypes.INT())
        # pandas udf
        substract = udf(lambda a: a - 1, result_type=DataTypes.INT(), func_type="pandas")
        max_udaf = udaf(lambda a: a.max(), result_type=DataTypes.INT(), func_type="pandas")
        # mean_udaf is the module-level pandas UDAF defined below this class.
        t.group_by("a") \
            .select(t.a, mean_udaf(add(t.b)), max_udaf(substract(t.c))) \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["1,6.0,5", "2,3.0,3", "3,3.0,2"])
    def test_group_aggregate_without_keys(self):
        # Global (keyless) aggregation over all rows.
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a'],
            [DataTypes.INT()])
        min_add = udaf(lambda a, b, c: a.min() + b.min() + c.min(),
                       result_type=DataTypes.INT(), func_type="pandas")
        self.t_env.register_table_sink("Results", table_sink)
        t.select(min_add(t.a, t.b, t.c)) \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["5"])
    def test_group_aggregate_with_aux_group(self):
        # Grouping on a derived (auxiliary) key plus registered UDAFs,
        # with python metrics enabled to exercise MaxAdd's counter.
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [DataTypes.TINYINT(), DataTypes.INT(), DataTypes.FLOAT(), DataTypes.INT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        t.group_by("a") \
            .select("a, a + 1 as b, a + 2 as c") \
            .group_by("a, b") \
            .select("a, b, mean_udaf(b), max_add(b, c, 1)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["1,2,2.0,6", "2,3,3.0,8", "3,4,4.0,10"])
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
    """Pandas UDAF returning the mean of the aggregated column."""
    return v.mean()
class MaxAdd(AggregateFunction, unittest.TestCase):
    """Pandas UDAF summing the per-batch maxima of its arguments.

    Also inherits unittest.TestCase so get_value can use assertEqual to
    verify the metric counter registered in open().
    """
    def open(self, function_context):
        # Register a counter metric under group "key"/"value" and mirror
        # its expected value locally for the assertion in get_value().
        mg = function_context.get_metric_group()
        self.counter = mg.add_group("key", "value").counter("my_counter")
        self.counter_sum = 0
    def get_value(self, accumulator):
        # counter
        self.counter.inc(10)
        self.counter_sum += 10
        self.assertEqual(self.counter_sum, self.counter.get_count())
        # NOTE(review): returns only the FIRST accumulated value; later
        # accumulate() calls append but are never read here.
        return accumulator[0]
    def create_accumulator(self):
        # Accumulator is a plain list of per-call results.
        return []
    def accumulate(self, accumulator, *args):
        # Sum the max() of every argument batch and record it.
        result = 0
        for arg in args:
            result += arg.max()
        accumulator.append(result)
if __name__ == '__main__':
    import unittest
    try:
        # Prefer JUnit-style XML reports when xmlrunner is available.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
import board
import busio
from time import sleep, time
from math import atan, atan2, cos, pi, sin
from digitalio import DigitalInOut, Direction, Pull
DEBUG = True
# Pin assignments for the status LEDs.
PIN_ONBOARD_LED = board.D13
PIN_PACKET_RECEIVED_LED = board.D6
PIN_PACKET_SENT_LED = board.D9
# Interval between sends; the _MIN suffix suggests minutes — TODO confirm.
SEND_PACKET_INTERVAL_MIN = 0.6
# SPI bus pins shared with the radio module.
SPI_SCK = board.SCK
SPI_MISO = board.MISO
SPI_MOSI = board.MOSI
# RFM69 chip-select / reset pins and network addressing.
RFM69_CS = DigitalInOut(board.D4)
RFM69_RST = DigitalInOut(board.D5)
RFM69_NETWORK_NODE = 103
RFM69_SEND_TO_NODE = 102
RFM69_SEND_TIMEOUT_SEC = 5.0
RFM69_RECEIVE_TIMEOUT_SEC = 5.0
# Frequency of the radio in Mhz. Must match your
# module! Can be a value like 915.0, 433.0, etc.
RFM69_RADIO_FREQ_MHZ = 915.0
import adafruit_rfm69
def millis():
    """Current epoch time in milliseconds (float)."""
    return 1000 * time()
def minutes(start, decimal=0):
    """Minutes elapsed since *start* (a millis() timestamp), rounded to
    *decimal* places."""
    # 60 seconds * 1000 ms = 1 minute
    elapsed_ms = millis() - start
    return round(elapsed_ms / 60000, decimal)
def blinkLED(led, wait=0.2, cycles=1):
    """Blink *led* (any object with a boolean ``value`` attribute) *cycles*
    times, holding each on/off state for *wait* seconds."""
    for _ in range(cycles):
        led.value = True
        sleep(wait)
        led.value = False
        sleep(wait)
def pack(number, length=4):
    """Encode non-negative *number* big-endian as a string of chr() bytes.

    The result is left-padded with chr(0) up to *length* characters; a
    number that needs more bytes is returned unpadded and untruncated.
    """
    octets = []
    value = number
    while value > 0:
        octets.append(value & 0xFF)
        value >>= 8
    encoded = "".join(chr(octet) for octet in reversed(octets))
    # rjust only pads when the encoding is shorter than *length*.
    return encoded.rjust(length, chr(0))
def unpack(st):
    """Decode a big-endian sequence of bytes into an integer.

    Generalized to accept either a string of chr() bytes (as produced by
    pack()) or a bytes/bytearray (as returned by rfm69.receive()); the
    original only handled str and raised TypeError on bytearray slices,
    because indexing a bytearray yields ints and ord(int) fails.

    Returns 0 for an empty input.
    """
    n = 0
    for s in st:
        # bytes/bytearray iteration yields ints already; str yields chars.
        o = s if isinstance(s, int) else ord(s)
        n = (n << 8) | o
    return n
# Initialize the onboard LED
heartBeatLED = DigitalInOut(PIN_ONBOARD_LED)
heartBeatLED.direction = Direction.OUTPUT
# Status LEDs for radio activity.
packetReceivedLED = DigitalInOut(PIN_PACKET_RECEIVED_LED)
packetReceivedLED.direction = Direction.OUTPUT
packetSentLED = DigitalInOut(PIN_PACKET_SENT_LED)
packetSentLED.direction = Direction.OUTPUT
# Initialize the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Initialize the SPI bus
spi = busio.SPI(SPI_SCK, MOSI=SPI_MOSI, MISO=SPI_MISO)
print()
print("This is node #{0}".format(RFM69_NETWORK_NODE))
print()
# Initialize RFM69 radio
print("Initializing the RFM69 radio")
rfm69 = adafruit_rfm69.RFM69(spi, RFM69_CS, RFM69_RST, RFM69_RADIO_FREQ_MHZ)
# Optionally set an encryption key (16 byte AES key). MUST match both
# on the transmitter and receiver (or be set to None to disable/the default).
# NOTE(review): b'\<KEY>' looks like a redacted placeholder rather than a
# valid 16-byte key -- restore a real key before deploying.
rfm69.encryption_key = b'\<KEY>'
rfm69Celsius = rfm69.temperature
rfm69Fahrenheit = round(rfm69Celsius * 1.8 + 32, 1)
# Print out some RFM69 chip state:
print("RFM69 Radio Data")
print(' Temperature: {0}°F ({1}°C)'.format(rfm69Fahrenheit, rfm69Celsius))
print(' Frequency: {0} MHz'.format(round(rfm69.frequency_mhz, 0)))
print(' Bit rate: {0} kbit/s'.format(rfm69.bitrate / 1000))
print(' Frequency deviation: {0} kHz'.format(rfm69.frequency_deviation / 1000))
# Loop/state bookkeeping.
loopCount = 0
receivedPacket = False
packetReceivedCount = 0
packetSentCount = 0
resendPacket = False
ackPacketsReceived = 0
acknowledged = False
firstRun = True  # forces an initial send on the first loop iteration
startSendMillis = millis()
print()
# Main loop: periodically send a packet to the peer node, always listen for
# incoming packets, and ACK any non-ACK packet received.
while True:
    blinkLED(heartBeatLED)
    loopCount += 1
    if DEBUG:
        print("Loop #{0:6d}".format(loopCount))
        print()
    currentSendMinutes = minutes(startSendMillis, 1)
    #
    # RFM69 radio stuff
    #
    if acknowledged or firstRun or currentSendMinutes >= SEND_PACKET_INTERVAL_MIN:
        packetSentLED.value = True
        sleep(0.5)
        startSendMillis = millis()
        if not resendPacket:
            # Pack a brand-new packet; on a resend the previous outPacket is reused.
            packetSentCount += 1
            packedPacketNumber = pack(packetSentCount, 4)
            packedFromNode = pack(RFM69_NETWORK_NODE, 2)
            packedToNode = pack(RFM69_SEND_TO_NODE, 2)
            packedType = pack(1, 1)  # type 1 = standard data packet
            packedTotalPackets = pack(25, 1)
            packedSubPacketNumber = pack(12, 1)
            payload = "Hello node {0}".format(RFM69_SEND_TO_NODE)
            outPacketStart = packedPacketNumber + packedFromNode + packedToNode + packedType
            outPacketEnd = packedTotalPackets + packedSubPacketNumber + payload
            # +1 accounts for the length byte itself.
            outPacketLength = len(outPacketStart + outPacketEnd) + 1
            packedPacketLength = pack(outPacketLength, 1)
            outPacket = outPacketStart + packedPacketLength + outPacketEnd
        print("Sending packet #{0}, '{1}' message!".format(packetSentCount, payload))
        print()
        try:
            # BUGFIX: the timeout was previously passed as the third positional
            # argument of bytes(), which is the *errors* parameter and raises
            # TypeError for a float. rfm69.send() takes only the payload here.
            rfm69.send(bytes(outPacket, "utf-8"))
        except RuntimeError:
            pass
    print('Waiting for packets...')
    inPacket = rfm69.receive(timeout=RFM69_RECEIVE_TIMEOUT_SEC)
    if inPacket is None:
        # Packet has not been received; flag the outgoing packet for resend.
        resendPacket = True
        receivedPacket = False
        packetReceivedLED.value = False
        print('Received nothing!')
        print()
        sleep(0.5)
    else:
        # Received a new packet!
        receivedPacket = True
        packetReceivedCount += 1
        packetReceivedLED.value = True
        # Header layout: packet number (4) | from node (2) | to node (2) | type (1).
        packetNumberIn = unpack(inPacket[0:4])
        fromNodeAddressIn = unpack(inPacket[4:6])
        toNodeAddressIn = unpack(inPacket[6:8])
        # BUGFIX: the type byte must come from the *received* packet; the
        # original read it from outPacket (the packet we just sent).
        typeIn = unpack(inPacket[8:9])
        if typeIn == 1:
            # Standard Packet: payload starts after the 12-byte header.
            payloadIn = inPacket[12:]
        elif typeIn == 2:
            # Acknowledgement Packet
            payloadIn = inPacket[10:]
        else:
            print("Invalid Packet: Type {0}".format(typeIn))
            # BUGFIX: payloadIn was left undefined here, so the str() call
            # below raised NameError for unknown packet types.
            payloadIn = b""
        payloadInText = str(payloadIn, 'ASCII')
        if typeIn == 2 and payloadInText == "ACK":
            # ACK packet
            acknowledged = True
            print("Received ACK of packet {0} from node {1}".format(packetNumberIn, fromNodeAddressIn))
        else:
            # New packet
            print()
            print("Received new type {0} packet {1} from node {2}, raw bytes '{3}'".format(typeIn, packetNumberIn, fromNodeAddressIn, inPacket))
        #
        # Add packet validation here
        #
        # Finish unpacking the packet
        if typeIn == 1:
            packetLenthIn = unpack(inPacket[9:10])
            totalPacketsIn = unpack(inPacket[10:11])
            subPacketNumberIn = unpack(inPacket[11:12])
        print("Received payload (ASCII): '{0}'".format(payloadInText))
        sleep(0.2)
        packetReceivedLED.value = False
        if typeIn != 2:
            # ACK the packet
            packetSentLED.value = True
            sleep(0.5)
            # BUGFIX: the original referenced the undefined name
            # 'fromNodeAddress'; the sender's address is fromNodeAddressIn.
            ackPacket = pack(packetNumberIn, 4) + pack(RFM69_NETWORK_NODE, 2) + pack(fromNodeAddressIn, 2) + pack(RFM69_SEND_TO_NODE, 2) + pack(2, 1)
            ackPacketLength = len(ackPacket) + 4
            ackPacket = ackPacket + pack(ackPacketLength, 1) + "ACK"
            print("Sending ACK of packet {0} to node {1}".format(packetNumberIn, fromNodeAddressIn))
            try:
                # BUGFIX: same bytes() timeout-argument error as above.
                rfm69.send(bytes(ackPacket, "utf-8"))
            except RuntimeError:
                pass
            packetSentLED.value = False
    firstRun = False
    sleep(0.2)
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define asv benchmark suite that estimates the speed of applications.
"""
import pennylane as qml
from pennylane import numpy as np
from pennylane.templates.subroutines import UCCSD
from functools import partial
from ..benchmark_functions.vqe import benchmark_vqe
from ..benchmark_functions.hamiltonians import ham_lih
from ..benchmark_functions.qaoa import benchmark_qaoa
from ..benchmark_functions.machine_learning import benchmark_machine_learning
import networkx as nx
class VQE_light:
    """Benchmark the VQE algorithm using different number of optimization steps and grouping
    options."""

    # asv parametrization: cartesian product of step counts and grouping flags.
    params = ([1, 3], [False, True])
    param_names = ["n_steps", "optimize"]

    def time_hydrogen(self, n_steps, optimize):
        """Time a VQE algorithm with the UCCSD ansatz for computing the ground state energy of the
        hydrogen molecule."""
        benchmark_vqe({"n_steps": n_steps, "optimize": optimize})

    def peakmem_hydrogen(self, n_steps, optimize):
        """Benchmark the peak memory usage of the VQE algorithm with the UCCSD ansatz for computing
        the ground state energy of the hydrogen molecule."""
        benchmark_vqe({"n_steps": n_steps, "optimize": optimize})
class VQE_heavy:
    """Benchmark the VQE algorithm using different grouping options for the lithium hydride molecule
    with 2 active electrons and 8 active spin-orbitals. The sto-3g basis set and UCCSD ansatz are
    used."""

    params = [False, True]
    param_names = ["optimize"]
    timeout = 600  # 10 minutes
    repeat = (1, 1, 600)  # Only collect one sample
    number = 1  # one iteration in each sample

    def setup(self, optimize):
        """Build the UCCSD ansatz, fixed circuit parameters and device once
        before the timed/profiled benchmarks run."""
        # Single-excitation wire sequences for the UCCSD template.
        s_wires = [[0, 1, 2],
                   [0, 1, 2, 3, 4],
                   [0, 1, 2, 3, 4, 5, 6],
                   [1, 2, 3],
                   [1, 2, 3, 4, 5],
                   [1, 2, 3, 4, 5, 6, 7]]
        # Double-excitation wire-pair sequences for the UCCSD template.
        d_wires = [[[0, 1], [2, 3]],
                   [[0, 1], [2, 3, 4, 5]],
                   [[0, 1], [2, 3, 4, 5, 6, 7]],
                   [[0, 1], [3, 4]],
                   [[0, 1], [3, 4, 5, 6]],
                   [[0, 1], [4, 5]],
                   [[0, 1], [4, 5, 6, 7]],
                   [[0, 1], [5, 6]],
                   [[0, 1], [6, 7]]]
        # Reference state passed to UCCSD as init_state (2 electrons, 8 spin-orbitals).
        hf_state = np.array([1, 1, 0, 0, 0, 0, 0, 0])
        self.ham = ham_lih
        self.ansatz = partial(UCCSD, init_state=hf_state, s_wires=s_wires, d_wires=d_wires)
        # Fixed variational parameters so every benchmark run is identical.
        self.parameters = np.array(
            [
                6.39225682,
                -0.99471664,
                -4.2026237,
                -4.48579097,
                9.8033157,
                1.19030864,
                -3.89924719,
                7.25037131,
                -0.95897967,
                -0.75287453,
                0.92252162,
                1.10633277,
                0.94911997,
                1.09138887,
                5.27297259,
            ]
        )
        self.device = qml.device("default.qubit", wires=len(hf_state))

    def time_lih(self, optimize):
        """Time the VQE algorithm for the lithium hydride molecule."""
        hyperparams = {
            "ham": self.ham,
            "ansatz": self.ansatz,
            "params": self.parameters,
            "device": self.device,
            "optimize": optimize,
        }
        benchmark_vqe(hyperparams)

    def peakmem_lih(self, optimize):
        """Benchmark the peak memory usage of the VQE algorithm for the lithium hydride molecule."""
        hyperparams = {
            "ham": self.ham,
            "ansatz": self.ansatz,
            "params": self.parameters,
            "device": self.device,
            "optimize": optimize,
        }
        benchmark_vqe(hyperparams)
class QAOA_light:
    """Benchmark the QAOA algorithm for finding the minimum vertex cover of a small graph using
    different number of layers."""

    # asv parametrization: one run per layer count.
    params = [1, 5]
    param_names = ["n_layers"]

    def time_minvertex_light(self, n_layers):
        """Time a QAOA algorithm for finding the minimum vertex cover of a small graph."""
        benchmark_qaoa({"n_layers": n_layers})

    def peakmem_minvertex_light(self, n_layers):
        """Benchmark the peak memory usage of QAOA algorithm for finding the minimum vertex cover of
        a small graph."""
        benchmark_qaoa({"n_layers": n_layers})
class QAOA_heavy:
    """Benchmark the QAOA algorithm for finding the minimum vertex cover of a large graph."""

    n_layers = 5
    graph = nx.complete_graph(20)
    timeout = 600  # 10 minutes
    repeat = (1, 1, 600)  # Only collect one sample
    number = 1  # one iteration in each sample

    def _hyperparams(self):
        """Hyperparameters shared by both benchmark methods."""
        return {"n_layers": self.n_layers, "graph": self.graph}

    def time_minvertex_heavy(self):
        """Time a QAOA algorithm for finding the minimum vertex cover of a large graph."""
        benchmark_qaoa(self._hyperparams())

    def peakmem_minvertex_heavy(self):
        """Benchmark the peak memory usage of a QAOA algorithm for finding the minimum vertex cover
        of a large graph."""
        benchmark_qaoa(self._hyperparams())
class ML_light:
    """Benchmark a hybrid quantum-classical machine learning application with a small dataset."""

    # asv parametrization: run once per autodiff interface.
    params = ["autograd", "torch", "tf"]
    param_names = ["interface"]
    # Dataset dimensions.
    n_features = 4
    n_samples = 20

    def time_ml_light(self, interface):
        """Time 50 training steps of a hybrid quantum machine learning example."""
        # NOTE(review): n_features is passed under the "n_layers" key (here and
        # in ML_heavy) -- confirm this matches benchmark_machine_learning's
        # expected hyperparameter names.
        hyperparams = {
            "n_layers": self.n_features,
            "n_samples": self.n_samples,
            "interface": interface,
        }
        benchmark_machine_learning(hyperparams)

    def peakmem_ml_light(self, interface):
        """Benchmark peak memory of 50 training steps of a hybrid quantum machine learning example
        ."""
        hyperparams = {
            "n_layers": self.n_features,
            "n_samples": self.n_samples,
            "interface": interface,
        }
        benchmark_machine_learning(hyperparams)
class ML_heavy:
    """Benchmark a hybrid quantum-classical machine learning application with a large dataset."""

    # asv parametrization: run once per autodiff interface.
    params = ["autograd", "torch", "tf"]
    param_names = ["interface"]
    # Dataset dimensions (larger than ML_light).
    n_features = 10
    n_samples = 100
    timeout = 600  # 10 minutes
    repeat = (1, 1, 600)  # Only collect one sample
    number = 1  # one iteration in each sample

    def time_ml_heavy(self, interface):
        """Time 50 training steps of a hybrid quantum machine learning example."""
        # NOTE(review): n_features is passed under the "n_layers" key -- see
        # the matching note on ML_light; verify against the benchmark function.
        hyperparams = {
            "n_layers": self.n_features,
            "n_samples": self.n_samples,
            "interface": interface,
        }
        benchmark_machine_learning(hyperparams)

    def peakmem_ml_heavy(self, interface):
        """Benchmark peak memory of 50 training steps of a hybrid quantum machine learning example."""
        hyperparams = {
            "n_layers": self.n_features,
            "n_samples": self.n_samples,
            "interface": interface,
        }
        benchmark_machine_learning(hyperparams)
|
<gh_stars>0
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from aws_orbit import plugins
from aws_orbit.models.context import Context, ContextSerDe, FoundationContext
from aws_orbit.remote_files import cdk_toolkit, eksctl, env, foundation, helm, kubectl, teams
from aws_orbit.services import ecr, ssm
_logger: logging.Logger = logging.getLogger(__name__)
def delete_image(args: Tuple[str, ...]) -> None:
    """Delete a Docker image's ECR repository for an environment.

    args: (env_name, image_name). Re-deploys the environment stack first so
    pending env changes are applied before the repository is removed.
    """
    _logger.debug("args %s", args)
    env_name: str = args[0]
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    if len(args) != 2:
        raise ValueError("Unexpected number of values in args.")
    image_name: str = args[1]
    env.deploy(context=context, eks_system_masters_roles_changes=None)
    _logger.debug("Env changes deployed")
    ecr.delete_repo(repo=f"orbit-{context.name}/{image_name}")
    _logger.debug("Docker Image Destroyed from ECR")
def destroy_teams(args: Tuple[str, ...]) -> None:
    """Tear down all team-level resources of the environment named in args[0].

    Order matters: plugins are destroyed first, then Helm releases, then
    Kubernetes team components, then the EKS/CloudFormation team stacks, and
    finally the SSM bookkeeping entries are cleaned up.
    """
    _logger.debug("args %s", args)
    env_name: str = args[0]
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("context.name %s", context.name)
    plugins.PLUGINS_REGISTRIES.load_plugins(context=context, plugin_changesets=[], teams_changeset=None)
    kubectl.write_kubeconfig(context=context)
    _logger.debug("Plugins loaded")
    for team_context in context.teams:
        plugins.PLUGINS_REGISTRIES.destroy_team_plugins(context=context, team_context=team_context)
    _logger.debug("Plugins destroyed")
    for team_context in context.teams:
        helm.destroy_team(context=context, team_context=team_context)
    _logger.debug("Helm Charts uninstalled")
    kubectl.destroy_teams(context=context)
    _logger.debug("Kubernetes Team components destroyed")
    eksctl.destroy_teams(context=context)
    _logger.debug("EKS Team Stacks destroyed")
    teams.destroy_all(context=context)
    _logger.debug("Teams Stacks destroyed")
    ssm.cleanup_teams(env_name=context.name)
def destroy_env(args: Tuple[str, ...]) -> None:
    """Tear down the environment-level resources of the env named in args[0].

    Destroys Helm releases, Kubernetes env components, the EKS env stacks,
    the Env CloudFormation stack and finally the CDK toolkit stack.
    """
    _logger.debug("args %s", args)
    env_name: str = args[0]
    context: "Context" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=Context)
    _logger.debug("context.name %s", context.name)
    helm.destroy_env(context=context)
    _logger.debug("Helm Charts uninstalled")
    kubectl.destroy_env(context=context)
    _logger.debug("Kubernetes Environment components destroyed")
    eksctl.destroy_env(context=context)
    _logger.debug("EKS Environment Stacks destroyed")
    env.destroy(context=context)
    _logger.debug("Env Stack destroyed")
    cdk_toolkit.destroy(context=context)
    _logger.debug("CDK Toolkit Stack destroyed")
def destroy_foundation(args: Tuple[str, ...]) -> None:
    """Tear down the foundation stack and CDK toolkit of the env in args[0]."""
    _logger.debug("args %s", args)
    env_name: str = args[0]
    context: "FoundationContext" = ContextSerDe.load_context_from_ssm(env_name=env_name, type=FoundationContext)
    _logger.debug("context.name %s", context.name)
    foundation.destroy(context=context)
    # NOTE(review): log message says "Demo Stack" although this destroys the
    # foundation stack -- possibly a leftover from an earlier name.
    _logger.debug("Demo Stack destroyed")
    cdk_toolkit.destroy(context=context)
    _logger.debug("CDK Toolkit Stack destroyed")
|
<filename>main.py
from __future__ import print_function
from flask import Flask
import numpy as np
import pandas as pd
from flask import render_template
from flask import request
from flask import abort, redirect, url_for
from flask import send_from_directory
from CoverModel.project import *
from werkzeug.utils import secure_filename
import sys
import os
import copy
# Uploaded CSVs are stored under ./uploads; only .csv files are accepted.
UPLOAD_FOLDER = os.getcwd()+'/uploads'
ALLOWED_EXTENSIONS = set(['csv'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/uploads', methods=['POST', 'GET'])
def upload_file():
    """Handle a CSV upload: save the file, run the prediction model on it and
    redirect to the download of the generated '<name>_predicted.csv'.

    GET renders the upload form; POST expects a 'file' form field.
    """
    # BUGFIX: flash() was called below but never imported, raising NameError
    # at runtime on every invalid upload.
    from flask import flash
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # If the user does not select a file, the browser may submit an
        # empty part without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Runs the model and writes '<filename>_predicted.csv' to uploads.
            Use_ModelSGBoost(filename=filename)
            return redirect(url_for('uploaded_file',
                                    filename=filename + '_predicted.csv'))
    return render_template('upfile.html')
@app.route('/downloads/<filename>')
def uploaded_file(filename):
    """Serve a previously generated file from the upload folder."""
    directory = app.config['UPLOAD_FOLDER']
    return send_from_directory(directory, filename)
@app.route('/', methods=['POST', 'GET'])
def GPSData_Prediction():
    """Render the manual-entry form and, on POST, predict soil/cover type.

    flag=1: 11 numeric fields supplied -> predict the soil type first, then
            feed it as an extra feature into the cover-type model.
    flag=2: all 12 fields supplied -> predict the cover type directly.
    flag=0: incomplete input or plain GET -> no prediction.
    """
    dict_para = {}
    dict_para['flag']=0
    dict_para['SoilType']=0
    dict_para['CoverType']=0
    dataDict = {}
    if request.method == 'POST':
        PCA_Method = Data_PCAReduction()
        data_dict = request.form
        # Keep only the fields the user actually filled in.
        for item in data_dict.items():
            if item[1] != '':
                dataDict[item[0]] = [float(item[1])]
            else:
                pass
        if len(dataDict) == 11:
            #print(len(dataDict),file=sys.stderr)
            dict_para['flag']=1
            df = pd.DataFrame(dataDict)
            # Collapse correlated column groups into single PCA components.
            df = PCA_Method.PCA_Reduction(df, ['Hillshade_9am','Hillshade_Noon','Hillshade_3pm'], 1, 'Hillshade')
            df = PCA_Method.PCA_Reduction(df, ['Horizontal_Distance_To_Hydrology','Vertical_Distance_To_Hydrology'], 1, 'Distance_To_Hydrology')
            columns = list(df.columns)
            input_Soil = list(np.array(df[columns])[0])
            # Deep-copy before prediction so the soil model cannot mutate the
            # feature list reused for the cover model.
            temp = copy.deepcopy(input_Soil)
            UM = Use_Model()
            dict_para['SoilType'] = int(UM.Predict_Soil(input_Soil))
            # The predicted soil type becomes an extra cover-model feature.
            input_Cover = temp+[dict_para['SoilType']]
            dict_para['CoverType'] = int(UM.Predict_Cover(input_Cover))
        elif len(dataDict) == 12:
            dict_para['flag']=2
            df = pd.DataFrame(dataDict)
            df = PCA_Method.PCA_Reduction(df, ['Hillshade_9am','Hillshade_Noon','Hillshade_3pm'], 1, 'Hillshade')
            df = PCA_Method.PCA_Reduction(df, ['Horizontal_Distance_To_Hydrology','Vertical_Distance_To_Hydrology'], 1, 'Distance_To_Hydrology')
            columns = list(df.columns)
            input_Cover = list(np.array(df[columns])[0])
            UM = Use_Model()
            dict_para['CoverType'] = int(UM.Predict_Cover(input_Cover))
        else:
            # Incomplete input: render the page unchanged (flag stays 0).
            pass
    return render_template('hello.html', dict=dict_para)
if __name__ == '__main__':
    # Run the Flask development server with default host/port settings.
    app.run()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from detr.util.misc import NestedTensor
class PositionalEncoding(nn.Module):
    """Regular positional encoding module that returns the encoding.
    From: https://pytorch.org/tutorials/beginner/transformer_tutorial.html
    """

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/d_model) for even dims i.
        freqs = torch.exp(-math.log(10000.0) * torch.arange(0, d_model, 2).float() / d_model)
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Stored as (max_len, 1, d_model) so it broadcasts over the batch dim.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Returns positional encoding to input sequence
        Args:
            x (tensor): (bsz, d_model, seq_len) Input sequence.
        Returns:
            (tensor): (seq_len, 1, d_model) Input sequence added with
                positional encoding.
        """
        return self.pe[:x.size(2), :].to(x.device)
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, tensor_list: NestedTensor):
        feats = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        valid = ~mask
        # Cumulative counts of valid positions give 1-based row/col indices.
        y_pos = valid.cumsum(1, dtype=torch.float32)
        x_pos = valid.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_pos = y_pos / (y_pos[:, -1:, :] + eps) * self.scale
            x_pos = x_pos / (x_pos[:, :, -1:] + eps) * self.scale
        freq_idx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=feats.device)
        freqs = self.temperature ** (2 * (freq_idx // 2) / self.num_pos_feats)
        px = x_pos[:, :, :, None] / freqs
        py = y_pos[:, :, :, None] / freqs
        # Interleave sin (even) and cos (odd) channels, then flatten them.
        px = torch.stack((px[:, :, :, 0::2].sin(), px[:, :, :, 1::2].cos()), dim=4).flatten(3)
        py = torch.stack((py[:, :, :, 0::2].sin(), py[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # Channel order is (y-features, x-features), moved to dim 1.
        return torch.cat((py, px), dim=3).permute(0, 3, 1, 2)
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.
    """

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # One learned embedding per row index and per column index (up to 50).
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize both embedding tables with uniform noise."""
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        feats = tensor_list.tensors
        height, width = feats.shape[-2:]
        col_idx = torch.arange(width, device=feats.device)
        row_idx = torch.arange(height, device=feats.device)
        col_vecs = self.col_embed(col_idx)  # (w, num_pos_feats)
        row_vecs = self.row_embed(row_idx)  # (h, num_pos_feats)
        # Broadcast column and row embeddings over the grid and concatenate.
        grid = torch.cat([
            col_vecs.unsqueeze(0).repeat(height, 1, 1),
            row_vecs.unsqueeze(1).repeat(1, width, 1),
        ], dim=-1)
        # (h, w, 2*num_pos_feats) -> (bsz, 2*num_pos_feats, h, w)
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(feats.shape[0], 1, 1, 1)
def build_position_encoding(args):
    """Construct the position-encoding module selected by args.position_embedding."""
    N_steps = args.hidden_dim // 2
    # TODO find a better way of exposing other arguments
    if args.position_embedding in ('v2', 'sine'):
        return PositionEmbeddingSine(N_steps, normalize=True)
    if args.position_embedding in ('v3', 'learned'):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
|
<filename>src/spider/spider/core.py<gh_stars>0
#!/usr/bin/env python3
import json
import importlib
import socket
from geojson import Feature, Point, FeatureCollection
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
import cobwebs
from cobwebs.config import get_config, import_plugin
# data_test = {
# "action": "add",
# "data": {
# "address": "test",
# "description": "",
# "price": "1000",
# "date": "",
# "surface": "",
# "groundsurface": "",
# "url": [],
# "photos": [],
# "extra": {}
# }
# }
class Spider:
    """Collects classified ads from plugins, geocodes them, and talks to the
    database driver over the message-queue RPC channel.

    Improvements over the previous revision: the five copies of the
    rpc.send(...) call are centralized in _rpc_send(), delete()/hide() share
    _per_id_request(), and the duplicated branches of get()/purge() are
    merged. Observable behavior (requests sent, values returned) is unchanged.
    """

    def __init__(self):
        self.global_config = get_config()
        driver_module = importlib.import_module(self.global_config['main']['mq_driver'])
        self.mq_driver = driver_module.driver
        self.geolocator = Nominatim()
        self.plugins = import_plugin()
        self.logger = cobwebs.getLogger("spider.api")

    def _rpc_send(self, request):
        """Serialize *request* and send it to the db_driver over MQ RPC."""
        return self.mq_driver.rpc.send("db_driver", json.dumps(request),
                                       self.global_config['main']['mq_host'])

    def __get_geocode(self, address):
        """Geocode *address* (assumed to be in France) into a GeoJSON Feature.

        Returns None when the geocoder fails or times out.
        """
        _location = None
        _feature = None
        try:
            try:
                _location = self.geolocator.geocode("{}, France".format(address))
            except Exception as e:
                self.logger.error(str(e))
            else:
                self.logger.debug(_location)
                _feature = Feature(geometry=Point((_location.longitude, _location.latitude)))
        except socket.timeout:
            self.logger.warning("Timeout during retrieving geocode for {}".format(address))
        except GeocoderTimedOut:
            self.logger.warning("Timeout during retrieving geocode for {}".format(address))
        return _feature

    def sync(self):
        """Pull ads from every plugin, geocode them and add them to the db.

        Returns a summary dict with the number and addresses of synced ads.
        """
        ads = []
        for _plug_name, _plugin in self.plugins.items():
            try:
                _ads = _plugin.__driver__.compute()
                self.logger.info('Trying to open plugin {}'.format(_plug_name))
                for _ad in _ads:
                    _ad['feature'] = self.__get_geocode(_ad['address'])
                    data = self._rpc_send({"action": "add", "data": _ad})
                    if data:
                        print("sync {}".format(_ad['address']))
                        ads.append(_ad)
            except AttributeError as e:
                # Plugin without a __driver__ attribute (or similar) is skipped.
                self.logger.error('Unable to open plugin {}'.format(_plug_name))
                self.logger.debug(str(e))
        return {"action": "sync", "number": len(ads), 'message': " ".join(map(lambda x: x['address'], ads))}

    def get(self, req_data=None):
        """Yield ads from the database: all of them when *req_data* is falsy,
        otherwise the ones matching *req_data*."""
        if not req_data:
            request = {"action": "list", "data": None}
        else:
            request = {"action": "get", "data": req_data}
        for _data in self._rpc_send(request):
            yield _data

    def purge(self, only_hidden=False, quick_delete=False):
        """Remove ads from the database.

        quick_delete: issue a single server-side purge.
        only_hidden:  delete only ads whose 'show' flag is unset.
        default:      delete every ad one by one.
        """
        request = {"action": "purge", "data": None}
        if quick_delete:
            request['data'] = self._rpc_send(request)
        else:
            ids = {}
            for item in self.get():
                if only_hidden and item['show']:
                    continue
                ret = self.delete((item['id'],))
                ids[item['id']] = ret['data'][item['id']]
            request['data'] = ids
        return request

    def _per_id_request(self, action, ids):
        """Send one *action* request per id and collect the per-id responses."""
        request = {"action": action, "data": dict()}
        for _id in ids:
            data = self._rpc_send({"action": action, "data": _id})
            request['data'][_id] = data
        return request

    def delete(self, ids):
        """Delete the ads with the given ids; returns per-id driver responses."""
        return self._per_id_request("delete", ids)

    def hide(self, ids):
        """Hide the ads with the given ids; returns per-id driver responses."""
        return self._per_id_request("hide", ids)
|
<reponame>alan-turing-institute/QUIPP-workflow
"""
Code to calculate disclosure risk metric.
"""
import argparse
import json
import os
import pickle
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, "utilities"))
from utils import handle_cmdline_args
# constants
path_save_max_values = "./dict_max_matches.pkl"
# if output_mode = 3, save the pdfs (see below for more info)
path_save_p_dist_all = "./p_dist_all.pkl"
# column that contains indices of the original (not synthesized) dataset
# when matching rows between two datasets, ignore indx_column
indx_column = "idx"
# mode 1: only return the maximum
# (this mode is identical to mode 2 with threshold_max = 0.999)
# mode 2: return all values more than np.max(p.d.f of one intruder row)*threshold_max
# mode 3: return full probability distribution for each intruder row
# output probability distribution over each row of the released data,
# e.g., if the released data has 9000 rows and the intruder's data has 1000 rows,
# a numpy array with 1000 x 9000 dimensions will be created.
output_mode = 1
# the following value will be used to extract "found" rows from the released data:
# np.max(p.d.f of one intruder row)*threshold_max
# this should help with floating point comparisons of numbers that are very close
threshold_max = 0.999
def compare_rows(row_check, dataframe_check, drop_column="idx"):
    """Find all the matched rows in dataframe_check given a row to check (row_check).

    A row matches when every column except *drop_column* is equal.
    """
    candidates = dataframe_check.drop(drop_column, axis=1)
    reference = row_check.drop(drop_column)
    # all(1): every remaining column of the row must match the reference.
    full_match = (candidates == reference).all(1)
    return dataframe_check[full_match]
def main():
    """Compute disclosure-risk privacy metrics for released synthetic datasets.

    Reads the run configuration from the JSON file given on the command line,
    samples an "intruder" dataset from the original data, matches each
    intruder row against every released dataset, derives a per-row match
    probability distribution, and writes EMRi/TMRi/TMRa metrics (and their
    normalized variants) to disclosure_risk.json.
    """
    # read command line options
    args = handle_cmdline_args()
    verbose = False
    with open(args.infile) as f:
        synth_params = json.load(f)
    if not (synth_params["enabled"] and
            synth_params['privacy_parameters_disclosure_risk']['enabled']):
        return
    print("[INFO] Calculating disclosure risk privacy metrics")
    # read dataset name from .json
    dataset = synth_params["dataset"]
    synth_method = synth_params["synth-method"]
    path_released_ds = args.outfile_prefix
    if synth_method == 'sgf':
        path_original_ds = os.path.join(path_released_ds,
                                        os.path.basename(dataset) + "_numcat.csv")
    else:
        path_original_ds = os.path.abspath(dataset) + '.csv'
    # read parameters from .json
    parameters = synth_params["parameters"]
    disclosure_risk_parameters = synth_params["privacy_parameters_disclosure_risk"]
    # read original data set
    data_full = pd.read_csv(path_original_ds)
    # read/set intruder samples number
    if disclosure_risk_parameters['num_samples_intruder'] > data_full.shape[0]:
        # BUGFIX: the original concatenated ints onto str here, which raised
        # TypeError instead of printing the intended message.
        sys.exit("Intruder samples cannot be more than original dataset samples: "
                 f"{disclosure_risk_parameters['num_samples_intruder']} > {data_full.shape[0]}")
    elif disclosure_risk_parameters['num_samples_intruder'] == -1:
        # -1 means "use every row of the original dataset".
        num_samples_intruder = data_full.shape[0]
    else:
        num_samples_intruder = disclosure_risk_parameters['num_samples_intruder']
    # sample indexes and use them to select rows from original data to form intruder dataset
    # also save indexes to .json
    np.random.seed(parameters['random_state'])
    indexes = np.random.choice(data_full.shape[0], num_samples_intruder, replace=False).tolist()
    data_intruder = data_full.loc[indexes, disclosure_risk_parameters['vars_intruder']]
    data_intruder.to_csv(path_released_ds + "/intruder_data.csv", index=False)
    with open(path_released_ds + "/intruder_indexes.json", 'w') as f:
        json.dump(indexes, f)
    # itdr: intruder
    df_itdr = pd.read_csv(path_released_ds + "/intruder_data.csv")
    # XXX should be changed after indices are added to real/synthetic data
    df_itdr["idx"] = df_itdr.index
    # list of paths of the released/synthetic datasets
    list_paths_released_ds = glob(path_released_ds + "/synthetic_data_*.csv")
    list_paths_released_ds.sort()
    dict_matches = {}
    num_rows_released = False
    num_files_released = False
    # rlsd: released
    # itdr: intruder
    if verbose:
        print("Finding similar rows between released and intruder's datasets...\n")
    for i_rlsd, one_released_ds in enumerate(list_paths_released_ds):
        if verbose:
            print(f"Processing {one_released_ds} ...")
        df_rlsd = pd.read_csv(one_released_ds)
        if not num_rows_released:
            # Record the dataset dimensions once, from the first file.
            num_rows_released = len(df_rlsd)
            num_files_released = len(list_paths_released_ds)
        # XXX should be changed after indices are added to real/synthetic data
        df_rlsd["idx"] = df_rlsd.index
        # consider only columns that intruder has access to
        df_rlsd_cols_selected = df_rlsd[df_itdr.columns]
        for i_itdr, one_intruder_row in df_itdr.iterrows():
            row_matches = compare_rows(one_intruder_row,
                                       df_rlsd_cols_selected,
                                       drop_column=indx_column)
            matches_num_rows = len(row_matches)
            if matches_num_rows > 0:
                matches_indx_list = row_matches.idx.to_list()
            else:
                matches_indx_list = []
            # dict_matches[row] collects one match-index list per released file.
            if not f"{i_itdr}" in dict_matches.keys():
                dict_matches[f"{i_itdr}"] = [matches_indx_list]
            else:
                dict_matches[f"{i_itdr}"].append(matches_indx_list)
    if verbose:
        print("Creating probability distributions for each row in intruder's dataset...")
    p_dist_all = np.array([])
    dict_max_matches = {}
    for i_itdr in dict_matches:
        if verbose:
            print(".", end="", flush=True)
        # first create a zero array with num_rows_released as the number of entries
        p_dist_row = np.zeros(num_rows_released)
        for m_rlsd in range(num_files_released):
            indicator_row = dict_matches[i_itdr][m_rlsd]
            len_indicator_row = len(indicator_row)
            # Part of equation 6 in "Accounting for Intruder Uncertainty Due to
            # Sampling When Estimating Identification Disclosure Risks in
            # Partially Synthetic Data" paper.
            p_dist_row[indicator_row] += np.ones(len_indicator_row)/len_indicator_row
        # normalize based on the number of released datasets
        p_dist_row /= float(num_files_released)
        # output_mode == 3, returns full probability
        if output_mode == 3:
            if len(p_dist_all) == 0:
                p_dist_all = np.vstack([p_dist_row])
            else:
                p_dist_all = np.vstack([p_dist_all, p_dist_row])
        # store indices and values correspond to p_dist_row >= (np.max(p_dist_row)*threshold_max)
        indx_max_matches = np.where(p_dist_row >= (np.max(p_dist_row)*threshold_max))[0].tolist()
        values_max_matches = p_dist_row[indx_max_matches].tolist()
        dict_max_matches[f"{i_itdr}"] = [indx_max_matches, values_max_matches]
    # save outputs
    with open(path_save_max_values, "wb") as output_file:
        pickle.dump(dict_max_matches, output_file)
    # output_mode == 3, returns full probability
    if output_mode == 3:
        with open(path_save_p_dist_all, "wb") as output_file:
            pickle.dump(p_dist_all, output_file)
    # Plot p.d.f computed in the previous step
    # This only works with output_mode == 3 (return full probability)
    row_select = 0
    while (row_select >= 0) and (output_mode == 3):
        row_select = int(input("\n\nSelect a row (indexed from 0) in the "
                               "intruder's dataset. (or enter -1 to exit) "))
        if row_select < 0:
            break
        elif row_select >= len(df_itdr):
            print(f"[ERROR] total number of rows in the intruder's dataset: {len(df_itdr)}")
            continue
        # print the selected row in dict_matches
        print(dict_matches[f"{row_select}"])
        # plot the p.d.f.
        plt.figure(figsize=(12, 6))
        plt.plot(p_dist_all[row_select, :].T, c="k")
        plt.xlabel("Released data row", size=22)
        plt.ylabel("p.d.f", size=22)
        plt.xticks(size=16)
        plt.yticks(size=16)
        plt.title(f"Intruder row: {row_select}", size=24)
        plt.grid()
        plt.tight_layout()
        plt.show()
    # Calculate privacy metrics
    with open(path_released_ds + "/intruder_indexes.json") as f_intruder_indexes:
        intruder_indexes = json.load(f_intruder_indexes)
    # c: number of best-match rows per intruder row;
    # I: 1 when the true original row is among the best matches.
    c = {key: len(value[0]) for key, value in dict_max_matches.items()}
    I = {key: np.multiply(intruder_indexes[int(key)] in value[0], 1) for key, value in dict_max_matches.items()}
    products = {k: c.get(k) * I.get(k) for k in set(c)}
    K = {key: np.multiply(value == 1, 1) for key, value in products.items()}
    c_indicator = {key: np.multiply(value == 1, 1) for key, value in c.items()}
    EMRi = sum({k: I.get(k) / c.get(k) for k in set(c)}.values())
    # BUGFIX: normalize by the resolved sample count; the raw JSON value may
    # be -1 (meaning "use all rows"), which flipped the sign of these metrics.
    EMRi_norm = EMRi / num_samples_intruder
    TMRi = float(sum(K.values()))
    TMRi_norm = TMRi / num_samples_intruder
    TMRa = TMRi / sum(c_indicator.values())
    metrics = {'EMRi': EMRi, 'TMRi': TMRi, 'TMRa': TMRa, 'EMRi_norm': EMRi_norm, 'TMRi_norm': TMRi_norm}
    if verbose:
        print(f"\nDisclosure risk metrics: {metrics}")
    with open(path_released_ds + "/disclosure_risk.json", 'w') as f:
        json.dump(metrics, f, indent=4)
if __name__ == '__main__':
    # Script entry point.
    main()
|
<reponame>sbussmann/Bussmann2015
"""
2014 November 13
<NAME>
Overlay ALMA contours on IRAC 3.6um, 4.5um, 8.0um 3-color image.
"""
from astropy.table import Table
#import aplpy
#from PIL import Image
import numpy
from astropy.io import fits
from astropy import wcs
from pylab import savefig
import img_scale
import yaml
import matplotlib.pyplot as plt
import math
from matplotlib.patches import Ellipse
import matplotlib
def transform(imloc, ra_center, dec_center, radial_extent, vmax=1.5):
    """Rescale and trim input image.

    Cuts a square of half-width ``radial_extent`` (arcsec) centred on
    (``ra_center``, ``dec_center``) out of the FITS image at ``imloc``.
    For non-ALMA images a sqrt intensity stretch capped at ``vmax`` is
    applied via ``img_scale.sqrt``.

    NOTE(review): this is Python-2-era code — ``header.keys().count`` and
    float slice indices below do not work under Python 3; verify the
    intended runtime before modernising.
    """
    hdu = fits.open(imloc)
    im = hdu[0].data
    # ALMA cubes are 4-D (stokes, channel, y, x): keep the first plane and
    # remember to skip the sqrt stretch for ALMA data.
    thisisalma = False
    if im.ndim == 4:
        thisisalma = True
        im = im[0, 0, :, :]
    optical_header = hdu[0].header
    # Replace non-finite pixels (x * 0 != 0 only for NaN/inf) with the
    # minimum finite value so the stretch below is well defined.
    bad = im * 0 != 0
    good = im * 0 == 0
    im[bad] = im[good].min()
    # compute the (x, y) center and pixel scale in the optical image
    wcs_optical = wcs.WCS(optical_header, naxis=2)
    pixxy = wcs_optical.wcs_world2pix(ra_center, dec_center, 1)
    x_optical = numpy.round(pixxy[0])
    y_optical = numpy.round(pixxy[1])
    # NOTE(review): under Python 3 ``header.keys()`` is a view without a
    # ``.count`` method — this only works on Python 2 / old astropy.
    headerkeys = optical_header.keys()
    cd1_1 = headerkeys.count('CD1_1')
    if cd1_1 == 0:
        cdelt1_optical = numpy.abs(optical_header['CDELT1'] * 3600)
        cdelt2_optical = numpy.abs(optical_header['CDELT2'] * 3600)
    else:
        cdelt1_optical = numpy.abs(optical_header['CD1_1'] * 3600)
        cdelt2_optical = numpy.abs(optical_header['CD2_2'] * 3600)
    # NOTE(review): the block below reads CD1_1..CD2_2 unconditionally and
    # overwrites the cdelt values just computed — it would raise KeyError
    # for headers carrying only CDELT1/CDELT2; confirm against the inputs.
    cd11 = optical_header['CD1_1']
    cd12 = optical_header['CD1_2']
    cd21 = optical_header['CD2_1']
    cd22 = optical_header['CD2_2']
    cdelt1_optical = numpy.sqrt(cd11 ** 2 + cd12 ** 2) * 3600
    cdelt2_optical = numpy.sqrt(cd21 ** 2 + cd22 ** 2) * 3600
    # Avoid division by zero in the rotation-ratio check below.
    if cd12 == 0:
        cd12 = cd11 / 1e8
    cdratio = numpy.abs(cd11 / cd12)
    if cdratio < 1:
        cdratio = 1 / cdratio
    #if cdratio < 1e2:
        #print "This shit ain't rotated yo!"
        #import pdb; pdb.set_trace()
    # Cutout half-size in pixels along each axis.
    x_extent = numpy.round(radial_extent / cdelt1_optical)
    y_extent = numpy.round(radial_extent / cdelt2_optical)
    # NOTE(review): despite the names, ``nx`` counts rows (axis 0) and
    # ``ny`` columns here; for non-square images the bounds tests below may
    # compare against the wrong axis — verify.
    nx = im[:, 0].size
    ny = im[0, :].size
    #angularradius = 0.5 * 60
    #pixscale = numpy.sqrt(cdelt1_optical * cdelt2_optical)
    #pixradius = numpy.round(angularradius / pixscale).astype(int)
    #x1 = str(x_optical.astype(int) - pixradius)
    #x2 = str(x_optical.astype(int) + pixradius)
    #y1 = str(y_optical.astype(int) - pixradius)
    #y2 = str(y_optical.astype(int) + pixradius)
    #region = '[' + x1 + ':' + x2 + ',' + y1 + ':' + y2 + ']'
    #cutfile = 'fitscutouts/' + dataname + '_' + ifilt + '_cut.fits'
    #cmd = 'imcopy ' + optloc + region + ' ' + cutfile
    #print cmd
    # make the trimmed optical image
    #if dataname == 'XMM109':
    #    optical_trimmed = numpy.ones([2 * y_extent, 2 * x_extent])
    #else:
    # Cutout falls partially outside the image: pad the missing margin
    # with zeros.  NOTE(review): debug breakpoint left in this branch.
    if (x_optical - x_extent < 0) | (x_optical + x_extent > nx) | \
            (y_optical - y_extent < 0) | (y_optical + y_extent > ny):
        # pad with zeros
        import pdb; pdb.set_trace()
        trimmed = numpy.zeros([2 * y_extent, 2 * x_extent])
        nx_pad = trimmed[0, :].size
        ny_pad = trimmed[:, 0].size
        # (xr*, yr*) index the source image; (xrr*, yrr*) the padded array.
        if x_optical - x_extent < 0:
            xr0 = 0
            xrr0 = x_extent - x_optical
        else:
            xr0 = x_optical - x_extent
            xrr0 = 0
        if y_optical - y_extent < 0:
            yr0 = 0
            yrr0 = y_extent - y_optical
        else:
            yr0 = y_optical - y_extent
            yrr0 = 0
        if x_optical + x_extent > nx:
            xr1 = nx
            xrr1 = nx_pad / 2 + (nx - x_optical)
        else:
            xr1 = x_optical + x_extent
            xrr1 = nx_pad
        if y_optical + y_extent > ny:
            yr1 = ny
            yrr1 = ny_pad / 2 + (ny - y_optical)
        else:
            yr1 = y_optical + y_extent
            yrr1 = ny_pad
        # NOTE(review): slice bounds here are numpy floats (from
        # numpy.round) — valid on Python 2, TypeError on Python 3.
        trimmed[yrr0:yrr1, xrr0:xrr1] = im[yr0:yr1, xr0:xr1]
    else:
        xr0 = x_optical - x_extent
        yr0 = y_optical - y_extent
        xr1 = x_optical + x_extent
        yr1 = y_optical + y_extent
        trimmed = im[yr0:yr1, xr0:xr1]
    #print(vmax)
    # Stretch optical/IRAC images only; ALMA maps keep linear scaling for
    # the contour levels computed by the caller.
    if not thisisalma:
        trimmed -= trimmed.min()# - 1
        trimmed = img_scale.sqrt(trimmed, scale_max=vmax)
    #trimmed *= 2.5
    #trimmed = numpy.sqrt(trimmed)
    #trimmed -= trimmed.min()
    #trimmedsort = numpy.sort(trimmed).flatten()
    #ntrimmed = trimmedsort.size
    #vmax = trimmedsort[0.99999 * ntrimmed]
    #vmin = trimmedsort[0.5 * ntrimmed]
    #print(vmin, vmax)
    #toohigh = trimmed > vmax
    #toolow = trimmed < vmin
    #trimmed[toolow] = vmin
    #trimmed[toohigh] = vmax
    return trimmed
# ---------------------------------------------------------------------------
# Main script: for every IRAC target, build a 3-colour IRAC image, overlay
# ALMA 870um contours, and save a PDF figure.
# ---------------------------------------------------------------------------
# blue color is IRAC channel 1: 3.6um
blue = 'irac1'
# green is 4.5um
green = 'irac2'
# red is 8.0um
red = 'irac4'
# set font properties
font = {'family' : 'Arial Narrow',
        'weight' : 'bold',
        'size'   : 10}
matplotlib.rc('font', **font)
matplotlib.rcParams['axes.linewidth'] = 1.5
iractargetloc = '../Data/iractargetlist.txt'
iractargetlist = Table.read(iractargetloc, format='ascii')
goodfitloc = '../Data/uvfitlist.dat'
goodfitdat = Table.read(goodfitloc, format='ascii')
ntarget = len(iractargetlist)
for itarget in range(ntarget):
    # get the appropriate image center and radial extent
    target = iractargetlist['target'][itarget]
    match = goodfitdat['shortname'] == target
    goodfit = goodfitdat['intrinsic'][match][0]
    dataname = goodfitdat['dataname'][match][0]
    modfitloc = '../../../../ModelFits/' + dataname + '/' + goodfit
    configloc = modfitloc + '/config.yaml'
    configfile = open(configloc)
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; yaml.safe_load would be the modern choice.
    config = yaml.load(configfile)
    ra_center = config['Region0']['RACentroid']
    dec_center = config['Region0']['DecCentroid']
    # Radial extent hard-coded to 9.5" rather than read from the config.
    radial_extent = 9.5#config['Region0']['RadialExtent']
    # Make the RGB image
    imdir = '../../fitscutouts/' + dataname
    blueimloc = imdir + '_' + blue + '.fits'
    blueim = transform(blueimloc, ra_center, dec_center, radial_extent,
                       vmax=0.7)
    greenimloc = imdir + '_' + green + '.fits'
    greenim = transform(greenimloc, ra_center, dec_center, radial_extent,
                        vmax=0.7)
    redimloc = imdir + '_' + red + '.fits'
    redim = transform(redimloc, ra_center, dec_center, radial_extent)
    optical_header = fits.getheader(blueimloc)
    naxis = redim[:, 0].size
    # NOTE(review): cubefits is defined but never written in this section.
    cubefits = '../Figures/' + target + '_rgb.fits'
    rgbArray = numpy.zeros((naxis, naxis, 3))
    rgbArray[:, :, 0] = redim
    rgbArray[:, :, 1] = greenim
    rgbArray[:, :, 2] = blueim
    plt.clf()
    fig = plt.figure(figsize=(3.0, 3.0))
    ax = fig.add_subplot(1, 1, 1)
    plt.subplots_adjust(left=0.08, right=0.97, top=0.97,
                        bottom=0.08, wspace=0.35)
    # load the corresponding ALMA image
    submm_band = '870'
    ALMA_file = '../../fitscutouts/' + dataname + '_' + submm_band + '.fits'
    ALMA_hdu = fits.open(ALMA_file)
    ALMA_fullimage = ALMA_hdu[0].data
    ALMA_trimmed = transform(ALMA_file, ra_center, dec_center, radial_extent)
    ALMA_header = ALMA_hdu[0].header
    # Synthesised beam parameters (arcsec / degrees) for the beam ellipse.
    bmaj = ALMA_header['BMAJ'] * 3600
    bmin = ALMA_header['BMIN'] * 3600
    bpa = ALMA_header['BPA']
    cdelt1_ALMA = numpy.abs(ALMA_header['CDELT1'] * 3600)
    cdelt2_ALMA = numpy.abs(ALMA_header['CDELT2'] * 3600)
    wcs_ALMA = wcs.WCS(ALMA_header, naxis=2)
    pixxy = wcs_ALMA.wcs_world2pix(ra_center, dec_center, 1)
    x_ALMA = numpy.round(pixxy[0])
    y_ALMA = numpy.round(pixxy[1])
    # Estimate the map rms from an annulus: mask out the central quarter-
    # extent box around the source and use the rest of the image.
    mask = ALMA_fullimage.copy()
    mask[:] = 0
    # NOTE(review): as in transform(), nx counts rows and ny columns here;
    # float slice bounds below are Python-2-only behaviour.
    nx = mask[:, 0].size
    ny = mask[0, :].size
    innerhalf = 0
    mask[innerhalf:ny - innerhalf, innerhalf:nx - innerhalf] = 1
    x_extent = radial_extent / 4. / cdelt1_ALMA
    y_extent = radial_extent / 4. / cdelt2_ALMA
    xr0 = x_ALMA - x_extent
    yr0 = y_ALMA - y_extent
    xr1 = x_ALMA + x_extent
    yr1 = y_ALMA + y_extent
    mask[yr0:yr1, xr0:xr1] = 0
    goodregion = mask == 1
    rms = ALMA_fullimage[goodregion].std()
    # Axes in arcsec offsets; RA increases to the left.
    extent = [radial_extent, -radial_extent, -radial_extent, radial_extent]
    plt.imshow(rgbArray, interpolation='nearest', \
        extent=extent, origin='lower')
    #plt.colorbar()
    # define the x- and y-vectors for the contour plot
    nx_ALMA = ALMA_trimmed[0, :].size
    ny_ALMA = ALMA_trimmed[:, 0].size
    x_vector = (nx_ALMA / 2 - numpy.arange(nx_ALMA)) * cdelt1_ALMA
    y_vector = (numpy.arange(ny_ALMA) - ny_ALMA / 2) * cdelt2_ALMA
    #cellplus = celldata*(2*xrad+1.1)/(2*xrad)
    #cmodx = ( numpy.arange(2*xrad) - xrad ) * (-cellplus) - celldata/2.
    #cmody = ( numpy.arange(2*yrad) - yrad ) * cellplus + celldata/2.
    # define contour level parameters
    #plevs = [4 * rms]
    #nlevs = [-4 * rms]
    # Positive contours at 4*rms * powers of 2; negative mirrored (sorted
    # ascending because matplotlib requires increasing levels).
    plevs = 4 * rms * 2 ** (numpy.arange(10))
    nlevs = sorted(-4 * rms * 2 ** (numpy.arange(4)))
    #pcline = 'solid'
    #ncline = 'dashed'
    # draw the contours
    plt.contour(x_vector, y_vector, ALMA_trimmed, colors='white', levels=plevs, \
        linewidths=1.0)
    plt.contour(x_vector, y_vector, ALMA_trimmed, colors='white', levels=nlevs, \
        linewidths=1.0)
    plt.minorticks_on()
    plt.tick_params(width=1.5, which='both')
    plt.tick_params(length=2, which='minor')
    plt.tick_params(length=4, which='major')
    #plt.xlabel(r'$\Delta$RA (arcsec)', fontsize='x-large')
    #plt.ylabel(r'$\Delta$Dec (arcsec)', fontsize='x-large')
    #xhi = xrad * cdelt1_optical
    #xlo = -xrad * celldata
    #yhi = yrad * celldata
    #ylo = -yrad * celldata
    #axisrange = [numpy.float(xhi), numpy.float(xlo), numpy.float(ylo),
    #        numpy.float(yhi)]
    plt.axis(extent)
    # Draw the synthesised beam ellipse in the lower-right corner, in axes
    # coordinates; beamx/beamy are the beam's bounding box on the sky.
    normx = extent[0] - extent[1]
    normy = extent[3] - extent[2]
    bparad = bpa / 180 * math.pi
    beamx = numpy.abs(numpy.sin(bparad) * bmaj) + numpy.abs(numpy.cos(bparad) *
            bmin)
    beamy = numpy.abs(numpy.cos(bparad) * bmaj) + numpy.abs(numpy.sin(bparad) *
            bmin)
    dx = normx  # xhi - xlo
    dy = normy  # yhi - ylo
    bufferx = 0.05
    buffery = 0.05
    xpos = 1 - beamx / dx / 2 - bufferx
    ypos = beamy / dy / 2 + buffery
    e = Ellipse((xpos, ypos), bmin / normx, bmaj / normy, angle=bpa,
        ec='black', lw=0.1, transform=ax.transAxes, fc='white',
        zorder=10)
    ax.add_artist(e)
    plt.text(0.95, 0.95, target, fontsize='xx-large', \
        color='white', va='top', ha='right', transform=ax.transAxes)
    #imshow(rgbArray, origin='lower')
    savefig('../Figures/' + target + '_rgb.pdf')
    #import pdb; pdb.set_trace()
    #aplpy.make_rgb_cube([redim, greenim, blueim], cubefits)
    #import pdb; pdb.set_trace()
    #cubeim = '../Figures/' + target + '_rgb.png'
    #aplpy.make_rgb_image([redimloc, greenimloc, blueimloc], cubeim,
    #        stretch_r='sqrt', stretch_g='sqrt', stretch_b='sqrt',
    #        pmax_r=99, pmax_g=99, pmax_b=99)
    #gc = aplpy.FITSFigure(redimloc)
    #gc.show_rgb(cubeim)
    #gc.save('../Figures/' + target + '_test.png')
    #import pdb; pdb.set_trace()
|
<gh_stars>1-10
#
# DeepR<NAME>
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
from src.tracks.track import Track
import src.personalize.configuration.personal_track_annotations as config
class Baadal2020Track(Track):
    """DeepRacer "Baadal" (2020) track definition.

    Pure data subclass of Track: UI metadata, sector divider waypoint
    indexes, and the centre-line waypoint coordinates (metres).
    """
    def __init__(self):
        super().__init__()

        # UI metadata shown in the application.
        self._ui_name = "Baadal Track"
        self._ui_description = "Baadal is the Hindi word for cloud. The Baadal track combines long arching straightaways perfect for passing opportunities coupled with tight windings corners."
        self._ui_length_in_m = 39.0  # metres
        self._ui_width_in_cm = 107  # centimetres
        # NOTE(review): looks like a placeholder/truncated world name —
        # confirm against the simulator's expected identifier.
        self._world_name = "AmericasGenerated..."
        # Waypoint indexes where sector boundaries fall.
        self._track_sector_dividers = [90, 160, 210]
        self._annotations = config.baadal_2020_annotations
        self._track_width = 1.07  # metres

        # Centre-line [x, y] waypoints in metres; first and last points
        # coincide to close the loop.
        # NOTE(review): one waypoint appears twice in a row below
        # ([-3.3421..., -0.9589...]) — confirm whether intentional.
        self._track_waypoints = [
            [-5.66000294685364, 3.958880424499509],
            [-5.7570354938507045, 3.8440994024276773],
            [-5.85366678237915, 3.728980541229248],
            [-5.9498066902160645, 3.6134519577026367],
            [-6.045382499694827, 3.4974545240402195],
            [-6.140244483947754, 3.3808740377426147],
            [-6.23428201675415, 3.263625979423523],
            [-6.327301025390625, 3.145568013191223],
            [-6.419062852859497, 3.026531457901001],
            [-6.509337902069092, 2.9063609838485718],
            [-6.597744464874268, 2.784808039665222],
            [-6.683832406997681, 2.6616060733795166],
            [-6.767106056213379, 2.5364795923233032],
            [-6.846842050552368, 2.409065008163452],
            [-6.922170162200928, 2.278998017311096],
            [-6.992099046707153, 2.1459575295448303],
            [-7.055515289306641, 2.0096899271011353],
            [-7.111108064651489, 1.8700449466705322],
            [-7.1574482917785645, 1.7270634770393372],
            [-7.193072557449341, 1.5810449719429016],
            [-7.216602563858032, 1.4326010346412659],
            [-7.226910352706909, 1.282662034034729],
            [-7.223280429840088, 1.1324154734611511],
            [-7.205502510070801, 0.9831812381744385],
            [-7.173877477645874, 0.8362545371055603],
            [-7.12910008430481, 0.6927842497825623],
            [-7.072093963623047, 0.5537201464176178],
            [-7.003833532333374, 0.4198261573910713],
            [-6.925275087356567, 0.2917060856707394],
            [-6.837337017059326, 0.1698335036635399],
            [-6.740900278091431, 0.05456584692001343],
            [-6.636804103851318, -0.05384095013141632],
            [-6.525861978530884, -0.15522754937410355],
            [-6.408857583999634, -0.24954623728990555],
            [-6.286554336547852, -0.33689118549227715],
            [-6.159768104553223, -0.41759130731225014],
            [-6.029304027557373, -0.49220180232077837],
            [-5.895843029022217, -0.5613089203834534],
            [-5.76002049446106, -0.6256571263074875],
            [-5.622736930847168, -0.6868360713124275],
            [-5.485501050949097, -0.7481200397014618],
            [-5.3488609790802, -0.8107256591320038],
            [-5.211751937866211, -0.8722924590110779],
            [-5.073456287384033, -0.9311444908380508],
            [-4.933642864227295, -0.9862889796495438],
            [-4.792094469070435, -1.036812037229538],
            [-4.648606538772583, -1.0815193057060242],
            [-4.503013372421265, -1.1188102066516876],
            [-4.355359077453613, -1.1467895805835724],
            [-4.206019878387451, -1.1635716259479523],
            [-4.055790424346924, -1.1672404110431671],
            [-3.9059489965438843, -1.1560732126235962],
            [-3.758147954940796, -1.128980278968811],
            [-3.6141995191574097, -1.0859194993972778],
            [-3.4754245281219482, -1.0282890498638153],
            [-3.3421205282211304, -0.9589154720306396],
            [-3.3421205282211304, -0.9589154720306396],
            [-3.2135485410690308, -0.8810970187187195],
            [-3.088540554046631, -0.7976623773574829],
            [-2.965657591819763, -0.7111210376024246],
            [-2.843257427215576, -0.6238968744874],
            [-2.7195885181427, -0.5384870730340481],
            [-2.59333598613739, -0.4569522733800113],
            [-2.4632264375686646, -0.38176506757736206],
            [-2.3256880044937134, -0.32156120985746384],
            [-2.179579973220825, -0.28706925362348557],
            [-2.0299980640411377, -0.273239403963089],
            [-1.8798149824142456, -0.27655796706676483],
            [-1.7314364910125732, -0.2997157424688339],
            [-1.5887594819068909, -0.34645455330610275],
            [-1.4560675024986267, -0.41673270240426064],
            [-1.3352516293525696, -0.5059823356568813],
            [-1.2248230278491974, -0.6078851073980331],
            [-1.1215990483760834, -0.7171147763729095],
            [-1.0225889980793, -0.8301890939474106],
            [-0.9255050122737885, -0.9449261128902435],
            [-0.8285874128341675, -1.0598032176494598],
            [-0.7303529530763626, -1.1735552251338959],
            [-0.6294289901852608, -1.2849246263504028],
            [-0.5243336632847786, -1.3923614621162415],
            [-0.41355532407760587, -1.4939134716987612],
            [-0.2957906424999237, -1.5872640013694763],
            [-0.17018990218639374, -1.6697425246238708],
            [-0.03682074695825577, -1.738929569721222],
            [0.10313616693019867, -1.7936034798622131],
            [0.2477007545530796, -1.8346160650253296],
            [0.3949059396982193, -1.864874541759491],
            [0.5434390008449554, -1.8878219723701477],
            [0.6926046311855316, -1.906228482723236],
            [0.8420867025852192, -1.9218769669532774],
            [0.9917286932468414, -1.9359135627746582],
            [1.141459047794342, -1.9489820003509521],
            [1.2912429571151733, -1.9614179730415344],
            [1.4410669803619394, -1.9733539819717407],
            [1.590930461883545, -1.9847924709320068],
            [1.7408375144004822, -1.9956409931182861],
            [1.8907954692840576, -2.0057650208473206],
            [2.0408074855804443, -2.015045464038849],
            [2.190874457359314, -2.0233999490737915],
            [2.3409935235977173, -2.0307684540748596],
            [2.4911584854125977, -2.037106990814209],
            [2.6413655281066886, -2.042383551597595],
            [2.7916065454483032, -2.0465720295906067],
            [2.9418740272521973, -2.049651026725769],
            [3.09216046333313, -2.051601529121399],
            [3.2424575090408325, -2.0524014234542847],
            [3.3927565813064575, -2.052029013633728],
            [3.543047547340394, -2.050457537174225],
            [3.6933209896087646, -2.047650456428528],
            [3.843564510345459, -2.0435635447502136],
            [3.9937654733657837, -2.038130462169647],
            [4.14390754699707, -2.031254470348358],
            [4.293969392776489, -2.0228084325790405],
            [4.4439239501953125, -2.012627959251404],
            [4.593732833862305, -2.000502049922943],
            [4.743346929550171, -1.9861674904823303],
            [4.892695903778076, -1.9693000316619873],
            [5.041685104370117, -1.9495030641555786],
            [5.190182447433472, -1.926309049129486],
            [5.338014125823975, -1.899193525314331],
            [5.484953880310059, -1.8676044344902039],
            [5.630725145339966, -1.831009030342102],
            [5.774989366531372, -1.788862943649292],
            [5.917303562164307, -1.740548014640808],
            [6.057107925415039, -1.685393512248993],
            [6.193664073944092, -1.6226370334625244],
            [6.326013803482054, -1.551440477371217],
            [6.4528703689575195, -1.4708830118179321],
            [6.572597980499268, -1.3800653219223022],
            [6.683197021484376, -1.278355926275252],
            [6.782935857772827, -1.1659643352031708],
            [6.870671510696411, -1.043982058763504],
            [6.94607400894165, -0.913995623588562],
            [7.009504556655884, -0.777768462896347],
            [7.061788320541382, -0.6368705928325653],
            [7.104021072387695, -0.49264395236968994],
            [7.137322902679443, -0.34608865529298605],
            [7.16273045539856, -0.197959803044796],
            [7.180895090103149, -0.04877026006579399],
            [7.192022800445557, 0.10111184790730476],
            [7.1959075927734375, 0.25135190784931183],
            [7.191936492919922, 0.4015893936157207],
            [7.179050445556641, 0.5513217002153397],
            [7.155781030654907, 0.6997877955436707],
            [7.120370864868164, 0.845827043056488],
            [7.071115016937255, 0.9877869188785569],
            [7.007126569747925, 1.1237331628799438],
            [6.928703069686889, 1.2519059181213392],
            [6.837066888809204, 1.370985507965088],
            [6.7338244915008545, 1.4801740050315857],
            [6.620632886886597, 1.579014003276825],
            [6.4989495277404785, 1.6671879887580872],
            [6.369964361190796, 1.7443130612373352],
            [6.234682559967039, 1.8097364306449897],
            [6.094013452529907, 1.8625964522361755],
            [5.949002504348755, 1.9020445346832275],
            [5.80089807510376, 1.927435040473938],
            [5.651031017303465, 1.9384949803352356],
            [5.5007874965667725, 1.9354159832000732],
            [5.351424932479858, 1.9189165234565735],
            [5.203908443450928, 1.8902084231376648],
            [5.058817148208621, 1.8510524630546576],
            [4.916203022003174, 1.8036289811134338],
            [4.77564549446106, 1.7504124641418457],
            [4.636264801025391, 1.694172441959381],
            [4.496432542800903, 1.6390774846076965],
            [4.35332989692688, 1.5933269262313843],
            [4.20458459854126, 1.5746829509735107],
            [4.049918055534363, 1.601656436920166],
            [3.9198169708251953, 1.6621540188789368],
            [3.788592576980591, 1.735415518283844],
            [3.6582034826278687, 1.810169517993927],
            [3.5249539613723755, 1.879661500453949],
            [3.386465549468994, 1.9379565119743347],
            [3.2422115802764893, 1.9799144864082336],
            [3.0937575101852417, 2.002890944480896],
            [2.9435755014419556, 2.007458508014679],
            [2.7936575412750244, 1.9971559643745422],
            [2.644674062728882, 1.9773675203323364],
            [2.4961490631103516, 1.9543344974517822],
            [2.3470449447631836, 1.9355109333992004],
            [2.1969469785690308, 1.9284019470214844],
            [2.0471014976501465, 1.9390785098075867],
            [1.9001749753952026, 1.9704304933547974],
            [1.7589374780654907, 2.021634042263031],
            [1.6246790289878845, 2.089140474796295],
            [1.4970124959945679, 2.168420433998108],
            [1.3744670152664213, 2.2554309368133523],
            [1.2550445199012756, 2.3466904759407043],
            [1.136623501777649, 2.4392449855804443],
            [1.0175322592258453, 2.5309349298477173],
            [0.8965888321399689, 2.62016499042511],
            [0.7730114161968201, 2.7057089805603045],
            [0.646395280957222, 2.786687970161438],
            [0.516713559627533, 2.862656593322754],
            [0.38432036340236664, 2.9337960481643677],
            [0.24994239117950662, 3.0011165142059304],
            [0.11478725075721741, 3.0668665170669556],
            [-0.019242696464061737, 3.134871482849121],
            [-0.14943695068359156, 3.209935903549193],
            [-0.2720812577754259, 3.296726942062378],
            [-0.38277214812114835, 3.3982945680618286],
            [-0.47733404859900325, 3.5150060653686506],
            [-0.5537961088120937, 3.6443090438842773],
            [-0.6129774041473877, 3.7824189662933385],
            [-0.6577053144574165, 3.925875663757324],
            [-0.6913427039980888, 4.07235062122345],
            [-0.7168591022491446, 4.220460653305048],
            [-0.736379757523537, 4.369482517242435],
            [-0.7515415772795677, 4.519012928009033],
            [-0.7636354416608809, 4.6688239574432355],
            [-0.7737331688404083, 4.818783521652222],
            [-0.7827220559120178, 4.968812942504883],
            [-0.7912855893373492, 5.118867874145511],
            [-0.7999028712511063, 5.268920660018921],
            [-0.8094909340143204, 5.418912887573242],
            [-0.8219924718141556, 5.5686869621276855],
            [-0.8406123220920559, 5.717813491821286],
            [-0.8697943985462189, 5.865213632583618],
            [-0.9145664423704163, 6.008611679077152],
            [-0.9786119759082794, 6.144468545913696],
            [-1.0616509914398193, 6.2696332931518555],
            [-1.160482794046402, 6.382781982421875],
            [-1.2714843153953552, 6.484057426452637],
            [-1.391730010509491, 6.574174165725708],
            [-1.519204497337338, 6.653758287429808],
            [-1.6525489687919617, 6.723059892654419],
            [-1.7908880114555359, 6.781764507293701],
            [-1.9335185289382935, 6.829078912734985],
            [-2.0797035098075867, 6.863890647888184],
            [-2.228494524955753, 6.884876012802124],
            [-2.3786330223083496, 6.890727519989014],
            [-2.528536081314087, 6.880491018295288],
            [-2.676444411277771, 6.854072093963623],
            [-2.8209365606307935, 6.812851190567018],
            [-2.961097002029419, 6.75869345664978],
            [-3.0957679748535156, 6.692063331604004],
            [-3.2232725620269775, 6.612581491470337],
            [-3.342753529548645, 6.521458625793457],
            [-3.455515503883362, 6.422109603881836],
            [-3.5637189149856567, 6.317797899246216],
            [-3.669121026992798, 6.210652828216553],
            [-3.7727506160736084, 6.101792097091675],
            [-3.87520492076874, 5.9918239116668675],
            [-3.9768350124359104, 5.881093978881839],
            [-4.077861428260806, 5.769812345504758],
            [-4.1784268617630005, 5.658114910125732],
            [-4.278627514839172, 5.546089172363281],
            [-4.37852597236633, 5.433794021606449],
            [-4.478174924850464, 5.321277618408203],
            [-4.577608585357666, 5.208571195602417],
            [-4.676850557327273, 5.095695495605466],
            [-4.775922060012817, 4.982670545578003],
            [-4.874833583831787, 4.869504928588867],
            [-4.973595142364502, 4.756208896636963],
            [-5.07220792770386, 4.642782926559446],
            [-5.1706695556640625, 4.529226541519165],
            [-5.26896333694458, 4.415524482727051],
            [-5.367077112197876, 4.301666617393494],
            [-5.464974880218503, 4.187623381614689],
            [-5.562634944915774, 4.073376059532163],
            [-5.66000294685364, 3.958880424499509]
        ]
|
<filename>tests/manage/pv_services/pvc_resize/test_node_restart_during_pvc_expansion.py<gh_stars>0
import logging
import pytest
from concurrent.futures import ThreadPoolExecutor
from ocs_ci.ocs import constants, node
from ocs_ci.utility.utils import ceph_health_check
from tests.helpers import wait_for_resource_state
from ocs_ci.framework.testlib import (
skipif_ocs_version, ManageTest, tier4, tier4b, ignore_leftovers,
polarion_id, skipif_bm, skipif_upgraded_from
)
log = logging.getLogger(__name__)
@tier4
@tier4b
@ignore_leftovers
@skipif_bm
@skipif_ocs_version('<4.5')
@skipif_upgraded_from(['4.4'])
@polarion_id('OCS-2235')
class TestNodeRestartDuringPvcExpansion(ManageTest):
    """
    Tests to verify PVC expansion will be success even if a node is restarted
    while expansion is in progress.
    """
    @pytest.fixture(autouse=True)
    def setup(self, create_pvcs_and_pods):
        """
        Create PVCs and pods
        """
        # 15 RBD + 10 CephFS PVCs of 4 GiB each, with 2 pods per RWX PVC.
        self.pvcs, self.pods = create_pvcs_and_pods(
            pvc_size=4, pods_for_rwx=2, num_of_rbd_pvc=15, num_of_cephfs_pvc=10
        )

    @pytest.fixture(autouse=True)
    def teardown(self, request, nodes):
        """
        Make sure the nodes are up
        """
        def finalizer():
            # Bring back any node left stopped by the test, then verify
            # the Ceph cluster recovered before the next test runs.
            nodes.restart_nodes_by_stop_and_start_teardown()
            assert ceph_health_check(), "Ceph cluster health is not OK"
            log.info("Ceph cluster health is OK")

        request.addfinalizer(finalizer)

    def test_worker_node_restart_during_pvc_expansion(self, nodes):
        """
        Verify PVC expansion will succeed if a worker node is restarted
        during expansion
        """
        pvc_size_expanded = 30
        executor = ThreadPoolExecutor(max_workers=len(self.pods))
        selected_node = node.get_typed_nodes(
            node_type=constants.WORKER_MACHINE, num_of_nodes=1
        )

        # Restart node
        # The restart is submitted first so the reboot overlaps with the
        # PVC resize operations issued in the loop below.
        log.info(f"Restart node {selected_node[0].name}")
        restart_thread = executor.submit(
            nodes.restart_nodes, nodes=selected_node
        )

        log.info("Expanding all PVCs.")
        for pvc_obj in self.pvcs:
            log.info(
                f"Expanding size of PVC {pvc_obj.name} to {pvc_size_expanded}G"
            )
            # expand_proc is attached dynamically; its future resolves to
            # the resize_pvc() success flag checked further down.
            pvc_obj.expand_proc = executor.submit(
                pvc_obj.resize_pvc, pvc_size_expanded, True
            )

        # Check result of node 'restart_nodes'
        restart_thread.result()

        log.info("Verify status of node.")
        node.wait_for_nodes_status(
            node_names=[node.get_node_name(selected_node[0])],
            status=constants.NODE_READY, timeout=300
        )

        # Verify pvc expansion status
        for pvc_obj in self.pvcs:
            assert pvc_obj.expand_proc.result(), (
                f"Expansion failed for PVC {pvc_obj.name}"
            )
        log.info("PVC expansion was successful on all PVCs")

        # Run IO
        # Post-expansion sanity: every pod must be Running and able to
        # write more data than the original 4 GiB PVC could hold.
        log.info("Run IO after PVC expansion.")
        for pod_obj in self.pods:
            wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            storage_type = (
                'block' if pod_obj.pvc.volume_mode == 'Block' else 'fs'
            )
            pod_obj.io_proc = executor.submit(
                pod_obj.run_io, storage_type=storage_type, size='6G',
                runtime=30, fio_filename=f'{pod_obj.name}_file'
            )
        log.info("Wait for IO to complete on all pods")
        for pod_obj in self.pods:
            pod_obj.io_proc.result()
            fio_result = pod_obj.get_fio_results()
            err_count = fio_result.get('jobs')[0].get('error')
            assert err_count == 0, (
                f"IO error on pod {pod_obj.name}. "
                f"FIO result: {fio_result}"
            )
            log.info(f"Verified IO on pod {pod_obj.name}.")
        log.info("IO is successful on all pods after PVC expansion.")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.ticker as ticker
import numpy as np
from numpy.fft import fftn, fftshift
matplotlib.use("Qt5Agg")

helptext = """
calculation of the diffraction pattern using FFTs with both conventions and kinematic
sum, to show the relationship between the phase and the displacement.
The object is a Ge-core / Si-shell nanowire.
"""

# Output directory and plot configuration.
savedir = "C:/Users/carnis/Work Folders/Documents/data/CH4760_Pt/S2227/simu/Figures/phasing_kin_FFT/new/"
colorbar_range = [-7, 4]  # [0, 9.5] # [vmin, vmax] log scale in photon counts
comment = "_GeSi_NW_scale" + str(colorbar_range)  # should start with _
tick_spacing = 25  # for plots in real space, in nm
tick_length = 5  # in plots
tick_width = 2  # in plots
save_colorbar = 1  # to save the colorbar
phase_range = np.pi / 30  # in radians, for plots
# parameters for plotting
# NOTE(review): "text.fontsize" and "title.fontsize" are not valid modern
# rcParams keys, and ``params`` is never applied (no rcParams.update call in
# this section) — possibly dead configuration from an older matplotlib.
params = {
    "backend": "ps",
    "axes.labelsize": 20,
    "text.fontsize": 20,
    "legend.fontsize": 20,
    "title.fontsize": 20,
    "xtick.labelsize": 20,
    "ytick.labelsize": 20,
    "text.usetex": False,
    "figure.figsize": (11, 9),
}
# define a colormap
# Piecewise-linear white->blue->green->yellow->red->black map used for the
# real-space plots.
cdict = {
    "red": (
        (0.0, 1.0, 1.0),
        (0.11, 0.0, 0.0),
        (0.36, 0.0, 0.0),
        (0.62, 1.0, 1.0),
        (0.87, 1.0, 1.0),
        (1.0, 0.0, 0.0),
    ),
    "green": (
        (0.0, 1.0, 1.0),
        (0.11, 0.0, 0.0),
        (0.36, 1.0, 1.0),
        (0.62, 1.0, 1.0),
        (0.87, 0.0, 0.0),
        (1.0, 0.0, 0.0),
    ),
    "blue": (
        (0.0, 1.0, 1.0),
        (0.11, 1.0, 1.0),
        (0.36, 1.0, 1.0),
        (0.62, 0.0, 0.0),
        (0.87, 0.0, 0.0),
        (1.0, 0.0, 0.0),
    ),
}
my_cmap = matplotlib.colors.LinearSegmentedColormap("my_colormap", cdict, 256)
plt.ion()

##################
# Create the shape of the object
##################
half_window = 256  # half number of pixels in x (horizontal axis) and y (vertical axis)
aSi = 0.54309  # lattice spacing of Si in nm
aGe = 0.5658  # lattice spacing of Ge in nm
d400_Ge = aGe / 4  # the diffraction is calculated at Ge 400 peak
misfit = (aSi - aGe) / aGe  # dimensionless
Zc = 32  # atomic number of Germanium core
Zs = 14  # atomic number of Silicon shell
voxel_size = aGe  # in nm
radius_core = 20 * voxel_size
radius_NW = 40 * voxel_size
# Half-angle used to build the hexagonal cross-section facets.
alpha = np.arccos(1 / np.sqrt(3))
tmp = np.mgrid[-half_window:half_window, -half_window:half_window]
ygrid, xgrid = tmp[0] * voxel_size, tmp[1] * voxel_size
# Hexagonal nanowire footprint: intersection of half-planes bounded by the
# facet lines.
area_nanowire = np.where(
    (ygrid < radius_NW)
    & (ygrid > -radius_NW)
    & (ygrid < -np.tan(alpha - 10 * np.pi / 180) * (-xgrid - radius_NW))
    & (ygrid > -np.tan(alpha) * (-xgrid + radius_NW))
    & (ygrid > np.tan(alpha - 0 * np.pi / 180) * (xgrid - radius_NW))  #
    & (ygrid < -np.tan(alpha + 30 * np.pi / 180) * (xgrid - radius_NW))
    & (ygrid < np.tan(alpha) * (xgrid + radius_NW))
    & (ygrid > -np.tan(alpha) * (xgrid + radius_NW)),
    1,
    0,
)
# Same construction for the (smaller) Ge core.
area_core = np.where(
    (ygrid < radius_core)
    & (ygrid > -radius_core)
    & (ygrid < -np.tan(alpha - 10 * np.pi / 180) * (-xgrid - radius_core))
    & (ygrid > -np.tan(alpha) * (-xgrid + radius_core))
    & (ygrid > np.tan(alpha - 0 * np.pi / 180) * (xgrid - radius_core))
    & (ygrid < -np.tan(alpha + 30 * np.pi / 180) * (xgrid - radius_core))
    & (ygrid < np.tan(alpha) * (xgrid + radius_core))
    & (ygrid > -np.tan(alpha) * (xgrid + radius_core)),
    1,
    0,
)
# Electron-density proxy: core weighted by Z(Ge), shell by Z(Si).
nanowire = area_core * abs(Zc) + (area_nanowire - area_core) * abs(Zs)
np.savez_compressed(savedir + "GeSi_NW_support.npz", obj=nanowire)

pixel_spacing = tick_spacing / voxel_size
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
    nanowire[
        half_window - 100 : half_window + 100, half_window - 100 : half_window + 100
    ],
    cmap=my_cmap,
    vmin=0,
    vmax=35,
)
ax0.xaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
ax0.yaxis.set_major_locator(ticker.MultipleLocator(pixel_spacing))
# NOTE(review): string values "on"/"off" for tick_params/label* were
# removed in newer matplotlib — this script targets an older release.
ax0.tick_params(
    labelbottom="off",
    labelleft="off",
    direction="in",
    top="on",
    right="on",
    length=tick_length,
    width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "density.png", bbox_inches="tight")
if save_colorbar == 1:
    plt.colorbar(plt0, ax=ax0)
    plt.xlabel("X")
    plt.ylabel("Y")
    ax0.tick_params(
        labelbottom="on",
        labelleft="on",
        labelsize=12,
        direction="in",
        top="on",
        right="on",
        length=tick_length,
        width=tick_width,
    )
    plt.pause(0.5)
    plt.savefig(savedir + "density_colorbar.png", bbox_inches="tight")

##################
# displacement
##################
nu = 0.27  # Poisson ratio
theta = np.arctan2(xgrid, ygrid)
r = np.sqrt(ygrid * ygrid + xgrid * xgrid)  # in nm
# Eshelby-type radial/tangential strain for a misfitting core in a shell.
alpha = (
    -radius_core
    * radius_core
    * misfit
    * (1 + nu)
    / (2 * radius_NW * radius_NW * (1 - nu))
)  # dimensionless
beta = alpha * radius_NW * radius_NW  # nm2
epsilonR = alpha - beta / (r * r)  # dimensionless
epsilonT = alpha + beta / (r * r)  # dimensionless
# NOTE(review): epsilonXX is computed but never used below; epsilon_xx is
# identically zero, so the core term of `displacement` vanishes — confirm
# this is the intended (shell-only) displacement model.
epsilonXX = misfit + epsilonR * np.cos(theta) ** 2 + epsilonT * np.sin(theta) ** 2
epsilonXX_Si = epsilonR * np.cos(theta) ** 2 + epsilonT * np.sin(theta) ** 2
epsilon_xx = np.zeros((2 * half_window, 2 * half_window))
# calculation based on the calculation of elastic strain in radial and
# transverse direction for a core-shell SiGe NW
# reference:
displacement = ((area_core - area_nanowire) * epsilonXX_Si + area_core * epsilon_xx) * 2
# plt.figure()
# plt.imshow(disp)
displacement[np.isnan(displacement)] = 0  # for central pixel which is not defined
# ux keeps the zero-filled field for the FFT; `displacement` is NaN-masked
# outside the wire for plotting only.
ux = np.copy(displacement)
displacement[nanowire == 0] = np.nan  # for plots
# no displacement along y
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
    displacement[
        half_window - 100 : half_window + 100, half_window - 100 : half_window + 100
    ],
    cmap=my_cmap,
    vmin=-phase_range,
    vmax=phase_range,
)
ax0.tick_params(
    labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "ux.png", bbox_inches="tight")
if save_colorbar == 1:
    plt.colorbar(plt0, ax=ax0)
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.pause(0.5)
    plt.savefig(savedir + "ux_colorbar.png", bbox_inches="tight")

##################
# diffraction on Ge 400 peak
##################
q400_Ge = 2 * np.pi / d400_Ge  # inverse nm
avg_q = np.matrix([q400_Ge, 0])
dq = 2 * np.pi / (2 * half_window * aGe)  # inverse nm
qx = q400_Ge + np.arange(-dq * half_window, dq * half_window, dq)
qy = np.arange(-dq * half_window, dq * half_window, dq)

########################
# FFT with displacement field and symmetric
# normalization for comparison with mathematica
########################
# Complex object = density * exp(i q.u); "ortho" keeps amplitudes
# comparable with the Mathematica reference calculation.
complex_object = nanowire * np.exp(1j * (ux * avg_q[0, 0] + 0))
np.save(savedir + "GeSi_NW_complex_object.npy", complex_object)
print("Min(abs(object)", abs(complex_object).min())
print("Max(abs(object)", abs(complex_object).max())
amplitude = fftshift(fftn(nanowire * np.exp(1j * (ux * avg_q[0, 0] + 0)), norm="ortho"))
print("Min(abs(amplitude)", abs(amplitude).min())  # should be same as mathematica
print("Max(abs(amplitude)", abs(amplitude).max())  # should be same as mathematica
intensity = abs(amplitude) ** 2
print(
    "Min(log10(intensity)", np.log10(intensity).min()
)  # should be same as mathematica
print(
    "Max(log10(intensity)", np.log10(intensity).max()
)  # should be same as mathematica
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
    np.log10(intensity),
    extent=(qx.min(), qx.max(), qy.min(), qy.max()),
    cmap="jet",
    vmin=-7,
    vmax=4,
)
ax0.tick_params(
    labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_positive" + comment + "_ortho_jet.png", bbox_inches="tight")
if save_colorbar == 1:
    plt.colorbar(plt0, ax=ax0)
    plt.xlabel("Qx")
    plt.ylabel("Qy")
    ax0.tick_params(
        labelbottom="on",
        labelleft="on",
        labelsize=12,
        direction="out",
        top="on",
        right="on",
        length=tick_length,
        width=tick_width,
    )
    plt.pause(0.5)
    plt.savefig(
        savedir + "FFT_positive" + comment + "_ortho_colorbar_jet.png",
        bbox_inches="tight",
    )
# Zoom on the central 40x40 pixels around the Bragg peak.
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
    np.log10(
        intensity[
            half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
        ]
    ),
    cmap="jet",
    vmin=-7,
    vmax=4,
)
ax0.tick_params(
    labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(
    savedir + "FFT_positive" + comment + "_ortho_zoom_jet.png", bbox_inches="tight"
)

########################
# FFT with displacement field of opposite sign and
# symmetric normalization for comparison with mathematica
########################
amplitude = fftshift(
    fftn(nanowire * np.exp(1j * (-ux * avg_q[0, 0] + 0)), norm="ortho")
)
print("Min(abs(amplitude)", abs(amplitude).min())  # should be same as mathematica
print("Max(abs(amplitude)", abs(amplitude).max())  # should be same as mathematica
intensity = abs(amplitude) ** 2
print(
    "Min(log10(intensity)", np.log10(intensity).min()
)  # should be same as mathematica
print(
    "Max(log10(intensity)", np.log10(intensity).max()
)  # should be same as mathematica
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
    np.log10(intensity),
    extent=(qx.min(), qx.max(), qy.min(), qy.max()),
    cmap="jet",
    vmin=-7,
    vmax=4,
)
ax0.tick_params(
    labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_negative" + comment + "_ortho_jet.png", bbox_inches="tight")
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
    np.log10(
        intensity[
            half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
        ]
    ),
    cmap="jet",
    vmin=-7,
    vmax=4,
)
ax0.tick_params(
    labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(
    savedir + "FFT_negative" + comment + "_ortho_zoom_jet.png", bbox_inches="tight"
)
########################
# FFT with displacement field and default normalization
########################
intensity = abs(fftshift(fftn(nanowire * np.exp(1j * (ux * avg_q[0, 0] + 0))))) ** 2
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off",
labelleft="off",
top="on",
right="on",
labelsize=12,
direction="out",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_positive" + comment + ".png", bbox_inches="tight")
np.savez_compressed(savedir + "GeSi_NW_FFT_positive.npz", obj=intensity)
if save_colorbar == 1:
plt.colorbar(plt0, ax=ax0)
plt.xlabel("Qx")
plt.ylabel("Qy")
ax0.tick_params(
labelbottom="on",
labelleft="on",
labelsize=12,
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(
savedir + "FFT_positive" + comment + "_colorbar.png", bbox_inches="tight"
)
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_positive" + comment + "_zoom.png", bbox_inches="tight")
########################
# FFT with displacement field of opposite sign and default normalization
########################
intensity = abs(fftshift(fftn(nanowire * np.exp(1j * (-ux * avg_q[0, 0] + 0))))) ** 2
fig, ax0 = plt.subplots(1, 1)
plt0 = ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off",
labelleft="off",
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_negative" + comment + ".png", bbox_inches="tight")
np.savez_compressed(savedir + "GeSi_NW_FFT_negative.npz", obj=intensity)
fig, x0 = plt.subplots(1, 1)
plt0 = x0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
x0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "FFT_negative" + comment + "_zoom.png", bbox_inches="tight")
#######################
# kinematic sums
#######################
nanowire_zoom = nanowire[
half_window - 50 : half_window + 50, half_window - 50 : half_window + 50
]
plt.figure()
plt.imshow(nanowire_zoom)
qx = q400_Ge + np.arange(-dq * half_window, dq * half_window, dq)
qy = np.arange(-dq * half_window, dq * half_window, dq)
grid_x = xgrid + ux
grid_y = ygrid
grid_x = grid_x[
half_window - 50 : half_window + 50, half_window - 50 : half_window + 50
]
grid_y = grid_y[
half_window - 50 : half_window + 50, half_window - 50 : half_window + 50
]
qx1 = np.repeat(qx[np.newaxis, :], len(qy), axis=0)
qy1 = np.repeat(qy[:, np.newaxis], len(qx), axis=1)
##############################
# calculate the centered kinematic sum +1j +ux
##############################
Fhk1 = np.zeros((len(qy), len(qx))).astype(np.complex64)
for ii in range(len(qy)):
for jj in range(len(qx)):
Fhk1[ii, jj] = (
Fhk1[ii, jj]
+ (
nanowire_zoom
* np.exp(+1j * (qx1[ii, jj] * grid_x + qy1[ii, jj] * grid_y))
).sum()
)
intensity = abs(Fhk1) ** 2
fig, ax0 = plt.subplots(1, 1)
ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off",
labelleft="off",
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "Kinematic_+1j_+ux" + comment + ".png", bbox_inches="tight")
np.savez_compressed(savedir + "Kinematic_+1j_+ux.npz", obj=abs(Fhk1) ** 2)
if save_colorbar == 1:
plt.colorbar(plt0, ax=ax0)
plt.xlabel("Qx")
plt.ylabel("Qy")
ax0.tick_params(
labelbottom="on",
labelleft="on",
labelsize=12,
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(
savedir + "Kinematic_+1j_+ux" + comment + "_colorbar.png", bbox_inches="tight"
)
fig, x0 = plt.subplots(1, 1)
plt0 = x0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
x0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(
savedir + "GeSi_NW_kinsum_+1j_+ux" + comment + "_zoom.png", bbox_inches="tight"
)
##############################
# calculate the centered kinematic sum -1j +ux
##############################
Fhk1 = np.zeros((len(qy), len(qx))).astype(np.complex64)
for ii in range(len(qy)):
for jj in range(len(qx)):
Fhk1[ii, jj] = (
Fhk1[ii, jj]
+ (
nanowire_zoom
* np.exp(-1j * (qx1[ii, jj] * grid_x + qy1[ii, jj] * grid_y))
).sum()
)
intensity = abs(Fhk1) ** 2
fig, ax0 = plt.subplots(1, 1)
ax0.imshow(
np.log10(intensity),
extent=(qx.min(), qx.max(), qy.min(), qy.max()),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
ax0.tick_params(
labelbottom="off",
labelleft="off",
direction="out",
top="on",
right="on",
length=tick_length,
width=tick_width,
)
plt.pause(0.5)
plt.savefig(savedir + "Kinematic_-1j_+ux" + comment + ".png", bbox_inches="tight")
np.savez_compressed(savedir + "GeSi_NW_kinsum_-1j_+ux.npz", obj=abs(Fhk1) ** 2)
fig, x0 = plt.subplots(1, 1)
plt0 = x0.imshow(
np.log10(
intensity[
half_window - 20 : half_window + 20, half_window - 20 : half_window + 20
]
),
cmap=my_cmap,
vmin=colorbar_range[0],
vmax=colorbar_range[1],
)
x0.tick_params(
labelbottom="off", labelleft="off", bottom="off", left="off", top="off", right="off"
)
plt.pause(0.5)
plt.savefig(savedir + "Kinematic_-1j_+ux" + comment + "_zoom.png", bbox_inches="tight")
plt.ioff()
plt.show()
print("end")
|
import sys, codecs, json
from argparse import ArgumentParser
from copy import deepcopy
from pymongo import MongoClient
#=====================================
# Re-wrap the standard streams so all console I/O is UTF-8 regardless of
# the environment's locale settings.
sys.stdin = codecs.getreader('utf8')(sys.stdin.detach())
sys.stderr = codecs.getwriter('utf8')(sys.stderr.detach())
sys.stdout = codecs.getwriter('utf8')(sys.stdout.detach())
#===============================================
# Command-line interface: MongoDB connection options plus the command
# verb and its positional arguments (validated later by CmdInfo).
parser = ArgumentParser()
parser.add_argument("-H", "--host", default = "localhost",
    help = "MongoDB host")
parser.add_argument("-P", "--port", type = int, default = 27017,
    help = "MongoDB port")
parser.add_argument("-d", "--database", default = "Anfisa",
    help = "Anfisa database in MongoDB")
parser.add_argument("-c", "--config",
    help = "Anfisa config file(anfisa.json), "
    "use it instead of host/port/database")
parser.add_argument("-C", "--config_default", action = "store_true",
    help = "Use it for config = ./anfisa.json")
parser.add_argument("command", nargs="+",
    help="Commands, use help command for list")
run_args = parser.parse_args()
#===============================================
def readContent(content):
    """Yield each JSON object parsed from the non-empty lines of *content*.

    Lines that decode to None or to a bare string are silently skipped;
    only structured values (dicts, lists, numbers, ...) are yielded.
    """
    for raw_line in content.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        parsed = json.loads(stripped)
        if parsed is None or isinstance(parsed, str):
            continue
        yield parsed
def updateMRec(agent, obj):
    """Upsert *obj* into MongoDB collection *agent*, keyed by its '_id'.

    All keys except '_id' become the $set payload; the record is created
    if it does not exist (upsert).  Assumes *obj* contains an '_id' key
    (otherwise obj_id is unbound and NameError is raised).
    """
    set_data = dict()
    for key, val in obj.items():
        if key == '_id':
            obj_id = val
        else:
            set_data[key] = val
    # NOTE(review): Collection.update() is the legacy pymongo API (removed
    # in pymongo 4.x) -- presumably this targets pymongo 3.x; confirm
    # before upgrading the driver.
    agent.update({'_id': obj_id}, {"$set": set_data}, upsert = True)
#===============================================
def cleanRecordTags(it):
    """Return a deep copy of record *it* with the '_id' key removed from
    every tag-history state in rec['_h'][1]; the input is left untouched."""
    cleaned = deepcopy(it)
    for state in cleaned['_h'][1]:
        state.pop('_id', None)
    return cleaned
def _filterTagState(state, tag_name):
ret = dict()
for key, value in state.items():
if not key.startswith('_') and key != tag_name:
ret[key] = value
return json.dumps(ret, sort_keys = True)
def clearRecordOneTag(rec, tag_name, out_seq):
    """Build the MongoDB instruction that removes *tag_name* from record
    *rec*, appending (rec_id, instr) to *out_seq*.

    instr is None when the record carries no other tags after removal
    (the whole record can be deleted), otherwise a {"$set": ...} update
    (plus "$unset" of the tag on the current state).  The tag history in
    rec['_h'] = [base_idx, states] is re-filtered and de-duplicated so
    adjacent identical states collapse, keeping base_idx pointing at the
    same logical state.
    """
    # Nothing to do if neither the current state nor any history state
    # carries the tag.
    if tag_name not in rec and all([tag_name not in sub_tags
            for sub_tags in rec['_h'][1]]):
        return
    rec_id = rec['_id']
    base_idx = rec['_h'][0]
    # Canonical JSON per historic state, with the tag filtered out.
    hist_seq = [_filterTagState(state, tag_name)
        for state in rec['_h'][1]]
    # base_idx may point at the live (current) state, which is not in the
    # stored history; append it so indexing stays valid.
    if base_idx >= len(hist_seq):
        base_idx = len(hist_seq)
        hist_seq.append(_filterTagState(rec, tag_name))
    # Collapse runs of identical adjacent states, shifting base_idx left
    # for every removed predecessor.
    idx = 1
    while idx < len(hist_seq):
        if hist_seq[idx - 1] == hist_seq[idx]:
            del hist_seq[idx]
            if base_idx >= idx:
                base_idx -= 1
        else:
            idx += 1
    hist_seq = [json.loads(descr) for descr in hist_seq]
    # Single empty state left: the record holds no tags at all -> delete.
    if len(hist_seq) == 1 and len(hist_seq[0]) == 0:
        out_seq.append((rec_id, None))
        return
    set_data = deepcopy(hist_seq[base_idx])
    # If the base state is the (appended) current one, drop it from the
    # stored history again.
    if base_idx + 1 == len(hist_seq):
        del hist_seq[base_idx]
    set_data['_h'] = [base_idx, hist_seq]
    instr = {"$set": set_data}
    if tag_name in rec:
        instr["$unset"] = {tag_name: ""}
    out_seq.append((rec_id, instr))
#===============================================
class CmdInfo:
    """Descriptor of one CLI command: its name, positional arguments, and
    whether it may target a dataset collection that does not exist yet.

    Instances self-register in the class-level sCmdList registry, which
    checkCall() scans to validate a raw argv-style command sequence.
    """

    sCmdList = []  # registry of all declared commands, in declaration order

    def __init__(self, name, args = "ds", create_support = False):
        self.mName = name
        self.mArgs = args
        self.mCreateSupp = create_support
        self.sCmdList.append(self)

    def getDSName(self, cmd_seq):
        """Return the dataset name from a *validated* command sequence, or
        None when this command takes no leading "ds" argument."""
        if len(self.mArgs) > 0 and self.mArgs[0] == "ds":
            return cmd_seq[1]
        return None

    def hasCreateSupport(self):
        return self.mCreateSupp

    def checkArgs(self, cmd_seq):
        """Validate cmd_seq against this command.

        Returns the (possibly augmented) sequence on success, None when the
        command name does not match this descriptor, and False when the name
        matches but the arguments are wrong.  A trailing "datafile" argument
        is either read from the named file or, when omitted, from stdin; the
        file/stdin CONTENT replaces/extends the sequence's last element.
        """
        if len(cmd_seq) < 1 or cmd_seq[0] != self.mName:
            return None
        if len(self.mArgs) > 0 and self.mArgs[-1] == "datafile":
            if len(cmd_seq) == 1 + len(self.mArgs):
                with open(cmd_seq[-1], "r", encoding = "utf-8") as inp:
                    content = inp.read()
                cmd_seq[-1] = content
                return cmd_seq
            elif len(cmd_seq) == len(self.mArgs):
                content = sys.stdin.read()
                cmd_seq.append(content)
                return cmd_seq
            print("Improper call arguments", file = sys.stderr)
            return False
        if len(cmd_seq) == 1 + len(self.mArgs):
            return cmd_seq
        print("Improper call arguments", file = sys.stderr)
        return False

    def report(self, output):
        print("\t%s %s" % (self.mName, " ".join(self.mArgs)), file = output)

    @classmethod
    def reportAll(cls, output):
        for cmd_info in cls.sCmdList:
            cmd_info.report(output)

    @classmethod
    def checkCall(cls, cmd_seq):
        """Match cmd_seq against the registry.

        Returns (validated_seq, ds_name, create_support) on success and
        (False, None, None) on any failure.
        """
        for cmd_info in cls.sCmdList:
            ret = cmd_info.checkArgs(cmd_seq)
            # BUGFIX: checkArgs() returns False (not None) when the name
            # matched but the arguments were wrong.  The original code only
            # tested `ret is not None`, so it fell through to getDSName(),
            # which raised IndexError on a too-short sequence (e.g. bare
            # "filter-list" with no dataset).  Bail out explicitly instead.
            if ret is False:
                return False, None, None
            if ret is not None:
                return (ret, cmd_info.getDSName(cmd_seq),
                    cmd_info.hasCreateSupport())
        print("Command not supported", file = sys.stderr)
        return False, None, None
#===============================================
# Command registry: (name, positional arguments, create_support).
# "datafile" as the last argument means the payload may come from a file
# or from stdin (see CmdInfo.checkArgs).
CmdInfo("ds-list", [], True)
CmdInfo("filter-list", ["ds"], True)
CmdInfo("tag-list", ["ds"], True)
CmdInfo("dump-filters", ["ds"], True)
CmdInfo("dump-tags", ["ds"], True)
CmdInfo("dump-rules", ["ds"], True)
CmdInfo("load-tags", ["ds", "datafile"], create_support = True)
CmdInfo("load-filters", ["ds", "datafile"], create_support = True)
CmdInfo("load-rules", ["ds", "datafile"], create_support = True)
CmdInfo("del-filter", ["ds", "filter_name"])
CmdInfo("del-tag", ["ds", "tag_name"])
CmdInfo("drop-filters", ["ds"])
CmdInfo("drop-tags", ["ds"])
CmdInfo("drop-rules", ["ds"])
CmdInfo("drop-ds", ["ds"])
#===============================================
# "help" short-circuits before any MongoDB connection is attempted.
if run_args.command[0] == "help":
    print(' ===Anfisa/MongoDB administration tool===', file = sys.stderr)
    print(' * List of commands *', file = sys.stderr)
    CmdInfo.reportAll(sys.stderr)
    sys.exit()
cmd_seq, ds_name, cr_supp = CmdInfo.checkCall(run_args.command)
if not cmd_seq:
    sys.exit()
# Connection settings: an explicit config file (anfisa.json) overrides
# the host/port/database command-line options.
if run_args.config_default:
    config_path = "./anfisa.json"
else:
    config_path = run_args.config
if config_path:
    with open(config_path, "r", encoding = "utf-8") as inp:
        cfg = json.loads(inp.read())
    database = cfg["mongo-db"]
    host, port = cfg.get("mongo-host"), cfg.get("mongo-port")
else:
    database = run_args.database
    host, port = run_args.host, run_args.port
mongo = MongoClient(host, port)
# Resolve the dataset collection (m_ds) when the command takes one.
# A missing collection is tolerated only for commands with create_support.
if ds_name is not None:
    m_db = mongo[database]
    if ds_name not in m_db.list_collection_names():
        if cr_supp:
            print("DS %s is possibly creating" % ds_name,
                file = sys.stderr)
        else:
            print("DS not found", ds_name, file = sys.stderr)
            sys.exit()
    m_ds = m_db[ds_name]
else:
    m_db = mongo[database]
#===============================================
# Each command below prints a JSON-encoded result to stdout and exits.
# NOTE(review): the write paths use the legacy pymongo
# Collection.update()/remove() API (removed in pymongo 4.x) -- presumably
# this script targets pymongo 3.x; confirm before upgrading the driver.
if cmd_seq[0] == "ds-list":
    ret = []
    for coll_name in m_db.list_collection_names():
        if coll_name != "system.indexes":
            ret.append(coll_name)
    print(json.dumps(ret))
    sys.exit()
#===============================================
if cmd_seq[0] == "filter-list":
    ret = []
    # Filter records carry _tp == "flt" and an _id of the form "flt-<name>".
    for it in m_ds.find({'_tp' : "flt"}):
        it_name = it['_id']
        if it_name.startswith("flt-"):
            ret.append(it_name[4:])
    print(json.dumps(ret, sort_keys = True, indent = 4))
    sys.exit()
#===============================================
if cmd_seq[0] == "tag-list":
    ret = set()
    # Tag names are all non-service (not '_'-prefixed) keys of "rec-*" docs.
    for it in m_ds.find():
        if not it['_id'].startswith("rec-"):
            continue
        for key in it.keys():
            if not key.startswith('_'):
                ret.add(key)
    print(json.dumps(sorted(ret)))
    sys.exit()
#===============================================
if cmd_seq[0] == "dump-filters":
    ret = []
    for it in m_ds.find({'_tp' : "flt"}):
        it_name = it['_id']
        if it_name.startswith("flt-"):
            ret.append(deepcopy(it))
    # One JSON document per line (the format readContent() consumes).
    for rec in ret:
        print(json.dumps(rec))
    sys.exit()
#===============================================
if cmd_seq[0] == "dump-tags":
    ret = []
    for it in m_ds.find():
        if not it['_id'].startswith("rec-"):
            continue
        ret.append(cleanRecordTags(it))
    for rec in ret:
        print(json.dumps(rec))
    sys.exit()
#===============================================
if cmd_seq[0] == "dump-rules":
    ret = None
    it = m_ds.find_one({'_id': 'params'})
    if it is not None:
        ret = deepcopy(it["params"])
    print(json.dumps(ret))
    sys.exit()
#===============================================
if cmd_seq[0] == "load-filters":
    cnt = 0
    # cmd_seq[2] holds the datafile CONTENT (substituted by checkArgs).
    for instr in readContent(cmd_seq[2]):
        assert instr['_tp'] == "flt"
        updateMRec(m_ds, instr)
        cnt += 1
    print(json.dumps("FILTERS LOADED: %d" % cnt))
    sys.exit()
#===============================================
if cmd_seq[0] == "load-tags":
    cnt = 0
    for instr in readContent(cmd_seq[2]):
        assert instr['_id'].startswith("rec-")
        updateMRec(m_ds, instr)
        cnt += 1
    print(json.dumps("TAGS LOADED: %d" % cnt))
    sys.exit()
#===============================================
if cmd_seq[0] == "load-rules":
    cnt = 0
    for data in readContent(cmd_seq[2]):
        assert all([len(pair) == 2 for pair in data])
        m_ds.update({'_id': "params"},
            {"$set": {'params': data}}, upsert = True)
        cnt += 1
    print(json.dumps("RULES LOADED: %d" % cnt))
    sys.exit()
#===============================================
if cmd_seq[0] == "del-filter":
    filter_name = cmd_seq[2]
    m_ds.remove({'_id': "flt-" + filter_name})
    print(json.dumps("FILTER %s DELETED" % filter_name))
    sys.exit()
#===============================================
if cmd_seq[0] == "del-tag":
    tag_name = cmd_seq[2]
    seq_update = []
    # First pass: compute per-record update/delete instructions...
    for it in m_ds.find():
        if it['_id'].startswith("rec-"):
            clearRecordOneTag(it, tag_name, seq_update)
    # ...then apply them (instr None means the record has no tags left).
    for rec_id, instr in seq_update:
        if instr is not None:
            m_ds.update({'_id': rec_id}, instr, upsert = True)
        else:
            m_ds.remove({'_id': rec_id})
    print(json.dumps("TAG %s DELETED: %d records" %
        (tag_name, len(seq_update))))
    sys.exit()
#===============================================
if cmd_seq[0] == "drop-filters":
    m_ds.remove({'_tp': "flt"})
    print(json.dumps("FILTERS DROPPED"))
    sys.exit()
#===============================================
if cmd_seq[0] == "drop-tags":
    m_ds.remove({'_id': {"$regex": "^rec-"}})
    print(json.dumps("TAGS DROPPED"))
    sys.exit()
#===============================================
if cmd_seq[0] == "drop-rules":
    m_ds.remove({'_id': 'params'})
    print(json.dumps("RULES PARAMS DROPPED"))
    sys.exit()
#===============================================
if cmd_seq[0] == "drop-ds":
    m_ds.drop()
    print(json.dumps("DATASET DROPPED"))
    sys.exit()
#===============================================
print("Oops: command not supported", file = sys.stderr)
|
# Source repository: uta-smile/CD-MVGNN
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
import numpy as np
from dglt.contrib.moses.moses.model.sd_vae.config import get_parser
from dglt.contrib.moses.moses.model.sd_vae.utils.mol_tree import Node
cmd_args, _ = get_parser().parse_known_args()
class RingBond(object):
    """Bookkeeping entry for an open ring bond awaiting closure."""
    def __init__(self, pos, b_type):
        # Index of the atom that opened this ring bond.
        self.pos = pos
        # Bond-type symbol (may be None/'?' until the bond is resolved).
        self.b_type = b_type
class AttMolGraphDecoder(object):
def __init__(self, utils):
self.reset_state()
self.rule_ranges = utils.rule_ranges
self.avail_atoms = utils.avail_atoms
self.atom_valence = utils.atom_valence
self.bond_types = utils.bond_types
self.bond_valence = utils.bond_valence
self.prod = utils.prod
self.MAX_NESTED_BONDS = utils.MAX_NESTED_BONDS
def reset_state(self):
self.atom_num = 0
self.matched_bonds = set()
self.open_rings = {}
self.sameatom_bonds = defaultdict(set)
def get_node(self, node, new_sym, pos):
if node.is_created():
assert pos < len(node.children)
ans = node.children[pos]
ans.init_atts()
assert ans.symbol == new_sym
return ans
return Node(new_sym, self.prod, node)
def rand_rule(self, node, sub_ranges = None):
g_range = self.rule_ranges[node.symbol]
idxes = np.arange(g_range[0], g_range[1])
if sub_ranges is not None:
idxes = idxes[sub_ranges]
assert len(idxes)
if len(idxes) == 1 and cmd_args.skip_deter:
result = 0
else:
result = self.walker.sample_index_with_mask(node, idxes)
if sub_ranges is not None:
new_idx = sub_ranges[result]
else:
new_idx = result
if node.rule_used is not None:
assert node.rule_used == new_idx
else:
node.rule_used = new_idx
return node.rule_used
def rand_att(self, node, candidates):
if len(candidates) == 1 and cmd_args.skip_deter:
att_idx = candidates[0]
else:
att_idx = self.walker.sample_att(node, candidates)
if not hasattr(node, 'bond_idx'):
node.bond_idx = att_idx
else:
assert node.bond_idx == att_idx
return att_idx
def ring_valid(self, r, pre_pos, remain):
p = (self.open_rings[r].pos, self.atom_num - 1)
if self.open_rings[r].pos == self.atom_num - 1:
return False
if self.open_rings[r].pos == pre_pos:
return False
if p in self.matched_bonds:
return False
if self.bond_valence[self.open_rings[r].b_type] > remain:
return False
return True
def maximum_match(self, pre_pos, remain):
if remain == 0:
return 0
cur_pos = self.atom_num - 1
s = set()
ans = 0
rest = remain
for cost in range(1, 4):
for r in self.open_rings:
if self.bond_valence[self.open_rings[r].b_type] != cost:
continue
if self.ring_valid(r, pre_pos, rest) and not self.open_rings[r].pos in s:
s.add(self.open_rings[r].pos)
rest -= 1
ans += 1
assert rest >= 0
if rest == 0:
return ans
return ans
def tree_generator(self, node, left_conn = False, right_conn = False, cap_remain = None, ref_symbol = None, is_last = None):
assert is_last is not None
if node.symbol in ['bond', 'BB', 'branch', 'BAC', 'BAH', 'charge', 'hcount']:
assert cap_remain is not None
if node.symbol == 'chain':
rule = self.rand_rule(node)
a = self.get_node(node, 'branched_atom', 0)
node.add_child(a)
if rule == 0: # chain -> branched_atom
self.tree_generator(a, left_conn, right_conn, is_last = is_last)
node.left_remain = a.left_remain
node.right_remain = a.right_remain
node.single_atom = True
else:
self.tree_generator(a, left_conn, True, is_last = False)
c = self.get_node(node, 'chain', -1)
c.pre_node = a.atom_pos
assert c.pre_node is not None
self.tree_generator(c, True, right_conn, is_last = is_last)
cost = 0
if rule == 2: # chain -> chain bond branched_atom
b = self.get_node(node, 'bond', 1)
self.tree_generator(b, cap_remain = min(c.left_remain, a.right_remain) + 1, is_last=is_last)
cost = self.bond_valence[b.children[0].symbol] - 1
node.add_child(b)
if rule == 3: # chain -> branched_atom '.' chain
b = self.get_node(node, '\'.\'', 1)
node.add_child(b)
node.add_child(c)
node.left_remain = a.left_remain - cost
node.right_remain = c.right_remain
if c.single_atom:
node.right_remain = c.right_remain - cost
assert node.left_remain >= 0
assert node.right_remain >= 0
node.single_atom = False
elif node.symbol == 'aliphatic_organic' or node.symbol == 'aromatic_organic' \
or node.symbol == 'element_symbols' or node.symbol == 'aromatic_symbols':
min_valence = int(left_conn) + int(right_conn)
if len(self.open_rings) and is_last:
min_valence += 1
candidates = []
atom_types = self.avail_atoms[node.symbol]
for i in range(len(atom_types)):
a = atom_types[i]
if self.atom_valence[a] >= min_valence:
if hasattr(node, 'banned_set') and a in node.banned_set:
continue
candidates.append(i)
rule = self.rand_rule(node, candidates)
a = self.get_node(node, atom_types[rule], 0)
assert self.atom_valence[a.symbol] >= min_valence
node.add_child(a)
node.left_remain = self.atom_valence[a.symbol] - min_valence
node.right_remain = self.atom_valence[a.symbol] - min_valence
node.single_atom = True
node.atom_pos = self.atom_num
if node.symbol == 'aromatic_organic' or node.symbol == 'aromatic_symbols':
node.is_aromatic = True
else:
node.is_aromatic = False
self.atom_num += 1
elif node.symbol == 'bond':
candidates = []
assert cap_remain
rr = range(len(self.bond_types))
if hasattr(node, 'allowed'):
rr = node.allowed
for i in rr:
b = self.bond_types[i]
if self.bond_valence[b] <= cap_remain:
candidates.append(i)
rule = self.rand_rule(node, candidates)
b = self.get_node(node, self.bond_types[rule], 0)
node.add_child(b)
elif node.symbol == 'branched_atom':
a = self.get_node(node, 'atom', 0)
self.tree_generator(a, left_conn, right_conn, is_last=is_last)
node.atom_pos = a.atom_pos
node.is_aromatic = a.is_aromatic
node.add_child(a)
candidates = set([0, 1, 2, 3])
remain = int(a.left_remain)
if len(self.open_rings) and is_last:
remain += 1
candidates.remove(0)
pre_idx = node.get_pre()
if self.maximum_match(pre_idx, remain) < len(self.open_rings):
candidates.remove(2)
if remain < 2:
candidates.remove(3)
else:
if remain < 2:
candidates.remove(3)
if remain < 1:
candidates.remove(2)
candidates.remove(1)
if len(self.open_rings) == 0 and is_last:
if 2 in candidates:
candidates.remove(2)
pre_idx = node.get_pre()
if self.maximum_match(pre_idx, remain) == 0 and len(self.open_rings) == self.MAX_NESTED_BONDS:
assert not is_last
if 2 in candidates:
candidates.remove(2)
if 3 in candidates:
candidates.remove(3)
if self.maximum_match(pre_idx, remain - 1) == 0 and len(self.open_rings) == self.MAX_NESTED_BONDS:
assert not is_last
if 3 in candidates:
candidates.remove(3)
rule = self.rand_rule(node, list(candidates))
if rule > 1: # branched_atom -> atom RB | atom RB BB
r = self.get_node(node, 'RB', 1)
if rule == 2 and is_last:
r.task = True
remain = self.tree_generator(r, cap_remain=remain - (rule == 3), is_last=is_last)
remain += (rule == 3)
node.add_child(r)
node.left_remain = remain
if rule % 2 == 1: # branched_atom -> atom BB | atom RB BB
assert remain > 0
b = self.get_node(node, 'BB', -1)
b.pre_node = a.atom_pos
assert b.pre_node is not None
node.left_remain = self.tree_generator(b, cap_remain = remain, is_last=is_last)
node.add_child(b)
node.right_remain = node.left_remain
node.single_atom = True
elif node.symbol == 'RB':
assert cap_remain
b = self.get_node(node, 'ringbond', 0)
b.task = node.task
cap_remain = self.tree_generator(b, cap_remain=cap_remain, is_last=is_last)
node.add_child(b)
candidates = []
if node.task:
candidates = [ int(len(self.open_rings) > 0) ]
else:
candidates = [0]
pre_idx = node.get_pre()
if cap_remain > 0 and not (self.maximum_match(pre_idx, cap_remain) == 0 and len(self.open_rings) == self.MAX_NESTED_BONDS):
candidates.append(1)
rule = self.rand_rule(node, candidates)
if rule == 1: # RB -> ringbond RB
assert cap_remain > 0
r = self.get_node(node, 'RB', 1)
r.task = node.task
cap_remain = self.tree_generator(r, cap_remain = cap_remain, is_last=is_last)
node.add_child(r)
elif node.symbol == 'BB':
b = self.get_node(node, 'branch', 0)
candidates = [0]
assert cap_remain > 0
if cap_remain > 1:
candidates.append(1)
rule = self.rand_rule(node, candidates)
if rule == 1: # BB -> branch BB
rest = self.tree_generator(b, cap_remain=cap_remain - 1, is_last=False)
node.add_child(b)
bb = self.get_node(node, 'BB', 1)
rest = self.tree_generator(bb, cap_remain=rest + 1, is_last=is_last)
node.add_child(bb)
else:
rest = self.tree_generator(b, cap_remain=cap_remain, is_last=is_last)
node.add_child(b)
cap_remain = rest
elif node.symbol == 'ringbond':
pre_idx = node.get_pre()
mm = self.maximum_match(pre_idx, cap_remain)
if node.task:
assert mm > 0 and mm >= len(self.open_rings)
candidates = []
# whether to match bond
if mm > 0 and len(self.open_rings):
for r in self.open_rings:
if self.ring_valid(r, pre_idx, cap_remain):
candidates.append(r)
# whether to create bond
if mm == 0 or (not node.task and len(self.open_rings) < self.MAX_NESTED_BONDS):
assert len(self.open_rings) < self.MAX_NESTED_BONDS
candidates.append(self.MAX_NESTED_BONDS)
r = self.rand_att(node, candidates)
bond_idx = r
bond_type = '?'
create = False
if r == self.MAX_NESTED_BONDS: # create new bond
for i in range(self.MAX_NESTED_BONDS):
if not i in self.open_rings and ((not i in self.sameatom_bonds[self.atom_num - 1]) or cmd_args.bondcompact):
bond_idx = i
create = True
break
assert create
else: # paired bond removed
assert r in self.open_rings
self.matched_bonds.add((self.open_rings[r].pos, self.atom_num - 1))
bond_type = self.open_rings[r].b_type
del self.open_rings[r]
self.sameatom_bonds[self.atom_num - 1].add(bond_idx)
if bond_idx + 1 <= 9:
d = self.get_node(node, 'DIGIT', -1)
r = self.get_node(d, '\'%d\'' % (bond_idx + 1), 0)
d.add_child(r)
node.add_child(d)
if not create and bond_type is not None:
rule = self.rand_rule(node, [1])
else:
rule = self.rand_rule(node, [0, 1])
else:
e = self.get_node(node, '\'%\'', -3)
node.add_child(e)
d1 = self.get_node(node, 'DIGIT', -2)
r1 = self.get_node(d1, '\'%d\'' % ((bond_idx + 1) // 10), 0)
d1.add_child(r1)
node.add_child(d1)
d2 = self.get_node(node, 'DIGIT', -1)
r2 = self.get_node(d2, '\'%d\'' % ((bond_idx + 1) % 10), 0)
d2.add_child(r2)
node.add_child(d2)
if not create and bond_type is not None:
rule = self.rand_rule(node, [3])
else:
rule = self.rand_rule(node, [2, 3])
if rule % 2 == 1: # ringbond -> bond DIGIT | bond '%' DIGIT DIGIT
b = self.get_node(node, 'bond', 0)
if create:
self.tree_generator(b, cap_remain=cap_remain, is_last=is_last)
bond_type = b.children[0].symbol
else:
assert cap_remain >= self.bond_valence[bond_type]
b.allowed = [0, 1, 2, 3, 4]
self.tree_generator(b, cap_remain=cap_remain, is_last=is_last)
cap_remain -= self.bond_valence[b.children[0].symbol]
node.add_child(b, 0)
else:
if bond_type == '?':
bond_type = None
cap_remain -= 1
if create:
assert bond_type is None or bond_type != '?'
self.open_rings[bond_idx] = RingBond(self.atom_num - 1, bond_type)
elif node.symbol == 'branch':
node.add_child(self.get_node(node, '\'(\'', 0))
c = self.get_node(node, 'chain', -2)
self.tree_generator(c, left_conn=True, right_conn=False, is_last=is_last)
rule = self.rand_rule(node)
cost = 1
if rule == 1: # branch -> '(' bond chain ')'
b = self.get_node(node, 'bond', 1)
self.tree_generator(b, cap_remain= min(cap_remain, c.left_remain + 1), is_last=is_last)
cost = self.bond_valence[b.children[0].symbol]
node.add_child(b)
node.add_child(c)
node.add_child(self.get_node(node, '\')\'', -1))
cap_remain -= cost
elif node.symbol == 'BAI':
rule = self.rand_rule(node)
if rule % 2 == 0: # BAI -> isotope xxx
i = self.get_node(node, 'isotope', 0)
self.tree_generator(i, is_last=is_last)
node.add_child(i)
s = self.get_node(node, 'symbol', -1 - (rule < 2))
s.banned_set = set(['\'B\''])
self.tree_generator(s, left_conn=left_conn, right_conn=right_conn, is_last=is_last)
node.atom_pos = s.atom_pos
node.add_child(s)
cap = s.left_remain
if rule <= 1: # BAI -> isotope aliphatic_organic BAC | aliphatic_organic BAC
b = self.get_node(node, 'BAC', -1)
cap = self.tree_generator(b, cap_remain=cap, ref_symbol=s.children[0].symbol, is_last=is_last)
node.add_child(b)
node.left_remain = cap
node.right_remain = cap
node.single_atom = True
elif node.symbol == 'BAC':
rule = self.rand_rule(node)
if rule == 0 or rule == 2: # BAC -> chiral BAH | chiral
c = self.get_node(node, 'chiral', 0)
self.tree_generator(c, is_last=is_last)
node.add_child(c)
if rule <= 1: # BAC -> chiral BAH | BAH
b = self.get_node(node, 'BAH', -1)
cap_remain = self.tree_generator(b, cap_remain=cap_remain, ref_symbol=ref_symbol, is_last=is_last)
node.add_child(b)
elif node.symbol == 'BAH':
if cap_remain == 0:
rule = self.rand_rule(node, [0, 1])
else:
rule = self.rand_rule(node)
if rule <= 1: # BAH -> hcount charge | charge
c = self.get_node(node, 'charge', -1)
borrow = 0
if cap_remain > 0 and rule == 0:
borrow = 1
cap_remain = self.tree_generator(c, cap_remain=cap_remain - borrow, ref_symbol=ref_symbol, is_last=is_last)
cap_remain += borrow
node.add_child(c)
if rule % 2 == 0: # BAH -> hcount charge | hcount
assert cap_remain > 0
hc = self.get_node(node, 'hcount', 0)
cap_remain = self.tree_generator(hc, cap_remain=cap_remain, is_last=is_last)
node.add_child(hc, 0)
elif node.symbol == 'hcount':
rule = self.rand_rule(node)
h = self.get_node(node, '\'H\'', 0)
node.add_child(h)
cost = 1
if rule == 1: # hcount -> 'H' DIGIT
d = self.get_node(node, 'DIGIT', -1)
self.tree_generator(d, cap_remain=cap_remain, is_last=is_last)
cost = int(d.children[0].symbol[1 : -1])
node.add_child(d)
cap_remain -= cost
elif node.symbol == 'charge':
if cap_remain == 0:
rule = self.rand_rule(node, [2, 3])
else:
rule = self.rand_rule(node)
if rule <= 1: # charge -> '-' | '-' DIGIT
m = self.get_node(node, '\'-\'', 0)
node.add_child(m)
cost = 1
if rule == 1: # charge -> '-' DIGIT
d = self.get_node(node, 'DIGIT', -1)
self.tree_generator(d, cap_remain=cap_remain, is_last=is_last)
cost = int(d.children[0].symbol[1 : -1])
node.add_child(d)
cap_remain -= cost
else: # charge -> '+' | '+' DIGIT
p = self.get_node(node, '\'+\'', 0)
node.add_child(p)
delta = 1
if rule == 1: # charge -> '+' DIGIT
d1 = self.get_node(node, 'DIGIT', -1)
self.tree_generator(d1, is_last=is_last)
delta = int(d1.children[0].symbol[1 : -1])
node.add_child(d1)
cap_remain += delta
assert ref_symbol is not None and ref_symbol != '\'B\''
elif node.symbol == 'DIGIT':
if cap_remain is None or cap_remain > len(self.prod[node.symbol]):
rule = self.rand_rule(node)
else:
rule = self.rand_rule(node, range(cap_remain))
d = self.get_node(node, '\'%d\'' % (rule + 1), 0)
node.add_child(d)
else:
assert node.symbol in ['smiles', 'atom', 'bracket_atom', 'isotope', 'chiral', 'symbol']
rule = self.rand_rule(node)
p = self.prod[node.symbol][rule]
for i in range(len(p)):
c = self.get_node(node, p[i], i)
if not p[i][0] == '\'': # non-terminal
t = self.tree_generator(c, left_conn, right_conn, cap_remain=cap_remain, ref_symbol=ref_symbol, is_last=is_last)
node.left_remain = c.left_remain
node.right_remain = c.right_remain
node.single_atom = c.single_atom
node.atom_pos = c.atom_pos
node.is_aromatic = c.is_aromatic
if t >= 0:
cap_remain = t
node.add_child(c)
if cap_remain is not None:
assert cap_remain >= 0
return cap_remain
return -1
    def decode(self, node, walker):
        """Decode the tree rooted at `node`, emitting output through `walker`.

        Resets both the walker and the decoder's internal state before
        running the recursive generator over the whole tree; is_last=True
        marks this as the final (top-level) fragment.
        """
        self.walker = walker
        self.walker.reset()
        self.reset_state()
        self.tree_generator(node, is_last=True)
def create_tree_decoder(utils):
    """Factory: build an AttMolGraphDecoder over the given grammar utils.

    The decoder construction does not depend on which grammar file is
    configured, so the previously computed (and unused) basename of
    cmd_args.grammar_file has been removed.
    """
    return AttMolGraphDecoder(utils)
# Module is import-only: no CLI behavior when executed directly.
if __name__ == '__main__':
    pass
|
<filename>collection_manager/collection_manager/services/history_manager/SolrIngestionHistory.py
import hashlib
import logging
import pysolr
import requests
from collection_manager.services.history_manager.IngestionHistory import (IngestionHistory, IngestionHistoryBuilder)
from common.async_utils.AsyncUtils import run_in_executor
logging.getLogger("pysolr").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
def doc_key(dataset_id, file_name):
    """Deterministic Solr document id: SHA-1 hex digest of dataset id + file name."""
    key_material = f'{dataset_id}{file_name}'.encode('utf-8')
    return hashlib.sha1(key_material).hexdigest()
class SolrIngestionHistoryBuilder(IngestionHistoryBuilder):
    """Factory producing SolrIngestionHistory instances bound to one Solr URL."""

    def __init__(self, solr_url: str, signature_fun=None):
        self._solr_url = solr_url
        self._signature_fun = signature_fun

    def build(self, dataset_id: str):
        """Create an ingestion-history view for the given dataset."""
        kwargs = {
            'solr_url': self._solr_url,
            'dataset_id': dataset_id,
            'signature_fun': self._signature_fun,
        }
        return SolrIngestionHistory(**kwargs)
class SolrIngestionHistory(IngestionHistory):
    """Ingestion history stored in Solr.

    Granule-level records (one per ingested file, keyed by a SHA-1 of
    dataset id + file name) live in the 'nexusgranules' collection; the
    latest-update timestamp per dataset lives in 'nexusdatasets'.
    """
    _granule_collection_name = "nexusgranules"
    _dataset_collection_name = "nexusdatasets"
    # Lazily-created requests session; class-level default of None lets
    # __del__ and _create_collection_if_needed test for existence safely.
    _req_session = None

    def __init__(self, solr_url: str, dataset_id: str, signature_fun=None):
        """Connect to Solr, creating the required collections if absent.

        :param solr_url: base URL of the Solr instance.
        :param dataset_id: dataset this history instance tracks.
        :param signature_fun: optional callable used to sign granule files.
        :raises DatasetIngestionHistorySolrException: when Solr is unreachable.
        """
        # BUGFIX: keep the raw URL for error logging — the original logged a
        # never-assigned self._solr_url in _create_collection_if_needed,
        # which raised AttributeError on the error path.
        self._solr_url = solr_url
        try:
            self._url_prefix = f"{solr_url.strip('/')}/solr"
            self._create_collection_if_needed()
            self._solr_granules = pysolr.Solr(f"{self._url_prefix}/{self._granule_collection_name}")
            self._solr_datasets = pysolr.Solr(f"{self._url_prefix}/{self._dataset_collection_name}")
            self._dataset_id = dataset_id
            self._signature_fun = signature_fun
            self._latest_ingested_file_update = self._get_latest_file_update()
        except requests.exceptions.RequestException:
            raise DatasetIngestionHistorySolrException(f"solr instance unreachable {solr_url}")

    def __del__(self):
        # BUGFIX: the session is created lazily; guard against __init__
        # having failed before one existed (None.close() raised here).
        if self._req_session is not None:
            self._req_session.close()

    @run_in_executor
    def _push_record(self, file_name, signature):
        """Upsert one granule record (delete-then-add keyed by hash id)."""
        hash_id = doc_key(self._dataset_id, file_name)
        self._solr_granules.delete(q=f"id:{hash_id}")
        self._solr_granules.add([{
            'id': hash_id,
            'dataset_s': self._dataset_id,
            'granule_s': file_name,
            'granule_signature_s': signature}])
        self._solr_granules.commit()
        return None

    @run_in_executor
    def _save_latest_timestamp(self):
        """Persist the in-memory latest-update timestamp for this dataset."""
        if self._solr_datasets:
            self._solr_datasets.delete(q=f"id:{self._dataset_id}")
            self._solr_datasets.add([{
                'id': self._dataset_id,
                'dataset_s': self._dataset_id,
                'latest_update_l': self._latest_ingested_file_update}])
            self._solr_datasets.commit()

    def _get_latest_file_update(self):
        """Return the stored latest-update timestamp, or None if absent."""
        results = self._solr_datasets.search(q=f"id:{self._dataset_id}")
        if results:
            return results.docs[0]['latest_update_l']
        else:
            return None

    @run_in_executor
    def _get_signature(self, file_name):
        """Return the stored signature for a granule file, or None if absent."""
        hash_id = doc_key(self._dataset_id, file_name)
        results = self._solr_granules.search(q=f"id:{hash_id}")
        if results:
            return results.docs[0]['granule_signature_s']
        else:
            return None

    def _create_collection_if_needed(self):
        """Create the granule/dataset collections and their schema fields
        on first use; no-op when they already exist."""
        try:
            if not self._req_session:
                self._req_session = requests.session()
            payload = {'action': 'CLUSTERSTATUS'}
            collections_endpoint = f"{self._url_prefix}/admin/collections"
            result = self._req_session.get(collections_endpoint, params=payload)
            response = result.json()
            node_number = len(response['cluster']['live_nodes'])
            existing_collections = response['cluster']['collections'].keys()
            if self._granule_collection_name not in existing_collections:
                # Create collection, one shard per live node
                payload = {'action': 'CREATE',
                           'name': self._granule_collection_name,
                           'numShards': node_number
                           }
                result = self._req_session.get(collections_endpoint, params=payload)
                response = result.json()
                logger.info(f"solr collection created {response}")
                # Update schema
                schema_endpoint = f"{self._url_prefix}/{self._granule_collection_name}/schema"
                self._add_field(schema_endpoint, "dataset_s", "string")
                self._add_field(schema_endpoint, "granule_s", "string")
                self._add_field(schema_endpoint, "granule_signature_s", "string")
            if self._dataset_collection_name not in existing_collections:
                # Create collection, one shard per live node
                payload = {'action': 'CREATE',
                           'name': self._dataset_collection_name,
                           'numShards': node_number
                           }
                result = self._req_session.get(collections_endpoint, params=payload)
                response = result.json()
                logger.info(f"solr collection created {response}")
                # Update schema
                schema_endpoint = f"{self._url_prefix}/{self._dataset_collection_name}/schema"
                self._add_field(schema_endpoint, "dataset_s", "string")
                self._add_field(schema_endpoint, "latest_update_l", "TrieLongField")
        except requests.exceptions.RequestException as e:
            logger.error(f"solr instance unreachable {self._solr_url}")
            raise e

    def _add_field(self, schema_url, field_name, field_type):
        """
        Helper to add a field in a solr schema
        :param schema_url:
        :param field_name:
        :param field_type
        :return:
        """
        # NOTE(review): str(dict) yields single-quoted pseudo-JSON, while the
        # Solr schema API documents a JSON body — consider json.dumps(...);
        # confirm against the deployed Solr version before changing.
        # NOTE(review): fields are added with stored=False, yet
        # _get_latest_file_update/_get_signature read them back from query
        # results; verify the deployed schema makes them retrievable
        # (e.g. via docValues).
        add_field_payload = {
            "add-field": {
                "name": field_name,
                "type": field_type,
                "stored": False
            }
        }
        return self._req_session.post(schema_url, data=str(add_field_payload).encode('utf-8'))
class DatasetIngestionHistorySolrException(Exception):
    """Raised when the backing Solr instance cannot be reached."""
|
<gh_stars>1-10
# Monkey-patch because I trained with a newer version.
# This can be removed once PyTorch 0.4.x is out.
# See https://discuss.pytorch.org/t/question-about-rebuild-tensor-v2/14560
import torch._utils
try:
    # Newer PyTorch already provides the v2 unpickling hook; nothing to do.
    torch._utils._rebuild_tensor_v2
except AttributeError:
    # Older PyTorch: emulate the v2 hook on top of the v1 builder so that
    # checkpoints saved by a newer PyTorch can still be deserialized.
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision.utils import save_image
import sys
import os
import time
import numpy as np
import cv2
import argparse
import yaml
import json
import random
import math
import copy
from tqdm import tqdm
from easydict import EasyDict as edict
# --- CLI arguments and YAML configuration --------------------------------
parser = argparse.ArgumentParser(description='Training code')
parser.add_argument('--config', default='config.yaml', type=str, help='yaml config file')
args = parser.parse_args()

# BUGFIX: use a context manager so the config file handle is closed, and
# safe_load instead of a Loader-less yaml.load (unsafe on untrusted input
# and an error in PyYAML >= 6).
with open(args.config, 'r') as config_file:
    CONFIG = edict(yaml.safe_load(config_file))
print ('==> CONFIG is: \n', CONFIG, '\n')

if CONFIG.IS_TRAIN:
    # BUGFIX: take the timestamp once — the original called int(time.time())
    # twice, so the log and snapshot directories could get different names
    # when the two calls straddled a second boundary.
    run_id = int(time.time())
    LOGDIR = '%s/%s_%d' % (CONFIG.LOGS.LOG_DIR, CONFIG.NAME, run_id)
    SNAPSHOTDIR = '%s/%s_%d' % (CONFIG.LOGS.SNAPSHOT_DIR, CONFIG.NAME, run_id)
    os.makedirs(LOGDIR, exist_ok=True)
    os.makedirs(SNAPSHOTDIR, exist_ok=True)
def to_varabile(arr, requires_grad=False, is_cuda=True):
    """Wrap a numpy array or torch tensor in an autograd Variable.

    Args:
        arr: numpy ndarray or torch tensor.
        requires_grad: whether gradients should flow into the result.
        is_cuda: move the tensor to the GPU before wrapping.

    Returns:
        torch.autograd.Variable wrapping (a possibly CUDA copy of) arr.
    """
    # isinstance (not `type(arr) == np.ndarray`) also accepts ndarray
    # subclasses, and is the idiomatic type check.
    if isinstance(arr, np.ndarray):
        tensor = torch.from_numpy(arr)
    else:
        tensor = arr
    if is_cuda:
        tensor = tensor.cuda()
    return Variable(tensor, requires_grad=requires_grad)
class AverageMeter(object):
    """Tracks the most recent value and the running mean of a series."""

    def __init__(self, name):
        self.name = name
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.count += n
        self.sum += val * n
        self.val = val
        self.avg = self.sum / self.count
# Per-channel dataset mean reshaped to (C, 1, 1) so it broadcasts over
# (C, H, W) image tensors; created once on the GPU and reused by train()/test().
MEAN_var = to_varabile(np.array(CONFIG.DATASET.MEAN, dtype=np.float32)[:,np.newaxis,np.newaxis], requires_grad=False, is_cuda=True)
######################################################################################################################
# "Globally and Locally Consistent Image Completion" Model
######################################################################################################################
def AffineAlignOp(features, idxs, aligned_height, aligned_width, Hs):
    """Sample per-RoI affine-warped patches out of a batch of feature maps.

    features: (B, C, H, W) tensor; idxs selects which batch entry each RoI
    reads from; Hs holds one 2x3 affine matrix per RoI in pixel coordinates.
    Returns an (N, C, aligned_height, aligned_width) tensor (CUDA required).
    """
    def _transform_matrix(Hs, w, h):
        # Convert pixel-space affine matrices into the normalized [-1, 1]
        # coordinate convention used by F.affine_grid, then invert
        # (affine_grid maps output coordinates back to input coordinates).
        _Hs = np.zeros(Hs.shape, dtype = np.float32)
        for i, H in enumerate(Hs):
            H0 = np.concatenate((H, np.array([[0, 0, 1]])), axis=0)
            A = np.array([[2.0 / w, 0, -1], [0, 2.0 / h, -1], [0, 0, 1]])
            A_inv = np.array([[w / 2.0, 0, w / 2.0], [0, h / 2.0, h/ 2.0], [0, 0, 1]])
            H0 = A.dot(H0).dot(A_inv)
            H0 = np.linalg.inv(H0)
            _Hs[i] = H0[:-1]
        return _Hs
    bz, C_feat, H_feat, W_feat = features.size()
    N = len(idxs)
    feature_select = features[idxs] # (N, feature_channel, feature_size, feature_size)
    Hs_new = _transform_matrix(Hs, w=W_feat, h=H_feat) # return (N, 2, 3)
    Hs_var = Variable(torch.from_numpy(Hs_new), requires_grad=False).cuda()
    # Build a full-resolution sampling grid, then keep only the top-left
    # aligned_height x aligned_width window of it.
    flow = F.affine_grid(theta=Hs_var, size=(N, C_feat, H_feat, W_feat)).float().cuda()
    flow = flow[:,:aligned_height, :aligned_width, :]
    rois = F.grid_sample(feature_select, flow, mode='bilinear', padding_mode='border') # 'zeros' | 'border'
    return rois
def CropAlignOp(feature_var, rois_var, aligned_height, aligned_width, spatial_scale):
    """Crop-and-resize each RoI box to a fixed size via AffineAlignOp.

    rois_var holds (x1, y1, x2, y2) boxes in image coordinates;
    spatial_scale maps them into feature-map coordinates. Each box is
    turned into the affine matrix that stretches it onto an
    aligned_width x aligned_height patch.
    """
    rois_np = rois_var.data.cpu().numpy()
    #idxs = rois_np[:,0]
    affinematrixs_feat = []
    for roi in rois_np:
        #x1, y1, x2, y2 = roi[1:] * float(spatial_scale)
        x1, y1, x2, y2 = roi * float(spatial_scale)
        # 2x3 affine mapping the box corners to the aligned patch corners.
        matrix = np.array([[aligned_width/(x2-x1), 0, -aligned_width/(x2-x1)*x1],
                           [0, aligned_height/(y2-y1), -aligned_height/(y2-y1)*y1]
                           ])
        affinematrixs_feat.append(matrix)
    affinematrixs_feat = np.array(affinematrixs_feat)
    # RoI i samples from batch entry i (one box per image).
    feature_rois = AffineAlignOp(feature_var, np.array(range(rois_var.size(0))),
                                 aligned_height, aligned_width, affinematrixs_feat)
    return feature_rois
class ConvBnRelu(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and ReLU.

    Padding is chosen so that spatial size is preserved at stride 1,
    accounting for dilation.
    """

    def __init__(self, inp_dim, out_dim,
                 kernel_size=3, stride=1, dilation=1, group=1,
                 bias = True, bn = True, relu = True):
        super(ConvBnRelu, self).__init__()
        self.inp_dim = inp_dim
        pad = (kernel_size - 1) // 2 + (dilation - 1)
        self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, pad, dilation, group, bias=bias)
        self.bn = nn.BatchNorm2d(out_dim) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class DeconvBnRelu(nn.Module):
    """ConvTranspose2d (2x upsample with the defaults) optionally followed
    by BatchNorm2d and ReLU."""

    def __init__(self, inp_dim, out_dim,
                 kernel_size=4, stride=2,
                 bias = True, bn = True, relu = True):
        super(DeconvBnRelu, self).__init__()
        self.inp_dim = inp_dim
        self.conv = nn.ConvTranspose2d(inp_dim, out_dim, kernel_size, stride, padding=(kernel_size-1)//2, bias=bias)
        self.bn = nn.BatchNorm2d(out_dim) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class GLCIC_G(nn.Module):
    """Completion (generator) network of "Globally and Locally Consistent
    Image Completion": strided encoder, dilated bottleneck, deconv decoder.
    Input is a 4-channel map (masked RGB + binary mask); output is a
    3-channel image squashed to [0, 1] by a sigmoid.
    """
    def __init__(self, bias_in_conv=True, pretrainfile=None):
        super(GLCIC_G, self).__init__()
        # Encoder: overall x4 spatial downsampling.
        self.conv1_1 = ConvBnRelu(4, 64, kernel_size=5, stride=1, bias=bias_in_conv)
        self.conv1_2 = ConvBnRelu(64, 128, kernel_size=3, stride=2, bias=bias_in_conv)
        self.conv1_3 = ConvBnRelu(128, 128, kernel_size=3, stride=1, bias=bias_in_conv)
        self.conv2_1 = ConvBnRelu(128, 256, kernel_size=3, stride=2, bias=bias_in_conv)
        self.conv2_2 = ConvBnRelu(256, 256, kernel_size=3, stride=1, bias=bias_in_conv)
        self.conv2_3 = ConvBnRelu(256, 256, kernel_size=3, stride=1, bias=bias_in_conv)
        # Bottleneck: growing dilation enlarges the receptive field without
        # further downsampling.
        self.conv3_1 = ConvBnRelu(256, 256, kernel_size=3, dilation=2, stride=1, bias=bias_in_conv)
        self.conv3_2 = ConvBnRelu(256, 256, kernel_size=3, dilation=4, stride=1, bias=bias_in_conv)
        self.conv3_3 = ConvBnRelu(256, 256, kernel_size=3, dilation=8, stride=1, bias=bias_in_conv)
        self.conv3_4 = ConvBnRelu(256, 256, kernel_size=3, dilation=16, stride=1, bias=bias_in_conv)
        self.conv4_1 = ConvBnRelu(256, 256, kernel_size=3, stride=1, bias=bias_in_conv)
        self.conv4_2 = ConvBnRelu(256, 256, kernel_size=3, stride=1, bias=bias_in_conv)
        # Decoder: two deconvs restore the original resolution; the last
        # conv emits raw 3-channel logits (no BN/ReLU).
        self.decoder1_1 = DeconvBnRelu(256, 128, kernel_size=4, stride=2, bias=bias_in_conv)
        self.decoder1_2 = ConvBnRelu(128, 128, kernel_size=3, stride=1, bias=bias_in_conv)
        self.decoder2_1 = DeconvBnRelu(128, 64, kernel_size=4, stride=2, bias=bias_in_conv)
        self.decoder2_2 = ConvBnRelu(64, 32, kernel_size=3, stride=1, bias=bias_in_conv)
        self.decoder2_3 = ConvBnRelu(32, 3, kernel_size=3, stride=1, bias=bias_in_conv, bn = False, relu = False)
        self.init(pretrainfile)
    def init(self, pretrainfile=None):
        """Initialize weights: He-style random init when no pretrain file is
        given; weights converted from the official Torch7 release when the
        file is 'completionnet_places2.t7'; otherwise a saved state_dict."""
        if pretrainfile is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    # He initialization (fan-out variant).
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.weight.data.normal_(0, .1)
                    m.bias.data.zero_()
        elif 'completionnet_places2.t7' in pretrainfile:
            # Maps this module's layer names to indices in the Torch7
            # sequential container of the official release.
            mapping = {'conv1_1.conv': 0, 'conv1_1.bn': 1, 'conv1_2.conv': 3, 'conv1_2.bn': 4, 'conv1_3.conv': 6, 'conv1_3.bn': 7, 'conv2_1.conv': 9, 'conv2_1.bn': 10, 'conv2_2.conv': 12, 'conv2_2.bn': 13, 'conv2_3.conv': 15, 'conv2_3.bn': 16, 'conv3_1.conv': 18, 'conv3_1.bn': 19, 'conv3_2.conv': 21, 'conv3_2.bn': 22, 'conv3_3.conv': 24, 'conv3_3.bn': 25, 'conv3_4.conv': 27, 'conv3_4.bn': 28, 'conv4_1.conv': 30, 'conv4_1.bn': 31, 'conv4_2.conv': 33, 'conv4_2.bn': 34, 'decoder1_1.conv': 36, 'decoder1_1.bn': 37, 'decoder1_2.conv': 39, 'decoder1_2.bn': 40, 'decoder2_1.conv': 42, 'decoder2_1.bn': 43, 'decoder2_2.conv': 45, 'decoder2_2.bn': 46, 'decoder2_3.conv': 48}
            from torch.utils.serialization import load_lua
            pretrain = load_lua(pretrainfile).model
            pretrained_dict = {}
            for key, mapidx in mapping.items():
                if '.conv' in key:
                    pretrained_dict[key+'.weight'] = pretrain.modules[mapidx].weight
                    pretrained_dict[key+'.bias'] = pretrain.modules[mapidx].bias
                elif '.bn' in key:
                    pretrained_dict[key+'.weight'] = pretrain.modules[mapidx].weight
                    pretrained_dict[key+'.bias'] = pretrain.modules[mapidx].bias
                    pretrained_dict[key+'.running_var'] = pretrain.modules[mapidx].running_var
                    pretrained_dict[key+'.running_mean'] = pretrain.modules[mapidx].running_mean
            model_dict = self.state_dict()
            print ('==> [netG] load official weight as pretrain. init %d/%d layers.'%(len(pretrained_dict), len(model_dict)))
            model_dict.update(pretrained_dict)
            # NOTE(review): the merged model_dict is built but the partial
            # pretrained_dict is what gets loaded — with strict state_dict
            # loading this would fail on missing keys; likely intended
            # self.load_state_dict(model_dict). Confirm before changing.
            self.load_state_dict(pretrained_dict)
        else:
            # map_location keeps loading CPU-side regardless of save device.
            self.load_state_dict(torch.load(pretrainfile, map_location=lambda storage, loc: storage))
            print ('==> [netG] load self-train weight as pretrain.')
    def forward(self, input):
        """input: 4-channel (masked RGB + mask) batch -> 3-channel image in [0, 1]."""
        x = self.conv1_1(input)
        x = self.conv1_2(x)
        x = self.conv1_3(x)
        x = self.conv2_1(x)
        x = self.conv2_2(x)
        x = self.conv2_3(x)
        x = self.conv3_1(x)
        x = self.conv3_2(x)
        x = self.conv3_3(x)
        x = self.conv3_4(x)
        x = self.conv4_1(x)
        x = self.conv4_2(x)
        x = self.decoder1_1(x)
        x = self.decoder1_2(x)
        x = self.decoder2_1(x)
        x = self.decoder2_2(x)
        x = self.decoder2_3(x)
        x = F.sigmoid(x)
        return x
    def calc_loss(self, pred, gt):
        """Pixel-wise MSE reconstruction loss."""
        loss = torch.nn.MSELoss()(pred, gt)
        return loss
class GLCIC_D(nn.Module):
    """Two-branch discriminator: a local branch over the completed patch and
    a global branch over the full image; their features are concatenated
    into a single real/fake logit.
    """
    def __init__(self, bias_in_conv=True, pretrainfile=None):
        super(GLCIC_D, self).__init__()
        # local D — five stride-2 convs; fc expects 512*4*4 = 8192 inputs,
        # which pins the local patch resolution.
        self.local_conv1 = ConvBnRelu(3, 64, kernel_size=5, stride=2, bias=bias_in_conv)
        self.local_conv2 = ConvBnRelu(64, 128, kernel_size=5, stride=2, bias=bias_in_conv)
        self.local_conv3 = ConvBnRelu(128, 256, kernel_size=5, stride=2, bias=bias_in_conv)
        self.local_conv4 = ConvBnRelu(256, 512, kernel_size=5, stride=2, bias=bias_in_conv)
        self.local_conv5 = ConvBnRelu(512, 512, kernel_size=5, stride=2, bias=bias_in_conv)
        self.local_fc = nn.Linear(8192, 1024)
        # global D — six stride-2 convs over the full image.
        self.global_conv1 = ConvBnRelu(3, 64, kernel_size=5, stride=2, bias=bias_in_conv)
        self.global_conv2 = ConvBnRelu(64, 128, kernel_size=5, stride=2, bias=bias_in_conv)
        self.global_conv3 = ConvBnRelu(128, 256, kernel_size=5, stride=2, bias=bias_in_conv)
        self.global_conv4 = ConvBnRelu(256, 512, kernel_size=5, stride=2, bias=bias_in_conv)
        self.global_conv5 = ConvBnRelu(512, 512, kernel_size=5, stride=2, bias=bias_in_conv)
        self.global_conv6 = ConvBnRelu(512, 512, kernel_size=5, stride=2, bias=bias_in_conv)
        self.global_fc = nn.Linear(8192, 1024)
        # after concat — single logit (loss applies the sigmoid via BCEWithLogits).
        self.fc = nn.Linear(2048, 1)
        self.init(pretrainfile)
    def init(self, pretrainfile=None):
        """He-style random init when no pretrain file, else load a state_dict."""
        if pretrainfile is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.weight.data.normal_(0, .1)
                    m.bias.data.zero_()
        else:
            self.load_state_dict(torch.load(pretrainfile, map_location=lambda storage, loc: storage))
            print ('==> [netD] load self-train weight as pretrain.')
    def forward(self, input_local, input_global):
        """Return one real/fake logit per sample from both branches."""
        x_local = self._forward_local(input_local)
        x_global = self._forward_global(input_global)
        x = torch.cat([x_local, x_global], 1)
        x = self.fc(x)
        return x
    def _forward_local(self, input):
        x = self.local_conv1(input)
        x = self.local_conv2(x)
        x = self.local_conv3(x)
        x = self.local_conv4(x)
        x = self.local_conv5(x)
        x = x.view(x.size(0), -1)
        x = self.local_fc(x)
        return x
    def _forward_global(self, input):
        x = self.global_conv1(input)
        x = self.global_conv2(x)
        x = self.global_conv3(x)
        x = self.global_conv4(x)
        x = self.global_conv5(x)
        x = self.global_conv6(x)
        x = x.view(x.size(0), -1)
        x = self.global_fc(x)
        return x
    def calc_loss(self, pred, gt):
        """Binary cross-entropy on the raw logits."""
        loss = nn.BCEWithLogitsLoss()(pred, gt)
        return loss
######################################################################################################################
# Dataset: ATR/LIP
######################################################################################################################
# CONFIG.DATASET.TRAINDIR
# CONFIG.DATASET.VALDIR
# CONFIG.DATASET.INPUT_RES
# CONFIG.DATASET.MEAN
class MyDataset(object):
    """Image-completion dataset over a flat directory of images.

    Training items: (image, mask_c, bbox_c, mask_d, bbox_d) with two random
    rectangular hole masks. Validation items: (image, mask, idx) with the
    mask read from CONFIG.VAL.MASKDIR. Note the two modes return tuples of
    different arity.
    """
    def __init__(self, ImageDir, istrain=True):
        self.istrain = istrain
        self.imgdir = ImageDir
        self.imglist = os.listdir(ImageDir)
        print ('==> Load Dataset: \n', {'dataset': ImageDir, 'istrain:': istrain, 'len': self.__len__()}, '\n')
        assert istrain==CONFIG.IS_TRAIN
    def __len__(self):
        return len(self.imglist)
    def __getitem__(self, idx):
        return self.loadImage(idx)
    def loadImage(self, idx):
        """Load, BGR->RGB, resize and mean-normalize image #idx; attach masks."""
        path = os.path.join(self.imgdir, self.imglist[idx])
        image = cv2.imread(path)
        # cv2 reads BGR; flip to RGB.
        image = image[:,:,::-1]
        image = cv2.resize(image, (CONFIG.DATASET.INPUT_RES, CONFIG.DATASET.INPUT_RES), interpolation=cv2.INTER_LINEAR)
        input = (image.astype(np.float32)/255.0 - CONFIG.DATASET.MEAN)
        # HWC -> CHW for torch.
        input = input.transpose(2,0,1)
        if self.istrain:
            # Two independent random holes (one for D's real/fake crop).
            bbox_c, mask_c = self.randommask(image.shape[0], image.shape[1])
            bbox_d, mask_d = self.randommask(image.shape[0], image.shape[1])
        else:
            # NOTE(review): mask_c is only defined when CONFIG.NAME is one
            # of the listed datasets; any other name raises NameError below.
            if CONFIG.NAME in ['horse', 'LIP', 'ATR']:
                mask_c = cv2.imread('%s/%s'%(CONFIG.VAL.MASKDIR, self.imglist[idx].replace('jpg', 'png')), cv2.IMREAD_GRAYSCALE).astype(np.float32)
                mask_c = cv2.resize(mask_c, (CONFIG.DATASET.INPUT_RES, CONFIG.DATASET.INPUT_RES), interpolation=cv2.INTER_NEAREST)
                mask_c = mask_c[np.newaxis, :,:]
                # Binarize: any nonzero label becomes part of the hole.
                mask_c[mask_c>=1] = 1.0
                mask_c[mask_c<1] = 0.0
            return np.float32(input), np.float32(mask_c), np.int32(idx)
        return np.float32(input), np.float32(mask_c), bbox_c, np.float32(mask_d), bbox_d
    def randommask(self, height, width):
        """Sample a random local window and a random hole inside it.

        Returns (bbox, mask): bbox is the LOCAL_RES x LOCAL_RES window
        (x1, y1, x2, y2) used by the local discriminator; mask is a
        (1, height, width) float map that is 1 inside the hole.
        """
        x1, y1 = np.random.randint(0, CONFIG.DATASET.INPUT_RES - CONFIG.DATASET.LOCAL_RES + 1, 2)
        x2, y2 = np.array([x1, y1]) + CONFIG.DATASET.LOCAL_RES
        # Hole size sampled within [HOLE_MIN, HOLE_MAX], placed fully
        # inside the local window.
        w, h = np.random.randint(CONFIG.DATASET.HOLE_MIN, CONFIG.DATASET.HOLE_MAX + 1, 2)
        p1 = x1 + np.random.randint(0, CONFIG.DATASET.LOCAL_RES - w)
        q1 = y1 + np.random.randint(0, CONFIG.DATASET.LOCAL_RES - h)
        p2 = p1 + w
        q2 = q1 + h
        mask = np.zeros((height, width), dtype=np.float32)
        mask[q1:q2 + 1, p1:p2 + 1] = 1.0
        bbox = np.array([x1, y1, x1+CONFIG.DATASET.LOCAL_RES, y1+CONFIG.DATASET.LOCAL_RES], dtype=np.int32)
        return bbox, mask[np.newaxis, :,:]
######################################################################################################################
# Training
######################################################################################################################
def train(dataLoader, model_G, model_D, epoch):
    """Run one training epoch of the GLCIC GAN.

    Three phases, selected by `epoch`:
      1. epoch <= CONFIG.TRAIN_G_EPOCHES: generator only (L2 reconstruction).
      2. epoch <= CONFIG.TRAIN_D_EPOCHES: discriminator only (real/fake BCE).
      3. afterwards: joint adversarial training of both networks.
    """
    batch_time = AverageMeter('batch_time')
    data_time = AverageMeter('data_time')
    losses_G = AverageMeter('losses_G')
    losses_D = AverageMeter('losses_D')
    losses_G_L2 = AverageMeter('losses_G_L2')
    losses_G_real = AverageMeter('losses_G_real')
    losses_D_real = AverageMeter('losses_D_real')
    losses_D_fake = AverageMeter('losses_D_fake')
    # switch to train mode
    model_G.train()
    model_D.train()
    # BUGFIX: the training phase depends only on `epoch`, so build the
    # optimizers once per epoch — the original re-instantiated Adam on
    # every iteration, discarding its moment estimates each step.
    if epoch <= CONFIG.TRAIN_G_EPOCHES:
        optimizer_G = torch.optim.Adam(model_G.parameters(),
                        CONFIG.SOLVER.LR, weight_decay=CONFIG.SOLVER.WEIGHTDECAY)
        optimizer_D = None
    elif epoch <= CONFIG.TRAIN_D_EPOCHES:
        optimizer_G = None
        optimizer_D = torch.optim.Adam(model_D.parameters(),
                        CONFIG.SOLVER.LR, weight_decay=CONFIG.SOLVER.WEIGHTDECAY)
    else:
        optimizer_G = torch.optim.Adam(model_G.parameters(), CONFIG.SOLVER.LR, weight_decay=CONFIG.SOLVER.WEIGHTDECAY)
        optimizer_D = torch.optim.Adam(model_D.parameters(), CONFIG.SOLVER.LR, weight_decay=CONFIG.SOLVER.WEIGHTDECAY)
    end = time.time()
    for i, data in enumerate(dataLoader):
        # measure data loading time
        data_time.update(time.time() - end)
        input3ch, mask_c, bbox_c, mask_d, bbox_d = data
        # 4-channel generator input: masked image plus the mask itself.
        input4ch = torch.cat([input3ch * (1 - mask_c), mask_c], dim=1)
        input3ch_var = to_varabile(input3ch, requires_grad=False, is_cuda=True) + MEAN_var
        input4ch_var = to_varabile(input4ch, requires_grad=True, is_cuda=True)
        bbox_c_var = to_varabile(bbox_c, requires_grad=False, is_cuda=True)
        mask_c_var = to_varabile(mask_c, requires_grad=True, is_cuda=True)
        out_G = model_G(input4ch_var)
        loss_G_L2 = model_G.calc_loss(out_G, input3ch_var)
        losses_G_L2.update(loss_G_L2.data[0], input3ch.size(0))
        # Paste generated pixels into the hole only; keep originals elsewhere.
        completion = (input3ch_var)*(1 - mask_c_var) + out_G * mask_c_var
        # local_completion = completion[:,:,bbox_c[0][1]:bbox_c[0][3], bbox_c[0][0]:bbox_c[0][2]]
        # local_input3ch = input3ch_var[:,:,bbox_c[0][1]:bbox_c[0][3], bbox_c[0][0]:bbox_c[0][2]]
        local_completion = CropAlignOp(completion, bbox_c_var,
                            CONFIG.DATASET.LOCAL_RES, CONFIG.DATASET.LOCAL_RES, spatial_scale=1.0)
        local_input3ch = CropAlignOp(input3ch_var, bbox_c_var,
                            CONFIG.DATASET.LOCAL_RES, CONFIG.DATASET.LOCAL_RES, spatial_scale=1.0)
        out_D_fake = model_D(local_completion, completion)
        loss_D_fake = model_D.calc_loss(out_D_fake, torch.zeros_like(out_D_fake))
        losses_D_fake.update(loss_D_fake.data[0], input3ch.size(0))
        out_D_real = model_D(local_input3ch, input3ch_var)
        loss_D_real = model_D.calc_loss(out_D_real, torch.ones_like(out_D_real))
        losses_D_real.update(loss_D_real.data[0], input3ch.size(0))
        #out_G_real = model_D(local_completion, completion) # TODO
        out_G_real = out_D_fake
        loss_G_real = model_D.calc_loss(out_G_real, torch.ones_like(out_G_real))
        losses_G_real.update(loss_G_real.data[0], input3ch.size(0))
        if epoch <= CONFIG.TRAIN_G_EPOCHES:
            # BUGFIX: the original assigned the AverageMeter `losses_G_L2`
            # to loss_G instead of the loss tensor `loss_G_L2`, which
            # crashed on loss_G.data[0] / loss_G.backward().
            loss_G = loss_G_L2
            losses_G.update(loss_G.data[0], input3ch.size(0))
            optimizer_G.zero_grad()
            loss_G.backward()
            optimizer_G.step()
        elif epoch <= CONFIG.TRAIN_D_EPOCHES:
            loss_D = loss_D_fake + loss_D_real
            losses_D.update(loss_D.data[0], input3ch.size(0))
            optimizer_D.zero_grad()
            loss_D.backward()
            optimizer_D.step()
        else:
            loss_G = loss_G_L2 + CONFIG.LOSS.ALPHA * loss_G_real
            loss_D = loss_D_fake + loss_D_real
            losses_G.update(loss_G.data[0], input3ch.size(0))
            losses_D.update(loss_D.data[0], input3ch.size(0))
            optimizer_G.zero_grad()
            # retain_graph: loss_D.backward() below reuses the same graph.
            loss_G.backward(retain_graph=True)
            optimizer_G.step()
            optimizer_D.zero_grad()
            loss_D.backward()
            optimizer_D.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % CONFIG.LOGS.PRINT_FREQ == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  #'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'G {loss_G.val:.4f} ({loss_G.avg:.4f})\t'
                  'D {loss_D.val:.4f} ({loss_D.avg:.4f})\t'
                  'G_L2 {G_L2.val:.4f} ({G_L2.avg:.4f})\t'
                  'G_real {G_real.val:.4f} ({G_real.avg:.4f})\t'
                  'D_fake {D_fake.val:.4f} ({D_fake.avg:.4f})\t'
                  'D_real {D_real.val:.4f} ({D_real.avg:.4f})\t'.format(
                   epoch, i, len(dataLoader), batch_time=batch_time, #data_time=data_time,
                   loss_G=losses_G, loss_D=losses_D,
                   G_L2 = losses_G_L2, G_real=losses_G_real,
                   D_fake=losses_D_fake, D_real=losses_D_real ))
        if i % CONFIG.LOGS.LOG_FREQ == 0:
            # Side-by-side grid of masked inputs and completions.
            vis = torch.cat([input3ch_var * (1 - mask_c_var),
                             completion], dim=0)
            save_image(vis.data, os.path.join(LOGDIR, 'epoch%d_%d_vis.jpg'%(epoch, i)), nrow=input3ch.size(0), padding=2,
                       normalize=True, range=None, scale_each=True, pad_value=0)
            # vis = torch.cat([local_input3ch + MEAN_var, local_completion], dim=0)
            # save_image(vis, os.path.join(LOGDIR, 'epoch%d_%d_vis_crop.jpg'%(epoch, i)), nrow=input3ch.size(0), padding=2,
            #            normalize=True, range=None, scale_each=True, pad_value=0)
        #if i % CONFIG.LOGS.SNAPSHOT_FREQ == 0 :
        #    torch.save(model_G.state_dict(), os.path.join(SNAPSHOTDIR, 'G_%d_%d.pkl'%(epoch,i)))
        #    torch.save(model_D.state_dict(), os.path.join(SNAPSHOTDIR, 'D_%d_%d.pkl'%(epoch,i)))
        # Snapshot at the phase-boundary epochs.
        if epoch == CONFIG.TRAIN_G_EPOCHES:
            torch.save(model_G.state_dict(), os.path.join(SNAPSHOTDIR, 'preG_%d_%d.pkl'%(epoch,i)))
        if epoch == CONFIG.TRAIN_D_EPOCHES:
            torch.save(model_G.state_dict(), os.path.join(SNAPSHOTDIR, 'preD_%d_%d.pkl'%(epoch,i)))
def main():
    """Full training loop: build dataset/loader/models, train every epoch,
    snapshot every CONFIG.LOGS.SNAPSHOT_FREQ epochs and once at the end."""
    dataset = MyDataset(ImageDir=CONFIG.DATASET.TRAINDIR, istrain=True)
    BATCHSIZE = CONFIG.SOLVER.IMG_PER_GPU * len(CONFIG.SOLVER.GPU_IDS)
    dataLoader = torch.utils.data.DataLoader(dataset, batch_size=BATCHSIZE, shuffle=True, num_workers=CONFIG.SOLVER.WORKERS, pin_memory=False)
    model_G = GLCIC_G(bias_in_conv=True, pretrainfile=CONFIG.INIT_G).cuda()
    model_D = GLCIC_D(bias_in_conv=True, pretrainfile=CONFIG.INIT_D).cuda()
    epoches = CONFIG.TOTAL_EPOCHES
    for epoch in range(epoches):
        print ('===========> [Epoch %d] training <==========='%epoch)
        train(dataLoader, model_G, model_D, epoch)
        if epoch % CONFIG.LOGS.SNAPSHOT_FREQ == 0 :
            torch.save(model_G.state_dict(), os.path.join(SNAPSHOTDIR, 'G_%d.pkl'%(epoch)))
            torch.save(model_D.state_dict(), os.path.join(SNAPSHOTDIR, 'D_%d.pkl'%(epoch)))
    # Final save reuses the loop variable `epoch` (== epoches - 1), so it
    # overwrites the last epoch's snapshot file if that epoch was saved above.
    torch.save(model_G.state_dict(), os.path.join(SNAPSHOTDIR, 'G_%d.pkl'%(epoch)))
    torch.save(model_D.state_dict(), os.path.join(SNAPSHOTDIR, 'D_%d.pkl'%(epoch)))
def test():
    """Inference over the validation set: complete each image using the mask
    from CONFIG.VAL.MASKDIR and write the result to CONFIG.VAL.OUTDIR."""
    from blend import blend
    if not os.path.exists(CONFIG.VAL.OUTDIR):
        os.makedirs(CONFIG.VAL.OUTDIR)
    dataset = MyDataset(ImageDir=CONFIG.DATASET.VALDIR, istrain=False)
    dataLoader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, pin_memory=False)
    model_G = GLCIC_G(bias_in_conv=True, pretrainfile=CONFIG.VAL.INIT).cuda()
    # switch to eval mode
    model_G.eval()
    for data in tqdm(dataLoader):
        input3ch, mask_c, idxs = data
        filename = dataset.imglist[idxs.numpy()[0]]
        # 4-channel generator input: masked image plus the mask itself.
        input4ch = torch.cat([input3ch * (1 - mask_c), mask_c], dim=1)
        input3ch_var = to_varabile(input3ch, requires_grad=False, is_cuda=True) + MEAN_var
        input4ch_var = to_varabile(input4ch, requires_grad=False, is_cuda=True)
        mask_c_var = to_varabile(mask_c, requires_grad=False, is_cuda=True)
        out_G = model_G(input4ch_var)
        # Keep original pixels outside the hole, generated pixels inside.
        completion = (input3ch_var)*(1 - mask_c_var) + out_G * mask_c_var
        completion_np = completion.data.cpu().numpy().transpose((0, 2, 3, 1))[0] *255.0
        path = os.path.join(dataset.imgdir, filename)
        # Resize back to the source image's native resolution (BGR->RGB read).
        image = cv2.imread(path)[:,:,::-1]
        completion_np = cv2.resize(completion_np, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_LINEAR)
        completion_np = np.uint8(completion_np)
        mask = cv2.imread('%s/%s'%(CONFIG.VAL.MASKDIR, filename.replace('jpg', 'png')))
        mask = cv2.resize(mask, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
        # Re-copy source pixels everywhere outside the mask at full resolution.
        completion_np[mask<0.5] = image[mask<0.5]
        # target = image # background
        # source = completion_np # foreground
        # mask = mask
        # completion_np = blend(target, source, mask, offset=(0, 0))
        cv2.imwrite('%s/%s'%(CONFIG.VAL.OUTDIR, filename), np.uint8(completion_np[:,:,::-1]))
        # vis = np.hstack((cv2.imread(path), mask, completion_np[:,:,::-1]))
        # cv2.imwrite('%s/%s'%(CONFIG.VAL.OUTDIR, filename), np.uint8(vis))
        # break
if __name__ == '__main__':
    # Entry point: train or run inference depending on the config flag.
    if CONFIG.IS_TRAIN:
        main()
    else:
        test()
|
"""
<NAME>
<NAME>
<NAME>
<NAME>
CISC 204
Modelling project
Wed december 9th 2020
Professor Muise
"""
#Import
from nnf import Var
from nnf import Or
import nnf
from lib204 import Encoding
from csvReader import readCSV
'''
Customer class
Represents the preferences and restrictions a person has when
searching for a restaurant.
Parameters:
    price_opt: price range being searched for
    diet_opt: any dietary restrictions
    dine_opt: preferred dining options
    distance: acceptable travel time to the restaurant
'''
class customer:
    """Holds one user's restaurant search preferences."""

    def __init__(self, price_opt, diet_opt, dine_opt, distance):
        # Store each preference under the attribute names the constraint
        # functions read.
        self.userprice = price_opt
        self.userdiet = diet_opt
        self.userdine_opt = dine_opt
        self.distance = distance
#Defining variables for encoding
#One propositional variable per selectable option; the constraint
#functions below combine these into per-restaurant formulas.
#Price point variables
low = Var('low')
med = Var('med')
high = Var('high')
#Dietary restriction food options variables
vegetarian = Var('vegetarian')
vegan = Var('vegan')
gluten = Var('gluten')
lactose = Var('lactose')
#Dining variables
dine_in = Var('dine-in')
take_out = Var('take-out')
delivery = Var('delivery')
#Distance variables (travel time in minutes)
time_under_10 = Var('under 10')
time_10_to_20 = Var('10 to 20')
time_over_20 = Var('over 20')
#Constraints
"""
If the user selected a price constraint and it matches
$,$$,$$$. If the restaurant matches the price point then
the constraint will get returned so that its only holds true
for that instance.
Parameters: Restaurant object, Customer object
Returns: A price constraint
"""
def price_constraint(restaurant, customer):
    """Return a price-point constraint for this restaurant/customer pair.

    A satisfiable formula is returned when the restaurant's price tag
    matches the customer's requested range; otherwise a contradiction
    (e.g. low & ~low) is returned so the model can never be satisfied.
    """
    #For low price point
    if "low" in customer.userprice:
        matches = restaurant.price == "$"
        return (low & ~med & ~high) if matches else (low & ~low)
    #For the med price point
    if "med" in customer.userprice:
        matches = restaurant.price == "$$"
        return (med & ~high & ~low) if matches else (med & ~med)
    #For the high price point
    if "high" in customer.userprice:
        matches = restaurant.price == "$$$"
        return (high & ~low & ~med) if matches else (high & ~high)
"""
If the user selected a single dietary restriction the
appropriate constraint will get returned so it only
holds true for that instance.
Parameters: Restaurant object, Customer object
Returns: A single dietary restriction constraint
"""
def single_diet_constraint(restaurant, customer):
    """Return the constraint for a customer with exactly one dietary need.

    Restaurant diet flags are indexed 0 = vegan, 1 = vegetarian,
    2 = gluten free, 3 = lactose free. When the restaurant cannot
    accommodate the requested diet, a contradiction is returned so the
    model can never be satisfied.
    """
    # (diet name, restaurant flag index, satisfied formula, its variable) —
    # checked in the same order as the original if/elif chain.
    options = [
        ('gluten', 2, gluten & ~vegan & ~vegetarian & ~lactose, gluten),
        ('lactose', 3, ~gluten & ~vegan & ~vegetarian & lactose, lactose),
        ('vegetarian', 1, ~gluten & ~vegan & vegetarian & ~lactose, vegetarian),
        ('vegan', 0, ~gluten & vegan & ~vegetarian & ~lactose, vegan),
    ]
    for diet_name, flag_idx, ok_formula, var in options:
        if diet_name in customer.userdiet:
            if 'TRUE' in restaurant.diet[flag_idx]:
                return ok_formula
            return ~var & var
    return None
"""If the user selected two dietary restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: A single two dietary restriction constraint
"""
def two_diet_constraint(restaurant, customer):
    """Return the constraint for a customer with exactly two dietary needs.

    Restaurant diet flags: 0 = vegan, 1 = vegetarian, 2 = gluten free,
    3 = lactose free. A contradiction is returned when the restaurant
    cannot accommodate both requested diets.

    Bug fixes vs. the original:
    - 'vegitarian' misspellings meant the gluten+vegetarian and
      lactose+vegetarian branches could never trigger.
    - the lactose+vegetarian branch tested restaurant.diet[1] twice
      instead of diet[3] (lactose) and diet[1] (vegetarian).
    """
    #For vegetarian and vegan customers
    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & vegan & ~lactose & ~gluten
        else:
            return vegetarian & ~vegetarian
    #For vegan and lactose free customers
    elif ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & vegan & lactose & ~gluten
        else:
            return vegan & ~vegan
    #For vegan and gluten free customers
    elif ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[2]):
            return ~vegetarian & vegan & ~lactose & gluten
        else:
            return vegan & ~vegan
    #For gluten free and lactose free customers
    elif ('gluten' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & ~vegan & lactose & gluten
        else:
            return gluten & ~gluten
    #For gluten free and vegetarian customers
    elif ('gluten' in customer.userdiet) and ('vegetarian' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & ~vegan & ~lactose & gluten
        else:
            return gluten & ~gluten
    #For lactose free and vegetarian customers
    elif ('lactose' in customer.userdiet) and ('vegetarian' in customer.userdiet):
        if ('TRUE' in restaurant.diet[3]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & ~vegan & lactose & ~gluten
        else:
            return lactose & ~lactose
"""If the user selected three dietary restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: a single three dietary constraint
"""
def three_diet_constraint(restaurant, customer):
    """Return the propositional constraint for a customer with exactly
    three dietary restrictions.

    restaurant.diet flags are indexed 0=vegan, 1=vegetarian,
    2=gluten-free, 3=lactose-free. A contradiction is returned when the
    restaurant does not support every selected restriction.

    Parameters: Restaurant object, Customer object
    Returns: a single three dietary constraint
    """
    wants = customer.userdiet
    # For vegetarian and vegan and gluten free customers
    if ('vegetarian' in wants) and ('vegan' in wants) and ('gluten' in wants):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[2]):
            return vegetarian & vegan & ~lactose & gluten
        else:
            return vegetarian & ~vegetarian
    # For vegetarian and vegan and lactose free customers
    elif ('vegetarian' in wants) and ('vegan' in wants) and ('lactose' in wants):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[3]):
            return vegetarian & vegan & lactose & ~gluten
        else:
            return vegetarian & ~vegetarian
    # For gluten free and vegan and lactose free customers.
    # BUG FIX: the vegan flag is diet[0]; the old code tested diet[1]
    # (vegetarian) by mistake.
    elif ('gluten' in wants) and ('vegan' in wants) and ('lactose' in wants):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & vegan & lactose & gluten
        else:
            return vegetarian & ~vegetarian
    # For gluten free and vegetarian and lactose free customers.
    # This combination was previously unhandled and fell through to
    # an implicit None, which would break Encoding.add_constraint.
    elif ('gluten' in wants) and ('vegetarian' in wants) and ('lactose' in wants):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[3]):
            return vegetarian & ~vegan & lactose & gluten
        else:
            return vegetarian & ~vegetarian
"""If the user selected all dietary restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: a single all dietary constraint
"""
def all_diet_constraint(restaurant, customer):
    """Build the constraint for a customer who selected every dietary
    restriction.
    Parameters: Restaurant object, Customer object
    Returns: a single all dietary constraint
    """
    restrictions = ('vegetarian', 'vegan', 'gluten', 'lactose')
    # Only applies when the user picked all four restrictions
    if all(r in customer.userdiet for r in restrictions):
        # The restaurant must support all four diet columns (0..3)
        if all('TRUE' in restaurant.diet[i] for i in range(4)):
            return vegetarian & vegan & lactose & gluten
        # Contradiction: this restaurant can never satisfy the user
        return vegetarian & ~vegetarian
"""If the user selected one dining restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: a single dining constraint
"""
def one_dining_constraints(restaurant, customer):
    """Build the constraint for a customer who selected exactly one
    dining option.
    Parameters: Restaurant object, Customer object
    Returns: a single dining constraint
    """
    # option name -> (restaurant.delivery index, propositional variable)
    options = (
        ('dine-in', 0, dine_in),
        ('take-out', 1, take_out),
        ('delivery', 2, delivery),
    )
    for name, idx, var in options:
        if name in customer.userdine_opt:
            # Satisfiable only when the restaurant offers this option
            if restaurant.delivery[idx] == 'TRUE':
                return var
            # Contradiction otherwise
            return ~var & var
"""If the user selected two dining restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: two dining constraint
"""
def two_dining_constraints(restaurant, customer):
    """Build the constraint for a customer who selected exactly two
    dining options.
    Parameters: Restaurant object, Customer object
    Returns: two dining constraint
    """
    chosen = customer.userdine_opt
    offered = restaurant.delivery
    # (first option, second option, delivery indices, constraint when supported)
    combos = (
        ('dine-in', 'take-out', (0, 1), dine_in & take_out & ~delivery),
        ('dine-in', 'delivery', (0, 2), dine_in & ~take_out & delivery),
        ('take-out', 'delivery', (1, 2), ~dine_in & take_out & delivery),
    )
    for first, second, indices, constraint in combos:
        if (first in chosen) and (second in chosen):
            if all(offered[i] == 'TRUE' for i in indices):
                return constraint
            # Contradiction: the restaurant lacks one of the wanted options
            return ~dine_in & dine_in
"""If the user selected all dining restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: all dining constraint
"""
def all_dining_constraints(restaurant, customer):
    """Build the constraint for a customer who selected every dining
    option.
    Parameters: Restaurant object, Customer object
    Returns: all dining constraint
    """
    wanted = ('take-out', 'delivery', 'dine-in')
    if all(option in customer.userdine_opt for option in wanted):
        # The restaurant must offer dine-in, take-out and delivery
        if all(restaurant.delivery[i] == 'TRUE' for i in range(3)):
            return dine_in & take_out & delivery
        # Contradiction when any option is missing
        return ~dine_in & dine_in
"""If the user selected distance restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: distance constraint
"""
def distanceConstraint(restaurant, customer):
    """Build the constraint matching the customer's preferred travel
    time from campus.
    Parameters: Restaurant object, Customer object
    Returns: distance constraint
    """
    # preference label -> (restaurant.distance index, variable,
    #                      constraint asserted when the bracket matches)
    brackets = {
        'under 10': (0, time_under_10,
                     time_under_10 & ~time_10_to_20 & ~time_over_20),
        '10 to 20': (1, time_10_to_20,
                     time_10_to_20 & ~time_under_10 & ~time_over_20),
        'over 20': (2, time_over_20,
                    time_over_20 & ~time_10_to_20 & ~time_under_10),
    }
    entry = brackets.get(customer.distance)
    if entry is not None:
        idx, var, satisfied = entry
        # Satisfiable only when the restaurant is in this time bracket
        if restaurant.distance[idx] == 'TRUE':
            return satisfied
        # Contradiction otherwise
        return var & ~var
"""
This function is where the constraints get added to our
theory.
Parameters: Restaurant object and Customer object
"""
def example_theory(restaurant, customer):
    """Assemble the Encoding containing every constraint implied by the
    customer's preferences for the given restaurant.

    BUG FIX: the dining/diet branches previously read the *global*
    ``user`` variable instead of the ``customer`` parameter, coupling
    this function to main-script state. The dead ``if 5 in
    user.userdiet`` check was removed: ``userdiet`` only ever contains
    restriction name strings, so ``5 in userdiet`` was always False.

    Parameters: Restaurant object and Customer object
    Returns: the populated Encoding
    """
    # Shorter variables for the objects
    r = restaurant
    c = customer
    # Defining encoding variable
    E = Encoding()
    # Distance and price constraints apply to every customer
    E.add_constraint(distanceConstraint(r, c))
    E.add_constraint(price_constraint(r, c))
    # Dining constraints depend on how many options were picked
    if len(c.userdine_opt) == 1:
        E.add_constraint(one_dining_constraints(r, c))
    elif len(c.userdine_opt) == 2:
        E.add_constraint(two_dining_constraints(r, c))
    elif len(c.userdine_opt) == 3:
        E.add_constraint(all_dining_constraints(r, c))
    # Diet constraints depend on how many restrictions were picked
    # (an empty userdiet — "no restrictions" — adds nothing)
    if len(c.userdiet) == 1:
        E.add_constraint(single_diet_constraint(r, c))
    elif len(c.userdiet) == 2:
        E.add_constraint(two_diet_constraint(r, c))
    elif len(c.userdiet) == 3:
        E.add_constraint(three_diet_constraint(r, c))
    elif len(c.userdiet) == 4:
        E.add_constraint(all_diet_constraint(r, c))
    # return the Encoding variable
    return E
"""
Main method: Where the implementation happens. The theory gets solved
where a sorted list from best result to worst result is displayed
to the screen.
The user also inputs their prefrences
"""
if __name__ == "__main__":
    # This is where we will get user input information
    flag = True
    restaurant_list = readCSV()
    # Main interaction loop
    while flag:
        # Asking if user wants to continue or exit
        prog_exit = input('Welcome to the Queens restuarant finder! Press Q to quit or enter to continue.\n')
        # if statement to exit
        if prog_exit.lower() == 'q':
            break
        # Getting users price range information.
        # BUG FIX: an invalid choice previously only printed a warning and
        # left `price` unbound, crashing later when the customer object
        # was built; now we re-prompt until the input is valid.
        while True:
            user_price = int(input('Please select a price range: \n 1. $ - most affordable'\
                 '\n 2. $$ - intermediate \n 3. $$$ - most expensive\n'))
            if user_price == 1:
                price = 'low'
                print('You selected $.')
                break
            elif user_price == 2:
                price = 'med'
                print('You selected $$.')
                break
            elif user_price == 3:
                price = 'high'
                print('You selected $$$')
                break
            else:
                print('Invalid input: Must be either option 1, 2 or 3')
        # Getting diet restrictions of the user
        user_restrictions_in = input('Please select the following diet restrictions '
                                     '(please separate by a comma if selecting multiple):'
                                     ' \n 1. Vegan \n 2. Vegetarian \n 3. Gluten-free \n'
                                     ' 4. lactose intolerant \n 5. No restrictions\n')
        # Possibly multiple restrictions: split on commas and convert to ints
        user_selected_restrictions = [int(choice) for choice in user_restrictions_in.split(',')]
        # Map menu numbers onto restriction names (option 5 adds nothing)
        diet = []
        if 1 in user_selected_restrictions:
            diet.append('vegan')
        if 2 in user_selected_restrictions:
            diet.append('vegetarian')
        if 3 in user_selected_restrictions:
            diet.append('gluten')
        if 4 in user_selected_restrictions:
            diet.append('lactose')
        # Getting user preference for dining options
        user_dine_option = input('Please select a dining option. If multiple separate by a comma: \n 1. Dine-in \n 2. Take-out\n 3. Delivery\n')
        dine_in_list = user_dine_option.split(',')
        final_list = []
        if '1' in dine_in_list:
            final_list.append('dine-in')
        if '2' in dine_in_list:
            final_list.append('take-out')
        if '3' in dine_in_list:
            final_list.append('delivery')
        # Getting user preference for distance
        user_distance_option = int(input('Please select a distance from Queens campus:'
                                         ' \n 1. Under 10 minutes \n 2. Between 10 and 20 minutes \n 3. Over 20 minutes\n'))
        if user_distance_option == 1:
            distance = 'under 10'
        elif user_distance_option == 2:
            distance = '10 to 20'
        else:
            distance = 'over 20'
        # Creating customer class to store information in an object for easier access
        user = customer(price, diet, final_list, distance)
        # Rank every restaurant: satisfiable theories go to the front of
        # the list, unsatisfiable ones to the back.
        finalListR = []
        for entry in restaurant_list:
            # BUG FIX: the theory was previously built twice per
            # restaurant; build it once and solve that instance.
            T = example_theory(entry, user)
            if T.is_satisfiable():
                finalListR.insert(0, entry.name)
            else:
                finalListR.append(entry.name)
        # Display results from best fit to worst fit with star ratings.
        # BUG FIX: the old boundaries (`i <= 7 and i < 11`,
        # `i <= 11 and i < 15`) only matched i == 7 / i in 8..11, so most
        # mid-list restaurants incorrectly fell through to one star.
        for i in range(len(finalListR)):
            if i < 4:
                stars = '★ ★ ★ ★ ★'
            elif i < 7:
                stars = '★ ★ ★ ★'
            elif i < 11:
                stars = '★ ★ ★'
            elif i < 15:
                stars = '★ ★'
            else:
                stars = '★'
            print(f"{i + 1}. {finalListR[i]} {stars}")
<reponame>RemcoTaal/IDP<filename>Gui.py<gh_stars>0
"""
Interdisciplinair Project
University of Applied Sciences Utrecht
TICT-V1IDP-15 Project
"""
import csv
import datetime
import json
import random
import socket
import sys
import time
import tkinter as tk
import tkinter.font
import uuid
from _thread import *
from tkinter import *

import requests
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure

from Node import Node
# The Gui class is based upon Node and TkInter Frame
class Gui(Node, tk.Frame):
    """Tkinter dashboard node.

    Connects to the central server over TCP, shows the online status of
    both Raspberry Pi nodes, the barrier state and a water level graph
    fed from the Rijkswaterstaat CSV endpoint.
    """
    def __init__(self, ip_address: str, port: int, debug: bool):
        """Build all widgets and connect to the server at ip_address:port."""
        # Call super constructors
        super().__init__(
            ip_address=ip_address,
            port=port,
            uuid="GUI_{}".format(uuid.uuid4().hex[:7]),
            connection_handler=socket.socket(socket.AF_INET, socket.SOCK_STREAM),
            debug=debug,
            is_gui=True,
        )
        # Initialise TkInter objects
        self.root = Tk()
        self.font_size_10 = tkinter.font.Font()
        self.font_size_12 = tkinter.font.Font()
        self.main_frame = Frame(self.root)
        self.top_main_frame = Frame(self.main_frame)
        self.middle_main_frame = Frame(self.main_frame)
        self.under_main_frame = Frame(self.main_frame)
        self.result_frame = Frame(self.root)
        self.scrollbar = Scrollbar(self.under_main_frame)
        self.figure = Figure((5, 2), 100)
        self.sub_plot = self.figure.add_subplot(111)
        self.canvas = FigureCanvasTkAgg(self.figure, self.result_frame)
        self.client_listbox = Listbox(self.under_main_frame)
        self.top_frame = Frame(self.root)
        self.middle_top_frame = Frame(self.top_frame)
        # Initialise TkInter labels
        self.node_1_name_label = Label(master=self.top_main_frame)
        self.node_1_status_label = Label(master=self.top_main_frame)
        self.node_2_name_label = Label(master=self.middle_main_frame)
        self.node_2_status_label = Label(master=self.middle_main_frame)
        self.status_label = Label(master=self.middle_top_frame)
        self.status_value_label = Label(master=self.middle_top_frame)
        self.barrier_label = Label(master=self.middle_top_frame)
        self.barrier_value_label = Label(master=self.middle_top_frame)
        self.water_level_label = Label(master=self.middle_top_frame)
        self.water_level_value_label = Label(master=self.middle_top_frame)
        # Initialise and set GUI variables
        self.width = 1000
        self.height = 666
        self.client_list = []
        self.graph_x = []
        self.graph_y = []
        self.last_data = None
        self.file_location = 'water_level.csv'
        self.csv_url = 'https://waterberichtgeving.rws.nl/wbviewer/maak_grafiek.php' \
                       '?loc=HOEK&set=eindverwachting&nummer=1&format=csv'
        # Set TkInter variables
        self.set_tkinter_variables()
        # After setting everything up, try to connect to the passed IP and Port.
        # BUG FIX: WindowsError only exists on Windows and raised a
        # NameError elsewhere; use the portable exception types instead.
        try:
            self.connection_handler.connect((self.ip_address, self.port))
            self.connected_to_server = True
        except ConnectionRefusedError:
            if self.debug: print("{} - Could not connect to server, is it running?".format(Gui.get_time()))
            sys.exit()
        except socket.error as e:
            if self.debug: print("{} - Socket error {}".format(Gui.get_time(), e))
            sys.exit()
        if self.debug:
            print("{} - Successfully connect to IP:{}, PORT:{}".format(Gui.get_time(), self.ip_address, self.port))
        # Finally call TkInter super constructor
        tk.Frame.__init__(self)
    @staticmethod
    def get_time():
        """ Returns current time in format %d-%m-%Y %X """
        return datetime.datetime.now().strftime('%d-%m-%Y %X')
    @staticmethod
    def bool(string):
        """ Apparently a bool(str) is always true, so let's use this to convert 'True' to True and 'False' to False """
        if string == "True":
            return True
        return False
    def set_tkinter_variables(self):
        """ Set TkInter variables to the objects created in the constructor """
        self.get_api_data()
        self.read_api_data()
        self.font_size_10.configure(family="Courier", size=10)
        self.font_size_12.configure(family="Courier", size=12)
        self.root.title('Status Waterkering')
        self.root.resizable(0, 0)
        self.root.geometry("{}x{}+{}+{}".format(
            self.width,
            self.height,
            # The auto center functionally to center the frame on your screen doesn't always work, just use 0.
            0,  # int(math.floor(GetSystemMetrics(0)) / 2 - self.width / 2),
            0)  # int(math.floor(GetSystemMetrics(1)) / 2 - self.height / 2) - 50)
        )
        self.main_frame.pack(side=LEFT, fill=BOTH, expand=True)
        self.main_frame.configure(
            background='DodgerBlue4',
            highlightthickness=15,
            highlightbackground='DodgerBlue4',
            highlightcolor='DodgerBlue4',
        )
        self.top_main_frame.pack(side=TOP, fill=BOTH)
        self.top_main_frame.configure(
            background='midnight blue',
            highlightthickness=4,
            highlightbackground='black',
            highlightcolor='black'
        )
        self.middle_main_frame.pack(side=TOP, fill=X, pady=25)
        self.middle_main_frame.configure(
            background='midnight blue',
            highlightthickness=4,
            highlightbackground='black',
            highlightcolor='black'
        )
        self.under_main_frame.pack(side=BOTTOM, fill=X)
        self.under_main_frame.configure(background='yellow')
        self.result_frame.pack(side=BOTTOM, fill=BOTH, expand=True)
        self.result_frame.configure(
            width=250,
            height=250,
            background='midnight blue',
            highlightthickness=15,
            highlightbackground='DodgerBlue4',
            highlightcolor='DodgerBlue4'
        )
        self.scrollbar.pack(side=RIGHT, fill=Y)
        self.scrollbar.configure(width=25)
        self.scrollbar.config(command=self.client_listbox.yview)
        # Plot only the last 7 samples (the most recent hour of data)
        self.sub_plot.plot(self.graph_x[-7:], self.graph_y[-7:])
        self.sub_plot.set_title('Actuele Waterstand ' + Gui.get_time(), fontsize=10)
        self.sub_plot.set_xlabel('Tijdstip (Afgelopen uur)', fontsize=10)
        self.sub_plot.set_ylabel('Verschil NAP in cm', fontsize=10)
        # BUG FIX: FigureCanvasTkAgg.show() was removed from matplotlib;
        # draw() is the supported equivalent.
        self.canvas.draw()
        self.canvas._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=True)  # TODO: Fix access to protected member
        self.canvas.get_tk_widget().pack(side=RIGHT, fill=BOTH, expand=True)
        self.client_listbox.config(yscrollcommand=self.scrollbar.set)
        self.client_listbox.pack(side=BOTTOM, fill=BOTH)
        self.client_listbox.configure(
            bd=5,
            font=self.font_size_10,
            height=15,
            relief='flat'
        )
        self.top_frame.pack(side=TOP, fill=X)
        self.top_frame.configure(
            background='midnight blue',
            highlightthickness=15,
            highlightbackground='DodgerBlue4',
            highlightcolor='DodgerBlue4'
        )
        self.middle_top_frame.pack(side=TOP, fill=BOTH)
        self.middle_top_frame.configure(
            background='midnight blue',
            highlightthickness=4,
            highlightbackground='black',
            highlightcolor='black'
        )
        self.node_1_name_label.configure(
            text="Raspberry 1:", bg='midnight blue', fg='white', font=self.font_size_12, height=5
        )
        self.node_1_status_label.configure(text='Offline', bg='midnight blue', fg='white', font=self.font_size_12)
        self.node_1_name_label.grid(row=0, column=0)
        self.node_1_status_label.grid(row=0, column=1)
        self.node_2_name_label.configure(
            text='Raspberry 2:', bg='midnight blue', fg='white', font=self.font_size_12, height=5
        )
        self.node_2_status_label.configure(text='Offline', bg='midnight blue', fg='white', font=self.font_size_12)
        self.node_2_name_label.grid(row=0, column=0)
        self.node_2_status_label.grid(row=0, column=1)
        self.status_label.grid(row=0, column=0, sticky=W)
        self.status_value_label.grid(row=0, column=1)
        self.status_label.configure(text="Status:", bg='midnight blue', fg='white', font=self.font_size_12)
        self.status_value_label.configure(text="", bg='midnight blue', fg='white', font=self.font_size_12)
        self.barrier_label.grid(row=1, column=0, sticky=W)
        self.barrier_value_label.grid(row=1, column=1, sticky=W)
        self.barrier_label.configure(text="Kering:", bg='midnight blue', fg='white', font=self.font_size_12)
        self.barrier_value_label.configure(text="Onbekend", fg='white', bg='midnight blue', font=self.font_size_12)
        self.water_level_label.grid(row=2, column=0)
        self.water_level_value_label.grid(row=2, column=1, sticky=W)
        self.water_level_label.configure(text="Waterpeil:", bg='midnight blue', fg='white', font=self.font_size_12)
        self.water_level_value_label.configure(
            text='Sensor error', fg='white', bg='midnight blue', font=self.font_size_12
        )
    def get_api_data(self) -> None:
        """ Get's API data and writes in to CSV file """
        with requests.Session() as s:
            download = s.get(self.csv_url)
            decoded_content = download.content.decode('utf-8')
            result = list(csv.reader(decoded_content.splitlines(), delimiter=';'))
            # Write to file
            with open(self.file_location, 'w', newline='') as myCSVFile:
                write = csv.writer(myCSVFile, delimiter=';')
                for x in result:
                    write.writerow(x)
    def read_api_data(self) -> None:
        """ Reads data from CSV file to display in the graph """
        self.graph_x = []
        self.graph_y = []
        with open(self.file_location, 'r') as file:
            reader = csv.reader(file, delimiter=';')
            file.readline()  # skip the CSV header row
            for x in reader:  # Every line has a tuple value.
                datum = x[0][-5:]
                water_level = x[2]
                if len(water_level) != 0:
                    self.graph_x.append(datum)
                    self.graph_y.append(int(water_level))
    def parse_socket_data(self, data: list):
        """ Handles socket data accordingly """
        if data[1] == "CLIENT_DATA":
            # Server send a JSON formatted string of all the client data, let's parse it.
            json_data = ''
            for x in range(2, len(data)):
                # Since the socket_read function splits the string based on comma's,
                # we need to stick the string back together
                json_data += data[x] + ","
            # This part fixes some parentheses error when trying to load the JSON
            while "},{" in json_data or "{{" in json_data:
                json_data = json_data.replace("},{", "},\"" + str(random.uniform(0, 10)) + "\":{", 1)
                json_data = json_data.replace("{{", "{ \"" + str(random.uniform(0, 10)) + "\":{", 1)
            # Load the JSON, and remove every current Node in client_list
            json_data = json.loads(json_data[:-1])
            self.client_list.clear()
            for x in json_data:
                if json_data[x]['uuid'] == "NODE_1":
                    # Since NODE_1 holds vital information, like water level and barrier status. We parse it separately
                    self.barrier_open = Gui.bool(json_data[x]['barrier_open'])
                    self.online = Gui.bool(json_data[x]['online'])
                    self.water_level = float(json_data[x]['water_level'])
                    self.water_level_value_label.configure(text=str(round(self.water_level, 1)) + ' cm')
                    # Update the labels while we're at it.
                    if self.online:
                        self.node_1_status_label.configure(text="Online")
                    else:
                        self.node_1_status_label.configure(text="Offline")
                    if self.barrier_open:
                        self.barrier_value_label.configure(text="Open")
                    else:
                        self.barrier_value_label.configure(text="Gesloten")
                # NOTE: every entry — including NODE_1 — is stored as a Node
                # object; nodes_online_check() relies on this behaviour.
                self.client_list.append(
                    Node(
                        ip_address=json_data[x]['ip_address'],
                        port=int(json_data[x]['port']),
                        uuid=json_data[x]['uuid'],
                        connection_handler=json_data[x]['connection_handler'],
                        barrier_open=Gui.bool(json_data[x]['barrier_open']),
                        online=Gui.bool(json_data[x]['online']),
                        debug=Gui.bool(json_data[x]['debug']),
                        registered=Gui.bool(json_data[x]['registered']),
                        is_gui=Gui.bool(json_data[x]['is_gui']),
                        last_ping=float(json_data[x]['last_ping'])
                    )
                )
        elif data[1] == "UUID_REQ":
            # Server want's to know our UUID, let's write it back to the socket.
            self.socket_write(data_header="UUID", data=str(self.uuid))
        elif data[1] == "REG_COMPLETE":
            # The connection procedure is done.
            self.registered = True
    def socket_write(self, data: str, data_header: str):
        """
        Writes a concatenation of the client UUID, data header and data to
        the connection socket of this program instance
        """
        message = str(self.uuid) + "," + data_header + "," + data
        if self.debug: print("{} - GUI send: {}".format(Gui.get_time(), message))
        try:
            self.connection_handler.send(message.encode('ascii'))
        # BUG FIX: `except A or B` only catches A; a tuple catches both.
        except (ConnectionResetError, ConnectionAbortedError):
            if self.debug:
                print("{} - Connection has been terminated by the server.".format(self.get_time()))
            self.default_values_labels()
            # Retry the send once after resetting the labels
            self.connection_handler.send(message.encode('ascii'))
    def socket_read(self):
        """
        Listens to the connection socket of this program instance
        and passes that data to the parse_socket_data() function
        """
        # BUG FIX: start with bytes (not ''), otherwise decode() below
        # raises AttributeError when recv() failed before assigning data.
        data = b''
        try:
            data = self.connection_handler.recv(8192)
            if data == self.last_data:
                # Don't do anything if data is identical
                return
            self.last_data = data
        # BUG FIX: `except A or B or ...` only catches the first type; use
        # a tuple. OSError replaces the Windows-only WindowsError.
        except (ConnectionResetError, ConnectionAbortedError, KeyboardInterrupt, OSError):
            if self.debug:
                print("{} - Connection has been terminated by the server.".format(Gui.get_time()))
            self.default_values_labels()
        data = data.decode('utf-8').strip().split(',')
        if self.debug:
            print("{} - GUI received: {}".format(Gui.get_time(), data))
        if (data[0] == self.uuid) or (data[0] == "BROADCAST"):
            return self.parse_socket_data(data=data)
    def default_values_labels(self) -> None:
        """ If the Gui lost connection to the server it will display some default values """
        self.node_1_status_label.configure(text='Offline', bg='midnight blue', fg='white', font=self.font_size_12)
        self.node_2_status_label.configure(text='Offline', bg='midnight blue', fg='white', font=self.font_size_12)
        self.barrier_value_label.configure(text="Onbekend", fg='white', bg='midnight blue', font=self.font_size_12)
        # (the duplicate water_level configure call was removed)
        self.water_level_value_label.configure(text='Sensor error', fg='white', bg='midnight blue',
                                               font=self.font_size_12
                                               )
        self.status_value_label.configure(text="Onderhoud vereist", bg='midnight blue', fg='white',
                                          font=self.font_size_12
                                          )
    def get_server_data(self) -> None:
        """" Sends a request to the server to get the latest client JSON data """
        while True:
            if self.registered:
                self.socket_write("", "GUI_UPDATE_REQ")
            time.sleep(2.5)
    def update_graph(self) -> None:
        """ Function to update the graph """
        self.get_api_data()
        self.graph_y = []
        self.graph_x = []
        self.read_api_data()
        self.sub_plot.set_title('Actuele Waterstand ' + Gui.get_time(), fontsize=10)
        self.canvas.get_tk_widget().forget()
        self.sub_plot.plot(self.graph_x[-7:], self.graph_y[-7:])
        self.canvas.get_tk_widget().pack(side=RIGHT, fill=BOTH, expand=True)
        # BUG FIX: show() was removed from FigureCanvasTkAgg; use draw().
        self.canvas.draw()
    def nodes_online_check(self) -> None:
        """ Checks if NODE_1 or NODE_2 is online or not and updates labels accordingly """
        node_list = []
        for client in self.client_list:
            if "NODE_1" == client.uuid:
                node_list.append(client.uuid)
            if "NODE_2" == client.uuid:
                node_list.append(client.uuid)
        if "NODE_1" not in node_list:
            self.node_1_status_label['text'] = 'Offline'
        else:
            self.node_1_status_label['text'] = 'Online'
        if "NODE_2" not in node_list:
            self.node_2_status_label['text'] = 'Offline'
        else:
            self.node_2_status_label['text'] = 'Online'
    def update_gui(self):
        """ Function to update labels and the listbox of the GUI """
        self.populate_client_list()
        self.nodes_online_check()
        if self.node_1_status_label['text'] == 'Online' and self.node_2_status_label['text'] == 'Online':
            self.status_value_label['text'] = 'In werking'
        elif self.node_1_status_label['text'] == 'Offline' and self.node_2_status_label['text'] == 'Online':
            self.status_value_label['text'] = 'In werking (onderhoud vereist)'
        elif self.node_2_status_label['text'] == 'Offline' and self.node_1_status_label['text'] == 'Online':
            self.status_value_label['text'] = 'In werking (onderhoud vereist)'
        elif self.node_1_status_label['text'] == 'Offline' and self.node_2_status_label['text'] == 'Offline':
            self.status_value_label['text'] = 'Niet in werking (onderhoud vereist)'
    def update_gui_handler(self):
        """ Recursively calls update function every 4.5 seconds """
        self.update_gui()
        self.root.after(4500, self.update_gui_handler)  # Update GUI labels every 4.5 seconds
    def update_graph_handler(self):
        """ Recursively calls update function every 5 minutes """
        self.update_graph()
        self.root.after(300000, self.update_graph_handler)  # Update graph every 5 minutes
    def populate_client_list(self):
        """ Shows all connected clients in the listbox """
        self.client_listbox.delete(0, END)
        self.client_listbox.insert(0, "{:19}{:15}{:21}".format('UUID', 'IP', 'Port'))
        self.client_listbox.insert(1, '{:19}{:15}{:21}'.format('SERVER_1', '192.168.42.1', '5555'))
        for client in self.client_list:
            self.client_listbox.insert(2, '{:19}{:14}{:20}'.format(client.uuid, client.ip_address, str(client.port)))
    def init_socket_read(self):
        """ socket_read() thread had to be called via another function to work """
        while True:
            # NOTE(review): forcing debug on every iteration overrides the
            # user's choice at startup — looks like leftover debugging;
            # kept to preserve behaviour, confirm before removing.
            self.debug = True
            self.socket_read()
# The main function starts the code.
# The main function starts the code.
if __name__ == '__main__':
    try:
        # BUG FIX: bool("False") is True for any non-empty string, so the
        # debug prompt previously enabled debug whenever *anything* was
        # typed. Gui.bool() only returns True for the literal "True".
        gui = Gui(  # Create new Gui object
            str(input("IP: ")),  # Ask for input, since it depends on how it's setup
            int(input("Port (5555): ")),
            Gui.bool(input("Debug (False): "))
        )
        start_new_thread(gui.get_server_data, ())  # Start the tread to ask the server for client data
        start_new_thread(gui.init_socket_read, ())  # Start the thread to listen for server socket traffic
        gui.update_gui_handler()  # Start the TkInter thread to update the GUI labels
        gui.update_graph_handler()  # Start the TkInter thread to update the graph
        gui.mainloop()  # Start TkInter
    except Exception as e:
        print("There was an error initiating this node: {}".format(e))
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import celery
from celery.bin import worker as celery_worker
from datetime import datetime
from subprocess import Popen
from flask_migrate import MigrateCommand
from flask_script import Manager
from superset import app, db, data, security
# Flask app configuration and the Flask-Script command manager that
# drives the CLI commands defined below.
config = app.config
manager = Manager(app)
# Expose Flask-Migrate's `db` subcommands (upgrade, migrate, ...)
manager.add_command('db', MigrateCommand)
@manager.command
def init():
    """Initialize the Superset application by syncing role definitions."""
    security.sync_role_definitions()
@manager.option(
    '-v', '--verbose', action='store_true',
    help="Show extra information")
def version(verbose):
    """Prints the current version number"""
    divider = '-----------------------'
    banner = '\n{line}\nSuperset {version}\n{line}'.format(
        line=divider,
        version=config.get('VERSION_STRING'))
    print(banner)
    if verbose:
        # Also show which database engine is configured
        print('[DB] : {}'.format(db.engine))
@manager.option(
    '-t', '--load-test-data', action='store_true',
    help="Load additional test data")
def load_examples(load_test_data):
    """Loads a set of Slices and Dashboards and a supporting dataset """
    print("Loading examples into {}".format(db))
    data.load_css_templates()
    # (status message, loader callable) pairs, executed in order
    loaders = [
        ("Loading energy related dataset", data.load_energy),
        ("Loading [World Bank's Health Nutrition and Population Stats]",
         data.load_world_bank_health_n_pop),
        ("Loading [Birth names]", data.load_birth_names),
        ("Loading [Random time series data]", data.load_random_time_series_data),
        ("Loading [Random long/lat data]", data.load_long_lat_data),
        ("Loading [Multiformat time series]", data.load_multiformat_time_series_data),
        ("Loading [Misc Charts] dashboard", data.load_misc_dashboard),
    ]
    if load_test_data:
        loaders.append(("Loading [Unicode test data]", data.load_unicode_test_data))
    for message, loader in loaders:
        print(message)
        loader()
@manager.option(
    '-d', '--datasource',
    help=(
        "Specify which datasource name to load, if omitted, all "
        "datasources will be refreshed"))
@manager.option(
    '-m', '--merge',
    help=(
        "Specify using 'merge' property during operation. "
        "Default value is False "))
def refresh_druid(datasource, merge):
    """Refresh druid datasources"""
    # Imported lazily, matching the original module layout
    from superset import models
    session = db.session()
    for cluster in session.query(models.DruidCluster).all():
        try:
            cluster.refresh_datasources(
                datasource_name=datasource, merge_flag=merge)
        except Exception as e:
            # Keep going with the remaining clusters, but record the error
            print("Error while processing cluster '{}'\n{}".format(cluster, str(e)))
            logging.exception(e)
        cluster.metadata_last_refreshed = datetime.now()
        print('Refreshed metadata from cluster [{}]'.format(cluster.cluster_name))
    session.commit()
@manager.command
def worker():
    """Starts a Superset worker for async SQL query execution."""
    # Equivalent CLI: celery -A tasks worker --loglevel=info
    print("Starting SQL Celery worker.")
    celery_config = config.get('CELERY_CONFIG')
    if celery_config:
        print("Celery broker url: ")
        print(celery_config.BROKER_URL)
    current_celery_app = celery.current_app._get_current_object()
    celery_worker.worker(app=current_celery_app).run(
        broker=celery_config.BROKER_URL,
        loglevel='INFO',
        traceback=True,
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.