code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.functions as F
import nnabla.solvers as S
import argparse
import gym
from nnabla.ext_utils import get_extension_context
from common.buffer import ReplayBuffer
from common.log import prepare_monitor
from common.experiment import evaluate, train
from common.exploration import LinearlyDecayEpsilonGreedy
from common.helper import clip_by_value
from common.network import nature_head
from common.env import AtariWrapper
from dqn import DQN, update
def q_function(obs, num_actions, min_v, max_v, num_bins, scope):
    """Build a categorical (C51-style) Q-function graph.

    Returns a tuple of:
      values: (batch, num_actions) expected Q-value per action.
      probs:  (batch, num_actions, num_bins) per-atom probabilities.
      dists:  (num_bins, 1) the fixed support (atom) values.
    """
    with nn.parameter_scope(scope):
        out = nature_head(obs)
        out = PF.affine(out, num_actions * num_bins, name='output')
        out = F.reshape(out, (-1, num_actions, num_bins))
        # Softmax over the atom axis, written out explicitly.
        probs = F.exp(out) / F.sum(F.exp(out), axis=2, keepdims=True)
        # Evenly spaced support of num_bins atoms spanning [min_v, max_v].
        dists = F.arange(0, num_bins) * (max_v - min_v) / (num_bins - 1) + min_v
        # Expected return per action: sum_i p_i * z_i over the atom axis.
        values = F.sum(probs * F.reshape(dists, (1, 1, num_bins)), axis=2)
        return values, probs, F.reshape(dists, (-1, 1))
class CategoricalDQN(DQN):
    """Categorical DQN (C51): learns a distribution over returns per action
    on a fixed support of `num_bins` atoms in [min_v, max_v], instead of a
    scalar Q-value as in vanilla DQN.
    """

    def __init__(self,
                 q_function,
                 num_actions,
                 min_v,
                 max_v,
                 num_bins,
                 batch_size,
                 gamma,
                 lr):
        # Support parameters must be set before the parent __init__,
        # which invokes _build().
        self.min_v = min_v
        self.max_v = max_v
        self.num_bins = num_bins
        super().__init__(q_function, num_actions, batch_size, gamma, lr)

    def _build(self):
        # infer variable (single observation: 4 stacked 84x84 frames)
        self.infer_obs_t = infer_obs_t = nn.Variable((1, 4, 84, 84))
        # inference output
        self.infer_q_t,\
            self.infer_probs_t, _ = self.q_function(infer_obs_t, self.num_actions,
                                                    self.min_v, self.max_v,
                                                    self.num_bins, 'q_func')
        # sink so one forward pass evaluates both outputs
        self.infer_t = F.sink(self.infer_q_t, self.infer_probs_t)
        # train variables
        self.obss_t = nn.Variable((self.batch_size, 4, 84, 84))
        self.acts_t = nn.Variable((self.batch_size, 1))
        self.rews_tp1 = nn.Variable((self.batch_size, 1))
        self.obss_tp1 = nn.Variable((self.batch_size, 4, 84, 84))
        self.ters_tp1 = nn.Variable((self.batch_size, 1))
        # training output: online network at t, target network at t+1
        q_t, probs_t, dists = self.q_function(self.obss_t, self.num_actions,
                                              self.min_v, self.max_v,
                                              self.num_bins, 'q_func')
        q_tp1, probs_tp1, _ = self.q_function(self.obss_tp1, self.num_actions,
                                              self.min_v, self.max_v,
                                              self.num_bins, 'target_q_func')
        expand_last = lambda x: F.reshape(x, x.shape + (1,))
        flat = lambda x: F.reshape(x, (-1, 1))
        # extract the distribution of the action actually taken
        a_t_one_hot = expand_last(F.one_hot(self.acts_t, (self.num_actions,)))
        probs_t_selected = F.max(probs_t * a_t_one_hot, axis=1)
        # extract the distribution of the greedy next action
        _, indices = F.max(q_tp1, axis=1, keepdims=True, with_index=True)
        a_tp1_one_hot = expand_last(F.one_hot(indices, (self.num_actions,)))
        probs_tp1_best = F.max(probs_tp1 * a_tp1_one_hot, axis=1)
        # clipping reward to [-1, 1] (standard Atari preprocessing)
        clipped_rews_tp1 = clip_by_value(self.rews_tp1, -1.0, 1.0)
        # zero out the bootstrap term on terminal transitions
        disc_q_tp1 = F.reshape(dists, (1, -1)) * (1.0 - self.ters_tp1)
        # Bellman-backed-up atom positions, clipped to the support range
        t_z = clip_by_value(clipped_rews_tp1 + self.gamma * disc_q_tp1,
                            self.min_v, self.max_v)
        # b is the fractional atom index of each backed-up value
        b = (t_z - self.min_v) / ((self.max_v - self.min_v) / (self.num_bins - 1))
        l = F.floor(b)
        l_mask = F.reshape(F.one_hot(flat(l), (self.num_bins,)),
                           (-1, self.num_bins, self.num_bins))
        u = F.ceil(b)
        u_mask = F.reshape(F.one_hot(flat(u), (self.num_bins,)),
                           (-1, self.num_bins, self.num_bins))
        # distribute probability mass to the lower/upper neighbouring atoms
        # proportionally to the distance from each
        m_l = expand_last(probs_tp1_best * (1 - (b - l)))
        m_u = expand_last(probs_tp1_best * (b - l))
        m = F.sum(m_l * l_mask + m_u * u_mask, axis=1)
        # projected target distribution: no gradients flow into it
        m.need_grad = False
        # cross-entropy between target and predicted distributions
        # (epsilon guards log(0))
        self.loss = -F.mean(F.sum(m * F.log(probs_t_selected + 1e-10), axis=1))
        # optimizer
        self.solver = S.RMSprop(self.lr, 0.95, 1e-2)
        # weights and biases
        with nn.parameter_scope('q_func'):
            self.params = nn.get_parameters()
        with nn.parameter_scope('target_q_func'):
            self.target_params = nn.get_parameters()
        # set q function parameters to solver
        self.solver.set_parameters(self.params)
def main(args):
    """Wire up environment, model, replay buffer and exploration, then train."""
    if args.gpu:
        ctx = get_extension_context('cudnn', device_id=str(args.device))
        nn.set_default_context(ctx)
    # atari environment (training is episodic; evaluation uses a fixed
    # seed of 50 and non-episodic lives)
    env = AtariWrapper(gym.make(args.env), args.seed, episodic=True)
    eval_env = AtariWrapper(gym.make(args.env), 50, episodic=False)
    num_actions = env.action_space.n
    # action-value function built with neural network
    model = CategoricalDQN(q_function, num_actions, args.min_v, args.max_v,
                           args.num_bins, args.batch_size, args.gamma, args.lr)
    if args.load is not None:
        nn.load_parameters(args.load)
    # sync the target network with the (possibly loaded) online network
    model.update_target()
    buffer = ReplayBuffer(args.buffer_size, args.batch_size)
    exploration = LinearlyDecayEpsilonGreedy(num_actions, args.epsilon, 0.1,
                                             args.schedule_duration)
    monitor = prepare_monitor(args.logdir)
    update_fn = update(model, buffer, args.target_update_interval)
    eval_fn = evaluate(eval_env, model, render=args.render)
    train(env, model, buffer, exploration, monitor, update_fn, eval_fn,
          args.final_step, args.update_start, args.update_interval,
          args.save_interval, args.evaluate_interval, ['loss'])
if __name__ == '__main__':
    # Command-line entry point: defaults follow the standard Atari DQN setup.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--lr', type=float, default=2.5e-4)
    parser.add_argument('--buffer-size', type=int, default=10 ** 5)
    parser.add_argument('--epsilon', type=float, default=1.0)
    parser.add_argument('--schedule-duration', type=int, default=10 ** 6)
    parser.add_argument('--final-step', type=int, default=10 ** 7)
    parser.add_argument('--target-update-interval', type=int, default=10 ** 4)
    parser.add_argument('--update-start', type=int, default=5 * 10 ** 4)
    parser.add_argument('--update-interval', type=int, default=4)
    parser.add_argument('--evaluate-interval', type=int, default=10 ** 6)
    parser.add_argument('--save-interval', type=int, default=10 ** 6)
    # C51 distribution support: 51 atoms spanning [-10, 10]
    parser.add_argument('--min-v', type=float, default=-10.0)
    parser.add_argument('--max-v', type=float, default=10.0)
    parser.add_argument('--num-bins', type=int, default=51)
    parser.add_argument('--logdir', type=str, default='categorical_dqn')
    parser.add_argument('--load', type=str)
    # was default='0' (a string) with type=int; use an int default so the
    # declared type and the default agree
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--gpu', action='store_true')
    parser.add_argument('--render', action='store_true')
    args = parser.parse_args()
    main(args)
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.functions as F
import nnabla.solvers as S
import argparse
import gym
from nnabla.ext_utils import get_extension_context
from common.buffer import ReplayBuffer
from common.log import prepare_monitor
from common.experiment import evaluate, train
from common.exploration import LinearlyDecayEpsilonGreedy
from common.helper import clip_by_value
from common.network import nature_head
from common.env import AtariWrapper
from dqn import DQN, update
def q_function(obs, num_actions, min_v, max_v, num_bins, scope):
with nn.parameter_scope(scope):
out = nature_head(obs)
out = PF.affine(out, num_actions * num_bins, name='output')
out = F.reshape(out, (-1, num_actions, num_bins))
probs = F.exp(out) / F.sum(F.exp(out), axis=2, keepdims=True)
dists = F.arange(0, num_bins) * (max_v - min_v) / (num_bins - 1) + min_v
values = F.sum(probs * F.reshape(dists, (1, 1, num_bins)), axis=2)
return values, probs, F.reshape(dists, (-1, 1))
class CategoricalDQN(DQN):
def __init__(self,
q_function,
num_actions,
min_v,
max_v,
num_bins,
batch_size,
gamma,
lr):
self.min_v = min_v
self.max_v = max_v
self.num_bins = num_bins
super().__init__(q_function, num_actions, batch_size, gamma, lr)
def _build(self):
# infer variable
self.infer_obs_t = infer_obs_t = nn.Variable((1, 4, 84, 84))
# inference output
self.infer_q_t,\
self.infer_probs_t, _ = self.q_function(infer_obs_t, self.num_actions,
self.min_v, self.max_v,
self.num_bins, 'q_func')
self.infer_t = F.sink(self.infer_q_t, self.infer_probs_t)
# train variables
self.obss_t = nn.Variable((self.batch_size, 4, 84, 84))
self.acts_t = nn.Variable((self.batch_size, 1))
self.rews_tp1 = nn.Variable((self.batch_size, 1))
self.obss_tp1 = nn.Variable((self.batch_size, 4, 84, 84))
self.ters_tp1 = nn.Variable((self.batch_size, 1))
# training output
q_t, probs_t, dists = self.q_function(self.obss_t, self.num_actions,
self.min_v, self.max_v,
self.num_bins, 'q_func')
q_tp1, probs_tp1, _ = self.q_function(self.obss_tp1, self.num_actions,
self.min_v, self.max_v,
self.num_bins, 'target_q_func')
expand_last = lambda x: F.reshape(x, x.shape + (1,))
flat = lambda x: F.reshape(x, (-1, 1))
# extract selected dimension
a_t_one_hot = expand_last(F.one_hot(self.acts_t, (self.num_actions,)))
probs_t_selected = F.max(probs_t * a_t_one_hot, axis=1)
# extract max dimension
_, indices = F.max(q_tp1, axis=1, keepdims=True, with_index=True)
a_tp1_one_hot = expand_last(F.one_hot(indices, (self.num_actions,)))
probs_tp1_best = F.max(probs_tp1 * a_tp1_one_hot, axis=1)
# clipping reward
clipped_rews_tp1 = clip_by_value(self.rews_tp1, -1.0, 1.0)
disc_q_tp1 = F.reshape(dists, (1, -1)) * (1.0 - self.ters_tp1)
t_z = clip_by_value(clipped_rews_tp1 + self.gamma * disc_q_tp1,
self.min_v, self.max_v)
# update indices
b = (t_z - self.min_v) / ((self.max_v - self.min_v) / (self.num_bins - 1))
l = F.floor(b)
l_mask = F.reshape(F.one_hot(flat(l), (self.num_bins,)),
(-1, self.num_bins, self.num_bins))
u = F.ceil(b)
u_mask = F.reshape(F.one_hot(flat(u), (self.num_bins,)),
(-1, self.num_bins, self.num_bins))
m_l = expand_last(probs_tp1_best * (1 - (b - l)))
m_u = expand_last(probs_tp1_best * (b - l))
m = F.sum(m_l * l_mask + m_u * u_mask, axis=1)
m.need_grad = False
self.loss = -F.mean(F.sum(m * F.log(probs_t_selected + 1e-10), axis=1))
# optimizer
self.solver = S.RMSprop(self.lr, 0.95, 1e-2)
# weights and biases
with nn.parameter_scope('q_func'):
self.params = nn.get_parameters()
with nn.parameter_scope('target_q_func'):
self.target_params = nn.get_parameters()
# set q function parameters to solver
self.solver.set_parameters(self.params)
def main(args):
if args.gpu:
ctx = get_extension_context('cudnn', device_id=str(args.device))
nn.set_default_context(ctx)
# atari environment
env = AtariWrapper(gym.make(args.env), args.seed, episodic=True)
eval_env = AtariWrapper(gym.make(args.env), 50, episodic=False)
num_actions = env.action_space.n
# action-value function built with neural network
model = CategoricalDQN(q_function, num_actions, args.min_v, args.max_v,
args.num_bins, args.batch_size, args.gamma, args.lr)
if args.load is not None:
nn.load_parameters(args.load)
model.update_target()
buffer = ReplayBuffer(args.buffer_size, args.batch_size)
exploration = LinearlyDecayEpsilonGreedy(num_actions, args.epsilon, 0.1,
args.schedule_duration)
monitor = prepare_monitor(args.logdir)
update_fn = update(model, buffer, args.target_update_interval)
eval_fn = evaluate(eval_env, model, render=args.render)
train(env, model, buffer, exploration, monitor, update_fn, eval_fn,
args.final_step, args.update_start, args.update_interval,
args.save_interval, args.evaluate_interval, ['loss'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--lr', type=float, default=2.5e-4)
parser.add_argument('--buffer-size', type=int, default=10 ** 5)
parser.add_argument('--epsilon', type=float, default=1.0)
parser.add_argument('--schedule-duration', type=int, default=10 ** 6)
parser.add_argument('--final-step', type=int, default=10 ** 7)
parser.add_argument('--target-update-interval', type=int, default=10 ** 4)
parser.add_argument('--update-start', type=int, default=5 * 10 ** 4)
parser.add_argument('--update-interval', type=int, default=4)
parser.add_argument('--evaluate-interval', type=int, default=10 ** 6)
parser.add_argument('--save-interval', type=int, default=10 ** 6)
parser.add_argument('--min-v', type=float, default=-10.0)
parser.add_argument('--max-v', type=float, default=10.0)
parser.add_argument('--num-bins', type=int, default=51)
parser.add_argument('--logdir', type=str, default='categorical_dqn')
parser.add_argument('--load', type=str)
parser.add_argument('--device', type=int, default='0')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--render', action='store_true')
args = parser.parse_args()
main(args) | 0.764628 | 0.370567 |
import os
import django_heroku
import dj_database_url
from decouple import config, Csv

# "dev" selects the local Postgres settings below; anything else uses the
# DATABASE_URL-based (Heroku) configuration.
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
# Cast to bool: config() returns a raw string, and ANY non-empty string
# (including "False") is truthy — without the cast DEBUG was always on.
DEBUG = config('DEBUG', default=False, cast=bool)

# development
if MODE == "dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': config('DB_HOST'),
            'PORT': '',
        }
    }
# production
else:
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }

# NOTE(review): this runs in BOTH modes, so a DATABASE_URL env var will
# override the dev settings above — confirm that is intended.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# NOTE(review): empty ALLOWED_HOSTS rejects all hosts when DEBUG is off;
# django_heroku is imported above but never applied in this file
# (django_heroku.settings(locals()) would normally populate this) — confirm.
ALLOWED_HOSTS = []

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Application definition
INSTALLED_APPS = [
    'social_django',
    'crispy_forms',
    'instapp.apps.InstappConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

CRISPY_TEMPLATE_PACK = 'bootstrap4'

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'social_django.middleware.SocialAuthExceptionMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]

ROOT_URLCONF = 'instapro.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # social_django processors expose backend/login info to templates
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
        },
    },
]

WSGI_APPLICATION = 'instapro.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'instagram',
#         'USER':'carine',
#         'PASSWORD':'<PASSWORD>',
#     }
# }

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Social (OAuth) logins plus the default username/password backend.
AUTHENTICATION_BACKENDS = (
    'social_core.backends.github.GithubOAuth2',
    'social_core.backends.twitter.TwitterOAuth',
    'social_core.backends.facebook.FacebookOAuth2',
    'social_core.backends.google.GoogleOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)

# GOOGLE_OAUTH2_KEY = '381056630805-vbei9ftenvqhac060q09ebkaajrkje94.apps.googleusercontent.com'
# GOOGLE_OAUTH2_SECRET = 'QRTaCa-iJmzvdibGVlia6P63'

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'login'

SOCIAL_AUTH_GITHUB_KEY = config('SOCIAL_AUTH_GITHUB_KEY')
SOCIAL_AUTH_GITHUB_SECRET = config('SOCIAL_AUTH_GITHUB_SECRET')
SOCIAL_AUTH_TWITTER_KEY = config('SOCIAL_AUTH_TWITTER_KEY')
SOCIAL_AUTH_TWITTER_SECRET = config('SOCIAL_AUTH_TWITTER_SECRET')
SOCIAL_AUTH_FACEBOOK_KEY = config('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = config('SOCIAL_AUTH_FACEBOOK_SECRET')
# NOTE(review): the google-oauth2 backend conventionally reads
# SOCIAL_AUTH_GOOGLE_OAUTH2_KEY/SECRET — these GOOGLE_* names may be
# ignored by social-auth; confirm against the library version in use.
SOCIAL_AUTH_GOOGLE_KEY = config('SOCIAL_AUTH_GOOGLE_KEY')
SOCIAL_AUTH_GOOGLE_SECRET = config('SOCIAL_AUTH_GOOGLE_SECRET')
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
# DEBUG = config('DEBUG', default=False, cast=bool)
DEBUG = config('DEBUG')
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS= []
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
INSTALLED_APPS = [
'social_django',
'crispy_forms',
'instapp.apps.InstappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'instapro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'instapro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'instagram',
# 'USER':'carine',
# 'PASSWORD':'<PASSWORD>',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.github.GithubOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# GOOGLE_OAUTH2_KEY = '381056630805-vbei9ftenvqhac060q09ebkaajrkje94.apps.googleusercontent.com'
# GOOGLE_OAUTH2_SECRET = 'QRTaCa-iJmzvdibGVlia6P63'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'login'
SOCIAL_AUTH_GITHUB_KEY = config('SOCIAL_AUTH_GITHUB_KEY')
SOCIAL_AUTH_GITHUB_SECRET = config('SOCIAL_AUTH_GITHUB_SECRET')
SOCIAL_AUTH_TWITTER_KEY = config('SOCIAL_AUTH_TWITTER_KEY')
SOCIAL_AUTH_TWITTER_SECRET = config('SOCIAL_AUTH_TWITTER_SECRET')
SOCIAL_AUTH_FACEBOOK_KEY = config('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = config('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_GOOGLE_KEY = config('SOCIAL_AUTH_GOOGLE_KEY')
SOCIAL_AUTH_GOOGLE_SECRET = config('SOCIAL_AUTH_GOOGLE_SECRET') | 0.221182 | 0.043063 |
import sys
import unittest
from dynd import nd, ndt
# Python 3 has no separate 'unicode' type; alias it to str so the tests
# below can refer to 'unicode' on both major versions.
if sys.version_info >= (3, 0):
    unicode = str
@unittest.skip('Test disabled since callables were reworked')
class TestUnicode(unittest.TestCase):
    """str/bytes/unicode round-trips through nd.array construction."""

    def test_array_string(self):
        a = nd.array("Testing 1 2 3")
        self.assertEqual(nd.type_of(a), ndt.string)
        self.assertEqual(str(a),
                         'nd.array("Testing 1 2 3",\n type="string")')
        # self.assertEqual(unicode(a), u"Testing 1 2 3")

    def test_bytes_string(self):
        # NOTE(review): indentation reconstructed — the early return appears
        # to skip this test on Python 3, leaving the lines after it as a
        # deliberately unreachable placeholder; confirm against upstream.
        if sys.version_info >= (3, 0):
            return
            # This needs to be fixed for Python 3
            a = nd.array(b"Testing 1 2 3")
            b = nd.array([b"First", b"Second"])
        else:
            # In Python 2, str and bytes are the same,
            # so we have to manually request a bytes type
            a = nd.array(b"Testing 1 2 3", type=ndt.bytes)
            b = nd.array([b"First", b"Second"], type=ndt.make_fixed_dim(2, ndt.bytes))
        self.assertEqual(nd.type_of(a), ndt.bytes)
        self.assertEqual(nd.dtype_of(b), ndt.bytes)
        self.assertEqual(nd.as_py(a), b"Testing 1 2 3")
        self.assertEqual(nd.as_py(b), [b"First", b"Second"])

    def test_array_unicode(self):
        a = nd.array(u"\uc548\ub155")
        b = nd.array([u"\uc548\ub155", u"Hello"])
        self.assertEqual(nd.type_of(a), ndt.string)
        self.assertEqual(nd.dtype_of(b), ndt.string)
        # self.assertEqual(unicode(a), u"\uc548\ub155")
        self.assertEqual(nd.as_py(b), [u"\uc548\ub155", u"Hello"])
        # In Python 2, 'str' is not unicode
        # if sys.version_info < (3, 0):
        #     self.assertRaises(UnicodeEncodeError, str, a)

    # def test_ascii_decode_error(self):
    #     a = nd.array(128, type=ndt.uint8).view_scalars("fixed_string[1,'A']")
    #     self.assertRaises(UnicodeDecodeError, a.cast("string").eval)
@unittest.skip('Test disabled since callables were reworked')
class TestEncodings(unittest.TestCase):
    """Checks that dynd string types report their encoding correctly."""

    encodings = ["ascii", "utf8", "utf16", "utf32", "ucs2"]

    def test_string_encoding(self):
        # The default variable-length string type is UTF-8.
        default_string = ndt.type("string")
        self.assertEqual(default_string.encoding, "utf8")

    def test_fixed_string_encoding(self):
        # Every supported encoding round-trips through the type constructor.
        for enc in self.encodings:
            fixed = ndt.type("fixed_string[10, '%s']" % enc)
            self.assertEqual(fixed.encoding, enc)
# Allow running this test module directly with verbose output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
import unittest
from dynd import nd, ndt
if sys.version_info >= (3, 0):
unicode = str
@unittest.skip('Test disabled since callables were reworked')
class TestUnicode(unittest.TestCase):
def test_array_string(self):
a = nd.array("Testing 1 2 3")
self.assertEqual(nd.type_of(a), ndt.string)
self.assertEqual(str(a),
'nd.array("Testing 1 2 3",\n type="string")')
# self.assertEqual(unicode(a), u"Testing 1 2 3")
def test_bytes_string(self):
if sys.version_info >= (3, 0):
return
# This needs to be fixed for Python 3
a = nd.array(b"Testing 1 2 3")
b = nd.array([b"First", b"Second"])
else:
# In Python 2, str and bytes are the same,
# so we have to manually request a bytes type
a = nd.array(b"Testing 1 2 3", type=ndt.bytes)
b = nd.array([b"First", b"Second"], type=ndt.make_fixed_dim(2, ndt.bytes))
self.assertEqual(nd.type_of(a), ndt.bytes)
self.assertEqual(nd.dtype_of(b), ndt.bytes)
self.assertEqual(nd.as_py(a), b"Testing 1 2 3")
self.assertEqual(nd.as_py(b), [b"First", b"Second"])
def test_array_unicode(self):
a = nd.array(u"\uc548\ub155")
b = nd.array([u"\uc548\ub155", u"Hello"])
self.assertEqual(nd.type_of(a), ndt.string)
self.assertEqual(nd.dtype_of(b), ndt.string)
# self.assertEqual(unicode(a), u"\uc548\ub155")
self.assertEqual(nd.as_py(b), [u"\uc548\ub155", u"Hello"])
# In Python 2, 'str' is not unicode
# if sys.version_info < (3, 0):
# self.assertRaises(UnicodeEncodeError, str, a)
# def test_ascii_decode_error(self):
# a = nd.array(128, type=ndt.uint8).view_scalars("fixed_string[1,'A']")
# self.assertRaises(UnicodeDecodeError, a.cast("string").eval)
@unittest.skip('Test disabled since callables were reworked')
class TestEncodings(unittest.TestCase):
encodings = ["ascii", "utf8", "utf16", "utf32", "ucs2"]
def test_string_encoding(self):
t = ndt.type("string")
self.assertEqual(t.encoding, "utf8")
def test_fixed_string_encoding(self):
for x in self.encodings:
t = ndt.type("fixed_string[10, '%s']" % x)
self.assertEqual(t.encoding, x)
if __name__ == '__main__':
unittest.main(verbosity=2) | 0.362518 | 0.497437 |
from functools import partial
import yaml
from galaxy import model
from galaxy.model import mapping
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.util import bunch
class MockTrans(object):
    """Minimal stand-in for a Galaxy transaction used by workflow tests."""

    def __init__(self):
        self.app = TestApp()
        self.sa_session = self.app.model.context
        # created lazily by the `user` property on first access
        self._user = None

    def save_workflow(self, workflow):
        """Wrap `workflow` in a StoredWorkflow owned by the test user and persist it."""
        stored_workflow = model.StoredWorkflow()
        stored_workflow.latest_workflow = workflow
        workflow.stored_workflow = stored_workflow
        stored_workflow.user = self.user
        self.sa_session.add(stored_workflow)
        self.sa_session.flush()
        return stored_workflow

    @property
    def user(self):
        # Lazily create a throwaway user for ownership of saved workflows.
        if self._user is None:
            self._user = model.User(
                email="<EMAIL>",
                password="password"
            )
        return self._user
class TestApp(object):
    """Minimal Galaxy app double: in-memory DB plus stub toolbox/registry."""

    def __init__(self):
        self.config = bunch.Bunch(
            tool_secret="awesome_secret",
        )
        # In-memory SQLite model so tests need no external database.
        self.model = mapping.init(
            "/tmp",
            "sqlite:///:memory:",
            create_tables=True
        )
        self.toolbox = TestToolbox()
        self.datatypes_registry = TestDatatypesRegistry()
        self.security = IdEncodingHelper(id_secret="testing")
class TestDatatypesRegistry(object):
    """Trivial datatypes-registry double for workflow tests."""

    def __init__(self):
        pass

    def get_datatype_by_extension(self, ext):
        # The real registry resolves an extension to a datatype object;
        # for these tests the extension value itself suffices.
        return ext
class TestToolbox(object):
    """In-memory toolbox double: a plain id -> tool mapping with lookups."""

    def __init__(self):
        self.tools = {}

    def get_tool(self, tool_id, tool_version=None, exact=False, tool_uuid=None):
        # Like the real toolbox, an unknown id yields None instead of raising.
        return self.tools.get(tool_id, None)

    def get_tool_id(self, tool_id):
        # Resolve the tool first; propagate a falsy/missing tool unchanged.
        tool = self.get_tool(tool_id)
        return tool.id if tool else tool
def yaml_to_model(has_dict, id_offset=100):
    """Build a model.Workflow from a YAML string or pre-parsed dict.

    Steps get `order_index` (list position) and `id` (allocated from
    `id_offset`) when absent; `subworkflow` steps recurse into this
    function. Raises NotImplementedError for the legacy
    `input_connections` key; asserts every step declares a `type`.
    """
    if isinstance(has_dict, str):
        has_dict = yaml.safe_load(has_dict)
    workflow = model.Workflow()
    workflow.steps = []
    for i, step in enumerate(has_dict.get("steps", [])):
        workflow_step = model.WorkflowStep()
        if "order_index" not in step:
            step["order_index"] = i
        if "id" not in step:
            # Fixed-offset ids, just to test against the assumption order_index != id
            step["id"] = id_offset
            id_offset += 1
        step_type = step.get("type", None)
        assert step_type is not None
        if step_type == "subworkflow":
            # Build the subworkflow first so connections below can
            # reference its steps by index.
            subworkflow_dict = step["subworkflow"]
            del step["subworkflow"]
            subworkflow = yaml_to_model(subworkflow_dict, id_offset=id_offset)
            step["subworkflow"] = subworkflow
            id_offset += len(subworkflow.steps)
        for key, value in step.items():
            if key == "input_connections":
                raise NotImplementedError()
            if key == "inputs":
                inputs = []
                for input_name, input_def in value.items():
                    step_input = model.WorkflowStepInput(workflow_step)
                    step_input.name = input_name
                    connections = []
                    for conn_dict in input_def.get("connections", []):
                        conn = model.WorkflowStepConnection()
                        for conn_key, conn_value in conn_dict.items():
                            # "@output_step" is an index into the steps
                            # built so far; resolve it to the step object.
                            if conn_key == "@output_step":
                                target_step = workflow.steps[conn_value]
                                conn_value = target_step
                                conn_key = "output_step"
                            # "@input_subworkflow_step" indexes into this
                            # step's own subworkflow.
                            if conn_key == "@input_subworkflow_step":
                                conn_value = step["subworkflow"].step_by_index(conn_value)
                                conn_key = "input_subworkflow_step"
                            setattr(conn, conn_key, conn_value)
                        connections.append(conn)
                    step_input.connections = connections
                    inputs.append(step_input)
                value = inputs
            if key == "workflow_outputs":
                value = [partial(_dict_to_workflow_output, workflow_step)(_) for _ in value]
            if key == 'collection_type':
                # The real model stores collection_type inside tool_inputs.
                key = 'tool_inputs'
                value = {'collection_type': value}
            setattr(workflow_step, key, value)
        workflow.steps.append(workflow_step)
    return workflow
def _dict_to_workflow_output(workflow_step, as_dict):
    """Create a WorkflowOutput for `workflow_step`, copying `as_dict` entries onto it as attributes."""
    output = model.WorkflowOutput(workflow_step)
    for key, value in as_dict.items():
        setattr(output, key, value)
    return output
import yaml
from galaxy import model
from galaxy.model import mapping
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.util import bunch
class MockTrans(object):
def __init__(self):
self.app = TestApp()
self.sa_session = self.app.model.context
self._user = None
def save_workflow(self, workflow):
stored_workflow = model.StoredWorkflow()
stored_workflow.latest_workflow = workflow
workflow.stored_workflow = stored_workflow
stored_workflow.user = self.user
self.sa_session.add(stored_workflow)
self.sa_session.flush()
return stored_workflow
@property
def user(self):
if self._user is None:
self._user = model.User(
email="<EMAIL>",
password="password"
)
return self._user
class TestApp(object):
def __init__(self):
self.config = bunch.Bunch(
tool_secret="awesome_secret",
)
self.model = mapping.init(
"/tmp",
"sqlite:///:memory:",
create_tables=True
)
self.toolbox = TestToolbox()
self.datatypes_registry = TestDatatypesRegistry()
self.security = IdEncodingHelper(id_secret="testing")
class TestDatatypesRegistry(object):
def __init__(self):
pass
def get_datatype_by_extension(self, ext):
return ext
class TestToolbox(object):
def __init__(self):
self.tools = {}
def get_tool(self, tool_id, tool_version=None, exact=False, tool_uuid=None):
# Real tool box returns None of missing tool also
return self.tools.get(tool_id, None)
def get_tool_id(self, tool_id):
tool = self.get_tool(tool_id)
return tool and tool.id
def yaml_to_model(has_dict, id_offset=100):
if isinstance(has_dict, str):
has_dict = yaml.safe_load(has_dict)
workflow = model.Workflow()
workflow.steps = []
for i, step in enumerate(has_dict.get("steps", [])):
workflow_step = model.WorkflowStep()
if "order_index" not in step:
step["order_index"] = i
if "id" not in step:
# Fixed Offset ids just to test against assuption order_index != id
step["id"] = id_offset
id_offset += 1
step_type = step.get("type", None)
assert step_type is not None
if step_type == "subworkflow":
subworkflow_dict = step["subworkflow"]
del step["subworkflow"]
subworkflow = yaml_to_model(subworkflow_dict, id_offset=id_offset)
step["subworkflow"] = subworkflow
id_offset += len(subworkflow.steps)
for key, value in step.items():
if key == "input_connections":
raise NotImplementedError()
if key == "inputs":
inputs = []
for input_name, input_def in value.items():
step_input = model.WorkflowStepInput(workflow_step)
step_input.name = input_name
connections = []
for conn_dict in input_def.get("connections", []):
conn = model.WorkflowStepConnection()
for conn_key, conn_value in conn_dict.items():
if conn_key == "@output_step":
target_step = workflow.steps[conn_value]
conn_value = target_step
conn_key = "output_step"
if conn_key == "@input_subworkflow_step":
conn_value = step["subworkflow"].step_by_index(conn_value)
conn_key = "input_subworkflow_step"
setattr(conn, conn_key, conn_value)
connections.append(conn)
step_input.connections = connections
inputs.append(step_input)
value = inputs
if key == "workflow_outputs":
value = [partial(_dict_to_workflow_output, workflow_step)(_) for _ in value]
if key == 'collection_type':
key = 'tool_inputs'
value = {'collection_type': value}
setattr(workflow_step, key, value)
workflow.steps.append(workflow_step)
return workflow
def _dict_to_workflow_output(workflow_step, as_dict):
    """Build a WorkflowOutput attached to *workflow_step* from a plain dict."""
    output = model.WorkflowOutput(workflow_step)
    for attr_name, attr_value in as_dict.items():
        setattr(output, attr_name, attr_value)
    return output
import argparse
import logging
import os
import sys

from .client import Client
from .config import load_config

LOG_FILE = "mallard.log"
LOG_FILE_MODE = "w"
LOG_FORMAT = "[%(levelname)s] %(name)s: %(message)s"
LOG_DATE_FORMAT = "[%d/%m/%Y %H:%M:%S]"

if __name__ == "__main__":
    # Parse arguments
    argparser = argparse.ArgumentParser(description="DuckDuckGo Discord bot")
    argparser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Be quiet: only output errors",
    )
    argparser.add_argument(
        "-d",
        "--debug",
        "--verbose",
        dest="debug",
        action="store_true",
        help="Enable debug logging.",
    )
    argparser.add_argument(
        "-D",
        "--ddg-logs",
        dest="ddg_logs",
        action="store_true",
        help="Enable logs for the DuckDuckGo library.",
    )
    argparser.add_argument(
        "-C",
        "--color",
        "--colour",
        dest="color",
        help="Override the embed color used by the bot.",
    )
    argparser.add_argument(
        "-l",
        "--ratelimit-log",
        dest="ratelimit_log",
        help="Override the ratelimit log file used by the bot.",
    )
    argparser.add_argument(
        "-T", "--token", dest="token", help="Override the bot token used to log in."
    )
    argparser.add_argument(
        "config_file", help="Specify a configuration file to use. Keep it secret!"
    )
    args = argparser.parse_args()

    # Set up logging: always write to the log file; mirror to stdout
    # unless --quiet was given.
    log_fmtr = logging.Formatter(LOG_FORMAT, datefmt=LOG_DATE_FORMAT)
    log_hndl = logging.FileHandler(filename=LOG_FILE, mode=LOG_FILE_MODE)
    log_hndl.setFormatter(log_fmtr)
    log_level = logging.DEBUG if args.debug else logging.INFO
    main_logger = logging.getLogger("mallard")
    main_logger.setLevel(log_level)
    main_logger.addHandler(log_hndl)
    if args.ddg_logs:
        ddg_logger = logging.getLogger("duckduckgo")
        ddg_logger.setLevel(log_level)
        ddg_logger.addHandler(log_hndl)
    if not args.quiet:
        log_out_hndl = logging.StreamHandler(sys.stdout)
        log_out_hndl.setFormatter(log_fmtr)
        main_logger.addHandler(log_out_hndl)
        if args.ddg_logs:
            ddg_logger.addHandler(log_out_hndl)

    # Get configuration; command-line flags override file values.
    config = load_config(args.config_file)
    if args.color is not None:
        config["color"] = args.color
    if args.token is not None:
        config["bot"]["token"] = args.token
    if args.ratelimit_log is not None:
        config["ratelimit"]["log"] = args.ratelimit_log

    # Special logging.
    # BUG FIX: ratelimit_handle was previously only assigned inside the
    # `path is not None` branch, so Client(...) below raised NameError
    # whenever no ratelimit log was configured. Default it to None.
    ratelimit_handle = None
    path = config["ratelimit"]["log"]
    if path is not None:
        if not os.path.isfile(path):
            # New log file: write the CSV header first.
            ratelimit_handle = open(path, "w")
            ratelimit_handle.write("guild_id,user_id\n")
        else:
            ratelimit_handle = open(path, "a")

    # Create and run client (handle ownership passes to the client).
    client = Client(config, ratelimit_handle)
    client.run(config["bot"]["token"])
# NOTE(review): this region is a verbatim duplicate of the mallard
# __main__ script above (dataset extraction artifact); kept unchanged.
# Known issue (fixed in the primary copy): ratelimit_handle is unbound
# when config["ratelimit"]["log"] is None, causing a NameError below.
import argparse
import logging
import os
import sys

from .client import Client
from .config import load_config

LOG_FILE = "mallard.log"
LOG_FILE_MODE = "w"
LOG_FORMAT = "[%(levelname)s] %(name)s: %(message)s"
LOG_DATE_FORMAT = "[%d/%m/%Y %H:%M:%S]"

if __name__ == "__main__":
    # Parse arguments
    argparser = argparse.ArgumentParser(description="DuckDuckGo Discord bot")
    argparser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Be quiet: only output errors",
    )
    argparser.add_argument(
        "-d",
        "--debug",
        "--verbose",
        dest="debug",
        action="store_true",
        help="Enable debug logging.",
    )
    argparser.add_argument(
        "-D",
        "--ddg-logs",
        dest="ddg_logs",
        action="store_true",
        help="Enable logs for the DuckDuckGo library.",
    )
    argparser.add_argument(
        "-C",
        "--color",
        "--colour",
        dest="color",
        help="Override the embed color used by the bot.",
    )
    argparser.add_argument(
        "-l",
        "--ratelimit-log",
        dest="ratelimit_log",
        help="Override the ratelimit log file used by the bot.",
    )
    argparser.add_argument(
        "-T", "--token", dest="token", help="Override the bot token used to log in."
    )
    argparser.add_argument(
        "config_file", help="Specify a configuration file to use. Keep it secret!"
    )
    args = argparser.parse_args()
    # Set up logging
    log_fmtr = logging.Formatter(LOG_FORMAT, datefmt=LOG_DATE_FORMAT)
    log_hndl = logging.FileHandler(filename=LOG_FILE, mode=LOG_FILE_MODE)
    log_hndl.setFormatter(log_fmtr)
    log_level = logging.DEBUG if args.debug else logging.INFO
    main_logger = logging.getLogger("mallard")
    main_logger.setLevel(log_level)
    main_logger.addHandler(log_hndl)
    if args.ddg_logs:
        ddg_logger = logging.getLogger("duckduckgo")
        ddg_logger.setLevel(log_level)
        ddg_logger.addHandler(log_hndl)
    if not args.quiet:
        log_out_hndl = logging.StreamHandler(sys.stdout)
        log_out_hndl.setFormatter(log_fmtr)
        main_logger.addHandler(log_out_hndl)
        if args.ddg_logs:
            ddg_logger.addHandler(log_out_hndl)
    # Get configuration
    config = load_config(args.config_file)
    if args.color is not None:
        config["color"] = args.color
    if args.token is not None:
        config["bot"]["token"] = args.token
    if args.ratelimit_log is not None:
        config["ratelimit"]["log"] = args.ratelimit_log
    # Special logging
    path = config["ratelimit"]["log"]
    if path is not None:
        if not os.path.isfile(path):
            ratelimit_handle = open(path, "w")
            ratelimit_handle.write("guild_id,user_id\n")
        else:
            ratelimit_handle = open(path, "a")
    # Create and run client
    client = Client(config, ratelimit_handle)
    client.run(config["bot"]["token"])
import time
from .__init__ import parse
from . import common
class DebugSink(common.Sink):
    """A Sink that pretty-prints each RCS parser event as it arrives."""

    def set_head_revision(self, revision):
        print(f'head: {revision}')

    def set_principal_branch(self, branch_name):
        print(f'branch: {branch_name}')

    def define_tag(self, name, revision):
        print(f'tag: {name} = {revision}')

    def set_comment(self, comment):
        print(f'comment: {comment}')

    def set_description(self, description):
        print(f'description: {description}')

    def define_revision(self, revision, timestamp, author, state,
                        branches, next):
        print(f'revision: {revision}')
        print(f' timestamp: {timestamp}')
        print(f' author: {author}')
        print(f' state: {state}')
        print(f' branches: {branches}')
        print(f' next: {next}')

    def set_revision_info(self, revision, log, text):
        # Only the first 100 characters of the text are shown.
        print(f'revision: {revision}')
        print(f' log: {log}')
        print(f' text: {text[:100]} ...')
class DumpSink(common.Sink):
    """Dump all the parse information directly to stdout.

    The output is relatively unformatted and untagged. It is intended as a
    raw dump of the data in the RCS file. A copy can be saved, then changes
    made to the parsing engine, then a comparison of the new output against
    the old output.
    """

    def __init__(self):
        # BUG FIX: the historical `sha` module was removed in Python 3.
        # hashlib.sha1 produces identical digests, so the dump output is
        # byte-for-byte unchanged.
        import hashlib
        self._sha1 = hashlib.sha1

    def set_head_revision(self, revision):
        print(revision)

    def set_principal_branch(self, branch_name):
        print(branch_name)

    def define_tag(self, name, revision):
        print(name, revision)

    def set_comment(self, comment):
        print(comment)

    def set_description(self, description):
        print(description)

    def define_revision(self, revision, timestamp, author, state,
                        branches, next):
        print(revision, timestamp, author, state, branches, next)

    def set_revision_info(self, revision, log, text):
        # log/text come from a file opened in binary mode, so they are
        # bytes, which is what sha1 requires.
        print(revision, self._sha1(log).hexdigest(), self._sha1(text).hexdigest())

    def tree_completed(self):
        print('tree_completed')

    def parse_completed(self):
        print('parse_completed')
def dump_file(fname):
    """Parse *fname* and dump its raw contents to stdout via DumpSink."""
    # BUG FIX: the original leaked the file handle; close it deterministically.
    with open(fname, 'rb') as fp:
        parse(fp, DumpSink())
def time_file(fname):
    """Parse *fname* with a no-op Sink and print the elapsed seconds."""
    sink = common.Sink()
    # BUG FIX: close the file when done (the original leaked the handle).
    # The timer still starts after open(), as before, so only parsing is timed.
    with open(fname, 'rb') as fp:
        start = time.time()
        parse(fp, sink)
        elapsed = time.time() - start
    print(elapsed)
def _usage():
print('This is normally a module for importing, but it has a couple')
print('features for testing as an executable script.')
print('USAGE: %s COMMAND filename,v' % sys.argv[0])
print(' where COMMAND is one of:')
print(' dump: filename is "dumped" to stdout')
print(' time: filename is parsed with the time written to stdout')
sys.exit(1)
if __name__ == '__main__':
    import sys
    # Expect exactly: <script> COMMAND filename,v
    if len(sys.argv) != 3:
        _usage()
    command = sys.argv[1]
    filename = sys.argv[2]
    if command == 'dump':
        dump_file(filename)
    elif command == 'time':
        time_file(filename)
    else:
        _usage()
# NOTE(review): verbatim duplicate of the rcsparse debug module above
# (dataset extraction artifact); kept unchanged. `import time` for this
# copy sits on the preceding (fused) line. Known issues, fixed in the
# primary copy: `sha` module is Python 2 only; file handles are leaked;
# `_usage` relies on `sys` imported under the __main__ guard.
from .__init__ import parse
from . import common


class DebugSink(common.Sink):
    def set_head_revision(self, revision):
        print('head:', revision)

    def set_principal_branch(self, branch_name):
        print('branch:', branch_name)

    def define_tag(self, name, revision):
        print('tag:', name, '=', revision)

    def set_comment(self, comment):
        print('comment:', comment)

    def set_description(self, description):
        print('description:', description)

    def define_revision(self, revision, timestamp, author, state,
                        branches, next):
        print('revision:', revision)
        print(' timestamp:', timestamp)
        print(' author:', author)
        print(' state:', state)
        print(' branches:', branches)
        print(' next:', next)

    def set_revision_info(self, revision, log, text):
        print('revision:', revision)
        print(' log:', log)
        print(' text:', text[:100], '...')


class DumpSink(common.Sink):
    """Dump all the parse information directly to stdout.
    The output is relatively unformatted and untagged. It is intended as a
    raw dump of the data in the RCS file. A copy can be saved, then changes
    made to the parsing engine, then a comparison of the new output against
    the old output.
    """
    def __init__(self):
        global sha
        import sha

    def set_head_revision(self, revision):
        print(revision)

    def set_principal_branch(self, branch_name):
        print(branch_name)

    def define_tag(self, name, revision):
        print(name, revision)

    def set_comment(self, comment):
        print(comment)

    def set_description(self, description):
        print(description)

    def define_revision(self, revision, timestamp, author, state,
                        branches, next):
        print(revision, timestamp, author, state, branches, next)

    def set_revision_info(self, revision, log, text):
        print(revision, sha.new(log).hexdigest(), sha.new(text).hexdigest())

    def tree_completed(self):
        print('tree_completed')

    def parse_completed(self):
        print('parse_completed')


def dump_file(fname):
    parse(open(fname, 'rb'), DumpSink())


def time_file(fname):
    f = open(fname, 'rb')
    s = common.Sink()
    t = time.time()
    parse(f, s)
    t = time.time() - t
    print(t)


def _usage():
    print('This is normally a module for importing, but it has a couple')
    print('features for testing as an executable script.')
    print('USAGE: %s COMMAND filename,v' % sys.argv[0])
    print(' where COMMAND is one of:')
    print(' dump: filename is "dumped" to stdout')
    print(' time: filename is parsed with the time written to stdout')
    sys.exit(1)


if __name__ == '__main__':
    import sys
    if len(sys.argv) != 3:
        _usage()
    if sys.argv[1] == 'dump':
        dump_file(sys.argv[2])
    elif sys.argv[1] == 'time':
        time_file(sys.argv[2])
    else:
        _usage()
import pandas
import matplotlib
import matplotlib.backends.backend_pdf
import seaborn
def get_degrees_for_metanode(graph, metanode):
    """
    Return a long-format dataframe with one row per (node, metaedge) pair,
    reporting the degree of each metaedge for each node of kind metanode.
    """
    node_map = graph.get_metanode_to_nodes()
    records = [
        (node.identifier, node.name, str(metaedge), len(edges))
        for node in node_map.get(metanode, [])
        for metaedge, edges in node.edges.items()
    ]
    columns = ["node_id", "node_name", "metaedge", "degree"]
    degree_df = pandas.DataFrame(records, columns=columns)
    return degree_df.sort_values(["node_name", "metaedge"])
def get_metanode_to_degree_df(graph):
    """
    Return a dictionary mapping each metanode to a wide-format dataframe
    of node degrees (one column per metaedge). Metanodes whose long-format
    degree dataframe is empty are omitted.
    """
    result = {}
    for metanode in graph.metagraph.get_nodes():
        long_df = get_degrees_for_metanode(graph, metanode)
        if long_df.empty:
            continue
        wide_df = pandas.pivot_table(
            long_df,
            values="degree",
            index=["node_id", "node_name"],
            columns="metaedge",
        )
        result[metanode] = wide_df.reset_index()
    return result
def degrees_to_excel(graph, path):
    """
    Write node degrees to a multisheet excel spreadsheet. Path should end in
    a valid excel extension that `pandas.ExcelWriter` can detect, such as
    `.xlsx`.
    """
    metanode_to_degree_df = get_metanode_to_degree_df(graph)
    writer = pandas.ExcelWriter(path)
    # One sheet per metanode, named after the metanode.
    for metanode, degree_df in metanode_to_degree_df.items():
        degree_df.to_excel(writer, sheet_name=str(metanode), index=False)
    # Freeze the header row — only supported by the xlsxwriter engine.
    if writer.engine == "xlsxwriter":
        for sheet in writer.sheets.values():
            sheet.freeze_panes(1, 0)
    writer.close()
def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4):
    """
    Plots histograms of the degree distribution of each metaedge
    incident to the metanode. Each metaedge receives a facet in
    a seaborn.FacetGrid. Returns the FacetGrid.
    """
    degree_df = get_degrees_for_metanode(graph, metanode)
    grid = seaborn.FacetGrid(
        degree_df,
        col="metaedge",
        sharex=False,
        sharey=False,
        col_wrap=col_wrap,
        # FIX: the `size` keyword was renamed to `height` (seaborn 0.9);
        # modern seaborn rejects `size` outright.
        height=facet_height,
    )
    # FIX: `distplot` was deprecated and later removed; `histplot` without
    # a KDE is the modern equivalent of distplot(kde=False).
    grid.map(seaborn.histplot, "degree")
    grid.set_titles("{col_name}")
    return grid
def plot_degrees(graph, path):
    """
    Creates a multipage pdf with a page for each metanode showing degree
    distributions.
    """
    # Temporarily disable `figure.max_open_warning`; the try/finally
    # guarantees the pdf is closed and the rcParam restored even if a
    # plot raises (the original leaked both on error).
    max_open = matplotlib.rcParams["figure.max_open_warning"]
    matplotlib.rcParams["figure.max_open_warning"] = 0
    pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path)
    try:
        for metanode in graph.metagraph.get_nodes():
            grid = plot_degrees_for_metanode(graph, metanode)
            grid.savefig(pdf_pages, format="pdf")
    finally:
        pdf_pages.close()
        matplotlib.rcParams["figure.max_open_warning"] = max_open
def get_metanode_df(graph):
rows = list()
for metanode, nodes in graph.get_metanode_to_nodes().items():
series = pandas.Series()
series["metanode"] = metanode
series["abbreviation"] = metanode.abbrev
metaedges = set()
for metaedge in metanode.edges:
metaedges |= {metaedge, metaedge.inverse}
series["metaedges"] = sum([not metaedge.inverted for metaedge in metaedges])
series["nodes"] = len(nodes)
series["unconnected_nodes"] = sum(
not any(node.edges.values()) for node in nodes
)
rows.append(series)
metanode_df = pandas.DataFrame(rows).sort_values("metanode")
return metanode_df
def get_metaedge_df(graph):
rows = list()
for metaedge, edges in graph.get_metaedge_to_edges(exclude_inverts=True).items():
series = pandas.Series()
series["metaedge"] = str(metaedge)
series["abbreviation"] = metaedge.abbrev
series["edges"] = len(edges)
series["source_nodes"] = len(set(edge.source for edge in edges))
series["target_nodes"] = len(set(edge.target for edge in edges))
rows.append(series)
metaedge_df = pandas.DataFrame(rows).sort_values("metaedge")
return metaedge_df
def get_metaedge_style_df(metagraph):
    """
    Get metaedge representations in various styles: plain/unicode strings,
    the canonical (non-inverted) form, abbreviations, endpoints, and an
    0/1 inverted flag.
    """
    rows = []
    for metaedge in metagraph.get_edges(exclude_inverts=False):
        # Canonicalize: an inverted metaedge is represented by its inverse.
        standard = metaedge.inverse if metaedge.inverted else metaedge
        rows.append({
            # FIX: rows were built by mutating an empty pandas.Series(),
            # which is deprecated and slow; plain dicts are equivalent.
            "metaedge": str(metaedge),
            "unicode_metaedge": metaedge.get_unicode_str(),
            "standard_metaedge": str(standard),
            "abbreviation": metaedge.abbrev,
            "standard_abbreviation": metaedge.get_standard_abbrev(),
            "source": str(metaedge.source),
            "target": str(metaedge.target),
            "inverted": int(metaedge.inverted),
        })
    metaedge_style_df = pandas.DataFrame(rows).sort_values("metaedge")
    return metaedge_style_df
# NOTE(review): verbatim duplicate of the hetnetpy stats module above
# (dataset extraction artifact); kept unchanged. `import pandas` for this
# copy sits on the preceding (fused) line. Known issues, fixed in the
# primary copy: deprecated seaborn `size=`/`distplot`, mutated empty
# pandas.Series() row construction, and pdf/rcParam cleanup on error.
import matplotlib
import matplotlib.backends.backend_pdf
import seaborn


def get_degrees_for_metanode(graph, metanode):
    """
    Return a dataframe that reports the degree of each metaedge for
    each node of kind metanode.
    """
    metanode_to_nodes = graph.get_metanode_to_nodes()
    nodes = metanode_to_nodes.get(metanode, [])
    rows = list()
    for node in nodes:
        for metaedge, edges in node.edges.items():
            rows.append((node.identifier, node.name, str(metaedge), len(edges)))
    df = pandas.DataFrame(rows, columns=["node_id", "node_name", "metaedge", "degree"])
    return df.sort_values(["node_name", "metaedge"])


def get_metanode_to_degree_df(graph):
    """
    Return a dictionary of metanode to degree_df, where degree_df is a
    wide-format dataframe of node degrees.
    """
    metanode_to_degree_df = dict()
    for metanode in graph.metagraph.get_nodes():
        degree_df = get_degrees_for_metanode(graph, metanode)
        if degree_df.empty:
            continue
        degree_df = pandas.pivot_table(
            degree_df,
            values="degree",
            index=["node_id", "node_name"],
            columns="metaedge",
        ).reset_index()
        metanode_to_degree_df[metanode] = degree_df
    return metanode_to_degree_df


def degrees_to_excel(graph, path):
    """
    Write node degrees to a multisheet excel spreadsheet. Path should end in
    a valid excel extension that `pandas.ExcelWriter` can detect, such as
    `.xlsx`.
    """
    metanode_to_degree_df = get_metanode_to_degree_df(graph)
    writer = pandas.ExcelWriter(path)
    for metanode, degree_df in metanode_to_degree_df.items():
        degree_df.to_excel(writer, sheet_name=str(metanode), index=False)
    if writer.engine == "xlsxwriter":
        for sheet in writer.sheets.values():
            sheet.freeze_panes(1, 0)
    writer.close()


def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4):
    """
    Plots histograms of the degree distribution of each metaedge
    incident to the metanode. Each metaedge receives a facet in
    a seaborn.FacetGrid.
    """
    degree_df = get_degrees_for_metanode(graph, metanode)
    grid = seaborn.FacetGrid(
        degree_df,
        col="metaedge",
        sharex=False,
        sharey=False,
        col_wrap=col_wrap,
        size=facet_height,
    )
    grid.map(seaborn.distplot, "degree", kde=False)
    grid.set_titles("{col_name}")
    return grid


def plot_degrees(graph, path):
    """
    Creates a multipage pdf with a page for each metanode showing degree
    distributions.
    """
    # Temporarily disable `figure.max_open_warning`
    max_open = matplotlib.rcParams["figure.max_open_warning"]
    matplotlib.rcParams["figure.max_open_warning"] = 0
    pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path)
    for metanode in graph.metagraph.get_nodes():
        grid = plot_degrees_for_metanode(graph, metanode)
        grid.savefig(pdf_pages, format="pdf")
    pdf_pages.close()
    matplotlib.rcParams["figure.max_open_warning"] = max_open


def get_metanode_df(graph):
    rows = list()
    for metanode, nodes in graph.get_metanode_to_nodes().items():
        series = pandas.Series()
        series["metanode"] = metanode
        series["abbreviation"] = metanode.abbrev
        metaedges = set()
        for metaedge in metanode.edges:
            metaedges |= {metaedge, metaedge.inverse}
        series["metaedges"] = sum([not metaedge.inverted for metaedge in metaedges])
        series["nodes"] = len(nodes)
        series["unconnected_nodes"] = sum(
            not any(node.edges.values()) for node in nodes
        )
        rows.append(series)
    metanode_df = pandas.DataFrame(rows).sort_values("metanode")
    return metanode_df


def get_metaedge_df(graph):
    rows = list()
    for metaedge, edges in graph.get_metaedge_to_edges(exclude_inverts=True).items():
        series = pandas.Series()
        series["metaedge"] = str(metaedge)
        series["abbreviation"] = metaedge.abbrev
        series["edges"] = len(edges)
        series["source_nodes"] = len(set(edge.source for edge in edges))
        series["target_nodes"] = len(set(edge.target for edge in edges))
        rows.append(series)
    metaedge_df = pandas.DataFrame(rows).sort_values("metaedge")
    return metaedge_df


def get_metaedge_style_df(metagraph):
    """
    Get metaedge representations in various styles.
    """
    rows = list()
    for metaedge in metagraph.get_edges(exclude_inverts=False):
        series = pandas.Series()
        series["metaedge"] = str(metaedge)
        series["unicode_metaedge"] = metaedge.get_unicode_str()
        series["standard_metaedge"] = str(
            metaedge.inverse if metaedge.inverted else metaedge
        )
        series["abbreviation"] = metaedge.abbrev
        series["standard_abbreviation"] = metaedge.get_standard_abbrev()
        series["source"] = str(metaedge.source)
        series["target"] = str(metaedge.target)
        series["inverted"] = int(metaedge.inverted)
        rows.append(series)
    metaedge_style_df = pandas.DataFrame(rows).sort_values("metaedge")
    return metaedge_style_df
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import math
class GraphicsScene(QGraphicsScene):
    """QGraphicsScene that draws a light/dark background grid for the editor."""

    itemSelected = pyqtSignal()
    itemsDeselected = pyqtSignal()

    def __init__(self, scene, parent=None):
        super().__init__(parent)
        self.scene = scene
        self.setItemIndexMethod(QGraphicsScene.NoIndex)

        # graphics settings: minor grid spacing (px) and how many minor
        # cells make up one major (dark) cell.
        self.gridSize = 20
        self.gridSquares = 5

        self._color_background = QColor("#393939")
        self._color_light = QColor("#343434")
        self._color_dark = QColor("#313131")

        self.pen_light = QPen(self._color_light)
        self.pen_light.setWidth(2)
        self.pen_dark = QPen(self._color_dark)
        self.pen_dark.setWidth(3)

        self.setBackgroundBrush(self._color_background)

    # The drag enter event won't be allowed unless it is overwritten
    # (disabled) with a no-op.
    def dragMoveEvent(self, event):
        pass

    def setGrScene(self, width, height):
        """Center a scene rectangle of the given size on the origin."""
        self.setSceneRect(-width // 2, -height // 2, width, height)

    def drawBackground(self, painter, rect):
        """Draw the minor and major grid lines behind all scene items."""
        super().drawBackground(painter, rect)

        # BUG FIX: the original used ceil(top)/floor(bottom), rounding the
        # exposed rect inward and potentially dropping the first/last
        # horizontal line; round outward on all four sides instead.
        left = int(math.floor(rect.left()))
        right = int(math.ceil(rect.right()))
        top = int(math.floor(rect.top()))
        bottom = int(math.ceil(rect.bottom()))

        # Snap the first line to the grid.
        first_left = left - (left % self.gridSize)
        first_top = top - (top % self.gridSize)

        minor = self.gridSize
        major = self.gridSize * self.gridSquares
        lines_light = []
        lines_dark = []

        # Vertical then horizontal minor lines.
        for x in range(first_left, right, minor):
            lines_light.append(QLine(x, top, x, bottom))
        for y in range(first_top, bottom, minor):
            lines_light.append(QLine(left, y, right, y))
        # Vertical then horizontal major lines.
        for x in range(first_left, right, major):
            lines_dark.append(QLine(x, top, x, bottom))
        for y in range(first_top, bottom, major):
            lines_dark.append(QLine(left, y, right, y))

        # BUG FIX: PyQt5's drawLines(*[]) raises TypeError; guard empties.
        if lines_light:
            painter.setPen(self.pen_light)
            painter.drawLines(*lines_light)
        if lines_dark:
            painter.setPen(self.pen_dark)
            painter.drawLines(*lines_dark)
# NOTE(review): verbatim duplicate of the GraphicsScene module above
# (dataset extraction artifact); kept unchanged. Its
# `from PyQt5.QtWidgets import *` line sits on the preceding (fused)
# line. Known issues, fixed in the primary copy: ceil/floor swapped for
# top/bottom, and drawLines(*[]) raising on an empty line batch.
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import math


class GraphicsScene(QGraphicsScene):
    itemSelected = pyqtSignal()
    itemsDeselected = pyqtSignal()

    def __init__(self, scene, parent=None):
        super().__init__(parent)
        self.scene = scene
        self.setItemIndexMethod(QGraphicsScene.NoIndex)
        # graphics settings
        self.gridSize = 20
        self.gridSquares = 5
        self._color_background = QColor("#393939")
        self._color_light = QColor("#343434")
        self._color_dark = QColor("#313131")
        self.pen_light = QPen(self._color_light)
        self.pen_light.setWidth(2)
        self.pen_dark = QPen(self._color_dark)
        self.pen_dark.setWidth(3)
        self.setBackgroundBrush(self._color_background)

    # The drag enter event wont be allowed unless is overwritten (disabled)
    def dragMoveEvent(self, event):
        pass

    def setGrScene(self, width, height):
        self.setSceneRect(-width//2, -height//2, width, height)

    def drawBackground(self, painter, rect):
        super().drawBackground(painter, rect)
        # draw the grid in the window
        left = int(math.floor(rect.left()))
        right = int(math.ceil(rect.right()))
        top = int(math.ceil(rect.top()))
        bottom= int(math.floor(rect.bottom()))
        first_left = left - (left % self.gridSize)
        first_top = top - (top % self.gridSize)
        # compute the lines to be drawn
        lines_light = []
        lines_dark = []
        # compute vertical lines of the grid
        for x in range(first_left, right, self.gridSize):
            lines_light.append(QLine(x,top,x,bottom))
        # compute horizontal lines of the grid
        for x in range(first_top, bottom, self.gridSize):
            lines_light.append(QLine(left,x,right,x))
        # compute vertical lines of the grid
        for x in range(first_left, right, self.gridSize*self.gridSquares):
            lines_dark.append(QLine(x,top,x,bottom))
        # compute horizontal lines of the grid
        for x in range(first_top, bottom, self.gridSize*self.gridSquares):
            lines_dark.append(QLine(left,x,right,x))
        painter.setPen(self.pen_light)
        painter.drawLines(*lines_light)
        painter.setPen(self.pen_dark)
        painter.drawLines(*lines_dark)
import logging
from uuid import UUID
from typing import Optional, Dict, List, cast
from asyncio import TimeoutError
from asyncio.events import AbstractEventLoop
from CoreBluetooth import ( # type: ignore
CBService,
CBPeripheralManager,
CBMutableCharacteristic,
CBAdvertisementDataLocalNameKey,
CBAdvertisementDataServiceUUIDsKey,
)
from bleak.backends.service import BleakGATTService # type: ignore
from .PeripheralManagerDelegate import PeripheralManagerDelegate # type: ignore
from bless.exceptions import BlessError
from bless.backends.server import BaseBlessServer # type: ignore
from bless.backends.corebluetooth.service import BlessGATTServiceCoreBluetooth
from bless.backends.corebluetooth.characteristic import ( # type: ignore
BlessGATTCharacteristicCoreBluetooth,
)
from bless.backends.characteristic import (
GATTCharacteristicProperties,
GATTAttributePermissions,
)
logger = logging.getLogger(name=__name__)
class BlessServerCoreBluetooth(BaseBlessServer):
    """
    CoreBluetooth Implementation of BlessServer

    This implementation essentially wraps the PeripheralManagerDelegate Class
    from CoreBluetooth

    Attributes
    ----------
    name : str
        The name of the server to advertise
    services : BleakGATTServiceCollection
        A collection of services to be advertised by this server
    peripheral_manager_delegate : PeripheralManagerDelegate
        The delegated class to manage this peripheral device
    """

    def __init__(self, name: str, loop: Optional[AbstractEventLoop] = None, **kwargs):
        super(BlessServerCoreBluetooth, self).__init__(loop=loop, **kwargs)
        self.name: str = name
        self.services: Dict[str, BlessGATTServiceCoreBluetooth] = {}

        self.peripheral_manager_delegate: PeripheralManagerDelegate = (
            PeripheralManagerDelegate.alloc().init()
        )
        # Route central read/write requests to the BaseBlessServer handlers.
        self.peripheral_manager_delegate.read_request_func = self.read_request
        self.peripheral_manager_delegate.write_request_func = self.write_request

    async def start(self, timeout: float = 10, **kwargs):
        """
        Start the server

        Parameters
        ----------
        timeout : float
            Floating point decimal in seconds for how long to wait for the
            on-board bluetooth module to power on
        """
        for service_uuid in self.services:
            bleak_service: BleakGATTService = self.services[service_uuid]
            service_obj: CBService = bleak_service.obj
            logger.debug("Adding service: {}".format(bleak_service.uuid))
            await self.peripheral_manager_delegate.addService(service_obj)

        if not self.read_request_func or not self.write_request_func:
            raise BlessError("Callback functions must be initialized first")

        advertisement_data = {
            CBAdvertisementDataLocalNameKey: self.name,
            CBAdvertisementDataServiceUUIDsKey: list(
                map(lambda x: self.services[x].obj.UUID(), self.services)
            )
        }
        logger.debug("Advertisement Data: {}".format(advertisement_data))
        try:
            await self.peripheral_manager_delegate.startAdvertising_(advertisement_data)
        except TimeoutError:
            # If advertising fails as a result of bluetooth module power
            # cycling or advertisement failure, attempt to start again
            # NOTE(review): this retries without limit (recursively) and
            # re-adds all services each attempt — confirm intended.
            await self.start()

        logger.debug("Advertising...")

    async def stop(self):
        """
        Stop the server
        """
        await self.peripheral_manager_delegate.stopAdvertising()

    async def is_connected(self) -> bool:
        """
        Determine whether there are any connected central devices

        Returns
        -------
        bool
            True if there are central devices that are connected
        """
        n_subscriptions = len(self.peripheral_manager_delegate._central_subscriptions)
        return n_subscriptions > 0

    async def is_advertising(self) -> bool:
        """
        Determine whether the service is advertising

        Returns
        -------
        bool
            True if advertising
        """
        return self.peripheral_manager_delegate.is_advertising() == 1

    async def add_new_service(self, uuid: str):
        """
        Add a service and all it's characteristics to be advertised

        Parameters
        ----------
        uuid : str
            The string representation of the UUID of the service to be added
        """
        logger.debug("Creating a new service with uuid: {}".format(uuid))
        service: BlessGATTServiceCoreBluetooth = BlessGATTServiceCoreBluetooth(uuid)
        await service.init()
        self.services[service.uuid] = service

    async def add_new_characteristic(
        self,
        service_uuid: str,
        char_uuid: str,
        properties: GATTCharacteristicProperties,
        value: Optional[bytearray],
        permissions: GATTAttributePermissions,
    ):
        """
        Generate a new characteristic to be associated with the server

        Parameters
        ----------
        service_uuid: str
            The string representation of the UUID for the service associated
            with the characteristic to be added
        char_uuid : str
            The string representation of the UUID for the characteristic to be
            added
        properties : GATTCharacteristicProperties
            The flags for the characteristic
        value : Optional[bytearray]
            The initial value for the characteristic
        permissions : GATTAttributePermissions
            The permissions for the characteristic
        """
        # Normalize to the canonical UUID string used as the services key.
        service_uuid = str(UUID(service_uuid))
        # FIX: corrected "Craeting" typo in the debug message.
        logger.debug("Creating a new characteristic with uuid: {}".format(char_uuid))
        characteristic: BlessGATTCharacteristicCoreBluetooth = (
            BlessGATTCharacteristicCoreBluetooth(
                char_uuid, properties, permissions, value
            )
        )
        await characteristic.init()
        service: BlessGATTServiceCoreBluetooth = self.services[service_uuid]
        service.add_characteristic(characteristic)
        characteristics: List[CBMutableCharacteristic] = [
            characteristic.obj for characteristic in service.characteristics
        ]
        service.obj.setCharacteristics_(characteristics)

    def update_value(self, service_uuid: str, char_uuid: str) -> bool:
        """
        Update the characteristic value. This is different than using
        characteristic.set_value. This send notifications to subscribed
        central devices.

        Parameters
        ----------
        service_uuid : str
            The string representation of the UUID for the service associated
            with the characteristic to be added
        char_uuid : str
            The string representation of the UUID for the characteristic to be
            added

        Returns
        -------
        bool
            Whether the value was successfully updated
        """
        service_uuid = str(UUID(service_uuid))
        char_uuid = str(UUID(char_uuid))
        characteristic: BlessGATTCharacteristicCoreBluetooth = cast(
            BlessGATTCharacteristicCoreBluetooth, self.get_characteristic(char_uuid)
        )
        value: bytes = characteristic.value
        value = value if value is not None else b"\x00"
        peripheral_manager: CBPeripheralManager = (
            self.peripheral_manager_delegate.peripheral_manager
        )
        result: bool = (
            peripheral_manager.updateValue_forCharacteristic_onSubscribedCentrals_(
                value, characteristic.obj, None
            )
        )
        return result
from uuid import UUID
from typing import Optional, Dict, List, cast
from asyncio import TimeoutError
from asyncio.events import AbstractEventLoop
from CoreBluetooth import ( # type: ignore
CBService,
CBPeripheralManager,
CBMutableCharacteristic,
CBAdvertisementDataLocalNameKey,
CBAdvertisementDataServiceUUIDsKey,
)
from bleak.backends.service import BleakGATTService # type: ignore
from .PeripheralManagerDelegate import PeripheralManagerDelegate # type: ignore
from bless.exceptions import BlessError
from bless.backends.server import BaseBlessServer # type: ignore
from bless.backends.corebluetooth.service import BlessGATTServiceCoreBluetooth
from bless.backends.corebluetooth.characteristic import ( # type: ignore
BlessGATTCharacteristicCoreBluetooth,
)
from bless.backends.characteristic import (
GATTCharacteristicProperties,
GATTAttributePermissions,
)
logger = logging.getLogger(name=__name__)
class BlessServerCoreBluetooth(BaseBlessServer):
"""
CoreBluetooth Implementation of BlessServer
This implementation essentially wraps the PeripheralManagerDelegate Class
from CoreBluetooth
Attributes
----------
name : str
The name of the server to advertise
services : BleakGATTServiceCollection
A collection of services to be advertised by this server
peripheral_manager_delegate : PeripheralManagerDelegate
The delegated class to manage this peripheral device
"""
def __init__(self, name: str, loop: AbstractEventLoop = None, **kwargs):
super(BlessServerCoreBluetooth, self).__init__(loop=loop, **kwargs)
self.name: str = name
self.services: Dict[str, BlessGATTServiceCoreBluetooth] = {}
self.peripheral_manager_delegate: PeripheralManagerDelegate = (
PeripheralManagerDelegate.alloc().init()
)
self.peripheral_manager_delegate.read_request_func = self.read_request
self.peripheral_manager_delegate.write_request_func = self.write_request
async def start(self, timeout: float = 10, **kwargs):
"""
Start the server
Parameters
----------
timeout : float
Floating point decimal in seconds for how long to wait for the
on-board bluetooth module to power on
"""
for service_uuid in self.services:
bleak_service: BleakGATTService = self.services[service_uuid]
service_obj: CBService = bleak_service.obj
logger.debug("Adding service: {}".format(bleak_service.uuid))
await self.peripheral_manager_delegate.addService(service_obj)
if not self.read_request_func or not self.write_request_func:
raise BlessError("Callback functions must be initialized first")
advertisement_data = {
CBAdvertisementDataLocalNameKey: self.name,
CBAdvertisementDataServiceUUIDsKey: list(
map(lambda x: self.services[x].obj.UUID(), self.services)
)
}
logger.debug("Advertisement Data: {}".format(advertisement_data))
try:
await self.peripheral_manager_delegate.startAdvertising_(advertisement_data)
except TimeoutError:
# If advertising fails as a result of bluetooth module power
# cycling or advertisement failure, attempt to start again
await self.start()
logger.debug("Advertising...")
async def stop(self):
"""
Stop the server
"""
await self.peripheral_manager_delegate.stopAdvertising()
async def is_connected(self) -> bool:
"""
Determine whether there are any connected central devices
Returns
-------
bool
True if there are central devices that are connected
"""
n_subscriptions = len(self.peripheral_manager_delegate._central_subscriptions)
return n_subscriptions > 0
async def is_advertising(self) -> bool:
"""
Determine whether the service is advertising
Returns
-------
bool
True if advertising
"""
return self.peripheral_manager_delegate.is_advertising() == 1
async def add_new_service(self, uuid: str):
"""
Add a service and all it's characteristics to be advertised
Parameters
----------
uuid : str
The string representation of the UUID of the service to be added
"""
logger.debug("Creating a new service with uuid: {}".format(uuid))
service: BlessGATTServiceCoreBluetooth = BlessGATTServiceCoreBluetooth(uuid)
await service.init()
self.services[service.uuid] = service
async def add_new_characteristic(
self,
service_uuid: str,
char_uuid: str,
properties: GATTCharacteristicProperties,
value: Optional[bytearray],
permissions: GATTAttributePermissions,
):
"""
Generate a new characteristic to be associated with the server
Parameters
----------
service_uuid: str
The string representation of the UUID for the service associated
with the characteristic to be added
char_uuid : str
The string representation of the UUID for the characteristic to be
added
properties : GATTCharacteristicProperties
The flags for the characteristic
value : Optional[bytearray]
The initial value for the characteristic
permissions : GATTAttributePermissions
The permissions for the characteristic
"""
service_uuid = str(UUID(service_uuid))
logger.debug("Craeting a new characteristic with uuid: {}".format(char_uuid))
characteristic: BlessGATTCharacteristicCoreBluetooth = (
BlessGATTCharacteristicCoreBluetooth(
char_uuid, properties, permissions, value
)
)
await characteristic.init()
service: BlessGATTServiceCoreBluetooth = self.services[service_uuid]
service.add_characteristic(characteristic)
characteristics: List[CBMutableCharacteristic] = [
characteristic.obj for characteristic in service.characteristics
]
service.obj.setCharacteristics_(characteristics)
def update_value(self, service_uuid: str, char_uuid: str) -> bool:
"""
Update the characteristic value. This is different than using
characteristic.set_value. This send notifications to subscribed
central devices.
Parameters
----------
service_uuid : str
The string representation of the UUID for the service associated
with the characteristic to be added
char_uuid : str
The string representation of the UUID for the characteristic to be
added
Returns
-------
bool
Whether the value was successfully updated
"""
service_uuid = str(UUID(service_uuid))
char_uuid = str(UUID(char_uuid))
characteristic: BlessGATTCharacteristicCoreBluetooth = cast(
BlessGATTCharacteristicCoreBluetooth, self.get_characteristic(char_uuid)
)
value: bytes = characteristic.value
value = value if value is not None else b"\x00"
peripheral_manager: CBPeripheralManager = (
self.peripheral_manager_delegate.peripheral_manager
)
result: bool = (
peripheral_manager.updateValue_forCharacteristic_onSubscribedCentrals_(
value, characteristic.obj, None
)
)
return result | 0.877161 | 0.143278 |
import os
import re
try:
from . import global_state
from . import utils as ut
from .text_box import textbox
except (SystemError, ValueError, ImportError):
import global_state
import utils as ut
from text_box import textbox
try:
import tkinter as tk # python 3
import tkinter.font as tk_Font
except (SystemError, ValueError, ImportError):
import Tkinter as tk # python 2
import tkFont as tk_Font
def demo_buttonbox_1():
print("hello from the demo")
value = buttonbox(
title="First demo",
msg="bonjour",
choices=["Button[1]", "Button[2]", "Button[3]"],
default_choice="Button[2]")
print("Return: {}".format(value))
def demo_buttonbox_2():
package_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) ;# My parent's directory
images = list()
images.append(os.path.join(package_dir, "python_and_check_logo.gif"))
images.append(os.path.join(package_dir, "zzzzz.gif"))
images.append(os.path.join(package_dir, "python_and_check_logo.png"))
images = [images, images, images, images, ]
value = buttonbox(
title="Second demo",
msg="Now is a good time to press buttons and show images",
choices=['ok', 'cancel'],
images=images)
print("Return: {}".format(value))
# REF: http://stackoverflow.com/questions/1835018/python-check-if-an-object-is-a-list-or-tuple-but-not-string
def is_sequence(arg):
return hasattr(arg, "__getitem__") or hasattr(arg, "__iter__")
def is_string(arg):
ret_val = None
try:
ret_val = isinstance(arg, basestring) #Python 2
except:
ret_val = isinstance(arg, str) #Python 3
return ret_val
def buttonbox(msg="",
title=" ",
choices=("Button[1]", "Button[2]", "Button[3]"),
image=None,
images=None,
default_choice=None,
cancel_choice=None,
callback=None,
run=True):
"""
Display a message, a title, an image, and a set of buttons.
The buttons are defined by the members of the choices argument.
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: (Only here for backward compatibility)
:param str images: Filename of image or iterable or iteratable of iterable to display
:param str default_choice: The choice you want highlighted when the gui appears
:return: the text of the button that the user selected
"""
if image and images:
raise ValueError("Specify 'images' parameter only for buttonbox.")
if image:
images = image
bb = ButtonBox(
msg=msg,
title=title,
choices=choices,
images=images,
default_choice=default_choice,
cancel_choice=cancel_choice,
callback=callback)
if not run:
return bb
else:
reply = bb.run()
return reply
class ButtonBox(object):
""" Display various types of button boxes
This object separates user from ui, defines which methods can
the user invoke and which properties can he change.
It also calls the ui in defined ways, so if other gui
library can be used (wx, qt) without breaking anything for the user.
"""
def __init__(self, msg, title, choices, images, default_choice, cancel_choice, callback):
""" Create box object
Parameters
----------
msg : string
text displayed in the message area (instructions...)
title : str
the window title
choices : iterable of strings
build a button for each string in choices
images : iterable of filenames, or an iterable of iterables of filenames
displays each image
default_choice : string
one of the strings in choices to be the default selection
cancel_choice : string
if X or <esc> is pressed, it appears as if this button was pressed.
callback: function
if set, this function will be called when any button is pressed.
Returns
-------
object
The box object
"""
self.callback = callback
self.ui = GUItk(msg, title, choices, images, default_choice, cancel_choice, self.callback_ui)
def run(self):
""" Start the ui """
self.ui.run()
ret_val = self._text
self.ui = None
return ret_val
def stop(self):
""" Stop the ui """
self.ui.stop()
def callback_ui(self, ui, command):
""" This method is executed when buttons or x is pressed in the ui.
"""
if command == 'update': # Any button was pressed
self._text = ui.choice
self._choice_rc = ui.choice_rc
if self.callback:
# If a callback was set, call main process
self.callback(self)
else:
self.stop()
elif command == 'x':
self.stop()
self._text = None
elif command == 'cancel':
self.stop()
self._text = None
# methods to change properties --------------
@property
def msg(self):
"""Text in msg Area"""
return self._msg
@msg.setter
def msg(self, msg):
self._msg = self.to_string(msg)
self.ui.set_msg(self._msg)
@msg.deleter
def msg(self):
self._msg = ""
self.ui.set_msg(self._msg)
@property
def choice(self):
""" Name of button selected """
return self._text
@property
def choice_rc(self):
""" The row/column of the selected button (as a tuple) """
return self._choice_rc
# Methods to validate what will be sent to ui ---------
def to_string(self, something):
try:
basestring # python 2
except NameError:
basestring = str # Python 3
if isinstance(something, basestring):
return something
try:
text = "".join(something) # convert a list or a tuple to a string
except:
textbox(
"Exception when trying to convert {} to text in self.textArea"
.format(type(something)))
sys.exit(16)
return text
class GUItk(object):
""" This is the object that contains the tk root object"""
def __init__(self, msg, title, choices, images, default_choice, cancel_choice, callback):
""" Create ui object
Parameters
----------
msg : string
text displayed in the message area (instructions...)
title : str
the window title
choices : iterable of strings
build a button for each string in choices
images : iterable of filenames, or an iterable of iterables of filenames
displays each image
default_choice : string
one of the strings in choices to be the default selection
cancel_choice : string
if X or <esc> is pressed, it appears as if this button was pressed.
callback: function
if set, this function will be called when any button is pressed.
Returns
-------
object
The ui object
"""
self._title = title
self._msg = msg
self._choices = choices
self._default_choice = default_choice
self._cancel_choice = cancel_choice
self.callback = callback
self._choice_text = None
self._choice_rc = None
self._images = list()
self.boxRoot = tk.Tk()
# self.boxFont = tk_Font.Font(
# family=global_state.PROPORTIONAL_FONT_FAMILY,
# size=global_state.PROPORTIONAL_FONT_SIZE)
self.boxFont = tk_Font.nametofont("TkFixedFont")
self.width_in_chars = global_state.fixw_font_line_length
# default_font.configure(size=global_state.PROPORTIONAL_FONT_SIZE)
self.configure_root(title)
self.create_msg_widget(msg)
self.create_images_frame()
self.create_images(images)
self.create_buttons_frame()
self.create_buttons(choices, default_choice)
@property
def choice(self):
return self._choice_text
@property
def choice_rc(self):
return self._choice_rc
# Run and stop methods ---------------------------------------
def run(self):
self.boxRoot.mainloop()
self.boxRoot.destroy()
def stop(self):
# Get the current position before quitting
#self.get_pos()
self.boxRoot.quit()
# Methods to change content ---------------------------------------
def set_msg(self, msg):
self.messageArea.config(state=tk.NORMAL)
self.messageArea.delete(1.0, tk.END)
self.messageArea.insert(tk.END, msg)
self.messageArea.config(state=tk.DISABLED)
# Adjust msg height
self.messageArea.update()
self.set_msg_height()
self.messageArea.update()
def set_msg_height(self):
message_content = self.messageArea.get("1.0", tk.END)
lines = message_content.split("\n")
width = self.messageArea["width"]
num_lines = len(lines)
num_wordwraps = sum(len(line) // width for line in lines if len(line) != width)
height = num_lines + num_wordwraps + 1
self.messageArea.configure(height=height)
def set_pos(self, pos):
self.boxRoot.geometry(pos)
def get_pos(self):
# The geometry() method sets a size for the window and positions it on
# the screen. The first two parameters are width and height of
# the window. The last two parameters are x and y screen coordinates.
# geometry("250x150+300+300")
geom = self.boxRoot.geometry() # "628x672+300+200"
global_state.window_position = '+' + geom.split('+', 1)[1]
# Methods executing when a key is pressed -------------------------------
def x_pressed(self):
self._choice_text = self._cancel_choice
self.callback(self, command='x')
def cancel_pressed(self, event):
self._choice_text = self._cancel_choice
self.callback(self, command='cancel')
def button_pressed(self, button_text, button_rc):
self._choice_text = button_text
self._choice_rc = button_rc
self.callback(self, command='update')
def hotkey_pressed(self, event=None):
"""
Handle an event that is generated by a person interacting with a button. It may be a button press
or a key press.
TODO: Enhancement: Allow hotkey to be specified in filename of image as a shortcut too!!!
"""
# Determine window location and save to global
# TODO: Not sure where this goes, but move it out of here!
m = re.match(r"(\d+)x(\d+)([-+]\d+)([-+]\d+)", self.boxRoot.geometry())
if not m:
raise ValueError(
"failed to parse geometry string: {}".format(self.boxRoot.geometry()))
width, height, xoffset, yoffset = [int(s) for s in m.groups()]
global_state.window_position = '{0:+g}{1:+g}'.format(xoffset, yoffset)
# Hotkeys
if self._buttons:
for button_name, button in self._buttons.items():
hotkey_pressed = event.keysym
if event.keysym != event.char: # A special character
hotkey_pressed = '<{}>'.format(event.keysym)
if button['hotkey'] == hotkey_pressed:
self._choice_text = button_name
self.callback(self, command='update')
return
print("Event not understood")
# Auxiliary methods -----------------------------------------------
def calc_character_width(self):
char_width = self.boxFont.measure('W')
return char_width
# Initial configuration methods ---------------------------------------
# These ones are just called once, at setting.
def configure_root(self, title):
self.boxRoot.title(title)
self.set_pos(global_state.window_position)
# Resize setup
self.boxRoot.columnconfigure(0, weight=10)
self.boxRoot.minsize(100, 200)
# Quit when x button pressed
self.boxRoot.protocol('WM_DELETE_WINDOW', self.x_pressed)
self.boxRoot.bind("<Escape>", self.cancel_pressed)
self.boxRoot.iconname('Dialog')
self.boxRoot.attributes("-topmost", True) # Put the dialog box in focus.
def create_msg_widget(self, msg):
if msg is None:
msg = ""
self.messageArea = tk.Text(
self.boxRoot,
width=self.width_in_chars,
state=tk.DISABLED,
padx=(global_state.default_hpad_in_chars) *
self.calc_character_width(),
relief="flat",
background=self.boxRoot.config()["background"][-1],
pady=global_state.default_hpad_in_chars *
self.calc_character_width(),
wrap=tk.WORD,
)
self.set_msg(msg)
self.messageArea.grid(row=0)
self.boxRoot.rowconfigure(0, weight=10, minsize='10m')
def create_images_frame(self):
self.imagesFrame = tk.Frame(self.boxRoot)
row = 1
self.imagesFrame.grid(row=row)
self.boxRoot.rowconfigure(row, weight=10, minsize='10m')
def create_images(self, filenames):
"""
Create one or more images in the dialog.
:param filenames:
May be a filename (which will generate a single image), a list of filenames (which will generate
a row of images), or a list of list of filename (which will create a 2D array of buttons.
:return:
"""
if filenames is None:
return
# Convert to a list of lists of filenames regardless of input
if is_string(filenames):
filenames = [[filenames,],]
elif is_sequence(filenames) and is_string(filenames[0]):
filenames = [filenames,]
elif is_sequence(filenames) and is_sequence(filenames[0]) and is_string(filenames[0][0]):
pass
else:
raise ValueError("Incorrect images argument.")
images = list()
for _r, images_row in enumerate(filenames):
row_number = len(filenames) - _r
for column_number, filename in enumerate(images_row):
this_image = dict()
try:
this_image['tk_image'] = ut.load_tk_image(filename)
except Exception as e:
print(e)
this_image['tk_image'] = None
this_image['widget'] = tk.Button(
self.imagesFrame,
takefocus=1,
compound=tk.TOP)
if this_image['widget'] is not None:
this_image['widget'].configure(image=this_image['tk_image'])
fn = lambda text=filename, row=_r, column=column_number: self.button_pressed(text, (row, column))
this_image['widget'].configure(command=fn)
sticky_dir = tk.N+tk.S+tk.E+tk.W
this_image['widget'].grid(row=row_number, column=column_number, sticky=sticky_dir, padx='1m', pady='1m', ipadx='2m', ipady='1m')
self.imagesFrame.rowconfigure(row_number, weight=10, minsize='10m')
self.imagesFrame.columnconfigure(column_number, weight=10)
images.append(this_image)
self._images = images # Image objects must live, so place them in self. Otherwise, they will be deleted.
def create_buttons_frame(self):
self.buttonsFrame = tk.Frame(self.boxRoot)
self.buttonsFrame.grid(row=2, column=0)
def create_buttons(self, choices, default_choice):
unique_choices = ut.uniquify_list_of_strings(choices)
# Create buttons dictionary and Tkinter widgets
buttons = dict()
i_hack = 0
for row, (button_text, unique_button_text) in enumerate(zip(choices, unique_choices)):
this_button = dict()
this_button['original_text'] = button_text
this_button['clean_text'], this_button['hotkey'], hotkey_position = ut.parse_hotkey(button_text)
this_button['widget'] = tk.Button(
self.buttonsFrame,
takefocus=1,
text=this_button['clean_text'],
underline=hotkey_position)
fn = lambda text=button_text, row=row, column=0: self.button_pressed(text, (row, column))
this_button['widget'].configure(command=fn)
this_button['widget'].grid(row=0, column=i_hack, padx='1m', pady='1m', ipadx='2m', ipady='1m')
self.buttonsFrame.columnconfigure(i_hack, weight=10)
i_hack += 1
buttons[unique_button_text] = this_button
self._buttons = buttons
if default_choice in buttons:
buttons[default_choice]['widget'].focus_force()
# Bind hotkeys
for hk in [button['hotkey'] for button in buttons.values() if button['hotkey']]:
self.boxRoot.bind_all(hk, lambda e: self.hotkey_pressed(e), add=True)
if __name__ == '__main__':
demo_buttonbox_1()
demo_buttonbox_2() | venv/Lib/site-packages/easygui/boxes/button_box.py | import os
import re
try:
from . import global_state
from . import utils as ut
from .text_box import textbox
except (SystemError, ValueError, ImportError):
import global_state
import utils as ut
from text_box import textbox
try:
import tkinter as tk # python 3
import tkinter.font as tk_Font
except (SystemError, ValueError, ImportError):
import Tkinter as tk # python 2
import tkFont as tk_Font
def demo_buttonbox_1():
print("hello from the demo")
value = buttonbox(
title="First demo",
msg="bonjour",
choices=["Button[1]", "Button[2]", "Button[3]"],
default_choice="Button[2]")
print("Return: {}".format(value))
def demo_buttonbox_2():
package_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) ;# My parent's directory
images = list()
images.append(os.path.join(package_dir, "python_and_check_logo.gif"))
images.append(os.path.join(package_dir, "zzzzz.gif"))
images.append(os.path.join(package_dir, "python_and_check_logo.png"))
images = [images, images, images, images, ]
value = buttonbox(
title="Second demo",
msg="Now is a good time to press buttons and show images",
choices=['ok', 'cancel'],
images=images)
print("Return: {}".format(value))
# REF: http://stackoverflow.com/questions/1835018/python-check-if-an-object-is-a-list-or-tuple-but-not-string
def is_sequence(arg):
return hasattr(arg, "__getitem__") or hasattr(arg, "__iter__")
def is_string(arg):
ret_val = None
try:
ret_val = isinstance(arg, basestring) #Python 2
except:
ret_val = isinstance(arg, str) #Python 3
return ret_val
def buttonbox(msg="",
title=" ",
choices=("Button[1]", "Button[2]", "Button[3]"),
image=None,
images=None,
default_choice=None,
cancel_choice=None,
callback=None,
run=True):
"""
Display a message, a title, an image, and a set of buttons.
The buttons are defined by the members of the choices argument.
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: (Only here for backward compatibility)
:param str images: Filename of image or iterable or iteratable of iterable to display
:param str default_choice: The choice you want highlighted when the gui appears
:return: the text of the button that the user selected
"""
if image and images:
raise ValueError("Specify 'images' parameter only for buttonbox.")
if image:
images = image
bb = ButtonBox(
msg=msg,
title=title,
choices=choices,
images=images,
default_choice=default_choice,
cancel_choice=cancel_choice,
callback=callback)
if not run:
return bb
else:
reply = bb.run()
return reply
class ButtonBox(object):
""" Display various types of button boxes
This object separates user from ui, defines which methods can
the user invoke and which properties can he change.
It also calls the ui in defined ways, so if other gui
library can be used (wx, qt) without breaking anything for the user.
"""
def __init__(self, msg, title, choices, images, default_choice, cancel_choice, callback):
""" Create box object
Parameters
----------
msg : string
text displayed in the message area (instructions...)
title : str
the window title
choices : iterable of strings
build a button for each string in choices
images : iterable of filenames, or an iterable of iterables of filenames
displays each image
default_choice : string
one of the strings in choices to be the default selection
cancel_choice : string
if X or <esc> is pressed, it appears as if this button was pressed.
callback: function
if set, this function will be called when any button is pressed.
Returns
-------
object
The box object
"""
self.callback = callback
self.ui = GUItk(msg, title, choices, images, default_choice, cancel_choice, self.callback_ui)
def run(self):
""" Start the ui """
self.ui.run()
ret_val = self._text
self.ui = None
return ret_val
def stop(self):
""" Stop the ui """
self.ui.stop()
def callback_ui(self, ui, command):
""" This method is executed when buttons or x is pressed in the ui.
"""
if command == 'update': # Any button was pressed
self._text = ui.choice
self._choice_rc = ui.choice_rc
if self.callback:
# If a callback was set, call main process
self.callback(self)
else:
self.stop()
elif command == 'x':
self.stop()
self._text = None
elif command == 'cancel':
self.stop()
self._text = None
# methods to change properties --------------
@property
def msg(self):
"""Text in msg Area"""
return self._msg
@msg.setter
def msg(self, msg):
self._msg = self.to_string(msg)
self.ui.set_msg(self._msg)
@msg.deleter
def msg(self):
self._msg = ""
self.ui.set_msg(self._msg)
@property
def choice(self):
""" Name of button selected """
return self._text
@property
def choice_rc(self):
""" The row/column of the selected button (as a tuple) """
return self._choice_rc
# Methods to validate what will be sent to ui ---------
def to_string(self, something):
try:
basestring # python 2
except NameError:
basestring = str # Python 3
if isinstance(something, basestring):
return something
try:
text = "".join(something) # convert a list or a tuple to a string
except:
textbox(
"Exception when trying to convert {} to text in self.textArea"
.format(type(something)))
sys.exit(16)
return text
class GUItk(object):
""" This is the object that contains the tk root object"""
def __init__(self, msg, title, choices, images, default_choice, cancel_choice, callback):
""" Create ui object
Parameters
----------
msg : string
text displayed in the message area (instructions...)
title : str
the window title
choices : iterable of strings
build a button for each string in choices
images : iterable of filenames, or an iterable of iterables of filenames
displays each image
default_choice : string
one of the strings in choices to be the default selection
cancel_choice : string
if X or <esc> is pressed, it appears as if this button was pressed.
callback: function
if set, this function will be called when any button is pressed.
Returns
-------
object
The ui object
"""
self._title = title
self._msg = msg
self._choices = choices
self._default_choice = default_choice
self._cancel_choice = cancel_choice
self.callback = callback
self._choice_text = None
self._choice_rc = None
self._images = list()
self.boxRoot = tk.Tk()
# self.boxFont = tk_Font.Font(
# family=global_state.PROPORTIONAL_FONT_FAMILY,
# size=global_state.PROPORTIONAL_FONT_SIZE)
self.boxFont = tk_Font.nametofont("TkFixedFont")
self.width_in_chars = global_state.fixw_font_line_length
# default_font.configure(size=global_state.PROPORTIONAL_FONT_SIZE)
self.configure_root(title)
self.create_msg_widget(msg)
self.create_images_frame()
self.create_images(images)
self.create_buttons_frame()
self.create_buttons(choices, default_choice)
@property
def choice(self):
return self._choice_text
@property
def choice_rc(self):
return self._choice_rc
# Run and stop methods ---------------------------------------
def run(self):
self.boxRoot.mainloop()
self.boxRoot.destroy()
def stop(self):
# Get the current position before quitting
#self.get_pos()
self.boxRoot.quit()
# Methods to change content ---------------------------------------
def set_msg(self, msg):
self.messageArea.config(state=tk.NORMAL)
self.messageArea.delete(1.0, tk.END)
self.messageArea.insert(tk.END, msg)
self.messageArea.config(state=tk.DISABLED)
# Adjust msg height
self.messageArea.update()
self.set_msg_height()
self.messageArea.update()
def set_msg_height(self):
message_content = self.messageArea.get("1.0", tk.END)
lines = message_content.split("\n")
width = self.messageArea["width"]
num_lines = len(lines)
num_wordwraps = sum(len(line) // width for line in lines if len(line) != width)
height = num_lines + num_wordwraps + 1
self.messageArea.configure(height=height)
def set_pos(self, pos):
self.boxRoot.geometry(pos)
def get_pos(self):
# The geometry() method sets a size for the window and positions it on
# the screen. The first two parameters are width and height of
# the window. The last two parameters are x and y screen coordinates.
# geometry("250x150+300+300")
geom = self.boxRoot.geometry() # "628x672+300+200"
global_state.window_position = '+' + geom.split('+', 1)[1]
# Methods executing when a key is pressed -------------------------------
def x_pressed(self):
self._choice_text = self._cancel_choice
self.callback(self, command='x')
def cancel_pressed(self, event):
self._choice_text = self._cancel_choice
self.callback(self, command='cancel')
def button_pressed(self, button_text, button_rc):
self._choice_text = button_text
self._choice_rc = button_rc
self.callback(self, command='update')
def hotkey_pressed(self, event=None):
"""
Handle an event that is generated by a person interacting with a button. It may be a button press
or a key press.
TODO: Enhancement: Allow hotkey to be specified in filename of image as a shortcut too!!!
"""
# Determine window location and save to global
# TODO: Not sure where this goes, but move it out of here!
m = re.match(r"(\d+)x(\d+)([-+]\d+)([-+]\d+)", self.boxRoot.geometry())
if not m:
raise ValueError(
"failed to parse geometry string: {}".format(self.boxRoot.geometry()))
width, height, xoffset, yoffset = [int(s) for s in m.groups()]
global_state.window_position = '{0:+g}{1:+g}'.format(xoffset, yoffset)
# Hotkeys
if self._buttons:
for button_name, button in self._buttons.items():
hotkey_pressed = event.keysym
if event.keysym != event.char: # A special character
hotkey_pressed = '<{}>'.format(event.keysym)
if button['hotkey'] == hotkey_pressed:
self._choice_text = button_name
self.callback(self, command='update')
return
print("Event not understood")
# Auxiliary methods -----------------------------------------------
def calc_character_width(self):
char_width = self.boxFont.measure('W')
return char_width
# Initial configuration methods ---------------------------------------
# These ones are just called once, at setting.
def configure_root(self, title):
self.boxRoot.title(title)
self.set_pos(global_state.window_position)
# Resize setup
self.boxRoot.columnconfigure(0, weight=10)
self.boxRoot.minsize(100, 200)
# Quit when x button pressed
self.boxRoot.protocol('WM_DELETE_WINDOW', self.x_pressed)
self.boxRoot.bind("<Escape>", self.cancel_pressed)
self.boxRoot.iconname('Dialog')
self.boxRoot.attributes("-topmost", True) # Put the dialog box in focus.
def create_msg_widget(self, msg):
if msg is None:
msg = ""
self.messageArea = tk.Text(
self.boxRoot,
width=self.width_in_chars,
state=tk.DISABLED,
padx=(global_state.default_hpad_in_chars) *
self.calc_character_width(),
relief="flat",
background=self.boxRoot.config()["background"][-1],
pady=global_state.default_hpad_in_chars *
self.calc_character_width(),
wrap=tk.WORD,
)
self.set_msg(msg)
self.messageArea.grid(row=0)
self.boxRoot.rowconfigure(0, weight=10, minsize='10m')
def create_images_frame(self):
self.imagesFrame = tk.Frame(self.boxRoot)
row = 1
self.imagesFrame.grid(row=row)
self.boxRoot.rowconfigure(row, weight=10, minsize='10m')
def create_images(self, filenames):
"""
Create one or more images in the dialog.
:param filenames:
May be a filename (which will generate a single image), a list of filenames (which will generate
a row of images), or a list of list of filename (which will create a 2D array of buttons.
:return:
"""
if filenames is None:
return
# Convert to a list of lists of filenames regardless of input
if is_string(filenames):
filenames = [[filenames,],]
elif is_sequence(filenames) and is_string(filenames[0]):
filenames = [filenames,]
elif is_sequence(filenames) and is_sequence(filenames[0]) and is_string(filenames[0][0]):
pass
else:
raise ValueError("Incorrect images argument.")
images = list()
for _r, images_row in enumerate(filenames):
row_number = len(filenames) - _r
for column_number, filename in enumerate(images_row):
this_image = dict()
try:
this_image['tk_image'] = ut.load_tk_image(filename)
except Exception as e:
print(e)
this_image['tk_image'] = None
this_image['widget'] = tk.Button(
self.imagesFrame,
takefocus=1,
compound=tk.TOP)
if this_image['widget'] is not None:
this_image['widget'].configure(image=this_image['tk_image'])
fn = lambda text=filename, row=_r, column=column_number: self.button_pressed(text, (row, column))
this_image['widget'].configure(command=fn)
sticky_dir = tk.N+tk.S+tk.E+tk.W
this_image['widget'].grid(row=row_number, column=column_number, sticky=sticky_dir, padx='1m', pady='1m', ipadx='2m', ipady='1m')
self.imagesFrame.rowconfigure(row_number, weight=10, minsize='10m')
self.imagesFrame.columnconfigure(column_number, weight=10)
images.append(this_image)
self._images = images # Image objects must live, so place them in self. Otherwise, they will be deleted.
def create_buttons_frame(self):
self.buttonsFrame = tk.Frame(self.boxRoot)
self.buttonsFrame.grid(row=2, column=0)
def create_buttons(self, choices, default_choice):
unique_choices = ut.uniquify_list_of_strings(choices)
# Create buttons dictionary and Tkinter widgets
buttons = dict()
i_hack = 0
for row, (button_text, unique_button_text) in enumerate(zip(choices, unique_choices)):
this_button = dict()
this_button['original_text'] = button_text
this_button['clean_text'], this_button['hotkey'], hotkey_position = ut.parse_hotkey(button_text)
this_button['widget'] = tk.Button(
self.buttonsFrame,
takefocus=1,
text=this_button['clean_text'],
underline=hotkey_position)
fn = lambda text=button_text, row=row, column=0: self.button_pressed(text, (row, column))
this_button['widget'].configure(command=fn)
this_button['widget'].grid(row=0, column=i_hack, padx='1m', pady='1m', ipadx='2m', ipady='1m')
self.buttonsFrame.columnconfigure(i_hack, weight=10)
i_hack += 1
buttons[unique_button_text] = this_button
self._buttons = buttons
if default_choice in buttons:
buttons[default_choice]['widget'].focus_force()
# Bind hotkeys
for hk in [button['hotkey'] for button in buttons.values() if button['hotkey']]:
self.boxRoot.bind_all(hk, lambda e: self.hotkey_pressed(e), add=True)
if __name__ == '__main__':
demo_buttonbox_1()
demo_buttonbox_2() | 0.460774 | 0.199913 |
import test_package
print(dir(test_package))
print(test_package.test_module.TEST_CONSTANT)
# Імпортування модуля
from test_package import test_module
print(dir(test_module))
print(test_module.TEST_CONSTANT)
print(test_module.test_variable)
test_module.test_function()
test_module.test_function("Vadym")
import demo_module
print(__name__)
# Імпортування функції
from test_package.test_module import test_function
print(test_function)
test_function()
test_function("Vadym")
# Імпортування функції з псевдонімом
from test_package.test_module import test_function
from test_package.another_test_module import test_function as another_test_function
print(test_function)
test_function()
test_function("Vadym")
print(another_test_function)
another_test_function()
another_test_function("Vadym")
# Імпортування змінної/константи
from test_package.test_module import TEST_CONSTANT, test_variable
print(TEST_CONSTANT)
print(test_variable)
# Імпортування всього
from test_package.test_module import test_function
print(test_function)
test_function()
test_function("Vadym")
from test_package.another_test_module import *
print(test_function)
test_function()
test_function("Vadym")
# Використання вбудованих бібліотек
# Бібліотека os
import os
for attr in dir(os):
if not attr.startswith("_"):
print(attr)
print(help(os.getcwd))
print(os.getcwd())
print(os.listdir())
os.mkdir("temp")
print(os.listdir())
os.chdir("temp")
print(os.getcwd())
os.chdir("..")
print(os.getcwd())
print(os.listdir())
os.rmdir("temp")
print(os.listdir())
os.system("ls")
# Бібліотека sys
import sys
for attr in dir(sys):
if not attr.startswith("_"):
print(attr)
print(help(sys))
print(sys.argv)
print(sys.path)
print(sys.platform)
print(sys.version)
print(sys.getsizeof("String"))
print(sys.getsizeof(list("String")))
print(sys.getsizeof(tuple("String")))
print(sys.getsizeof(set("String")))
# Бібліотека itertools
import itertools
for attr in dir(itertools):
if not attr.startswith("_"):
print(attr)
print(help(itertools))
# itertools.groupby
users = [
{"name": "max", "status": "active"},
{"name": "bob", "status": "inactive"},
{"name": "bill", "status": "pending"},
{"name": "john", "status": "pending"},
{"name": "brian", "status": "inactive"},
{"name": "dan", "status": "active"},
{"name": "will", "status": "active"},
{"name": "jack", "status": "inactive"},
]
users.sort(key=lambda i: i["status"])
for grouper, group in itertools.groupby(users, key=lambda i: i["status"]):
print(grouper)
for item in group:
print(item)
# itertools.cycle
counter = 0
from_zero_to_ten = range(11)
for i in itertools.cycle(from_zero_to_ten):
if counter > 25:
break
print(i)
counter += 1
# Бібліотека collections
import collections
for attr in dir(collections):
if not attr.startswith("_"):
print(attr)
print(help(collections))
# collections.Counter
some_text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris feugiat pulvinar aliquam. Duis ac consectetur lectus. Curabitur interdum, justo vitae convallis pharetra, tortor quam rhoncus ipsum, a sagittis lectus ipsum a orci. Donec non orci tellus. Curabitur volutpat consectetur ante id ornare. Maecenas et lorem vitae massa tincidunt volutpat in in tortor. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Quisque a nibh vitae velit convallis interdum a at felis. Fusce id dolor egestas, maximus orci a, sodales nisi.
Duis semper erat ut nibh dictum finibus. Aliquam eget accumsan neque. Nullam volutpat, dolor in aliquet aliquam, ante lectus dignissim erat, in egestas mauris tellus eu tortor. Ut non varius sem, ut commodo nisi. Donec tellus nibh, varius vel nibh ac, auctor laoreet sem. Proin non venenatis nisl. Integer rutrum, urna vel suscipit euismod, sapien lacus feugiat sapien, a congue nunc dui ac ante. Phasellus vel tellus dictum leo aliquam faucibus. Fusce elementum, orci sed gravida dapibus, nisl ante bibendum mi, nec sollicitudin ante nisi in lacus.
Suspendisse potenti. Maecenas tristique posuere purus, ac sollicitudin justo tempus vitae. Morbi nec euismod tellus. Curabitur urna purus, commodo at porttitor ut, mattis nec quam. In congue vel metus id pellentesque. Praesent tristique mi sed interdum laoreet. Quisque quis enim eu lacus tincidunt sagittis non nec urna. Aenean scelerisque nec justo vel efficitur.
Aenean quis rutrum nisl. Maecenas pharetra tortor scelerisque, aliquet lacus eget, posuere elit. In dapibus porta nulla. Nulla gravida accumsan augue, nec vulputate metus tempor in. Praesent nec leo odio. Sed risus tortor, eleifend quis euismod vel, malesuada scelerisque lectus. Donec eget iaculis elit. Nullam nibh nunc, ullamcorper ornare eros eget, vehicula ornare ex. Vestibulum non eros aliquet, ullamcorper turpis eu, blandit arcu. Nunc tincidunt, purus et fermentum ullamcorper, elit arcu gravida sapien, placerat malesuada mauris turpis eu nunc. Nulla eu nisl quis tortor dictum aliquet ac vel metus. Curabitur augue lacus, scelerisque in arcu egestas, vulputate accumsan dui. Donec viverra arcu scelerisque quam ullamcorper euismod. Nunc molestie metus ut urna venenatis, sit amet posuere metus tincidunt. Duis vel augue lacus.
Pellentesque maximus molestie aliquet. Sed laoreet dui massa, a accumsan felis dapibus at. Nullam vel fermentum sapien, in luctus sem. Nam at ligula purus. Quisque id lacus eu est congue ornare a sit amet ipsum. Suspendisse ac leo non felis venenatis laoreet eu non augue. Praesent sollicitudin erat sit amet mi convallis cursus. Vestibulum neque diam, congue id lobortis rutrum, volutpat a nibh. Donec vel fringilla tellus, vel rutrum mauris. Duis efficitur nisi ac lacus gravida, sit amet iaculis lacus mollis. In hac habitasse platea dictumst."""
counter = collections.Counter(some_text.replace(",", "").replace(".", "").replace("\n", " ").split(" "))
for word, times in counter.items():
print(f"Word '{word}' appears {times} times.")
# collections.defaultdict
users = [
{"name": "max", "status": "active"},
{"name": "bob", "status": "inactive"},
{"name": "bill", "status": "pending"},
{"name": "john", "status": "pending"},
{"name": "brian", "status": "inactive"},
{"name": "dan", "status": "active"},
{"name": "will", "status": "active"},
{"name": "jack", "status": "inactive"},
]
users_by_status = collections.defaultdict(list)
for user in users:
users_by_status[user["status"]].append(user)
print(users_by_status)
# Бібліотека datetime
import datetime
for attr in dir(datetime):
if not attr.startswith("_"):
print(attr)
print(help(datetime.datetime))
for attr in dir(datetime.datetime):
if not attr.startswith("_"):
print(attr)
print(help(datetime.datetime))
for attr in dir(datetime.date):
if not attr.startswith("_"):
print(attr)
print(help(datetime.date))
for attr in dir(datetime.time):
if not attr.startswith("_"):
print(attr)
print(help(datetime.time))
# datetime.datetime
now = datetime.datetime.now()
utcnow = datetime.datetime.utcnow()
new_year = datetime.datetime(2022, 1, 1, 0, 0, 0)
print(now)
print(utcnow)
print(new_year)
print(new_year - now)
# datetime.datetime.strftime and datetime.datetime.strptime
now = datetime.datetime.utcnow()
now_str = now.strftime("%d %b %Y %H hours %M minutes %S seconds (%A)")
print(now_str)
new_year_str = "January, 1, 2022"
new_year = datetime.datetime.strptime(new_year_str, "%B, %d, %Y")
print(new_year)
# datetime.date and datetime.timedelta
today = datetime.date.today()
print(today)
print(today.ctime())
new_year = datetime.date(2022, 1, 1)
print(new_year)
print(new_year.strftime("%A"))
print(new_year.isoweekday())
print(new_year.weekday())
day_after_new_year = new_year + datetime.timedelta(days=2)
print(day_after_new_year)
print(day_after_new_year.strftime("%A"))
print(day_after_new_year.isoweekday())
print(day_after_new_year.weekday())
# Бібліотека time
import time
start = time.time()
print(start)
time.sleep(6)
print(time.time() - start)
print(time.localtime()) | course/code/builtin_libraries.py | import test_package
print(dir(test_package))
print(test_package.test_module.TEST_CONSTANT)
# Імпортування модуля
from test_package import test_module
print(dir(test_module))
print(test_module.TEST_CONSTANT)
print(test_module.test_variable)
test_module.test_function()
test_module.test_function("Vadym")
import demo_module
print(__name__)
# Імпортування функції
from test_package.test_module import test_function
print(test_function)
test_function()
test_function("Vadym")
# Імпортування функції з псевдонімом
from test_package.test_module import test_function
from test_package.another_test_module import test_function as another_test_function
print(test_function)
test_function()
test_function("Vadym")
print(another_test_function)
another_test_function()
another_test_function("Vadym")
# Імпортування змінної/константи
from test_package.test_module import TEST_CONSTANT, test_variable
print(TEST_CONSTANT)
print(test_variable)
# Імпортування всього
from test_package.test_module import test_function
print(test_function)
test_function()
test_function("Vadym")
from test_package.another_test_module import *
print(test_function)
test_function()
test_function("Vadym")
# Використання вбудованих бібліотек
# Бібліотека os
import os
for attr in dir(os):
if not attr.startswith("_"):
print(attr)
print(help(os.getcwd))
print(os.getcwd())
print(os.listdir())
os.mkdir("temp")
print(os.listdir())
os.chdir("temp")
print(os.getcwd())
os.chdir("..")
print(os.getcwd())
print(os.listdir())
os.rmdir("temp")
print(os.listdir())
os.system("ls")
# Бібліотека sys
import sys
for attr in dir(sys):
if not attr.startswith("_"):
print(attr)
print(help(sys))
print(sys.argv)
print(sys.path)
print(sys.platform)
print(sys.version)
print(sys.getsizeof("String"))
print(sys.getsizeof(list("String")))
print(sys.getsizeof(tuple("String")))
print(sys.getsizeof(set("String")))
# Бібліотека itertools
import itertools
for attr in dir(itertools):
if not attr.startswith("_"):
print(attr)
print(help(itertools))
# itertools.groupby
users = [
{"name": "max", "status": "active"},
{"name": "bob", "status": "inactive"},
{"name": "bill", "status": "pending"},
{"name": "john", "status": "pending"},
{"name": "brian", "status": "inactive"},
{"name": "dan", "status": "active"},
{"name": "will", "status": "active"},
{"name": "jack", "status": "inactive"},
]
users.sort(key=lambda i: i["status"])
for grouper, group in itertools.groupby(users, key=lambda i: i["status"]):
print(grouper)
for item in group:
print(item)
# itertools.cycle
counter = 0
from_zero_to_ten = range(11)
for i in itertools.cycle(from_zero_to_ten):
if counter > 25:
break
print(i)
counter += 1
# Бібліотека collections
import collections
for attr in dir(collections):
if not attr.startswith("_"):
print(attr)
print(help(collections))
# collections.Counter
some_text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris feugiat pulvinar aliquam. Duis ac consectetur lectus. Curabitur interdum, justo vitae convallis pharetra, tortor quam rhoncus ipsum, a sagittis lectus ipsum a orci. Donec non orci tellus. Curabitur volutpat consectetur ante id ornare. Maecenas et lorem vitae massa tincidunt volutpat in in tortor. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Quisque a nibh vitae velit convallis interdum a at felis. Fusce id dolor egestas, maximus orci a, sodales nisi.
Duis semper erat ut nibh dictum finibus. Aliquam eget accumsan neque. Nullam volutpat, dolor in aliquet aliquam, ante lectus dignissim erat, in egestas mauris tellus eu tortor. Ut non varius sem, ut commodo nisi. Donec tellus nibh, varius vel nibh ac, auctor laoreet sem. Proin non venenatis nisl. Integer rutrum, urna vel suscipit euismod, sapien lacus feugiat sapien, a congue nunc dui ac ante. Phasellus vel tellus dictum leo aliquam faucibus. Fusce elementum, orci sed gravida dapibus, nisl ante bibendum mi, nec sollicitudin ante nisi in lacus.
Suspendisse potenti. Maecenas tristique posuere purus, ac sollicitudin justo tempus vitae. Morbi nec euismod tellus. Curabitur urna purus, commodo at porttitor ut, mattis nec quam. In congue vel metus id pellentesque. Praesent tristique mi sed interdum laoreet. Quisque quis enim eu lacus tincidunt sagittis non nec urna. Aenean scelerisque nec justo vel efficitur.
Aenean quis rutrum nisl. Maecenas pharetra tortor scelerisque, aliquet lacus eget, posuere elit. In dapibus porta nulla. Nulla gravida accumsan augue, nec vulputate metus tempor in. Praesent nec leo odio. Sed risus tortor, eleifend quis euismod vel, malesuada scelerisque lectus. Donec eget iaculis elit. Nullam nibh nunc, ullamcorper ornare eros eget, vehicula ornare ex. Vestibulum non eros aliquet, ullamcorper turpis eu, blandit arcu. Nunc tincidunt, purus et fermentum ullamcorper, elit arcu gravida sapien, placerat malesuada mauris turpis eu nunc. Nulla eu nisl quis tortor dictum aliquet ac vel metus. Curabitur augue lacus, scelerisque in arcu egestas, vulputate accumsan dui. Donec viverra arcu scelerisque quam ullamcorper euismod. Nunc molestie metus ut urna venenatis, sit amet posuere metus tincidunt. Duis vel augue lacus.
Pellentesque maximus molestie aliquet. Sed laoreet dui massa, a accumsan felis dapibus at. Nullam vel fermentum sapien, in luctus sem. Nam at ligula purus. Quisque id lacus eu est congue ornare a sit amet ipsum. Suspendisse ac leo non felis venenatis laoreet eu non augue. Praesent sollicitudin erat sit amet mi convallis cursus. Vestibulum neque diam, congue id lobortis rutrum, volutpat a nibh. Donec vel fringilla tellus, vel rutrum mauris. Duis efficitur nisi ac lacus gravida, sit amet iaculis lacus mollis. In hac habitasse platea dictumst."""
counter = collections.Counter(some_text.replace(",", "").replace(".", "").replace("\n", " ").split(" "))
for word, times in counter.items():
print(f"Word '{word}' appears {times} times.")
# collections.defaultdict
users = [
{"name": "max", "status": "active"},
{"name": "bob", "status": "inactive"},
{"name": "bill", "status": "pending"},
{"name": "john", "status": "pending"},
{"name": "brian", "status": "inactive"},
{"name": "dan", "status": "active"},
{"name": "will", "status": "active"},
{"name": "jack", "status": "inactive"},
]
users_by_status = collections.defaultdict(list)
for user in users:
users_by_status[user["status"]].append(user)
print(users_by_status)
# Бібліотека datetime
import datetime
for attr in dir(datetime):
if not attr.startswith("_"):
print(attr)
print(help(datetime.datetime))
for attr in dir(datetime.datetime):
if not attr.startswith("_"):
print(attr)
print(help(datetime.datetime))
for attr in dir(datetime.date):
if not attr.startswith("_"):
print(attr)
print(help(datetime.date))
for attr in dir(datetime.time):
if not attr.startswith("_"):
print(attr)
print(help(datetime.time))
# datetime.datetime
now = datetime.datetime.now()
utcnow = datetime.datetime.utcnow()
new_year = datetime.datetime(2022, 1, 1, 0, 0, 0)
print(now)
print(utcnow)
print(new_year)
print(new_year - now)
# datetime.datetime.strftime and datetime.datetime.strptime
now = datetime.datetime.utcnow()
now_str = now.strftime("%d %b %Y %H hours %M minutes %S seconds (%A)")
print(now_str)
new_year_str = "January, 1, 2022"
new_year = datetime.datetime.strptime(new_year_str, "%B, %d, %Y")
print(new_year)
# datetime.date and datetime.timedelta
today = datetime.date.today()
print(today)
print(today.ctime())
new_year = datetime.date(2022, 1, 1)
print(new_year)
print(new_year.strftime("%A"))
print(new_year.isoweekday())
print(new_year.weekday())
day_after_new_year = new_year + datetime.timedelta(days=2)
print(day_after_new_year)
print(day_after_new_year.strftime("%A"))
print(day_after_new_year.isoweekday())
print(day_after_new_year.weekday())
# Бібліотека time
import time
start = time.time()
print(start)
time.sleep(6)
print(time.time() - start)
print(time.localtime()) | 0.117408 | 0.332473 |
from __future__ import annotations
import os
import yaml
from ..exceptions import (
FileArgumentNotFoundError,
ModelExtensionError,
ModelTypeNotSupportedError,
NoDefaultSpeakerDictionaryError,
PretrainedModelNotFoundError,
)
from ..models import MODEL_TYPES
__all__ = ["validate_model_arg"]
def validate_model_arg(name: str, model_type: str) -> str:
"""
Validate pretrained model name argument
Parameters
----------
name: str
Name of model
model_type: str
Type of model
Returns
-------
str
Full path of validated model
Raises
------
:class:`~montreal_forced_aligner.exceptions.ModelTypeNotSupportedError`
If the type of model is not supported
:class:`~montreal_forced_aligner.exceptions.FileArgumentNotFoundError`
If the file specified is not found
:class:`~montreal_forced_aligner.exceptions.PretrainedModelNotFoundError`
If the pretrained model specified is not found
:class:`~montreal_forced_aligner.exceptions.ModelExtensionError`
If the extension is not valid for the specified model type
:class:`~montreal_forced_aligner.exceptions.NoDefaultSpeakerDictionaryError`
If a multispeaker dictionary does not have a default dictionary
"""
if model_type not in MODEL_TYPES:
raise ModelTypeNotSupportedError(model_type, MODEL_TYPES)
model_class = MODEL_TYPES[model_type]
available_models = model_class.get_available_models()
model_class = MODEL_TYPES[model_type]
if name in available_models:
name = model_class.get_pretrained_path(name)
elif model_class.valid_extension(name):
if not os.path.exists(name):
raise FileArgumentNotFoundError(name)
if model_type == "dictionary" and os.path.splitext(name)[1].lower() == ".yaml":
with open(name, "r", encoding="utf8") as f:
data = yaml.safe_load(f)
found_default = False
for speaker, path in data.items():
if speaker == "default":
found_default = True
path = validate_model_arg(path, "dictionary")
if not found_default:
raise NoDefaultSpeakerDictionaryError()
else:
if os.path.splitext(name)[1]:
raise ModelExtensionError(name, model_type, model_class.extensions)
else:
raise PretrainedModelNotFoundError(name, model_type, available_models)
return name | montreal_forced_aligner/command_line/utils.py | from __future__ import annotations
import os
import yaml
from ..exceptions import (
FileArgumentNotFoundError,
ModelExtensionError,
ModelTypeNotSupportedError,
NoDefaultSpeakerDictionaryError,
PretrainedModelNotFoundError,
)
from ..models import MODEL_TYPES
__all__ = ["validate_model_arg"]
def validate_model_arg(name: str, model_type: str) -> str:
"""
Validate pretrained model name argument
Parameters
----------
name: str
Name of model
model_type: str
Type of model
Returns
-------
str
Full path of validated model
Raises
------
:class:`~montreal_forced_aligner.exceptions.ModelTypeNotSupportedError`
If the type of model is not supported
:class:`~montreal_forced_aligner.exceptions.FileArgumentNotFoundError`
If the file specified is not found
:class:`~montreal_forced_aligner.exceptions.PretrainedModelNotFoundError`
If the pretrained model specified is not found
:class:`~montreal_forced_aligner.exceptions.ModelExtensionError`
If the extension is not valid for the specified model type
:class:`~montreal_forced_aligner.exceptions.NoDefaultSpeakerDictionaryError`
If a multispeaker dictionary does not have a default dictionary
"""
if model_type not in MODEL_TYPES:
raise ModelTypeNotSupportedError(model_type, MODEL_TYPES)
model_class = MODEL_TYPES[model_type]
available_models = model_class.get_available_models()
model_class = MODEL_TYPES[model_type]
if name in available_models:
name = model_class.get_pretrained_path(name)
elif model_class.valid_extension(name):
if not os.path.exists(name):
raise FileArgumentNotFoundError(name)
if model_type == "dictionary" and os.path.splitext(name)[1].lower() == ".yaml":
with open(name, "r", encoding="utf8") as f:
data = yaml.safe_load(f)
found_default = False
for speaker, path in data.items():
if speaker == "default":
found_default = True
path = validate_model_arg(path, "dictionary")
if not found_default:
raise NoDefaultSpeakerDictionaryError()
else:
if os.path.splitext(name)[1]:
raise ModelExtensionError(name, model_type, model_class.extensions)
else:
raise PretrainedModelNotFoundError(name, model_type, available_models)
return name | 0.749546 | 0.127898 |
import os
import base64
import dateutil.parser as parser
from email.mime.text import MIMEText
from MailLogging import debug
from authentication import auth
class Gmail:
def __init__(self):
self.user_id = 'me'
self.service = auth()
debug.createLog()
def getMessageData(self, messages, format=None, log=False):
""" Return a list of message data in dictionary format
Args:
messages (list): The returned object from the
`getMessages` function
"""
messages = [
self.service.users().messages().get(userId=self.user_id,
id=message['id'],
format=format).execute()
for message in messages['messages']
]
if log:
debug.writeLog(format+' message' if format else 'messages',
messages)
return messages
def getPayload(self, messages, log=False):
""" Returns a list of payload data from message data
Args:
messages (list): The returned object from the
`getMessageData` function
"""
payloads = [ message['payload'] for message in messages ]
if log:
debug.writeLog('payload', payloads)
return payloads
def decodeMSG(self, message):
""" Decodes `message` paramater into utf-8
Arg:
message (string): base64 encoded string
"""
msgString = base64.urlsafe_b64decode(message.encode('ASCII'))
decoded = msgString.decode("utf-8")
return decoded
def unpackPayload(self, payloads, bodies=True,
types=['html', 'plain'], log=False, x=0, depth=0):
"""
recurses through message parts until it find the types
specified, then returns the decoded part and appends it to a
list/dictionary of all messages parts
Args:
payloads (dictionary): The returned object from the
`getPayload` function
bodies (bool): True to collect html or plain text bodies
from payload, false for simply headers
types (:obj:`list` of :obj:`str`): types of bodies to be
returned, either `html` or `plain`
log (bool): True to log output to ./logs directory
*DO NOT ALTER THE FOLLOWING PARAMETERS, THEY ARE REQUIRED
FOR THE RECURSION*
x (int): Specifies the index of the payloads
depth (int): Specifies the Depth of recursion
"""
messageParts = []
plain = False
plainText = ''
for msg in payloads:
parts = {}
if bodies:
type = msg['mimeType']
# Plain text
if type == 'text/plain' and 'plain' in types:
plainText = self.decodeMSG(msg['body']['data'])
if depth == 0:
parts['plain'] = plainText[:20]
else:
plain = True
# Html
elif type == 'text/html' and 'html' in types:
plain = False
body = self.decodeMSG(msg['body']['data'])
if depth > 0:
return { "html": body }
parts['html'] = body
# Other
elif type[10:] in ['mixed', 'alternative',
'related', 'report']:
msgParts = msg['parts']
parts = self.unpackPayload(msgParts, bodies=True,
types=types, x=x, depth=depth+1)
if depth > 0:
return parts
# Attatchment
elif type[:10] == 'application':
pass
# Headers
if depth == 0:
#print(parts, 'X:', x)
header = msg['headers']
for head in header:
name, value = head['name'], head['value']
if name == 'Date':
parts['dateTime'] = value
if name == 'Subject':
parts['Subject'] = value
if name == 'From':
if '<' in value:
FROM, EMAIL = value.split('<')
parts['From'] = FROM[:-1]
parts['From-email'] = EMAIL[:-1]
else:
parts['From-email'] = value
messageParts.append(parts)
x += 1
if plain and depth > 0:
plain = False
return { "plain": plainText[:20] }
# adds plain message if no html
if log:
debug.createLog(dir='logs/bodies')
debug.writeLog("mockData" if bodies else "headers",
messageParts)
if bodies:
for (index, msg) in enumerate(messageParts):
if 'html' in msg:
debug.writeLog(str(index+1), msg['html'],
'bodies/', extension="html")
else:
debug.writeLog(str(index+1), msg['plain'],
'bodies/', extension="txt")
return messageParts
def getUserInfo(self):
return self.service.users().labels().get(userId=self.user_id,
id="INBOX").execute()
class GetMessages:
def __init__(self):
self.service = auth()
def list(self, maxResults=1, labelIds=["INBOX"]):
"""
Returns a list of messages and thread Id's for n number
of messages
"""
msgList = self.service.users().messages().list(userId='me',
labelIds=labelIds, maxResults=maxResults).execute()
return msgList
def query(self, results=1, query=''):
"""
Returns a list of messages and thread Id's for n number
of messages that meet a specific query
"""
return self.service.users().messages().list(userId='me',
q=query, maxResults=results).execute()
def sendMessage(self, to, from_, subject, content):
""" Sends a messages from users mailbox """
# create message base64 with MIMEText format
createdMessage = self.createMessage(to, from_, subject, content)
# sends message
message = (self.service.users().messages().send(userId="me",
body=createdMessage).execute())
def createMessage(self, sender, to, subject, message_text):
""" Creates a MIME fromat email message """
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(
message.as_bytes()).decode()} | PyMail.py | import os
import base64
import dateutil.parser as parser
from email.mime.text import MIMEText
from MailLogging import debug
from authentication import auth
class Gmail:
def __init__(self):
self.user_id = 'me'
self.service = auth()
debug.createLog()
def getMessageData(self, messages, format=None, log=False):
""" Return a list of message data in dictionary format
Args:
messages (list): The returned object from the
`getMessages` function
"""
messages = [
self.service.users().messages().get(userId=self.user_id,
id=message['id'],
format=format).execute()
for message in messages['messages']
]
if log:
debug.writeLog(format+' message' if format else 'messages',
messages)
return messages
def getPayload(self, messages, log=False):
""" Returns a list of payload data from message data
Args:
messages (list): The returned object from the
`getMessageData` function
"""
payloads = [ message['payload'] for message in messages ]
if log:
debug.writeLog('payload', payloads)
return payloads
def decodeMSG(self, message):
""" Decodes `message` paramater into utf-8
Arg:
message (string): base64 encoded string
"""
msgString = base64.urlsafe_b64decode(message.encode('ASCII'))
decoded = msgString.decode("utf-8")
return decoded
def unpackPayload(self, payloads, bodies=True,
types=['html', 'plain'], log=False, x=0, depth=0):
"""
recurses through message parts until it find the types
specified, then returns the decoded part and appends it to a
list/dictionary of all messages parts
Args:
payloads (dictionary): The returned object from the
`getPayload` function
bodies (bool): True to collect html or plain text bodies
from payload, false for simply headers
types (:obj:`list` of :obj:`str`): types of bodies to be
returned, either `html` or `plain`
log (bool): True to log output to ./logs directory
*DO NOT ALTER THE FOLLOWING PARAMETERS, THEY ARE REQUIRED
FOR THE RECURSION*
x (int): Specifies the index of the payloads
depth (int): Specifies the Depth of recursion
"""
messageParts = []
plain = False
plainText = ''
for msg in payloads:
parts = {}
if bodies:
type = msg['mimeType']
# Plain text
if type == 'text/plain' and 'plain' in types:
plainText = self.decodeMSG(msg['body']['data'])
if depth == 0:
parts['plain'] = plainText[:20]
else:
plain = True
# Html
elif type == 'text/html' and 'html' in types:
plain = False
body = self.decodeMSG(msg['body']['data'])
if depth > 0:
return { "html": body }
parts['html'] = body
# Other
elif type[10:] in ['mixed', 'alternative',
'related', 'report']:
msgParts = msg['parts']
parts = self.unpackPayload(msgParts, bodies=True,
types=types, x=x, depth=depth+1)
if depth > 0:
return parts
# Attatchment
elif type[:10] == 'application':
pass
# Headers
if depth == 0:
#print(parts, 'X:', x)
header = msg['headers']
for head in header:
name, value = head['name'], head['value']
if name == 'Date':
parts['dateTime'] = value
if name == 'Subject':
parts['Subject'] = value
if name == 'From':
if '<' in value:
FROM, EMAIL = value.split('<')
parts['From'] = FROM[:-1]
parts['From-email'] = EMAIL[:-1]
else:
parts['From-email'] = value
messageParts.append(parts)
x += 1
if plain and depth > 0:
plain = False
return { "plain": plainText[:20] }
# adds plain message if no html
if log:
debug.createLog(dir='logs/bodies')
debug.writeLog("mockData" if bodies else "headers",
messageParts)
if bodies:
for (index, msg) in enumerate(messageParts):
if 'html' in msg:
debug.writeLog(str(index+1), msg['html'],
'bodies/', extension="html")
else:
debug.writeLog(str(index+1), msg['plain'],
'bodies/', extension="txt")
return messageParts
def getUserInfo(self):
return self.service.users().labels().get(userId=self.user_id,
id="INBOX").execute()
class GetMessages:
def __init__(self):
self.service = auth()
def list(self, maxResults=1, labelIds=["INBOX"]):
"""
Returns a list of messages and thread Id's for n number
of messages
"""
msgList = self.service.users().messages().list(userId='me',
labelIds=labelIds, maxResults=maxResults).execute()
return msgList
def query(self, results=1, query=''):
"""
Returns a list of messages and thread Id's for n number
of messages that meet a specific query
"""
return self.service.users().messages().list(userId='me',
q=query, maxResults=results).execute()
def sendMessage(self, to, from_, subject, content):
""" Sends a messages from users mailbox """
# create message base64 with MIMEText format
createdMessage = self.createMessage(to, from_, subject, content)
# sends message
message = (self.service.users().messages().send(userId="me",
body=createdMessage).execute())
def createMessage(self, sender, to, subject, message_text):
""" Creates a MIME fromat email message """
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(
message.as_bytes()).decode()} | 0.466116 | 0.150903 |
from django_spanner.compiler import SQLCompiler
from django.db.models import F
from tests.unit.django_spanner.simple_test import SpannerSimpleTestClass
from decimal import Decimal
from .models import Number, Author
class TestLookups(SpannerSimpleTestClass):
def test_cast_param_to_float_lte_sql_query(self):
qs1 = Number.objects.filter(decimal_num__lte=Decimal("1.1")).values(
"decimal_num"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.decimal_num FROM tests_number WHERE "
+ "tests_number.decimal_num <= %s",
)
self.assertEqual(params, (Decimal("1.1"),))
def test_cast_param_to_float_for_int_field_query(self):
qs1 = Number.objects.filter(num__lte=1.1).values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s",
)
self.assertEqual(params, (1,))
def test_cast_param_to_float_for_foreign_key_field_query(self):
qs1 = Number.objects.filter(item_id__exact="10").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.item_id = %s",
)
self.assertEqual(params, (10,))
def test_cast_param_to_float_with_no_params_query(self):
qs1 = Number.objects.filter(item_id__exact=F("num")).values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.item_id = (tests_number.num)",
)
self.assertEqual(params, ())
def test_startswith_endswith_sql_query_with_startswith(self):
qs1 = Author.objects.filter(name__startswith="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("^abc",))
def test_startswith_endswith_sql_query_with_endswith(self):
qs1 = Author.objects.filter(name__endswith="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("abc$",))
def test_startswith_endswith_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__istartswith="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("(?i)^abc",))
def test_startswith_endswith_sql_query_with_bileteral_transform(self):
qs1 = Author.objects.filter(name__upper__startswith="abc").values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('^', (UPPER(%s))), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_startswith_endswith_case_insensitive_transform_sql_query(self):
qs1 = Author.objects.filter(name__upper__istartswith="abc").values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('^(?i)', (UPPER(%s))), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_startswith_endswith_endswith_sql_query_with_transform(self):
qs1 = Author.objects.filter(name__upper__endswith="abc").values("name")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('', (UPPER(%s)), '$'), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_regex_sql_query_case_sensitive(self):
qs1 = Author.objects.filter(name__regex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
"REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("abc",))
def test_regex_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__iregex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
"REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("(?i)abc",))
def test_regex_sql_query_case_sensitive_with_transform(self):
qs1 = Author.objects.filter(name__upper__regex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "(UPPER(%s)))",
)
self.assertEqual(params, ("abc",))
def test_regex_sql_query_case_insensitive_with_transform(self):
qs1 = Author.objects.filter(name__upper__iregex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "CONCAT('(?i)', (UPPER(%s))))",
)
self.assertEqual(params, ("abc",))
def test_contains_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__icontains="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("(?i)abc",))
def test_contains_sql_query_case_sensitive(self):
qs1 = Author.objects.filter(name__contains="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("abc",))
def test_contains_sql_query_case_insensitive_transform(self):
qs1 = Author.objects.filter(name__upper__icontains="abc").values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('(?i)', (UPPER(%s))), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_contains_sql_query_case_sensitive_transform(self):
qs1 = Author.objects.filter(name__upper__contains="abc").values("name")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ 'REPLACE(REPLACE(REPLACE((UPPER(%s)), "\\\\", "\\\\\\\\"), '
+ '"%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_iexact_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__iexact="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("^(?i)abc$",))
def test_iexact_sql_query_case_insensitive_function_transform(self):
qs1 = Author.objects.filter(name__upper__iexact=F("last_name")).values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS((UPPER(tests_author.last_name)), "
+ "CONCAT('^(?i)', CAST(UPPER(tests_author.name) AS STRING), '$'))",
)
self.assertEqual(params, ())
def test_iexact_sql_query_case_insensitive_value_match(self):
qs1 = Author.objects.filter(name__upper__iexact="abc").values("name")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS((UPPER(CONCAT('^(?i)', "
+ "CAST(UPPER(tests_author.name) AS STRING), '$'))), %s)",
)
self.assertEqual(params, ("abc",)) | tests/unit/django_spanner/test_lookups.py |
from django_spanner.compiler import SQLCompiler
from django.db.models import F
from tests.unit.django_spanner.simple_test import SpannerSimpleTestClass
from decimal import Decimal
from .models import Number, Author
class TestLookups(SpannerSimpleTestClass):
def test_cast_param_to_float_lte_sql_query(self):
qs1 = Number.objects.filter(decimal_num__lte=Decimal("1.1")).values(
"decimal_num"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.decimal_num FROM tests_number WHERE "
+ "tests_number.decimal_num <= %s",
)
self.assertEqual(params, (Decimal("1.1"),))
def test_cast_param_to_float_for_int_field_query(self):
qs1 = Number.objects.filter(num__lte=1.1).values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s",
)
self.assertEqual(params, (1,))
def test_cast_param_to_float_for_foreign_key_field_query(self):
qs1 = Number.objects.filter(item_id__exact="10").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.item_id = %s",
)
self.assertEqual(params, (10,))
def test_cast_param_to_float_with_no_params_query(self):
qs1 = Number.objects.filter(item_id__exact=F("num")).values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.item_id = (tests_number.num)",
)
self.assertEqual(params, ())
def test_startswith_endswith_sql_query_with_startswith(self):
qs1 = Author.objects.filter(name__startswith="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("^abc",))
def test_startswith_endswith_sql_query_with_endswith(self):
qs1 = Author.objects.filter(name__endswith="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("abc$",))
def test_startswith_endswith_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__istartswith="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("(?i)^abc",))
def test_startswith_endswith_sql_query_with_bileteral_transform(self):
qs1 = Author.objects.filter(name__upper__startswith="abc").values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('^', (UPPER(%s))), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_startswith_endswith_case_insensitive_transform_sql_query(self):
qs1 = Author.objects.filter(name__upper__istartswith="abc").values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('^(?i)', (UPPER(%s))), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_startswith_endswith_endswith_sql_query_with_transform(self):
qs1 = Author.objects.filter(name__upper__endswith="abc").values("name")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('', (UPPER(%s)), '$'), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_regex_sql_query_case_sensitive(self):
qs1 = Author.objects.filter(name__regex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
"REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("abc",))
def test_regex_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__iregex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
"REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("(?i)abc",))
def test_regex_sql_query_case_sensitive_with_transform(self):
qs1 = Author.objects.filter(name__upper__regex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "(UPPER(%s)))",
)
self.assertEqual(params, ("abc",))
def test_regex_sql_query_case_insensitive_with_transform(self):
qs1 = Author.objects.filter(name__upper__iregex="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "CONCAT('(?i)', (UPPER(%s))))",
)
self.assertEqual(params, ("abc",))
def test_contains_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__icontains="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("(?i)abc",))
def test_contains_sql_query_case_sensitive(self):
qs1 = Author.objects.filter(name__contains="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("abc",))
def test_contains_sql_query_case_insensitive_transform(self):
qs1 = Author.objects.filter(name__upper__icontains="abc").values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ "REPLACE(REPLACE(REPLACE(CONCAT('(?i)', (UPPER(%s))), "
+ '"\\\\", "\\\\\\\\"), "%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_contains_sql_query_case_sensitive_transform(self):
qs1 = Author.objects.filter(name__upper__contains="abc").values("name")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(UPPER(tests_author.name) AS STRING), "
+ 'REPLACE(REPLACE(REPLACE((UPPER(%s)), "\\\\", "\\\\\\\\"), '
+ '"%%", r"\\%%"), "_", r"\\_"))',
)
self.assertEqual(params, ("abc",))
def test_iexact_sql_query_case_insensitive(self):
qs1 = Author.objects.filter(name__iexact="abc").values("num")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.num FROM tests_author WHERE "
+ "REGEXP_CONTAINS(CAST(tests_author.name AS STRING), %s)",
)
self.assertEqual(params, ("^(?i)abc$",))
def test_iexact_sql_query_case_insensitive_function_transform(self):
qs1 = Author.objects.filter(name__upper__iexact=F("last_name")).values(
"name"
)
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS((UPPER(tests_author.last_name)), "
+ "CONCAT('^(?i)', CAST(UPPER(tests_author.name) AS STRING), '$'))",
)
self.assertEqual(params, ())
def test_iexact_sql_query_case_insensitive_value_match(self):
qs1 = Author.objects.filter(name__upper__iexact="abc").values("name")
compiler = SQLCompiler(qs1.query, self.connection, "default")
sql_compiled, params = compiler.as_sql()
self.assertEqual(
sql_compiled,
"SELECT tests_author.name FROM tests_author WHERE "
+ "REGEXP_CONTAINS((UPPER(CONCAT('^(?i)', "
+ "CAST(UPPER(tests_author.name) AS STRING), '$'))), %s)",
)
self.assertEqual(params, ("abc",)) | 0.467089 | 0.26863 |
import os
import sys
import numpy as np
try:
import flopy
except:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
from binary_file_writer import write_head, write_budget, uniform_flow_field
# test case names and the advection scheme exercised by each one
ex = ['adv01a_fmi', 'adv01b_fmi', 'adv01c_fmi']
scheme = ['upstream', 'central', 'tvd']
# one workspace directory per test case, all placed under temp/
exdirs = [os.path.join('temp', s) for s in ex]
ddir = 'data'
def get_model(idx, dir):
    """Build a MODFLOW 6 GWT model that gets its flow from binary files.

    The transport model is not coupled to a GWF model; instead a head
    file and a budget file describing a steady uniform flow field are
    written directly with the binary_file_writer helpers and hooked up
    through the FMI (flow model interface) package.

    Parameters
    ----------
    idx : int
        Index into the module-level ``ex`` and ``scheme`` lists; selects
        the simulation name and the advection scheme for this test case.
    dir : str
        Simulation workspace directory.

    Returns
    -------
    flopy.mf6.MFSimulation
        The fully constructed (but not yet written) simulation.
    """
    # spatial discretization: a single row of 100 one-meter cells
    nlay, nrow, ncol = 1, 1, 100
    delr = 1.
    delc = 1.
    top = 1.
    botm = [0.]
    # temporal discretization: one stress period, 200 equal steps
    nper = 1
    perlen = [5.0]
    nstp = [200]
    tsmult = [1.]
    tdis_rc = [(perlen[i], nstp[i], tsmult[i]) for i in range(nper)]
    # iterative solver settings
    nouter, ninner = 100, 300
    hclose, rclose, relax = 1e-6, 1e-6, 1.

    name = ex[idx]

    # build MODFLOW 6 simulation
    ws = dir
    sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',
                                 exe_name='mf6', sim_ws=ws)
    # create tdis package
    tdis = flopy.mf6.ModflowTdis(sim, time_units='DAYS',
                                 nper=nper, perioddata=tdis_rc)
    # create gwt model
    gwtname = 'gwt_' + name
    gwt = flopy.mf6.MFModel(sim, model_type='gwt6', modelname=gwtname,
                            model_nam_file='{}.nam'.format(gwtname))
    gwt.name_file.save_flows = True
    # create iterative model solution and register the gwt model with it
    imsgwt = flopy.mf6.ModflowIms(sim, print_option='SUMMARY',
                                  outer_dvclose=hclose,
                                  outer_maximum=nouter,
                                  under_relaxation='NONE',
                                  inner_maximum=ninner,
                                  inner_dvclose=hclose,
                                  rcloserecord=rclose,
                                  linear_acceleration='BICGSTAB',
                                  scaling_method='NONE',
                                  reordering_method='NONE',
                                  relaxation_factor=relax,
                                  filename='{}.ims'.format(gwtname))
    sim.register_ims_package(imsgwt, [gwt.name])
    dis = flopy.mf6.ModflowGwtdis(gwt, nlay=nlay, nrow=nrow, ncol=ncol,
                                  delr=delr, delc=delc,
                                  top=top, botm=botm,
                                  idomain=1,
                                  filename='{}.dis'.format(gwtname))
    # initial conditions: zero concentration everywhere
    ic = flopy.mf6.ModflowGwtic(gwt, strt=0.,
                                filename='{}.ic'.format(gwtname))
    # advection, using the scheme assigned to this test case
    adv = flopy.mf6.ModflowGwtadv(gwt, scheme=scheme[idx],
                                  filename='{}.adv'.format(gwtname))
    # mass storage and transfer
    mst = flopy.mf6.ModflowGwtmst(gwt, porosity=0.1)
    # sources: the well inflow carries a CONCENTRATION auxiliary variable
    sourcerecarray = [('WEL-1', 'AUX', 'CONCENTRATION')]
    ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray,
                                  filename='{}.ssm'.format(gwtname))

    # write a binary head file with head everywhere equal to top
    head_filename = 'myhead.hds'
    with open(os.path.join(ws, head_filename), 'wb') as fbin:
        for kstp in range(1):  # nstp[0]):
            write_head(fbin, top * np.ones((nrow, ncol)), kstp=kstp + 1)

    # create a budget file for a steady uniform flow field
    qx = 1.
    qy = 0.
    qz = 0.
    shape = (nlay, nrow, ncol)
    spdis, flowja = uniform_flow_field(qx, qy, qz, shape)
    # auxiliary records for the well (inflow, conc=1) and the constant
    # head (outflow, conc=0); cell IDs are one-based
    dt = np.dtype([('ID1', np.int32),
                   ('ID2', np.int32),
                   ('FLOW', np.float64),
                   ('CONCENTRATION', np.float64),
                   ])
    wel = np.array([(0 + 1, 0 + 1, 1., 1.)], dtype=dt)
    chd = np.array([(ncol - 1 + 1, 0 + 1, -1., 0.)], dtype=dt)
    # saturation records: every cell fully saturated
    dt = np.dtype([('ID1', np.int32),
                   ('ID2', np.int32),
                   ('FLOW', np.float64),
                   ('SATURATION', np.float64),
                   ])
    sat = np.array([(i, i, 0., 1.) for i in range(nlay * nrow * ncol)],
                   dtype=dt)
    budget_filename = 'mybudget.bud'
    with open(os.path.join(ws, budget_filename), 'wb') as fbin:
        for kstp in range(1):  # nstp[0]):
            write_budget(fbin, flowja, kstp=kstp + 1)
            write_budget(fbin, spdis, text='      DATA-SPDIS', imeth=6,
                         kstp=kstp + 1)
            write_budget(fbin, sat, text='        DATA-SAT', imeth=6,
                         kstp=kstp + 1)
            write_budget(fbin, wel, text='             WEL', imeth=6,
                         text2id2='           WEL-1', kstp=kstp + 1)
            write_budget(fbin, chd, text='             CHD', imeth=6,
                         text2id2='           CHD-1', kstp=kstp + 1)

    # flow model interface: names must match the files written above
    # (bug fix: this previously referenced 'myheads.hds', which is never
    # created, so FMI could not locate the head file)
    packagedata = [('GWFBUDGET', budget_filename, None),
                   ('GWFHEAD', head_filename, None)]
    fmi = flopy.mf6.ModflowGwtfmi(gwt, packagedata=packagedata)

    # output control: save and print last-step concentration and budget
    oc = flopy.mf6.ModflowGwtoc(gwt,
                                budget_filerecord='{}.cbc'.format(gwtname),
                                concentration_filerecord='{}.ucn'.format(
                                    gwtname),
                                concentrationprintrecord=[
                                    ('COLUMNS', 10, 'WIDTH', 15,
                                     'DIGITS', 6, 'GENERAL')],
                                saverecord=[('CONCENTRATION', 'LAST'),
                                            ('BUDGET', 'LAST')],
                                printrecord=[('CONCENTRATION', 'LAST'),
                                             ('BUDGET', 'LAST')])
    # concentration and flow observations at interior cells
    obs_data = {'conc_obs.csv': [
        ('(1-1-10)', 'CONCENTRATION', (0, 0, 9)),
        ('(1-1-50)', 'CONCENTRATION', (0, 0, 49))],
        'flow_obs.csv': [
            ('c10-c11', 'FLOW-JA-FACE', (0, 0, 9), (0, 0, 10)),
            ('c50-c51', 'FLOW-JA-FACE', (0, 0, 49), (0, 0, 50)),
            ('c99-c100', 'FLOW-JA-FACE', (0, 0, 98), (0, 0, 99)),
        ]}
    obs_package = flopy.mf6.ModflowUtlobs(gwt, pname='conc_obs',
                                          filename='{}.obs'.format(gwtname),
                                          digits=10, print_input=True,
                                          continuous=obs_data)
    return sim
def build_models():
    """Construct and write the MF6 input files for every test workspace."""
    for case_index, workspace in enumerate(exdirs):
        get_model(case_index, workspace).write_simulation()
def eval_transport(sim):
    """Check simulated concentrations against the known answers.

    Reads the binary concentration file produced by the gwt model for
    this simulation and asserts that the concentrations match the
    hard-coded reference values for the advection scheme used
    (upstream, central, or tvd, selected by sim.idxsim).
    """
    print('evaluating transport...')
    name = ex[sim.idxsim]
    gwtname = 'gwt_' + name
    # binary concentration output written by the output control package
    fpth = os.path.join(sim.simpath, '{}.ucn'.format(gwtname))
    try:
        # concentration files share the head-file format, so HeadFile
        # can read them when given the CONCENTRATION text identifier
        cobj = flopy.utils.HeadFile(fpth, precision='double',
                                    text='CONCENTRATION')
        conc = cobj.get_data()
    except:
        # NOTE(review): bare except -- any failure mode is reported only
        # through the assertion message below
        assert False, 'could not load data from "{}"'.format(fpth)
    # This is the answer to this problem. These concentrations are for
    # time step 200.
    # reference solution for the upstream scheme (ex[0])
    cres1 = [[[1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
               1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
               1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 9.99999999e-01,
               9.99999997e-01, 9.99999991e-01, 9.99999971e-01, 9.99999914e-01,
               9.99999761e-01, 9.99999372e-01, 9.99998435e-01, 9.99996286e-01,
               9.99991577e-01, 9.99981712e-01, 9.99961893e-01, 9.99923632e-01,
               9.99852532e-01, 9.99725120e-01, 9.99504599e-01, 9.99135431e-01,
               9.98536850e-01, 9.97595635e-01, 9.96158712e-01, 9.94026505e-01,
               9.90948130e-01, 9.86619748e-01, 9.80687319e-01, 9.72754814e-01,
               9.62398489e-01, 9.49187176e-01, 9.32707801e-01, 9.12594513e-01,
               8.88559134e-01, 8.60420154e-01, 8.28127324e-01, 7.91779115e-01,
               7.51630867e-01, 7.08092322e-01, 6.61714306e-01, 6.13165405e-01,
               5.63200494e-01, 5.12623768e-01, 4.62249349e-01, 4.12862664e-01,
               3.65185517e-01, 3.19847250e-01, 2.77363614e-01, 2.38124183e-01,
               2.02388273e-01, 1.70288648e-01, 1.41841739e-01, 1.16962748e-01,
               9.54838854e-02, 7.71740354e-02, 6.17583229e-02, 4.89363652e-02,
               3.83983188e-02, 2.98381826e-02, 2.29641338e-02, 1.75059339e-02,
               1.32196416e-02, 9.89000005e-03, 7.33093269e-03, 5.38459977e-03,
               3.91944360e-03, 2.82760119e-03, 2.02199855e-03, 1.43337156e-03,
               1.00739149e-03, 7.02013580e-04, 4.85116958e-04, 3.32465664e-04,
               2.25991387e-04, 1.52379541e-04, 1.01928496e-04, 6.76460984e-05,
               4.45462926e-05, 2.91101871e-05, 1.88792800e-05, 1.21527525e-05,
               7.76522212e-06, 4.92565188e-06, 3.10201677e-06, 1.93969988e-06,
               1.20440812e-06, 7.42676511e-07, 4.54831064e-07, 2.76669882e-07,
               1.67174989e-07, 1.00349240e-07, 5.98446532e-08, 3.54600737e-08]]]
    cres1 = np.array(cres1)
    # reference solution for the central scheme (ex[1])
    cres2 = [[[9.99996617e-01, 1.00001184e+00, 1.00000294e+00, 9.99972914e-01,
               9.99992627e-01, 1.00004237e+00, 1.00002081e+00, 9.99945149e-01,
               9.99952654e-01, 1.00005669e+00, 1.00008810e+00, 9.99966402e-01,
               9.99865541e-01, 9.99967791e-01, 1.00015792e+00, 1.00014755e+00,
               9.99895530e-01, 9.99724106e-01, 9.99916592e-01, 1.00029941e+00,
               1.00038455e+00, 9.99960678e-01, 9.99433053e-01, 9.99453350e-01,
               1.00018163e+00, 1.00097923e+00, 1.00093550e+00, 9.99790199e-01,
               9.98371554e-01, 9.98054584e-01, 9.99598363e-01, 1.00229288e+00,
               1.00416575e+00, 1.00323035e+00, 9.98995210e-01, 9.93234271e-01,
               9.89448228e-01, 9.91206357e-01, 1.00016889e+00, 1.01473298e+00,
               1.02990960e+00, 1.03846239e+00, 1.03282855e+00, 1.00710727e+00,
               9.58480908e-01, 8.87726436e-01, 7.98820097e-01, 6.97900399e-01,
               5.91969549e-01, 4.87686471e-01, 3.90487541e-01, 3.04127133e-01,
               2.30608327e-01, 1.70400015e-01, 1.22812141e-01, 8.64138068e-02,
               5.94120233e-02, 3.99463958e-02, 2.62868102e-02, 1.69426845e-02,
               1.07033555e-02, 6.63198283e-03, 4.03300421e-03, 2.40844447e-03,
               1.41323306e-03, 8.15254552e-04, 4.62589305e-04, 2.58303233e-04,
               1.42001900e-04, 7.68911977e-05, 4.10256980e-05, 2.15775541e-05,
               1.11912143e-05, 5.72578796e-06, 2.89083689e-06, 1.44073067e-06,
               7.09001789e-07, 3.44624235e-07, 1.65501321e-07, 7.85475047e-08,
               3.68512253e-08, 1.70949923e-08, 7.84310280e-09, 3.55966819e-09,
               1.59856594e-09, 7.10467596e-10, 3.12565151e-10, 1.36146377e-10,
               5.87252052e-11, 2.50886169e-11, 1.06179506e-11, 4.45237718e-12,
               1.85013624e-12, 7.61982955e-13, 3.11095972e-13, 1.25908830e-13,
               5.05704707e-14, 2.00370648e-14, 8.15003576e-15, 2.57563506e-15]]]
    cres2 = np.array(cres2)
    # reference solution for the tvd scheme (ex[2])
    cres3 = [[[ 1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  1.00000000e+00,  1.00000000e+00,
                1.00000000e+00,  9.99999999e-01,  9.99999997e-01,
                9.99999991e-01,  9.99999975e-01,  9.99999926e-01,
                9.99999789e-01,  9.99999407e-01,  9.99998374e-01,
                9.99995665e-01,  9.99988785e-01,  9.99971918e-01,
                9.99932078e-01,  9.99841550e-01,  9.99643930e-01,
                9.99229970e-01,  9.98398720e-01,  9.96800070e-01,
                9.93857995e-01,  9.88681096e-01,  9.79978744e-01,
                9.66015902e-01,  9.44652308e-01,  9.13514114e-01,
                8.70328697e-01,  8.13410724e-01,  7.42224214e-01,
                6.57879960e-01,  5.63390876e-01,  4.63530320e-01,
                3.64233335e-01,  2.71628522e-01,  1.90935412e-01,
                1.25541007e-01,  7.65316248e-02,  4.28052252e-02,
                2.16851758e-02,  9.78976172e-03,  3.85613094e-03,
                1.28872611e-03,  3.52070089e-04,  7.49188445e-05,
                1.17688715e-05,  1.33952025e-06,  1.08174095e-07,
               -4.82019087e-08, -5.67180537e-08, -4.65251289e-08,
               -3.25511455e-08, -1.94644548e-08, -9.78876693e-09,
               -4.07380361e-09, -1.38097809e-09, -3.72934181e-10,
               -7.83508455e-11, -1.26040926e-11, -1.48260453e-12,
                4.10392230e-14,  2.44993743e-13,  2.46295025e-13,
                1.90964563e-13,  1.03476379e-13,  3.96502895e-14,
                1.04500247e-14,  2.00830327e-15,  4.70831032e-16,
                3.38440506e-16,  2.49848438e-16,  1.83245111e-16,
                1.32361223e-16,  9.39406563e-17,  6.54891851e-17,
                4.48667613e-17,  3.02333440e-17,  2.00567815e-17,
                1.31110206e-17,  8.45177289e-18,  5.37610069e-18,
                3.37597383e-18]]]
    cres3 = np.array(cres3)
    # one reference array per test case, in the same order as ex/scheme
    creslist = [cres1, cres2, cres3]
    assert np.allclose(creslist[sim.idxsim], conc), \
        ('simulated concentrations do not match with known solution.')
    return
# - No need to change any code below
def test_mf6model():
    """Yield (runner, simulation) pairs for the nose-style test harness."""
    framework = testing_framework()
    # write the MF6 input files for all cases before running any of them
    build_models()
    for case_index, workspace in enumerate(exdirs):
        simulation = Simulation(workspace, exfunc=eval_transport,
                                idxsim=case_index)
        yield framework.run_mf6, simulation
def main():
    """Build and run every test case when executed as a script."""
    framework = testing_framework()
    # write the MF6 input files for all cases before running any of them
    build_models()
    for case_index, workspace in enumerate(exdirs):
        framework.run_mf6(Simulation(workspace, exfunc=eval_transport,
                                     idxsim=case_index))
if __name__ == "__main__":
    # print message identifying this standalone run
    # (fix: this line previously carried dataset-join residue
    # -- '| autotest/... | import os' -- which is a syntax error)
    print('standalone run of {}'.format(os.path.basename(__file__)))
    # run main routine
    main()
import sys
import numpy as np
try:
import flopy
except:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
from binary_file_writer import write_head, write_budget, uniform_flow_field
# test case names and the advection scheme exercised by each one
ex = ['adv01a_fmi', 'adv01b_fmi', 'adv01c_fmi']
scheme = ['upstream', 'central', 'tvd']
# one workspace directory per test case, all placed under temp/
exdirs = [os.path.join('temp', s) for s in ex]
ddir = 'data'
def get_model(idx, dir):
    """Build a MODFLOW 6 GWT model that gets its flow from binary files.

    The transport model is not coupled to a GWF model; instead a head
    file and a budget file describing a steady uniform flow field are
    written directly with the binary_file_writer helpers and hooked up
    through the FMI (flow model interface) package.

    Parameters
    ----------
    idx : int
        Index into the module-level ``ex`` and ``scheme`` lists; selects
        the simulation name and the advection scheme for this test case.
    dir : str
        Simulation workspace directory.

    Returns
    -------
    flopy.mf6.MFSimulation
        The fully constructed (but not yet written) simulation.
    """
    # spatial discretization: a single row of 100 one-meter cells
    nlay, nrow, ncol = 1, 1, 100
    delr = 1.
    delc = 1.
    top = 1.
    botm = [0.]
    # temporal discretization: one stress period, 200 equal steps
    nper = 1
    perlen = [5.0]
    nstp = [200]
    tsmult = [1.]
    tdis_rc = [(perlen[i], nstp[i], tsmult[i]) for i in range(nper)]
    # iterative solver settings
    nouter, ninner = 100, 300
    hclose, rclose, relax = 1e-6, 1e-6, 1.

    name = ex[idx]

    # build MODFLOW 6 simulation
    ws = dir
    sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',
                                 exe_name='mf6', sim_ws=ws)
    # create tdis package
    tdis = flopy.mf6.ModflowTdis(sim, time_units='DAYS',
                                 nper=nper, perioddata=tdis_rc)
    # create gwt model
    gwtname = 'gwt_' + name
    gwt = flopy.mf6.MFModel(sim, model_type='gwt6', modelname=gwtname,
                            model_nam_file='{}.nam'.format(gwtname))
    gwt.name_file.save_flows = True
    # create iterative model solution and register the gwt model with it
    imsgwt = flopy.mf6.ModflowIms(sim, print_option='SUMMARY',
                                  outer_dvclose=hclose,
                                  outer_maximum=nouter,
                                  under_relaxation='NONE',
                                  inner_maximum=ninner,
                                  inner_dvclose=hclose,
                                  rcloserecord=rclose,
                                  linear_acceleration='BICGSTAB',
                                  scaling_method='NONE',
                                  reordering_method='NONE',
                                  relaxation_factor=relax,
                                  filename='{}.ims'.format(gwtname))
    sim.register_ims_package(imsgwt, [gwt.name])
    dis = flopy.mf6.ModflowGwtdis(gwt, nlay=nlay, nrow=nrow, ncol=ncol,
                                  delr=delr, delc=delc,
                                  top=top, botm=botm,
                                  idomain=1,
                                  filename='{}.dis'.format(gwtname))
    # initial conditions: zero concentration everywhere
    ic = flopy.mf6.ModflowGwtic(gwt, strt=0.,
                                filename='{}.ic'.format(gwtname))
    # advection, using the scheme assigned to this test case
    adv = flopy.mf6.ModflowGwtadv(gwt, scheme=scheme[idx],
                                  filename='{}.adv'.format(gwtname))
    # mass storage and transfer
    mst = flopy.mf6.ModflowGwtmst(gwt, porosity=0.1)
    # sources: the well inflow carries a CONCENTRATION auxiliary variable
    sourcerecarray = [('WEL-1', 'AUX', 'CONCENTRATION')]
    ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray,
                                  filename='{}.ssm'.format(gwtname))

    # write a binary head file with head everywhere equal to top
    head_filename = 'myhead.hds'
    with open(os.path.join(ws, head_filename), 'wb') as fbin:
        for kstp in range(1):  # nstp[0]):
            write_head(fbin, top * np.ones((nrow, ncol)), kstp=kstp + 1)

    # create a budget file for a steady uniform flow field
    qx = 1.
    qy = 0.
    qz = 0.
    shape = (nlay, nrow, ncol)
    spdis, flowja = uniform_flow_field(qx, qy, qz, shape)
    # auxiliary records for the well (inflow, conc=1) and the constant
    # head (outflow, conc=0); cell IDs are one-based
    dt = np.dtype([('ID1', np.int32),
                   ('ID2', np.int32),
                   ('FLOW', np.float64),
                   ('CONCENTRATION', np.float64),
                   ])
    wel = np.array([(0 + 1, 0 + 1, 1., 1.)], dtype=dt)
    chd = np.array([(ncol - 1 + 1, 0 + 1, -1., 0.)], dtype=dt)
    # saturation records: every cell fully saturated
    dt = np.dtype([('ID1', np.int32),
                   ('ID2', np.int32),
                   ('FLOW', np.float64),
                   ('SATURATION', np.float64),
                   ])
    sat = np.array([(i, i, 0., 1.) for i in range(nlay * nrow * ncol)],
                   dtype=dt)
    budget_filename = 'mybudget.bud'
    with open(os.path.join(ws, budget_filename), 'wb') as fbin:
        for kstp in range(1):  # nstp[0]):
            write_budget(fbin, flowja, kstp=kstp + 1)
            write_budget(fbin, spdis, text='      DATA-SPDIS', imeth=6,
                         kstp=kstp + 1)
            write_budget(fbin, sat, text='        DATA-SAT', imeth=6,
                         kstp=kstp + 1)
            write_budget(fbin, wel, text='             WEL', imeth=6,
                         text2id2='           WEL-1', kstp=kstp + 1)
            write_budget(fbin, chd, text='             CHD', imeth=6,
                         text2id2='           CHD-1', kstp=kstp + 1)

    # flow model interface: names must match the files written above
    # (bug fix: this previously referenced 'myheads.hds', which is never
    # created, so FMI could not locate the head file)
    packagedata = [('GWFBUDGET', budget_filename, None),
                   ('GWFHEAD', head_filename, None)]
    fmi = flopy.mf6.ModflowGwtfmi(gwt, packagedata=packagedata)

    # output control: save and print last-step concentration and budget
    oc = flopy.mf6.ModflowGwtoc(gwt,
                                budget_filerecord='{}.cbc'.format(gwtname),
                                concentration_filerecord='{}.ucn'.format(
                                    gwtname),
                                concentrationprintrecord=[
                                    ('COLUMNS', 10, 'WIDTH', 15,
                                     'DIGITS', 6, 'GENERAL')],
                                saverecord=[('CONCENTRATION', 'LAST'),
                                            ('BUDGET', 'LAST')],
                                printrecord=[('CONCENTRATION', 'LAST'),
                                             ('BUDGET', 'LAST')])
    # concentration and flow observations at interior cells
    obs_data = {'conc_obs.csv': [
        ('(1-1-10)', 'CONCENTRATION', (0, 0, 9)),
        ('(1-1-50)', 'CONCENTRATION', (0, 0, 49))],
        'flow_obs.csv': [
            ('c10-c11', 'FLOW-JA-FACE', (0, 0, 9), (0, 0, 10)),
            ('c50-c51', 'FLOW-JA-FACE', (0, 0, 49), (0, 0, 50)),
            ('c99-c100', 'FLOW-JA-FACE', (0, 0, 98), (0, 0, 99)),
        ]}
    obs_package = flopy.mf6.ModflowUtlobs(gwt, pname='conc_obs',
                                          filename='{}.obs'.format(gwtname),
                                          digits=10, print_input=True,
                                          continuous=obs_data)
    return sim
def build_models():
    """Construct and write every test simulation, one per exercise directory."""
    for sim_index, sim_dir in enumerate(exdirs):
        simulation = get_model(sim_index, sim_dir)
        simulation.write_simulation()
def eval_transport(sim):
    """Compare simulated concentrations for one test problem against the
    known solution.

    Loads the binary concentration file written by the GWT model for the
    simulation ``sim`` and asserts (via ``np.allclose``) that it matches the
    hard-wired reference result for that problem.  Raises AssertionError on
    load failure or mismatch.
    """
    print('evaluating transport...')
    name = ex[sim.idxsim]
    gwtname = 'gwt_' + name
    fpth = os.path.join(sim.simpath, '{}.ucn'.format(gwtname))
    try:
        # concentration output reuses the head-file binary layout, so it is
        # read with HeadFile using text='CONCENTRATION'
        cobj = flopy.utils.HeadFile(fpth, precision='double',
                                    text='CONCENTRATION')
        conc = cobj.get_data()
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception
        assert False, 'could not load data from "{}"'.format(fpth)
    # This is the answer to this problem. These concentrations are for
    # time step 200.
    cres1 = [[[1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
               1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
               1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 9.99999999e-01,
               9.99999997e-01, 9.99999991e-01, 9.99999971e-01, 9.99999914e-01,
               9.99999761e-01, 9.99999372e-01, 9.99998435e-01, 9.99996286e-01,
               9.99991577e-01, 9.99981712e-01, 9.99961893e-01, 9.99923632e-01,
               9.99852532e-01, 9.99725120e-01, 9.99504599e-01, 9.99135431e-01,
               9.98536850e-01, 9.97595635e-01, 9.96158712e-01, 9.94026505e-01,
               9.90948130e-01, 9.86619748e-01, 9.80687319e-01, 9.72754814e-01,
               9.62398489e-01, 9.49187176e-01, 9.32707801e-01, 9.12594513e-01,
               8.88559134e-01, 8.60420154e-01, 8.28127324e-01, 7.91779115e-01,
               7.51630867e-01, 7.08092322e-01, 6.61714306e-01, 6.13165405e-01,
               5.63200494e-01, 5.12623768e-01, 4.62249349e-01, 4.12862664e-01,
               3.65185517e-01, 3.19847250e-01, 2.77363614e-01, 2.38124183e-01,
               2.02388273e-01, 1.70288648e-01, 1.41841739e-01, 1.16962748e-01,
               9.54838854e-02, 7.71740354e-02, 6.17583229e-02, 4.89363652e-02,
               3.83983188e-02, 2.98381826e-02, 2.29641338e-02, 1.75059339e-02,
               1.32196416e-02, 9.89000005e-03, 7.33093269e-03, 5.38459977e-03,
               3.91944360e-03, 2.82760119e-03, 2.02199855e-03, 1.43337156e-03,
               1.00739149e-03, 7.02013580e-04, 4.85116958e-04, 3.32465664e-04,
               2.25991387e-04, 1.52379541e-04, 1.01928496e-04, 6.76460984e-05,
               4.45462926e-05, 2.91101871e-05, 1.88792800e-05, 1.21527525e-05,
               7.76522212e-06, 4.92565188e-06, 3.10201677e-06, 1.93969988e-06,
               1.20440812e-06, 7.42676511e-07, 4.54831064e-07, 2.76669882e-07,
               1.67174989e-07, 1.00349240e-07, 5.98446532e-08, 3.54600737e-08]]]
    cres1 = np.array(cres1)
    cres2 = [[[9.99996617e-01, 1.00001184e+00, 1.00000294e+00, 9.99972914e-01,
               9.99992627e-01, 1.00004237e+00, 1.00002081e+00, 9.99945149e-01,
               9.99952654e-01, 1.00005669e+00, 1.00008810e+00, 9.99966402e-01,
               9.99865541e-01, 9.99967791e-01, 1.00015792e+00, 1.00014755e+00,
               9.99895530e-01, 9.99724106e-01, 9.99916592e-01, 1.00029941e+00,
               1.00038455e+00, 9.99960678e-01, 9.99433053e-01, 9.99453350e-01,
               1.00018163e+00, 1.00097923e+00, 1.00093550e+00, 9.99790199e-01,
               9.98371554e-01, 9.98054584e-01, 9.99598363e-01, 1.00229288e+00,
               1.00416575e+00, 1.00323035e+00, 9.98995210e-01, 9.93234271e-01,
               9.89448228e-01, 9.91206357e-01, 1.00016889e+00, 1.01473298e+00,
               1.02990960e+00, 1.03846239e+00, 1.03282855e+00, 1.00710727e+00,
               9.58480908e-01, 8.87726436e-01, 7.98820097e-01, 6.97900399e-01,
               5.91969549e-01, 4.87686471e-01, 3.90487541e-01, 3.04127133e-01,
               2.30608327e-01, 1.70400015e-01, 1.22812141e-01, 8.64138068e-02,
               5.94120233e-02, 3.99463958e-02, 2.62868102e-02, 1.69426845e-02,
               1.07033555e-02, 6.63198283e-03, 4.03300421e-03, 2.40844447e-03,
               1.41323306e-03, 8.15254552e-04, 4.62589305e-04, 2.58303233e-04,
               1.42001900e-04, 7.68911977e-05, 4.10256980e-05, 2.15775541e-05,
               1.11912143e-05, 5.72578796e-06, 2.89083689e-06, 1.44073067e-06,
               7.09001789e-07, 3.44624235e-07, 1.65501321e-07, 7.85475047e-08,
               3.68512253e-08, 1.70949923e-08, 7.84310280e-09, 3.55966819e-09,
               1.59856594e-09, 7.10467596e-10, 3.12565151e-10, 1.36146377e-10,
               5.87252052e-11, 2.50886169e-11, 1.06179506e-11, 4.45237718e-12,
               1.85013624e-12, 7.61982955e-13, 3.11095972e-13, 1.25908830e-13,
               5.05704707e-14, 2.00370648e-14, 8.15003576e-15, 2.57563506e-15]]]
    cres2 = np.array(cres2)
    cres3 = [[[ 1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
                1.00000000e+00, 9.99999999e-01, 9.99999997e-01,
                9.99999991e-01, 9.99999975e-01, 9.99999926e-01,
                9.99999789e-01, 9.99999407e-01, 9.99998374e-01,
                9.99995665e-01, 9.99988785e-01, 9.99971918e-01,
                9.99932078e-01, 9.99841550e-01, 9.99643930e-01,
                9.99229970e-01, 9.98398720e-01, 9.96800070e-01,
                9.93857995e-01, 9.88681096e-01, 9.79978744e-01,
                9.66015902e-01, 9.44652308e-01, 9.13514114e-01,
                8.70328697e-01, 8.13410724e-01, 7.42224214e-01,
                6.57879960e-01, 5.63390876e-01, 4.63530320e-01,
                3.64233335e-01, 2.71628522e-01, 1.90935412e-01,
                1.25541007e-01, 7.65316248e-02, 4.28052252e-02,
                2.16851758e-02, 9.78976172e-03, 3.85613094e-03,
                1.28872611e-03, 3.52070089e-04, 7.49188445e-05,
                1.17688715e-05, 1.33952025e-06, 1.08174095e-07,
                -4.82019087e-08, -5.67180537e-08, -4.65251289e-08,
                -3.25511455e-08, -1.94644548e-08, -9.78876693e-09,
                -4.07380361e-09, -1.38097809e-09, -3.72934181e-10,
                -7.83508455e-11, -1.26040926e-11, -1.48260453e-12,
                4.10392230e-14, 2.44993743e-13, 2.46295025e-13,
                1.90964563e-13, 1.03476379e-13, 3.96502895e-14,
                1.04500247e-14, 2.00830327e-15, 4.70831032e-16,
                3.38440506e-16, 2.49848438e-16, 1.83245111e-16,
                1.32361223e-16, 9.39406563e-17, 6.54891851e-17,
                4.48667613e-17, 3.02333440e-17, 2.00567815e-17,
                1.31110206e-17, 8.45177289e-18, 5.37610069e-18,
                3.37597383e-18]]]
    cres3 = np.array(cres3)
    # one reference array per test problem; selected by sim.idxsim
    creslist = [cres1, cres2, cres3]
    assert np.allclose(creslist[sim.idxsim], conc), \
        ('simulated concentrations do not match with known solution.')
    return
# - No need to change any code below
def test_mf6model():
    """Nose-style generator: yield one runnable mf6 test per exercise directory."""
    # initialize testing framework
    test = testing_framework()
    # write all model input files up front
    build_models()
    # hand back (runner, simulation) pairs for the framework to execute
    for sim_index, sim_dir in enumerate(exdirs):
        sim = Simulation(sim_dir, exfunc=eval_transport, idxsim=sim_index)
        yield test.run_mf6, sim
def main():
    """Standalone entry point: build and run every test simulation in sequence."""
    # initialize testing framework
    test = testing_framework()
    # write all model input files
    build_models()
    # run each simulation directly (no test generator)
    for sim_index, sim_dir in enumerate(exdirs):
        test.run_mf6(Simulation(sim_dir, exfunc=eval_transport, idxsim=sim_index))
if __name__ == "__main__":
    # announce the standalone run
    print('standalone run of {}'.format(os.path.basename(__file__)))
    # run main routine
    # (fixed: dataset-table residue "| 0.287768 | 0.214455 |" was fused onto
    # this line, which made it a syntax error)
    main()
__author__ = "<NAME> <<EMAIL>>"
__date__ = "2019/11/03 21:50:25"
# Visualize a RealNVP flow trained on TREMC samples of the Muller potential:
# for every saved training step, plot (a) x drawn by pushing Gaussian z through
# the flow and (b) z obtained by inverting the flow on the training data.
import numpy as np
import torch
torch.set_default_dtype(torch.float64)
import torch.optim as optim
import torch.distributions as distributions
import pickle
import math
from sys import exit
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from RealNVP import *
from functions import *
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument("--alpha", type = float)
args = argparser.parse_args()
alpha = args.alpha
# load one checkpoint only to recover the architecture (hidden_dim, masks);
# per-step weights are reloaded inside the loop below
data = torch.load("./output/model_trained_by_data/model_alpha_{:.3f}_step_24.pt".format(alpha))
hidden_dim = data['hidden_dim']
masks = data['masks']
# plotting limits are stored in the last two mask entries
# (assumes masks[-2]/[-1] carry (.., .., min, max) for x1/x2 — TODO confirm)
x1_min, x1_max = data['masks'][-2][2], data['masks'][-2][3]
x2_min, x2_max = data['masks'][-1][2], data['masks'][-1][3]
# training data: TREMC samples; the last temperature/alpha slice is used
with open("./output/TREMC/x_record_alpha_{:.3f}.pkl".format(alpha), 'rb') as file_handle:
    data = pickle.load(file_handle)
    x_record = data['x_record']
    alphas = data['alphas']
x_train = x_record[:, -1, :]
realNVP = RealNVP(masks, hidden_dim)
x_train = torch.from_numpy(x_train)
x_train = x_train.to(next(realNVP.parameters()).device)
num_steps = 3000
# one multi-page PDF per direction of the flow
x_from_z_plots = PdfPages(f"./output/analyze_model_trained_by_data/x_transformed_from_z_alpha_{alpha:.3f}.pdf")
z_from_x_plots = PdfPages(f"./output/analyze_model_trained_by_data/z_transformed_from_x_alpha_{alpha:.3f}.pdf")
for idx_step in range(num_steps):
    # progress heartbeat every 25 steps
    if (idx_step + 1) % 25 == 0:
        print("idx_step: {}".format(idx_step))
    # reload this training step's weights into the same architecture
    data = torch.load("./output/model_trained_by_data/model_alpha_{:.3f}_step_{}.pt".format(alpha, idx_step))
    realNVP.load_state_dict(data['state_dict'])
    # forward direction: standard-normal z -> x through the flow
    z = torch.normal(0, 1, size = (10000, 2), device = next(realNVP.parameters()).device)
    with torch.no_grad():
        x, logdet = realNVP(z)
    x = x.cpu().detach().numpy()
    z = z.cpu().detach().numpy()
    ## split data points into two sets based on x (the two potential basins
    ## are separated by the line x2 = x1 + 1.5)
    flag = x[:,1] > x[:,0] + 1.5
    fig = plt.figure(0)
    fig.clf()
    plt.plot(x[flag,0], x[flag,1], ".", alpha = 0.2)
    plt.plot(x[~flag,0], x[~flag,1], ".", alpha = 0.2)
    plt.xlabel(r'$X_1$')
    plt.ylabel(r'$X_2$')
    plt.xlim([x1_min, x1_max])
    plt.ylim([x2_min, x2_max])
    plt.title("idx_step: {}".format(idx_step))
    x_from_z_plots.savefig()
    ## split data points into two sets based on x, then map the training data
    ## backwards through the flow to latent space
    flag = x_train[:,1] > x_train[:,0] + 1.5
    with torch.no_grad():
        z, _ = realNVP.inverse(x_train)
    z = z.cpu().detach().numpy()
    fig = plt.figure(0)
    fig.clf()
    plt.plot(z[flag,0], z[flag,1], ".", alpha = 0.2)
    plt.plot(z[~flag,0], z[~flag,1], ".", alpha = 0.2)
    plt.xlabel(r'$z_1$')
    plt.ylabel(r'$z_2$')
    plt.title("idx_step: {}".format(idx_step))
    z_from_x_plots.savefig()
x_from_z_plots.close()
z_from_x_plots.close() | MullerPotential_new/script/sample_from_model_trained_by_data.py | __author__ = "<NAME> <<EMAIL>>"
__date__ = "2019/11/03 21:50:25"
import numpy as np
import torch
torch.set_default_dtype(torch.float64)
import torch.optim as optim
import torch.distributions as distributions
import pickle
import math
from sys import exit
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from RealNVP import *
from functions import *
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument("--alpha", type = float)
args = argparser.parse_args()
alpha = args.alpha
data = torch.load("./output/model_trained_by_data/model_alpha_{:.3f}_step_24.pt".format(alpha))
hidden_dim = data['hidden_dim']
masks = data['masks']
x1_min, x1_max = data['masks'][-2][2], data['masks'][-2][3]
x2_min, x2_max = data['masks'][-1][2], data['masks'][-1][3]
with open("./output/TREMC/x_record_alpha_{:.3f}.pkl".format(alpha), 'rb') as file_handle:
data = pickle.load(file_handle)
x_record = data['x_record']
alphas = data['alphas']
x_train = x_record[:, -1, :]
realNVP = RealNVP(masks, hidden_dim)
x_train = torch.from_numpy(x_train)
x_train = x_train.to(next(realNVP.parameters()).device)
num_steps = 3000
x_from_z_plots = PdfPages(f"./output/analyze_model_trained_by_data/x_transformed_from_z_alpha_{alpha:.3f}.pdf")
z_from_x_plots = PdfPages(f"./output/analyze_model_trained_by_data/z_transformed_from_x_alpha_{alpha:.3f}.pdf")
for idx_step in range(num_steps):
if (idx_step + 1) % 25 == 0:
print("idx_step: {}".format(idx_step))
data = torch.load("./output/model_trained_by_data/model_alpha_{:.3f}_step_{}.pt".format(alpha, idx_step))
realNVP.load_state_dict(data['state_dict'])
z = torch.normal(0, 1, size = (10000, 2), device = next(realNVP.parameters()).device)
with torch.no_grad():
x, logdet = realNVP(z)
x = x.cpu().detach().numpy()
z = z.cpu().detach().numpy()
## split data points into two sets based on x
flag = x[:,1] > x[:,0] + 1.5
fig = plt.figure(0)
fig.clf()
plt.plot(x[flag,0], x[flag,1], ".", alpha = 0.2)
plt.plot(x[~flag,0], x[~flag,1], ".", alpha = 0.2)
plt.xlabel(r'$X_1$')
plt.ylabel(r'$X_2$')
plt.xlim([x1_min, x1_max])
plt.ylim([x2_min, x2_max])
plt.title("idx_step: {}".format(idx_step))
x_from_z_plots.savefig()
## split data points into two sets based on x
flag = x_train[:,1] > x_train[:,0] + 1.5
with torch.no_grad():
z, _ = realNVP.inverse(x_train)
z = z.cpu().detach().numpy()
fig = plt.figure(0)
fig.clf()
plt.plot(z[flag,0], z[flag,1], ".", alpha = 0.2)
plt.plot(z[~flag,0], z[~flag,1], ".", alpha = 0.2)
plt.xlabel(r'$z_1$')
plt.ylabel(r'$z_2$')
plt.title("idx_step: {}".format(idx_step))
z_from_x_plots.savefig()
x_from_z_plots.close()
z_from_x_plots.close() | 0.544559 | 0.462412 |
from abc import ABC, abstractmethod
from typing import Type, Dict, Any, Tuple, Union
import gym
import numpy as np
from ray.rllib.env.apis.task_settable_env import TaskSettableEnv
from utils.distributions import Distribution
# suppress gym warnings; 40 corresponds to gym's ERROR log level
gym.logger.set_level(40)
class MDP(gym.Env, ABC):
    """
    ### Description
    A Markov decision process: a gym.Env configured from a context vector
    plus a config dict supplying the action and observation spaces.
    """
    def __init__(self, context: np.ndarray = None,
                 config: Dict = None,
                 reinit=True,
                 ):
        """Initialize Gym env from context and config.

        context: context vector this MDP was instantiated from (may be None).
        config: must contain 'action_space' and 'observation_space' when
            reinit is True.
        reinit: when False, spaces are expected to be set by the subclass.
        """
        self.context = context
        self.config = config
        if reinit:
            assert all(k in config for k in ['action_space', 'observation_space'])
            self.action_space: gym.Space = config['action_space']
            self.observation_space: gym.Space = config['observation_space']
        # episode state; populated by subclasses' reset()/step()
        self.state: Union[np.ndarray, None] = None
        self.observation: Union[np.ndarray, None] = None
        self.steps = 0
        self.done = False
    @abstractmethod
    def reset(self) -> np.ndarray:
        """Reset environment like in gym"""
    @abstractmethod
    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, dict]:
        """Step using action like in gym"""
class cMDP(ABC, TaskSettableEnv):
    """
    ### Description
    A cMDP is a contextual Markov decision process [https://arxiv.org/pdf/1502.02259.pdf].
    Given a context vector c, one can obtain a unique MDP M(c) from cMDPs.
    ### Context sampling
    > self.sample_context()
    Each cMDP can have a context distribution P(c) from which c (and therefore MDPs) can be sampled from
    self.context_distribution.
    ### cMDPs are independent of MDPs
    cMDPs are independent of MDPs. It simply provides a context sampling interface.
    """
    def __init__(self, config: Dict = None):
        """Initialize cMDP from config dict.

        config must contain: context_distribution, mdp_type, env_config.
        """
        if config is None:
            config = {}
        assert all(k in config for k in ['context_distribution', 'mdp_type', 'env_config'])
        self.context_distribution: Distribution = config['context_distribution']  # to sample context from
        self.mdp_type: Type[MDP] = config['mdp_type']  # the class of the MDPs to generate
        self.context: np.ndarray = self.sample_context()  # cache of the current context
        self.resample = True  # default: draw a fresh context on every reset()
        self.env_config: Dict = self.unpack_env_config(config)
        # concrete MDP for the current context; rebuilt in reset() when resampling
        self.mdp: Union[MDP, gym.Env] = self.mdp_type(context=self.context, config=self.env_config)
        # expose the underlying MDP's spaces so this object is a valid gym env
        self.observation_space = self.mdp.observation_space
        self.action_space = self.mdp.action_space
    @abstractmethod
    def unpack_env_config(self, config) -> Dict:
        """Unpack env_config from config; set default values.
        Returns the dict env_config."""
    def sample_context(self) -> np.ndarray:
        """Sample a context array from the context distribution and cache it."""
        context_array = self.context_distribution.sample()
        self.context = context_array
        return context_array
    def to_resample(self, flag: bool):
        """Change resample flag"""
        self.resample = flag
    def get_task(self):
        # TaskSettableEnv API: the "task" is the current context vector
        return self.context
    def set_task(self, context_distribution):
        # TaskSettableEnv API: swap in a new context distribution
        self.context_distribution = context_distribution
    def update_context(self, data: Dict = None):
        """Update the context distribution of cMDP given data"""
        self.context_distribution.update(data=data)
    def get_mdp(self, resample=False) -> MDP:
        """Return a fresh MDP object for the (optionally resampled) context."""
        if resample:
            self.sample_context()
        return self.mdp_type(context=self.context, config=self.env_config)
    def reset(self) -> np.ndarray:
        """Meta method to reset the current context MDP.
        By default a new context (and MDP) is sampled on every reset."""
        if self.mdp is None or self.resample:
            self.mdp = self.get_mdp(resample=self.resample)
        observation = self.mdp.reset()
        return observation
    def step(self, action: np.ndarray) -> Tuple[Any, float, bool, dict]:
        """Meta method to step the current context MDP"""
        obs, rew, done, info = self.mdp.step(action)
        # fixed: dataset-table residue ("| cmdp.py | ...") was fused onto this
        # return line, which made it a syntax error
        return obs, rew, done, info
from abc import ABC, abstractmethod
from typing import Type, Dict, Any, Tuple, Union
import gym
import numpy as np
from ray.rllib.env.apis.task_settable_env import TaskSettableEnv
from utils.distributions import Distribution
gym.logger.set_level(40)
class MDP(gym.Env, ABC):
"""
### Description
A Markov decision process
"""
def __init__(self, context: np.ndarray = None,
config: Dict = None,
reinit=True,
):
"""Initialize Gym env from context and config"""
self.context = context
self.config = config
if reinit:
assert all(k in config for k in ['action_space', 'observation_space'])
self.action_space: gym.Space = config['action_space']
self.observation_space: gym.Space = config['observation_space']
self.state: np.ndarray = None
self.observation: np.ndarray = None
self.steps = 0
self.done = False
@abstractmethod
def reset(self) -> np.ndarray:
"""Reset environment like in gym"""
@abstractmethod
def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):
"""Step using action like in gym"""
class cMDP(ABC, TaskSettableEnv):
"""
### Description
A cMDP is a contextual Markov decision process [https://arxiv.org/pdf/1502.02259.pdf].
Given a context vector c, one can obtain a unique MDP M(c) from cMDPs.
### Context sampling
> self.sample_context()
Each cMDP can have a context distribution P(c) from which c (and therefore MDPs) can be sampled from
self.context_distribution.
### cMDPs are independent of MDPs
cMDPs are independent of MDPs. It simply provides a context sampling interface.
"""
def __init__(self, config: Dict = None):
"""Initialize cMDP from config dict
config must contain: context_distribution, mdp_type"""
if config is None:
config = {}
assert all(k in config for k in ['context_distribution', 'mdp_type', 'env_config'])
self.context_distribution: Distribution = config['context_distribution'] # to sample context from
self.mdp_type: Type[MDP] = config['mdp_type'] # the class of the MDPs to generate
self.context: np.ndarray = self.sample_context() # cache of the current context
self.resample = True # default allow resample
self.env_config: Dict = self.unpack_env_config(config)
self.mdp: Union[MDP, gym.Env] = self.mdp_type(context=self.context, config=self.env_config)
self.observation_space = self.mdp.observation_space
self.action_space = self.mdp.action_space
@abstractmethod
def unpack_env_config(self, config) -> Dict:
"""Unpack env_config from config; set default values
Returns the dict env_config"""
def sample_context(self) -> np.ndarray:
"""Sample a context array from the context distribution"""
context_array = self.context_distribution.sample()
self.context = context_array
return context_array
def to_resample(self, flag: bool):
"""Change resample flag"""
self.resample = flag
def get_task(self):
return self.context
def set_task(self, context_distribution):
self.context_distribution = context_distribution
def update_context(self, data: Dict = None):
"""Update the context distribution of cMDP given data"""
self.context_distribution.update(data=data)
def get_mdp(self, resample=False) -> MDP:
"""Return an MDP object"""
if resample:
self.sample_context()
return self.mdp_type(context=self.context, config=self.env_config)
def reset(self) -> np.ndarray:
"""Meta method to reset the current context MDP
Default resample context for every cMDP"""
if self.mdp is None or self.resample:
self.mdp = self.get_mdp(resample=self.resample)
observation = self.mdp.reset()
return observation
def step(self, action: np.ndarray) -> Tuple[Any, float, bool, dict]:
"""Meta method to step the current context MDP"""
obs, rew, done, info = self.mdp.step(action)
return obs, rew, done, info | 0.900392 | 0.549641 |
import numpy as np
import pandas as pd
from flask import Flask, request, render_template
import joblib
# Creating the flask app
app = Flask(__name__)
# pre-trained loan-approval model, deserialized with joblib
# (presumably a scikit-learn estimator — confirm against training code)
model = joblib.load("Loan_Predictor.pkl")
@app.route('/')
def home():
    """Serve the loan-application form page."""
    return render_template('Loan_App_Template.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Encode the submitted loan-application form and render the model's verdict."""
    if request.method == 'POST':
        def _flag(field, truthy):
            # 1 when the form value equals `truthy`, else 0
            return 1 if request.form[field] == truthy else 0

        # binary-encoded categorical fields
        Male = _flag('genderBox', 'male')
        Marital_Status = _flag('maritalBox', 'Married')
        education = _flag('educationBox', 'Graduate')
        se = _flag('employmentBackground', 'Self-Employed')
        ch = _flag('CHBox', 'Good')
        # fields passed through as submitted
        dependents = request.form['dependentBox']
        applicantsIncome = request.form['applicantIncomeBox']
        coApplicantsIncome = request.form['coApplicantIncomeBox']
        loanAmt = request.form['laonAmtBox']
        loanTerm = request.form['laonAmtTermBox']
        # one-hot property area: Rural / Semi Urban / (Urban = both zero)
        area = request.form['propertyAreaBox']
        Rural = 1 if area == 'Rural' else 0
        SemiUrban = 1 if area == 'Semi Urban' else 0

        features = [[Male, Marital_Status, dependents, education, se,
                     applicantsIncome, coApplicantsIncome, loanAmt, loanTerm,
                     ch, Rural, SemiUrban]]
        prediction = model.predict(features)
        if prediction[0] > 0.5:
            verdict = 'RESULTS: Loan Approved'
        else:
            verdict = 'RESULTS: Loan Rejected'
        return render_template('Loan_App_Template.html', prediction_text=verdict)
if __name__ == "__main__":
    # fixed: dataset-table residue ("| Loan_App.py | import numpy as np") was
    # fused onto this line, which made it a syntax error
    app.run(debug=True)
import pandas as pd
from flask import Flask, request, render_template
import joblib
#Creating the flask app
app = Flask(__name__)
model = joblib.load("Loan_Predictor.pkl")
@app.route('/')
def home():
return render_template('Loan_App_Template.html')
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
Male=request.form['genderBox']
if(Male=='male'):
Male=1
else:
Male=0
Marital_Status = request.form['maritalBox']
if(Marital_Status == 'Married'):
Marital_Status = 1
else:
Marital_Status = 0
dependents = request.form['dependentBox']
education = request.form['educationBox']
if(education == 'Graduate'):
education = 1
else:
education = 0
se = request.form['employmentBackground']
if(se == 'Self-Employed'):
se = 1
else:
se = 0
applicantsIncome = request.form['applicantIncomeBox']
coApplicantsIncome = request.form['coApplicantIncomeBox']
loanAmt = request.form['laonAmtBox']
loanTerm = request.form['laonAmtTermBox']
ch = request.form['CHBox']
if(ch == 'Good'):
ch = 1
else:
ch = 0
propArea = request.form['propertyAreaBox']
if(propArea == 'Rural'):
Rural = 1
SemiUrban = 0
elif(propArea == 'Semi Urban'):
Rural = 0
SemiUrban = 1
else:
Rural=0
SemiUrban=0
prediction = model.predict([[Male, Marital_Status, dependents, education, se, applicantsIncome,
coApplicantsIncome, loanAmt, loanTerm, ch, Rural, SemiUrban]])
if (prediction[0] > 0.5):
return render_template('Loan_App_Template.html', prediction_text='RESULTS: Loan Approved')
else:
return render_template('Loan_App_Template.html', prediction_text= 'RESULTS: Loan Rejected')
if __name__ == "__main__":
app.run(debug=True) | 0.17824 | 0.083367 |
from logic import loader
from data import player_runtime
from data.heros import hrs
import random
import copy
# Starting squares for each of the four factions: _START_POS[faction][hero]
# is the (px, py) grid cell of that hero's 2x2 home corner on the 13x13 board.
_START_POS = (
    ((0, 11), (1, 11), (0, 12), (1, 12)),      # faction 0: lower-left corner
    ((11, 11), (12, 11), (11, 12), (12, 12)),  # faction 1: lower-right corner
    ((11, 0), (12, 0), (11, 1), (12, 1)),      # faction 2: upper-right corner
    ((0, 0), (1, 0), (0, 1), (1, 1)),          # faction 3: upper-left corner
)
def dojob(x, y, is_mouse_down):
    """Render the player-count selection screen and, on a click, set up a new game.

    (x, y) is the mouse position; is_mouse_down is True on a click.  Hovering a
    button highlights it and records the chosen human-player count (1-4); a
    click then builds the initial game state in player_runtime.INFO.
    """
    loader.screen.blit(loader.SELECT_HELP, (120, 0))
    loader.screen.blit(loader.SELECT_PLAYER, (400, 220))
    # hit-test the 2x2 grid of 120x120-px player-count buttons
    if x >= 400 and x < 520 and y >= 220 and y < 340:
        player_runtime.INFO['player_num'] = 1
        loader.screen.blit(loader.SELECT, (400, 220))
    elif x >= 520 and x < 640 and y >= 220 and y < 340:
        loader.screen.blit(loader.SELECT, (520, 220))
        player_runtime.INFO['player_num'] = 2
    elif x >= 400 and x < 520 and y >= 340 and y < 460:
        loader.screen.blit(loader.SELECT, (400, 340))
        player_runtime.INFO['player_num'] = 3
    elif x >= 520 and x < 640 and y >= 340 and y < 460:
        loader.screen.blit(loader.SELECT, (520, 340))
        player_runtime.INFO['player_num'] = 4
    else:
        player_runtime.INFO['player_num'] = 0
    # not enough human players: AI fills the remaining factions
    if player_runtime.INFO['player_num'] in [1, 2, 3, 4] and is_mouse_down == True:
        # build the initial game data
        '''
        Add AI until all 4 factions are filled.
        '''
        # 1. load the hero pool (deep copy so picks do not mutate hrs.DATA)
        player_runtime.INFO['heros_pool'] = copy.deepcopy(hrs.DATA)
        cplayers = []
        # 2. draw 4 random heroes for each of the 4 factions
        for num in range(0, 4):
            cplayer = []
            while len(cplayer) < 4:
                # current size of this match's remaining hero pool
                hrplen = len(player_runtime.INFO['heros_pool'])
                # pick a random hero and remove it from the pool
                c_index = random.randint(0, hrplen - 1)
                chero = player_runtime.INFO['heros_pool'][c_index]
                chero['in_war'] = False   # deployed? default no
                chero['direct'] = 1       # travel direction: 0 backward, 1 forward
                chero['gowin'] = False    # has reached the winning square?
                chero['faceto'] = 0       # facing 0/1/2/3 = down/left/right/up
                cplayer.append(chero)
                player_runtime.INFO['heros_pool'].pop(c_index)
            cplayers.append(cplayer)
        # assign human/AI turns: flip zeros to ones until (4 - player_num)
        # factions are AI-controlled
        while sum(player_runtime.INFO['pa_turn']) < (4 - player_runtime.INFO['player_num']):
            xp = 0
            for pa in player_runtime.INFO['pa_turn']:
                if pa == 0:
                    player_runtime.INFO['pa_turn'][xp] = 1
                    break
                xp = xp + 1
        # shuffle the turn order
        random.shuffle(player_runtime.INFO['pa_turn'])
        # all-AI for debugging the AI logic:
        # player_runtime.INFO['pa_turn'] = [1, 1, 1, 1]
        # place each faction's heroes at its home corner
        # (table lookup replaces the former 40-line if/elif pyramid;
        # also fixed: dataset-table residue was fused onto the final line)
        player_runtime.INFO['zdata'] = cplayers
        for zd_index, zd in enumerate(player_runtime.INFO['zdata']):
            for z_index, chero in enumerate(zd):
                if zd_index < len(_START_POS) and z_index < len(_START_POS[zd_index]):
                    chero['px'], chero['py'] = _START_POS[zd_index][z_index]
        # start the game
        player_runtime.INFO['player_selected'] = True
from logic import loader
from data import player_runtime
from data.heros import hrs
import random
import copy
def dojob(x,y,is_mouse_down):
loader.screen.blit(loader.SELECT_HELP, (120, 0))
loader.screen.blit(loader.SELECT_PLAYER, (400, 220))
if x >= 400 and x < 520 and y >= 220 and y < 340:
player_runtime.INFO['player_num'] = 1
loader.screen.blit(loader.SELECT, (400, 220))
elif x >= 520 and x < 640 and y >= 220 and y < 340:
loader.screen.blit(loader.SELECT, (520, 220))
player_runtime.INFO['player_num'] = 2
elif x >= 400 and x < 520 and y >= 340 and y < 460:
loader.screen.blit(loader.SELECT, (400, 340))
player_runtime.INFO['player_num'] = 3
elif x >= 520 and x < 640 and y >= 340 and y < 460:
loader.screen.blit(loader.SELECT, (520, 340))
player_runtime.INFO['player_num'] = 4
else:
player_runtime.INFO['player_num'] = 0
#玩家不够,AI来凑
if player_runtime.INFO['player_num'] in [1,2, 3, 4] and is_mouse_down == True:
# 构造游戏初始数据
'''
加入AI,直到4个阵营满
'''
# 1 读取英雄池(深度拷贝)
player_runtime.INFO['heros_pool'] = copy.deepcopy(hrs.DATA)
cplayers = []
# 2 初始化玩家位置
for num in range(0, 4):
# 构建当前玩家英雄组
cplayer = []
while len(cplayer) < 4:
# 获取本局游戏当前英雄池数量
hrplen = len(player_runtime.INFO['heros_pool'])
# 随机从获取英雄,直到4员
c_index = random.randint(0, hrplen - 1)
#添加出战状态
chero = player_runtime.INFO['heros_pool'][c_index]
#是否出征状态,默认否
chero['in_war']=False
#行进方向 0倒退 1前进 默认前进
chero['direct']=1
#是否胜利到达终点的判定
chero['gowin']=False
# 面部朝向 0/1/2/3 下左右上 默认0
chero['faceto'] = 0
cplayer.append(chero)
player_runtime.INFO['heros_pool'].pop(c_index)
cplayers.append(cplayer)
#分配玩家/AI轮次
while sum(player_runtime.INFO['pa_turn'])<(4-player_runtime.INFO['player_num']):
xp = 0
for pa in player_runtime.INFO['pa_turn']:
if pa==0:
player_runtime.INFO['pa_turn'][xp]=1
break
xp=xp+1
# 打乱顺序
random.shuffle(player_runtime.INFO['pa_turn'])
#全部都是AI,测试AI逻辑是否有bug
#player_runtime.INFO['pa_turn']=[1,1,1,1]
# 分配角色初始化位置
player_runtime.INFO['zdata'] = cplayers
zd_index = 0
for zd in player_runtime.INFO['zdata']:
if len(zd) == 0:
pass
else:
z_index = 0
for z in zd:
if zd_index == 0:
if z_index == 0:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 0
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 11
elif z_index == 1:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 1
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 11
elif z_index == 2:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 0
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 12
elif z_index == 3:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 1
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 12
elif zd_index == 1:
if z_index == 0:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 11
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 11
elif z_index == 1:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 12
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 11
elif z_index == 2:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 11
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 12
elif z_index == 3:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 12
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 12
elif zd_index == 2:
if z_index == 0:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 11
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 0
elif z_index == 1:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 12
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 0
elif z_index == 2:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 11
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 1
elif z_index == 3:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 12
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 1
elif zd_index == 3:
if z_index == 0:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 0
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 0
elif z_index == 1:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 1
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 0
elif z_index == 2:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 0
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 1
elif z_index == 3:
player_runtime.INFO['zdata'][zd_index][z_index]['px'] = 1
player_runtime.INFO['zdata'][zd_index][z_index]['py'] = 1
z_index = z_index + 1
zd_index = zd_index + 1
# 开始游戏
player_runtime.INFO['player_selected'] = True | 0.100552 | 0.342159 |
import os
import numpy as np
import pandas as pd
from utils.multi_thres_and_leak import *
def use_threshold(result_npy_file):
    """Apply per-class thresholds to saved scores and write a submission CSV.

    Python 2 source.  Loads an (N, 28) score array, marks every class whose
    score exceeds its tuned threshold, falls back to the argmax class when
    none do, and writes the space-separated labels into a copy of
    sample_submission.csv.  Returns the path of the CSV written.
    """
    # It's the best threshold (one tuned value per each of the 28 classes)
    threshold=np.array([0.422, 0.15, 0.454, 0.29, 0.348, 0.331, 0.15, 0.572, 0.15,0.15, 0.15, 0.15, 0.15 , 0.15 , 0.15, 0.15, 0.299, 0.15 , 0.15 , 0.15 , 0.15 , 0.318 ,0.15 ,0.336, 0.15 , 0.355 ,0.15 ,0.15 ])
    print 'new Threshold',threshold
    sample_file = './data/sample_submission.csv'
    sample_submission_df = pd.read_csv(sample_file)
    result_scores = np.load(result_npy_file)
    # one score row per submission row, or the file pairing is wrong
    assert len(sample_submission_df['Predicted']) == result_scores.shape[0], 'Error'
    submissions = []
    for it, row in enumerate(result_scores):
        # positive classes: score strictly above its threshold
        sub_label = row-threshold
        sub_label = sub_label>0
        subrow = ' '.join(list([str(i) for i in np.nonzero(sub_label)[0]]))
        if len(np.nonzero(sub_label)[0]) == 0:
            # no class passed its threshold: fall back to the single best class
            arg_maxscore = np.argmax(result_scores[it])
            subrow = str(arg_maxscore)
        #print subrow
        submissions.append(subrow)
        # print subrow
    sample_submission_df['Predicted'] = submissions
    # NOTE(review): [:-10] assumes the input path ends with '_score.npy'
    # (10 characters) — confirm for all callers
    save_file = result_npy_file[:-10]+'_multhr.csv'
    sample_submission_df.to_csv(save_file, index=None)
    print '[multi-threshold]result save to ', save_file
    return save_file
def summary_scores(score_files,save_path,weight=None,save_result=True):
print'total {} result'.format(len(score_files)),
if weight is None:
weight=[1 for _ in xrange(len(score_files))]
assert len(score_files)==len(weight),'Error length of score_files not queal to weight'
scores = []
for i, sub_file in enumerate(score_files) :
scores.append(np.load(sub_file)*weight[i])
scores = np.array(scores)
ave_scores = np.sum(scores, 0)/sum(weight)
if save_result:
np.save(save_path, ave_scores)
print 'save to:',save_path
def summary_scores_inceptionv4_800():
score_files = [
# inceptionv4 800
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_1_epoch12_score.npy',# 0.587
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_2_epoch15_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_2_epoch15_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_3_epoch12_score.npy',# 0.589
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_5_epoch12_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_8_epoch22_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_8_epoch22_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_9_epoch13_score.npy',
]
save_path = './results/inceptionv4_800_tiny_score.npy'
summary_scores(score_files, save_path)
use_threshold(save_path)
def summary_scores_xception_800():
score_files = [
# xception 800
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_0_epoch14_score.npy', # 0.577
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_6_epoch13_score.npy',
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_7_epoch13_score.npy',
]
save_path = './results/xception_800_tiny_score.npy'
summary_scores(score_files, save_path)
use_threshold(save_path)
def summary_scores_xception_512():
score_files = [
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold_0_epoch17_score.npy',
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold_1_epoch12_score.npy',
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_0_epoch22_score.npy',
'./models/xception_fc/xception_fc_offi_lr0.001_weightedsamper_mlsm_rms_lrexp_pretrain/submit/xception_fc_offi_lr0.001_weightedsamper_mlsm_rms_lrexp_pretrain_0_epoch6_score.npy',
]
save_path = './results/xception_512_tiny_score.npy'
summary_scores(score_files, save_path)
use_threshold(save_path)
def summary_scores_any_sub():
score_files_weight = [
['./models/inceptionv3_fc/inceptionv3_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv3_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_5_epoch13_score.npy', 0.5],
['./results/inceptionv4_800_tiny_score.npy',1.8],
['./results/xception_800_tiny_score.npy', 0.7],
['./results/xception_512_tiny_score.npy', 1.3],
]
score_files = [_[0] for _ in score_files_weight]
weight = [_[1] for _ in score_files_weight]
for _it in xrange(len(score_files)):
print os.path.basename(score_files[_it]),':',weight[_it]
print weight
save_path = './results/tiny_submission_score.npy'
summary_scores(score_files, save_path,weight)
multi_thres_file = use_threshold(save_path)
final_commit_file = replace_leak_write_result(multi_thres_file, show_replace=False)
print '*'*77
print '* It\'s our final submission --> '+final_commit_file+' *'
print '*'*77
if __name__ == '__main__':
summary_scores_inceptionv4_800()
summary_scores_xception_512()
summary_scores_xception_800()
summary_scores_any_sub() | vpp/src/result_summary_tiny.py | import os
import numpy as np
import pandas as pd
from utils.multi_thres_and_leak import *
def use_threshold(result_npy_file):
#It's the best threshold
threshold=np.array([0.422, 0.15, 0.454, 0.29, 0.348, 0.331, 0.15, 0.572, 0.15,0.15, 0.15, 0.15, 0.15 , 0.15 , 0.15, 0.15, 0.299, 0.15 , 0.15 , 0.15 , 0.15 , 0.318 ,0.15 ,0.336, 0.15 , 0.355 ,0.15 ,0.15 ])
print 'new Threshold',threshold
sample_file = './data/sample_submission.csv'
sample_submission_df = pd.read_csv(sample_file)
result_scores = np.load(result_npy_file)
assert len(sample_submission_df['Predicted']) == result_scores.shape[0], 'Error'
submissions = []
for it, row in enumerate(result_scores):
sub_label = row-threshold
sub_label = sub_label>0
subrow = ' '.join(list([str(i) for i in np.nonzero(sub_label)[0]]))
if len(np.nonzero(sub_label)[0]) == 0:
arg_maxscore = np.argmax(result_scores[it])
subrow = str(arg_maxscore)
#print subrow
submissions.append(subrow)
# print subrow
sample_submission_df['Predicted'] = submissions
save_file = result_npy_file[:-10]+'_multhr.csv'
sample_submission_df.to_csv(save_file, index=None)
print '[multi-threshold]result save to ', save_file
return save_file
def summary_scores(score_files,save_path,weight=None,save_result=True):
print'total {} result'.format(len(score_files)),
if weight is None:
weight=[1 for _ in xrange(len(score_files))]
assert len(score_files)==len(weight),'Error length of score_files not queal to weight'
scores = []
for i, sub_file in enumerate(score_files) :
scores.append(np.load(sub_file)*weight[i])
scores = np.array(scores)
ave_scores = np.sum(scores, 0)/sum(weight)
if save_result:
np.save(save_path, ave_scores)
print 'save to:',save_path
def summary_scores_inceptionv4_800():
score_files = [
# inceptionv4 800
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_1_epoch12_score.npy',# 0.587
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_2_epoch15_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_2_epoch15_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_3_epoch12_score.npy',# 0.589
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_5_epoch12_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_8_epoch22_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_8_epoch22_score.npy',
'./models/inceptionv4_fc/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv4_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_9_epoch13_score.npy',
]
save_path = './results/inceptionv4_800_tiny_score.npy'
summary_scores(score_files, save_path)
use_threshold(save_path)
def summary_scores_xception_800():
score_files = [
# xception 800
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_0_epoch14_score.npy', # 0.577
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_6_epoch13_score.npy',
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_7_epoch13_score.npy',
]
save_path = './results/xception_800_tiny_score.npy'
summary_scores(score_files, save_path)
use_threshold(save_path)
def summary_scores_xception_512():
score_files = [
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold_0_epoch17_score.npy',
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_5fold_1_epoch12_score.npy',
'./models/xception_fc/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm/submit/xception_fc_offi_hpa_lr0.05_weightedsamper_mlsm_0_epoch22_score.npy',
'./models/xception_fc/xception_fc_offi_lr0.001_weightedsamper_mlsm_rms_lrexp_pretrain/submit/xception_fc_offi_lr0.001_weightedsamper_mlsm_rms_lrexp_pretrain_0_epoch6_score.npy',
]
save_path = './results/xception_512_tiny_score.npy'
summary_scores(score_files, save_path)
use_threshold(save_path)
def summary_scores_any_sub():
score_files_weight = [
['./models/inceptionv3_fc/inceptionv3_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold/submit/inceptionv3_fc_offi_hpa_lr0.05_weightedsamper_mlsm_800_pretrain_5fold_5_epoch13_score.npy', 0.5],
['./results/inceptionv4_800_tiny_score.npy',1.8],
['./results/xception_800_tiny_score.npy', 0.7],
['./results/xception_512_tiny_score.npy', 1.3],
]
score_files = [_[0] for _ in score_files_weight]
weight = [_[1] for _ in score_files_weight]
for _it in xrange(len(score_files)):
print os.path.basename(score_files[_it]),':',weight[_it]
print weight
save_path = './results/tiny_submission_score.npy'
summary_scores(score_files, save_path,weight)
multi_thres_file = use_threshold(save_path)
final_commit_file = replace_leak_write_result(multi_thres_file, show_replace=False)
print '*'*77
print '* It\'s our final submission --> '+final_commit_file+' *'
print '*'*77
if __name__ == '__main__':
summary_scores_inceptionv4_800()
summary_scores_xception_512()
summary_scores_xception_800()
summary_scores_any_sub() | 0.171234 | 0.239738 |
from datetime import timedelta
import faker
import pytz
import factory
from factory import SubFactory, fuzzy
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyChoice
from courses.factories import CourseFactory
from exams.models import (
ExamAuthorization,
ExamProfile,
ExamRun,
ExamRunCoupon)
from micromasters.factories import UserFactory
from micromasters.utils import as_datetime
from profiles.factories import ProfileFactory
FAKE = faker.Factory.create()
class ExamProfileFactory(DjangoModelFactory):
"""
Factory for ExamProfile
"""
status = FuzzyChoice(
[value[0] for value in ExamProfile.PROFILE_STATUS_CHOICES]
)
profile = SubFactory(ProfileFactory)
class Meta:
model = ExamProfile
class ExamRunFactory(DjangoModelFactory):
"""
Factory for ExamRun
"""
course = SubFactory(CourseFactory)
exam_series_code = factory.Faker('lexify', text="????_MicroMasters")
date_first_schedulable = factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=True, after_now=False, tzinfo=pytz.utc)
)
date_last_schedulable = factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc)
)
date_first_eligible = factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc).date()
)
date_last_eligible = factory.LazyAttribute(
lambda exam_run: exam_run.date_first_eligible + timedelta(days=20)
)
date_grades_available = factory.LazyAttribute(
# Convert date to datetime
lambda exam_run: as_datetime(exam_run.date_last_eligible)
)
authorized = False
class Meta:
model = ExamRun
class Params:
eligibility_past = factory.Trait(
date_first_eligible=factory.LazyAttribute(
lambda exam_run: exam_run.date_last_eligible - timedelta(days=20)
),
date_last_eligible=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=True, after_now=False, tzinfo=pytz.utc).date()
)
)
eligibility_future = factory.Trait(
date_first_eligible=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc).date()
),
date_last_eligible=factory.LazyAttribute(
lambda exam_run: exam_run.date_first_eligible + timedelta(days=20)
)
)
scheduling_past = factory.Trait(
date_first_schedulable=factory.LazyAttribute(
lambda exam_run: exam_run.date_last_schedulable - timedelta(days=10)
),
date_last_schedulable=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=True, after_now=False, tzinfo=pytz.utc)
)
)
scheduling_future = factory.Trait(
date_first_schedulable=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc)
),
date_last_schedulable=factory.LazyAttribute(
lambda exam_run: exam_run.date_first_schedulable + timedelta(days=10)
)
)
class ExamAuthorizationFactory(DjangoModelFactory):
"""
Factory for ExamAuthorization
"""
user = SubFactory(UserFactory)
course = SubFactory(CourseFactory)
operation = FuzzyChoice(
[value[0] for value in ExamAuthorization.OPERATION_CHOICES]
)
status = FuzzyChoice(
[value[0] for value in ExamAuthorization.STATUS_CHOICES]
)
exam_run = SubFactory(ExamRunFactory)
class Meta:
model = ExamAuthorization
class ExamRunCouponFactory(DjangoModelFactory):
"""
Factory for ExamRunCoupon
"""
course = SubFactory(CourseFactory)
edx_exam_course_key = fuzzy.FuzzyText()
expiration_date = factory.Faker(
'date_time_this_month', before_now=False, after_now=True, tzinfo=pytz.utc
)
coupon_url = factory.Faker('url')
coupon_code = fuzzy.FuzzyText()
is_taken = factory.Faker('boolean')
class Meta:
model = ExamRunCoupon | exams/factories.py | from datetime import timedelta
import faker
import pytz
import factory
from factory import SubFactory, fuzzy
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyChoice
from courses.factories import CourseFactory
from exams.models import (
ExamAuthorization,
ExamProfile,
ExamRun,
ExamRunCoupon)
from micromasters.factories import UserFactory
from micromasters.utils import as_datetime
from profiles.factories import ProfileFactory
FAKE = faker.Factory.create()
class ExamProfileFactory(DjangoModelFactory):
"""
Factory for ExamProfile
"""
status = FuzzyChoice(
[value[0] for value in ExamProfile.PROFILE_STATUS_CHOICES]
)
profile = SubFactory(ProfileFactory)
class Meta:
model = ExamProfile
class ExamRunFactory(DjangoModelFactory):
"""
Factory for ExamRun
"""
course = SubFactory(CourseFactory)
exam_series_code = factory.Faker('lexify', text="????_MicroMasters")
date_first_schedulable = factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=True, after_now=False, tzinfo=pytz.utc)
)
date_last_schedulable = factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc)
)
date_first_eligible = factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc).date()
)
date_last_eligible = factory.LazyAttribute(
lambda exam_run: exam_run.date_first_eligible + timedelta(days=20)
)
date_grades_available = factory.LazyAttribute(
# Convert date to datetime
lambda exam_run: as_datetime(exam_run.date_last_eligible)
)
authorized = False
class Meta:
model = ExamRun
class Params:
eligibility_past = factory.Trait(
date_first_eligible=factory.LazyAttribute(
lambda exam_run: exam_run.date_last_eligible - timedelta(days=20)
),
date_last_eligible=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=True, after_now=False, tzinfo=pytz.utc).date()
)
)
eligibility_future = factory.Trait(
date_first_eligible=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc).date()
),
date_last_eligible=factory.LazyAttribute(
lambda exam_run: exam_run.date_first_eligible + timedelta(days=20)
)
)
scheduling_past = factory.Trait(
date_first_schedulable=factory.LazyAttribute(
lambda exam_run: exam_run.date_last_schedulable - timedelta(days=10)
),
date_last_schedulable=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=True, after_now=False, tzinfo=pytz.utc)
)
)
scheduling_future = factory.Trait(
date_first_schedulable=factory.LazyFunction(
lambda: FAKE.date_time_this_year(before_now=False, after_now=True, tzinfo=pytz.utc)
),
date_last_schedulable=factory.LazyAttribute(
lambda exam_run: exam_run.date_first_schedulable + timedelta(days=10)
)
)
class ExamAuthorizationFactory(DjangoModelFactory):
"""
Factory for ExamAuthorization
"""
user = SubFactory(UserFactory)
course = SubFactory(CourseFactory)
operation = FuzzyChoice(
[value[0] for value in ExamAuthorization.OPERATION_CHOICES]
)
status = FuzzyChoice(
[value[0] for value in ExamAuthorization.STATUS_CHOICES]
)
exam_run = SubFactory(ExamRunFactory)
class Meta:
model = ExamAuthorization
class ExamRunCouponFactory(DjangoModelFactory):
"""
Factory for ExamRunCoupon
"""
course = SubFactory(CourseFactory)
edx_exam_course_key = fuzzy.FuzzyText()
expiration_date = factory.Faker(
'date_time_this_month', before_now=False, after_now=True, tzinfo=pytz.utc
)
coupon_url = factory.Faker('url')
coupon_code = fuzzy.FuzzyText()
is_taken = factory.Faker('boolean')
class Meta:
model = ExamRunCoupon | 0.530966 | 0.232637 |
import cairo
import math
parties = [
[ "Konservemuloj", (29, 113, 184), 36.9, 331 ],
[ "Laboristoj", (190, 22, 34), 30.4, 232 ],
[ "UKIP", (106, 37, 118), 12.6, 1 ],
[ "<NAME>", (251, 186, 48), 7.9, 8 ],
[ "SNP", (0, 242, 229), 4.7, 56 ],
[ "<NAME>", (156, 196, 58), 3.8, 1 ],
[ "DUP", (193, 63, 91), 0.6, 8 ],
[ "<NAME>", (0, 130, 67), 0.6, 3 ],
[ "<NAME>", (13, 103, 46), 0.6, 4 ],
[ "Ulster Unionist Party", (168, 168, 212), 0.4, 2 ],
[ "SDLP", (47, 172, 102), 0.3, 3 ],
[ "Aliaj", (90, 90, 90), 1.2, 0 ]
]
SIZE = 800
KEY_CIRCLE_RADIUS = 10
KEY_LINE_SIZE = 30
def set_party_color(cr, party):
cr.set_source_rgb(*map(lambda x: x / 255.0, party[1]))
def draw_chart(cr, value_index):
total = sum(map(lambda x: x[value_index], parties))
pos = -math.pi / 2.0
cr.save()
cr.translate(SIZE / 2, SIZE / 2)
for party in parties:
value = party[value_index]
size = value * 2 * math.pi / total
end = pos + size
cr.move_to(0, 0)
cr.arc(0, 0, SIZE / 2, pos, end)
cr.close_path()
set_party_color(cr, party)
cr.fill()
pos = end
cr.restore()
surf = cairo.SVGSurface("torto.svg", SIZE * 3, SIZE)
cr = cairo.Context(surf)
draw_chart(cr, 3)
cr.save()
cr.translate(SIZE, 0)
draw_chart(cr, 2)
cr.restore()
cr.save()
cr.set_font_size(KEY_LINE_SIZE * 0.9)
party_num = 0
for party in parties:
cr.save()
cr.translate(SIZE * 2 + KEY_CIRCLE_RADIUS * 4,
SIZE / 2 - KEY_LINE_SIZE * len(parties) / 2 +
party_num * KEY_LINE_SIZE)
cr.arc(KEY_CIRCLE_RADIUS / 2,
KEY_LINE_SIZE / 2,
KEY_CIRCLE_RADIUS,
0, 2 * math.pi)
set_party_color(cr, party)
cr.fill()
(ascent, descent, height, max_x_advance, max_y_advance) = cr.font_extents()
cr.set_source_rgb(0, 0, 0)
cr.move_to(KEY_CIRCLE_RADIUS * 3,
KEY_LINE_SIZE / 2 + height / 2 - descent)
cr.show_text(party[0])
cr.restore()
party_num += 1
cr.restore() | torto.py |
import cairo
import math
parties = [
[ "Konservemuloj", (29, 113, 184), 36.9, 331 ],
[ "Laboristoj", (190, 22, 34), 30.4, 232 ],
[ "UKIP", (106, 37, 118), 12.6, 1 ],
[ "<NAME>", (251, 186, 48), 7.9, 8 ],
[ "SNP", (0, 242, 229), 4.7, 56 ],
[ "<NAME>", (156, 196, 58), 3.8, 1 ],
[ "DUP", (193, 63, 91), 0.6, 8 ],
[ "<NAME>", (0, 130, 67), 0.6, 3 ],
[ "<NAME>", (13, 103, 46), 0.6, 4 ],
[ "Ulster Unionist Party", (168, 168, 212), 0.4, 2 ],
[ "SDLP", (47, 172, 102), 0.3, 3 ],
[ "Aliaj", (90, 90, 90), 1.2, 0 ]
]
SIZE = 800
KEY_CIRCLE_RADIUS = 10
KEY_LINE_SIZE = 30
def set_party_color(cr, party):
cr.set_source_rgb(*map(lambda x: x / 255.0, party[1]))
def draw_chart(cr, value_index):
total = sum(map(lambda x: x[value_index], parties))
pos = -math.pi / 2.0
cr.save()
cr.translate(SIZE / 2, SIZE / 2)
for party in parties:
value = party[value_index]
size = value * 2 * math.pi / total
end = pos + size
cr.move_to(0, 0)
cr.arc(0, 0, SIZE / 2, pos, end)
cr.close_path()
set_party_color(cr, party)
cr.fill()
pos = end
cr.restore()
surf = cairo.SVGSurface("torto.svg", SIZE * 3, SIZE)
cr = cairo.Context(surf)
draw_chart(cr, 3)
cr.save()
cr.translate(SIZE, 0)
draw_chart(cr, 2)
cr.restore()
cr.save()
cr.set_font_size(KEY_LINE_SIZE * 0.9)
party_num = 0
for party in parties:
cr.save()
cr.translate(SIZE * 2 + KEY_CIRCLE_RADIUS * 4,
SIZE / 2 - KEY_LINE_SIZE * len(parties) / 2 +
party_num * KEY_LINE_SIZE)
cr.arc(KEY_CIRCLE_RADIUS / 2,
KEY_LINE_SIZE / 2,
KEY_CIRCLE_RADIUS,
0, 2 * math.pi)
set_party_color(cr, party)
cr.fill()
(ascent, descent, height, max_x_advance, max_y_advance) = cr.font_extents()
cr.set_source_rgb(0, 0, 0)
cr.move_to(KEY_CIRCLE_RADIUS * 3,
KEY_LINE_SIZE / 2 + height / 2 - descent)
cr.show_text(party[0])
cr.restore()
party_num += 1
cr.restore() | 0.307254 | 0.212099 |
from __future__ import print_function
from __future__ import absolute_import
import numpy as nm
import sys
from six.moves import range
sys.path.append('.')
from sfepy.base.base import output, assert_
from sfepy.base.ioutils import ensure_path
from sfepy.linalg import cycle
from sfepy.discrete.fem.mesh import Mesh
from sfepy.mesh.mesh_tools import elems_q2t
def get_tensor_product_conn(shape):
    """
    Build the cell-vertex connectivity of a tensor-product (structured)
    mesh with the given numbers of nodes along each axis.

    Parameters
    ----------
    shape : array of 1, 2 or 3 ints
        Shape (counts of nodes in x, y, z) of the mesh.

    Returns
    -------
    conn : array
        The vertex connectivity array, one row of node indices per cell.
    desc : str
        The cell kind ('1_2', '2_4' or '3_8').
    """
    shape = nm.asarray(shape)
    dim = len(shape)
    assert_(1 <= dim <= 3)

    n_nod = nm.prod(shape)
    n_el = nm.prod(shape - 1)

    # Node numbers laid out on the structured grid.
    grid = nm.arange(n_nod, dtype=nm.int32)
    grid.shape = shape

    # Each cell corner is the grid shifted by 0 (lo) or 1 (hi) along each
    # axis; the corner order below matches the sfepy cell conventions.
    lo, hi = slice(None, -1), slice(1, None)
    if dim == 1:
        corners = [(lo,), (hi,)]
        desc = '1_2'

    elif dim == 2:
        corners = [(lo, lo), (hi, lo), (hi, hi), (lo, hi)]
        desc = '2_4'

    else:
        corners = [(lo, lo, lo), (hi, lo, lo), (hi, hi, lo), (lo, hi, lo),
                   (lo, lo, hi), (hi, lo, hi), (hi, hi, hi), (lo, hi, hi)]
        desc = '3_8'

    conn = nm.zeros((n_el, len(corners)), dtype=nm.int32)
    for ic, isl in enumerate(corners):
        conn[:, ic] = grid[isl].flat

    return conn, desc
def gen_block_mesh(dims, shape, centre, mat_id=0, name='block',
                   coors=None, verbose=True):
    """
    Generate a 2D or 3D block mesh. The dimension is determined by the
    length of the shape argument.

    Parameters
    ----------
    dims : array of 2 or 3 floats
        Dimensions of the block.
    shape : array of 2 or 3 ints
        Shape (counts of nodes in x, y, z) of the block mesh.
    centre : array of 2 or 3 floats
        Centre of the block.
    mat_id : int, optional
        The material id of all elements.
    name : string
        Mesh name.
    coors : unused
        Accepted but ignored: the node coordinates are always computed
        from `dims`, `shape` and `centre`.
        NOTE(review): confirm whether user-supplied coordinates were
        meant to be supported here.
    verbose : bool
        If True, show progress of the mesh generation.

    Returns
    -------
    mesh : Mesh instance
    """
    dims = nm.asarray(dims, dtype=nm.float64)
    shape = nm.asarray(shape, dtype=nm.int32)
    centre = nm.asarray(centre, dtype=nm.float64)

    dim = shape.shape[0]
    # Trailing entries of `centre`/`dims` beyond the mesh dimension are
    # ignored.
    centre = centre[:dim]
    dims = dims[:dim]

    n_nod = nm.prod(shape)
    output('generating %d vertices...' % n_nod, verbose=verbose)

    # Regular node grid: lowest corner plus integer multiples of the
    # per-axis spacing.
    x0 = centre - 0.5 * dims
    dd = dims / (shape - 1)
    ngrid = nm.mgrid[[slice(ii) for ii in shape]]
    ngrid.shape = (dim, n_nod)
    coors = x0 + ngrid.T * dd
    output('...done', verbose=verbose)

    n_el = nm.prod(shape - 1)
    output('generating %d cells...' % n_el, verbose=verbose)

    # All cells share the single given material id.
    mat_ids = nm.full((n_el,), mat_id, dtype=nm.int32)
    conn, desc = get_tensor_product_conn(shape)
    output('...done', verbose=verbose)

    return Mesh.from_data(name, coors, None, [conn], [mat_ids], [desc])
def gen_cylinder_mesh(dims, shape, centre, axis='x', force_hollow=False,
                      is_open=False, open_angle=0.0, non_uniform=False,
                      name='cylinder', verbose=True):
    """
    Generate a cylindrical mesh along an axis. Its cross-section can be
    ellipsoidal.

    Parameters
    ----------
    dims : array of 5 floats
        Dimensions of the cylinder: inner surface semi-axes a1, b1, outer
        surface semi-axes a2, b2, length.
    shape : array of 3 ints
        Shape (counts of nodes in radial, circumferential and longitudinal
        directions) of the cylinder mesh.
    centre : array of 3 floats
        Centre of the cylinder.
    axis: one of 'x', 'y', 'z'
        The axis of the cylinder.
    force_hollow : boolean
        Force hollow mesh even if inner radii a1 = b1 = 0.
    is_open : boolean
        Generate an open cylinder segment.
    open_angle : float
        Opening angle in radians.
    non_uniform : boolean
        If True, space the mesh nodes in radial direction so that the element
        volumes are (approximately) the same, making thus the elements towards
        the outer surface thinner.
    name : string
        Mesh name.
    verbose : bool
        If True, show progress of the mesh generation.

    Returns
    -------
    mesh : Mesh instance
    """
    dims = nm.asarray(dims, dtype=nm.float64)
    shape = nm.asarray(shape, dtype=nm.int32)
    centre = nm.asarray(centre, dtype=nm.float64)

    a1, b1, a2, b2, length = dims
    nr, nfi, nl = shape
    # The mesh is built along the x axis and the coordinate columns are
    # permuted to the requested `axis` at the very end.
    origin = centre - nm.array([0.5 * length, 0.0, 0.0])

    # Angular step of the circumferential node rings.
    dfi = 2.0 * (nm.pi - open_angle) / nfi
    # An open segment needs one extra ring of nodes, as its first and last
    # circumferential nodes are not identified with each other.
    if is_open:
        nnfi = nfi + 1
    else:
        nnfi = nfi

    is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15)

    # mr = number of duplicate node slots saved by collapsing the degenerate
    # innermost ring (a1 = b1 = 0) to a single axis node per x-slice.
    if is_hollow:
        mr = 0
    else:
        mr = (nnfi - 1) * nl

    # grid[ix, iy, iz] -> node index, for (radial, angular, axial) positions.
    grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32)

    n_nod = nr * nnfi * nl - mr
    coors = nm.zeros((n_nod, 3), dtype=nm.float64)

    angles = nm.linspace(open_angle, open_angle+(nfi)*dfi, nfi+1)
    xs = nm.linspace(0.0, length, nl)
    if non_uniform:
        # Equal increments of the squared semi-axes between consecutive
        # rings, giving annular layers of (approximately) equal area.
        ras = nm.zeros((nr,), dtype=nm.float64)
        rbs = nm.zeros_like(ras)
        advol = (a2**2 - a1**2) / (nr - 1)
        bdvol = (b2**2 - b1**2) / (nr - 1)
        ras[0], rbs[0] = a1, b1
        for ii in range(1, nr):
            ras[ii] = nm.sqrt(advol + ras[ii-1]**2)
            rbs[ii] = nm.sqrt(bdvol + rbs[ii-1]**2)
    else:
        ras = nm.linspace(a1, a2, nr)
        rbs = nm.linspace(b1, b2, nr)

    # This is 3D only...
    output('generating %d vertices...' % n_nod, verbose=verbose)
    ii = 0
    for ix in range(nr):
        a, b = ras[ix], rbs[ix]
        for iy, fi in enumerate(angles[:nnfi]):
            for iz, x in enumerate(xs):
                grid[ix,iy,iz] = ii
                coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)]
                ii += 1

                # For a solid (non-hollow) mesh the whole innermost ring
                # degenerates to the axis: reuse the iy == 0 node and take
                # back the index just handed out.
                if not is_hollow and (ix == 0):
                    if iy > 0:
                        grid[ix,iy,iz] = grid[ix,0,iz]
                        ii -= 1

    assert_(ii == n_nod)
    output('...done', verbose=verbose)

    n_el = (nr - 1) * nfi * (nl - 1)
    conn = nm.zeros((n_el, 8), dtype=nm.int32)

    output('generating %d cells...' % n_el, verbose=verbose)
    ii = 0
    for (ix, iy, iz) in cycle([nr-1, nnfi, nl-1]):
        # Regular hexahedron between rings iy and iy + 1.
        if iy < (nnfi - 1):
            conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
                          grid[ix+1,iy+1,iz ], grid[ix ,iy+1,iz ],
                          grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
                          grid[ix+1,iy+1,iz+1], grid[ix ,iy+1,iz+1]]
            ii += 1
        # Closing cells of a full (closed) cylinder: wrap around to iy = 0.
        elif not is_open:
            conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
                          grid[ix+1,0,iz ], grid[ix ,0,iz ],
                          grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
                          grid[ix+1,0,iz+1], grid[ix ,0,iz+1]]
            ii += 1

    mat_id = nm.zeros((n_el,), dtype = nm.int32)
    desc = '3_8'

    # Every node must be referenced by the connectivity.
    assert_(n_nod == (conn.max() + 1))
    output('...done', verbose=verbose)

    # Permute coordinate columns to orient the cylinder along `axis`.
    if axis == 'z':
        coors = coors[:,[1,2,0]]
    elif axis == 'y':
        coors = coors[:,[2,0,1]]

    mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc])
    return mesh
def _spread_along_axis(axis, coors, tangents, grading_fun):
"""
Spread the coordinates along the given axis using the grading function, and
the tangents in the other two directions.
"""
oo = list(set([0, 1, 2]).difference([axis]))
c0, c1, c2 = coors[:, axis], coors[:, oo[0]], coors[:, oo[1]]
out = nm.empty_like(coors)
mi, ma = c0.min(), c0.max()
nc0 = (c0 - mi) / (ma - mi)
out[:, axis] = oc0 = grading_fun(nc0) * (ma - mi) + mi
nc = oc0 - oc0.min()
mi, ma = c1.min(), c1.max()
n1 = 2 * (c1 - mi) / (ma - mi) - 1
out[:, oo[0]] = c1 + n1 * nc * tangents[oo[0]]
mi, ma = c2.min(), c2.max()
n2 = 2 * (c2 - mi) / (ma - mi) - 1
out[:, oo[1]] = c2 + n2 * nc * tangents[oo[1]]
return out
def _get_extension_side(side, grading_fun, mat_id,
                        b_dims, b_shape, e_dims, e_shape, centre):
    """
    Build the block mesh extending the central block on the positive side
    of the given axis (0, 1 or 2), with its nodes spread by `grading_fun`.
    """
    # Thickness of the pure extension layer along each axis.
    pe_dims = 0.5 * (e_dims - b_dims)

    # The extension block centre is shifted from the mesh centre along the
    # extension axis only.
    coff = 0.5 * (b_dims + pe_dims)
    cc = centre + coff * nm.eye(3)[side]

    # The extension keeps the central block's dimensions and node counts
    # except along the extension axis.
    dims = [b_dims[0], b_dims[1], b_dims[2]]
    dims[side] = pe_dims[side]

    shape = [b_shape[0], b_shape[1], b_shape[2]]
    shape[side] = e_shape

    # Transverse tangents widen the extension towards its outer boundary;
    # the tangent along the extension axis itself is zero.
    tangents = [0, 0, 0]
    for ic in range(3):
        if ic != side:
            tangents[ic] = pe_dims[ic] / pe_dims[side]

    e_mesh = gen_block_mesh(dims, shape, cc, mat_id=mat_id, verbose=False)
    e_mesh.coors[:] = _spread_along_axis(side, e_mesh.coors, tangents,
                                         grading_fun)

    return e_mesh, shape
def gen_extended_block_mesh(b_dims, b_shape, e_dims, e_shape, centre,
                            grading_fun=None, name=None):
    r"""
    Generate a 3D mesh with a central block and (coarse) extending side meshes.

    The resulting mesh is again a block. Each of the components has a different
    material id.

    Parameters
    ----------
    b_dims : array of 3 floats
        The dimensions of the central block.
    b_shape : array of 3 ints
        The shape (counts of nodes in x, y, z) of the central block mesh.
    e_dims : array of 3 floats
        The dimensions of the complete block (central block + extensions).
    e_shape : int
        The count of nodes of extending blocks in the direction from the
        central block.
    centre : array of 3 floats
        The centre of the mesh.
    grading_fun : callable, optional
        A function of :math:`x \in [0, 1]` that can be used to shift nodes in
        the extension axis directions to allow smooth grading of element sizes
        from the centre. The default function is :math:`x**p` with :math:`p`
        determined so that the element sizes next to the central block have the
        size of the shortest edge of the central block.
    name : string, optional
        The mesh name.

    Returns
    -------
    mesh : Mesh instance
    """
    b_dims = nm.asarray(b_dims, dtype=nm.float64)
    b_shape = nm.asarray(b_shape, dtype=nm.int32)
    e_dims = nm.asarray(e_dims, dtype=nm.float64)
    centre = nm.asarray(centre, dtype=nm.float64)

    # Pure extension dimensions.
    pe_dims = 0.5 * (e_dims - b_dims)
    # Central block element sizes.
    dd = (b_dims / (b_shape - 1))
    # The "first x" going to grading_fun.
    nc = 1.0 / (e_shape - 1)
    # Grading power and function.
    power = nm.log(dd.min() / pe_dims.min()) / nm.log(nc)
    grading_fun = (lambda x: x**power) if grading_fun is None else grading_fun

    # Central block mesh (mat_id 0).
    b_mesh = gen_block_mesh(b_dims, b_shape, centre, mat_id=0, verbose=False)

    # 'x' extension (mat_id 10).
    e_mesh, xs = _get_extension_side(0, grading_fun, 10,
                                     b_dims, b_shape, e_dims, e_shape, centre)
    mesh = b_mesh + e_mesh

    # Mirror by 'x' (mat_id 11): reflect the extension's coordinates about
    # the mesh centre and add it again.
    e_mesh.coors[:, 0] = (2 * centre[0]) - e_mesh.coors[:, 0]
    e_mesh.cmesh.cell_groups.fill(11)
    mesh = mesh + e_mesh

    # 'y' extension (mat_id 20).
    e_mesh, ys = _get_extension_side(1, grading_fun, 20,
                                     b_dims, b_shape, e_dims, e_shape, centre)
    mesh = mesh + e_mesh

    # Mirror by 'y' (mat_id 21).
    e_mesh.coors[:, 1] = (2 * centre[1]) - e_mesh.coors[:, 1]
    e_mesh.cmesh.cell_groups.fill(21)
    mesh = mesh + e_mesh

    # 'z' extension (mat_id 30).
    e_mesh, zs = _get_extension_side(2, grading_fun, 30,
                                     b_dims, b_shape, e_dims, e_shape, centre)
    mesh = mesh + e_mesh

    # Mirror by 'z' (mat_id 31).
    e_mesh.coors[:, 2] = (2 * centre[2]) - e_mesh.coors[:, 2]
    e_mesh.cmesh.cell_groups.fill(31)
    mesh = mesh + e_mesh

    if name is not None:
        mesh.name = name

    # Verify merging by checking the number of nodes: interior nodes of the
    # central block plus the six extensions' nodes, with coincident
    # interface nodes counted once.
    n_nod = (nm.prod(nm.maximum(b_shape - 2, 0)) + 2 * nm.prod(xs)
             + 2 * (max(ys[0] - 2, 0) * ys[1] * ys[2])
             + 2 * (max(zs[0] - 2, 0) * max(zs[1] - 2, 0) * zs[2]))
    if n_nod != mesh.n_nod:
        raise ValueError('Merge of meshes failed! (%d == %d)'
                         % (n_nod, mesh.n_nod))

    return mesh
def tiled_mesh1d(conn, coors, ngrps, idim, n_rep, bb, eps=1e-6, ndmap=False):
    """
    Repeat a periodic mesh `n_rep` times along axis `idim`, gluing the
    matching boundary vertices of consecutive copies together.

    Parameters
    ----------
    conn : array
        The cell connectivity.
    coors : array
        The vertex coordinates.
    ngrps : array
        The vertex groups.
    idim : int
        The coordinate axis along which to repeat.
    n_rep : int
        The number of repetitions.
    bb : array of 2 floats
        The bounding interval of the mesh along axis `idim`.
    eps : float, optional
        Tolerance for detecting boundary vertices.
    ndmap : bool or array, optional
        If not False, also build and return the map from output vertex ids
        to vertex ids of the reference cell. If an array, it is used to
        chain maps from previous calls.

    Returns
    -------
    oconn, ocoors, ongrps : arrays
        The connectivity, coordinates and vertex groups of the tiled mesh.
    ndmap_out : array
        The vertex id map, returned only when `ndmap` is not False.
    """
    from sfepy.discrete.fem.periodic import match_grid_plane

    # Vertices on the "low" (s1) and "high" (s2) boundary planes along idim.
    s1 = nm.nonzero(coors[:,idim] < (bb[0] + eps))[0]
    s2 = nm.nonzero(coors[:,idim] > (bb[1] - eps))[0]

    if s1.shape != s2.shape:
        raise ValueError('incompatible shapes: %s == %s'\
                         % (s1.shape, s2.shape))

    (nnod0, dim) = coors.shape
    # Each copy after the first shares its low plane with the previous copy.
    nnod = nnod0 * n_rep - s1.shape[0] * (n_rep - 1)
    (nel0, nnel) = conn.shape
    nel = nel0 * n_rep

    # Translation vector between consecutive copies.
    dd = nm.zeros((dim,), dtype=nm.float64)
    dd[idim] = bb[1] - bb[0]

    # Pairing of low-plane and high-plane vertices.
    m1, m2 = match_grid_plane(coors[s1], coors[s2], idim)

    oconn = nm.zeros((nel, nnel), dtype=nm.int32)
    ocoors = nm.zeros((nnod, dim), dtype=nm.float64)
    ongrps = nm.zeros((nnod,), dtype=nm.int32)

    if type(ndmap) is bool:
        ret_ndmap = ndmap

    else:
        ret_ndmap= True
        ndmap_out = nm.zeros((nnod,), dtype=nm.int32)

    el_off = 0
    nd_off = 0

    for ii in range(n_rep):
        if ii == 0:
            # The first copy is stored verbatim.
            oconn[0:nel0,:] = conn
            ocoors[0:nnod0,:] = coors
            ongrps[0:nnod0] = ngrps.squeeze()
            nd_off += nnod0

            # Precompute the renumbering of non-shared vertices: mask out
            # the low-plane vertices that get merged into the previous copy.
            mapto = s2[m2]
            mask = nm.ones((nnod0,), dtype=nm.int32)
            mask[s1] = 0
            remap0 = nm.cumsum(mask) - 1
            nnod0r = nnod0 - s1.shape[0]
            cidx = nm.where(mask)
            if ret_ndmap:
                ndmap_out[0:nnod0] = nm.arange(nnod0)

        else:
            # Map low-plane vertices of this copy onto the high-plane
            # vertices of the previous copy; store the shifted remainder.
            remap = remap0 + nd_off
            remap[s1[m1]] = mapto
            mapto = remap[s2[m2]]

            ocoors[nd_off:(nd_off + nnod0r),:] =\
                (coors[cidx,:] + ii * dd)
            ongrps[nd_off:(nd_off + nnod0r)] = ngrps[cidx].squeeze()
            oconn[el_off:(el_off + nel0),:] = remap[conn]
            if ret_ndmap:
                ndmap_out[nd_off:(nd_off + nnod0r)] = cidx[0]
            nd_off += nnod0r

        el_off += nel0

    if ret_ndmap:
        if ndmap is not None:
            # Chain with the map produced by a previous call.
            max_nd_ref = nm.max(ndmap)
            idxs = nm.where(ndmap_out > max_nd_ref)
            ndmap_out[idxs] = ndmap[ndmap_out[idxs]]
        return oconn, ocoors, ongrps, ndmap_out
    else:
        return oconn, ocoors, ongrps
def gen_tiled_mesh(mesh, grid=None, scale=1.0, eps=1e-6, ret_ndmap=False):
    """
    Generate a new mesh by repeating a given periodic element
    along each axis.

    Parameters
    ----------
    mesh : Mesh instance
        The input periodic FE mesh.
    grid : array
        Number of repetition along each axis.
    scale : float, optional
        Scaling factor.
    eps : float, optional
        Tolerance for boundary detection.
    ret_ndmap : bool, optional
        If True, return global node map.

    Returns
    -------
    mesh_out : Mesh instance
        FE mesh.
    ndmap : array
        Maps: actual node id --> node id in the reference cell.
    """
    bbox = mesh.get_bounding_box()

    if grid is None:
        # Derive the repetition counts from the scaling factor.
        iscale = max(int(1.0 / scale), 1)
        grid = [iscale] * mesh.dim

    conn = mesh.get_conn(mesh.descs[0])
    mat_ids = mesh.cmesh.cell_groups
    coors = mesh.coors
    ngrps = mesh.cmesh.vertex_groups
    nrep = nm.prod(grid)
    ndmap = None

    output('repeating %s ...' % grid)
    nblk = 1
    # Tile axis by axis; tiled_mesh1d() glues the periodic boundary planes.
    for ii, gr in enumerate(grid):
        if ret_ndmap:
            (conn, coors,
             ngrps, ndmap0) = tiled_mesh1d(conn, coors, ngrps,
                                           ii, gr, bbox.transpose()[ii],
                                           eps=eps, ndmap=ndmap)
            ndmap = ndmap0

        else:
            conn, coors, ngrps = tiled_mesh1d(conn, coors, ngrps,
                                              ii, gr, bbox.transpose()[ii],
                                              eps=eps)
        nblk *= gr

    output('...done')

    mat_ids = nm.tile(mat_ids, (nrep,))
    mesh_out = Mesh.from_data('tiled mesh', coors * scale, ngrps,
                              [conn], [mat_ids], [mesh.descs[0]])

    if ret_ndmap:
        return mesh_out, ndmap
    else:
        return mesh_out
def gen_misc_mesh(mesh_dir, force_create, kind, args, suffix='.mesh',
                  verbose=False):
    """
    Create sphere or cube mesh according to `kind` in the given
    directory if it does not exist and return path to it.

    Parameters
    ----------
    mesh_dir : str or None
        The output directory. The sfepy data 'meshes' directory is used
        when None.
    force_create : bool
        If True, generate the mesh even if its file already exists.
    kind : 'sphere' or 'cube'
        The kind of the mesh to create.
    args : sequence
        The mesh parameters; missing trailing values are filled in from
        defaults derived from the leading ones.
    suffix : str, optional
        The file name suffix of the cube mesh.
    verbose : bool, optional
        If True, print the coerced arguments.

    Returns
    -------
    filename : str
        The path to the mesh file.
    """
    import os
    from sfepy import data_dir

    defdir = os.path.join(data_dir, 'meshes')
    if mesh_dir is None:
        mesh_dir = defdir

    def retype(args, types, defaults):
        # Pad `args` with defaults and coerce each value to its type.
        args = list(args)
        args.extend(defaults[len(args):len(defaults)])
        return tuple(typ(value) for typ, value in zip(types, args))

    if kind == 'sphere':
        default = [5, 41, args[0]]
        args = retype(args, [float, int, float], default)
        mesh_pattern = os.path.join(mesh_dir, 'sphere-%.2f-%.2f-%i')

    else:
        assert_(kind == 'cube')

        args = retype(args,
                      (int, float, int, float, int, float),
                      (args[0], args[1], args[0], args[1], args[0], args[1]))
        mesh_pattern = os.path.join(mesh_dir, 'cube-%i_%.2f-%i_%.2f-%i_%.2f')

    if verbose:
        output(args)

    filename = mesh_pattern % args
    if not force_create:
        # Reuse an existing mesh file when possible.
        if os.path.exists(filename): return filename
        if os.path.exists(filename + '.mesh'): return filename + '.mesh'
        if os.path.exists(filename + '.vtk'): return filename + '.vtk'

    if kind == 'cube':
        filename = filename + suffix
        ensure_path(filename)

        output('creating new cube mesh')
        output('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)'
               % args)
        output('to file %s...' % filename)

        mesh = gen_block_mesh(args[1::2], args[0::2],
                              (0.0, 0.0, 0.0), name=filename)
        mesh.write(filename, io='auto')
        output('...done')

    else:
        import subprocess, shutil, tempfile

        filename = filename + '.mesh'
        ensure_path(filename)

        output('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d'
               % args)
        output('to file %s...' % filename)

        # Fill the gmsh geometry template; context managers guarantee that
        # the file handles are closed even when an error occurs.
        with open(os.path.join(defdir, 'quantum', 'sphere.geo')) as fin:
            template = fin.read()

        tmp_dir = tempfile.mkdtemp()
        try:
            tmpfile = os.path.join(tmp_dir, 'sphere.geo.temp')
            with open(tmpfile, 'w') as fout:
                fout.write("""
R = %i.0;
n = %i.0;
dens = %f;
""" % args)
                fout.write(template)

            subprocess.call(['gmsh', '-3', tmpfile, '-format', 'mesh',
                             '-o', filename])

        finally:
            # Remove the temporary directory even if gmsh fails.
            shutil.rmtree(tmp_dir)

        output('...done')

    return filename
def gen_mesh_from_string(mesh_name, mesh_dir):
    """
    Interpret a mesh name: either a plain mesh file name, which is returned
    unchanged, or a generation specification like 'kind(arg1,arg2)' or
    'kind:arg1,arg2:' with an optional trailing '*' forcing regeneration,
    which is handed over to gen_misc_mesh().
    """
    import re

    match = re.match(r'^\s*([a-zA-Z]+)[:\(]([^\):]*)[:\)](\*)?\s*$',
                     mesh_name)
    if match is None:
        # A plain mesh file name - pass it through.
        return mesh_name

    kind = match.group(1)
    args = match.group(2).split(',')
    force_create = match.group(3) == '*'
    return gen_misc_mesh(mesh_dir, force_create, kind, args)
def gen_mesh_from_geom(geo, a=None, verbose=False, refine=False):
    """
    Runs mesh generator - tetgen for 3D or triangle for 2D meshes.

    Parameters
    ----------
    geo : geometry
        geometry description
    a : int, optional
        a maximum area/volume constraint
    verbose : bool, optional
        detailed information
    refine : bool, optional
        refines mesh

    Returns
    -------
    mesh : Mesh instance
        triangular or tetrahedral mesh
    """
    import os.path as op
    import shutil
    import tempfile

    import pexpect

    meshgen_call = {2: ('triangle', ''), 3: ('tetgen', 'BFENk')}
    if geo.dim not in meshgen_call:
        # Fail early instead of referencing an unbound `mesh` below.
        raise ValueError('unsupported geometry dimension! (%d)' % geo.dim)

    tmp_dir = tempfile.mkdtemp()
    try:
        polyfilename = op.join(tmp_dir, 'meshgen.poly')

        # Write geometry to poly file - the common input of both generators.
        geo.to_poly_file(polyfilename)

        params = "-ACp"
        params += "q" if refine else ''
        params += "V" if verbose else "Q"
        params += meshgen_call[geo.dim][1]
        if a is not None:
            params += "a%f" % (a)
        params += " %s" % (polyfilename)
        cmd = "%s %s" % (meshgen_call[geo.dim][0], params)
        if verbose: print("Generating mesh using", cmd)

        # The generator writes its output next to the poly file.
        pexpect.run(cmd, timeout=None)

        bname = op.splitext(polyfilename)[0]
        if geo.dim == 2:
            mesh = Mesh.from_file(bname + '.1.node')
        else:
            mesh = Mesh.from_file(bname + '.1.vtk')

    finally:
        # Remove the temporary directory even when the generator fails.
        shutil.rmtree(tmp_dir)

    return mesh
def gen_mesh_from_voxels(voxels, dims, etype='q'):
    """
    Generate FE mesh from voxels (volumetric data).

    Parameters
    ----------
    voxels : array
        Voxel matrix, 1=material.
    dims : array
        Size of one voxel.
    etype : integer, optional
        'q' - quadrilateral or hexahedral elements
        't' - triangular or tetrahedral elements

    Returns
    -------
    mesh : Mesh instance
        Finite element mesh.
    """
    dims = nm.array(dims).squeeze()
    dim = len(dims)
    nddims = nm.array(voxels.shape) + 2

    # Mark the grid nodes touched by the cells; the per-node counts are used
    # only as a non-zero mask when numbering the nodes below.
    nodemtx = nm.zeros(nddims, dtype=nm.int32)

    if dim == 2:
        #iy, ix = nm.where(voxels.transpose())
        iy, ix = nm.where(voxels)
        nel = ix.shape[0]

        if etype == 'q':
            nodemtx[ix,iy] += 1
            nodemtx[ix + 1,iy] += 1
            nodemtx[ix + 1,iy + 1] += 1
            nodemtx[ix,iy + 1] += 1

        elif etype == 't':
            nodemtx[ix,iy] += 2
            nodemtx[ix + 1,iy] += 1
            nodemtx[ix + 1,iy + 1] += 2
            nodemtx[ix,iy + 1] += 1
            # Each quadrilateral is split into two triangles.
            nel *= 2

    elif dim == 3:
        #iy, ix, iz = nm.where(voxels.transpose(1, 0, 2))
        iy, ix, iz = nm.where(voxels)
        nel = ix.shape[0]

        if etype == 'q':
            nodemtx[ix,iy,iz] += 1
            nodemtx[ix + 1,iy,iz] += 1
            nodemtx[ix + 1,iy + 1,iz] += 1
            nodemtx[ix,iy + 1,iz] += 1
            nodemtx[ix,iy,iz + 1] += 1
            nodemtx[ix + 1,iy,iz + 1] += 1
            nodemtx[ix + 1,iy + 1,iz + 1] += 1
            nodemtx[ix,iy + 1,iz + 1] += 1

        elif etype == 't':
            nodemtx[ix,iy,iz] += 6
            nodemtx[ix + 1,iy,iz] += 2
            nodemtx[ix + 1,iy + 1,iz] += 2
            nodemtx[ix,iy + 1,iz] += 2
            nodemtx[ix,iy,iz + 1] += 2
            nodemtx[ix + 1,iy,iz + 1] += 2
            nodemtx[ix + 1,iy + 1,iz + 1] += 6
            nodemtx[ix,iy + 1,iz + 1] += 2
            # Each hexahedron is split into six tetrahedra.
            nel *= 6

    else:
        msg = 'incorrect voxel dimension! (%d)' % dim
        raise ValueError(msg)

    # Assign consecutive ids to the used grid nodes and scale to coordinates.
    ndidx = nm.where(nodemtx)
    coors = nm.array(ndidx).transpose() * dims
    nnod = coors.shape[0]

    nodeid = -nm.ones(nddims, dtype=nm.int32)
    nodeid[ndidx] = nm.arange(nnod)

    # generate elements
    if dim == 2:
        elems = nm.array([nodeid[ix,iy],
                          nodeid[ix + 1,iy],
                          nodeid[ix + 1,iy + 1],
                          nodeid[ix,iy + 1]]).transpose()

    elif dim == 3:
        elems = nm.array([nodeid[ix,iy,iz],
                          nodeid[ix + 1,iy,iz],
                          nodeid[ix + 1,iy + 1,iz],
                          nodeid[ix,iy + 1,iz],
                          nodeid[ix,iy,iz + 1],
                          nodeid[ix + 1,iy,iz + 1],
                          nodeid[ix + 1,iy + 1,iz + 1],
                          nodeid[ix,iy + 1,iz + 1]]).transpose()

    if etype == 't':
        # Split each quadrilateral/hexahedral cell into simplices.
        elems = elems_q2t(elems)

    eid = etype + str(dim)
    # Nodes per cell for each (element type, dimension) combination.
    eltab = {'q2': 4, 'q3': 8, 't2': 3, 't3': 4}

    mesh = Mesh.from_data('voxel_data',
                          coors, nm.ones((nnod,), dtype=nm.int32),
                          [nm.ascontiguousarray(elems)],
                          [nm.ones((nel,), dtype=nm.int32)],
                          ['%d_%d' % (dim, eltab[eid])])

    return mesh
def main():
    """
    Generate a set of example meshes - one block mesh and seven cylinder
    variants - and write them to '0.mesh' ... '7.mesh' in the current
    directory.
    """
    shape = nm.array((10, 10, 10))
    centre = nm.array((1.0, 2.0, 3.0))

    mesh = gen_block_mesh(nm.array((1.0, 2.0, 3.0)), shape, centre, name='')
    mesh.write('0.mesh', io='auto')

    # (dims, is_open, open_angle, non_uniform) for meshes '1.mesh'-'7.mesh'.
    cylinder_cases = [
        ((1.0, 1.0, 2.0, 2.0, 3), False, 0.0, False),
        ((1.0, 1.0, 2.0, 2.0, 3), True, 0.0, False),
        ((1.0, 1.0, 2.0, 2.0, 3), True, 0.5, False),
        ((0.0, 0.0, 2.0, 2.0, 3), False, 0.0, False),
        ((0.0, 0.0, 1.0, 2.0, 3), True, 0.5, False),
        ((0.0, 0.0, 1.0, 2.0, 3), True, 0.5, True),
        ((0.5, 0.5, 1.0, 2.0, 3), True, 0.5, True),
    ]
    for num, (dims, is_open, open_angle, non_uniform) in \
            enumerate(cylinder_cases, 1):
        mesh = gen_cylinder_mesh(nm.array(dims), shape, centre,
                                 is_open=is_open, open_angle=open_angle,
                                 non_uniform=non_uniform, name='')
        mesh.write('%d.mesh' % num, io='auto')
if __name__ == '__main__':
main() | sfepy/mesh/mesh_generators.py | from __future__ import print_function
from __future__ import absolute_import
import numpy as nm
import sys
from six.moves import range
sys.path.append('.')
from sfepy.base.base import output, assert_
from sfepy.base.ioutils import ensure_path
from sfepy.linalg import cycle
from sfepy.discrete.fem.mesh import Mesh
from sfepy.mesh.mesh_tools import elems_q2t
def get_tensor_product_conn(shape):
    """
    Generate vertex connectivity for cells of a tensor-product mesh of the
    given shape.

    Parameters
    ----------
    shape : array of 2 or 3 ints
        Shape (counts of nodes in x, y, z) of the mesh.

    Returns
    -------
    conn : array
        The vertex connectivity array.
    desc : str
        The cell kind.
    """
    shape = nm.asarray(shape)
    dim = len(shape)
    assert_(1 <= dim <= 3)

    n_nod = nm.prod(shape)
    n_el = nm.prod(shape - 1)

    # Global vertex numbers laid out on the tensor-product grid.
    vertices = nm.arange(n_nod, dtype=nm.int32).reshape(shape)

    lo, hi = slice(None, -1), slice(1, None)
    if dim == 1:
        # Line cells: (left, right) vertex of each segment.
        corners = [(lo,), (hi,)]
        desc = '1_2'

    elif dim == 2:
        # Quadrilateral cells, corner vertices in circular order.
        corners = [(lo, lo), (hi, lo), (hi, hi), (lo, hi)]
        desc = '2_4'

    else:
        # Hexahedral cells: bottom face first, then the top face.
        corners = [(lo, lo, lo), (hi, lo, lo), (hi, hi, lo), (lo, hi, lo),
                   (lo, lo, hi), (hi, lo, hi), (hi, hi, hi), (lo, hi, hi)]
        desc = '3_8'

    conn = nm.empty((n_el, len(corners)), dtype=nm.int32)
    for ic, sl in enumerate(corners):
        conn[:, ic] = vertices[sl].flat

    return conn, desc
def gen_block_mesh(dims, shape, centre, mat_id=0, name='block',
                   coors=None, verbose=True):
    """
    Generate a 2D or 3D block mesh. The dimension is determined by the
    length of the shape argument.

    Parameters
    ----------
    dims : array of 2 or 3 floats
        Dimensions of the block.
    shape : array of 2 or 3 ints
        Shape (counts of nodes in x, y, z) of the block mesh.
    centre : array of 2 or 3 floats
        Centre of the block.
    mat_id : int, optional
        The material id of all elements.
    name : string
        Mesh name.
    verbose : bool
        If True, show progress of the mesh generation.

    Returns
    -------
    mesh : Mesh instance
    """
    # NOTE(review): the `coors` argument is unused - apparently kept for
    # backward compatibility; verify against callers before removing.
    dims = nm.asarray(dims, dtype=nm.float64)
    shape = nm.asarray(shape, dtype=nm.int32)
    centre = nm.asarray(centre, dtype=nm.float64)

    dim = shape.shape[0]

    # Ignore extra components of centre/dims beyond the mesh dimension.
    centre = centre[:dim]
    dims = dims[:dim]

    n_nod = nm.prod(shape)
    output('generating %d vertices...' % n_nod, verbose=verbose)

    # Corner of the block and the element size along each axis.
    x0 = centre - 0.5 * dims
    dd = dims / (shape - 1)

    # Vertex ordering must match get_tensor_product_conn() below.
    ngrid = nm.mgrid[[slice(ii) for ii in shape]]
    ngrid.shape = (dim, n_nod)

    coors = x0 + ngrid.T * dd
    output('...done', verbose=verbose)

    n_el = nm.prod(shape - 1)
    output('generating %d cells...' % n_el, verbose=verbose)

    mat_ids = nm.empty((n_el,), dtype=nm.int32)
    mat_ids.fill(mat_id)

    conn, desc = get_tensor_product_conn(shape)
    output('...done', verbose=verbose)

    mesh = Mesh.from_data(name, coors, None, [conn], [mat_ids], [desc])
    return mesh
def gen_cylinder_mesh(dims, shape, centre, axis='x', force_hollow=False,
                      is_open=False, open_angle=0.0, non_uniform=False,
                      name='cylinder', verbose=True):
    """
    Generate a cylindrical mesh along an axis. Its cross-section can be
    ellipsoidal.

    Parameters
    ----------
    dims : array of 5 floats
        Dimensions of the cylinder: inner surface semi-axes a1, b1, outer
        surface semi-axes a2, b2, length.
    shape : array of 3 ints
        Shape (counts of nodes in radial, circumferential and longitudinal
        directions) of the cylinder mesh.
    centre : array of 3 floats
        Centre of the cylinder.
    axis: one of 'x', 'y', 'z'
        The axis of the cylinder.
    force_hollow : boolean
        Force hollow mesh even if inner radii a1 = b1 = 0.
    is_open : boolean
        Generate an open cylinder segment.
    open_angle : float
        Opening angle in radians.
    non_uniform : boolean
        If True, space the mesh nodes in radial direction so that the element
        volumes are (approximately) the same, making thus the elements towards
        the outer surface thinner.
    name : string
        Mesh name.
    verbose : bool
        If True, show progress of the mesh generation.

    Returns
    -------
    mesh : Mesh instance
    """
    dims = nm.asarray(dims, dtype=nm.float64)
    shape = nm.asarray(shape, dtype=nm.int32)
    centre = nm.asarray(centre, dtype=nm.float64)

    a1, b1, a2, b2, length = dims
    nr, nfi, nl = shape

    # The mesh is built along the x axis and permuted to `axis` at the end.
    origin = centre - nm.array([0.5 * length, 0.0, 0.0])

    dfi = 2.0 * (nm.pi - open_angle) / nfi
    if is_open:
        # An open segment needs an extra angular layer of vertices.
        nnfi = nfi + 1
    else:
        nnfi = nfi

    # Hollow unless both inner semi-axes are (numerically) zero.
    is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15)

    if is_hollow:
        mr = 0
    else:
        # A solid cylinder shares the on-axis vertices among all angles.
        mr = (nnfi - 1) * nl

    grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32)
    n_nod = nr * nnfi * nl - mr
    coors = nm.zeros((n_nod, 3), dtype=nm.float64)

    angles = nm.linspace(open_angle, open_angle+(nfi)*dfi, nfi+1)
    xs = nm.linspace(0.0, length, nl)
    if non_uniform:
        # Radii chosen so that consecutive radial layers enclose equal areas.
        ras = nm.zeros((nr,), dtype=nm.float64)
        rbs = nm.zeros_like(ras)
        advol = (a2**2 - a1**2) / (nr - 1)
        bdvol = (b2**2 - b1**2) / (nr - 1)
        ras[0], rbs[0] = a1, b1
        for ii in range(1, nr):
            ras[ii] = nm.sqrt(advol + ras[ii-1]**2)
            rbs[ii] = nm.sqrt(bdvol + rbs[ii-1]**2)
    else:
        ras = nm.linspace(a1, a2, nr)
        rbs = nm.linspace(b1, b2, nr)

    # This is 3D only...
    output('generating %d vertices...' % n_nod, verbose=verbose)
    ii = 0
    for ix in range(nr):
        a, b = ras[ix], rbs[ix]
        for iy, fi in enumerate(angles[:nnfi]):
            for iz, x in enumerate(xs):
                grid[ix,iy,iz] = ii
                coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)]
                ii += 1

                if not is_hollow and (ix == 0):
                    # Solid core: reuse the single axis vertex (iy == 0)
                    # for all angular positions.
                    if iy > 0:
                        grid[ix,iy,iz] = grid[ix,0,iz]
                        ii -= 1
    assert_(ii == n_nod)
    output('...done', verbose=verbose)

    n_el = (nr - 1) * nfi * (nl - 1)
    conn = nm.zeros((n_el, 8), dtype=nm.int32)

    output('generating %d cells...' % n_el, verbose=verbose)
    ii = 0
    for (ix, iy, iz) in cycle([nr-1, nnfi, nl-1]):
        if iy < (nnfi - 1):
            conn[ii,:] = [grid[ix,iy,iz], grid[ix+1,iy,iz],
                          grid[ix+1,iy+1,iz], grid[ix,iy+1,iz],
                          grid[ix,iy,iz+1], grid[ix+1,iy,iz+1],
                          grid[ix+1,iy+1,iz+1], grid[ix,iy+1,iz+1]]
            ii += 1
        elif not is_open:
            # Closed cylinder: connect the last angular layer back to 0.
            conn[ii,:] = [grid[ix,iy,iz], grid[ix+1,iy,iz],
                          grid[ix+1,0,iz], grid[ix,0,iz],
                          grid[ix,iy,iz+1], grid[ix+1,iy,iz+1],
                          grid[ix+1,0,iz+1], grid[ix,0,iz+1]]
            ii += 1

    mat_id = nm.zeros((n_el,), dtype = nm.int32)
    desc = '3_8'

    assert_(n_nod == (conn.max() + 1))
    output('...done', verbose=verbose)

    # Permute the coordinate columns to the requested cylinder axis.
    if axis == 'z':
        coors = coors[:,[1,2,0]]
    elif axis == 'y':
        coors = coors[:,[2,0,1]]

    mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc])
    return mesh
def _spread_along_axis(axis, coors, tangents, grading_fun):
"""
Spread the coordinates along the given axis using the grading function, and
the tangents in the other two directions.
"""
oo = list(set([0, 1, 2]).difference([axis]))
c0, c1, c2 = coors[:, axis], coors[:, oo[0]], coors[:, oo[1]]
out = nm.empty_like(coors)
mi, ma = c0.min(), c0.max()
nc0 = (c0 - mi) / (ma - mi)
out[:, axis] = oc0 = grading_fun(nc0) * (ma - mi) + mi
nc = oc0 - oc0.min()
mi, ma = c1.min(), c1.max()
n1 = 2 * (c1 - mi) / (ma - mi) - 1
out[:, oo[0]] = c1 + n1 * nc * tangents[oo[0]]
mi, ma = c2.min(), c2.max()
n2 = 2 * (c2 - mi) / (ma - mi) - 1
out[:, oo[1]] = c2 + n2 * nc * tangents[oo[1]]
return out
def _get_extension_side(side, grading_fun, mat_id,
                        b_dims, b_shape, e_dims, e_shape, centre):
    """
    Get a mesh extending the given side of a block mesh.

    Parameters
    ----------
    side : 0, 1 or 2
        The axis (x, y, z) of the extension.
    grading_fun : callable
        The grading function passed to _spread_along_axis().
    mat_id : int
        The material id of the extension cells.
    b_dims, b_shape : arrays
        Dimensions and shape of the central block.
    e_dims : array
        Dimensions of the complete (extended) block.
    e_shape : int
        Node count of the extension in the `side` direction.
    centre : array
        The centre of the central block.

    Returns
    -------
    e_mesh : Mesh instance
        The extension mesh.
    shape : list of 3 ints
        The node shape of the extension mesh.
    """
    # Pure extension dimensions.
    pe_dims = 0.5 * (e_dims - b_dims)
    # Centre of the extension block, offset along the extension axis.
    coff = 0.5 * (b_dims + pe_dims)
    cc = centre + coff * nm.eye(3)[side]

    if side == 0: # x axis.
        dims = [pe_dims[0], b_dims[1], b_dims[2]]
        shape = [e_shape, b_shape[1], b_shape[2]]
        tangents = [0, pe_dims[1] / pe_dims[0], pe_dims[2] / pe_dims[0]]

    elif side == 1: # y axis.
        dims = [b_dims[0], pe_dims[1], b_dims[2]]
        shape = [b_shape[0], e_shape, b_shape[2]]
        tangents = [pe_dims[0] / pe_dims[1], 0, pe_dims[2] / pe_dims[1]]

    elif side == 2: # z axis.
        dims = [b_dims[0], b_dims[1], pe_dims[2]]
        shape = [b_shape[0], b_shape[1], e_shape]
        tangents = [pe_dims[0] / pe_dims[2], pe_dims[1] / pe_dims[2], 0]

    e_mesh = gen_block_mesh(dims, shape, cc, mat_id=mat_id, verbose=False)
    # Grade node spacing away from the central block.
    e_mesh.coors[:] = _spread_along_axis(side, e_mesh.coors, tangents,
                                         grading_fun)

    return e_mesh, shape
def gen_extended_block_mesh(b_dims, b_shape, e_dims, e_shape, centre,
                            grading_fun=None, name=None):
    """
    Generate a 3D mesh with a central block and (coarse) extending side meshes.

    The resulting mesh is again a block. Each of the components has a different
    material id.

    Parameters
    ----------
    b_dims : array of 3 floats
        The dimensions of the central block.
    b_shape : array of 3 ints
        The shape (counts of nodes in x, y, z) of the central block mesh.
    e_dims : array of 3 floats
        The dimensions of the complete block (central block + extensions).
    e_shape : int
        The count of nodes of extending blocks in the direction from the
        central block.
    centre : array of 3 floats
        The centre of the mesh.
    grading_fun : callable, optional
        A function of :math:`x \in [0, 1]` that can be used to shift nodes in
        the extension axis directions to allow smooth grading of element sizes
        from the centre. The default function is :math:`x**p` with :math:`p`
        determined so that the element sizes next to the central block have the
        size of the shortest edge of the central block.
    name : string, optional
        The mesh name.

    Returns
    -------
    mesh : Mesh instance
    """
    b_dims = nm.asarray(b_dims, dtype=nm.float64)
    b_shape = nm.asarray(b_shape, dtype=nm.int32)
    e_dims = nm.asarray(e_dims, dtype=nm.float64)
    centre = nm.asarray(centre, dtype=nm.float64)

    # Pure extension dimensions.
    pe_dims = 0.5 * (e_dims - b_dims)
    # Central block element sizes.
    dd = (b_dims / (b_shape - 1))
    # The "first x" going to grading_fun.
    nc = 1.0 / (e_shape - 1)
    # Grading power and function.
    power = nm.log(dd.min() / pe_dims.min()) / nm.log(nc)
    grading_fun = (lambda x: x**power) if grading_fun is None else grading_fun

    # Central block mesh.
    b_mesh = gen_block_mesh(b_dims, b_shape, centre, mat_id=0, verbose=False)

    # 'x' extension.
    e_mesh, xs = _get_extension_side(0, grading_fun, 10,
                                     b_dims, b_shape, e_dims, e_shape, centre)
    mesh = b_mesh + e_mesh

    # Mirror by 'x'.
    e_mesh.coors[:, 0] = (2 * centre[0]) - e_mesh.coors[:, 0]
    e_mesh.cmesh.cell_groups.fill(11)
    mesh = mesh + e_mesh

    # 'y' extension.
    e_mesh, ys = _get_extension_side(1, grading_fun, 20,
                                     b_dims, b_shape, e_dims, e_shape, centre)
    mesh = mesh + e_mesh

    # Mirror by 'y'.
    e_mesh.coors[:, 1] = (2 * centre[1]) - e_mesh.coors[:, 1]
    e_mesh.cmesh.cell_groups.fill(21)
    mesh = mesh + e_mesh

    # 'z' extension.
    e_mesh, zs = _get_extension_side(2, grading_fun, 30,
                                     b_dims, b_shape, e_dims, e_shape, centre)
    mesh = mesh + e_mesh

    # Mirror by 'z'.
    e_mesh.coors[:, 2] = (2 * centre[2]) - e_mesh.coors[:, 2]
    e_mesh.cmesh.cell_groups.fill(31)
    mesh = mesh + e_mesh

    if name is not None:
        mesh.name = name

    # Verify merging by checking the number of nodes.
    n_nod = (nm.prod(nm.maximum(b_shape - 2, 0)) + 2 * nm.prod(xs)
             + 2 * (max(ys[0] - 2, 0) * ys[1] * ys[2])
             + 2 * (max(zs[0] - 2, 0) * max(zs[1] - 2, 0) * zs[2]))
    if n_nod != mesh.n_nod:
        raise ValueError('Merge of meshes failed! (%d == %d)'
                         % (n_nod, mesh.n_nod))

    return mesh
def tiled_mesh1d(conn, coors, ngrps, idim, n_rep, bb, eps=1e-6, ndmap=False):
    """
    Repeat a periodic mesh `n_rep` times along axis `idim`, gluing the
    matching boundary vertices of consecutive copies together.

    Parameters
    ----------
    conn : array
        The cell connectivity.
    coors : array
        The vertex coordinates.
    ngrps : array
        The vertex groups.
    idim : int
        The coordinate axis along which to repeat.
    n_rep : int
        The number of repetitions.
    bb : array of 2 floats
        The bounding interval of the mesh along axis `idim`.
    eps : float, optional
        Tolerance for detecting boundary vertices.
    ndmap : bool or array, optional
        If not False, also build and return the map from output vertex ids
        to vertex ids of the reference cell. If an array, it is used to
        chain maps from previous calls.

    Returns
    -------
    oconn, ocoors, ongrps : arrays
        The connectivity, coordinates and vertex groups of the tiled mesh.
    ndmap_out : array
        The vertex id map, returned only when `ndmap` is not False.
    """
    from sfepy.discrete.fem.periodic import match_grid_plane

    # Vertices on the "low" (s1) and "high" (s2) boundary planes along idim.
    s1 = nm.nonzero(coors[:,idim] < (bb[0] + eps))[0]
    s2 = nm.nonzero(coors[:,idim] > (bb[1] - eps))[0]

    if s1.shape != s2.shape:
        raise ValueError('incompatible shapes: %s == %s'\
                         % (s1.shape, s2.shape))

    (nnod0, dim) = coors.shape
    # Each copy after the first shares its low plane with the previous copy.
    nnod = nnod0 * n_rep - s1.shape[0] * (n_rep - 1)
    (nel0, nnel) = conn.shape
    nel = nel0 * n_rep

    # Translation vector between consecutive copies.
    dd = nm.zeros((dim,), dtype=nm.float64)
    dd[idim] = bb[1] - bb[0]

    # Pairing of low-plane and high-plane vertices.
    m1, m2 = match_grid_plane(coors[s1], coors[s2], idim)

    oconn = nm.zeros((nel, nnel), dtype=nm.int32)
    ocoors = nm.zeros((nnod, dim), dtype=nm.float64)
    ongrps = nm.zeros((nnod,), dtype=nm.int32)

    if type(ndmap) is bool:
        ret_ndmap = ndmap

    else:
        ret_ndmap= True
        ndmap_out = nm.zeros((nnod,), dtype=nm.int32)

    el_off = 0
    nd_off = 0

    for ii in range(n_rep):
        if ii == 0:
            # The first copy is stored verbatim.
            oconn[0:nel0,:] = conn
            ocoors[0:nnod0,:] = coors
            ongrps[0:nnod0] = ngrps.squeeze()
            nd_off += nnod0

            # Precompute the renumbering of non-shared vertices: mask out
            # the low-plane vertices that get merged into the previous copy.
            mapto = s2[m2]
            mask = nm.ones((nnod0,), dtype=nm.int32)
            mask[s1] = 0
            remap0 = nm.cumsum(mask) - 1
            nnod0r = nnod0 - s1.shape[0]
            cidx = nm.where(mask)
            if ret_ndmap:
                ndmap_out[0:nnod0] = nm.arange(nnod0)

        else:
            # Map low-plane vertices of this copy onto the high-plane
            # vertices of the previous copy; store the shifted remainder.
            remap = remap0 + nd_off
            remap[s1[m1]] = mapto
            mapto = remap[s2[m2]]

            ocoors[nd_off:(nd_off + nnod0r),:] =\
                (coors[cidx,:] + ii * dd)
            ongrps[nd_off:(nd_off + nnod0r)] = ngrps[cidx].squeeze()
            oconn[el_off:(el_off + nel0),:] = remap[conn]
            if ret_ndmap:
                ndmap_out[nd_off:(nd_off + nnod0r)] = cidx[0]
            nd_off += nnod0r

        el_off += nel0

    if ret_ndmap:
        if ndmap is not None:
            # Chain with the map produced by a previous call.
            max_nd_ref = nm.max(ndmap)
            idxs = nm.where(ndmap_out > max_nd_ref)
            ndmap_out[idxs] = ndmap[ndmap_out[idxs]]
        return oconn, ocoors, ongrps, ndmap_out
    else:
        return oconn, ocoors, ongrps
def gen_tiled_mesh(mesh, grid=None, scale=1.0, eps=1e-6, ret_ndmap=False):
    """
    Generate a new mesh by repeating a given periodic element
    along each axis.

    Parameters
    ----------
    mesh : Mesh instance
        The input periodic FE mesh.
    grid : array
        Number of repetition along each axis.
    scale : float, optional
        Scaling factor.
    eps : float, optional
        Tolerance for boundary detection.
    ret_ndmap : bool, optional
        If True, return global node map.

    Returns
    -------
    mesh_out : Mesh instance
        FE mesh.
    ndmap : array
        Maps: actual node id --> node id in the reference cell.
    """
    bbox = mesh.get_bounding_box()

    if grid is None:
        # Derive the repetition counts from the scaling factor.
        iscale = max(int(1.0 / scale), 1)
        grid = [iscale] * mesh.dim

    conn = mesh.get_conn(mesh.descs[0])
    mat_ids = mesh.cmesh.cell_groups
    coors = mesh.coors
    ngrps = mesh.cmesh.vertex_groups
    nrep = nm.prod(grid)
    ndmap = None

    output('repeating %s ...' % grid)
    nblk = 1
    # Tile axis by axis; tiled_mesh1d() glues the periodic boundary planes.
    for ii, gr in enumerate(grid):
        if ret_ndmap:
            (conn, coors,
             ngrps, ndmap0) = tiled_mesh1d(conn, coors, ngrps,
                                           ii, gr, bbox.transpose()[ii],
                                           eps=eps, ndmap=ndmap)
            ndmap = ndmap0

        else:
            conn, coors, ngrps = tiled_mesh1d(conn, coors, ngrps,
                                              ii, gr, bbox.transpose()[ii],
                                              eps=eps)
        nblk *= gr

    output('...done')

    mat_ids = nm.tile(mat_ids, (nrep,))
    mesh_out = Mesh.from_data('tiled mesh', coors * scale, ngrps,
                              [conn], [mat_ids], [mesh.descs[0]])

    if ret_ndmap:
        return mesh_out, ndmap
    else:
        return mesh_out
def gen_misc_mesh(mesh_dir, force_create, kind, args, suffix='.mesh',
                  verbose=False):
    """
    Create sphere or cube mesh according to `kind` in the given
    directory if it does not exist and return path to it.

    Parameters
    ----------
    mesh_dir : str or None
        The output directory. The sfepy data 'meshes' directory is used
        when None.
    force_create : bool
        If True, generate the mesh even if its file already exists.
    kind : 'sphere' or 'cube'
        The kind of the mesh to create.
    args : sequence
        The mesh parameters; missing trailing values are filled in from
        defaults derived from the leading ones.
    suffix : str, optional
        The file name suffix of the cube mesh.
    verbose : bool, optional
        If True, print the coerced arguments.

    Returns
    -------
    filename : str
        The path to the mesh file.
    """
    import os
    from sfepy import data_dir

    defdir = os.path.join(data_dir, 'meshes')
    if mesh_dir is None:
        mesh_dir = defdir

    def retype(args, types, defaults):
        # Pad `args` with defaults and coerce each value to its type.
        args = list(args)
        args.extend(defaults[len(args):len(defaults)])
        return tuple(typ(value) for typ, value in zip(types, args))

    if kind == 'sphere':
        default = [5, 41, args[0]]
        args = retype(args, [float, int, float], default)
        mesh_pattern = os.path.join(mesh_dir, 'sphere-%.2f-%.2f-%i')

    else:
        assert_(kind == 'cube')

        args = retype(args,
                      (int, float, int, float, int, float),
                      (args[0], args[1], args[0], args[1], args[0], args[1]))
        mesh_pattern = os.path.join(mesh_dir, 'cube-%i_%.2f-%i_%.2f-%i_%.2f')

    if verbose:
        output(args)

    filename = mesh_pattern % args
    if not force_create:
        # Reuse an existing mesh file when possible.
        if os.path.exists(filename): return filename
        if os.path.exists(filename + '.mesh'): return filename + '.mesh'
        if os.path.exists(filename + '.vtk'): return filename + '.vtk'

    if kind == 'cube':
        filename = filename + suffix
        ensure_path(filename)

        output('creating new cube mesh')
        output('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)'
               % args)
        output('to file %s...' % filename)

        mesh = gen_block_mesh(args[1::2], args[0::2],
                              (0.0, 0.0, 0.0), name=filename)
        mesh.write(filename, io='auto')
        output('...done')

    else:
        import subprocess, shutil, tempfile

        filename = filename + '.mesh'
        ensure_path(filename)

        output('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d'
               % args)
        output('to file %s...' % filename)

        # Fill the gmsh geometry template; context managers guarantee that
        # the file handles are closed even when an error occurs.
        with open(os.path.join(defdir, 'quantum', 'sphere.geo')) as fin:
            template = fin.read()

        tmp_dir = tempfile.mkdtemp()
        try:
            tmpfile = os.path.join(tmp_dir, 'sphere.geo.temp')
            with open(tmpfile, 'w') as fout:
                fout.write("""
R = %i.0;
n = %i.0;
dens = %f;
""" % args)
                fout.write(template)

            subprocess.call(['gmsh', '-3', tmpfile, '-format', 'mesh',
                             '-o', filename])

        finally:
            # Remove the temporary directory even if gmsh fails.
            shutil.rmtree(tmp_dir)

        output('...done')

    return filename
def gen_mesh_from_string(mesh_name, mesh_dir):
    """
    Interpret a mesh name: either a plain mesh file name, which is returned
    unchanged, or a generation specification like 'kind(arg1,arg2)' or
    'kind:arg1,arg2:' with an optional trailing '*' forcing regeneration,
    which is handed over to gen_misc_mesh().
    """
    import re

    match = re.match(r'^\s*([a-zA-Z]+)[:\(]([^\):]*)[:\)](\*)?\s*$',
                     mesh_name)
    if match is None:
        # A plain mesh file name - pass it through.
        return mesh_name

    kind = match.group(1)
    args = match.group(2).split(',')
    force_create = match.group(3) == '*'
    return gen_misc_mesh(mesh_dir, force_create, kind, args)
def gen_mesh_from_geom(geo, a=None, verbose=False, refine=False):
    """
    Runs mesh generator - tetgen for 3D or triangle for 2D meshes.

    Parameters
    ----------
    geo : geometry
        geometry description
    a : int, optional
        a maximum area/volume constraint
    verbose : bool, optional
        detailed information
    refine : bool, optional
        refines mesh

    Returns
    -------
    mesh : Mesh instance
        triangular or tetrahedral mesh
    """
    import os.path as op
    import shutil
    import tempfile

    import pexpect

    meshgen_call = {2: ('triangle', ''), 3: ('tetgen', 'BFENk')}
    if geo.dim not in meshgen_call:
        # Fail early instead of referencing an unbound `mesh` below.
        raise ValueError('unsupported geometry dimension! (%d)' % geo.dim)

    tmp_dir = tempfile.mkdtemp()
    try:
        polyfilename = op.join(tmp_dir, 'meshgen.poly')

        # Write geometry to poly file - the common input of both generators.
        geo.to_poly_file(polyfilename)

        params = "-ACp"
        params += "q" if refine else ''
        params += "V" if verbose else "Q"
        params += meshgen_call[geo.dim][1]
        if a is not None:
            params += "a%f" % (a)
        params += " %s" % (polyfilename)
        cmd = "%s %s" % (meshgen_call[geo.dim][0], params)
        if verbose: print("Generating mesh using", cmd)

        # The generator writes its output next to the poly file.
        pexpect.run(cmd, timeout=None)

        bname = op.splitext(polyfilename)[0]
        if geo.dim == 2:
            mesh = Mesh.from_file(bname + '.1.node')
        else:
            mesh = Mesh.from_file(bname + '.1.vtk')

    finally:
        # Remove the temporary directory even when the generator fails.
        shutil.rmtree(tmp_dir)

    return mesh
def gen_mesh_from_voxels(voxels, dims, etype='q'):
    """
    Generate FE mesh from voxels (volumetric data).

    Parameters
    ----------
    voxels : array
        Voxel matrix, 1=material.
    dims : array
        Size of one voxel.
    etype : integer, optional
        'q' - quadrilateral or hexahedral elements
        't' - triangular or tetrahedral elements

    Returns
    -------
    mesh : Mesh instance
        Finite element mesh.
    """
    dims = nm.array(dims).squeeze()
    dim = len(dims)
    nddims = nm.array(voxels.shape) + 2

    # Mark the grid nodes touched by the cells; the per-node counts are used
    # only as a non-zero mask when numbering the nodes below.
    nodemtx = nm.zeros(nddims, dtype=nm.int32)

    if dim == 2:
        #iy, ix = nm.where(voxels.transpose())
        iy, ix = nm.where(voxels)
        nel = ix.shape[0]

        if etype == 'q':
            nodemtx[ix,iy] += 1
            nodemtx[ix + 1,iy] += 1
            nodemtx[ix + 1,iy + 1] += 1
            nodemtx[ix,iy + 1] += 1

        elif etype == 't':
            nodemtx[ix,iy] += 2
            nodemtx[ix + 1,iy] += 1
            nodemtx[ix + 1,iy + 1] += 2
            nodemtx[ix,iy + 1] += 1
            # Each quadrilateral is split into two triangles.
            nel *= 2

    elif dim == 3:
        #iy, ix, iz = nm.where(voxels.transpose(1, 0, 2))
        iy, ix, iz = nm.where(voxels)
        nel = ix.shape[0]

        if etype == 'q':
            nodemtx[ix,iy,iz] += 1
            nodemtx[ix + 1,iy,iz] += 1
            nodemtx[ix + 1,iy + 1,iz] += 1
            nodemtx[ix,iy + 1,iz] += 1
            nodemtx[ix,iy,iz + 1] += 1
            nodemtx[ix + 1,iy,iz + 1] += 1
            nodemtx[ix + 1,iy + 1,iz + 1] += 1
            nodemtx[ix,iy + 1,iz + 1] += 1

        elif etype == 't':
            nodemtx[ix,iy,iz] += 6
            nodemtx[ix + 1,iy,iz] += 2
            nodemtx[ix + 1,iy + 1,iz] += 2
            nodemtx[ix,iy + 1,iz] += 2
            nodemtx[ix,iy,iz + 1] += 2
            nodemtx[ix + 1,iy,iz + 1] += 2
            nodemtx[ix + 1,iy + 1,iz + 1] += 6
            nodemtx[ix,iy + 1,iz + 1] += 2
            # Each hexahedron is split into six tetrahedra.
            nel *= 6

    else:
        msg = 'incorrect voxel dimension! (%d)' % dim
        raise ValueError(msg)

    # Assign consecutive ids to the used grid nodes and scale to coordinates.
    ndidx = nm.where(nodemtx)
    coors = nm.array(ndidx).transpose() * dims
    nnod = coors.shape[0]

    nodeid = -nm.ones(nddims, dtype=nm.int32)
    nodeid[ndidx] = nm.arange(nnod)

    # generate elements
    if dim == 2:
        elems = nm.array([nodeid[ix,iy],
                          nodeid[ix + 1,iy],
                          nodeid[ix + 1,iy + 1],
                          nodeid[ix,iy + 1]]).transpose()

    elif dim == 3:
        elems = nm.array([nodeid[ix,iy,iz],
                          nodeid[ix + 1,iy,iz],
                          nodeid[ix + 1,iy + 1,iz],
                          nodeid[ix,iy + 1,iz],
                          nodeid[ix,iy,iz + 1],
                          nodeid[ix + 1,iy,iz + 1],
                          nodeid[ix + 1,iy + 1,iz + 1],
                          nodeid[ix,iy + 1,iz + 1]]).transpose()

    if etype == 't':
        # Split each quadrilateral/hexahedral cell into simplices.
        elems = elems_q2t(elems)

    eid = etype + str(dim)
    # Nodes per cell for each (element type, dimension) combination.
    eltab = {'q2': 4, 'q3': 8, 't2': 3, 't3': 4}

    mesh = Mesh.from_data('voxel_data',
                          coors, nm.ones((nnod,), dtype=nm.int32),
                          [nm.ascontiguousarray(elems)],
                          [nm.ones((nel,), dtype=nm.int32)],
                          ['%d_%d' % (dim, eltab[eid])])

    return mesh
def main():
    """
    Generate a set of example meshes - one block mesh and seven cylinder
    variants - and write them to '0.mesh' ... '7.mesh' in the current
    directory.
    """
    shape = nm.array((10, 10, 10))
    centre = nm.array((1.0, 2.0, 3.0))

    mesh = gen_block_mesh(nm.array((1.0, 2.0, 3.0)), shape, centre, name='')
    mesh.write('0.mesh', io='auto')

    # (dims, is_open, open_angle, non_uniform) for meshes '1.mesh'-'7.mesh'.
    cylinder_cases = [
        ((1.0, 1.0, 2.0, 2.0, 3), False, 0.0, False),
        ((1.0, 1.0, 2.0, 2.0, 3), True, 0.0, False),
        ((1.0, 1.0, 2.0, 2.0, 3), True, 0.5, False),
        ((0.0, 0.0, 2.0, 2.0, 3), False, 0.0, False),
        ((0.0, 0.0, 1.0, 2.0, 3), True, 0.5, False),
        ((0.0, 0.0, 1.0, 2.0, 3), True, 0.5, True),
        ((0.5, 0.5, 1.0, 2.0, 3), True, 0.5, True),
    ]
    for num, (dims, is_open, open_angle, non_uniform) in \
            enumerate(cylinder_cases, 1):
        mesh = gen_cylinder_mesh(nm.array(dims), shape, centre,
                                 is_open=is_open, open_angle=open_angle,
                                 non_uniform=non_uniform, name='')
        mesh.write('%d.mesh' % num, io='auto')

if __name__ == '__main__':
    main()
import math
def stringify(attribute_value):
if isinstance(attribute_value, list):
attribute_value = [str(val) for val in attribute_value]
try:
return str((", ".join(attribute_value)).encode('utf-8').strip())
except:
return str(", ".join(attribute_value)).strip()
else:
try:
return str(attribute_value.encode('utf-8').strip())
except:
return str(attribute_value)
class Vector:
'''
An instance of this class represents a vector in n-dimensional space
'''
def __init__(self, filename=None, features=None):
'''
Create a vector
@param metadata features
'''
self.features = {}
if filename and features:
self.filename = filename
na_metadata = ["id", "_version_", "Name", "name"]
for na in na_metadata:
features.pop(na, None)
for key in features:
self.features[key] = len(stringify(features[key]))
'''
def __str__(self):
vector_str = "( {0} ): \n".format(self.)
if self.features:
for key in self.features:
vector_str += " {1}: {2} \n".format(key, self.features[key])
return vector_str+"\n"
'''
def getMagnitude(self):
totalMagnitude = 0.0
for key in self.features:
totalMagnitude += self.features[key] ** 2
return math.sqrt(totalMagnitude)
def dotProduct(self, anotherVector):
'''
A = ax+by+cz
B = mx+ny+oz
A.B = a*m + b*n + c*o
'''
dot_product = 0.0
intersect_features = set(self.features) & set(anotherVector.features)
for feature in intersect_features:
dot_product += self.features[feature] * anotherVector.features[feature]
return dot_product
def cosTheta(self, v2):
'''
cosTheta = (V1.V2) / (|V1| |V2|)
cos 0 = 1 implies identical documents
'''
return self.dotProduct(v2) / (self.getMagnitude() * v2.getMagnitude())
def euclidean_dist(self, anotherVector):
'''
dist = ((x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2)^(0.5)
'''
intersect_features = set(self.features) & set(anotherVector.features)
dist_sum = 0.0
for feature in intersect_features:
dist_sum += (self.features[feature] - anotherVector.features[feature]) ** 2
setA = set(self.features) - intersect_features
for feature in setA:
dist_sum += self.features[feature] ** 2
setB = set(anotherVector.features) - intersect_features
for feature in setB:
dist_sum += anotherVector.features[feature] ** 2
return math.sqrt(dist_sum) | q11/tika-similarity-edited/vector.py |
import math
def stringify(attribute_value):
if isinstance(attribute_value, list):
attribute_value = [str(val) for val in attribute_value]
try:
return str((", ".join(attribute_value)).encode('utf-8').strip())
except:
return str(", ".join(attribute_value)).strip()
else:
try:
return str(attribute_value.encode('utf-8').strip())
except:
return str(attribute_value)
class Vector:
'''
An instance of this class represents a vector in n-dimensional space
'''
def __init__(self, filename=None, features=None):
'''
Create a vector
@param metadata features
'''
self.features = {}
if filename and features:
self.filename = filename
na_metadata = ["id", "_version_", "Name", "name"]
for na in na_metadata:
features.pop(na, None)
for key in features:
self.features[key] = len(stringify(features[key]))
'''
def __str__(self):
vector_str = "( {0} ): \n".format(self.)
if self.features:
for key in self.features:
vector_str += " {1}: {2} \n".format(key, self.features[key])
return vector_str+"\n"
'''
def getMagnitude(self):
totalMagnitude = 0.0
for key in self.features:
totalMagnitude += self.features[key] ** 2
return math.sqrt(totalMagnitude)
def dotProduct(self, anotherVector):
'''
A = ax+by+cz
B = mx+ny+oz
A.B = a*m + b*n + c*o
'''
dot_product = 0.0
intersect_features = set(self.features) & set(anotherVector.features)
for feature in intersect_features:
dot_product += self.features[feature] * anotherVector.features[feature]
return dot_product
def cosTheta(self, v2):
'''
cosTheta = (V1.V2) / (|V1| |V2|)
cos 0 = 1 implies identical documents
'''
return self.dotProduct(v2) / (self.getMagnitude() * v2.getMagnitude())
def euclidean_dist(self, anotherVector):
'''
dist = ((x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2)^(0.5)
'''
intersect_features = set(self.features) & set(anotherVector.features)
dist_sum = 0.0
for feature in intersect_features:
dist_sum += (self.features[feature] - anotherVector.features[feature]) ** 2
setA = set(self.features) - intersect_features
for feature in setA:
dist_sum += self.features[feature] ** 2
setB = set(anotherVector.features) - intersect_features
for feature in setB:
dist_sum += anotherVector.features[feature] ** 2
return math.sqrt(dist_sum) | 0.530723 | 0.353763 |
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.optim import lr_scheduler
from torch.autograd import Variable
def squash(x):
lengths2 = x.pow(2).sum(dim=2)
lengths = lengths2.sqrt()
x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1)
return x
class AgreementRouting(nn.Module):
def __init__(self, input_caps, output_caps, n_iterations):
super(AgreementRouting, self).__init__()
self.n_iterations = n_iterations
self.b = nn.Parameter(torch.zeros((input_caps, output_caps)))
def forward(self, u_predict):
batch_size, input_caps, output_caps, output_dim = u_predict.size()
c = F.softmax(self.b)
s = (c.unsqueeze(2) * u_predict).sum(dim=1)
v = squash(s)
if self.n_iterations > 0:
b_batch = self.b.expand((batch_size, input_caps, output_caps))
for r in range(self.n_iterations):
v = v.unsqueeze(1)
b_batch = b_batch + (u_predict * v).sum(-1)
c = F.softmax(b_batch.view(-1, output_caps)).view(-1, input_caps, output_caps, 1)
s = (c * u_predict).sum(dim=1)
v = squash(s)
return v
class CapsLayer(nn.Module):
def __init__(self, input_caps, input_dim, output_caps, output_dim, routing_module):
super(CapsLayer, self).__init__()
self.input_dim = input_dim
self.input_caps = input_caps
self.output_dim = output_dim
self.output_caps = output_caps
self.weights = nn.Parameter(torch.Tensor(input_caps, input_dim, output_caps * output_dim))
self.routing_module = routing_module
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.input_caps)
self.weights.data.uniform_(-stdv, stdv)
def forward(self, caps_output):
caps_output = caps_output.unsqueeze(2)
u_predict = caps_output.matmul(self.weights)
u_predict = u_predict.view(u_predict.size(0), self.input_caps, self.output_caps, self.output_dim)
v = self.routing_module(u_predict)
return v
class PrimaryCapsLayer(nn.Module):
def __init__(self, input_channels, output_caps, output_dim, kernel_size, stride):
super(PrimaryCapsLayer, self).__init__()
self.conv = nn.Conv2d(input_channels, output_caps * output_dim, kernel_size=kernel_size, stride=stride)
self.input_channels = input_channels
self.output_caps = output_caps
self.output_dim = output_dim
def forward(self, input):
out = self.conv(input)
N, C, H, W = out.size()
out = out.view(N, self.output_caps, self.output_dim, H, W)
# will output N x OUT_CAPS x OUT_DIM
out = out.permute(0, 1, 3, 4, 2).contiguous()
out = out.view(out.size(0), -1, out.size(4))
out = squash(out)
return out
class CapsNet(nn.Module):
def __init__(self, routing_iterations, n_classes=10):
super(CapsNet, self).__init__()
self.conv1 = nn.Conv2d(1, 256, kernel_size=9, stride=1)
self.primaryCaps = PrimaryCapsLayer(256, 32, 8, kernel_size=9, stride=2) # outputs 6*6
self.num_primaryCaps = 32 * 6 * 6
routing_module = AgreementRouting(self.num_primaryCaps, n_classes, routing_iterations)
self.digitCaps = CapsLayer(self.num_primaryCaps, 8, n_classes, 16, routing_module)
def forward(self, input):
x = self.conv1(input)
x = F.relu(x)
x = self.primaryCaps(x)
x = self.digitCaps(x)
probs = x.pow(2).sum(dim=2).sqrt()
return x, probs
class ReconstructionNet(nn.Module):
def __init__(self, n_dim=16, n_classes=10):
super(ReconstructionNet, self).__init__()
self.fc1 = nn.Linear(n_dim * n_classes, 512)
self.fc2 = nn.Linear(512, 1024)
self.fc3 = nn.Linear(1024, 784)
self.n_dim = n_dim
self.n_classes = n_classes
def forward(self, x, target):
mask = Variable(torch.zeros((x.size()[0], self.n_classes)), requires_grad=False)
if next(self.parameters()).is_cuda:
mask = mask.cuda()
mask.scatter_(1, target.data.view(-1, 1), 1.)
mask = mask.unsqueeze(2)
x = x * mask
x = x.view(-1, self.n_dim * self.n_classes)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.sigmoid(self.fc3(x))
return x
class CapsNetWithReconstruction(nn.Module):
def __init__(self, capsnet, reconstruction_net):
super(CapsNetWithReconstruction, self).__init__()
self.capsnet = capsnet
self.reconstruction_net = reconstruction_net
def forward(self, x, target):
x, probs = self.capsnet(x)
reconstruction = self.reconstruction_net(x, target)
return reconstruction, probs
class MarginLoss(nn.Module):
def __init__(self, m_pos, m_neg, lambda_):
super(MarginLoss, self).__init__()
self.m_pos = m_pos
self.m_neg = m_neg
self.lambda_ = lambda_
def forward(self, lengths, targets, size_average=True):
t = torch.zeros(lengths.size()).long().cuda()
t = t.scatter_(1, targets.data.view(-1, 1), 1)
targets = Variable(t)
losses = targets.float() * F.relu(self.m_pos - lengths).pow(2) + \
self.lambda_ * (1. - targets.float()) * F.relu(lengths - self.m_neg).pow(2)
return losses.mean() if size_average else losses.sum()
if __name__ == '__main__':
import argparse
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# Training settings
parser = argparse.ArgumentParser(description='CapsNet with MNIST')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=250, metavar='N',
help='number of epochs to train (default: 250)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--routing_iterations', type=int, default=3)
parser.add_argument('--with_reconstruction', action='store_true', default=False)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Using CUDA?",args.cuda)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 5, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data/MNIST', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(2), transforms.RandomCrop(28),
transforms.ToTensor()
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data/MNIST', train=False, transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
model = CapsNet(args.routing_iterations)
if args.with_reconstruction:
reconstruction_model = ReconstructionNet(16, 10)
reconstruction_alpha = 0.0005
model = CapsNetWithReconstruction(model, reconstruction_model)
if args.cuda:
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True, patience=15, min_lr=1e-6)
loss_fn = MarginLoss(0.9, 0.1, 0.5)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, requires_grad=True), Variable(target, requires_grad=False)
optimizer.zero_grad()
if args.with_reconstruction:
output, probs = model(data, target)
reconstruction_loss = F.mse_loss(output, data.view(-1, 784))
margin_loss = loss_fn(probs, target)
loss = reconstruction_alpha * reconstruction_loss + margin_loss
else:
output, probs = model(data)
loss = loss_fn(probs, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
if args.with_reconstruction:
output, probs = model(data, target)
reconstruction_loss = F.mse_loss(output, data.view(-1, 784), size_average=False).data[0]
test_loss += loss_fn(probs, target, size_average=False).data[0]
test_loss += reconstruction_alpha * reconstruction_loss
else:
output, probs = model(data)
test_loss += loss_fn(probs, target, size_average=False).data[0]
pred = probs.data.max(1, keepdim=True)[1] # get the index of the max probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss
for epoch in range(1, args.epochs + 1):
train(epoch)
test_loss = test()
scheduler.step(test_loss)
torch.save(model.state_dict(),
'checkpoints/{:03d}_model_dict_{}routing_reconstruction{}.pth'.format(epoch, args.routing_iterations,
args.with_reconstruction)) | capsule_net.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.optim import lr_scheduler
from torch.autograd import Variable
def squash(x):
lengths2 = x.pow(2).sum(dim=2)
lengths = lengths2.sqrt()
x = x * (lengths2 / (1 + lengths2) / lengths).view(x.size(0), x.size(1), 1)
return x
class AgreementRouting(nn.Module):
def __init__(self, input_caps, output_caps, n_iterations):
super(AgreementRouting, self).__init__()
self.n_iterations = n_iterations
self.b = nn.Parameter(torch.zeros((input_caps, output_caps)))
def forward(self, u_predict):
batch_size, input_caps, output_caps, output_dim = u_predict.size()
c = F.softmax(self.b)
s = (c.unsqueeze(2) * u_predict).sum(dim=1)
v = squash(s)
if self.n_iterations > 0:
b_batch = self.b.expand((batch_size, input_caps, output_caps))
for r in range(self.n_iterations):
v = v.unsqueeze(1)
b_batch = b_batch + (u_predict * v).sum(-1)
c = F.softmax(b_batch.view(-1, output_caps)).view(-1, input_caps, output_caps, 1)
s = (c * u_predict).sum(dim=1)
v = squash(s)
return v
class CapsLayer(nn.Module):
def __init__(self, input_caps, input_dim, output_caps, output_dim, routing_module):
super(CapsLayer, self).__init__()
self.input_dim = input_dim
self.input_caps = input_caps
self.output_dim = output_dim
self.output_caps = output_caps
self.weights = nn.Parameter(torch.Tensor(input_caps, input_dim, output_caps * output_dim))
self.routing_module = routing_module
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.input_caps)
self.weights.data.uniform_(-stdv, stdv)
def forward(self, caps_output):
caps_output = caps_output.unsqueeze(2)
u_predict = caps_output.matmul(self.weights)
u_predict = u_predict.view(u_predict.size(0), self.input_caps, self.output_caps, self.output_dim)
v = self.routing_module(u_predict)
return v
class PrimaryCapsLayer(nn.Module):
def __init__(self, input_channels, output_caps, output_dim, kernel_size, stride):
super(PrimaryCapsLayer, self).__init__()
self.conv = nn.Conv2d(input_channels, output_caps * output_dim, kernel_size=kernel_size, stride=stride)
self.input_channels = input_channels
self.output_caps = output_caps
self.output_dim = output_dim
def forward(self, input):
out = self.conv(input)
N, C, H, W = out.size()
out = out.view(N, self.output_caps, self.output_dim, H, W)
# will output N x OUT_CAPS x OUT_DIM
out = out.permute(0, 1, 3, 4, 2).contiguous()
out = out.view(out.size(0), -1, out.size(4))
out = squash(out)
return out
class CapsNet(nn.Module):
def __init__(self, routing_iterations, n_classes=10):
super(CapsNet, self).__init__()
self.conv1 = nn.Conv2d(1, 256, kernel_size=9, stride=1)
self.primaryCaps = PrimaryCapsLayer(256, 32, 8, kernel_size=9, stride=2) # outputs 6*6
self.num_primaryCaps = 32 * 6 * 6
routing_module = AgreementRouting(self.num_primaryCaps, n_classes, routing_iterations)
self.digitCaps = CapsLayer(self.num_primaryCaps, 8, n_classes, 16, routing_module)
def forward(self, input):
x = self.conv1(input)
x = F.relu(x)
x = self.primaryCaps(x)
x = self.digitCaps(x)
probs = x.pow(2).sum(dim=2).sqrt()
return x, probs
class ReconstructionNet(nn.Module):
def __init__(self, n_dim=16, n_classes=10):
super(ReconstructionNet, self).__init__()
self.fc1 = nn.Linear(n_dim * n_classes, 512)
self.fc2 = nn.Linear(512, 1024)
self.fc3 = nn.Linear(1024, 784)
self.n_dim = n_dim
self.n_classes = n_classes
def forward(self, x, target):
mask = Variable(torch.zeros((x.size()[0], self.n_classes)), requires_grad=False)
if next(self.parameters()).is_cuda:
mask = mask.cuda()
mask.scatter_(1, target.data.view(-1, 1), 1.)
mask = mask.unsqueeze(2)
x = x * mask
x = x.view(-1, self.n_dim * self.n_classes)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.sigmoid(self.fc3(x))
return x
class CapsNetWithReconstruction(nn.Module):
def __init__(self, capsnet, reconstruction_net):
super(CapsNetWithReconstruction, self).__init__()
self.capsnet = capsnet
self.reconstruction_net = reconstruction_net
def forward(self, x, target):
x, probs = self.capsnet(x)
reconstruction = self.reconstruction_net(x, target)
return reconstruction, probs
class MarginLoss(nn.Module):
def __init__(self, m_pos, m_neg, lambda_):
super(MarginLoss, self).__init__()
self.m_pos = m_pos
self.m_neg = m_neg
self.lambda_ = lambda_
def forward(self, lengths, targets, size_average=True):
t = torch.zeros(lengths.size()).long().cuda()
t = t.scatter_(1, targets.data.view(-1, 1), 1)
targets = Variable(t)
losses = targets.float() * F.relu(self.m_pos - lengths).pow(2) + \
self.lambda_ * (1. - targets.float()) * F.relu(lengths - self.m_neg).pow(2)
return losses.mean() if size_average else losses.sum()
if __name__ == '__main__':
import argparse
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# Training settings
parser = argparse.ArgumentParser(description='CapsNet with MNIST')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=250, metavar='N',
help='number of epochs to train (default: 250)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--routing_iterations', type=int, default=3)
parser.add_argument('--with_reconstruction', action='store_true', default=False)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Using CUDA?",args.cuda)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 5, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data/MNIST', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(2), transforms.RandomCrop(28),
transforms.ToTensor()
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data/MNIST', train=False, transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
model = CapsNet(args.routing_iterations)
if args.with_reconstruction:
reconstruction_model = ReconstructionNet(16, 10)
reconstruction_alpha = 0.0005
model = CapsNetWithReconstruction(model, reconstruction_model)
if args.cuda:
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True, patience=15, min_lr=1e-6)
loss_fn = MarginLoss(0.9, 0.1, 0.5)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, requires_grad=True), Variable(target, requires_grad=False)
optimizer.zero_grad()
if args.with_reconstruction:
output, probs = model(data, target)
reconstruction_loss = F.mse_loss(output, data.view(-1, 784))
margin_loss = loss_fn(probs, target)
loss = reconstruction_alpha * reconstruction_loss + margin_loss
else:
output, probs = model(data)
loss = loss_fn(probs, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
if args.with_reconstruction:
output, probs = model(data, target)
reconstruction_loss = F.mse_loss(output, data.view(-1, 784), size_average=False).data[0]
test_loss += loss_fn(probs, target, size_average=False).data[0]
test_loss += reconstruction_alpha * reconstruction_loss
else:
output, probs = model(data)
test_loss += loss_fn(probs, target, size_average=False).data[0]
pred = probs.data.max(1, keepdim=True)[1] # get the index of the max probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss
for epoch in range(1, args.epochs + 1):
train(epoch)
test_loss = test()
scheduler.step(test_loss)
torch.save(model.state_dict(),
'checkpoints/{:03d}_model_dict_{}routing_reconstruction{}.pth'.format(epoch, args.routing_iterations,
args.with_reconstruction)) | 0.954921 | 0.505493 |
import yaml
import time
from gi.repository import Gtk, Gio, Gdk, Pango
from simulator import FRISCProcessor
from utils import *
# TODO: Search / go to line custom function
class SimulatorView( Gtk.Grid ):
    """Grid widget hosting the FRISC simulator UI.

    Layout: a column of Reload/Run/Step/Pause/Stop controls on the left,
    the memory table (with a go-to-line search box above it) in the centre,
    and the register display on the right.
    """

    def __init__( self, parent, console, config ):
        """Build the simulator view.

        @param parent  owning top-level window
        @param console message console used to report simulator errors
        @param config  application configuration (currently unused here)
        """
        Gtk.Grid.__init__( self )
        self.parent = parent
        self.console = console
        self.set_name( 'simulator-grid' )
        # Per-instance state. These were previously *class* attributes, which
        # made the list store, memory shadow and run flags shared between
        # every SimulatorView instance — a latent shared-mutable-state bug.
        # Model columns: breakpoint marker, address, contents (hex),
        # contents (dec), source-code annotation.
        self.memoryModel = Gtk.ListStore( str, str, str, int, str )
        self.program = ''
        self.memoryState = []
        self.flags = { 'paused' : False, 'stopped' : True }
        self.simulator = FRISCProcessor( 65536 // 4 ) # TODO: Increase later, or on demand - place in settings
        self.init_options()
        # init_goto_line() must run before init_memory_display(): it creates
        # self.searchEntry, which the memory view registers as its search entry.
        self.init_goto_line()
        self.init_memory_display()
        self.init_registers_display()

    def init_options( self ):
        """Create the Reload/Run/Step/Pause/Stop button column (left side)."""
        optionsBox = Gtk.ButtonBox()
        optionsBox.set_orientation( Gtk.Orientation.VERTICAL )
        optionsBox.set_layout( Gtk.ButtonBoxStyle.START )
        optionsBox.set_name( 'options-box' )
        optionsBox.set_margin_left( 20 )
        optionsBox.set_margin_right( 20 )
        reloadButton = Gtk.Button( 'Reload' )
        icon = Gio.ThemedIcon( name = "reload" )
        image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
        reloadButton.set_image( image )
        reloadButton.set_always_show_image( True )
        reloadButton.set_alignment( 0.0, 0.5 )
        runButton = Gtk.Button( 'Run' )
        icon = Gio.ThemedIcon( name = "media-playback-start" )
        image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
        runButton.set_image( image )
        runButton.set_always_show_image( True )
        runButton.set_alignment( 0.0, 0.5 )
        stepButton = Gtk.Button( 'Step' )
        icon = Gio.ThemedIcon( name = "next" )
        image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
        stepButton.set_image( image )
        stepButton.set_always_show_image( True )
        stepButton.set_alignment( 0.0, 0.5 )
        pauseButton = Gtk.Button( 'Pause' )
        icon = Gio.ThemedIcon( name = "media-playback-pause" )
        image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
        pauseButton.set_image( image )
        pauseButton.set_always_show_image( True )
        pauseButton.set_alignment( 0.0, 0.5 )
        stopButton = Gtk.Button( 'Stop' )
        icon = Gio.ThemedIcon( name = "media-playback-stop" )
        image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
        stopButton.set_image( image )
        stopButton.set_always_show_image( True )
        stopButton.set_alignment( 0.0, 0.5 )
        reloadButton.connect( 'clicked', self.on_reload_click )
        runButton.connect( 'clicked', self.on_run_click )
        stepButton.connect( 'clicked', self.on_step_click )
        pauseButton.connect( 'clicked', self.on_pause_click )
        stopButton.connect( 'clicked', self.on_stop_click )
        # Keep references so handlers can toggle button sensitivity later.
        self.reloadButton = reloadButton
        self.runButton = runButton
        self.stepButton = stepButton
        self.pauseButton = pauseButton
        self.stopButton = stopButton
        optionsBox.pack_start( reloadButton, True, True, 0 )
        optionsBox.pack_start( runButton, True, True, 0 )
        optionsBox.pack_start( stepButton, True, True, 0 )
        optionsBox.pack_start( pauseButton, True, True, 0 )
        optionsBox.pack_start( stopButton, True, True, 0 )
        self.attach( optionsBox, 0, 1, 1, 2 )

    def init_memory_display( self ):
        """Create the scrollable memory TreeView (centre column).

        Requires self.searchEntry, created by init_goto_line().
        """
        self.memoryView = Gtk.TreeView( self.memoryModel )
        self.memoryView.set_name( 'memory-view' )
        self.memoryView.connect( 'row-activated', self.on_row_dblclick )
        self.memoryView.set_headers_visible( False )
        self.memorySelection = self.memoryView.get_selection()
        # Search by address column (1), driven by the go-to-line entry.
        self.memoryView.set_search_column( 1 )
        self.memoryView.set_search_entry( self.searchEntry )
        self.memoryView.set_enable_search( True )
        rendererBrkPt = Gtk.CellRendererText()
        rendererBrkPt.set_padding( 10, 5 )
        rendererBrkPt.props.foreground = '#F83F4C'
        self.memoryView.append_column( Gtk.TreeViewColumn( 'Breakpoint', rendererBrkPt, text = 0 ) )
        rendererAdr = Gtk.CellRendererText()
        rendererAdr.set_padding( 10, 5 )
        rendererAdr.props.font = 'bold'
        self.memoryView.append_column( Gtk.TreeViewColumn( 'Address', rendererAdr, text = 1 ) )
        rendererContX = Gtk.CellRendererText()
        rendererContX.set_padding( 20, 5 )
        self.memoryView.append_column( Gtk.TreeViewColumn( 'Contents (HEX)', rendererContX, text = 2 ) )
        rendererContD = Gtk.CellRendererText()
        rendererContD.set_padding( 20, 5 )
        self.memoryView.append_column( Gtk.TreeViewColumn( 'Contents (DEC)', rendererContD, text = 3 ) )
        rendererAnn = Gtk.CellRendererText()
        rendererAnn.set_padding( 10, 5 )
        rendererAnn.props.foreground = '#888'
        rendererAnn.props.font = 'bold'
        self.memoryView.append_column( Gtk.TreeViewColumn( 'Source Code', rendererAnn, text = 4 ) )
        scrollBox = Gtk.ScrolledWindow()
        scrollBox.set_hexpand( True )
        scrollBox.set_vexpand( True )
        scrollBox.add( self.memoryView )
        self.attach( scrollBox, 1, 1, 1, 1 )

    def init_registers_display( self ):
        """Create the register value labels (right column).

        Ten registers are shown; index 8 (the PC, per FRISCProcessor's
        register numbering — TODO confirm) gets a distinct widget name
        for styling.
        """
        registerGrid = Gtk.Grid()
        registerGrid.set_margin_left( 20 )
        registerGrid.set_margin_right( 20 )
        registerGrid.set_name( 'register-grid' )
        self.registerDisplays = []
        for i in range( 0, 10 ):
            box = Gtk.HBox()
            box.set_name( 'register-box' )
            if i == 8:
                box.set_name( 'pc-box' )
            box.set_margin_bottom( 1 )
            label = Gtk.Label( self.simulator.get_register( i ) )
            label.set_name( 'register-value' )
            self.registerDisplays.append( label )
            name = Gtk.Label( FRISCProcessor.get_register_name( i ) )
            name.set_name( 'register-name' )
            name.set_width_chars( 5 )
            box.pack_start( name, True, True, 0 )
            box.pack_start( self.registerDisplays[ i ], True, True, 0 )
            registerGrid.attach( box, 0, i, 1, 1 )
        self.attach( registerGrid, 2, 1, 1, 2 )

    def init_goto_line( self ):
        """Create the 'Go to line' search box above the memory table."""
        box = Gtk.Box()
        box.set_orientation( Gtk.Orientation.HORIZONTAL )
        box.set_border_width( 10 )
        label = Gtk.Label( 'Go to line:' )
        label.set_name( 'goto-label' )
        self.searchEntry = Gtk.SearchEntry()
        self.searchEntry.set_text( '00000000' )
        self.searchEntry.connect( 'search-changed', self.on_search )
        box.pack_start( label, False, False, 0 )
        box.pack_start( self.searchEntry, True, True, 0 )
        self.attach( box, 1, 0, 1, 1 )

    def load_simulator( self, file ):
        """Load a program file into the simulator and rebuild the memory table.

        Populates self.memoryState with one entry per 32-bit word:
        formatted address, raw contents, breakpoint flag and source
        annotation, then mirrors it into the list store.
        """
        self.program = file
        self.simulator.load_program( file )
        self.memoryModel.clear()
        self.memoryState = []
        for i in range( 0, self.simulator.MEM_SIZE // 4 ):
            self.memoryState.append( {
                'line'       : '{:0>8X}'.format( 4*i ),
                'contents'   : self.simulator.get_word_from_mem( 4*i ),
                'breakpoint' : False,
                'annotation' : self.simulator.annotations[ 4*i ] } )
        for l in self.memoryState:
            self.memoryModel.append( self.get_memory_model_values( l ) )

    def on_reload_click( self, element ):
        """Handler for the Reload button. TODO: not implemented yet."""
        pass

    def on_run_click( self, element ):
        """Run the program until a breakpoint, a pause request, or an error.

        Pumps the GTK main loop between steps so the UI (including the
        Pause button) stays responsive, and throttles execution to one
        step per 0.25 s.
        """
        if self.is_paused(): self.flags[ 'paused' ] = False
        self.runButton.set_sensitive( False )
        self.pauseButton.set_sensitive( True )
        while not self.is_breakpoint() and not self.is_paused() and self.run_step():
            # Let pending GTK events (e.g. a Pause click) be processed.
            while Gtk.events_pending():
                Gtk.main_iteration()
            time.sleep( 0.25 )
            print('iteration')  # debug trace
        self.runButton.set_sensitive( True )

    def on_step_click( self, element ):
        """Handler for the Step button: execute a single instruction."""
        self.run_step()

    def on_pause_click( self, element ):
        """Handler for the Pause button: request the run loop to stop."""
        self.flags[ 'paused' ] = True
        self.pauseButton.set_sensitive( False )
        self.runButton.set_sensitive( True )

    def on_stop_click( self, element ):
        """Handler for the Stop button. TODO: not implemented yet."""
        pass

    def on_row_dblclick( self, t, p, c ):
        """Toggle a breakpoint when a memory row is double-clicked."""
        i = int( p.to_string() )
        self.toggle_breakpoint( p, i )

    def on_search( self, element ):
        """Handler for the go-to-line entry. TODO: jump to the address."""
        return True

    def run_step( self ):
        """Execute one simulator step and refresh the affected UI.

        @return True if the step succeeded, False if the simulator raised
                an error (which is reported on the console).
        """
        ret = True
        try:
            self.simulator.run_step()
            self.update_registers()
            self.select_active_row()
            # last_changed_address == -1 means no memory write happened.
            if self.simulator.last_changed_address != -1:
                self.update_memory( self.simulator.last_changed_address )
                self.simulator.last_changed_address = -1
        except Exception as e:
            self.console.show_message( str( e ), 'error' )
            # TODO: What to do on error?
            ret = False
        return ret

    def clear_simulator( self ):
        """Reset the simulator view. TODO: not implemented yet."""
        pass

    def update_memory( self, i ):
        """Refresh the memory row containing byte address *i*.

        memoryState and memoryModel are indexed per 32-bit word, so the
        byte address is converted to a word index first.  (Previously the
        shadow state was indexed with the raw byte address, updating the
        wrong entry and risking an IndexError for high addresses.)
        """
        w = i // 4
        c = self.memoryState[ w ][ 'contents' ] = self.simulator.get_word_from_mem( 4*w )
        it = self.memoryModel.get_iter_from_string( str( w ) )
        self.memoryModel.set( it, [ 2, 3 ], [ bin_to_pretty_hex( c ), from32( c ) ] )

    def update_registers( self ):
        """Refresh all register value labels from the simulator."""
        for i in range( 0, 10 ):
            self.registerDisplays[ i ].set_text( self.simulator.get_register( i ) )

    def select_active_row( self ):
        """Highlight the row of the instruction that was just executed.

        The PC has already advanced past the executed instruction, hence
        the - 1 on the word index.
        """
        pc = self.simulator.get_program_counter()
        it = self.memoryModel.get_iter_from_string( str( pc // 4 - 1 ) )
        self.memorySelection.select_iter( it )

    def toggle_breakpoint( self, p, i ):
        """Flip the breakpoint flag of row *i* and update its marker cell.

        @param p tree path of the row
        @param i row index (word index) as an int
        """
        self.memoryState[ i ][ 'breakpoint' ] = not self.memoryState[ i ][ 'breakpoint' ]
        self.memoryModel.set_value( self.memoryModel.get_iter( p ), 0,
                                    get_breakpoint_symbol( self.memoryState[ i ][ 'breakpoint' ] ) )

    def is_breakpoint( self ):
        """Return True if a breakpoint is set at the current PC's word."""
        return self.memoryState[ self.simulator.get_program_counter() // 4 ][ 'breakpoint' ]

    def is_paused( self ):
        """Return True if a pause has been requested."""
        return self.flags[ 'paused' ]

    def get_memory_model_values( self, l ):
        """Convert one memoryState entry into a list-store row.

        @param l dict with 'breakpoint', 'line', 'contents', 'annotation'
        @return [breakpoint symbol, address, hex contents, dec contents, annotation]
        """
        return [ get_breakpoint_symbol( l[ 'breakpoint' ] ), l[ 'line' ],
                 bin_to_pretty_hex( l[ 'contents' ] ), from32( l[ 'contents' ] ),
                 l[ 'annotation' ] ]
import time
from gi.repository import Gtk, Gio, Gdk, Pango
from simulator import FRISCProcessor
from utils import *
# TODO: Search / go to line custom function
class SimulatorView( Gtk.Grid ):
memoryModel = Gtk.ListStore( str, str, str, int, str )
program = ''
memoryState = []
flags = { 'paused' : False, 'stopped' : True }
def __init__( self, parent, console, config ):
Gtk.Grid.__init__( self )
self.parent = parent
self.console = console
self.set_name( 'simulator-grid' )
self.simulator = FRISCProcessor( 65536 // 4 ) # TODO: Increase later, or on demand - place in settings
self.init_options()
self.init_goto_line()
self.init_memory_display()
self.init_registers_display()
def init_options( self ):
optionsBox = Gtk.ButtonBox()
optionsBox.set_orientation( Gtk.Orientation.VERTICAL )
optionsBox.set_layout( Gtk.ButtonBoxStyle.START )
optionsBox.set_name( 'options-box' )
optionsBox.set_margin_left( 20 )
optionsBox.set_margin_right( 20 )
reloadButton = Gtk.Button( 'Reload' )
icon = Gio.ThemedIcon( name = "reload" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
reloadButton.set_image( image )
reloadButton.set_always_show_image( True )
reloadButton.set_alignment( 0.0, 0.5 )
runButton = Gtk.Button( 'Run' )
icon = Gio.ThemedIcon( name = "media-playback-start" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
runButton.set_image( image )
runButton.set_always_show_image( True )
runButton.set_alignment( 0.0, 0.5 )
stepButton = Gtk.Button( 'Step' )
icon = Gio.ThemedIcon( name = "next" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
stepButton.set_image( image )
stepButton.set_always_show_image( True )
stepButton.set_alignment( 0.0, 0.5 )
pauseButton = Gtk.Button( 'Pause' )
icon = Gio.ThemedIcon( name = "media-playback-pause" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
pauseButton.set_image( image )
pauseButton.set_always_show_image( True )
pauseButton.set_alignment( 0.0, 0.5 )
stopButton = Gtk.Button( 'Stop' )
icon = Gio.ThemedIcon( name = "media-playback-stop" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
stopButton.set_image( image )
stopButton.set_always_show_image( True )
stopButton.set_alignment( 0.0, 0.5 )
reloadButton.connect( 'clicked', self.on_reload_click )
runButton.connect( 'clicked', self.on_run_click )
stepButton.connect( 'clicked', self.on_step_click )
pauseButton.connect( 'clicked', self.on_pause_click )
stopButton.connect( 'clicked', self.on_stop_click )
self.reloadButton = reloadButton
self.runButton = runButton
self.stepButton = stepButton
self.pauseButton = pauseButton
self.stopButton = stopButton
optionsBox.pack_start( reloadButton, True, True, 0 )
optionsBox.pack_start( runButton, True, True, 0 )
optionsBox.pack_start( stepButton, True, True, 0 )
optionsBox.pack_start( pauseButton, True, True, 0 )
optionsBox.pack_start( stopButton, True, True, 0 )
self.attach( optionsBox, 0, 1, 1, 2 )
def init_memory_display( self ):
self.memoryView = Gtk.TreeView( self.memoryModel )
self.memoryView.set_name( 'memory-view' )
self.memoryView.connect( 'row-activated', self.on_row_dblclick )
self.memoryView.set_headers_visible( False )
self.memorySelection = self.memoryView.get_selection()
self.memoryView.set_search_column( 1 )
self.memoryView.set_search_entry( self.searchEntry )
self.memoryView.set_enable_search( True )
rendererBrkPt = Gtk.CellRendererText()
rendererBrkPt.set_padding( 10, 5 )
rendererBrkPt.props.foreground = '#F83F4C'
self.memoryView.append_column( Gtk.TreeViewColumn( 'Breakpoint', rendererBrkPt, text = 0 ) )
rendererAdr = Gtk.CellRendererText()
rendererAdr.set_padding( 10, 5 )
rendererAdr.props.font = 'bold'
self.memoryView.append_column( Gtk.TreeViewColumn( 'Address', rendererAdr, text = 1 ) )
rendererContX = Gtk.CellRendererText()
rendererContX.set_padding( 20, 5 )
self.memoryView.append_column( Gtk.TreeViewColumn( 'Contents (HEX)', rendererContX, text = 2 ) )
rendererContD = Gtk.CellRendererText()
rendererContD.set_padding( 20, 5 )
self.memoryView.append_column( Gtk.TreeViewColumn( 'Contents (DEC)', rendererContD, text = 3 ) )
rendererAnn = Gtk.CellRendererText()
rendererAnn.set_padding( 10, 5 )
rendererAnn.props.foreground = '#888'
rendererAnn.props.font = 'bold'
self.memoryView.append_column( Gtk.TreeViewColumn( 'Source Code', rendererAnn, text = 4 ) )
scrollBox = Gtk.ScrolledWindow()
scrollBox.set_hexpand( True )
scrollBox.set_vexpand( True )
scrollBox.add( self.memoryView )
self.attach( scrollBox, 1, 1, 1, 1 )
def init_registers_display( self ):
registerGrid = Gtk.Grid()
registerGrid.set_margin_left( 20 )
registerGrid.set_margin_right( 20 )
registerGrid.set_name( 'register-grid' )
self.registerDisplays = []
for i in range( 0, 10 ):
box = Gtk.HBox()
box.set_name( 'register-box' )
if i == 8:
box.set_name( 'pc-box' )
box.set_margin_bottom( 1 )
label = Gtk.Label( self.simulator.get_register( i ) )
label.set_name( 'register-value' )
self.registerDisplays.append( label )
name = Gtk.Label( FRISCProcessor.get_register_name( i ) )
name.set_name( 'register-name' )
name.set_width_chars( 5 )
box.pack_start( name, True, True, 0 )
box.pack_start( self.registerDisplays[ i ], True, True, 0 )
registerGrid.attach( box, 0, i, 1, 1 )
self.attach( registerGrid, 2, 1, 1, 2 )
def init_goto_line( self ):
box = Gtk.Box()
box.set_orientation( Gtk.Orientation.HORIZONTAL )
box.set_border_width( 10 )
label = Gtk.Label( 'Go to line:' )
label.set_name( 'goto-label' )
self.searchEntry = Gtk.SearchEntry()
self.searchEntry.set_text( '00000000' )
self.searchEntry.connect( 'search-changed', self.on_search )
box.pack_start( label, False, False, 0 )
box.pack_start( self.searchEntry, True, True, 0 )
self.attach( box, 1, 0, 1, 1 )
def load_simulator( self, file ):
self.program = file
self.simulator.load_program( file )
self.memoryModel.clear()
self.memoryState = []
for i in range( 0, self.simulator.MEM_SIZE // 4 ):
self.memoryState.append( {
'line' : '{:0>8X}'.format( 4*i ),
'contents' : self.simulator.get_word_from_mem( 4*i ),
'breakpoint' : False,
'annotation' : self.simulator.annotations[ 4*i ] } )
for l in self.memoryState:
self.memoryModel.append( self.get_memory_model_values( l ) )
def on_reload_click( self, element ):
pass
def on_run_click( self, element ):
if self.is_paused(): self.flags[ 'paused' ] = False
self.runButton.set_sensitive( False )
self.pauseButton.set_sensitive( True )
while not self.is_breakpoint() and not self.is_paused() and self.run_step():
while Gtk.events_pending():
Gtk.main_iteration()
time.sleep( 0.25 )
print('iteration')
self.runButton.set_sensitive( True )
def on_step_click( self, element ):
self.run_step()
def on_pause_click( self, element ):
self.flags[ 'paused' ] = True
self.pauseButton.set_sensitive( False )
self.runButton.set_sensitive( True )
def on_stop_click( self, element ):
pass
def on_row_dblclick( self, t, p, c ):
i = int( p.to_string() )
self.toggle_breakpoint( p, i )
def on_search( self, element ):
return True
def run_step( self ):
ret = True
try:
self.simulator.run_step()
self.update_registers()
self.select_active_row()
if self.simulator.last_changed_address != -1:
self.update_memory( self.simulator.last_changed_address )
self.simulator.last_changed_address = -1
except Exception as e:
self.console.show_message( str( e ), 'error' )
# TODO: What to do on error?
ret = False
return ret
def clear_simulator( self ):
pass
def update_memory( self, i ):
c = self.memoryState[ i ][ 'contents' ] = self.simulator.get_word_from_mem( 4*( i // 4 ) )
it = self.memoryModel.get_iter_from_string( str( i // 4 ) )
self.memoryModel.set( it, [ 2, 3 ], [ bin_to_pretty_hex( c ), from32( c ) ] )
def update_registers( self ):
for i in range( 0, 10 ):
self.registerDisplays[ i ].set_text( self.simulator.get_register( i ) )
def select_active_row( self ):
pc = self.simulator.get_program_counter()
it = self.memoryModel.get_iter_from_string( str( pc // 4 - 1 ) )
self.memorySelection.select_iter( it )
def toggle_breakpoint( self, p, i ):
self.memoryState[ i ][ 'breakpoint' ] = not self.memoryState[ i ][ 'breakpoint' ]
self.memoryModel.set_value( self.memoryModel.get_iter( p ), 0,
get_breakpoint_symbol( self.memoryState[ i ][ 'breakpoint' ] ) )
def is_breakpoint( self ):
return self.memoryState[ self.simulator.get_program_counter() // 4 ][ 'breakpoint' ]
def is_paused( self ):
return self.flags[ 'paused' ]
def get_memory_model_values( self, l ):
return [ get_breakpoint_symbol( l[ 'breakpoint' ] ), l[ 'line' ],
bin_to_pretty_hex( l[ 'contents' ] ), from32( l[ 'contents' ]),
l[ 'annotation' ] ] | 0.11937 | 0.123921 |
from pycatia.drafting_interfaces.drawing_dimension import DrawingDimension
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.tps_interfaces.dimension_limit import DimensionLimit
class NonSemanticDimension(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| NonSemanticDimension
|
| Interface Managing Non Semantic Dimension.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.non_semantic_dimension = com_object
def dimension_limit(self) -> DimensionLimit:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357))
| o Func DimensionLimit() As DimensionLimit
|
| Gets the Dimension on the DimensionLimit interface.
|
| Parameters:
|
| oDimLim
| The Dimension Limits.
:return: DimensionLimit
:rtype: DimensionLimit
"""
return DimensionLimit(self.non_semantic_dimension.DimensionLimit())
def get_2d_annot(self) -> DrawingDimension:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357))
| o Func Get2dAnnot() As DrawingDimension
|
| Retrieves Drafting Dimension.
|
| Parameters:
|
| oDim
| The Drafting Dimension.
:return: DrawingDimension
:rtype: DrawingDimension
"""
return DrawingDimension(self.non_semantic_dimension.Get2dAnnot())
def has_dimension_limit(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357))
| o Func HasDimensionLimit() As boolean
|
| Checks if the Dimension has a Dimension Limit.
|
| Parameters:
|
| oHasDimLim
|
| TRUE: Dimension Limit exists
| FALSE: Dimension Limit does not exist
:return: bool
:rtype: bool
"""
return self.non_semantic_dimension.HasDimensionLimit()
def __repr__(self):
return f'NonSemanticDimension(name="{self.name}")' | pycatia/tps_interfaces/non_semantic_dimension.py | from pycatia.drafting_interfaces.drawing_dimension import DrawingDimension
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.tps_interfaces.dimension_limit import DimensionLimit
class NonSemanticDimension(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| NonSemanticDimension
|
| Interface Managing Non Semantic Dimension.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.non_semantic_dimension = com_object
def dimension_limit(self) -> DimensionLimit:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357))
| o Func DimensionLimit() As DimensionLimit
|
| Gets the Dimension on the DimensionLimit interface.
|
| Parameters:
|
| oDimLim
| The Dimension Limits.
:return: DimensionLimit
:rtype: DimensionLimit
"""
return DimensionLimit(self.non_semantic_dimension.DimensionLimit())
def get_2d_annot(self) -> DrawingDimension:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357))
| o Func Get2dAnnot() As DrawingDimension
|
| Retrieves Drafting Dimension.
|
| Parameters:
|
| oDim
| The Drafting Dimension.
:return: DrawingDimension
:rtype: DrawingDimension
"""
return DrawingDimension(self.non_semantic_dimension.Get2dAnnot())
def has_dimension_limit(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357))
| o Func HasDimensionLimit() As boolean
|
| Checks if the Dimension has a Dimension Limit.
|
| Parameters:
|
| oHasDimLim
|
| TRUE: Dimension Limit exists
| FALSE: Dimension Limit does not exist
:return: bool
:rtype: bool
"""
return self.non_semantic_dimension.HasDimensionLimit()
def __repr__(self):
return f'NonSemanticDimension(name="{self.name}")' | 0.91243 | 0.320635 |
from typing import List, Dict
from Murphi.ModularMurphi.MurphiTokens import MurphiTokens
from Murphi.ModularMurphi.TemplateClass import TemplateHandler
from DataObjects.ClassArchitecture import Architecture
from DataObjects.ClassCluster import Cluster
from DataObjects.ClassState import State
from DataObjects.ClassTransition import Transition
from DataObjects.ClassMultiDict import MultiDict
class GenAccessSendFunc(MurphiTokens, TemplateHandler):
def __init__(self, handler_dir: str):
TemplateHandler.__init__(self, handler_dir)
self.func_local_var_names = [self.defmsgname]
self.func_global_var_names = []
def _gen_arch_access_send_func(self, clusters: List[Cluster]):
sendfctstr = "--" + __name__ + self.nl
archs = self.get_machines_dict(clusters)
for arch in archs:
sendfctstr += self.gen_access_send_func(archs[arch])
return sendfctstr
def gen_access_send_func(self, arch: Architecture):
sendfctstr = ""
states = arch.get_states()
for state in states:
if [trans for trans in states[state].getaccess() if trans.getaccess() and not trans.getinmsg()] \
+ states[state].getevictmiss():
break
return sendfctstr
for state in sorted(states.keys()):
if len(states[state].getaccessmiss() + states[state].getevictmiss()):
sendfctstr += self._genAccessSendFunc(arch, states[state]) + self.nl
return sendfctstr
def _genAccessSendFunc(self, arch: Architecture, state: State):
transitions = [trans for trans in state.getaccess() if trans.getaccess() and not trans.getinmsg()] \
+ state.getevictmiss()
trans_dict = MultiDict()
for transition in transitions:
trans_dict[transition.getguard()] = transition
sendfctstr = ""
for guard in trans_dict:
ruleid = arch.get_unique_id_str() + "_" + str(state) + "_" + guard
sendfctstr += self._genSendFunctionHeader(arch, ruleid, trans_dict[guard]) + self.nl
return sendfctstr
def _genSendFunctionHeader(self, arch: Architecture, ruleid, transitions: List[Transition]):
arch_id = arch.get_unique_id_str()
fctstr = "procedure " + self.tSEND + ruleid + \
"(" + self.cadr + ":" + self.kaddress + "; m:" + self.SetKey + arch_id + ")" + self.end
all_var_name_dict = self._get_variable_names(arch)
global_var_name_dict = arch.data_object.variables
local_var_names_dict = self._filter_local_variables(all_var_name_dict, global_var_name_dict)
self.func_local_var_names = list(local_var_names_dict.keys())
self.func_global_var_names = list(global_var_name_dict.keys())
fctstr += self._gen_local_variables(local_var_names_dict)
fctstr += "begin" + self.nl
fctstr += self.tab + "alias " + self.ccle + ": " + self.instsuf \
+ arch_id + "[" + self.cmach + "]." + self.CLIdent \
+ "[" + self.cadr + "] do" + self.nl
fctstr += self.gen_single_trans_operation_str(arch_id, transitions)
fctstr += "endalias" + self.end
fctstr += "end" + self.end + self.nl
return fctstr
def get_machines_dict(self, clusters: List[Cluster]) -> Dict[str, Architecture]:
archs = {}
for cluster in clusters:
machines = set(cluster.system_tuple)
for machine in machines:
arch_name = machine.arch.get_unique_id_str()
if arch_name not in archs:
archs[arch_name] = machine.arch
return archs | Murphi/ModularMurphi/GenAccessSendFunc.py | from typing import List, Dict
from Murphi.ModularMurphi.MurphiTokens import MurphiTokens
from Murphi.ModularMurphi.TemplateClass import TemplateHandler
from DataObjects.ClassArchitecture import Architecture
from DataObjects.ClassCluster import Cluster
from DataObjects.ClassState import State
from DataObjects.ClassTransition import Transition
from DataObjects.ClassMultiDict import MultiDict
class GenAccessSendFunc(MurphiTokens, TemplateHandler):
def __init__(self, handler_dir: str):
TemplateHandler.__init__(self, handler_dir)
self.func_local_var_names = [self.defmsgname]
self.func_global_var_names = []
def _gen_arch_access_send_func(self, clusters: List[Cluster]):
sendfctstr = "--" + __name__ + self.nl
archs = self.get_machines_dict(clusters)
for arch in archs:
sendfctstr += self.gen_access_send_func(archs[arch])
return sendfctstr
def gen_access_send_func(self, arch: Architecture):
sendfctstr = ""
states = arch.get_states()
for state in states:
if [trans for trans in states[state].getaccess() if trans.getaccess() and not trans.getinmsg()] \
+ states[state].getevictmiss():
break
return sendfctstr
for state in sorted(states.keys()):
if len(states[state].getaccessmiss() + states[state].getevictmiss()):
sendfctstr += self._genAccessSendFunc(arch, states[state]) + self.nl
return sendfctstr
def _genAccessSendFunc(self, arch: Architecture, state: State):
transitions = [trans for trans in state.getaccess() if trans.getaccess() and not trans.getinmsg()] \
+ state.getevictmiss()
trans_dict = MultiDict()
for transition in transitions:
trans_dict[transition.getguard()] = transition
sendfctstr = ""
for guard in trans_dict:
ruleid = arch.get_unique_id_str() + "_" + str(state) + "_" + guard
sendfctstr += self._genSendFunctionHeader(arch, ruleid, trans_dict[guard]) + self.nl
return sendfctstr
def _genSendFunctionHeader(self, arch: Architecture, ruleid, transitions: List[Transition]):
arch_id = arch.get_unique_id_str()
fctstr = "procedure " + self.tSEND + ruleid + \
"(" + self.cadr + ":" + self.kaddress + "; m:" + self.SetKey + arch_id + ")" + self.end
all_var_name_dict = self._get_variable_names(arch)
global_var_name_dict = arch.data_object.variables
local_var_names_dict = self._filter_local_variables(all_var_name_dict, global_var_name_dict)
self.func_local_var_names = list(local_var_names_dict.keys())
self.func_global_var_names = list(global_var_name_dict.keys())
fctstr += self._gen_local_variables(local_var_names_dict)
fctstr += "begin" + self.nl
fctstr += self.tab + "alias " + self.ccle + ": " + self.instsuf \
+ arch_id + "[" + self.cmach + "]." + self.CLIdent \
+ "[" + self.cadr + "] do" + self.nl
fctstr += self.gen_single_trans_operation_str(arch_id, transitions)
fctstr += "endalias" + self.end
fctstr += "end" + self.end + self.nl
return fctstr
def get_machines_dict(self, clusters: List[Cluster]) -> Dict[str, Architecture]:
archs = {}
for cluster in clusters:
machines = set(cluster.system_tuple)
for machine in machines:
arch_name = machine.arch.get_unique_id_str()
if arch_name not in archs:
archs[arch_name] = machine.arch
return archs | 0.80837 | 0.291876 |
import pytest
from django.db import IntegrityError
from django.test.utils import override_settings
from .models import (TestModel, TestModelWithForeignKey, TestModelWithNonEditableFields,
OrdinaryTestModel, OrdinaryTestModelWithForeignKey, TestModelWithSelfForeignKey,
TestExpressionModel, TestModelWithPreSaveSignal, TestDoubleForeignKeyModel)
from .utils import assert_select_number_queries_on_model
@pytest.mark.django_db
def test_slicing_and_only():
# test for bug: https://github.com/depop/django-dirtyfields/issues/1
for _ in range(10):
TestModelWithNonEditableFields.objects.create()
qs_ = TestModelWithNonEditableFields.objects.only('pk').filter()
[o for o in qs_.filter().order_by('pk')]
@pytest.mark.django_db
def test_dirty_fields_ignores_the_editable_property_of_fields():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/17
tm = TestModelWithNonEditableFields.objects.create()
# Changing values should flag them as dirty
tm.boolean = False
tm.characters = 'testing'
assert tm.get_dirty_fields() == {
'boolean': True,
'characters': ''
}
@pytest.mark.django_db
def test_mandatory_foreign_key_field_not_initialized_is_not_raising_related_object_exception():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/26
with pytest.raises(IntegrityError):
TestModelWithForeignKey.objects.create()
@pytest.mark.django_db
@override_settings(DEBUG=True) # The test runner sets DEBUG to False. Set to True to enable SQL logging.
def test_relationship_model_loading_issue():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/34
# Query tests with models that are not using django-dirtyfields
tm1 = OrdinaryTestModel.objects.create()
tm2 = OrdinaryTestModel.objects.create()
OrdinaryTestModelWithForeignKey.objects.create(fkey=tm1)
OrdinaryTestModelWithForeignKey.objects.create(fkey=tm2)
with assert_select_number_queries_on_model(OrdinaryTestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(OrdinaryTestModel, 0): # should be 0 since we don't access the relationship for now
for tmf in OrdinaryTestModelWithForeignKey.objects.all():
tmf.pk
with assert_select_number_queries_on_model(OrdinaryTestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(OrdinaryTestModel, 2):
for tmf in OrdinaryTestModelWithForeignKey.objects.all():
tmf.fkey # access the relationship here
with assert_select_number_queries_on_model(OrdinaryTestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(OrdinaryTestModel, 0): # should be 0 since we use `select_related`
for tmf in OrdinaryTestModelWithForeignKey.objects.select_related('fkey').all():
tmf.fkey # access the relationship here
# Query tests with models that are using django-dirtyfields
tm1 = TestModel.objects.create()
tm2 = TestModel.objects.create()
TestModelWithForeignKey.objects.create(fkey=tm1)
TestModelWithForeignKey.objects.create(fkey=tm2)
with assert_select_number_queries_on_model(TestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(TestModel, 0): # should be 0, was 2 before bug fixing
for tmf in TestModelWithForeignKey.objects.all():
tmf.pk # we don't need the relationship here
with assert_select_number_queries_on_model(TestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(TestModel, 2):
for tmf in TestModelWithForeignKey.objects.all():
tmf.fkey # access the relationship here
with assert_select_number_queries_on_model(TestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(TestModel, 0): # should be 0 since we use `selected_related` (was 2 before)
for tmf in TestModelWithForeignKey.objects.select_related('fkey').all():
tmf.fkey # access the relationship here
@pytest.mark.django_db
def test_relationship_option_for_foreign_key_to_self():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/22
tm = TestModelWithSelfForeignKey.objects.create()
tm1 = TestModelWithSelfForeignKey.objects.create(fkey=tm)
tm.fkey = tm1
tm.save()
# Trying to access an instance was triggering a "RuntimeError: maximum recursion depth exceeded"
TestModelWithSelfForeignKey.objects.all()[0]
@pytest.mark.django_db
def test_expressions_not_taken_into_account_for_dirty_check():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/39
from django.db.models import F
tm = TestExpressionModel.objects.create()
tm.counter = F('counter') + 1
# This save() was raising a ValidationError: [u"'F(counter) + Value(1)' value must be an integer."]
# caused by a call to_python() on an expression node
tm.save()
@pytest.mark.django_db
def test_pre_save_signal_make_dirty_checking_not_consistent():
# first case
model = TestModelWithPreSaveSignal.objects.create(data='specific_value')
assert model.data_updated_on_presave is 'presave_value'
# second case
model = TestModelWithPreSaveSignal(data='specific_value')
model.save()
assert model.data_updated_on_presave is 'presave_value'
# third case
model = TestModelWithPreSaveSignal()
model.data = 'specific_value'
model.save()
assert model.data_updated_on_presave is 'presave_value'
@pytest.mark.django_db
def test_foreign_key_deferred_field():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/84
tm = TestModel.objects.create()
TestDoubleForeignKeyModel.objects.create(fkey1=tm)
list(TestDoubleForeignKeyModel.objects.only('fkey1')) # RuntimeError was raised here! | tests/test_non_regression.py | import pytest
from django.db import IntegrityError
from django.test.utils import override_settings
from .models import (TestModel, TestModelWithForeignKey, TestModelWithNonEditableFields,
OrdinaryTestModel, OrdinaryTestModelWithForeignKey, TestModelWithSelfForeignKey,
TestExpressionModel, TestModelWithPreSaveSignal, TestDoubleForeignKeyModel)
from .utils import assert_select_number_queries_on_model
@pytest.mark.django_db
def test_slicing_and_only():
# test for bug: https://github.com/depop/django-dirtyfields/issues/1
for _ in range(10):
TestModelWithNonEditableFields.objects.create()
qs_ = TestModelWithNonEditableFields.objects.only('pk').filter()
[o for o in qs_.filter().order_by('pk')]
@pytest.mark.django_db
def test_dirty_fields_ignores_the_editable_property_of_fields():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/17
tm = TestModelWithNonEditableFields.objects.create()
# Changing values should flag them as dirty
tm.boolean = False
tm.characters = 'testing'
assert tm.get_dirty_fields() == {
'boolean': True,
'characters': ''
}
@pytest.mark.django_db
def test_mandatory_foreign_key_field_not_initialized_is_not_raising_related_object_exception():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/26
with pytest.raises(IntegrityError):
TestModelWithForeignKey.objects.create()
@pytest.mark.django_db
@override_settings(DEBUG=True) # The test runner sets DEBUG to False. Set to True to enable SQL logging.
def test_relationship_model_loading_issue():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/34
# Query tests with models that are not using django-dirtyfields
tm1 = OrdinaryTestModel.objects.create()
tm2 = OrdinaryTestModel.objects.create()
OrdinaryTestModelWithForeignKey.objects.create(fkey=tm1)
OrdinaryTestModelWithForeignKey.objects.create(fkey=tm2)
with assert_select_number_queries_on_model(OrdinaryTestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(OrdinaryTestModel, 0): # should be 0 since we don't access the relationship for now
for tmf in OrdinaryTestModelWithForeignKey.objects.all():
tmf.pk
with assert_select_number_queries_on_model(OrdinaryTestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(OrdinaryTestModel, 2):
for tmf in OrdinaryTestModelWithForeignKey.objects.all():
tmf.fkey # access the relationship here
with assert_select_number_queries_on_model(OrdinaryTestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(OrdinaryTestModel, 0): # should be 0 since we use `select_related`
for tmf in OrdinaryTestModelWithForeignKey.objects.select_related('fkey').all():
tmf.fkey # access the relationship here
# Query tests with models that are using django-dirtyfields
tm1 = TestModel.objects.create()
tm2 = TestModel.objects.create()
TestModelWithForeignKey.objects.create(fkey=tm1)
TestModelWithForeignKey.objects.create(fkey=tm2)
with assert_select_number_queries_on_model(TestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(TestModel, 0): # should be 0, was 2 before bug fixing
for tmf in TestModelWithForeignKey.objects.all():
tmf.pk # we don't need the relationship here
with assert_select_number_queries_on_model(TestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(TestModel, 2):
for tmf in TestModelWithForeignKey.objects.all():
tmf.fkey # access the relationship here
with assert_select_number_queries_on_model(TestModelWithForeignKey, 1):
with assert_select_number_queries_on_model(TestModel, 0): # should be 0 since we use `selected_related` (was 2 before)
for tmf in TestModelWithForeignKey.objects.select_related('fkey').all():
tmf.fkey # access the relationship here
@pytest.mark.django_db
def test_relationship_option_for_foreign_key_to_self():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/22
tm = TestModelWithSelfForeignKey.objects.create()
tm1 = TestModelWithSelfForeignKey.objects.create(fkey=tm)
tm.fkey = tm1
tm.save()
# Trying to access an instance was triggering a "RuntimeError: maximum recursion depth exceeded"
TestModelWithSelfForeignKey.objects.all()[0]
@pytest.mark.django_db
def test_expressions_not_taken_into_account_for_dirty_check():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/39
from django.db.models import F
tm = TestExpressionModel.objects.create()
tm.counter = F('counter') + 1
# This save() was raising a ValidationError: [u"'F(counter) + Value(1)' value must be an integer."]
# caused by a call to_python() on an expression node
tm.save()
@pytest.mark.django_db
def test_pre_save_signal_make_dirty_checking_not_consistent():
# first case
model = TestModelWithPreSaveSignal.objects.create(data='specific_value')
assert model.data_updated_on_presave is 'presave_value'
# second case
model = TestModelWithPreSaveSignal(data='specific_value')
model.save()
assert model.data_updated_on_presave is 'presave_value'
# third case
model = TestModelWithPreSaveSignal()
model.data = 'specific_value'
model.save()
assert model.data_updated_on_presave is 'presave_value'
@pytest.mark.django_db
def test_foreign_key_deferred_field():
# Non regression test case for bug:
# https://github.com/romgar/django-dirtyfields/issues/84
tm = TestModel.objects.create()
TestDoubleForeignKeyModel.objects.create(fkey1=tm)
list(TestDoubleForeignKeyModel.objects.only('fkey1')) # RuntimeError was raised here! | 0.55097 | 0.337995 |
import gym
import random
import numpy as np
import pickle
import time
from config import number_states_to_keep, data_folder
from utils import rgb2grey
import copy
env_name = "CarRacing-v0"
env = gym.make(env_name)
if __name__ == "__main__":
from pyglet.window import key
a = np.array([0.0, 0.0, 0.0])
def key_press(k, mod):
global restart
if k == 0xFF0D:
restart = True
if k == key.LEFT:
a[0] = -1.0
if k == key.RIGHT:
a[0] = +1.0
if k == key.UP:
a[1] = +1.0
if k == key.DOWN:
a[2] = +0.8 # set 1.0 for wheels to block to zero rotation
def key_release(k, mod):
if k == key.LEFT and a[0] == -1.0:
a[0] = 0
if k == key.RIGHT and a[0] == +1.0:
a[0] = 0
if k == key.UP:
a[1] = 0
if k == key.DOWN:
a[2] = 0
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
is_open = True
# loop over episodes
while is_open:
first_state = env.reset()
# convert colour image to greyscale
first_state_grey = rgb2grey(first_state)
# to record velocity, we want to keep track of the X most recent states (X = number_states_to_keep)
recent_states_fifo = [first_state_grey] * number_states_to_keep
total_reward = 0.0
steps = 0
restart = False
# log visited states and actions
state_log = []
action_log = []
# loop over steps of an episode
while True:
s, r, done, info = env.step(a)
# add the visited action to the action log
action_log.append(copy.deepcopy(a))
# keep a fifo list of the most recently visited states
recent_states_fifo.pop(0)
grey_state = rgb2grey(s)
recent_states_fifo.append(grey_state)
assert len(recent_states_fifo) == number_states_to_keep
# add the stack of recently visited states to the state log
state_log.append(recent_states_fifo)
total_reward += r
if steps % 200 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
is_open = env.render()
if done or restart or is_open is False:
# save a pickle of the visited states and actions
timestamp = time.strftime('%b-%d-%Y_%H%M%S', time.localtime())
for i in [["states", state_log], ["actions", action_log]]:
filename = data_folder + i[0] + '_' + timestamp + ".pickle"
outfile = open(filename, 'wb')
pickle.dump(i[1], outfile)
outfile.close()
break
env.close() | get_experience.py | import gym
import random
import numpy as np
import pickle
import time
from config import number_states_to_keep, data_folder
from utils import rgb2grey
import copy
env_name = "CarRacing-v0"
env = gym.make(env_name)
if __name__ == "__main__":
from pyglet.window import key
a = np.array([0.0, 0.0, 0.0])
def key_press(k, mod):
global restart
if k == 0xFF0D:
restart = True
if k == key.LEFT:
a[0] = -1.0
if k == key.RIGHT:
a[0] = +1.0
if k == key.UP:
a[1] = +1.0
if k == key.DOWN:
a[2] = +0.8 # set 1.0 for wheels to block to zero rotation
def key_release(k, mod):
if k == key.LEFT and a[0] == -1.0:
a[0] = 0
if k == key.RIGHT and a[0] == +1.0:
a[0] = 0
if k == key.UP:
a[1] = 0
if k == key.DOWN:
a[2] = 0
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
is_open = True
# loop over episodes
while is_open:
first_state = env.reset()
# convert colour image to greyscale
first_state_grey = rgb2grey(first_state)
# to record velocity, we want to keep track of the X most recent states (X = number_states_to_keep)
recent_states_fifo = [first_state_grey] * number_states_to_keep
total_reward = 0.0
steps = 0
restart = False
# log visited states and actions
state_log = []
action_log = []
# loop over steps of an episode
while True:
s, r, done, info = env.step(a)
# add the visited action to the action log
action_log.append(copy.deepcopy(a))
# keep a fifo list of the most recently visited states
recent_states_fifo.pop(0)
grey_state = rgb2grey(s)
recent_states_fifo.append(grey_state)
assert len(recent_states_fifo) == number_states_to_keep
# add the stack of recently visited states to the state log
state_log.append(recent_states_fifo)
total_reward += r
if steps % 200 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
is_open = env.render()
if done or restart or is_open is False:
# save a pickle of the visited states and actions
timestamp = time.strftime('%b-%d-%Y_%H%M%S', time.localtime())
for i in [["states", state_log], ["actions", action_log]]:
filename = data_folder + i[0] + '_' + timestamp + ".pickle"
outfile = open(filename, 'wb')
pickle.dump(i[1], outfile)
outfile.close()
break
env.close() | 0.354321 | 0.369599 |
import os
import random
from .. import Problem, ProblemInstance, instance_path
from ...util import mkfile, curr_dir_relative
from ...system import *
NUM_TRIALS = 20
def partition(xs, rate=50):
k = int((len(xs) / 100)*rate)
return xs[:k], xs[k:]
class Hexose(Problem):
def __init__(self, problem):
self.problem = problem
super().__init__(f"hexose_{self.problem}")
def generate_instances(self, experiment):
instances = []
pos = []
neg = []
with open(curr_dir_relative(f'{self.problem}/exs.pl')) as f:
for line in f:
line = line.strip()
if line.startswith('pos'):
pos.append(line)
elif line.startswith('neg'):
neg.append(line)
for trial in experiment.trials:
random.shuffle(pos)
random.shuffle(neg)
train_pos, test_pos = partition(pos)
train_neg, test_neg = partition(neg)
for system in experiment.systems:
data_path = instance_path(os.path.join(experiment.output_path, f"{self.problem}"), self, system, trial)
test_settings = BasicTestSettings(
exs_file=self.write_examples(data_path, test_pos, test_neg, name="test_exs.pl"),
bk_file=self.bk_file()
)
train_exs_file = self.write_examples(data_path, train_pos, train_neg)
if isinstance(system, Popper):
train_settings = self.generate_popper(data_path, train_exs_file,
curr_dir_relative('popper-bias.pl'))
elif isinstance(system, Aleph):
train_settings = self.generate_aleph(data_path, train_pos, train_neg)
elif isinstance(system, Metagol):
train_settings = self.generate_metagol(data_path, train_exs_file)
instances.append(ProblemInstance(self, system, train_settings, test_settings))
return instances
def bk_file(self):
return curr_dir_relative(f'common_bk.pl')
def problem_bk(self):
return curr_dir_relative(os.path.join(f'/{self.problem}', 'bk.pl'))
def generate_bk(self, data_path, base_bk, problem_bk):
with open(base_bk, 'r') as f:
base_bk = f.read()
with open(problem_bk, 'r') as f:
problem_bk = f.read()
bk_file = mkfile(data_path, 'bk.pl')
with open(bk_file, 'w') as f:
f.write(problem_bk)
f.write(base_bk)
return bk_file
def write_examples(self, data_path, pos_examples, neg_examples, name="exs.pl"):
exs_file = mkfile(data_path, name)
with open(exs_file, 'w') as f:
for example in pos_examples + neg_examples:
f.write(f'{example}\n')
return exs_file
def generate_popper(self, data_path, exs_file, source_bias_file):
return PopperTrainSettings(
exs_file=exs_file,
bias_file=popper.generate_bias_file(data_path, source_bias_file),
bk_file=self.generate_bk(data_path, self.bk_file(), self.problem_bk()),
stats_file=os.sep.join([data_path, 'stats.json'])
)
def generate_aleph(self, data_path, pos_examples, neg_examples):
output_file = mkfile(data_path, 'input.pl')
base_aleph_file = curr_dir_relative(os.path.join(f"{self.problem}", 'aleph-modes.pl'))
bk_file = self.generate_bk(data_path, self.bk_file(), self.problem_bk())
return aleph.gen_aleph_train_settings(
output_file,
base_aleph_file,
bk_file,
strip_examples(pos_examples),
strip_examples(neg_examples))
def generate_metagol(self, data_path, exs_file):
return MetagolTrainSettings(
exs_file=exs_file,
prim_file=curr_dir_relative(os.path.join(f"{self.problem}", 'metagol-prims.pl')),
bk_file=self.generate_bk(data_path, self.bk_file(), self.problem_bk())
)
# This is a bit of a hacky way to turn "pos(f(A,B)). " or "neg(f(A,B)). " into "f(A,B)" for Aleph.
def strip_examples(exs):
return [s.strip()[4:-2] for s in exs] | ilpexp/problem/hexose/hexose.py | import os
import random
from .. import Problem, ProblemInstance, instance_path
from ...util import mkfile, curr_dir_relative
from ...system import *
NUM_TRIALS = 20
def partition(xs, rate=50):
k = int((len(xs) / 100)*rate)
return xs[:k], xs[k:]
class Hexose(Problem):
def __init__(self, problem):
self.problem = problem
super().__init__(f"hexose_{self.problem}")
def generate_instances(self, experiment):
instances = []
pos = []
neg = []
with open(curr_dir_relative(f'{self.problem}/exs.pl')) as f:
for line in f:
line = line.strip()
if line.startswith('pos'):
pos.append(line)
elif line.startswith('neg'):
neg.append(line)
for trial in experiment.trials:
random.shuffle(pos)
random.shuffle(neg)
train_pos, test_pos = partition(pos)
train_neg, test_neg = partition(neg)
for system in experiment.systems:
data_path = instance_path(os.path.join(experiment.output_path, f"{self.problem}"), self, system, trial)
test_settings = BasicTestSettings(
exs_file=self.write_examples(data_path, test_pos, test_neg, name="test_exs.pl"),
bk_file=self.bk_file()
)
train_exs_file = self.write_examples(data_path, train_pos, train_neg)
if isinstance(system, Popper):
train_settings = self.generate_popper(data_path, train_exs_file,
curr_dir_relative('popper-bias.pl'))
elif isinstance(system, Aleph):
train_settings = self.generate_aleph(data_path, train_pos, train_neg)
elif isinstance(system, Metagol):
train_settings = self.generate_metagol(data_path, train_exs_file)
instances.append(ProblemInstance(self, system, train_settings, test_settings))
return instances
def bk_file(self):
return curr_dir_relative(f'common_bk.pl')
def problem_bk(self):
return curr_dir_relative(os.path.join(f'/{self.problem}', 'bk.pl'))
def generate_bk(self, data_path, base_bk, problem_bk):
with open(base_bk, 'r') as f:
base_bk = f.read()
with open(problem_bk, 'r') as f:
problem_bk = f.read()
bk_file = mkfile(data_path, 'bk.pl')
with open(bk_file, 'w') as f:
f.write(problem_bk)
f.write(base_bk)
return bk_file
def write_examples(self, data_path, pos_examples, neg_examples, name="exs.pl"):
exs_file = mkfile(data_path, name)
with open(exs_file, 'w') as f:
for example in pos_examples + neg_examples:
f.write(f'{example}\n')
return exs_file
def generate_popper(self, data_path, exs_file, source_bias_file):
return PopperTrainSettings(
exs_file=exs_file,
bias_file=popper.generate_bias_file(data_path, source_bias_file),
bk_file=self.generate_bk(data_path, self.bk_file(), self.problem_bk()),
stats_file=os.sep.join([data_path, 'stats.json'])
)
def generate_aleph(self, data_path, pos_examples, neg_examples):
output_file = mkfile(data_path, 'input.pl')
base_aleph_file = curr_dir_relative(os.path.join(f"{self.problem}", 'aleph-modes.pl'))
bk_file = self.generate_bk(data_path, self.bk_file(), self.problem_bk())
return aleph.gen_aleph_train_settings(
output_file,
base_aleph_file,
bk_file,
strip_examples(pos_examples),
strip_examples(neg_examples))
def generate_metagol(self, data_path, exs_file):
return MetagolTrainSettings(
exs_file=exs_file,
prim_file=curr_dir_relative(os.path.join(f"{self.problem}", 'metagol-prims.pl')),
bk_file=self.generate_bk(data_path, self.bk_file(), self.problem_bk())
)
# This is a bit of a hacky way to turn "pos(f(A,B)). " or "neg(f(A,B)). " into "f(A,B)" for Aleph.
def strip_examples(exs):
return [s.strip()[4:-2] for s in exs] | 0.398524 | 0.267277 |
MSVC_BIG_WARNING_FLAGS = [
"/W3",
]
LLVM_TEST_DISABLE_WARNINGS_FLAGS = [
"-Wno-c99-extensions",
"-Wno-deprecated-declarations",
"-Wno-missing-noreturn",
"-Wno-missing-prototypes",
"-Wno-missing-variable-declarations",
"-Wno-null-conversion",
"-Wno-shadow",
"-Wno-shift-sign-overflow",
"-Wno-sign-compare",
"-Wno-unused-function",
"-Wno-unused-member-function",
"-Wno-unused-parameter",
"-Wno-unused-private-field",
"-Wno-unused-template",
"-Wno-used-but-marked-unused",
"-Wno-zero-as-null-pointer-constant",
# gtest depends on this GNU extension being offered.
"-Wno-gnu-zero-variadic-macro-arguments",
]
MSVC_DEFINES = [
"/DNOMINMAX", # Don't define min and max macros (windows.h)
# Don't bloat namespace with incompatible winsock versions.
"/DWIN32_LEAN_AND_MEAN",
# Don't warn about usage of insecure C functions.
"/D_CRT_SECURE_NO_WARNINGS",
"/D_SCL_SECURE_NO_WARNINGS",
# Introduced in VS 2017 15.8, allow overaligned types in aligned_storage
"/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
]
COPT_VARS = {
"ABSL_GCC_FLAGS": [
"-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wundef",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvarargs",
"-Wvla", # variable-length array
"-Wwrite-strings",
# Don't define min and max macros (Build on Windows using gcc)
"-DNOMINMAX",
],
"ABSL_GCC_TEST_FLAGS": [
"-Wno-conversion-null",
"-Wno-deprecated-declarations",
"-Wno-missing-declarations",
"-Wno-sign-compare",
"-Wno-unused-function",
"-Wno-unused-parameter",
"-Wno-unused-private-field",
],
"ABSL_LLVM_FLAGS": [
"-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",
"-Wformat-security",
"-Wgnu-redeclared-enum",
"-Winfinite-recursion",
"-Winvalid-constexpr",
"-Wliteral-conversion",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wself-assign",
"-Wshadow-all",
"-Wstring-conversion",
"-Wtautological-overlap-compare",
"-Wundef",
"-Wuninitialized",
"-Wunreachable-code",
"-Wunused-comparison",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvla",
"-Wwrite-strings",
# Warnings that are enabled by group warning flags like -Wall that we
# explicitly disable.
"-Wno-float-conversion",
"-Wno-implicit-float-conversion",
"-Wno-implicit-int-float-conversion",
"-Wno-implicit-int-conversion",
"-Wno-shorten-64-to-32",
"-Wno-sign-conversion",
# Don't define min and max macros (Build on Windows using clang)
"-DNOMINMAX",
],
"ABSL_LLVM_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_CLANG_CL_FLAGS":
(MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES),
"ABSL_CLANG_CL_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_MSVC_FLAGS":
MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES + [
# Increase the number of sections available in object files
"/bigobj",
"/wd4005", # macro-redefinition
"/wd4068", # unknown pragma
# qualifier applied to function type has no meaning; ignored
"/wd4180",
# conversion from 'type1' to 'type2', possible loss of data
"/wd4244",
# conversion from 'size_t' to 'type', possible loss of data
"/wd4267",
# The decorated name was longer than the compiler limit
"/wd4503",
# forcing value to bool 'true' or 'false' (performance warning)
"/wd4800",
],
"ABSL_MSVC_TEST_FLAGS": [
"/wd4018", # signed/unsigned mismatch
"/wd4101", # unreferenced local variable
"/wd4503", # decorated name length exceeded, name was truncated
"/wd4996", # use of deprecated symbol
"/DNOMINMAX", # disable the min() and max() macros from <windows.h>
],
"ABSL_MSVC_LINKOPTS": [
# Object file doesn't export any previously undefined symbols
"-ignore:4221",
],
# "HWAES" is an abbreviation for "hardware AES" (AES - Advanced Encryption
# Standard). These flags are used for detecting whether or not the target
# architecture has hardware support for AES instructions which can be used
# to improve performance of some random bit generators.
"ABSL_RANDOM_HWAES_ARM64_FLAGS": ["-march=armv8-a+crypto"],
"ABSL_RANDOM_HWAES_ARM32_FLAGS": ["-mfpu=neon"],
"ABSL_RANDOM_HWAES_X64_FLAGS": [
"-maes",
"-msse4.1",
],
"ABSL_RANDOM_HWAES_MSVC_X64_FLAGS": [],
} | absl/copts/copts.py | MSVC_BIG_WARNING_FLAGS = [
"/W3",
]
LLVM_TEST_DISABLE_WARNINGS_FLAGS = [
"-Wno-c99-extensions",
"-Wno-deprecated-declarations",
"-Wno-missing-noreturn",
"-Wno-missing-prototypes",
"-Wno-missing-variable-declarations",
"-Wno-null-conversion",
"-Wno-shadow",
"-Wno-shift-sign-overflow",
"-Wno-sign-compare",
"-Wno-unused-function",
"-Wno-unused-member-function",
"-Wno-unused-parameter",
"-Wno-unused-private-field",
"-Wno-unused-template",
"-Wno-used-but-marked-unused",
"-Wno-zero-as-null-pointer-constant",
# gtest depends on this GNU extension being offered.
"-Wno-gnu-zero-variadic-macro-arguments",
]
MSVC_DEFINES = [
"/DNOMINMAX", # Don't define min and max macros (windows.h)
# Don't bloat namespace with incompatible winsock versions.
"/DWIN32_LEAN_AND_MEAN",
# Don't warn about usage of insecure C functions.
"/D_CRT_SECURE_NO_WARNINGS",
"/D_SCL_SECURE_NO_WARNINGS",
# Introduced in VS 2017 15.8, allow overaligned types in aligned_storage
"/D_ENABLE_EXTENDED_ALIGNED_STORAGE",
]
COPT_VARS = {
"ABSL_GCC_FLAGS": [
"-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wundef",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvarargs",
"-Wvla", # variable-length array
"-Wwrite-strings",
# Don't define min and max macros (Build on Windows using gcc)
"-DNOMINMAX",
],
"ABSL_GCC_TEST_FLAGS": [
"-Wno-conversion-null",
"-Wno-deprecated-declarations",
"-Wno-missing-declarations",
"-Wno-sign-compare",
"-Wno-unused-function",
"-Wno-unused-parameter",
"-Wno-unused-private-field",
],
"ABSL_LLVM_FLAGS": [
"-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",
"-Wformat-security",
"-Wgnu-redeclared-enum",
"-Winfinite-recursion",
"-Winvalid-constexpr",
"-Wliteral-conversion",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wself-assign",
"-Wshadow-all",
"-Wstring-conversion",
"-Wtautological-overlap-compare",
"-Wundef",
"-Wuninitialized",
"-Wunreachable-code",
"-Wunused-comparison",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvla",
"-Wwrite-strings",
# Warnings that are enabled by group warning flags like -Wall that we
# explicitly disable.
"-Wno-float-conversion",
"-Wno-implicit-float-conversion",
"-Wno-implicit-int-float-conversion",
"-Wno-implicit-int-conversion",
"-Wno-shorten-64-to-32",
"-Wno-sign-conversion",
# Don't define min and max macros (Build on Windows using clang)
"-DNOMINMAX",
],
"ABSL_LLVM_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_CLANG_CL_FLAGS":
(MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES),
"ABSL_CLANG_CL_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_MSVC_FLAGS":
MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES + [
# Increase the number of sections available in object files
"/bigobj",
"/wd4005", # macro-redefinition
"/wd4068", # unknown pragma
# qualifier applied to function type has no meaning; ignored
"/wd4180",
# conversion from 'type1' to 'type2', possible loss of data
"/wd4244",
# conversion from 'size_t' to 'type', possible loss of data
"/wd4267",
# The decorated name was longer than the compiler limit
"/wd4503",
# forcing value to bool 'true' or 'false' (performance warning)
"/wd4800",
],
"ABSL_MSVC_TEST_FLAGS": [
"/wd4018", # signed/unsigned mismatch
"/wd4101", # unreferenced local variable
"/wd4503", # decorated name length exceeded, name was truncated
"/wd4996", # use of deprecated symbol
"/DNOMINMAX", # disable the min() and max() macros from <windows.h>
],
"ABSL_MSVC_LINKOPTS": [
# Object file doesn't export any previously undefined symbols
"-ignore:4221",
],
# "HWAES" is an abbreviation for "hardware AES" (AES - Advanced Encryption
# Standard). These flags are used for detecting whether or not the target
# architecture has hardware support for AES instructions which can be used
# to improve performance of some random bit generators.
"ABSL_RANDOM_HWAES_ARM64_FLAGS": ["-march=armv8-a+crypto"],
"ABSL_RANDOM_HWAES_ARM32_FLAGS": ["-mfpu=neon"],
"ABSL_RANDOM_HWAES_X64_FLAGS": [
"-maes",
"-msse4.1",
],
"ABSL_RANDOM_HWAES_MSVC_X64_FLAGS": [],
} | 0.493164 | 0.211071 |
import copy
import warnings
from math import sqrt, exp, log, cosh, sinh
import numpy as np
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises, assert_equal, assert_is_instance, \
assert_greater, assert_greater_equal
from nilearn._utils.testing import is_spd
from nilearn.connectivity.embedding import check_mat, map_sym, map_eig, \
geometric_mean, grad_geometric_mean, sym_to_vec, vec_to_sym, \
prec_to_partial, CovEmbedding
def test_check_mat():
"""Test check_mat function"""
non_square = np.ones((2, 3))
assert_raises(ValueError, check_mat, non_square, 'square')
non_sym = np.array([[0, 1], [0, 0]])
assert_raises(ValueError, check_mat, non_sym, 'symmetric')
non_spd = np.ones((3, 3))
assert_raises(ValueError, check_mat, non_spd, 'spd')
def test_map_sym():
"""Test map_sym function"""
# Test on exp map
sym = np.ones((2, 2))
sym_exp = exp(1.) * np.array([[cosh(1.), sinh(1.)], [sinh(1.), cosh(1.)]])
assert_array_almost_equal(map_sym(np.exp, sym), sym_exp)
# Test on sqrt map
spd_sqrt = np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]])
spd = spd_sqrt.dot(spd_sqrt)
assert_array_almost_equal(map_sym(np.sqrt, spd), spd_sqrt)
# Test on log map
spd = np.array([[1.25, 0.75], [0.75, 1.25]])
spd_log = np.array([[0., log(2.)], [log(2.), 0.]])
assert_array_almost_equal(map_sym(np.log, spd), spd_log)
def test_geometric_mean_couple():
"""Test geometric_mean function for two matrices"""
n_features = 7
spd1 = np.ones((n_features, n_features))
spd1 = spd1.dot(spd1) + n_features * np.eye(n_features)
spd2 = np.tril(np.ones((n_features, n_features)))
spd2 = spd2.dot(spd2.T)
vals_spd2, vecs_spd2 = np.linalg.eigh(spd2)
spd2_sqrt = map_eig(np.sqrt, vals_spd2, vecs_spd2)
spd2_inv_sqrt = map_eig(np.sqrt, 1. / vals_spd2, vecs_spd2)
geo = spd2_sqrt.dot(map_sym(np.sqrt,
spd2_inv_sqrt.dot(spd1).dot(spd2_inv_sqrt))).dot(spd2_sqrt)
assert_array_almost_equal(geometric_mean([spd1, spd2]), geo)
def test_geometric_mean_diagonal():
"""Test geometric_mean function for diagonal matrices"""
n_matrices = 20
n_features = 5
diags = []
for k in range(n_matrices):
diag = np.eye(n_features)
diag[k % n_features, k % n_features] = 1e4 + k
diag[(n_features - 1) // (k + 1), (n_features - 1) // (k + 1)] = \
(k + 1) * 1e-4
diags.append(diag)
geo = np.prod(np.array(diags), axis=0) ** (1 / float(len(diags)))
assert_array_almost_equal(geometric_mean(diags), geo)
def test_geometric_mean_geodesic():
"""Test geometric_mean function for single geodesic matrices"""
n_matrices = 10
n_features = 6
sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features))
sym = sym * sym[:, np.newaxis]
times = np.arange(n_matrices)
non_singular = np.eye(n_features)
non_singular[1:3, 1:3] = np.array([[-1, -.5], [-.5, -1]])
spds = []
for time in times:
spds.append(non_singular.dot(map_sym(np.exp, time * sym)).dot(
non_singular.T))
geo = non_singular.dot(map_sym(np.exp, times.mean() * sym)).dot(
non_singular.T)
assert_array_almost_equal(geometric_mean(spds), geo)
def random_diagonal(p, v_min=1., v_max=2., rand_gen=None):
"""Generate a random diagonal matrix.
Parameters
----------
p : int
The first dimension of the array.
v_min : float, optional (default to 1.)
Minimal element.
v_max : float, optional (default to 2.)
Maximal element.
rand_gen: numpy.random.RandomState or None, optional
Random generator to use for generation.
Returns
-------
output : numpy.ndarray, shape (p, p)
A diagonal matrix with the given minimal and maximal elements.
"""
if rand_gen is None:
rand_gen = np.random.RandomState(0)
diag = rand_gen.rand(p) * (v_max - v_min) + v_min
diag[diag == np.amax(diag)] = v_max
diag[diag == np.amin(diag)] = v_min
return np.diag(diag)
def random_spd(p, eig_min, cond, rand_gen=None):
"""Generate a random symmetric positive definite matrix.
Parameters
----------
p : int
The first dimension of the array.
eig_min : float
Minimal eigenvalue.
cond : float
Condition number, defined as the ratio of the maximum eigenvalue to the
minimum one.
rand_gen: numpy.random.RandomState or None, optional
Random generator to use for generation.
Returns
-------
ouput : numpy.ndarray, shape (p, p)
A symmetric positive definite matrix with the given minimal eigenvalue
and condition number.
"""
if rand_gen is None:
rand_gen = np.random.RandomState(0)
mat = rand_gen.randn(p, p)
unitary, _ = linalg.qr(mat)
diag = random_diagonal(p, v_min=eig_min, v_max=cond * eig_min,
rand_gen=rand_gen)
return unitary.dot(diag).dot(unitary.T)
def random_non_singular(p, sing_min=1., sing_max=2., rand_gen=None):
"""Generate a random nonsingular matrix.
Parameters
----------
p : int
The first dimension of the array.
sing_min : float, optional (default to 1.)
Minimal singular value.
sing_max : float, optional (default to 2.)
Maximal singular value.
Returns
-------
output : numpy.ndarray, shape (p, p)
A nonsingular matrix with the given minimal and maximal singular
values.
"""
if rand_gen is None:
rand_gen = np.random.RandomState(0)
diag = random_diagonal(p, v_min=sing_min, v_max=sing_max,
rand_gen=rand_gen)
mat1 = rand_gen.randn(p, p)
mat2 = rand_gen.randn(p, p)
unitary1, _ = linalg.qr(mat1)
unitary2, _ = linalg.qr(mat2)
return unitary1.dot(diag).dot(unitary2.T)
def test_geometric_mean_properties():
"""Test geometric_mean function for random spd matrices
"""
n_matrices = 40
n_features = 15
rand_gen = np.random.RandomState(0)
spds = []
for k in range(n_matrices):
spds.append(random_spd(n_features, eig_min=1., cond=10.,
rand_gen=rand_gen))
input_spds = copy.copy(spds)
geo = geometric_mean(spds)
# Generic
assert_is_instance(spds, list)
for spd, input_spd in zip(spds, input_spds):
assert_array_equal(spd, input_spd)
assert(is_spd(geo))
# Invariance under reordering
spds.reverse()
spds.insert(0, spds[1])
spds.pop(2)
assert_array_almost_equal(geometric_mean(spds), geo)
# Invariance under congruent transformation
non_singular = random_non_singular(n_features, rand_gen=rand_gen)
spds_cong = [non_singular.dot(spd).dot(non_singular.T) for spd in spds]
assert_array_almost_equal(geometric_mean(spds_cong),
non_singular.dot(geo).dot(non_singular.T))
# Invariance under inversion
spds_inv = [linalg.inv(spd) for spd in spds]
init = linalg.inv(np.mean(spds, axis=0))
assert_array_almost_equal(geometric_mean(spds_inv, init=init),
linalg.inv(geo))
# Gradient norm is decreasing
grad_norm = grad_geometric_mean(spds)
difference = np.diff(grad_norm)
assert_greater_equal(0., np.amax(difference))
# Check warning if gradient norm in the last step is less than
# tolerance
max_iter = 1
tol = 1e-10
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
geo = geometric_mean(spds, max_iter=max_iter, tol=tol)
assert_equal(len(w), 1)
grad_norm = grad_geometric_mean(spds, max_iter=max_iter, tol=tol)
assert_equal(len(grad_norm), max_iter)
assert_greater(grad_norm[-1], tol)
# Evaluate convergence. A warning is printed if tolerance is not reached
for p in [.5, 1.]: # proportion of badly conditionned matrices
spds = []
for k in range(int(p * n_matrices)):
spds.append(random_spd(n_features, eig_min=1e-2, cond=1e6,
rand_gen=rand_gen))
for k in range(int(p * n_matrices), n_matrices):
spds.append(random_spd(n_features, eig_min=1., cond=10.,
rand_gen=rand_gen))
if p < 1:
max_iter = 30
else:
max_iter = 60
geo = geometric_mean(spds, max_iter=max_iter, tol=1e-5)
def test_geometric_mean_checks():
n_features = 5
# Non square input matrix
mat1 = np.ones((n_features, n_features + 1))
assert_raises(ValueError, geometric_mean, [mat1])
# Input matrices of different shapes
mat1 = np.eye(n_features)
mat2 = np.ones((n_features + 1, n_features + 1))
assert_raises(ValueError, geometric_mean, [mat1, mat2])
# Non spd input matrix
assert_raises(ValueError, geometric_mean, [mat2])
def test_sym_to_vec():
"""Test sym_to_vec function"""
# Check output value is correct
sym = np.ones((3, 3))
vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.])
assert_array_almost_equal(sym_to_vec(sym), vec)
mask_sym = sym > 0
mask_vec = np.ones(6, dtype=bool)
assert_array_equal(sym_to_vec(mask_sym, isometry=False), mask_vec)
# Check vec_to_sym is the inverse function of sym_to_vec
n_features = 19
rand_gen = np.random.RandomState(0)
m = rand_gen.rand(n_features, n_features)
sym = m + m.T
vec = sym_to_vec(sym)
assert_array_almost_equal(vec_to_sym(vec), sym)
syms = np.asarray([sym, 2. * sym, 0.5 * sym])
vecs = sym_to_vec(syms)
assert_array_almost_equal(vec_to_sym(vecs), syms)
vec = sym_to_vec(sym, isometry=False)
assert_array_almost_equal(vec_to_sym(vec, isometry=False),
sym)
assert_array_almost_equal(vec[..., -n_features:], sym[..., -1, :])
vecs = sym_to_vec(syms, isometry=False)
assert_array_almost_equal(vec_to_sym(vecs, isometry=False),
syms)
assert_array_almost_equal(vecs[..., -n_features:], syms[..., -1, :])
def test_vec_to_sym():
"""Test vec_to_sym function"""
# Check error if unsuitable size
vec = np.ones(31)
assert_raises(ValueError, vec_to_sym, vec)
# Check output value is correct
vec = np.ones(6, )
sym = np.array([[sqrt(2), 1., 1.], [1., sqrt(2), 1.],
[1., 1., sqrt(2)]]) / sqrt(2)
assert_array_almost_equal(vec_to_sym(vec), sym)
mask_vec = vec > 0
mask_sym = np.ones((3, 3), dtype=bool)
assert_array_equal(vec_to_sym(mask_vec, isometry=False), mask_sym)
# Check sym_to_vec is the inverse function of vec_to_sym
n = 41
p = n * (n + 1) / 2
rand_gen = np.random.RandomState(0)
vec = rand_gen.rand(p)
sym = vec_to_sym(vec)
assert_array_almost_equal(sym_to_vec(sym), vec)
sym = vec_to_sym(vec, isometry=False)
assert_array_almost_equal(sym_to_vec(sym, isometry=False),
vec)
vecs = np.asarray([vec, 2. * vec, 0.5 * vec])
syms = vec_to_sym(vecs)
assert_array_almost_equal(sym_to_vec(syms), vecs)
syms = vec_to_sym(vecs, isometry=False)
assert_array_almost_equal(sym_to_vec(syms, isometry=False),
vecs)
def test_prec_to_partial():
"""Test prec_to_partial function"""
prec = np.array([[2., -1., 1.], [-1., 2., -1.], [1., -1., 1.]])
partial = np.array([[1., .5, -sqrt(2.) / 2.], [.5, 1., sqrt(2.) / 2.],
[-sqrt(2.) / 2., sqrt(2.) / 2., 1.]])
assert_array_almost_equal(prec_to_partial(prec), partial)
def test_fit_transform():
"""Test fit_transform method for class CovEmbedding"""
n_subjects = 10
n_features = 49
n_samples = 200
# Generate signals and compute empirical covariances
covs = []
signals = []
rand_gen = np.random.RandomState(0)
for k in range(n_subjects):
signal = rand_gen.randn(n_samples, n_features)
signals.append(signal)
signal -= signal.mean(axis=0)
covs.append((signal.T).dot(signal) / n_samples)
input_covs = copy.copy(covs)
for kind in ["correlation", "tangent", "precision", "partial correlation"]:
estimators = {'kind': kind}
cov_embedding = CovEmbedding(**estimators)
covs_transformed = cov_embedding.fit_transform(signals)
# Generic
assert_is_instance(covs_transformed, np.ndarray)
assert_equal(len(covs_transformed), len(covs))
for k, vec in enumerate(covs_transformed):
assert_equal(vec.size, n_features * (n_features + 1) / 2)
assert_array_equal(input_covs[k], covs[k])
cov_new = vec_to_sym(vec)
assert(is_spd(covs[k]))
# Positive definiteness if expected and output value checks
if estimators["kind"] == "tangent":
assert_array_almost_equal(cov_new, cov_new.T)
geo_sqrt = map_sym(np.sqrt, cov_embedding.mean_cov_)
assert(is_spd(geo_sqrt))
assert(is_spd(cov_embedding.whitening_))
assert_array_almost_equal(
cov_embedding.whitening_.dot(geo_sqrt), np.eye(n_features))
assert_array_almost_equal(geo_sqrt.dot(
map_sym(np.exp, cov_new)).dot(geo_sqrt), covs[k])
if estimators["kind"] == "precision":
assert(is_spd(cov_new))
assert_array_almost_equal(cov_new.dot(covs[k]),
np.eye(n_features))
if estimators["kind"] == "correlation":
assert(is_spd(cov_new))
d = np.sqrt(np.diag(np.diag(covs[k])))
assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k])
if estimators["kind"] == "partial correlation":
prec = linalg.inv(covs[k])
d = np.sqrt(np.diag(np.diag(prec)))
assert_array_almost_equal(d.dot(cov_new).dot(d), -prec +\
2 * np.diag(np.diag(prec))) | nilearn/connectivity/tests/test_embedding.py | import copy
import warnings
from math import sqrt, exp, log, cosh, sinh
import numpy as np
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises, assert_equal, assert_is_instance, \
assert_greater, assert_greater_equal
from nilearn._utils.testing import is_spd
from nilearn.connectivity.embedding import check_mat, map_sym, map_eig, \
geometric_mean, grad_geometric_mean, sym_to_vec, vec_to_sym, \
prec_to_partial, CovEmbedding
def test_check_mat():
"""Test check_mat function"""
non_square = np.ones((2, 3))
assert_raises(ValueError, check_mat, non_square, 'square')
non_sym = np.array([[0, 1], [0, 0]])
assert_raises(ValueError, check_mat, non_sym, 'symmetric')
non_spd = np.ones((3, 3))
assert_raises(ValueError, check_mat, non_spd, 'spd')
def test_map_sym():
"""Test map_sym function"""
# Test on exp map
sym = np.ones((2, 2))
sym_exp = exp(1.) * np.array([[cosh(1.), sinh(1.)], [sinh(1.), cosh(1.)]])
assert_array_almost_equal(map_sym(np.exp, sym), sym_exp)
# Test on sqrt map
spd_sqrt = np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]])
spd = spd_sqrt.dot(spd_sqrt)
assert_array_almost_equal(map_sym(np.sqrt, spd), spd_sqrt)
# Test on log map
spd = np.array([[1.25, 0.75], [0.75, 1.25]])
spd_log = np.array([[0., log(2.)], [log(2.), 0.]])
assert_array_almost_equal(map_sym(np.log, spd), spd_log)
def test_geometric_mean_couple():
"""Test geometric_mean function for two matrices"""
n_features = 7
spd1 = np.ones((n_features, n_features))
spd1 = spd1.dot(spd1) + n_features * np.eye(n_features)
spd2 = np.tril(np.ones((n_features, n_features)))
spd2 = spd2.dot(spd2.T)
vals_spd2, vecs_spd2 = np.linalg.eigh(spd2)
spd2_sqrt = map_eig(np.sqrt, vals_spd2, vecs_spd2)
spd2_inv_sqrt = map_eig(np.sqrt, 1. / vals_spd2, vecs_spd2)
geo = spd2_sqrt.dot(map_sym(np.sqrt,
spd2_inv_sqrt.dot(spd1).dot(spd2_inv_sqrt))).dot(spd2_sqrt)
assert_array_almost_equal(geometric_mean([spd1, spd2]), geo)
def test_geometric_mean_diagonal():
"""Test geometric_mean function for diagonal matrices"""
n_matrices = 20
n_features = 5
diags = []
for k in range(n_matrices):
diag = np.eye(n_features)
diag[k % n_features, k % n_features] = 1e4 + k
diag[(n_features - 1) // (k + 1), (n_features - 1) // (k + 1)] = \
(k + 1) * 1e-4
diags.append(diag)
geo = np.prod(np.array(diags), axis=0) ** (1 / float(len(diags)))
assert_array_almost_equal(geometric_mean(diags), geo)
def test_geometric_mean_geodesic():
"""Test geometric_mean function for single geodesic matrices"""
n_matrices = 10
n_features = 6
sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features))
sym = sym * sym[:, np.newaxis]
times = np.arange(n_matrices)
non_singular = np.eye(n_features)
non_singular[1:3, 1:3] = np.array([[-1, -.5], [-.5, -1]])
spds = []
for time in times:
spds.append(non_singular.dot(map_sym(np.exp, time * sym)).dot(
non_singular.T))
geo = non_singular.dot(map_sym(np.exp, times.mean() * sym)).dot(
non_singular.T)
assert_array_almost_equal(geometric_mean(spds), geo)
def random_diagonal(p, v_min=1., v_max=2., rand_gen=None):
    """Generate a random diagonal matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    v_min : float, optional (default to 1.)
        Minimal element.

    v_max : float, optional (default to 2.)
        Maximal element.

    rand_gen : numpy.random.RandomState or None, optional
        Random generator to use for generation; a RandomState seeded with 0
        is used when None.

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A diagonal matrix whose smallest entry is exactly v_min and whose
        largest entry is exactly v_max.
    """
    rng = np.random.RandomState(0) if rand_gen is None else rand_gen
    entries = rng.rand(p) * (v_max - v_min) + v_min
    # Pin the extreme entries so the returned min / max are exact.
    entries[entries == entries.max()] = v_max
    entries[entries == entries.min()] = v_min
    return np.diag(entries)
def random_spd(p, eig_min, cond, rand_gen=None):
    """Generate a random symmetric positive definite matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    eig_min : float
        Minimal eigenvalue.

    cond : float
        Condition number, defined as the ratio of the maximum eigenvalue to
        the minimum one.

    rand_gen : numpy.random.RandomState or None, optional
        Random generator to use for generation; a RandomState seeded with 0
        is used when None.

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A symmetric positive definite matrix with the given minimal
        eigenvalue and condition number.
    """
    if rand_gen is None:
        rand_gen = np.random.RandomState(0)
    # Random orthonormal basis from the QR factorization of a Gaussian matrix.
    unitary, _ = linalg.qr(rand_gen.randn(p, p))
    # Eigenvalues between eig_min and cond * eig_min, extremes pinned.
    eigvals = random_diagonal(p, v_min=eig_min, v_max=cond * eig_min,
                              rand_gen=rand_gen)
    return unitary.dot(eigvals).dot(unitary.T)
def random_non_singular(p, sing_min=1., sing_max=2., rand_gen=None):
    """Generate a random nonsingular matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    sing_min : float, optional (default to 1.)
        Minimal singular value.

    sing_max : float, optional (default to 2.)
        Maximal singular value.

    rand_gen : numpy.random.RandomState or None, optional
        Random generator to use for generation; a RandomState seeded with 0
        is used when None.

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A nonsingular matrix with the given minimal and maximal singular
        values.
    """
    if rand_gen is None:
        rand_gen = np.random.RandomState(0)
    # Singular values drawn between sing_min and sing_max.
    singular = random_diagonal(p, v_min=sing_min, v_max=sing_max,
                               rand_gen=rand_gen)
    mat_left = rand_gen.randn(p, p)
    mat_right = rand_gen.randn(p, p)
    # Two independent random orthonormal bases (left / right factors of an
    # SVD-like decomposition).
    left, _ = linalg.qr(mat_left)
    right, _ = linalg.qr(mat_right)
    return left.dot(singular).dot(right.T)
def test_geometric_mean_properties():
    """Test geometric_mean function for random spd matrices

    Checks that inputs are left untouched, and verifies invariance under
    reordering, congruent transformation and inversion, monotone decrease
    of the gradient norm, and the warning emitted when the tolerance is
    not reached within max_iter.
    """
    n_matrices = 40
    n_features = 15
    rand_gen = np.random.RandomState(0)
    spds = []
    for k in range(n_matrices):
        spds.append(random_spd(n_features, eig_min=1., cond=10.,
                               rand_gen=rand_gen))
    # Shallow copy: used below to check the inputs are not mutated in place.
    input_spds = copy.copy(spds)
    geo = geometric_mean(spds)
    # Generic
    assert_is_instance(spds, list)
    for spd, input_spd in zip(spds, input_spds):
        assert_array_equal(spd, input_spd)
    assert(is_spd(geo))
    # Invariance under reordering
    spds.reverse()
    spds.insert(0, spds[1])
    spds.pop(2)
    assert_array_almost_equal(geometric_mean(spds), geo)
    # Invariance under congruent transformation
    non_singular = random_non_singular(n_features, rand_gen=rand_gen)
    spds_cong = [non_singular.dot(spd).dot(non_singular.T) for spd in spds]
    assert_array_almost_equal(geometric_mean(spds_cong),
                              non_singular.dot(geo).dot(non_singular.T))
    # Invariance under inversion
    spds_inv = [linalg.inv(spd) for spd in spds]
    init = linalg.inv(np.mean(spds, axis=0))
    assert_array_almost_equal(geometric_mean(spds_inv, init=init),
                              linalg.inv(geo))
    # Gradient norm is decreasing: all consecutive differences must be <= 0.
    grad_norm = grad_geometric_mean(spds)
    difference = np.diff(grad_norm)
    assert_greater_equal(0., np.amax(difference))
    # Check warning if gradient norm in the last step is less than
    # tolerance
    max_iter = 1
    tol = 1e-10
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        geo = geometric_mean(spds, max_iter=max_iter, tol=tol)
        assert_equal(len(w), 1)
    grad_norm = grad_geometric_mean(spds, max_iter=max_iter, tol=tol)
    assert_equal(len(grad_norm), max_iter)
    assert_greater(grad_norm[-1], tol)
    # Evaluate convergence. A warning is printed if tolerance is not reached
    for p in [.5, 1.]:  # proportion of badly conditionned matrices
        spds = []
        for k in range(int(p * n_matrices)):
            spds.append(random_spd(n_features, eig_min=1e-2, cond=1e6,
                                   rand_gen=rand_gen))
        for k in range(int(p * n_matrices), n_matrices):
            spds.append(random_spd(n_features, eig_min=1., cond=10.,
                                   rand_gen=rand_gen))
        # Harder problems (all matrices badly conditioned) get more
        # iterations to converge.
        if p < 1:
            max_iter = 30
        else:
            max_iter = 60
        geo = geometric_mean(spds, max_iter=max_iter, tol=1e-5)
def test_geometric_mean_checks():
    """geometric_mean rejects non-square, mismatched and non-SPD inputs."""
    p = 5
    # A rectangular matrix is not a valid covariance.
    rect = np.ones((p, p + 1))
    assert_raises(ValueError, geometric_mean, [rect])
    # Matrices of inconsistent shapes cannot be averaged together.
    small = np.eye(p)
    big = np.ones((p + 1, p + 1))
    assert_raises(ValueError, geometric_mean, [small, big])
    # Square but singular (hence not SPD) input is rejected as well.
    assert_raises(ValueError, geometric_mean, [big])
def test_sym_to_vec():
    """Test sym_to_vec function"""
    # Check output value is correct
    sym = np.ones((3, 3))
    # Off-diagonal terms are scaled by sqrt(2) so the mapping is an isometry.
    vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.])
    assert_array_almost_equal(sym_to_vec(sym), vec)
    mask_sym = sym > 0
    mask_vec = np.ones(6, dtype=bool)
    # isometry=False keeps the raw lower-triangular values (here booleans).
    assert_array_equal(sym_to_vec(mask_sym, isometry=False), mask_vec)
    # Check vec_to_sym is the inverse function of sym_to_vec
    n_features = 19
    rand_gen = np.random.RandomState(0)
    m = rand_gen.rand(n_features, n_features)
    # Symmetrize a random matrix to get a valid input.
    sym = m + m.T
    vec = sym_to_vec(sym)
    assert_array_almost_equal(vec_to_sym(vec), sym)
    # The transform applies along the last two axes of a stack of matrices.
    syms = np.asarray([sym, 2. * sym, 0.5 * sym])
    vecs = sym_to_vec(syms)
    assert_array_almost_equal(vec_to_sym(vecs), syms)
    vec = sym_to_vec(sym, isometry=False)
    assert_array_almost_equal(vec_to_sym(vec, isometry=False),
                              sym)
    # Without isometric scaling the last matrix row is stored verbatim at
    # the tail of the vector.
    assert_array_almost_equal(vec[..., -n_features:], sym[..., -1, :])
    vecs = sym_to_vec(syms, isometry=False)
    assert_array_almost_equal(vec_to_sym(vecs, isometry=False),
                              syms)
    assert_array_almost_equal(vecs[..., -n_features:], syms[..., -1, :])
def test_vec_to_sym():
    """Test vec_to_sym function

    Checks the error path, the exact output for a simple input, and that
    sym_to_vec inverts vec_to_sym (with and without isometric scaling).
    """
    # Check error if unsuitable size: 31 is not a triangular number.
    vec = np.ones(31)
    assert_raises(ValueError, vec_to_sym, vec)
    # Check output value is correct
    vec = np.ones(6, )
    sym = np.array([[sqrt(2), 1., 1.], [1., sqrt(2), 1.],
                    [1., 1., sqrt(2)]]) / sqrt(2)
    assert_array_almost_equal(vec_to_sym(vec), sym)
    mask_vec = vec > 0
    mask_sym = np.ones((3, 3), dtype=bool)
    assert_array_equal(vec_to_sym(mask_vec, isometry=False), mask_sym)
    # Check sym_to_vec is the inverse function of vec_to_sym
    n = 41
    # Integer division: rand() needs an int size; Python 3 true division
    # would produce a float and raise TypeError.
    p = n * (n + 1) // 2
    rand_gen = np.random.RandomState(0)
    vec = rand_gen.rand(p)
    sym = vec_to_sym(vec)
    assert_array_almost_equal(sym_to_vec(sym), vec)
    sym = vec_to_sym(vec, isometry=False)
    assert_array_almost_equal(sym_to_vec(sym, isometry=False),
                              vec)
    # The transform applies along the last axis of a stack of vectors.
    vecs = np.asarray([vec, 2. * vec, 0.5 * vec])
    syms = vec_to_sym(vecs)
    assert_array_almost_equal(sym_to_vec(syms), vecs)
    syms = vec_to_sym(vecs, isometry=False)
    assert_array_almost_equal(sym_to_vec(syms, isometry=False),
                              vecs)
def test_prec_to_partial():
    """prec_to_partial maps a precision matrix to partial correlations."""
    precision = np.array([[2., -1., 1.], [-1., 2., -1.], [1., -1., 1.]])
    half_sqrt2 = sqrt(2.) / 2.
    expected = np.array([[1., .5, -half_sqrt2],
                         [.5, 1., half_sqrt2],
                         [-half_sqrt2, half_sqrt2, 1.]])
    assert_array_almost_equal(prec_to_partial(precision), expected)
def test_fit_transform():
    """Test fit_transform method for class CovEmbedding

    For each connectivity kind, checks output type and size and the exact
    mathematical relation between the transformed vectors and the empirical
    covariances.
    """
    n_subjects = 10
    n_features = 49
    n_samples = 200
    # Generate signals and compute empirical covariances
    covs = []
    signals = []
    rand_gen = np.random.RandomState(0)
    for k in range(n_subjects):
        signal = rand_gen.randn(n_samples, n_features)
        signals.append(signal)
        signal -= signal.mean(axis=0)
        covs.append((signal.T).dot(signal) / n_samples)
    # Shallow copy used to check that the inputs are not mutated.
    input_covs = copy.copy(covs)
    for kind in ["correlation", "tangent", "precision", "partial correlation"]:
        estimators = {'kind': kind}
        cov_embedding = CovEmbedding(**estimators)
        covs_transformed = cov_embedding.fit_transform(signals)
        # Generic
        assert_is_instance(covs_transformed, np.ndarray)
        assert_equal(len(covs_transformed), len(covs))
        for k, vec in enumerate(covs_transformed):
            # Vectorized upper triangle: integer division keeps the
            # expected size an int under Python 3.
            assert_equal(vec.size, n_features * (n_features + 1) // 2)
            assert_array_equal(input_covs[k], covs[k])
            cov_new = vec_to_sym(vec)
            assert(is_spd(covs[k]))
            # Positive definiteness if expected and output value checks
            if estimators["kind"] == "tangent":
                assert_array_almost_equal(cov_new, cov_new.T)
                geo_sqrt = map_sym(np.sqrt, cov_embedding.mean_cov_)
                assert(is_spd(geo_sqrt))
                assert(is_spd(cov_embedding.whitening_))
                # whitening_ is the inverse square root of the mean.
                assert_array_almost_equal(
                    cov_embedding.whitening_.dot(geo_sqrt), np.eye(n_features))
                # Tangent vectors map back to the covariance via exp.
                assert_array_almost_equal(geo_sqrt.dot(
                    map_sym(np.exp, cov_new)).dot(geo_sqrt), covs[k])
            if estimators["kind"] == "precision":
                assert(is_spd(cov_new))
                assert_array_almost_equal(cov_new.dot(covs[k]),
                                          np.eye(n_features))
            if estimators["kind"] == "correlation":
                assert(is_spd(cov_new))
                # Rescaling by the standard deviations recovers the covariance.
                d = np.sqrt(np.diag(np.diag(covs[k])))
                assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k])
            if estimators["kind"] == "partial correlation":
                prec = linalg.inv(covs[k])
                d = np.sqrt(np.diag(np.diag(prec)))
                assert_array_almost_equal(d.dot(cov_new).dot(d), -prec +
                                          2 * np.diag(np.diag(prec)))
import datetime
import numpy as np
import pandas as pd
from sqlalchemy import sql
def get_and_adjust_data(db_engine, station_id, start, end):
    """
    Get data from the database in both the bike count format and the outage
    format, between the passed dates. If bike count data and outage data is
    available for the same time, bike count data takes precedence.

    If no data is available for a subset of the passed period of time, it will
    be left out of the returned dataset.

    :param db_engine: SQLAlchemy engine/connectable used for all queries.
    :param station_id: id of the station whose data is fetched.
    :param start: inclusive start of the requested time range (datetime).
    :param end: inclusive end of the requested time range (datetime).
    :return: pandas Series indexed by 5-minute timestamps whose values are
        "empty", "full" or NaN (neither empty nor full).
    """
    data_list = []
    # Create empty DateTimeIndex with frequency of five minutes, and assign it
    # to an empty series.
    # "5T" is five minutes.
    dti = pd.date_range(0, -1, freq="5T")
    data = pd.Series(None, index=dti)
    # Add data in the bike count format.
    bike_counts = pd.read_sql_query(
        "SELECT ts, bikes, spaces FROM bike_count "
        + "WHERE station_id = %(station_id)s AND "
        + "ts >= %(start)s AND ts <= %(end)s;",
        db_engine, params={
            "station_id": station_id, "start": start, "end": end})
    # bike_count[0] is the index, [1..3] are the columns in the order
    # selected in the above query
    for bike_count in bike_counts.itertuples():
        # Do not insert counts with no bikes or spaces (inactive stations).
        if not (bike_count[2] == 0 and bike_count[3] == 0):
            ts = pd.to_datetime(bike_count[1], infer_datetime_format=True)
            # Round the timestamp to the nearest five minute mark.
            ts += datetime.timedelta(seconds=150)
            ts = ts.replace(
                minute=(ts.minute - (ts.minute % 5)), second=0, microsecond=0)
            # A status of np.nan means the station is neither full nor empty.
            status = np.nan
            if bike_count[2] == 0:
                status = "empty"
            elif bike_count[3] == 0:
                status = "full"
            # Create index with only one entry, ts.
            index = pd.date_range(ts, ts, freq="5T")
            data_list.append(pd.Series(status, index=index))
    if len(data_list) > 0:
        data = pd.concat(data_list)
    try:
        data_list = []
        # Add data in the outage format.
        outages = pd.read_sql_query(
            "SELECT outage_type, outage_start, outage_end FROM outage "
            + "WHERE station_id = %(station_id)s AND "
            + "outage_start >= %(start)s AND outage_end <= %(end)s;",
            db_engine, params={
                "station_id": station_id, "start": start, "end": end})
        # Merge each outage into dataframe.
        for outage in outages.itertuples():
            # Round outage start and end to the nearest five minute mark.
            ostart = pd.to_datetime(outage[2], infer_datetime_format=True)
            ostart += datetime.timedelta(seconds=150)
            ostart = ostart.replace(
                minute=(ostart.minute - (ostart.minute % 5)),
                second=0, microsecond=0)
            oend = pd.to_datetime(outage[3], infer_datetime_format=True)
            oend += datetime.timedelta(seconds=150)
            oend = oend.replace(
                minute=(oend.minute - (oend.minute % 5)),
                second=0, microsecond=0)
            # One entry per 5-minute slot for the whole outage duration.
            index = pd.date_range(ostart, oend, freq="5T")
            data_list.append(pd.Series(outage[1], index=index))
        outage_data = pd.concat(data_list)
        # Collapse overlapping outages: keep the first status per timestamp.
        outage_data = outage_data.groupby(outage_data.index).first()
        conn = db_engine.connect()
        # Determine timeframe where outages were recorded for this station.
        query = sql.text(
            "SELECT MIN(outage_start) FROM outage "
            "WHERE station_id = :station_id")
        outage_data_start = conn.execute(
            query, station_id=station_id).fetchone()[0]
        query = sql.text(
            "SELECT MAX(outage_end) FROM outage "
            "WHERE station_id = :station_id")
        outage_data_end = conn.execute(
            query, station_id=station_id).fetchone()[0]
        outage_data_start = pd.to_datetime(
            outage_data_start, infer_datetime_format=True)
        outage_data_end = pd.to_datetime(
            outage_data_end, infer_datetime_format=True)
        # Clamp the reindexing range to where outage data actually exists.
        range_start = outage_data_start if outage_data_start > start else start
        range_end = outage_data_end if outage_data_end < end else end
        range_start = range_start.replace(
            minute=(range_start.minute - (range_start.minute % 5)),
            second=0, microsecond=0)
        range_end = range_end.replace(
            minute=(range_end.minute - (range_end.minute % 5)),
            second=0, microsecond=0)
        # Add NaN for those times when the station is not full or empty.
        outage_data = outage_data.reindex(pd.date_range(
            range_start, range_end, freq="5T"))
        # Remove any timestamps from outage_data that are in the bike_count
        # data.
        unique = outage_data.index.difference(data.index)
        outage_data = outage_data.reindex(unique)
    except ValueError as ex:
        # NOTE(review): pd.concat raises ValueError on an empty list, which
        # is presumably the case this guards (no outages recorded); confirm
        # no other ValueError is silently masked here.
        outage_data = None
        print(ex)
    # Merge the two series together.
    if outage_data is not None:
        data = pd.concat([data, outage_data])
    # Remove any remaining stray duplicates.
    data = data.groupby(data.index).first()
    data.sort_index(inplace=True)
    return data
def bal(s, balance):
    """Down-sample ``s`` so the chosen status is balanced against the rest.

    :param s: pandas Series of station statuses ("empty", "full" or NaN).
    :param balance: The status to balance on. "empty" or "full"
    :return: concatenation of an equally sized random sample of the other
        rows followed by all rows with the chosen status; None when
        ``balance`` is neither "empty" nor "full" (historical behaviour).
    """
    if balance == "empty":
        matching = s[s == "empty"]
        rest = s[s != "empty"]
    elif balance == "full":
        matching = s[s == "full"]
        rest = s[s != "full"]
    else:
        # Preserved historical behaviour: unknown labels return None.
        return None
    # Sample (without replacement) so both groups end up the same size.
    return pd.concat([rest.sample(len(matching)), matching])


import datetime
import numpy as np
import pandas as pd
from sqlalchemy import sql
def get_and_adjust_data(db_engine, station_id, start, end):
    """
    Get data from the database in both the bike count format and the outage
    format, between the passed dates. If bike count data and outage data is
    available for the same time, bike count data takes precedence.

    If no data is available for a subset of the passed period of time, it will
    be left out of the returned dataset.

    :param db_engine: SQLAlchemy engine/connectable used for all queries.
    :param station_id: id of the station whose data is fetched.
    :param start: inclusive start of the requested time range (datetime).
    :param end: inclusive end of the requested time range (datetime).
    :return: pandas Series indexed by 5-minute timestamps whose values are
        "empty", "full" or NaN (neither empty nor full).
    """
    data_list = []
    # Create empty DateTimeIndex with frequency of five minutes, and assign it
    # to an empty series.
    # "5T" is five minutes.
    dti = pd.date_range(0, -1, freq="5T")
    data = pd.Series(None, index=dti)
    # Add data in the bike count format.
    bike_counts = pd.read_sql_query(
        "SELECT ts, bikes, spaces FROM bike_count "
        + "WHERE station_id = %(station_id)s AND "
        + "ts >= %(start)s AND ts <= %(end)s;",
        db_engine, params={
            "station_id": station_id, "start": start, "end": end})
    # bike_count[0] is the index, [1..3] are the columns in the order
    # selected in the above query
    for bike_count in bike_counts.itertuples():
        # Do not insert counts with no bikes or spaces (inactive stations).
        if not (bike_count[2] == 0 and bike_count[3] == 0):
            ts = pd.to_datetime(bike_count[1], infer_datetime_format=True)
            # Round the timestamp to the nearest five minute mark.
            ts += datetime.timedelta(seconds=150)
            ts = ts.replace(
                minute=(ts.minute - (ts.minute % 5)), second=0, microsecond=0)
            # A status of np.nan means the station is neither full nor empty.
            status = np.nan
            if bike_count[2] == 0:
                status = "empty"
            elif bike_count[3] == 0:
                status = "full"
            # Create index with only one entry, ts.
            index = pd.date_range(ts, ts, freq="5T")
            data_list.append(pd.Series(status, index=index))
    if len(data_list) > 0:
        data = pd.concat(data_list)
    try:
        data_list = []
        # Add data in the outage format.
        outages = pd.read_sql_query(
            "SELECT outage_type, outage_start, outage_end FROM outage "
            + "WHERE station_id = %(station_id)s AND "
            + "outage_start >= %(start)s AND outage_end <= %(end)s;",
            db_engine, params={
                "station_id": station_id, "start": start, "end": end})
        # Merge each outage into dataframe.
        for outage in outages.itertuples():
            # Round outage start and end to the nearest five minute mark.
            ostart = pd.to_datetime(outage[2], infer_datetime_format=True)
            ostart += datetime.timedelta(seconds=150)
            ostart = ostart.replace(
                minute=(ostart.minute - (ostart.minute % 5)),
                second=0, microsecond=0)
            oend = pd.to_datetime(outage[3], infer_datetime_format=True)
            oend += datetime.timedelta(seconds=150)
            oend = oend.replace(
                minute=(oend.minute - (oend.minute % 5)),
                second=0, microsecond=0)
            # One entry per 5-minute slot for the whole outage duration.
            index = pd.date_range(ostart, oend, freq="5T")
            data_list.append(pd.Series(outage[1], index=index))
        outage_data = pd.concat(data_list)
        # Collapse overlapping outages: keep the first status per timestamp.
        outage_data = outage_data.groupby(outage_data.index).first()
        conn = db_engine.connect()
        # Determine timeframe where outages were recorded for this station.
        query = sql.text(
            "SELECT MIN(outage_start) FROM outage "
            "WHERE station_id = :station_id")
        outage_data_start = conn.execute(
            query, station_id=station_id).fetchone()[0]
        query = sql.text(
            "SELECT MAX(outage_end) FROM outage "
            "WHERE station_id = :station_id")
        outage_data_end = conn.execute(
            query, station_id=station_id).fetchone()[0]
        outage_data_start = pd.to_datetime(
            outage_data_start, infer_datetime_format=True)
        outage_data_end = pd.to_datetime(
            outage_data_end, infer_datetime_format=True)
        # Clamp the reindexing range to where outage data actually exists.
        range_start = outage_data_start if outage_data_start > start else start
        range_end = outage_data_end if outage_data_end < end else end
        range_start = range_start.replace(
            minute=(range_start.minute - (range_start.minute % 5)),
            second=0, microsecond=0)
        range_end = range_end.replace(
            minute=(range_end.minute - (range_end.minute % 5)),
            second=0, microsecond=0)
        # Add NaN for those times when the station is not full or empty.
        outage_data = outage_data.reindex(pd.date_range(
            range_start, range_end, freq="5T"))
        # Remove any timestamps from outage_data that are in the bike_count
        # data.
        unique = outage_data.index.difference(data.index)
        outage_data = outage_data.reindex(unique)
    except ValueError as ex:
        # NOTE(review): pd.concat raises ValueError on an empty list, which
        # is presumably the case this guards (no outages recorded); confirm
        # no other ValueError is silently masked here.
        outage_data = None
        print(ex)
    # Merge the two series together.
    if outage_data is not None:
        data = pd.concat([data, outage_data])
    # Remove any remaining stray duplicates.
    data = data.groupby(data.index).first()
    data.sort_index(inplace=True)
    return data
def bal(s, balance):
    """Down-sample ``s`` so the chosen status is balanced against the rest.

    :param s: pandas Series of station statuses ("empty", "full" or NaN).
    :param balance: The status to balance on. "empty" or "full"
    :return: concatenation of an equally sized random sample of the other
        rows followed by all rows with the chosen status; None when
        ``balance`` is neither "empty" nor "full" (historical behaviour).
    """
    if balance == "empty":
        matching = s[s == "empty"]
        rest = s[s != "empty"]
    elif balance == "full":
        matching = s[s == "full"]
        rest = s[s != "full"]
    else:
        # Preserved historical behaviour: unknown labels return None.
        return None
    # Sample (without replacement) so both groups end up the same size.
    return pd.concat([rest.sample(len(matching)), matching])
import numpy as np
from maci.misc.space import MADiscrete, MABox
from maci.environments.env_spec import MAEnvSpec
from rllab.core.serializable import Serializable
class DifferentialGame(Serializable):
    """Two-player differential (continuous-action) game environment.

    Each agent emits one scalar action in [action_low, action_high];
    observations are trivial (a single discrete state per agent) and every
    episode terminates after one step.
    """

    def __init__(self, game_name, agent_num, action_low=-10, action_high=10):
        Serializable.quick_init(self, locals())
        self.game = game_name
        self.agent_num = agent_num
        # self.action_num = action_num
        self.action_range = [action_low, action_high]
        # One scalar action box per agent.
        lows = np.array([np.array([action_low]) for _ in range(self.agent_num)])
        highs = np.array([np.array([action_high]) for _ in range(self.agent_num)])
        self.action_spaces = MABox(lows=lows, highs=highs)
        # Observations carry no information: one discrete state per agent.
        self.observation_spaces = MADiscrete([1] * self.agent_num)
        self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)
        self.t = 0
        self.numplots = 0
        # payoff[i] maps the joint action to agent i's scalar reward.
        self.payoff = {}
        if self.game == 'zero_sum':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: a1 * a2
            self.payoff[1] = lambda a1, a2: -a1 * a2
        elif self.game == 'trigonometric':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: np.cos(a2) * a1
            self.payoff[1] = lambda a1, a2: np.sin(a1) * a2
        # This is also an extension to the classical matching pennies game, but here the
        # payoff function is smooth:
        # NOTE(review): game key is spelled 'mataching_pennies' (sic);
        # callers must use the same spelling.
        elif self.game == 'mataching_pennies':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: (a1-0.5)*(a2-0.5)
            self.payoff[1] = lambda a1, a2: (a1-0.5)*(a2-0.5)
        elif self.game == 'rotational':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: 0.5 * a1 * a1 + 10 * a1 * a2
            self.payoff[1] = lambda a1, a2: 0.5 * a2 * a2 - 10 * a1 * a2
        elif self.game == 'wolf':
            assert self.agent_num == 2

            def V(alpha, beta, payoff):
                # Expected payoff of a 2x2 bimatrix game under mixed
                # strategies (alpha, beta).
                u = payoff[(0, 0)] - payoff[(0, 1)] - payoff[(1, 0)] + payoff[(1, 1)]
                return alpha * beta * u + alpha * (payoff[(0, 1)] - payoff[(1, 1)]) + beta * (
                    payoff[(1, 0)] - payoff[(1, 1)]) + payoff[(1, 1)]

            payoff_0 = np.array([[0, 3], [1, 2]])
            payoff_1 = np.array([[3, 2], [0, 1]])
            self.payoff[0] = lambda a1, a2: V(a1, a2, payoff_0)
            self.payoff[1] = lambda a1, a2: V(a1, a2, payoff_1)
        elif self.game == 'ma_softq':
            assert self.agent_num == 2
            # Two-bump landscape: the higher bump (offset c at (x2, y2))
            # has the narrower basin (s2 < s1).
            h1 = 0.8
            h2 = 1.
            s1 = 3.
            s2 = 1.
            x1 = -5.
            x2 = 5.
            y1 = -5.
            y2 = 5.
            c = 10.

            def max_f(a1, a2):
                f1 = h1 * (-(np.square(a1 - x1) / s1) - (np.square(a2 - y1) / s1))
                f2 = h2 * (-(np.square(a1 - x2) / s2) - (np.square(a2 - y2) / s2)) + c
                return max(f1, f2)

            # Fully cooperative: both agents share the same payoff.
            self.payoff[0] = lambda a1, a2: max_f(a1, a2)
            self.payoff[1] = lambda a1, a2: max_f(a1, a2)
        self.rewards = np.zeros((self.agent_num,))

    @staticmethod
    def get_game_list():
        """Return the catalogue of named games with their fixed sizes."""
        return {
            'zero_sum': {'agent_num': 2, 'action_num': 2}
        }

    def step(self, actions):
        """Apply the joint action; episodes terminate after a single step.

        :param actions: sequence of one scalar action per agent.
        :return: (states, rewards, dones, info) gym-style tuple.
        """
        assert len(actions) == self.agent_num
        reward_n = np.zeros((self.agent_num,))
        for i in range(self.agent_num):
            # print('actions', actions)
            reward_n[i] = self.payoff[i](*tuple(actions))
        self.rewards = reward_n
        # print(reward_n)
        state_n = np.array(list([[0. * i] for i in range(self.agent_num)]))
        info = {}
        # Single-step episodes: every agent is done immediately.
        done_n = np.array([True] * self.agent_num)
        self.t += 1
        return state_n, reward_n, done_n, info

    def reset(self):
        """Return the (constant, all-zero) initial observation per agent."""
        return np.array(list([[0. * i] for i in range(self.agent_num)]))

    def render(self, mode='human', close=False):
        if mode == 'human':
            print(self.__str__())

    def get_joint_reward(self):
        """Rewards produced by the most recent step."""
        return self.rewards

    def terminate(self):
        # Nothing to clean up.
        pass

    def __str__(self):
        content = 'Game Name {}, Number of Agent {}, Action Range {}\n'.format(self.game, self.agent_num, self.action_range)
        return content
# Manual smoke test: list supported games and print a sample instance.
if __name__ == '__main__':
    print(DifferentialGame.get_game_list())
    game = DifferentialGame('zero_sum', agent_num=2)
    print(game)


import numpy as np
from maci.misc.space import MADiscrete, MABox
from maci.environments.env_spec import MAEnvSpec
from rllab.core.serializable import Serializable
class DifferentialGame(Serializable):
    """Two-player differential (continuous-action) game environment.

    Each agent emits one scalar action in [action_low, action_high];
    observations are trivial (a single discrete state per agent) and every
    episode terminates after one step.
    """

    def __init__(self, game_name, agent_num, action_low=-10, action_high=10):
        Serializable.quick_init(self, locals())
        self.game = game_name
        self.agent_num = agent_num
        # self.action_num = action_num
        self.action_range = [action_low, action_high]
        # One scalar action box per agent.
        lows = np.array([np.array([action_low]) for _ in range(self.agent_num)])
        highs = np.array([np.array([action_high]) for _ in range(self.agent_num)])
        self.action_spaces = MABox(lows=lows, highs=highs)
        # Observations carry no information: one discrete state per agent.
        self.observation_spaces = MADiscrete([1] * self.agent_num)
        self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)
        self.t = 0
        self.numplots = 0
        # payoff[i] maps the joint action to agent i's scalar reward.
        self.payoff = {}
        if self.game == 'zero_sum':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: a1 * a2
            self.payoff[1] = lambda a1, a2: -a1 * a2
        elif self.game == 'trigonometric':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: np.cos(a2) * a1
            self.payoff[1] = lambda a1, a2: np.sin(a1) * a2
        # This is also an extension to the classical matching pennies game, but here the
        # payoff function is smooth:
        # NOTE(review): game key is spelled 'mataching_pennies' (sic);
        # callers must use the same spelling.
        elif self.game == 'mataching_pennies':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: (a1-0.5)*(a2-0.5)
            self.payoff[1] = lambda a1, a2: (a1-0.5)*(a2-0.5)
        elif self.game == 'rotational':
            assert self.agent_num == 2
            self.payoff[0] = lambda a1, a2: 0.5 * a1 * a1 + 10 * a1 * a2
            self.payoff[1] = lambda a1, a2: 0.5 * a2 * a2 - 10 * a1 * a2
        elif self.game == 'wolf':
            assert self.agent_num == 2

            def V(alpha, beta, payoff):
                # Expected payoff of a 2x2 bimatrix game under mixed
                # strategies (alpha, beta).
                u = payoff[(0, 0)] - payoff[(0, 1)] - payoff[(1, 0)] + payoff[(1, 1)]
                return alpha * beta * u + alpha * (payoff[(0, 1)] - payoff[(1, 1)]) + beta * (
                    payoff[(1, 0)] - payoff[(1, 1)]) + payoff[(1, 1)]

            payoff_0 = np.array([[0, 3], [1, 2]])
            payoff_1 = np.array([[3, 2], [0, 1]])
            self.payoff[0] = lambda a1, a2: V(a1, a2, payoff_0)
            self.payoff[1] = lambda a1, a2: V(a1, a2, payoff_1)
        elif self.game == 'ma_softq':
            assert self.agent_num == 2
            # Two-bump landscape: the higher bump (offset c at (x2, y2))
            # has the narrower basin (s2 < s1).
            h1 = 0.8
            h2 = 1.
            s1 = 3.
            s2 = 1.
            x1 = -5.
            x2 = 5.
            y1 = -5.
            y2 = 5.
            c = 10.

            def max_f(a1, a2):
                f1 = h1 * (-(np.square(a1 - x1) / s1) - (np.square(a2 - y1) / s1))
                f2 = h2 * (-(np.square(a1 - x2) / s2) - (np.square(a2 - y2) / s2)) + c
                return max(f1, f2)

            # Fully cooperative: both agents share the same payoff.
            self.payoff[0] = lambda a1, a2: max_f(a1, a2)
            self.payoff[1] = lambda a1, a2: max_f(a1, a2)
        self.rewards = np.zeros((self.agent_num,))

    @staticmethod
    def get_game_list():
        """Return the catalogue of named games with their fixed sizes."""
        return {
            'zero_sum': {'agent_num': 2, 'action_num': 2}
        }

    def step(self, actions):
        """Apply the joint action; episodes terminate after a single step.

        :param actions: sequence of one scalar action per agent.
        :return: (states, rewards, dones, info) gym-style tuple.
        """
        assert len(actions) == self.agent_num
        reward_n = np.zeros((self.agent_num,))
        for i in range(self.agent_num):
            # print('actions', actions)
            reward_n[i] = self.payoff[i](*tuple(actions))
        self.rewards = reward_n
        # print(reward_n)
        state_n = np.array(list([[0. * i] for i in range(self.agent_num)]))
        info = {}
        # Single-step episodes: every agent is done immediately.
        done_n = np.array([True] * self.agent_num)
        self.t += 1
        return state_n, reward_n, done_n, info

    def reset(self):
        """Return the (constant, all-zero) initial observation per agent."""
        return np.array(list([[0. * i] for i in range(self.agent_num)]))

    def render(self, mode='human', close=False):
        if mode == 'human':
            print(self.__str__())

    def get_joint_reward(self):
        """Rewards produced by the most recent step."""
        return self.rewards

    def terminate(self):
        # Nothing to clean up.
        pass

    def __str__(self):
        content = 'Game Name {}, Number of Agent {}, Action Range {}\n'.format(self.game, self.agent_num, self.action_range)
        return content
# Manual smoke test: list supported games and print a sample instance.
if __name__ == '__main__':
    print(DifferentialGame.get_game_list())
    game = DifferentialGame('zero_sum', agent_num=2)
    print(game)
import os
import pathlib
from ote.tests.test_case import (create_export_test_case,
create_test_case,
skip_if_cuda_not_available)
from ote.utils.misc import run_through_shell
def create_image_classification_export_test_case(**kwargs):
    """Build a TestCase class that checks ONNX / OpenVINO model export.

    Keyword arguments are forwarded to ``create_export_test_case``.
    """
    expected_outputs_dir = os.path.join(os.path.dirname(__file__), '..', 'expected_outputs')
    ExportTestCase = create_export_test_case('image_classification',
                                             **kwargs,
                                             metric_keys=['accuracy'],
                                             expected_outputs_dir=expected_outputs_dir)

    class ClassificationExportTestCase(ExportTestCase):

        def test_export_on_gpu(self):
            # Skipped entirely on machines without CUDA.
            skip_if_cuda_not_available()
            export_dir = os.path.join(self.output_folder, 'gpu_export')
            self.do_export(export_dir, on_gpu=True)

        def test_export_on_cpu(self):
            export_dir = os.path.join(self.output_folder, 'cpu_export')
            self.do_export(export_dir, on_gpu=False)

        def do_export(self, export_dir, on_gpu):
            # Run the template's export.py once, then verify the artefacts.
            if not os.path.exists(export_dir):
                # Hide all GPUs from the subprocess when exporting on CPU.
                initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
                run_through_shell(
                    f'{initial_command}'
                    f'cd {os.path.dirname(self.template_file)};'
                    f'pip install -r requirements.txt;'
                    f'python3 export.py --openvino'
                    f' --load-weights snapshot.pth'
                    f' --save-model-to {export_dir}'
                )
            self.assertTrue(len(list(pathlib.Path(export_dir).rglob('*.onnx'))) > 0, 'Export to onnx failed')
            self.assertTrue(len(list(pathlib.Path(export_dir).rglob('*.bin'))) > 0, 'Export to openvino failed')

    return ClassificationExportTestCase
def create_image_classification_test_case(**kwargs):
    """Build a TestCase class that fine-tunes a classification template.

    Keyword arguments are forwarded to ``create_test_case``.
    """
    expected_outputs_dir = os.path.join(os.path.dirname(__file__), '..', 'expected_outputs')
    TrainTestCase = create_test_case('image_classification',
                                     **kwargs,
                                     metric_keys=['accuracy'],
                                     expected_outputs_dir=expected_outputs_dir,
                                     batch_size=2)

    class ClassificationTrainTestCase(TrainTestCase):

        def do_finetuning(self, on_gpu):
            # Short fine-tuning run; checks only that a checkpoint appears.
            self.total_epochs = 5
            log_file = os.path.join(self.output_folder, 'test_finetuning.log')
            # Hide all GPUs from the subprocess when training on CPU.
            initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
            run_through_shell(
                f'{initial_command}'
                f'cd {self.template_folder};'
                f'python3 train.py'
                f' --train-ann-files {self.ann_file}'
                f' --train-data-roots {os.path.join(self.img_root, "train")}'
                f' --val-ann-files {self.ann_file}'
                f' --val-data-roots {os.path.join(self.img_root, "val")}'
                f' --load-weights snapshot.pth'
                f' --save-checkpoints-to {self.output_folder}'
                f' --gpu-num 1'
                f' --batch-size {self.batch_size}'
                f' --epochs {self.total_epochs}'
                f' | tee {log_file}')
            self.assertTrue(os.path.exists(os.path.join(self.output_folder, 'latest.pth')))

    return ClassificationTrainTestCase
import os
import pathlib
from ote.tests.test_case import (create_export_test_case,
create_test_case,
skip_if_cuda_not_available)
from ote.utils.misc import run_through_shell
def create_image_classification_export_test_case(**kwargs):
    """Build a TestCase class that checks ONNX / OpenVINO model export.

    Keyword arguments are forwarded to ``create_export_test_case``.
    """
    expected_outputs_dir = os.path.join(os.path.dirname(__file__), '..', 'expected_outputs')
    ExportTestCase = create_export_test_case('image_classification',
                                             **kwargs,
                                             metric_keys=['accuracy'],
                                             expected_outputs_dir=expected_outputs_dir)

    class ClassificationExportTestCase(ExportTestCase):

        def test_export_on_gpu(self):
            # Skipped entirely on machines without CUDA.
            skip_if_cuda_not_available()
            export_dir = os.path.join(self.output_folder, 'gpu_export')
            self.do_export(export_dir, on_gpu=True)

        def test_export_on_cpu(self):
            export_dir = os.path.join(self.output_folder, 'cpu_export')
            self.do_export(export_dir, on_gpu=False)

        def do_export(self, export_dir, on_gpu):
            # Run the template's export.py once, then verify the artefacts.
            if not os.path.exists(export_dir):
                # Hide all GPUs from the subprocess when exporting on CPU.
                initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
                run_through_shell(
                    f'{initial_command}'
                    f'cd {os.path.dirname(self.template_file)};'
                    f'pip install -r requirements.txt;'
                    f'python3 export.py --openvino'
                    f' --load-weights snapshot.pth'
                    f' --save-model-to {export_dir}'
                )
            self.assertTrue(len(list(pathlib.Path(export_dir).rglob('*.onnx'))) > 0, 'Export to onnx failed')
            self.assertTrue(len(list(pathlib.Path(export_dir).rglob('*.bin'))) > 0, 'Export to openvino failed')

    return ClassificationExportTestCase
def create_image_classification_test_case(**kwargs):
    """Build a TestCase class that fine-tunes a classification template.

    Keyword arguments are forwarded to ``create_test_case``.
    """
    expected_outputs_dir = os.path.join(os.path.dirname(__file__), '..', 'expected_outputs')
    TrainTestCase = create_test_case('image_classification',
                                     **kwargs,
                                     metric_keys=['accuracy'],
                                     expected_outputs_dir=expected_outputs_dir,
                                     batch_size=2)

    class ClassificationTrainTestCase(TrainTestCase):

        def do_finetuning(self, on_gpu):
            # Short fine-tuning run; checks only that a checkpoint appears.
            self.total_epochs = 5
            log_file = os.path.join(self.output_folder, 'test_finetuning.log')
            # Hide all GPUs from the subprocess when training on CPU.
            initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
            run_through_shell(
                f'{initial_command}'
                f'cd {self.template_folder};'
                f'python3 train.py'
                f' --train-ann-files {self.ann_file}'
                f' --train-data-roots {os.path.join(self.img_root, "train")}'
                f' --val-ann-files {self.ann_file}'
                f' --val-data-roots {os.path.join(self.img_root, "val")}'
                f' --load-weights snapshot.pth'
                f' --save-checkpoints-to {self.output_folder}'
                f' --gpu-num 1'
                f' --batch-size {self.batch_size}'
                f' --epochs {self.total_epochs}'
                f' | tee {log_file}')
            self.assertTrue(os.path.exists(os.path.join(self.output_folder, 'latest.pth')))

    return ClassificationTrainTestCase
import socket
import ssl
import datetime
import requests
import sys
import whois
from config import DOMAINS, DAYS_LIMIT_CERT, DAYS_LIMIT_DOMAIN, APITOKEN, CHATID
date_fmt = r'%b %d %H:%M:%S %Y %Z'
MESSAGE_CERTIFICATE_EXPIRED = "⚠️ SSL expired on {}"
MESSAGE_HOSTNAME_MISMATCH = "⚠️ SSL hostname mismatch on {}"
MESSAGE_EXCEPTION = "⚠️ SSL exception on {}: {}"
def send_message(text):
    """
    Post a message to the configured Telegram chat via the Bot API.

    :param text: message body to send
    """
    api_url = 'https://api.telegram.org/bot{}/sendMessage'.format(APITOKEN)
    payload = {
        'text': text,
        'chat_id': CHATID,
        'disable_web_page_preview': True
    }
    requests.post(api_url, json=payload)
def ssl_expiry_datetime(hostname):
    """
    Get SSL expiration date

    Source link: https://serverlesscode.com/post/ssl-expiration-alerts-with-lambda/

    :param hostname: hostname
    :return datetime object or None
    """
    context = ssl.create_default_context()
    conn = context.wrap_socket(
        socket.socket(socket.AF_INET),
        server_hostname=hostname,
    )
    # 3 second timeout because Lambda has runtime limitations
    conn.settimeout(3.0)
    try:
        conn.connect((hostname, 443))
        ssl_info = conn.getpeercert()
    except ssl.SSLError as e:
        # OpenSSL verify codes: 10 = certificate has expired,
        # 62 = hostname mismatch.
        # NOTE(review): verify_code/verify_message only exist on
        # SSLCertVerificationError; other SSLErrors would raise
        # AttributeError here — confirm intended.
        if e.verify_code == 10:
            send_message(MESSAGE_CERTIFICATE_EXPIRED.format(hostname))
        elif e.verify_code == 62:
            send_message(MESSAGE_HOSTNAME_MISMATCH.format(hostname))
        else:
            send_message(MESSAGE_EXCEPTION.format(hostname, e.verify_message))
        return None
    finally:
        # Always release the socket; it was previously leaked on every path.
        conn.close()
    # Parse the string from the certificate into a Python datetime object
    return datetime.datetime.strptime(ssl_info['notAfter'], date_fmt)
def check_ssl_time_left(domain):
"""
Count days left and generate a warning message
:param domain: domain
:return:
"""
cert_expire_at = ssl_expiry_datetime(domain)
if cert_expire_at is not None:
time_left = cert_expire_at - datetime.datetime.now()
message = 'SSL cert for {} has {}'.format(domain, days_left_to_format_string(time_left))
if time_left.days <= DAYS_LIMIT_CERT:
message = '{}'.format(message)
send_message(message)
print(message)
def days_left_to_format_string(timedelta):
"""
Calculate days left from timedelta and return string message
:param timedelta: timedelta object
:return: string message with the days left
"""
return '{} day{} left'.format(timedelta.days, ('s', '')[timedelta.days == 1])
if not APITOKEN:
print('No APITOKEN was found in config file.')
exit()
for domain in DOMAINS:
try:
check_ssl_time_left(domain)
w = whois.whois(domain)
expdays = 'Expiration date for {} has {}'.format(domain, days_left_to_format_string(w.expiration_date-datetime.datetime.now()))
print(expdays)
if (w.expiration_date-datetime.datetime.now()).days <= DAYS_LIMIT_DOMAIN:
send_message(w.expiration_date)
except Exception as e:
print("Unexpected error:", e) | main.py |
import socket
import ssl
import datetime
import requests
import sys
import whois
from config import DOMAINS, DAYS_LIMIT_CERT, DAYS_LIMIT_DOMAIN, APITOKEN, CHATID
date_fmt = r'%b %d %H:%M:%S %Y %Z'
MESSAGE_CERTIFICATE_EXPIRED = "⚠️ SSL expired on {}"
MESSAGE_HOSTNAME_MISMATCH = "⚠️ SSL hostname mismatch on {}"
MESSAGE_EXCEPTION = "⚠️ SSL exception on {}: {}"
def send_message(text):
"""
Send message to the Telegram via API
:param text: message
"""
url = 'https://api.telegram.org/bot{}/sendMessage'.format(APITOKEN)
data = {
'text': text,
'chat_id': CHATID,
'disable_web_page_preview': True
}
requests.post(url, json=data)
def ssl_expiry_datetime(hostname):
"""
Get SSL expiration date
Source link: https://serverlesscode.com/post/ssl-expiration-alerts-with-lambda/
:param hostname: hostname
:return datetime object or None
"""
context = ssl.create_default_context()
conn = context.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=hostname,
)
# 3 second timeout because Lambda has runtime limitations
conn.settimeout(3.0)
try:
conn.connect((hostname, 443))
ssl_info = conn.getpeercert()
except ssl.SSLError as e:
if e.verify_code == 10:
send_message(MESSAGE_CERTIFICATE_EXPIRED.format(hostname))
elif e.verify_code == 62:
send_message(MESSAGE_HOSTNAME_MISMATCH.format(hostname))
else:
send_message(MESSAGE_EXCEPTION.format(hostname, e.verify_message))
return None
# Parse the string from the certificate into a Python datetime object
return datetime.datetime.strptime(ssl_info['notAfter'], date_fmt)
def check_ssl_time_left(domain):
"""
Count days left and generate a warning message
:param domain: domain
:return:
"""
cert_expire_at = ssl_expiry_datetime(domain)
if cert_expire_at is not None:
time_left = cert_expire_at - datetime.datetime.now()
message = 'SSL cert for {} has {}'.format(domain, days_left_to_format_string(time_left))
if time_left.days <= DAYS_LIMIT_CERT:
message = '{}'.format(message)
send_message(message)
print(message)
def days_left_to_format_string(timedelta):
"""
Calculate days left from timedelta and return string message
:param timedelta: timedelta object
:return: string message with the days left
"""
return '{} day{} left'.format(timedelta.days, ('s', '')[timedelta.days == 1])
if not APITOKEN:
print('No APITOKEN was found in config file.')
exit()
for domain in DOMAINS:
try:
check_ssl_time_left(domain)
w = whois.whois(domain)
expdays = 'Expiration date for {} has {}'.format(domain, days_left_to_format_string(w.expiration_date-datetime.datetime.now()))
print(expdays)
if (w.expiration_date-datetime.datetime.now()).days <= DAYS_LIMIT_DOMAIN:
send_message(w.expiration_date)
except Exception as e:
print("Unexpected error:", e) | 0.398524 | 0.102529 |
import datetime
import json
from typing import Any, Dict, List, Optional
import pytest
from google.appengine.ext import ndb
from pyre_extensions import none_throws
from werkzeug.test import Client
from backend.api.trusted_api_auth_helper import TrustedApiAuthHelper
from backend.common.consts.alliance_color import AllianceColor
from backend.common.consts.auth_type import AuthType
from backend.common.consts.event_type import EventType
from backend.common.models.api_auth_access import ApiAuthAccess
from backend.common.models.event import Event
from backend.common.models.match import Match
AUTH_ID = "tEsT_id_0"
AUTH_SECRET = "321tEsTsEcReT"
REQUEST_PATH = "/api/trusted/v1/event/2014casj/matches/update"
def setup_event(
remap_teams: Optional[Dict[str, str]] = None,
timezone_id: Optional[str] = "America/Los_Angeles",
) -> None:
Event(
id="2014casj",
year=2014,
event_short="casj",
timezone_id=timezone_id,
start_date=datetime.datetime(2014, 4, 1),
end_date=datetime.datetime(2014, 4, 3),
event_type_enum=EventType.OFFSEASON,
remap_teams=remap_teams,
).put()
def setup_auth(access_types: List[AuthType]) -> None:
ApiAuthAccess(
id=AUTH_ID,
secret=AUTH_SECRET,
event_list=[ndb.Key(Event, "2014casj")],
auth_types_enum=access_types,
).put()
def get_auth_headers(request_path: str, request_body) -> Dict[str, str]:
return {
"X-TBA-Auth-Id": AUTH_ID,
"X-TBA-AUth-Sig": TrustedApiAuthHelper.compute_auth_signature(
AUTH_SECRET, request_path, request_body
),
}
def test_bad_event_key(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
resp = api_client.post(
"/api/trusted/v1/event/asdf/matches/update", data=json.dumps([])
)
assert resp.status_code == 404
def test_bad_event(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
resp = api_client.post(
"/api/trusted/v1/event/2015casj/matches/update", data=json.dumps([])
)
assert resp.status_code == 404
def test_bad_auth_type(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_INFO])
resp = api_client.post(
"/api/trusted/v1/event/2014casj/matches/update", data=json.dumps([])
)
assert resp.status_code == 401
def test_no_auth(api_client: Client) -> None:
setup_event()
request_body = json.dumps([])
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 401
@pytest.mark.parametrize(
"request_data",
[
"",
"not_json",
["is_not_dict"],
[{}],
[{"comp_level": "meow"}],
[{"comp_level": "qf", "set_number": "abc"}],
[{"comp_level": "qf", "set_number": 1, "match_number": "abc"}],
[{"comp_level": "qf", "set_number": 1, "match_number": 1, "alliances": "abc"}],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"green": {}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": []}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"score": 0, "teams": ["bad_team"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": "abc"}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": [], "score": 0, "surrogates": ["bad_team"]}
},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": 0, "surrogates": ["frc1"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": 0, "dqs": ["bad_team"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": 0, "dqs": ["frc1"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"score_breakdown": "blah",
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"score_breakdown": {"green": {}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"score_breakdown": {"red": {"bad_key": 0}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"time_utc": "foo",
}
],
],
)
def test_bad_json(api_client: Client, request_data: Any) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
request_body = json.dumps(request_data)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 400
def test_matches_update(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
# add one match
matches = [
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
"time_utc": "2014-08-31T16:00:00",
}
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
assert "2014casj_qm1" in [m.key.id() for m in db_matches]
# add another match
matches = [
{
"comp_level": "f",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {
"teams": ["frc1", "frc2", "frc3"],
"score": 250,
"surrogates": ["frc1"],
"dqs": ["frc2"],
},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 260},
},
"score_breakdown": {
"red": {
"auto": 20,
"assist": 40,
"truss+catch": 20,
"teleop_goal+foul": 20,
},
"blue": {
"auto": 40,
"assist": 60,
"truss+catch": 10,
"teleop_goal+foul": 40,
},
},
"time_string": "10:00 AM",
"time_utc": "2014-08-31T17:00:00",
}
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 2
assert "2014casj_qm1" in [m.key.id() for m in db_matches]
assert "2014casj_f1m1" in [m.key.id() for m in db_matches]
# verify match data
match = Match.get_by_id("2014casj_f1m1")
assert match is not None
assert match.time == datetime.datetime(2014, 8, 31, 17, 0)
assert match.time_string == "10:00 AM"
assert match.alliances[AllianceColor.RED]["teams"] == ["frc1", "frc2", "frc3"]
assert match.alliances[AllianceColor.RED]["score"] == 250
assert match.alliances[AllianceColor.RED]["surrogates"] == ["frc1"]
assert match.alliances[AllianceColor.RED]["dqs"] == ["frc1", "frc2", "frc3"]
breakdown = match.score_breakdown
assert breakdown is not None
assert breakdown[AllianceColor.RED]["truss+catch"] == 20
assert match.alliances[AllianceColor.BLUE]["teams"] == ["frc4", "frc5", "frc6"]
assert match.alliances[AllianceColor.BLUE]["score"] == 260
assert match.alliances[AllianceColor.BLUE]["surrogates"] == []
assert match.alliances[AllianceColor.BLUE]["dqs"] == []
def test_calculate_match_time(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
matches = [
# day 1
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
{
"comp_level": "qm",
"set_number": 1,
"match_number": 2,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "12:00 PM",
},
{
"comp_level": "qm",
"set_number": 1,
"match_number": 3,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "4:00 PM",
},
# day 2
{
"comp_level": "qm",
"set_number": 1,
"match_number": 4,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 4
# verify match data
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 2, 16, 0)
match = Match.get_by_id("2014casj_qm2")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 2, 19, 0)
match = Match.get_by_id("2014casj_qm3")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 2, 23, 0)
match = Match.get_by_id("2014casj_qm4")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 3, 16, 0)
def test_calculate_match_time_bad_time(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
matches = [
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "blahhh",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
# verify match data - we should have skipped over the time
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.time is None
def test_calculate_match_time_skip_no_timezone(api_client: Client) -> None:
setup_event(timezone_id=None)
setup_auth(access_types=[AuthType.EVENT_MATCHES])
matches = [
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
# verify match data - we should have skipped over the time
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.time is None
def test_add_match_remapteams(api_client: Client) -> None:
setup_event(remap_teams={"frc6": "frc254B"})
setup_auth(access_types=[AuthType.EVENT_MATCHES])
# add one match
matches = [
# day 1
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
# verify match data
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.alliances[AllianceColor.RED]["teams"] == ["frc1", "frc2", "frc3"]
assert match.alliances[AllianceColor.BLUE]["teams"] == ["frc4", "frc5", "frc254B"]
assert match.team_key_names == ["frc1", "frc2", "frc3", "frc4", "frc5", "frc254B"] | src/backend/api/handlers/tests/update_event_matches_test.py | import datetime
import json
from typing import Any, Dict, List, Optional
import pytest
from google.appengine.ext import ndb
from pyre_extensions import none_throws
from werkzeug.test import Client
from backend.api.trusted_api_auth_helper import TrustedApiAuthHelper
from backend.common.consts.alliance_color import AllianceColor
from backend.common.consts.auth_type import AuthType
from backend.common.consts.event_type import EventType
from backend.common.models.api_auth_access import ApiAuthAccess
from backend.common.models.event import Event
from backend.common.models.match import Match
AUTH_ID = "tEsT_id_0"
AUTH_SECRET = "321tEsTsEcReT"
REQUEST_PATH = "/api/trusted/v1/event/2014casj/matches/update"
def setup_event(
remap_teams: Optional[Dict[str, str]] = None,
timezone_id: Optional[str] = "America/Los_Angeles",
) -> None:
Event(
id="2014casj",
year=2014,
event_short="casj",
timezone_id=timezone_id,
start_date=datetime.datetime(2014, 4, 1),
end_date=datetime.datetime(2014, 4, 3),
event_type_enum=EventType.OFFSEASON,
remap_teams=remap_teams,
).put()
def setup_auth(access_types: List[AuthType]) -> None:
ApiAuthAccess(
id=AUTH_ID,
secret=AUTH_SECRET,
event_list=[ndb.Key(Event, "2014casj")],
auth_types_enum=access_types,
).put()
def get_auth_headers(request_path: str, request_body) -> Dict[str, str]:
return {
"X-TBA-Auth-Id": AUTH_ID,
"X-TBA-AUth-Sig": TrustedApiAuthHelper.compute_auth_signature(
AUTH_SECRET, request_path, request_body
),
}
def test_bad_event_key(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
resp = api_client.post(
"/api/trusted/v1/event/asdf/matches/update", data=json.dumps([])
)
assert resp.status_code == 404
def test_bad_event(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
resp = api_client.post(
"/api/trusted/v1/event/2015casj/matches/update", data=json.dumps([])
)
assert resp.status_code == 404
def test_bad_auth_type(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_INFO])
resp = api_client.post(
"/api/trusted/v1/event/2014casj/matches/update", data=json.dumps([])
)
assert resp.status_code == 401
def test_no_auth(api_client: Client) -> None:
setup_event()
request_body = json.dumps([])
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 401
@pytest.mark.parametrize(
"request_data",
[
"",
"not_json",
["is_not_dict"],
[{}],
[{"comp_level": "meow"}],
[{"comp_level": "qf", "set_number": "abc"}],
[{"comp_level": "qf", "set_number": 1, "match_number": "abc"}],
[{"comp_level": "qf", "set_number": 1, "match_number": 1, "alliances": "abc"}],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"green": {}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": []}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"score": 0, "teams": ["bad_team"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": "abc"}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": [], "score": 0, "surrogates": ["bad_team"]}
},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": 0, "surrogates": ["frc1"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": 0, "dqs": ["bad_team"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {"red": {"teams": [], "score": 0, "dqs": ["frc1"]}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"score_breakdown": "blah",
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"score_breakdown": {"green": {}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"score_breakdown": {"red": {"bad_key": 0}},
}
],
[
{
"comp_level": "qf",
"set_number": 1,
"match_number": 1,
"alliances": {},
"time_utc": "foo",
}
],
],
)
def test_bad_json(api_client: Client, request_data: Any) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
request_body = json.dumps(request_data)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 400
def test_matches_update(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
# add one match
matches = [
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
"time_utc": "2014-08-31T16:00:00",
}
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
assert "2014casj_qm1" in [m.key.id() for m in db_matches]
# add another match
matches = [
{
"comp_level": "f",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {
"teams": ["frc1", "frc2", "frc3"],
"score": 250,
"surrogates": ["frc1"],
"dqs": ["frc2"],
},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 260},
},
"score_breakdown": {
"red": {
"auto": 20,
"assist": 40,
"truss+catch": 20,
"teleop_goal+foul": 20,
},
"blue": {
"auto": 40,
"assist": 60,
"truss+catch": 10,
"teleop_goal+foul": 40,
},
},
"time_string": "10:00 AM",
"time_utc": "2014-08-31T17:00:00",
}
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 2
assert "2014casj_qm1" in [m.key.id() for m in db_matches]
assert "2014casj_f1m1" in [m.key.id() for m in db_matches]
# verify match data
match = Match.get_by_id("2014casj_f1m1")
assert match is not None
assert match.time == datetime.datetime(2014, 8, 31, 17, 0)
assert match.time_string == "10:00 AM"
assert match.alliances[AllianceColor.RED]["teams"] == ["frc1", "frc2", "frc3"]
assert match.alliances[AllianceColor.RED]["score"] == 250
assert match.alliances[AllianceColor.RED]["surrogates"] == ["frc1"]
assert match.alliances[AllianceColor.RED]["dqs"] == ["frc1", "frc2", "frc3"]
breakdown = match.score_breakdown
assert breakdown is not None
assert breakdown[AllianceColor.RED]["truss+catch"] == 20
assert match.alliances[AllianceColor.BLUE]["teams"] == ["frc4", "frc5", "frc6"]
assert match.alliances[AllianceColor.BLUE]["score"] == 260
assert match.alliances[AllianceColor.BLUE]["surrogates"] == []
assert match.alliances[AllianceColor.BLUE]["dqs"] == []
def test_calculate_match_time(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
matches = [
# day 1
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
{
"comp_level": "qm",
"set_number": 1,
"match_number": 2,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "12:00 PM",
},
{
"comp_level": "qm",
"set_number": 1,
"match_number": 3,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "4:00 PM",
},
# day 2
{
"comp_level": "qm",
"set_number": 1,
"match_number": 4,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 4
# verify match data
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 2, 16, 0)
match = Match.get_by_id("2014casj_qm2")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 2, 19, 0)
match = Match.get_by_id("2014casj_qm3")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 2, 23, 0)
match = Match.get_by_id("2014casj_qm4")
assert match is not None
assert match.time == datetime.datetime(2014, 4, 3, 16, 0)
def test_calculate_match_time_bad_time(api_client: Client) -> None:
setup_event()
setup_auth(access_types=[AuthType.EVENT_MATCHES])
matches = [
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "blahhh",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
# verify match data - we should have skipped over the time
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.time is None
def test_calculate_match_time_skip_no_timezone(api_client: Client) -> None:
setup_event(timezone_id=None)
setup_auth(access_types=[AuthType.EVENT_MATCHES])
matches = [
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
# verify match data - we should have skipped over the time
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.time is None
def test_add_match_remapteams(api_client: Client) -> None:
setup_event(remap_teams={"frc6": "frc254B"})
setup_auth(access_types=[AuthType.EVENT_MATCHES])
# add one match
matches = [
# day 1
{
"comp_level": "qm",
"set_number": 1,
"match_number": 1,
"alliances": {
"red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
"blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
},
"time_string": "9:00 AM",
},
]
request_body = json.dumps(matches)
response = api_client.post(
REQUEST_PATH,
headers=get_auth_headers(REQUEST_PATH, request_body),
data=request_body,
)
assert response.status_code == 200
event = none_throws(Event.get_by_id("2014casj"))
db_matches = Match.query(Match.event == event.key).fetch()
assert len(db_matches) == 1
# verify match data
match = Match.get_by_id("2014casj_qm1")
assert match is not None
assert match.alliances[AllianceColor.RED]["teams"] == ["frc1", "frc2", "frc3"]
assert match.alliances[AllianceColor.BLUE]["teams"] == ["frc4", "frc5", "frc254B"]
assert match.team_key_names == ["frc1", "frc2", "frc3", "frc4", "frc5", "frc254B"] | 0.616243 | 0.251659 |
# imports
import numpy as np
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = "cpu"
print(device)
from google.colab import drive
drive.mount('/content/drive')
import torch, torchvision, torchvision.transforms as transforms
from torchvision.datasets import FashionMNIST
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
fashiontrain = FashionMNIST(root='/content/drive/My Drive/Colab Notebooks/ML Assignment 3', train=True, download=True, transform=transform)
fashiontest = FashionMNIST(root='/content/drive/My Drive/Colab Notebooks/ML Assignment 3', train=False, download=True, transform=transform)
svm_train_y = fashiontrain.targets.detach().numpy().reshape(-1, 1)
svm_test_y = fashiontest.targets.detach().numpy().reshape(-1, 1)
print(svm_train_y.shape)
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainloader = torch.utils.data.DataLoader(fashiontrain, batch_size=100, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(fashiontest, batch_size=100, shuffle=False, num_workers=2)
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
# Defining CNN
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.svm_x = None
self.svm_x_is_None = True
self.conv1 = nn.Conv2d(1, 6, 4)
self.conv2 = nn.Conv2d(6, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.count = 0
self.create_svm_x = False
self.create_svm_test_x = False
self.svm_test_x = None
self.svm_test_x_is_None = True
self.fc1 = nn.Linear(16 * 5 * 5, 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
self.count += 1
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = self.fc1(x)
_x = x.cpu().detach().numpy()
if self.create_svm_x:
if self.svm_x_is_None:
self.svm_x = _x
self.svm_x_is_None = False
else:
self.svm_x = np.concatenate((self.svm_x, _x), axis=0)
if self.create_svm_test_x:
if self.svm_test_x_is_None:
self.svm_test_x = _x
self.svm_test_x_is_None = False
else:
self.svm_test_x = np.concatenate((self.svm_test_x, _x), axis=0)
x = F.relu(x)
x = self.fc2(x)
return x
net = Net()
net.to(device)
# Training CNN
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
print(len(trainloader))
net.create_svm_x = True
# NOTE(review): this section reappears verbatim further down (the file looks
# like a concatenated notebook dump); the review notes here apply to both copies.
for epoch in range(10):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data[0].to(device), data[1].to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 200 == 199:  # print every 200 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 200))
            running_loss = 0.0
net.create_svm_x = False
# NOTE(review): svm_x is appended to on every forward pass of all 10 epochs,
# so it ends up with 10x as many rows as svm_train_y has labels; the later
# svClassifier.fit(...) will fail on a length mismatch -- confirm whether
# feature capture was meant for the last epoch only.
print(net.svm_x.shape)
print(net.count)
print('Finished Training')
# Testing CNN
dataiter = iter(testloader)
# NOTE(review): DataLoader iterators have no .next() method in Python 3;
# this line raises AttributeError -- should be next(dataiter).
images, labels = dataiter.next()
outputs = net(images)
correct = 0
total = 0
net.create_svm_test_x = True
with torch.no_grad():
    for data in testloader:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted = predicted.cpu()
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(net.svm_test_x.shape)
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
import torch
from sklearn.metrics import confusion_matrix
# Single-batch loaders: one iteration yields the entire split at once.
trainloader1 = torch.utils.data.DataLoader(fashiontrain, batch_size=60000, shuffle=True, num_workers=2)
testloader1 = torch.utils.data.DataLoader(fashiontest, batch_size=10000, shuffle=False, num_workers=2)
# NOTE(review): create_svm_test_x is still True here, so these extra passes
# also append to net.svm_test_x -- the later score() call then sees more rows
# than svm_test_y has labels.
with torch.no_grad():
    for data in testloader1:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted = predicted.cpu()
        cm_test = confusion_matrix(labels,predicted)
with torch.no_grad():
    for data in trainloader1:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted = predicted.cpu()
        cm_train = confusion_matrix(labels,predicted)
print(cm_train)
print(cm_test)
# Defining SVM
import sklearn.svm as svm
svClassifier = svm.SVC(kernel='rbf', gamma='scale', verbose=True)
svClassifier.fit(net.svm_x, svm_train_y.reshape(-1, ))
# Generating test dataset for SVM
svClassifier.score(net.svm_test_x, svm_test_y.reshape(-1, ))
svm_test_y.reshape(-1, )
svm_test_y.shape
svm_test_y.shape | Problem 2/cnn.py |
# ---- Imports & data setup (notebook cells flattened into script order) ----
# BUG FIX: `torch` was referenced below before `import torch` appeared later
# in the script, so a top-to-bottom run raised NameError; all imports are now
# hoisted to the top.
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import FashionMNIST
from google.colab import drive

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = "cpu"  # deliberately forced to CPU (overrides the CUDA probe above)
print(device)

drive.mount('/content/drive')

# FashionMNIST is single-channel; normalize with one mean/std pair.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
fashiontrain = FashionMNIST(root='/content/drive/My Drive/Colab Notebooks/ML Assignment 3', train=True, download=True, transform=transform)
fashiontest = FashionMNIST(root='/content/drive/My Drive/Colab Notebooks/ML Assignment 3', train=False, download=True, transform=transform)

# Integer class labels for the downstream SVM, shape (N, 1).
svm_train_y = fashiontrain.targets.detach().numpy().reshape(-1, 1)
svm_test_y = fashiontest.targets.detach().numpy().reshape(-1, 1)
print(svm_train_y.shape)

# NOTE: a second, 3-channel `transform` was defined here in the original but
# never used (both datasets above were already constructed with the 1-channel
# transform); it was removed because its (0.5, 0.5, 0.5) stats did not match
# the grayscale data anyway.

trainloader = torch.utils.data.DataLoader(fashiontrain, batch_size=100, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(fashiontest, batch_size=100, shuffle=False, num_workers=2)
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
class Net(nn.Module):
    """Small CNN for 28x28 grayscale images.

    Besides classifying into 10 classes, the network can record the raw fc1
    activations (pre-ReLU) of every forward pass into `svm_x` / `svm_test_x`
    when the corresponding `create_svm_*` flag is set, so an SVM can later be
    trained on those features.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature-capture state: separate accumulators for train and test data.
        self.svm_x = None
        self.svm_x_is_None = True
        self.svm_test_x = None
        self.svm_test_x_is_None = True
        self.create_svm_x = False
        self.create_svm_test_x = False
        self.count = 0  # number of forward passes performed
        # Spatial sizes: 28 -> 25 (conv1) -> 12 (pool) -> 10 (conv2) -> 5 (pool).
        self.conv1 = nn.Conv2d(1, 6, 4)
        self.conv2 = nn.Conv2d(6, 16, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 5 * 5, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        self.count += 1
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = self.fc1(out)
        # Detach the fc1 activations to numpy for optional feature capture.
        feats = out.cpu().detach().numpy()
        if self.create_svm_x:
            if self.svm_x_is_None:
                self.svm_x = feats
                self.svm_x_is_None = False
            else:
                self.svm_x = np.concatenate((self.svm_x, feats), axis=0)
        if self.create_svm_test_x:
            if self.svm_test_x_is_None:
                self.svm_test_x = feats
                self.svm_test_x_is_None = False
            else:
                self.svm_test_x = np.concatenate((self.svm_test_x, feats), axis=0)
        # Classification head: ReLU then final linear layer.
        return self.fc2(F.relu(out))
# ---- Training CNN ----
net = Net()
net.to(device)
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
print(len(trainloader))
NUM_EPOCHS = 10
for epoch in range(NUM_EPOCHS):  # loop over the dataset multiple times
    # BUG FIX: capture fc1 features only during the LAST epoch. The original
    # enabled capture before the loop, accumulating 10 x 60,000 rows in
    # net.svm_x while svm_train_y has only 60,000 labels, which made the
    # svClassifier.fit(...) below fail on a length mismatch.
    net.create_svm_x = (epoch == NUM_EPOCHS - 1)
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 200 == 199:  # print every 200 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 200))
            running_loss = 0.0
net.create_svm_x = False
print(net.svm_x.shape)
print(net.count)
print('Finished Training')

# ---- Testing CNN ----
dataiter = iter(testloader)
# BUG FIX: DataLoader iterators have no .next() method in Python 3; use the
# builtin next() instead.
images, labels = next(dataiter)
outputs = net(images)
correct = 0
total = 0
net.create_svm_test_x = True  # the warm-up forward above is NOT captured
with torch.no_grad():
    for data in testloader:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted = predicted.cpu()
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(net.svm_test_x.shape)
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

# ---- Confusion matrices over the full splits (single-batch loaders) ----
import torch
from sklearn.metrics import confusion_matrix
trainloader1 = torch.utils.data.DataLoader(fashiontrain, batch_size=60000, shuffle=True, num_workers=2)
testloader1 = torch.utils.data.DataLoader(fashiontest, batch_size=10000, shuffle=False, num_workers=2)
# BUG FIX: stop feature capture before these extra passes; the original left
# create_svm_test_x enabled, growing svm_test_x to 80,000 rows while
# svm_test_y has only 10,000 labels, breaking the score() call below.
net.create_svm_test_x = False
with torch.no_grad():
    for data in testloader1:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted = predicted.cpu()
        cm_test = confusion_matrix(labels, predicted)
with torch.no_grad():
    for data in trainloader1:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted = predicted.cpu()
        cm_train = confusion_matrix(labels, predicted)
print(cm_train)
print(cm_test)

# ---- SVM on the captured fc1 features ----
import sklearn.svm as svm
svClassifier = svm.SVC(kernel='rbf', gamma='scale', verbose=True)
svClassifier.fit(net.svm_x, svm_train_y.reshape(-1, ))
svClassifier.score(net.svm_test_x, svm_test_y.reshape(-1, ))
svm_test_y.shape | 0.861538 | 0.588978 |
import datetime
from enum import Enum
from typing import List
import demoji
import requests
from sqlalchemy import func, or_, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.orm.attributes import InstrumentedAttribute
from definitions import MYSQL_TEXT_COLUMN_MAX_LENGTH, MYSQL_MEDIUM_TEXT_COLUMN_MAX_LENGTH, \
SQLALCHEMY_CREATE_ENGINE_KWARGS, MYSQL_URL_PARAMS_STRING, PYTHON_SIDE_DB_ENCODING
from environment_settings import DB_ENGINE_BASE_URL
from src.models.feedback import Feedback
from src.models.listing_observation import ListingObservation
from src.models.scraping_session import ScrapingSession
from src.models.seller import Seller
from src.models.seller_observation import SellerObservation
from src.models.settings import Settings
from src.models.user_credential import UserCredential
DB_ENGINE_URL = DB_ENGINE_BASE_URL + MYSQL_URL_PARAMS_STRING
def kill_all_db_conns_for_user_and_current_ip(db_username):
    """Kill all but the oldest DB connection from this host for *db_username*."""
    # Public IP of this machine as seen by the DB server.
    external_ip = requests.get('https://checkip.amazonaws.com').text.split(":")[0].strip()
    # NOTE(review): values are interpolated straight into SQL; acceptable for
    # the internal ip/username here, but do not reuse with untrusted input.
    stmt = "SELECT id, time FROM information_schema.processlist WHERE host LIKE \'{0}:%\' AND user=\'{1}\' ORDER BY " \
           "time ASC".format(
        external_ip, db_username)
    engine = get_engine()
    with engine.connect() as con:
        rs = con.execute(stmt)
        rows = [row for row in rs]
        # rows[0] (the longest-running connection) is spared; kill the rest.
        for row in rows[1:]:
            kill_statement = "KILL " + str(row[0]) + ";"
            con.execute(kill_statement)
class EXTREMAL_TIMESTAMP(Enum):
    # Which extreme of the child-row timestamps to fetch.
    LOWEST = 0
    HIGHEST = 1
def _get_timestamp_on_scraping_session_row(db_session: Session, session_without_time,
                                           extremal_type: EXTREMAL_TIMESTAMP):
    """Return the lowest/highest created_date among the session's child rows
    (seller observations, listing observations, feedbacks)."""
    if extremal_type is EXTREMAL_TIMESTAMP.HIGHEST:
        sqlalchemy_min_max_func = func.max
        python_min_max_func = max
        # Fallback for NULL created_date: a date so low it never wins max().
        extreme_date = datetime.datetime.fromtimestamp(0).strftime("%Y-%m-%d %H:%M:%S")  # very low date
    elif extremal_type is EXTREMAL_TIMESTAMP.LOWEST:
        sqlalchemy_min_max_func = func.min
        python_min_max_func = min
        # NOTE(review): mixes naive now() and utcnow(); max() of the two just
        # picks whichever clock is ahead -- confirm whether the DB stores
        # local time or UTC.
        extreme_date = max(datetime.datetime.now(), datetime.datetime.utcnow()).strftime(
            "%Y-%m-%d %H:%M:%S")  # very high date
    else:
        raise Exception
    extremal_timestamps_of_child_objects = []
    extremal_timestamps_of_child_objects.append(db_session.query(sqlalchemy_min_max_func(
        func.coalesce(SellerObservation.created_date, extreme_date))).filter(
        SellerObservation.session_id == session_without_time.id).scalar())
    extremal_timestamps_of_child_objects.append(db_session.query(sqlalchemy_min_max_func(
        func.coalesce(ListingObservation.created_date, extreme_date))).filter(
        ListingObservation.session_id == session_without_time.id).scalar())
    extremal_timestamps_of_child_objects.append(db_session.query(sqlalchemy_min_max_func(
        func.coalesce(Feedback.created_date, extreme_date))).filter(
        Feedback.session_id == session_without_time.id).scalar())
    # scalar() is None when a session has no rows of that child type at all.
    valid_extremal_timestamps_of_child_objects = [timestamp for timestamp in extremal_timestamps_of_child_objects if
                                                  timestamp is not None]
    assert len(valid_extremal_timestamps_of_child_objects) >= 1
    extremal_timestamp = python_min_max_func(valid_extremal_timestamps_of_child_objects)
    return extremal_timestamp
def _fix_time_columns_on_broken_scraping_session_rows(db_session: Session, market_id: str):
    """Backfill missing time_started/time_finished from child-row timestamps."""
    with db_session.no_autoflush:
        sessions_without_time = db_session.query(ScrapingSession).filter(ScrapingSession.market == market_id).filter(
            or_(ScrapingSession.time_started == None, ScrapingSession.time_finished == None)).all()
        sessions_without_start_time = [session for session in sessions_without_time if session.time_started is None]
        for session_without_start_time in sessions_without_start_time:
            start_time = _get_timestamp_on_scraping_session_row(db_session, session_without_start_time,
                                                                EXTREMAL_TIMESTAMP.LOWEST)
            session_without_start_time.time_started = start_time
        sessions_without_finish_time = [session for session in sessions_without_time if session.time_finished is None]
        for session_without_finish_time in sessions_without_finish_time:
            finish_time = _get_timestamp_on_scraping_session_row(db_session, session_without_finish_time,
                                                                 EXTREMAL_TIMESTAMP.HIGHEST)
            session_without_finish_time.time_finished = finish_time
def _get_broken_sellers(db_session: Session, market_id: str):
    # Sellers whose scrape never completed (registration_date still NULL).
    return db_session.query(Seller).filter(Seller.registration_date == None, Seller.market == market_id)
def _get_broken_listings(db_session: Session, market_id: str):
    # Listing observations whose scrape never completed (url still NULL).
    return db_session.query(ListingObservation).filter(ListingObservation.url == None).join(ScrapingSession).filter(ScrapingSession.market == market_id)
def _get_scraping_sessions_with_no_children(db_session: Session, market_id: str):
    """Return a query for scraping sessions that produced no observations.

    NOTE(review): issues two COUNT queries per session (N+1 pattern); fine for
    maintenance-sized tables, but a grouped aggregate would scale better.
    """
    scraping_sessions = db_session.query(ScrapingSession).filter(ScrapingSession.market == market_id).all()
    ids_scraping_sessions_with_no_children = []
    for scraping_session in scraping_sessions:
        listing_count = db_session.query(func.count(ListingObservation.id)).filter(
            ListingObservation.session_id == scraping_session.id).scalar()
        seller_observation_count = db_session.query(func.count(SellerObservation.id)).filter(
            SellerObservation.session_id == scraping_session.id).scalar()
        if listing_count + seller_observation_count == 0:
            ids_scraping_sessions_with_no_children.append(scraping_session.id)
    return db_session.query(ScrapingSession).filter(ScrapingSession.id.in_(ids_scraping_sessions_with_no_children))
def _get_prompt_str(broken_sellers: List[Seller], broken_listings: List[ListingObservation], market_id: str) -> str:
    """Build the interactive confirmation prompt listing the ids of all
    broken seller/listing rows that are about to be deleted."""
    seller_ids_str = ""
    if broken_sellers:
        seller_ids_str = "Broken seller ids: \n\n" + "\n".join(str(s.id) for s in broken_sellers)
    listing_ids_str = ""
    if broken_listings:
        listing_ids_str = "Broken listing ids: \n\n" + "\n".join(str(l.id) for l in broken_listings)
    header = f"{market_id}\n\n{len(broken_sellers)} broken sellers and {len(broken_listings)} broken listings to be deleted."
    # Assemble the exact same multi-line layout the original f-string produced.
    return "\n".join([header, seller_ids_str, listing_ids_str, "Proceed? (Y/N)", ""])
def _release_busy_user_credentials(db_session, market_id):
    """Mark every credential of this market as free (thread_id == -1 means unowned)."""
    busy_user_credentials: List[UserCredential] = db_session.query(UserCredential).filter(UserCredential.thread_id != -1, UserCredential.market_id == market_id).all()
    for busy_user_credential in busy_user_credentials:
        busy_user_credential.thread_id = -1
    db_session.flush()
def fix_integrity_of_database(db_session: Session, market_id: str):
    """Interactive maintenance pass run before a new scraping session."""
    # remove incomplete sellers
    # remove incomplete listing_observations
    # remove scraping_sessions with no children
    # fix broken time columns on scraping sessions
    # release busy user credentials for this market
    broken_sellers = _get_broken_sellers(db_session, market_id)
    broken_listings = _get_broken_listings(db_session, market_id)
    # NOTE(review): every .all() call below re-executes its SELECT; the result
    # could be fetched once and reused.
    if len(broken_sellers.all() + broken_listings.all()) > 0:
        prompt_str = _get_prompt_str(broken_sellers.all(), broken_listings.all(), market_id)
        ans = input(prompt_str)
        if ans == "Y":
            # Bulk deletes by id; synchronize_session=False is safe because the
            # session is committed (or closed) before these objects are reused.
            db_session.query(Seller).filter(Seller.id.in_([seller.id for seller in broken_sellers.all()])).delete(synchronize_session=False)
            db_session.query(ListingObservation).filter(ListingObservation.id.in_([listing.id for listing in broken_listings.all()])).delete(synchronize_session=False)
        else:
            print("Please manually ensure the integrity of the database before starting new scraping session.")
            db_session.expire_all()
            db_session.close()
            # Hard-stops the whole program by design (maintenance precondition failed).
            exit()
    scraping_sessions_with_no_children = _get_scraping_sessions_with_no_children(db_session, market_id)
    scraping_sessions_with_no_children.delete(synchronize_session=False)
    _fix_time_columns_on_broken_scraping_session_rows(db_session, market_id)
    _release_busy_user_credentials(db_session, market_id)
    db_session.commit()
def sanitize_error(error_text, vars):
    """Append '&&' after every occurrence of each name in *vars* inside *error_text*.

    This sanitation is necessary because of a bug in either SQLAlchemy or
    MySQL: if a compiled statement is sent as a string parameter in a row
    insertion, field names are not escaped properly inside the string field.
    NOTE: the parameter name `vars` shadows the builtin but is kept for
    keyword-argument compatibility.
    """
    sanitized = error_text
    for name in vars:
        sanitized = sanitized.replace(name, name + "&&")
    return sanitized
def _shorten_and_sanitize_text(max_length, text):
    """Strip emoji from *text* and truncate it so its encoded form fits in
    *max_length* bytes without cutting a multi-byte character in half.

    Args:
        max_length: maximum allowed size of the encoded text, in bytes.
        text: the string to sanitize.

    Returns:
        The sanitized text, whose encoding is at most *max_length* bytes.
    """
    # Round-trip through bytes with 'ignore' to drop unencodable characters,
    # then remove emoji.
    text = demoji.replace(bytes(text, PYTHON_SIDE_DB_ENCODING).decode(PYTHON_SIDE_DB_ENCODING, 'ignore').strip())
    encoded_text = text.encode(PYTHON_SIDE_DB_ENCODING)
    if len(encoded_text) <= max_length:
        return text
    # BUG FIX: decode the truncated bytes with errors='ignore' so a cut that
    # lands inside a multi-byte sequence simply drops the partial character.
    # The original backed up only over a trailing *lead* byte (& 0xc0 == 0xc0);
    # a cut landing after a *continuation* byte (0b10xxxxxx) of a 3- or 4-byte
    # character left a truncated sequence and made the strict decode raise
    # UnicodeDecodeError.
    text = encoded_text[:max_length].decode(PYTHON_SIDE_DB_ENCODING, 'ignore')
    assert (len(text.encode(PYTHON_SIDE_DB_ENCODING)) <= max_length)
    return text
def shorten_and_sanitize_for_text_column(text):
    """Sanitize *text* to fit a MySQL TEXT column."""
    return _shorten_and_sanitize_text(MYSQL_TEXT_COLUMN_MAX_LENGTH, text)
def shorten_and_sanitize_for_medium_text_column(text):
    """Sanitize *text* to fit a MySQL MEDIUMTEXT column."""
    return _shorten_and_sanitize_text(MYSQL_MEDIUM_TEXT_COLUMN_MAX_LENGTH, text)
def get_column_name(column: InstrumentedAttribute) -> str:
    # NOTE(review): reaches into the private name-mangled attribute
    # `_Annotated__element`; fragile across SQLAlchemy versions.
    return column.expression._Annotated__element.description
def get_engine(echo: bool=None):
    """Create a new engine for DB_ENGINE_URL; *echo* overrides the configured default."""
    create_engine_kwargs = dict(SQLALCHEMY_CREATE_ENGINE_KWARGS)
    if echo is not None: create_engine_kwargs.update({'echo': echo})
    engine = create_engine(DB_ENGINE_URL, **create_engine_kwargs)
    return engine
def get_db_session(engine):
    """Create a fresh Session bound to *engine*, starting from a clean transaction."""
    Session = sessionmaker(
        bind=engine)
    db_session = Session()
    # Discard any implicit transaction state before handing the session out.
    db_session.rollback()
    return db_session
def get_settings(market_name: str, db_session=None) -> Settings:
    """Return the Settings row for *market_name*.

    If *db_session* is not supplied, a temporary engine/session is created,
    the row is expunged (usable detached), and the session is closed before
    returning.

    Raises:
        IntegrityError: if no settings row exists for the market.
    """
    if db_session:
        existing_settings = db_session.query(Settings).filter(Settings.market == market_name).first()
    else:
        engine = get_engine()
        db_session = get_db_session(engine)
        existing_settings = db_session.query(Settings).filter(Settings.market == market_name).first()
        db_session.expunge_all()
        db_session.close()
    if existing_settings:
        return existing_settings
    # BUG FIX: the original `raise IntegrityError` raised the bare class;
    # SQLAlchemy's IntegrityError (a DBAPIError) requires
    # (statement, params, orig) constructor arguments, so instantiation
    # failed with TypeError and masked the real "no settings row" condition.
    # Callers catching IntegrityError are unaffected.
    raise IntegrityError(f"No settings row found for market '{market_name}'.", None, None)
def set_settings(db_session: Session, market_name: str, refill_queue_when_complete: bool = False) -> None:
    """Create a Settings row for *market_name* if one does not already exist."""
    existing_settings = db_session.query(Settings).filter(Settings.market == market_name).first()
    if not existing_settings:
        settings = Settings(refill_queue_when_complete=refill_queue_when_complete, market=market_name)
        db_session.add(settings)
db_session.commit() | src/db_utils.py | import datetime
from enum import Enum
from typing import List
import demoji
import requests
from sqlalchemy import func, or_, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.orm.attributes import InstrumentedAttribute
from definitions import MYSQL_TEXT_COLUMN_MAX_LENGTH, MYSQL_MEDIUM_TEXT_COLUMN_MAX_LENGTH, \
SQLALCHEMY_CREATE_ENGINE_KWARGS, MYSQL_URL_PARAMS_STRING, PYTHON_SIDE_DB_ENCODING
from environment_settings import DB_ENGINE_BASE_URL
from src.models.feedback import Feedback
from src.models.listing_observation import ListingObservation
from src.models.scraping_session import ScrapingSession
from src.models.seller import Seller
from src.models.seller_observation import SellerObservation
from src.models.settings import Settings
from src.models.user_credential import UserCredential
DB_ENGINE_URL = DB_ENGINE_BASE_URL + MYSQL_URL_PARAMS_STRING
def kill_all_db_conns_for_user_and_current_ip(db_username):
    """Kill all but the oldest DB connection from this host for *db_username*."""
    external_ip = requests.get('https://checkip.amazonaws.com').text.split(":")[0].strip()
    # NOTE(review): values interpolated straight into SQL; internal use only.
    stmt = "SELECT id, time FROM information_schema.processlist WHERE host LIKE \'{0}:%\' AND user=\'{1}\' ORDER BY " \
           "time ASC".format(
        external_ip, db_username)
    engine = get_engine()
    with engine.connect() as con:
        rs = con.execute(stmt)
        rows = [row for row in rs]
        # The oldest connection (rows[0]) is spared.
        for row in rows[1:]:
            kill_statement = "KILL " + str(row[0]) + ";"
            con.execute(kill_statement)
class EXTREMAL_TIMESTAMP(Enum):
    # Which extreme of the child-row timestamps to fetch.
    LOWEST = 0
    HIGHEST = 1
def _get_timestamp_on_scraping_session_row(db_session: Session, session_without_time,
                                           extremal_type: EXTREMAL_TIMESTAMP):
    """Return the lowest/highest created_date among the session's child rows."""
    if extremal_type is EXTREMAL_TIMESTAMP.HIGHEST:
        sqlalchemy_min_max_func = func.max
        python_min_max_func = max
        # Fallback for NULL created_date: never wins max().
        extreme_date = datetime.datetime.fromtimestamp(0).strftime("%Y-%m-%d %H:%M:%S")  # very low date
    elif extremal_type is EXTREMAL_TIMESTAMP.LOWEST:
        sqlalchemy_min_max_func = func.min
        python_min_max_func = min
        # NOTE(review): mixes naive now() and utcnow() -- confirm DB timezone.
        extreme_date = max(datetime.datetime.now(), datetime.datetime.utcnow()).strftime(
            "%Y-%m-%d %H:%M:%S")  # very high date
    else:
        raise Exception
    extremal_timestamps_of_child_objects = []
    extremal_timestamps_of_child_objects.append(db_session.query(sqlalchemy_min_max_func(
        func.coalesce(SellerObservation.created_date, extreme_date))).filter(
        SellerObservation.session_id == session_without_time.id).scalar())
    extremal_timestamps_of_child_objects.append(db_session.query(sqlalchemy_min_max_func(
        func.coalesce(ListingObservation.created_date, extreme_date))).filter(
        ListingObservation.session_id == session_without_time.id).scalar())
    extremal_timestamps_of_child_objects.append(db_session.query(sqlalchemy_min_max_func(
        func.coalesce(Feedback.created_date, extreme_date))).filter(
        Feedback.session_id == session_without_time.id).scalar())
    valid_extremal_timestamps_of_child_objects = [timestamp for timestamp in extremal_timestamps_of_child_objects if
                                                  timestamp is not None]
    assert len(valid_extremal_timestamps_of_child_objects) >= 1
    extremal_timestamp = python_min_max_func(valid_extremal_timestamps_of_child_objects)
    return extremal_timestamp
def _fix_time_columns_on_broken_scraping_session_rows(db_session: Session, market_id: str):
    """Backfill missing time_started/time_finished from child-row timestamps."""
    with db_session.no_autoflush:
        sessions_without_time = db_session.query(ScrapingSession).filter(ScrapingSession.market == market_id).filter(
            or_(ScrapingSession.time_started == None, ScrapingSession.time_finished == None)).all()
        sessions_without_start_time = [session for session in sessions_without_time if session.time_started is None]
        for session_without_start_time in sessions_without_start_time:
            start_time = _get_timestamp_on_scraping_session_row(db_session, session_without_start_time,
                                                                EXTREMAL_TIMESTAMP.LOWEST)
            session_without_start_time.time_started = start_time
        sessions_without_finish_time = [session for session in sessions_without_time if session.time_finished is None]
        for session_without_finish_time in sessions_without_finish_time:
            finish_time = _get_timestamp_on_scraping_session_row(db_session, session_without_finish_time,
                                                                 EXTREMAL_TIMESTAMP.HIGHEST)
            session_without_finish_time.time_finished = finish_time
def _get_broken_sellers(db_session: Session, market_id: str):
    # Sellers whose scrape never completed (registration_date still NULL).
    return db_session.query(Seller).filter(Seller.registration_date == None, Seller.market == market_id)
def _get_broken_listings(db_session: Session, market_id: str):
    # Listing observations whose scrape never completed (url still NULL).
    return db_session.query(ListingObservation).filter(ListingObservation.url == None).join(ScrapingSession).filter(ScrapingSession.market == market_id)
def _get_scraping_sessions_with_no_children(db_session: Session, market_id: str):
    """Return a query for scraping sessions that produced no observations.
    NOTE(review): two COUNT queries per session (N+1 pattern)."""
    scraping_sessions = db_session.query(ScrapingSession).filter(ScrapingSession.market == market_id).all()
    ids_scraping_sessions_with_no_children = []
    for scraping_session in scraping_sessions:
        listing_count = db_session.query(func.count(ListingObservation.id)).filter(
            ListingObservation.session_id == scraping_session.id).scalar()
        seller_observation_count = db_session.query(func.count(SellerObservation.id)).filter(
            SellerObservation.session_id == scraping_session.id).scalar()
        if listing_count + seller_observation_count == 0:
            ids_scraping_sessions_with_no_children.append(scraping_session.id)
    return db_session.query(ScrapingSession).filter(ScrapingSession.id.in_(ids_scraping_sessions_with_no_children))
def _get_prompt_str(broken_sellers: List[Seller], broken_listings: List[ListingObservation], market_id: str) -> str:
    """Build the interactive confirmation prompt listing the ids of all
    broken seller/listing rows about to be deleted."""
    if len(broken_sellers) > 0:
        seller_ids_str = "Broken seller ids: \n\n" + "\n".join([str(seller.id) for seller in broken_sellers])
    else:
        seller_ids_str = ""
    if len(broken_listings) > 0:
        listing_ids_str = "Broken listing ids: \n\n" + "\n".join([str(listing.id) for listing in broken_listings])
    else:
        listing_ids_str = ""
    prompt_str = f"""{market_id}\n\n{len(broken_sellers)} broken sellers and {len(broken_listings)} broken listings to be deleted.
{seller_ids_str}
{listing_ids_str}
Proceed? (Y/N)
"""
    return prompt_str
def _release_busy_user_credentials(db_session, market_id):
    """Mark every credential of this market as free (thread_id == -1 means unowned)."""
    busy_user_credentials: List[UserCredential] = db_session.query(UserCredential).filter(UserCredential.thread_id != -1, UserCredential.market_id == market_id).all()
    for busy_user_credential in busy_user_credentials:
        busy_user_credential.thread_id = -1
    db_session.flush()
def fix_integrity_of_database(db_session: Session, market_id: str):
    """Interactive maintenance pass run before a new scraping session."""
    # remove incomplete sellers
    # remove incomplete listing_observations
    # remove scraping_sessions with no children
    # fix broken time columns on scraping sessions
    # release busy user credentials for this market
    broken_sellers = _get_broken_sellers(db_session, market_id)
    broken_listings = _get_broken_listings(db_session, market_id)
    # NOTE(review): every .all() call below re-executes its SELECT.
    if len(broken_sellers.all() + broken_listings.all()) > 0:
        prompt_str = _get_prompt_str(broken_sellers.all(), broken_listings.all(), market_id)
        ans = input(prompt_str)
        if ans == "Y":
            db_session.query(Seller).filter(Seller.id.in_([seller.id for seller in broken_sellers.all()])).delete(synchronize_session=False)
            db_session.query(ListingObservation).filter(ListingObservation.id.in_([listing.id for listing in broken_listings.all()])).delete(synchronize_session=False)
        else:
            print("Please manually ensure the integrity of the database before starting new scraping session.")
            db_session.expire_all()
            db_session.close()
            # Hard-stops the whole program by design.
            exit()
    scraping_sessions_with_no_children = _get_scraping_sessions_with_no_children(db_session, market_id)
    scraping_sessions_with_no_children.delete(synchronize_session=False)
    _fix_time_columns_on_broken_scraping_session_rows(db_session, market_id)
    _release_busy_user_credentials(db_session, market_id)
    db_session.commit()
def sanitize_error(error_text, vars):
    """Tag every occurrence of each name in *vars* with a trailing '&&'.

    Works around a bug in either SQLAlchemy or MySQL: when a compiled
    statement is sent as a string parameter in a row insertion, field names
    are not escaped properly inside the string field.
    """
    for identifier in vars:
        tagged = identifier + "&&"
        error_text = error_text.replace(identifier, tagged)
    return error_text
def _shorten_and_sanitize_text(max_length, text):
    """Strip emoji from *text* and truncate it so its encoded form fits in
    *max_length* bytes without cutting a multi-byte character in half.

    Args:
        max_length: maximum allowed size of the encoded text, in bytes.
        text: the string to sanitize.

    Returns:
        The sanitized text, whose encoding is at most *max_length* bytes.
    """
    # Round-trip through bytes with 'ignore' to drop unencodable characters,
    # then remove emoji.
    text = demoji.replace(bytes(text, PYTHON_SIDE_DB_ENCODING).decode(PYTHON_SIDE_DB_ENCODING, 'ignore').strip())
    encoded_text = text.encode(PYTHON_SIDE_DB_ENCODING)
    if len(encoded_text) <= max_length:
        return text
    # BUG FIX: decode the truncated bytes with errors='ignore' so a cut that
    # lands inside a multi-byte sequence drops the partial character. The
    # original backed up only over a trailing *lead* byte (& 0xc0 == 0xc0); a
    # cut after a *continuation* byte (0b10xxxxxx) of a 3-/4-byte character
    # left a truncated sequence and made the strict decode raise
    # UnicodeDecodeError.
    text = encoded_text[:max_length].decode(PYTHON_SIDE_DB_ENCODING, 'ignore')
    assert (len(text.encode(PYTHON_SIDE_DB_ENCODING)) <= max_length)
    return text
def shorten_and_sanitize_for_text_column(text):
    """Sanitize *text* to fit a MySQL TEXT column."""
    return _shorten_and_sanitize_text(MYSQL_TEXT_COLUMN_MAX_LENGTH, text)
def shorten_and_sanitize_for_medium_text_column(text):
    """Sanitize *text* to fit a MySQL MEDIUMTEXT column."""
    return _shorten_and_sanitize_text(MYSQL_MEDIUM_TEXT_COLUMN_MAX_LENGTH, text)
def get_column_name(column: InstrumentedAttribute) -> str:
    # NOTE(review): relies on the private, name-mangled `_Annotated__element`
    # attribute; fragile across SQLAlchemy versions.
    return column.expression._Annotated__element.description
def get_engine(echo: bool=None):
    """Create a new engine for DB_ENGINE_URL; *echo* overrides the configured default."""
    create_engine_kwargs = dict(SQLALCHEMY_CREATE_ENGINE_KWARGS)
    if echo is not None: create_engine_kwargs.update({'echo': echo})
    engine = create_engine(DB_ENGINE_URL, **create_engine_kwargs)
    return engine
def get_db_session(engine):
    """Create a fresh Session bound to *engine*, starting from a clean transaction."""
    Session = sessionmaker(
        bind=engine)
    db_session = Session()
    # Discard any implicit transaction state before handing the session out.
    db_session.rollback()
    return db_session
def get_settings(market_name: str, db_session=None) -> Settings:
    """Return the Settings row for *market_name*.

    If *db_session* is not supplied, a temporary engine/session is created,
    the row is expunged (usable detached), and the session is closed before
    returning.

    Raises:
        IntegrityError: if no settings row exists for the market.
    """
    if db_session:
        existing_settings = db_session.query(Settings).filter(Settings.market == market_name).first()
    else:
        engine = get_engine()
        db_session = get_db_session(engine)
        existing_settings = db_session.query(Settings).filter(Settings.market == market_name).first()
        db_session.expunge_all()
        db_session.close()
    if existing_settings:
        return existing_settings
    # BUG FIX: the original `raise IntegrityError` raised the bare class;
    # SQLAlchemy's IntegrityError (a DBAPIError) requires
    # (statement, params, orig) constructor arguments, so instantiation
    # failed with TypeError and masked the real "no settings row" condition.
    # Callers catching IntegrityError are unaffected.
    raise IntegrityError(f"No settings row found for market '{market_name}'.", None, None)
def set_settings(db_session: Session, market_name: str, refill_queue_when_complete: bool = False) -> None:
    """Create a Settings row for *market_name* if one does not already exist."""
    existing_settings = db_session.query(Settings).filter(Settings.market == market_name).first()
    if not existing_settings:
        settings = Settings(refill_queue_when_complete=refill_queue_when_complete, market=market_name)
        db_session.add(settings)
db_session.commit() | 0.57093 | 0.144269 |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 414723703
"""
"""
random actions, total chaos
"""
board = gamma_new(8, 8, 6, 11)
assert board is not None
assert gamma_move(board, 1, 7, 6) == 1
assert gamma_move(board, 1, 6, 2) == 1
assert gamma_busy_fields(board, 1) == 2
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 4, 0, 2) == 1
assert gamma_move(board, 5, 1, 3) == 1
assert gamma_busy_fields(board, 5) == 1
assert gamma_move(board, 6, 7, 2) == 1
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 2, 4, 0) == 1
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 4, 2, 1) == 1
assert gamma_free_fields(board, 4) == 54
assert gamma_move(board, 5, 0, 0) == 1
assert gamma_busy_fields(board, 5) == 2
assert gamma_move(board, 6, 2, 5) == 1
assert gamma_move(board, 6, 3, 1) == 1
assert gamma_move(board, 1, 5, 5) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 3, 7) == 1
assert gamma_move(board, 4, 5, 7) == 1
assert gamma_move(board, 5, 7, 2) == 0
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_move(board, 6, 0, 3) == 1
assert gamma_move(board, 6, 4, 5) == 1
assert gamma_busy_fields(board, 6) == 5
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 4, 7, 7) == 1
assert gamma_move(board, 5, 0, 5) == 1
assert gamma_move(board, 5, 1, 0) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 4, 4) == 1
assert gamma_move(board, 6, 7, 4) == 1
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 6, 2) == 0
assert gamma_move(board, 3, 5, 3) == 1
assert gamma_move(board, 3, 3, 6) == 1
assert gamma_move(board, 4, 1, 7) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 5, 4) == 1
assert gamma_move(board, 6, 3, 3) == 1
assert gamma_move(board, 6, 4, 1) == 1
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 5, 1) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 4) == 1
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_busy_fields(board, 5) == 5
assert gamma_move(board, 6, 4, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 3, 7, 6) == 0
assert gamma_move(board, 3, 2, 6) == 1
assert gamma_golden_move(board, 3, 0, 1) == 0
assert gamma_move(board, 4, 0, 7) == 1
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_move(board, 5, 0, 0) == 0
assert gamma_move(board, 6, 2, 2) == 1
assert gamma_move(board, 1, 6, 2) == 0
assert gamma_move(board, 3, 5, 3) == 0
assert gamma_move(board, 3, 3, 7) == 0
assert gamma_move(board, 4, 6, 6) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_golden_move(board, 4, 0, 4) == 0
assert gamma_move(board, 5, 2, 4) == 0
assert gamma_busy_fields(board, 5) == 5
assert gamma_golden_move(board, 5, 4, 3) == 1
assert gamma_move(board, 6, 0, 5) == 0
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 2, 5, 3) == 0
board791313350 = gamma_board(board)
assert board791313350 is not None
assert board791313350 == ("44.3.4.4\n"
".2336241\n"
"5.6.61..\n"
".32465.6\n"
"65.653..\n"
"4.61.116\n"
"..4661..\n"
"55.12...\n")
del board791313350
board791313350 = None
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 3, 4, 6) == 0
assert gamma_move(board, 4, 5, 7) == 0
assert gamma_move(board, 6, 0, 7) == 0
assert gamma_move(board, 6, 4, 2) == 1
assert gamma_free_fields(board, 6) == 22
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_busy_fields(board, 1) == 8
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 5, 3) == 0
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 4, 1, 6) == 0
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 5, 2, 1) == 0
assert gamma_move(board, 5, 6, 3) == 1
assert gamma_move(board, 6, 2, 1) == 0
assert gamma_move(board, 6, 5, 0) == 1
assert gamma_move(board, 1, 4, 6) == 0
board901236666 = gamma_board(board)
assert board901236666 is not None
assert board901236666 == ("44.3.4.4\n"
".2336241\n"
"5.6.61..\n"
".32465.6\n"
"65.6535.\n"
"4.616116\n"
"..4661..\n"
"551126..\n")
del board901236666
board901236666 = None
assert gamma_move(board, 2, 5, 3) == 0
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 5, 6, 1) == 1
assert gamma_golden_move(board, 5, 6, 2) == 0
assert gamma_move(board, 6, 5, 1) == 0
assert gamma_move(board, 6, 1, 5) == 1
assert gamma_golden_move(board, 6, 6, 6) == 1
assert gamma_move(board, 1, 0, 7) == 0
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_move(board, 4, 0, 6) == 1
board618007408 = gamma_board(board)
assert board618007408 is not None
assert board618007408 == ("44.3.4.4\n"
"42336261\n"
"566.61..\n"
".32465.6\n"
"65.6535.\n"
"4.616116\n"
".346615.\n"
"551126..\n")
del board618007408
board618007408 = None
assert gamma_move(board, 5, 7, 0) == 1
assert gamma_move(board, 5, 0, 7) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_free_fields(board, 3) == 13
assert gamma_move(board, 4, 0, 6) == 0
assert gamma_move(board, 5, 7, 6) == 0
assert gamma_move(board, 6, 1, 0) == 0
assert gamma_move(board, 6, 4, 1) == 0
assert gamma_move(board, 1, 6, 7) == 1
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 3, 4, 0) == 0
board156220217 = gamma_board(board)
assert board156220217 is not None
assert board156220217 == ("44.3.414\n"
"42336261\n"
"566.61..\n"
".32465.6\n"
"65.6535.\n"
"43616116\n"
".346615.\n"
"551126.5\n")
del board156220217
board156220217 = None
assert gamma_move(board, 4, 5, 0) == 0
assert gamma_move(board, 4, 0, 5) == 0
board840750507 = gamma_board(board)
assert board840750507 is not None
assert board840750507 == ("44.3.414\n"
"42336261\n"
"566.61..\n"
".32465.6\n"
"65.6535.\n"
"43616116\n"
".346615.\n"
"551126.5\n")
del board840750507
board840750507 = None
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_move(board, 5, 7, 6) == 0
assert gamma_free_fields(board, 5) == 12
assert gamma_move(board, 6, 7, 4) == 0
assert gamma_busy_fields(board, 6) == 15
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 4, 4, 6) == 0
assert gamma_move(board, 5, 6, 1) == 0
assert gamma_move(board, 6, 7, 2) == 0
assert gamma_move(board, 6, 6, 4) == 1
assert gamma_move(board, 1, 5, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 6) == 0
assert gamma_move(board, 2, 4, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 4, 7, 4) == 0
assert gamma_move(board, 5, 5, 3) == 0
assert gamma_move(board, 5, 5, 1) == 0
assert gamma_move(board, 6, 4, 0) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 2, 5, 3) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_move(board, 4, 3, 7) == 0
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_move(board, 5, 5, 5) == 0
assert gamma_move(board, 6, 7, 6) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_busy_fields(board, 1) == 9
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 3, 7) == 0
assert gamma_move(board, 3, 2, 6) == 0
assert gamma_move(board, 4, 5, 6) == 0
assert gamma_move(board, 5, 4, 0) == 0
assert gamma_move(board, 6, 4, 0) == 0
assert gamma_move(board, 6, 6, 1) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_golden_move(board, 2, 7, 6) == 1
assert gamma_move(board, 3, 7, 4) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 4, 1, 7) == 0
assert gamma_busy_fields(board, 4) == 8
assert gamma_move(board, 5, 7, 4) == 0
assert gamma_move(board, 5, 7, 7) == 0
assert gamma_move(board, 6, 5, 7) == 0
assert gamma_busy_fields(board, 6) == 16
assert gamma_move(board, 1, 3, 7) == 0
assert gamma_move(board, 1, 2, 7) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 5, 7) == 0
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 7) == 0
assert gamma_golden_move(board, 4, 1, 5) == 1
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_move(board, 5, 1, 4) == 0
assert gamma_move(board, 6, 0, 6) == 0
assert gamma_move(board, 6, 0, 1) == 0
assert gamma_free_fields(board, 6) == 9
assert gamma_golden_move(board, 6, 0, 2) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 1, 6, 7) == 0
assert gamma_move(board, 2, 3, 7) == 0
gamma_delete(board) | z2/part2/interactive/jm/random_fuzzy_arrows_1/414723703.py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 414723703
"""
"""
random actions, total chaos
"""
board = gamma_new(8, 8, 6, 11)
assert board is not None
assert gamma_move(board, 1, 7, 6) == 1
assert gamma_move(board, 1, 6, 2) == 1
assert gamma_busy_fields(board, 1) == 2
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 4, 0, 2) == 1
assert gamma_move(board, 5, 1, 3) == 1
assert gamma_busy_fields(board, 5) == 1
assert gamma_move(board, 6, 7, 2) == 1
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 2, 4, 0) == 1
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 4, 2, 1) == 1
assert gamma_free_fields(board, 4) == 54
assert gamma_move(board, 5, 0, 0) == 1
assert gamma_busy_fields(board, 5) == 2
assert gamma_move(board, 6, 2, 5) == 1
assert gamma_move(board, 6, 3, 1) == 1
assert gamma_move(board, 1, 5, 5) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 3, 7) == 1
assert gamma_move(board, 4, 5, 7) == 1
assert gamma_move(board, 5, 7, 2) == 0
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_move(board, 6, 0, 3) == 1
assert gamma_move(board, 6, 4, 5) == 1
assert gamma_busy_fields(board, 6) == 5
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 4, 7, 7) == 1
assert gamma_move(board, 5, 0, 5) == 1
assert gamma_move(board, 5, 1, 0) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 4, 4) == 1
assert gamma_move(board, 6, 7, 4) == 1
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 6, 2) == 0
assert gamma_move(board, 3, 5, 3) == 1
assert gamma_move(board, 3, 3, 6) == 1
assert gamma_move(board, 4, 1, 7) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 5, 4) == 1
assert gamma_move(board, 6, 3, 3) == 1
assert gamma_move(board, 6, 4, 1) == 1
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 5, 1) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 4) == 1
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_busy_fields(board, 5) == 5
assert gamma_move(board, 6, 4, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 3, 7, 6) == 0
assert gamma_move(board, 3, 2, 6) == 1
assert gamma_golden_move(board, 3, 0, 1) == 0
assert gamma_move(board, 4, 0, 7) == 1
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_move(board, 5, 0, 0) == 0
assert gamma_move(board, 6, 2, 2) == 1
assert gamma_move(board, 1, 6, 2) == 0
assert gamma_move(board, 3, 5, 3) == 0
assert gamma_move(board, 3, 3, 7) == 0
assert gamma_move(board, 4, 6, 6) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_golden_move(board, 4, 0, 4) == 0
assert gamma_move(board, 5, 2, 4) == 0
assert gamma_busy_fields(board, 5) == 5
assert gamma_golden_move(board, 5, 4, 3) == 1
assert gamma_move(board, 6, 0, 5) == 0
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 2, 5, 3) == 0
board791313350 = gamma_board(board)
assert board791313350 is not None
assert board791313350 == ("44.3.4.4\n"
".2336241\n"
"5.6.61..\n"
".32465.6\n"
"65.653..\n"
"4.61.116\n"
"..4661..\n"
"55.12...\n")
del board791313350
board791313350 = None
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 3, 4, 6) == 0
assert gamma_move(board, 4, 5, 7) == 0
assert gamma_move(board, 6, 0, 7) == 0
assert gamma_move(board, 6, 4, 2) == 1
assert gamma_free_fields(board, 6) == 22
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_busy_fields(board, 1) == 8
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 5, 3) == 0
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 4, 1, 6) == 0
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 5, 2, 1) == 0
assert gamma_move(board, 5, 6, 3) == 1
assert gamma_move(board, 6, 2, 1) == 0
assert gamma_move(board, 6, 5, 0) == 1
assert gamma_move(board, 1, 4, 6) == 0
board901236666 = gamma_board(board)
assert board901236666 is not None
assert board901236666 == ("44.3.4.4\n"
".2336241\n"
"5.6.61..\n"
".32465.6\n"
"65.6535.\n"
"4.616116\n"
"..4661..\n"
"551126..\n")
del board901236666
board901236666 = None
assert gamma_move(board, 2, 5, 3) == 0
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 5, 6, 1) == 1
assert gamma_golden_move(board, 5, 6, 2) == 0
assert gamma_move(board, 6, 5, 1) == 0
assert gamma_move(board, 6, 1, 5) == 1
assert gamma_golden_move(board, 6, 6, 6) == 1
assert gamma_move(board, 1, 0, 7) == 0
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_move(board, 4, 0, 6) == 1
board618007408 = gamma_board(board)
assert board618007408 is not None
assert board618007408 == ("44.3.4.4\n"
"42336261\n"
"566.61..\n"
".32465.6\n"
"65.6535.\n"
"4.616116\n"
".346615.\n"
"551126..\n")
del board618007408
board618007408 = None
assert gamma_move(board, 5, 7, 0) == 1
assert gamma_move(board, 5, 0, 7) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_free_fields(board, 3) == 13
assert gamma_move(board, 4, 0, 6) == 0
assert gamma_move(board, 5, 7, 6) == 0
assert gamma_move(board, 6, 1, 0) == 0
assert gamma_move(board, 6, 4, 1) == 0
assert gamma_move(board, 1, 6, 7) == 1
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 3, 4, 0) == 0
board156220217 = gamma_board(board)
assert board156220217 is not None
assert board156220217 == ("44.3.414\n"
"42336261\n"
"566.61..\n"
".32465.6\n"
"65.6535.\n"
"43616116\n"
".346615.\n"
"551126.5\n")
del board156220217
board156220217 = None
assert gamma_move(board, 4, 5, 0) == 0
assert gamma_move(board, 4, 0, 5) == 0
board840750507 = gamma_board(board)
assert board840750507 is not None
assert board840750507 == ("44.3.414\n"
"42336261\n"
"566.61..\n"
".32465.6\n"
"65.6535.\n"
"43616116\n"
".346615.\n"
"551126.5\n")
del board840750507
board840750507 = None
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_move(board, 5, 7, 6) == 0
assert gamma_free_fields(board, 5) == 12
assert gamma_move(board, 6, 7, 4) == 0
assert gamma_busy_fields(board, 6) == 15
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 4, 4, 6) == 0
assert gamma_move(board, 5, 6, 1) == 0
assert gamma_move(board, 6, 7, 2) == 0
assert gamma_move(board, 6, 6, 4) == 1
assert gamma_move(board, 1, 5, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 6) == 0
assert gamma_move(board, 2, 4, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 4, 7, 4) == 0
assert gamma_move(board, 5, 5, 3) == 0
assert gamma_move(board, 5, 5, 1) == 0
assert gamma_move(board, 6, 4, 0) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 2, 5, 3) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_move(board, 4, 3, 7) == 0
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_move(board, 5, 5, 5) == 0
assert gamma_move(board, 6, 7, 6) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_busy_fields(board, 1) == 9
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 3, 7) == 0
assert gamma_move(board, 3, 2, 6) == 0
assert gamma_move(board, 4, 5, 6) == 0
assert gamma_move(board, 5, 4, 0) == 0
assert gamma_move(board, 6, 4, 0) == 0
assert gamma_move(board, 6, 6, 1) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_golden_move(board, 2, 7, 6) == 1
assert gamma_move(board, 3, 7, 4) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 4, 1, 7) == 0
assert gamma_busy_fields(board, 4) == 8
assert gamma_move(board, 5, 7, 4) == 0
assert gamma_move(board, 5, 7, 7) == 0
assert gamma_move(board, 6, 5, 7) == 0
assert gamma_busy_fields(board, 6) == 16
assert gamma_move(board, 1, 3, 7) == 0
assert gamma_move(board, 1, 2, 7) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 5, 7) == 0
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 7) == 0
assert gamma_golden_move(board, 4, 1, 5) == 1
assert gamma_move(board, 5, 5, 7) == 0
assert gamma_move(board, 5, 1, 4) == 0
assert gamma_move(board, 6, 0, 6) == 0
assert gamma_move(board, 6, 0, 1) == 0
assert gamma_free_fields(board, 6) == 9
assert gamma_golden_move(board, 6, 0, 2) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 1, 6, 7) == 0
assert gamma_move(board, 2, 3, 7) == 0
gamma_delete(board) | 0.81928 | 0.874292 |
"""Parameter info lib for resource completers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope.concepts import deps
from googlecloudsdk.calliope.concepts import handlers
from googlecloudsdk.calliope.concepts import util
from googlecloudsdk.command_lib.util import parameter_info_lib
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import typing # pylint: disable=unused-import
class ResourceParameterInfo(parameter_info_lib.ParameterInfoByConvention):
"""Gets parameter info for resource arguments."""
def __init__(self, resource_info, parsed_args, argument, **kwargs):
"""Initializes."""
self.resource_info = resource_info # type: handlers.ResourceInfo
super(ResourceParameterInfo, self).__init__(
parsed_args,
argument,
**kwargs)
def GetValue(self, parameter_name, check_properties=True):
# type: (...) -> typing.Optional[str]
"""Returns the program state value for parameter_name.
Args:
parameter_name: The parameter name.
check_properties: bool, whether to check the properties (unused).
Returns:
The program state value for parameter_name.
"""
del check_properties # Unused.
attribute_name = (
self.resource_info.resource_spec.AttributeName(parameter_name))
current = properties.VALUES.core.disable_prompts.GetBool()
# TODO(b/73073941): Come up with a better way to temporarily disable
# prompts. This prevents arbitrary fallthroughs with prompting from
# being run during completion.
properties.VALUES.core.disable_prompts.Set(True)
try:
return deps.Get(
attribute_name,
self.resource_info.BuildFullFallthroughsMap(),
parsed_args=self.parsed_args) if attribute_name else None
except deps.AttributeNotFoundError:
return None
finally:
properties.VALUES.core.disable_prompts.Set(current)
def _AttributeName(self, parameter_name):
# type: (...) -> typing.Optional[str]
"""Helper function to get the corresponding attribute for a parameter."""
return self.resource_info.resource_spec.AttributeName(parameter_name)
def GetDest(self, parameter_name, prefix=None):
# type: (...) -> typing.Optional[str]
"""Returns the argument parser dest name for parameter_name with prefix.
Args:
parameter_name: The resource parameter name.
prefix: The prefix name for parameter_name if not None.
Returns:
The argument parser dest name for parameter_name.
"""
del prefix # Unused.
attribute_name = self._AttributeName(parameter_name)
flag_name = self.resource_info.attribute_to_args_map.get(attribute_name,
None)
if not flag_name:
return None
return util.NamespaceFormat(flag_name)
def GetFlag(self, parameter_name, parameter_value=None,
check_properties=True, for_update=False):
# type: (...) -> typing.Optional[str]
"""Returns the command line flag for parameter.
If the flag is already present in program values, returns None.
If the user needs to specify it, returns a string in the form
'--flag-name=value'. If the flag is boolean and True, returns '--flag-name'.
Args:
parameter_name: The parameter name.
parameter_value: The parameter value if not None. Otherwise
GetValue() is used to get the value.
check_properties: Check property values if parsed_args don't help.
for_update: Return flag for a cache update command.
Returns:
The command line flag for the parameter, or None.
"""
del for_update
attribute_name = self._AttributeName(parameter_name)
flag_name = self.resource_info.attribute_to_args_map.get(
attribute_name, None)
if not flag_name:
# Project attributes are typically elided in favor of the global --project
# flag. If the project flag is brought under the concept argument umbrella
# this can be removed.
if attribute_name == 'project':
flag_name = '--project'
else:
return None
program_value = self.GetValue(parameter_name)
if parameter_value != program_value:
if parameter_value is None:
parameter_value = program_value
if parameter_value:
if parameter_value is True:
return flag_name
return '{name}={value}'.format(name=flag_name, value=parameter_value)
return None | lib/googlecloudsdk/command_lib/util/concepts/resource_parameter_info.py | """Parameter info lib for resource completers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope.concepts import deps
from googlecloudsdk.calliope.concepts import handlers
from googlecloudsdk.calliope.concepts import util
from googlecloudsdk.command_lib.util import parameter_info_lib
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import typing # pylint: disable=unused-import
class ResourceParameterInfo(parameter_info_lib.ParameterInfoByConvention):
"""Gets parameter info for resource arguments."""
def __init__(self, resource_info, parsed_args, argument, **kwargs):
"""Initializes."""
self.resource_info = resource_info # type: handlers.ResourceInfo
super(ResourceParameterInfo, self).__init__(
parsed_args,
argument,
**kwargs)
def GetValue(self, parameter_name, check_properties=True):
# type: (...) -> typing.Optional[str]
"""Returns the program state value for parameter_name.
Args:
parameter_name: The parameter name.
check_properties: bool, whether to check the properties (unused).
Returns:
The program state value for parameter_name.
"""
del check_properties # Unused.
attribute_name = (
self.resource_info.resource_spec.AttributeName(parameter_name))
current = properties.VALUES.core.disable_prompts.GetBool()
# TODO(b/73073941): Come up with a better way to temporarily disable
# prompts. This prevents arbitrary fallthroughs with prompting from
# being run during completion.
properties.VALUES.core.disable_prompts.Set(True)
try:
return deps.Get(
attribute_name,
self.resource_info.BuildFullFallthroughsMap(),
parsed_args=self.parsed_args) if attribute_name else None
except deps.AttributeNotFoundError:
return None
finally:
properties.VALUES.core.disable_prompts.Set(current)
def _AttributeName(self, parameter_name):
# type: (...) -> typing.Optional[str]
"""Helper function to get the corresponding attribute for a parameter."""
return self.resource_info.resource_spec.AttributeName(parameter_name)
def GetDest(self, parameter_name, prefix=None):
# type: (...) -> typing.Optional[str]
"""Returns the argument parser dest name for parameter_name with prefix.
Args:
parameter_name: The resource parameter name.
prefix: The prefix name for parameter_name if not None.
Returns:
The argument parser dest name for parameter_name.
"""
del prefix # Unused.
attribute_name = self._AttributeName(parameter_name)
flag_name = self.resource_info.attribute_to_args_map.get(attribute_name,
None)
if not flag_name:
return None
return util.NamespaceFormat(flag_name)
def GetFlag(self, parameter_name, parameter_value=None,
check_properties=True, for_update=False):
# type: (...) -> typing.Optional[str]
"""Returns the command line flag for parameter.
If the flag is already present in program values, returns None.
If the user needs to specify it, returns a string in the form
'--flag-name=value'. If the flag is boolean and True, returns '--flag-name'.
Args:
parameter_name: The parameter name.
parameter_value: The parameter value if not None. Otherwise
GetValue() is used to get the value.
check_properties: Check property values if parsed_args don't help.
for_update: Return flag for a cache update command.
Returns:
The command line flag for the parameter, or None.
"""
del for_update
attribute_name = self._AttributeName(parameter_name)
flag_name = self.resource_info.attribute_to_args_map.get(
attribute_name, None)
if not flag_name:
# Project attributes are typically elided in favor of the global --project
# flag. If the project flag is brought under the concept argument umbrella
# this can be removed.
if attribute_name == 'project':
flag_name = '--project'
else:
return None
program_value = self.GetValue(parameter_name)
if parameter_value != program_value:
if parameter_value is None:
parameter_value = program_value
if parameter_value:
if parameter_value is True:
return flag_name
return '{name}={value}'.format(name=flag_name, value=parameter_value)
return None | 0.666062 | 0.113875 |
import os
import time
import tinctest
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
from gppylib.commands.base import REMOTE
from gppylib.commands.gp import GpLogFilter
from gppylib.gparray import GpArray
from gppylib.db.dbconn import DbURL, connect
_DEFAULT_OUT_FILE = '/tmp/cluster.logs'
_DEFAULT_USER = os.environ.get('USER')
_DEFAULT_PORT = int(os.environ.get('PGPORT', 5432))
class GpLogException(Exception):
pass
class GpLog(object):
"""
This class lets users perform operations on logs from the cluster
"""
@staticmethod
def gather_log(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE,
dbname=_DEFAULT_USER, host='localhost', port=_DEFAULT_PORT,
user=_DEFAULT_USER,
errors_only=False, master_only=False):
"""
@type start_time: date
@param start_time: Start time of the duration for which logs should be gathered.
@type end_time: date
@param end_time: End time of the duration for which logs should be gathered.
@type out_file: string
@param out_file: File to which the gathered logs should be written to.Defaults to /tmp/cluster.logs
@type host: string
@param host: Host name for the connection.Defaults to localhost
@type port: integer
@param port: Port number for the connection to the cluster. Defaults to environment variable PGPORT.
@type user: string
@param user: Username for the connection to the cluster. Defaults to the current user.
@type dbname: string
@param dbname: Database name to use for the connection to the cluster. Defaults to the current user.
@type errors_only: boolean
@param errors_only: When set to true, gathers only errors from logs.Defaults to False.
@type master_only: boolean
@param master_only: When set to true, gathers logs only from the master host.
"""
try:
# TODO - When the cluster is down or this fails,
# no exception is thrown from run_sql_command
GpLog._gather_log_from_gp_toolkit(start_time=start_time,
end_time=end_time,
out_file=out_file,
host=host,
port=port,
user=user,
dbname=dbname,
errors_only=errors_only,
master_only=master_only)
except Exception, e:
tinctest.logger.exception("Gather log failed: %s" %e)
raise GpLogException("Gathering log failed. Make sure you can connect to the cluster.")
# TODO - use this as a backup if gp toolkit fails
"""
GpLog._gather_log_from_gp_log_filter(start_time=start_time,
end_time=end_time,
out_file=out_file,
host=host,
port=port,
user=user,
dbname=dbname,
errors_only=errors_only,
master_only=master_only)
"""
@staticmethod
def check_log_for_errors(start_time, end_time=None, host='localhost',
user=_DEFAULT_USER, port=_DEFAULT_PORT,
dbname=_DEFAULT_USER):
"""
Check logs in the given duration for any error messages.
Returns True / False based on whether errors were found in the logs.
@type start_time: date
@param start_time: Start time of the duration for which logs should be gathered.
@type end_time: date
@param end_time: End time of the duration for which logs should be gathered.
@type host: string
@param host: Host name for the connection.Defaults to localhost
@type port: integer
@param port: Port number for the connection to the cluster. Defaults to environment variable PGPORT.
@type user: string
@param user: Username for the connection to the cluster. Defaults to the current user.
@type dbname: string
@param dbname: Database name to use for the connection to the cluster. Defaults to the current user.
@rtype: boolean
@return: Returns True if there are errors found in the log in the given duration, False otherwise.
"""
format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time))
if end_time:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time))
else:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
tinctest.logger.info("checking log from %s to %s" % (format_start_time, format_end_time))
sql_cmd = "select logseverity,count(logseverity) from gp_toolkit.gp_log_database " + \
"where (logseverity=\'FATAL\' or logseverity=\'ERROR\' or logseverity='PANIC') " + \
"and (logtime >=\'%s\' and logtime <= \'%s\') group by logseverity;" % \
(format_start_time, format_end_time)
try:
result = PSQL.run_sql_command(sql_cmd, dbname=dbname, host=host, port=port, username=user, flags='-a -x')
if "RECORD" in result:
return True
except Exception, e:
tinctest.logger.exception("Failed while checking logs - %s" %e)
raise GpLogException("Failed while checking logs. Make sure you can connect to the cluster")
return False
@staticmethod
def _test_connection(host='localhost',port=_DEFAULT_PORT, user=_DEFAULT_USER,
dbname=_DEFAULT_USER):
try:
connect(DbURL(hostname=host,
port=port,
dbname=dbname,
username=user))
except Exception, expt:
tinctest.logger.error("Failed to connect to hostname %s, port %s, database %s, as user %s"
% (host, port, dbname, user))
tinctest.logger.exception(expt)
return False
return True
@staticmethod
def _gather_log_from_gp_log_filter(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE, host='localhost',
port=_DEFAULT_PORT, user=_DEFAULT_USER, dbname=_DEFAULT_USER, errors_only=False, master_only=False):
"""
This retrieves log messages from all segments that happened within the last
'duration' seconds. The format of start_time and end_time is YYYY-MM-DD [hh:mm[:ss]]
The tuples returned are (dbid, hostname, datadir, logdata). sorted by dbid.
Returns True/False based on whether matching log entries were found.
"""
format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(start_time))
if end_time:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(end_time))
else:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime())
tinctest.logger.info("Collecting log from %s to %s into the file -%s" % (format_start_time,format_end_time, out_file))
array = GpArray.initFromCatalog(DbURL(hostname=host, port=port, username=user, dbname=dbname), True)
log_chunks = []
for seg in array.getDbList():
tinctest.logger.info("Collecting log for segment - %s : %s" %(seg.getSegmentHostName(), seg.getSegmentContentId()))
if master_only and seg.getSegmentContentId() != -1:
continue
cmd = GpLogFilter('collect log chunk',
'\\`ls -rt %s | tail -1\\`' % os.path.join(seg.getSegmentDataDirectory(), 'pg_log', '*.csv'),
start=format_start_time, end=format_end_time,
trouble=errors_only,
ctxt=REMOTE,
remoteHost=seg.getSegmentHostName())
cmd.run()
rc = cmd.get_results().rc
if rc:
tinctest.logger.warning("Failed command execution %s : %s" %(cmd, cmd.get_results().stderr))
continue
log_data = cmd.get_results().stdout
if not log_data:
tinctest.logger.warning("No log data returned for the given time frame.")
else:
log_chunks.append((seg.getSegmentContentId(),
seg.getSegmentHostName(),
seg.getSegmentDataDirectory(),
log_data))
if log_chunks:
tinctest.logger.info("Writing log data to file - %s" %(out_file))
with open(out_file, 'w') as f:
for part in log_chunks:
f.write("-"*70)
f.write("\n DBID %s (%s:%s)\n" % (part[0], part[1], part[2]))
f.write("-"*70)
f.write("\n%s" % part[3])
f.write("\n\n")
@staticmethod
def _gather_log_from_gp_toolkit(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE, host='localhost',
port=_DEFAULT_PORT, user=_DEFAULT_USER, dbname=_DEFAULT_USER, errors_only=False, master_only=False):
"""
This retrieves log messages from all segments that happened within the last
'duration' seconds. The format of start_time and end_time is YYYY-MM-DD [hh:mm[:ss]]
This function gathers logs by querying external tables in gptoolkit. If the cluster is not up and running,
use _gather_log_from_gp_log_filter which uses the utility gplogfilter to gather logs.
"""
format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time))
if end_time:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time))
else:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
tinctest.logger.info("Collecting log from %s to %s" % (format_start_time,format_end_time))
sql_cmd = "select * from gp_toolkit.gp_log_database where logtime >=\'%s\' and logtime <= \'%s\';" % \
(format_start_time, format_end_time)
PSQL.run_sql_command(sql_cmd, out_file=out_file, dbname=dbname,
host=host, port=port, username=user, flags='-a -x') | src/test/tinc/tincrepo/mpp/lib/gplog.py | import os
import time
import tinctest
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
from gppylib.commands.base import REMOTE
from gppylib.commands.gp import GpLogFilter
from gppylib.gparray import GpArray
from gppylib.db.dbconn import DbURL, connect
_DEFAULT_OUT_FILE = '/tmp/cluster.logs'
_DEFAULT_USER = os.environ.get('USER')
_DEFAULT_PORT = int(os.environ.get('PGPORT', 5432))
class GpLogException(Exception):
pass
class GpLog(object):
"""
This class lets users perform operations on logs from the cluster
"""
@staticmethod
def gather_log(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE,
dbname=_DEFAULT_USER, host='localhost', port=_DEFAULT_PORT,
user=_DEFAULT_USER,
errors_only=False, master_only=False):
"""
@type start_time: date
@param start_time: Start time of the duration for which logs should be gathered.
@type end_time: date
@param end_time: End time of the duration for which logs should be gathered.
@type out_file: string
@param out_file: File to which the gathered logs should be written to.Defaults to /tmp/cluster.logs
@type host: string
@param host: Host name for the connection.Defaults to localhost
@type port: integer
@param port: Port number for the connection to the cluster. Defaults to environment variable PGPORT.
@type user: string
@param user: Username for the connection to the cluster. Defaults to the current user.
@type dbname: string
@param dbname: Database name to use for the connection to the cluster. Defaults to the current user.
@type errors_only: boolean
@param errors_only: When set to true, gathers only errors from logs.Defaults to False.
@type master_only: boolean
@param master_only: When set to true, gathers logs only from the master host.
"""
try:
# TODO - When the cluster is down or this fails,
# no exception is thrown from run_sql_command
GpLog._gather_log_from_gp_toolkit(start_time=start_time,
end_time=end_time,
out_file=out_file,
host=host,
port=port,
user=user,
dbname=dbname,
errors_only=errors_only,
master_only=master_only)
except Exception, e:
tinctest.logger.exception("Gather log failed: %s" %e)
raise GpLogException("Gathering log failed. Make sure you can connect to the cluster.")
# TODO - use this as a backup if gp toolkit fails
"""
GpLog._gather_log_from_gp_log_filter(start_time=start_time,
end_time=end_time,
out_file=out_file,
host=host,
port=port,
user=user,
dbname=dbname,
errors_only=errors_only,
master_only=master_only)
"""
@staticmethod
def check_log_for_errors(start_time, end_time=None, host='localhost',
user=_DEFAULT_USER, port=_DEFAULT_PORT,
dbname=_DEFAULT_USER):
"""
Check logs in the given duration for any error messages.
Returns True / False based on whether errors were found in the logs.
@type start_time: date
@param start_time: Start time of the duration for which logs should be gathered.
@type end_time: date
@param end_time: End time of the duration for which logs should be gathered.
@type host: string
@param host: Host name for the connection.Defaults to localhost
@type port: integer
@param port: Port number for the connection to the cluster. Defaults to environment variable PGPORT.
@type user: string
@param user: Username for the connection to the cluster. Defaults to the current user.
@type dbname: string
@param dbname: Database name to use for the connection to the cluster. Defaults to the current user.
@rtype: boolean
@return: Returns True if there are errors found in the log in the given duration, False otherwise.
"""
format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time))
if end_time:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time))
else:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
tinctest.logger.info("checking log from %s to %s" % (format_start_time, format_end_time))
sql_cmd = "select logseverity,count(logseverity) from gp_toolkit.gp_log_database " + \
"where (logseverity=\'FATAL\' or logseverity=\'ERROR\' or logseverity='PANIC') " + \
"and (logtime >=\'%s\' and logtime <= \'%s\') group by logseverity;" % \
(format_start_time, format_end_time)
try:
result = PSQL.run_sql_command(sql_cmd, dbname=dbname, host=host, port=port, username=user, flags='-a -x')
if "RECORD" in result:
return True
except Exception, e:
tinctest.logger.exception("Failed while checking logs - %s" %e)
raise GpLogException("Failed while checking logs. Make sure you can connect to the cluster")
return False
@staticmethod
def _test_connection(host='localhost',port=_DEFAULT_PORT, user=_DEFAULT_USER,
dbname=_DEFAULT_USER):
try:
connect(DbURL(hostname=host,
port=port,
dbname=dbname,
username=user))
except Exception, expt:
tinctest.logger.error("Failed to connect to hostname %s, port %s, database %s, as user %s"
% (host, port, dbname, user))
tinctest.logger.exception(expt)
return False
return True
@staticmethod
def _gather_log_from_gp_log_filter(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE, host='localhost',
port=_DEFAULT_PORT, user=_DEFAULT_USER, dbname=_DEFAULT_USER, errors_only=False, master_only=False):
"""
This retrieves log messages from all segments that happened within the last
'duration' seconds. The format of start_time and end_time is YYYY-MM-DD [hh:mm[:ss]]
The tuples returned are (dbid, hostname, datadir, logdata). sorted by dbid.
Returns True/False based on whether matching log entries were found.
"""
format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(start_time))
if end_time:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(end_time))
else:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime())
tinctest.logger.info("Collecting log from %s to %s into the file -%s" % (format_start_time,format_end_time, out_file))
array = GpArray.initFromCatalog(DbURL(hostname=host, port=port, username=user, dbname=dbname), True)
log_chunks = []
for seg in array.getDbList():
tinctest.logger.info("Collecting log for segment - %s : %s" %(seg.getSegmentHostName(), seg.getSegmentContentId()))
if master_only and seg.getSegmentContentId() != -1:
continue
cmd = GpLogFilter('collect log chunk',
'\\`ls -rt %s | tail -1\\`' % os.path.join(seg.getSegmentDataDirectory(), 'pg_log', '*.csv'),
start=format_start_time, end=format_end_time,
trouble=errors_only,
ctxt=REMOTE,
remoteHost=seg.getSegmentHostName())
cmd.run()
rc = cmd.get_results().rc
if rc:
tinctest.logger.warning("Failed command execution %s : %s" %(cmd, cmd.get_results().stderr))
continue
log_data = cmd.get_results().stdout
if not log_data:
tinctest.logger.warning("No log data returned for the given time frame.")
else:
log_chunks.append((seg.getSegmentContentId(),
seg.getSegmentHostName(),
seg.getSegmentDataDirectory(),
log_data))
if log_chunks:
tinctest.logger.info("Writing log data to file - %s" %(out_file))
with open(out_file, 'w') as f:
for part in log_chunks:
f.write("-"*70)
f.write("\n DBID %s (%s:%s)\n" % (part[0], part[1], part[2]))
f.write("-"*70)
f.write("\n%s" % part[3])
f.write("\n\n")
@staticmethod
def _gather_log_from_gp_toolkit(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE, host='localhost',
port=_DEFAULT_PORT, user=_DEFAULT_USER, dbname=_DEFAULT_USER, errors_only=False, master_only=False):
"""
This retrieves log messages from all segments that happened within the last
'duration' seconds. The format of start_time and end_time is YYYY-MM-DD [hh:mm[:ss]]
This function gathers logs by querying external tables in gptoolkit. If the cluster is not up and running,
use _gather_log_from_gp_log_filter which uses the utility gplogfilter to gather logs.
"""
format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time))
if end_time:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time))
else:
format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
tinctest.logger.info("Collecting log from %s to %s" % (format_start_time,format_end_time))
sql_cmd = "select * from gp_toolkit.gp_log_database where logtime >=\'%s\' and logtime <= \'%s\';" % \
(format_start_time, format_end_time)
PSQL.run_sql_command(sql_cmd, out_file=out_file, dbname=dbname,
host=host, port=port, username=user, flags='-a -x') | 0.417271 | 0.118691 |
import sys
from unittest import mock
from unittest.mock import call
import fake_rpi
from freezegun import freeze_time
from config import config
from config.config import MODES
sys.modules['RPi'] = fake_rpi.RPi # Fake RPi
sys.modules['RPi.GPIO'] = fake_rpi.RPi.GPIO # Fake GPIO
config.MODE = MODES['vegetation']
from controllers.light_controller import LightController
from controllers import light_controller
from state import STATE
STATE['thermometer']['top'] = 24
STATE['thermometer']['bottom'] = 24
@freeze_time("2021-05-23 20:00")
def test_both_starts_greater_on():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'ON'
@freeze_time("2021-05-23 20:00")
def test_one_starts_greater_on():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 08:00")
def test_both_starts_less_on():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'ON'
@freeze_time("2021-05-23 08:00")
def test_one_starts_less_on():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 10:00")
def test_both_starts_greater_off():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 10:00")
def test_one_starts_greater_off():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 22:00")
def test_both_starts_less_off():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 23:00")
def test_one_starts_less_off():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 08:00")
def test_max_temperature():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'both'
STATE['thermometer']['top'] = 30
STATE['thermometer']['bottom'] = 34
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF' | tests/light_controller.py | import sys
from unittest import mock
from unittest.mock import call
import fake_rpi
from freezegun import freeze_time
from config import config
from config.config import MODES
sys.modules['RPi'] = fake_rpi.RPi # Fake RPi
sys.modules['RPi.GPIO'] = fake_rpi.RPi.GPIO # Fake GPIO
config.MODE = MODES['vegetation']
from controllers.light_controller import LightController
from controllers import light_controller
from state import STATE
STATE['thermometer']['top'] = 24
STATE['thermometer']['bottom'] = 24
@freeze_time("2021-05-23 20:00")
def test_both_starts_greater_on():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'ON'
@freeze_time("2021-05-23 20:00")
def test_one_starts_greater_on():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 08:00")
def test_both_starts_less_on():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'ON'
@freeze_time("2021-05-23 08:00")
def test_one_starts_less_on():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.on') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'ON'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 10:00")
def test_both_starts_greater_off():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 10:00")
def test_one_starts_greater_off():
light_controller.DAY_STARTS_AT = '19:00'
light_controller.DAY_ENDS_AT = '07:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 22:00")
def test_both_starts_less_off():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'both'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 23:00")
def test_one_starts_less_off():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'one'
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF'
@freeze_time("2021-05-23 08:00")
def test_max_temperature():
light_controller.DAY_STARTS_AT = '07:00'
light_controller.DAY_ENDS_AT = '19:00'
light_controller.LIGHT_MODE = 'both'
STATE['thermometer']['top'] = 30
STATE['thermometer']['bottom'] = 34
with mock.patch('devices.relay.off') as mocked_method:
lc = LightController(STATE)
lc.control()
expected = [call(8), call(25)]
called = mocked_method.call_args_list
assert expected == called
assert STATE['light']['1'] == 'OFF'
assert STATE['light']['2'] == 'OFF' | 0.485844 | 0.350644 |
import ConfigParser
import os
import re
import shutil
import functest.utils.functest_logger as ft_logger
import functest.opnfv_tests.openstack.tempest.conf_utils as tempest_utils
logger = ft_logger.Logger("sdnvpn-tempest").getLogger()
def main():
verifier_repo_dir = tempest_utils.get_verifier_repo_dir(None)
src_tempest_dir = tempest_utils.get_verifier_deployment_dir(None, None)
if not src_tempest_dir:
logger.error("Rally deployment not found.")
exit(-1)
tempest_utils.configure_verifier(src_tempest_dir)
src_tempest_conf = os.path.join(src_tempest_dir, 'tempest.conf')
bgpvpn_tempest_conf = src_tempest_dir + '/bgpvpn_tempest.conf'
if not os.path.isfile(src_tempest_conf):
logger.error("tempest.conf not found in %s." % src_tempest_conf)
exit(-1)
shutil.copy(src_tempest_conf, bgpvpn_tempest_conf)
logger.info("Copying tempest.conf to %s." % bgpvpn_tempest_conf)
config = ConfigParser.RawConfigParser()
config.read(bgpvpn_tempest_conf)
config.set('service_available', 'bgpvpn', 'True')
logger.debug("Updating %s with bgpvpn=True" % bgpvpn_tempest_conf)
with open(bgpvpn_tempest_conf, 'wb') as tempest_conf:
config.write(tempest_conf)
cmd_line = (verifier_repo_dir +
"/run_tempest.sh -C %s -t -N -- "
"networking_bgpvpn_tempest" % bgpvpn_tempest_conf)
logger.info("Executing: %s" % cmd_line)
cmd = os.popen(cmd_line)
output = cmd.read()
logger.debug(output)
# Results parsing
error_logs = ""
duration = 0
failed = 0
try:
# Look For errors
error_logs = ""
for match in re.findall('(.*?)[. ]*FAILED', output):
error_logs += match
# look for duration
m = re.search('tests in(.*)sec', output)
duration = m.group(1)
# Look for num tests run
m = re.search('Ran:(.*)tests', output)
num_tests = m.group(1)
# Look for tests failed
m = re.search('Failed:(.*)', output)
failed = m.group(1)
# Look for name of the tests
testcases = re.findall("\{0\} (.*)", output)
results = {"duration": duration,
"num_tests": num_tests, "failed": failed,
"tests": testcases}
if int(failed) == 0:
status = "PASS"
else:
status = "FAILED"
return {"status": status, "details": results}
except:
logger.error("Problem when parsing the results.")
if __name__ == '__main__':
main() | sdnvpn/test/functest/tempest.py | import ConfigParser
import os
import re
import shutil
import functest.utils.functest_logger as ft_logger
import functest.opnfv_tests.openstack.tempest.conf_utils as tempest_utils
logger = ft_logger.Logger("sdnvpn-tempest").getLogger()
def main():
verifier_repo_dir = tempest_utils.get_verifier_repo_dir(None)
src_tempest_dir = tempest_utils.get_verifier_deployment_dir(None, None)
if not src_tempest_dir:
logger.error("Rally deployment not found.")
exit(-1)
tempest_utils.configure_verifier(src_tempest_dir)
src_tempest_conf = os.path.join(src_tempest_dir, 'tempest.conf')
bgpvpn_tempest_conf = src_tempest_dir + '/bgpvpn_tempest.conf'
if not os.path.isfile(src_tempest_conf):
logger.error("tempest.conf not found in %s." % src_tempest_conf)
exit(-1)
shutil.copy(src_tempest_conf, bgpvpn_tempest_conf)
logger.info("Copying tempest.conf to %s." % bgpvpn_tempest_conf)
config = ConfigParser.RawConfigParser()
config.read(bgpvpn_tempest_conf)
config.set('service_available', 'bgpvpn', 'True')
logger.debug("Updating %s with bgpvpn=True" % bgpvpn_tempest_conf)
with open(bgpvpn_tempest_conf, 'wb') as tempest_conf:
config.write(tempest_conf)
cmd_line = (verifier_repo_dir +
"/run_tempest.sh -C %s -t -N -- "
"networking_bgpvpn_tempest" % bgpvpn_tempest_conf)
logger.info("Executing: %s" % cmd_line)
cmd = os.popen(cmd_line)
output = cmd.read()
logger.debug(output)
# Results parsing
error_logs = ""
duration = 0
failed = 0
try:
# Look For errors
error_logs = ""
for match in re.findall('(.*?)[. ]*FAILED', output):
error_logs += match
# look for duration
m = re.search('tests in(.*)sec', output)
duration = m.group(1)
# Look for num tests run
m = re.search('Ran:(.*)tests', output)
num_tests = m.group(1)
# Look for tests failed
m = re.search('Failed:(.*)', output)
failed = m.group(1)
# Look for name of the tests
testcases = re.findall("\{0\} (.*)", output)
results = {"duration": duration,
"num_tests": num_tests, "failed": failed,
"tests": testcases}
if int(failed) == 0:
status = "PASS"
else:
status = "FAILED"
return {"status": status, "details": results}
except:
logger.error("Problem when parsing the results.")
if __name__ == '__main__':
main() | 0.235812 | 0.129981 |
import black
import isort
def black_format(code: str, is_pyi: bool = False, line_length: int = 88) -> str:
"""Formats the provided code snippet using black"""
try:
return black.format_file_contents(
code,
fast=True,
mode=black.FileMode( # type: ignore
is_pyi=is_pyi,
line_length=line_length,
),
)
except black.NothingChanged:
return code
def black_test(code: str, expected_output: str = ""):
"""Tests that the given code:
- Behaves the same when formatted multiple times with isort.
- Agrees with black formatting.
- Matches the desired output or itself if none is provided.
"""
expected_output = expected_output or code
# output should stay consistent over multiple runs
output = isort.code(code, profile="black")
assert output == isort.code(code, profile="black")
# output should agree with black
black_output = black_format(output)
assert output == black_output
# output should match expected output
assert output == expected_output
def test_black_snippet_one():
"""Test consistent code formatting between isort and black for code snippet from black repository.
See: https://github.com/psf/black/blob/master/tests/test_black.py
"""
black_test(
"""#!/usr/bin/env python3
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from functools import partial
import inspect
from io import BytesIO, TextIOWrapper
import os
from pathlib import Path
from platform import system
import regex as re
import sys
from tempfile import TemporaryDirectory
import types
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generator,
List,
Tuple,
Iterator,
TypeVar,
)
import unittest
from unittest.mock import patch, MagicMock
import click
from click import unstyle
from click.testing import CliRunner
import black
from black import Feature, TargetVersion
try:
import blackd
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiohttp import web
except ImportError:
has_blackd_deps = False
else:
has_blackd_deps = True
from pathspec import PathSpec
# Import other test classes
from .test_primer import PrimerCLITests # noqa: F401
DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
""",
"""#!/usr/bin/env python3
import asyncio
import inspect
import logging
import os
import sys
import types
import unittest
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from functools import partial
from io import BytesIO, TextIOWrapper
from pathlib import Path
from platform import system
from tempfile import TemporaryDirectory
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generator,
Iterator,
List,
Tuple,
TypeVar,
)
from unittest.mock import MagicMock, patch
import black
import click
import regex as re
from black import Feature, TargetVersion
from click import unstyle
from click.testing import CliRunner
try:
import blackd
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
except ImportError:
has_blackd_deps = False
else:
has_blackd_deps = True
from pathspec import PathSpec
# Import other test classes
from .test_primer import PrimerCLITests # noqa: F401
DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
""",
)
def test_black_snippet_two():
"""Test consistent code formatting between isort and black for code snippet from black repository.
See: https://github.com/psf/black/blob/master/tests/test_primer.py
"""
black_test(
'''#!/usr/bin/env python3
import asyncio
import sys
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import StringIO
from os import getpid
from pathlib import Path
from platform import system
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
from typing import Any, Callable, Generator, Iterator, Tuple
from unittest.mock import Mock, patch
from click.testing import CliRunner
from black_primer import cli, lib
EXPECTED_ANALYSIS_OUTPUT = """\
-- primer results 📊 --
68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- 0 projects disabled by config
- 0 projects skipped due to Python version
- 0 skipped due to long checkout
Failed projects:
## black:
- Returned 69
- stdout:
Black didn't work
"""
''',
'''#!/usr/bin/env python3
import asyncio
import sys
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import StringIO
from os import getpid
from pathlib import Path
from platform import system
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
from typing import Any, Callable, Generator, Iterator, Tuple
from unittest.mock import Mock, patch
from black_primer import cli, lib
from click.testing import CliRunner
EXPECTED_ANALYSIS_OUTPUT = """-- primer results 📊 --
68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- 0 projects disabled by config
- 0 projects skipped due to Python version
- 0 skipped due to long checkout
Failed projects:
## black:
- Returned 69
- stdout:
Black didn't work
"""
''',
)
def test_black_snippet_three():
"""Test consistent code formatting between isort and black for code snippet from black repository.
See: https://github.com/psf/black/blob/master/src/black/__init__.py
"""
black_test(
"""import ast
import asyncio
from abc import ABC, abstractmethod
from collections import defaultdict
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
from contextlib import contextmanager
from datetime import datetime
from enum import Enum
from functools import lru_cache, partial, wraps
import io
import itertools
import logging
from multiprocessing import Manager, freeze_support
import os
from pathlib import Path
import pickle
import regex as re
import signal
import sys
import tempfile
import tokenize
import traceback
from typing import (
Any,
Callable,
Collection,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Optional,
Pattern,
Sequence,
Set,
Sized,
Tuple,
Type,
TypeVar,
Union,
cast,
TYPE_CHECKING,
)
from typing_extensions import Final
from mypy_extensions import mypyc_attr
from appdirs import user_cache_dir
from dataclasses import dataclass, field, replace
import click
import toml
from typed_ast import ast3, ast27
from pathspec import PathSpec
# lib2to3 fork
from blib2to3.pytree import Node, Leaf, type_repr
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
from _black_version import version as __version__
if TYPE_CHECKING:
import colorama # noqa: F401
DEFAULT_LINE_LENGTH = 88
""",
"""import ast
import asyncio
import io
import itertools
import logging
import os
import pickle
import signal
import sys
import tempfile
import tokenize
import traceback
from abc import ABC, abstractmethod
from collections import defaultdict
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass, field, replace
from datetime import datetime
from enum import Enum
from functools import lru_cache, partial, wraps
from multiprocessing import Manager, freeze_support
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Optional,
Pattern,
Sequence,
Set,
Sized,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import click
import regex as re
import toml
from _black_version import version as __version__
from appdirs import user_cache_dir
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
# lib2to3 fork
from blib2to3.pytree import Leaf, Node, type_repr
from mypy_extensions import mypyc_attr
from pathspec import PathSpec
from typed_ast import ast3, ast27
from typing_extensions import Final
if TYPE_CHECKING:
import colorama # noqa: F401
DEFAULT_LINE_LENGTH = 88
""",
) | tests/unit/profiles/test_black.py | import black
import isort
def black_format(code: str, is_pyi: bool = False, line_length: int = 88) -> str:
"""Formats the provided code snippet using black"""
try:
return black.format_file_contents(
code,
fast=True,
mode=black.FileMode( # type: ignore
is_pyi=is_pyi,
line_length=line_length,
),
)
except black.NothingChanged:
return code
def black_test(code: str, expected_output: str = ""):
"""Tests that the given code:
- Behaves the same when formatted multiple times with isort.
- Agrees with black formatting.
- Matches the desired output or itself if none is provided.
"""
expected_output = expected_output or code
# output should stay consistent over multiple runs
output = isort.code(code, profile="black")
assert output == isort.code(code, profile="black")
# output should agree with black
black_output = black_format(output)
assert output == black_output
# output should match expected output
assert output == expected_output
def test_black_snippet_one():
"""Test consistent code formatting between isort and black for code snippet from black repository.
See: https://github.com/psf/black/blob/master/tests/test_black.py
"""
black_test(
"""#!/usr/bin/env python3
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from functools import partial
import inspect
from io import BytesIO, TextIOWrapper
import os
from pathlib import Path
from platform import system
import regex as re
import sys
from tempfile import TemporaryDirectory
import types
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generator,
List,
Tuple,
Iterator,
TypeVar,
)
import unittest
from unittest.mock import patch, MagicMock
import click
from click import unstyle
from click.testing import CliRunner
import black
from black import Feature, TargetVersion
try:
import blackd
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiohttp import web
except ImportError:
has_blackd_deps = False
else:
has_blackd_deps = True
from pathspec import PathSpec
# Import other test classes
from .test_primer import PrimerCLITests # noqa: F401
DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
""",
"""#!/usr/bin/env python3
import asyncio
import inspect
import logging
import os
import sys
import types
import unittest
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from functools import partial
from io import BytesIO, TextIOWrapper
from pathlib import Path
from platform import system
from tempfile import TemporaryDirectory
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generator,
Iterator,
List,
Tuple,
TypeVar,
)
from unittest.mock import MagicMock, patch
import black
import click
import regex as re
from black import Feature, TargetVersion
from click import unstyle
from click.testing import CliRunner
try:
import blackd
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
except ImportError:
has_blackd_deps = False
else:
has_blackd_deps = True
from pathspec import PathSpec
# Import other test classes
from .test_primer import PrimerCLITests # noqa: F401
DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
""",
)
def test_black_snippet_two():
"""Test consistent code formatting between isort and black for code snippet from black repository.
See: https://github.com/psf/black/blob/master/tests/test_primer.py
"""
black_test(
'''#!/usr/bin/env python3
import asyncio
import sys
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import StringIO
from os import getpid
from pathlib import Path
from platform import system
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
from typing import Any, Callable, Generator, Iterator, Tuple
from unittest.mock import Mock, patch
from click.testing import CliRunner
from black_primer import cli, lib
EXPECTED_ANALYSIS_OUTPUT = """\
-- primer results 📊 --
68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- 0 projects disabled by config
- 0 projects skipped due to Python version
- 0 skipped due to long checkout
Failed projects:
## black:
- Returned 69
- stdout:
Black didn't work
"""
''',
'''#!/usr/bin/env python3
import asyncio
import sys
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import StringIO
from os import getpid
from pathlib import Path
from platform import system
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
from typing import Any, Callable, Generator, Iterator, Tuple
from unittest.mock import Mock, patch
from black_primer import cli, lib
from click.testing import CliRunner
EXPECTED_ANALYSIS_OUTPUT = """-- primer results 📊 --
68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- 0 projects disabled by config
- 0 projects skipped due to Python version
- 0 skipped due to long checkout
Failed projects:
## black:
- Returned 69
- stdout:
Black didn't work
"""
''',
)
def test_black_snippet_three():
    """Test consistent code formatting between isort and black for code snippet from black repository.

    See: https://github.com/psf/black/blob/master/src/black/__init__.py
    """
    # black_test(input, expected): asserts that isort rewrites `input` into
    # `expected`, an ordering compatible with black's formatting.
    black_test(
        # Unsorted imports as they appear in black's __init__.py.
        """import ast
import asyncio
from abc import ABC, abstractmethod
from collections import defaultdict
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
from contextlib import contextmanager
from datetime import datetime
from enum import Enum
from functools import lru_cache, partial, wraps
import io
import itertools
import logging
from multiprocessing import Manager, freeze_support
import os
from pathlib import Path
import pickle
import regex as re
import signal
import sys
import tempfile
import tokenize
import traceback
from typing import (
Any,
Callable,
Collection,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Optional,
Pattern,
Sequence,
Set,
Sized,
Tuple,
Type,
TypeVar,
Union,
cast,
TYPE_CHECKING,
)
from typing_extensions import Final
from mypy_extensions import mypyc_attr
from appdirs import user_cache_dir
from dataclasses import dataclass, field, replace
import click
import toml
from typed_ast import ast3, ast27
from pathspec import PathSpec
# lib2to3 fork
from blib2to3.pytree import Node, Leaf, type_repr
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
from _black_version import version as __version__
if TYPE_CHECKING:
import colorama # noqa: F401
DEFAULT_LINE_LENGTH = 88
""",
        # Expected output: plain imports first, then from-imports, each group
        # alphabetised, with names inside parenthesised imports sorted too.
        """import ast
import asyncio
import io
import itertools
import logging
import os
import pickle
import signal
import sys
import tempfile
import tokenize
import traceback
from abc import ABC, abstractmethod
from collections import defaultdict
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass, field, replace
from datetime import datetime
from enum import Enum
from functools import lru_cache, partial, wraps
from multiprocessing import Manager, freeze_support
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Optional,
Pattern,
Sequence,
Set,
Sized,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import click
import regex as re
import toml
from _black_version import version as __version__
from appdirs import user_cache_dir
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
# lib2to3 fork
from blib2to3.pytree import Leaf, Node, type_repr
from mypy_extensions import mypyc_attr
from pathspec import PathSpec
from typed_ast import ast3, ast27
from typing_extensions import Final
if TYPE_CHECKING:
import colorama # noqa: F401
DEFAULT_LINE_LENGTH = 88
""",
    )
# Standard library
import asyncio
import datetime  # required by bump_notice2 (datetime.timedelta / utcnow)
import random
from io import BytesIO

# Third party
import discord
import requests
from discord.ext import tasks, commands
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps
class Event(commands.Cog):
    """Guild event cog: DISBOARD bump reminders, voice join/leave notices and
    welcome-card images for new members."""

    def __init__(self, technetium):
        self.bot = technetium  # the running bot instance
        self.accent_color = (255, 210, 0)
        self.font_path = "./fonts/Harenosora.otf"
        self.bump_notice2.start()

    def cog_unload(self):
        # Stop the background reminder loop when the cog is unloaded.
        self.bump_notice2.cancel()

    @tasks.loop(reconnect=True)
    async def bump_notice2(self):
        """Remind the role to run DISBOARD's `!d bump` once the 2h cooldown passes."""
        disboard_bot = self.bot.get_user(302050872383242240)  # DISBOARD bot account
        channel = self.bot.get_channel(617960149067366410)
        mention = '<@&596668500916043796>'  # role to ping with the reminder
        Interval = datetime.timedelta(hours=2)  # DISBOARD bump cooldown

        def check1(m):
            # A successful bump is a DISBOARD message whose first embed contains
            # :thumbsup:.  Guard against messages without embeds (IndexError)
            # and embeds without a description (TypeError on `in None`).
            return (m.author == disboard_bot and bool(m.embeds)
                    and ':thumbsup:' in (m.embeds[0].description or ''))

        # NOTE(review): discord.py's history().filter(...).next() raises
        # NoMoreItems instead of returning None when nothing matches — the
        # `is not None` check below suggests otherwise; confirm and handle.
        mes = await channel.history().filter(check1).next()
        # channel.topic may be None; treat that as "not canceled".
        if mes is not None and "Bump canceled" not in (channel.topic or ""):
            timedata1 = datetime.datetime.utcnow() - mes.created_at
            if timedata1 >= Interval:
                embed1 = discord.Embed(title='⏫Bunp Reminder..!!!',
                                       description=f'Bumpされてから結構経ちましたよー。\r!d bumpをしてほしいんね。',
                                       color=0x0080ff)
                await channel.send(mention, embed=embed1)
            else:
                try:
                    # Wait until either the client closes or the cooldown expires.
                    # BUGFIX: was `self.asyncio.Event(loop=self.bot.loop)`, which
                    # raises AttributeError (the cog has no `asyncio` attribute)
                    # and uses the `loop` kwarg removed in Python 3.10.  Waiting
                    # on a never-set Event makes wait_for act as a cancellable
                    # sleep for the remaining cooldown.
                    await asyncio.wait_for(
                        asyncio.Event().wait(),
                        (Interval - timedata1).total_seconds()
                    )
                except asyncio.TimeoutError:
                    # Cooldown elapsed: the next bump is available now.
                    embed2 = discord.Embed(title='⏫Bunp Reminder!!!!!',
                                           description=f'Bumpができますよー。\r!d bumpをしてほしいんね。',
                                           color=0x0080ff)
                    await channel.send(mention, embed=embed2)
                else:
                    # The wait finished without a timeout (client closing).
                    pass

    @bump_notice2.before_loop
    async def before_bump_notice2(self):
        # Don't query users/channels before the gateway cache is ready.
        await self.bot.wait_until_ready()

    @commands.Cog.listener()
    async def on_voice_state_update(self, member, before, after):
        """Post join/leave notices for voice channels (auto-deleted after 180 s)."""
        if (
            after.channel is not None
            and (before.channel is None or before.channel != after.channel)
        ):
            embed = discord.Embed(title='ボイスチャンネル入室通知',
                                  description=f'{member.mention}さんが入室しました。',
                                  color=0x00ff00)
            await self.bot.get_channel(596668583728119809).send(embed=embed, delete_after=180)
        if (
            before.channel is not None
            and (after.channel is None or before.channel != after.channel)
        ):
            embed = discord.Embed(title='ボイスチャンネル退出通知',
                                  description=f'{member.mention}さんが退出しました。',
                                  color=0xff0000)
            await self.bot.get_channel(596668583728119809).send(embed=embed, delete_after=180)

    def add_base_image(self, icon_path, member_name, text2):
        """Compose the welcome card: circular avatar pasted onto the background.

        Drawing happens at 3x resolution and is downscaled for smooth edges.
        NOTE(review): Image.ANTIALIAS was removed in Pillow 10 — migrate to
        Image.LANCZOS when upgrading Pillow.
        """
        icon_size = 160
        icon = Image.new("RGBA", ((icon_size + 30)*3, (icon_size + 30)*3), 0)  # avatar canvas
        draw1 = ImageDraw.Draw(icon)
        draw1.ellipse((9*3, 9*3, (icon_size + 21)*3, (icon_size + 21)*3), fill="White")  # solid rim behind the avatar
        draw1.ellipse((0, 0, (icon_size + 30)*3, (icon_size + 30)*3), width=3*3, outline="White")  # thin outer ring
        icon = icon.resize((icon_size + 30, icon_size + 30), Image.ANTIALIAS)  # downscale (anti-alias)
        mask = Image.new("L", (icon_size*3, icon_size*3), 0)
        draw2 = ImageDraw.Draw(mask)
        draw2.ellipse((0, 0, icon_size*3, icon_size*3), fill=255)  # circular crop mask
        mask = mask.resize((icon_size, icon_size), Image.ANTIALIAS)
        # Fetch the avatar and crop it to a circle with the mask.
        icon_img = Image.open(BytesIO(requests.get(icon_path).content)).convert("RGBA").resize(size=(icon_size, icon_size), resample=Image.ANTIALIAS)
        icon.paste(icon_img, ((icon.size[0]-icon_size)//2, (icon.size[0]-icon_size)//2), mask)
        image = Image.open('./images/discord_cafe_welcome.png')
        image.paste(icon, ((image.size[0]-icon.size[0])//2, 200), icon)  # composite onto the background
        draw3 = ImageDraw.Draw(image)
        max_length = 550  # widest allowed rendered text, in pixels
        font1 = ImageFont.truetype(self.font_path, 30)
        font2 = ImageFont.truetype(self.font_path, 23)
        text1 = f"{member_name}さん"
        height = 430
        # Ellipsize over-long names so they fit within max_length.
        if draw3.textsize(text1, font=font1)[0] > max_length:
            while draw3.textsize(text1 + '…', font=font1)[0] > max_length:
                text1 = text1[:-1]
            text1 = text1[:-3] + '…さん'
        draw3.text(((image.size[0]-draw3.textsize(text1, font=font1)[0])//2, 400), text1, "White", font=font1)
        for text in text2:
            draw3.text(((image.size[0]-draw3.textsize(text, font=font2)[0])//2, height), text, "White", font=font2)
            height += 24
        return image

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Send the welcome embed plus the generated card when a member joins."""
        image = self.add_base_image(member.avatar_url, member.name, [f"Welcome! #{len(member.guild.members)}"])
        arr = BytesIO()
        image.save(arr, format='png')
        arr.seek(0)
        file = discord.File(fp=arr, filename="Welcome_image.png")
        cl = discord.Color(random.randint(0, 0xFFFFFF))  # random accent colour
        embed = discord.Embed(title=f"利用規約はここから",
                              description=f"{member.guild.name}へようこそ!{member.mention}さん!\n{len(member.guild.members)}人目の参加者です!\n> 何か困ったことがあればぜひとも運営にメンションをしてください。\n> 基本一人は常駐してます。",
                              url="https://www.elegraph.cf/?page_id=24",
                              color=cl)
        embed.set_author(name=f"{member.display_name}さんが参加しました~!", icon_url=member.avatar_url)
        embed.set_footer(text=member.guild.name, icon_url=member.guild.icon_url)
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_image(url="attachment://Welcome_image.png")
        await self.bot.get_channel(596668568909643817).send(file=file, embed=embed)
def setup(technetium):
    """Extension entry point: discord.py calls this when loading the module."""
    technetium.add_cog(Event(technetium))
from discord.ext import tasks, commands
import asyncio
from io import BytesIO
import requests
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps
import random
class Event(commands.Cog):
def __init__(self, technetium):
self.bot = technetium #botを受け取る。
self.accent_color = (255, 210, 0)
self.font_path = "./fonts/Harenosora.otf"
self.bump_notice2.start()
def cog_unload(self):
self.bump_notice2.cancel()
@tasks.loop(reconnect=True)
async def bump_notice2(self):
disboard_bot = self.bot.get_user(302050872383242240)
channel = self.bot.get_channel(617960149067366410)
mention = '<@&596668500916043796>'
Interval = datetime.timedelta(hours=2)
def check1(m):
return m.author == disboard_bot and ':thumbsup:' in m.embeds[0].description
mes = await channel.history().filter(check1).next()
if mes is not None and "Bump canceled" not in channel.topic:
timedata1 = datetime.datetime.utcnow() - mes.created_at
if timedata1 >= Interval:
embed1 = discord.Embed(title='⏫Bunp Reminder..!!!',
description=f'Bumpされてから結構経ちましたよー。\r!d bumpをしてほしいんね。',
color=0x0080ff)
await channel.send(mention, embed=embed1)
else:
try:
# クライアントクローズか2時間経過するのを待つ
await asyncio.wait_for(
self.asyncio.Event(loop=self.bot.loop).wait(),
(Interval - timedata1).total_seconds()
)
except asyncio.TimeoutError:
# 2時間経過
embed2 = discord.Embed(title='⏫Bunp Reminder!!!!!',
description=f'Bumpができますよー。\r!d bumpをしてほしいんね。',
color=0x0080ff)
await channel.send(mention, embed=embed2)
else:
# クライアントクローズ
pass
@bump_notice2.before_loop
async def before_bump_notice2(self):
await self.bot.wait_until_ready()
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
if (
after.channel is not None
and (before.channel is None or before.channel != after.channel)
):
embed = discord.Embed(title='ボイスチャンネル入室通知',
description=f'{member.mention}さんが入室しました。',
color=0x00ff00)
await self.bot.get_channel(596668583728119809).send(embed=embed, delete_after=180)
if (
before.channel is not None
and (after.channel is None or before.channel != after.channel)
):
embed = discord.Embed(title='ボイスチャンネル退出通知',
description=f'{member.mention}さんが退出しました。',
color=0xff0000)
await self.bot.get_channel(596668583728119809).send(embed=embed, delete_after=180)
def add_base_image(self, icon_path, member_name, text2):
icon_size = 160
icon = Image.new("RGBA", ((icon_size + 30)*3, (icon_size + 30)*3), 0) #icon
draw1 = ImageDraw.Draw(icon)
draw1.ellipse((9*3, 9*3, (icon_size + 21)*3, (icon_size + 21)*3), fill="White") #縁取り
draw1.ellipse((0, 0, (icon_size + 30)*3, (icon_size + 30)*3), width=3*3, outline="White") #周りの白抜きされた丸
icon = icon.resize((icon_size + 30, icon_size + 30), Image.ANTIALIAS) #エイリアス
mask = Image.new("L", (icon_size*3, icon_size*3), 0)
draw2 = ImageDraw.Draw(mask)
draw2.ellipse((0, 0, icon_size*3, icon_size*3), fill=255) #マスク用
mask = mask.resize((icon_size, icon_size), Image.ANTIALIAS) #エイリアス
icon_img = Image.open(BytesIO(requests.get(icon_path).content)).convert("RGBA").resize(size=(icon_size, icon_size), resample=Image.ANTIALIAS) #サイズ変更
icon.paste(icon_img, ((icon.size[0]-icon_size)//2, (icon.size[0]-icon_size)//2), mask) #下地へ合成
image = Image.open('./images/discord_cafe_welcome.png')
image.paste(icon, ((image.size[0]-icon.size[0])//2, 200), icon) #背景と合成
draw3 = ImageDraw.Draw(image)
max_length = 550
font1 = ImageFont.truetype(self.font_path, 30)
font2 = ImageFont.truetype(self.font_path, 23)
text1 = f"{member_name}さん"
height = 430
if draw3.textsize(text1, font=font1)[0] > max_length:
while draw3.textsize(text1 + '…', font=font1)[0] > max_length:
text1 = text1[:-1]
text1 = text1[:-3] + '…さん'
draw3.text(((image.size[0]-draw3.textsize(text1, font=font1)[0])//2, 400), text1, "White", font=font1)
for text in text2:
draw3.text(((image.size[0]-draw3.textsize(text, font=font2)[0])//2, height), text, "White", font=font2)
height += 24
return image
@commands.Cog.listener()
async def on_member_join(self, member):
image = self.add_base_image(member.avatar_url, member.name, [f"Welcome! #{len(member.guild.members)}"])
arr = BytesIO()
image.save(arr, format='png')
arr.seek(0)
file = discord.File(fp=arr, filename="Welcome_image.png")
cl = discord.Color(random.randint(0, 0xFFFFFF))
embed = discord.Embed(title=f"利用規約はここから",
description=f"{member.guild.name}へようこそ!{member.mention}さん!\n{len(member.guild.members)}人目の参加者です!\n> 何か困ったことがあればぜひとも運営にメンションをしてください。\n> 基本一人は常駐してます。",
url="https://www.elegraph.cf/?page_id=24",
color=cl)
embed.set_author(name=f"{member.display_name}さんが参加しました~!", icon_url=member.avatar_url)
embed.set_footer(text=member.guild.name, icon_url=member.guild.icon_url)
embed.set_thumbnail(url=member.avatar_url)
embed.set_image(url="attachment://Welcome_image.png")
await self.bot.get_channel(596668568909643817).send(file=file, embed=embed)
def setup(technetium):
technetium.add_cog(Event(technetium)) | 0.221351 | 0.144994 |
"""Tests for the Systemd Journal parser."""
from __future__ import unicode_literals
import unittest
try:
from plaso.parsers import systemd_journal
except ImportError:
systemd_journal = None
from tests.parsers import test_lib
@unittest.skipIf(systemd_journal is None, 'requires LZMA compression support')
class SystemdJournalParserTest(test_lib.ParserTestCase):
    """Tests for the Systemd Journal parser."""

    def testParse(self):
        """Tests the Parse function."""
        parser = systemd_journal.SystemdJournalParser()
        storage_writer = self._ParseFile([
            'systemd', 'journal', 'system.journal'], parser)
        self.assertEqual(storage_writer.number_of_events, 2101)
        events = list(storage_writer.GetEvents())
        event = events[0]
        self.CheckTimestamp(event.timestamp, '2017-01-27 09:40:55.913258')
        expected_message = (
            'test-VirtualBox [systemd, pid: 1] Started User Manager for '
            'UID 1000.')
        # Short message equals the full message because it fits the limit.
        self._TestGetMessageStrings(event, expected_message, expected_message)
        # This event uses XZ compressed data
        event = events[2098]
        self.CheckTimestamp(event.timestamp, '2017-02-06 16:24:32.564585')
        # 692 repeated characters force the short-message truncation path.
        expected_message = 'test-VirtualBox [root, pid: 22921] {0:s}'.format(
            'a' * 692)
        expected_short_message = '{0:s}...'.format(expected_message[:77])
        self._TestGetMessageStrings(event, expected_message, expected_short_message)

    def testParseDirty(self):
        """Tests the Parse function on a 'dirty' journal file."""
        storage_writer = self._CreateStorageWriter()
        parser_mediator = self._CreateParserMediator(storage_writer)
        parser = systemd_journal.SystemdJournalParser()
        # Journal file whose on-disk state was not cleanly closed ("TILDE"
        # stands in for the trailing '~' of a dirty journal filename).
        path_segments = [
            'systemd', 'journal',
            'system@00053f9c9a4c1e0e-2e18a70e8b327fed.journalTILDE'
        ]
        file_entry = self._GetTestFileEntry(path_segments)
        file_object = file_entry.GetFileObject()
        parser.ParseFileObject(parser_mediator, file_object)
        self.assertEqual(storage_writer.number_of_events, 2211)
        events = list(storage_writer.GetEvents())
        event = events[0]
        self.CheckTimestamp(event.timestamp, '2016-10-24 13:20:01.063423')
        expected_message = (
            'test-VirtualBox [systemd-journald, pid: 569] Runtime journal '
            '(/run/log/journal/) is 1.2M, max 9.9M, 8.6M free.')
        expected_short_message = '{0:s}...'.format(expected_message[:77])
        self._TestGetMessageStrings(event, expected_message, expected_short_message)
        # Parsing a dirty journal is expected to stop with exactly one error.
        self.assertEqual(storage_writer.number_of_errors, 1)
        errors = list(storage_writer.GetErrors())
        error = errors[0]
        expected_error_message = (
            'Unable to complete parsing journal file: '
            'object offset should be after hash tables (4308912 < 2527472) at '
            'offset 0x0041bfb0')
        self.assertEqual(error.message, expected_error_message)
# Allow running the tests in this module directly.
if __name__ == '__main__':
    unittest.main()
from __future__ import unicode_literals
import unittest
try:
from plaso.parsers import systemd_journal
except ImportError:
systemd_journal = None
from tests.parsers import test_lib
@unittest.skipIf(systemd_journal is None, 'requires LZMA compression support')
class SystemdJournalParserTest(test_lib.ParserTestCase):
"""Tests for the Systemd Journal parser."""
def testParse(self):
"""Tests the Parse function."""
parser = systemd_journal.SystemdJournalParser()
storage_writer = self._ParseFile([
'systemd', 'journal', 'system.journal'], parser)
self.assertEqual(storage_writer.number_of_events, 2101)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2017-01-27 09:40:55.913258')
expected_message = (
'test-VirtualBox [systemd, pid: 1] Started User Manager for '
'UID 1000.')
self._TestGetMessageStrings(event, expected_message, expected_message)
# This event uses XZ compressed data
event = events[2098]
self.CheckTimestamp(event.timestamp, '2017-02-06 16:24:32.564585')
expected_message = 'test-VirtualBox [root, pid: 22921] {0:s}'.format(
'a' * 692)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
def testParseDirty(self):
"""Tests the Parse function on a 'dirty' journal file."""
storage_writer = self._CreateStorageWriter()
parser_mediator = self._CreateParserMediator(storage_writer)
parser = systemd_journal.SystemdJournalParser()
path_segments = [
'systemd', 'journal',
'system@00053f9c9a4c1e0e-2e18a70e8b327fed.journalTILDE'
]
file_entry = self._GetTestFileEntry(path_segments)
file_object = file_entry.GetFileObject()
parser.ParseFileObject(parser_mediator, file_object)
self.assertEqual(storage_writer.number_of_events, 2211)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2016-10-24 13:20:01.063423')
expected_message = (
'test-VirtualBox [systemd-journald, pid: 569] Runtime journal '
'(/run/log/journal/) is 1.2M, max 9.9M, 8.6M free.')
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
self.assertEqual(storage_writer.number_of_errors, 1)
errors = list(storage_writer.GetErrors())
error = errors[0]
expected_error_message = (
'Unable to complete parsing journal file: '
'object offset should be after hash tables (4308912 < 2527472) at '
'offset 0x0041bfb0')
self.assertEqual(error.message, expected_error_message)
if __name__ == '__main__':
unittest.main() | 0.802865 | 0.496826 |
from typing import List, Tuple
import numpy as np
from tensorflow import keras
from tensorflow.keras.preprocessing import sequence
from src.binaryds import BinaryDs
LN100 = 2 * np.log(10)
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence streaming (X, y) batches from a BinaryDs dataset.

    Batches are read lazily from the dataset and optionally truncated
    ("fake padded") so the network learns to cope with shorter inputs.
    """

    def __init__(self, dataset: BinaryDs, batch_size: int,
                 predict: bool = False,
                 fake_pad: bool = False, pad_len: int = 0):
        self.dataset: BinaryDs = dataset
        self.batch_size = batch_size
        self.indices: List[int] = []  # batch visiting order for the epoch
        self.fake_pad = fake_pad      # randomly truncate raw samples
        self.pad_len = pad_len        # fixed truncation length (0 = random)
        self.predict = predict        # inference mode: no shuffling, X only
        self.len = 0
        self.remainder = 0
        self.__init_len()
        self.on_epoch_end()

    def __init_len(self):
        """Set self.len (batch count) and self.remainder (last batch size)."""
        # divmod yields both the full-batch count and the leftover in one call.
        self.len, self.remainder = divmod(self.dataset.get_examples_no(),
                                          self.batch_size)
        if self.remainder > 0:
            self.len += 1  # one extra, smaller, final batch

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        real_index = self.indices[index]
        # The last batch only holds the remainder when the dataset size is
        # not an exact multiple of batch_size.
        if self.remainder > 0 and real_index == self.len - 1:
            amount = self.remainder
        else:
            amount = self.batch_size
        data = self.dataset.read(real_index * self.batch_size, amount)
        return self.__generate_sequences(data)

    def on_epoch_end(self):
        # Reshuffle the batch order between epochs (training only).
        self.indices = np.arange(self.len)
        if not self.predict:
            np.random.shuffle(self.indices)

    @staticmethod
    def _truncate(samples, cut):
        """Drop cut[i] trailing bytes from each sample; non-positive cuts keep
        the sample intact.

        BUGFIX: the original used `list(sample)[:-cut[idx]]`, which EMPTIES the
        sample when the cut is 0 (`[:-0]` == `[:0]`) and silently head-slices
        it when the cut is negative (pad_len > features).
        """
        return [list(s)[:-c] if c > 0 else list(s)
                for s, c in zip(samples, cut)]

    def __generate_sequences(self, data: List[Tuple[int, bytes]]):
        """
        Generates the pairs (X, y) that will be used during the training.
        More specifically generates y and shuffle X.
        If fake_pad is true, randomly removes data from X. This is useful in case
        the training samples have always the same amount of features, but during
        inference this number may change.
        :param data: binary dataset containing the samples
        :return: (X, y) as np.arrays. X shape will be (samples, features),
        y will be (samples) or (samples, categories) depending if binary or
        multiclass classification
        """
        cats = self.dataset.get_categories()
        y, x = zip(*data)
        if cats > 2:
            y = keras.utils.to_categorical(y, num_classes=cats)
        else:
            # Binary classification: wrap labels so y has shape (samples, 1).
            # (Renamed the loop variable: the original shadowed `y` inside
            # its own comprehension.)
            y = [[label] for label in y]
        # keras does not like bytearrays, so int lists are used below.
        # Cut a portion of each example so the network learns to deal with
        # padding.
        if not self.dataset.is_encoded() and self.fake_pad:
            if self.pad_len == 0:
                # Amount of removed data randomly decided.
                limit = self.dataset.features - 32
                # 99% of values should fall between 0 and limit.
                elambda = LN100 / limit
                beta = 1 / elambda
                d = np.random.default_rng().exponential(beta, size=len(x))
                # Clamping distorts the distribution; not a big deal here.
                cut = np.array(np.floor(np.clip(d, 0, limit)), dtype=np.int32)
                x = self._truncate(x, cut)
            elif self.dataset.features != self.pad_len:
                # Amount of removed data is a fixed value.
                cut = np.full(len(x), self.dataset.features - self.pad_len)
                x = self._truncate(x, cut)
            else:
                x = [list(sample) for sample in x]
        elif self.dataset.is_encoded() and self.pad_len != 0:
            # Keep only the last `pad_len` bytes of encoded examples that are
            # longer than requested (leading 0x00 bytes count as padding).
            new_x = []
            for sample in x:
                padded = 0
                for byte in sample:
                    if byte != 0x00:
                        break
                    else:
                        padded += 1
                if self.dataset.get_features() - padded <= self.pad_len:
                    # Shorter than requested: keep intact.
                    new_x.append(sample)
                else:
                    # Longer than requested: cut to the requested length.
                    new_x.append(sample[-self.pad_len:])
            x = [list(sample) for sample in new_x]
        else:
            # Keep everything without removing data.
            x = [list(sample) for sample in x]
        # BUGFIX: pad_sequences accepts the (possibly ragged) list directly;
        # np.array() on ragged lists raises on recent NumPy versions.
        x = sequence.pad_sequences(x, maxlen=self.dataset.features,
                                   padding="pre", truncating="pre",
                                   value=0, dtype="int32")
        y = np.array(y)
        assert len(x) == len(y), \
            "Something went wrong... different X and y len"
        if self.predict:
            return x
        else:
            # Shuffle samples within the batch (same permutation for X and y).
            indices = np.arange(x.shape[0])
            np.random.shuffle(indices)
            x = x[indices]
            y = y[indices]
            return x, y
import numpy as np
from tensorflow import keras
from tensorflow.keras.preprocessing import sequence
from src.binaryds import BinaryDs
LN100 = 2 * np.log(10)
class DataGenerator(keras.utils.Sequence):
def __init__(self, dataset: BinaryDs, batch_size: int,
predict: bool = False,
fake_pad: bool = False, pad_len: int = 0):
self.dataset: BinaryDs = dataset
self.batch_size = batch_size
self.indices: List[int] = []
self.fake_pad = fake_pad
self.pad_len = pad_len
self.predict = predict
self.len = 0
self.remainder = 0
self.__init_len()
self.on_epoch_end()
def __init_len(self):
"""
Sets self.len and self.remainder (used at init time)
:return:
"""
self.len = int(self.dataset.get_examples_no() / self.batch_size)
self.remainder = self.dataset.get_examples_no() % self.batch_size
if self.remainder > 0:
self.len += 1
def __len__(self):
return self.len
def __getitem__(self, index):
real_index = self.indices[index]
if self.remainder > 0 and real_index == self.len - 1:
amount = self.remainder
else:
amount = self.batch_size
data = self.dataset.read(real_index * self.batch_size, amount)
return self.__generate_sequences(data)
def on_epoch_end(self):
self.indices = np.arange(self.len)
if not self.predict:
np.random.shuffle(self.indices)
def __generate_sequences(self, data: List[Tuple[int, bytes]]):
"""
Generates the pairs (X, y) that will be used during the training.
More specifically generates y and shuffle X.
If fake_pad is true, randomly removes data from X. This is useful in case
the training samples have always the same amount of features, but during
inference this number may change.
:param data: binary dataset containing the samples
:return: (X, y) as np.arrays. X shape will be (samples, features),
y will be (samples) or (samples, categories) depending if binary or
multiclass classification
"""
cats = self.dataset.get_categories()
y, x = zip(*data)
if cats > 2:
y = keras.utils.to_categorical(y, num_classes=cats)
else:
y = [[y] for y in y]
# keras does not like bytearrays, so int list then
# cut a portion of example so network learns to deal with padding
if not self.dataset.is_encoded() and self.fake_pad:
# amount of removed data randomly decided
if self.pad_len == 0:
limit = self.dataset.features - 32
# 99% values should be between 0 and limit
elambda = LN100 / limit
beta = 1 / elambda
d = np.random.default_rng().exponential(beta, size=len(x))
# clamping destroys the distribution, not a big deal
cut = np.array(np.floor(np.clip(d, 0, limit)),
dtype=np.int32)
x = [list(sample)[:-cut[idx]] for idx, sample in enumerate(x)]
# amount of removed data is a fixed value
elif self.dataset.features != self.pad_len:
cut = np.full(len(x), self.dataset.features - self.pad_len)
x = [list(sample)[:-cut[idx]] for idx, sample in enumerate(x)]
else:
x = [list(sample) for sample in x]
# keep only encoded examples of `pad_len` length
elif self.dataset.is_encoded() and self.pad_len != 0:
new_x = []
for sample in x:
padded = 0
for byte in sample:
if byte != 0x00:
break
else:
padded += 1
if self.dataset.get_features() - padded <= self.pad_len:
# shorter than requested, keep intact
new_x.append(sample)
else:
# longer than requested, cut it to the requested len
new_x.append(sample[-self.pad_len:])
x = [list(sample) for sample in new_x]
# keep everything without removing data
else:
x = [list(sample) for sample in x]
x = np.array(x)
x = sequence.pad_sequences(x, maxlen=self.dataset.features,
padding="pre", truncating="pre",
value=0, dtype="int32")
y = np.array(y)
assert len(x) == len(y), \
"Something went wrong... different X and y len"
if self.predict:
return x
else:
indices = np.arange(x.shape[0])
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
return x, y | 0.871748 | 0.420629 |
from collections import deque
import time
import datetime
import numpy as np
import pytest
from unittest.mock import MagicMock
from nessai.nestedsampler import NestedSampler
@pytest.fixture()
def sampler(sampler):
    # Extends the base `sampler` fixture (presumably provided by conftest.py —
    # TODO confirm) with a mocked `state`, so the property tests below can set
    # state attributes freely.
    sampler.state = MagicMock()
    return sampler
def test_log_evidence(sampler):
    """Check evidence is returned"""
    sampler.state.logZ = -2
    assert NestedSampler.log_evidence.__get__(sampler) == -2


def test_information(sampler):
    """Check most recent information estimate is returned"""
    sampler.state.info = [1, 2, 3]
    assert NestedSampler.information.__get__(sampler) == 3


def test_likelihood_calls(sampler):
    """Check likelihood calls from model are returned"""
    sampler.model = MagicMock()
    sampler.model.likelihood_evaluations = 10
    assert NestedSampler.likelihood_calls.__get__(sampler) == 10


def test_likelihood_evaluation_time(sampler):
    """Assert the likelihood evaluation time is taken from the model."""
    sampler.model = MagicMock()
    sampler.model.likelihood_evaluation_time = 1.0
    assert NestedSampler.likelihood_evaluation_time.__get__(sampler) == 1.0


def test_population_time(sampler):
    """Assert the time is the sum of the times of the individual proposals."""
    sampler._uninformed_proposal = MagicMock()
    sampler._flow_proposal = MagicMock()
    sampler._uninformed_proposal.population_time = 1
    sampler._flow_proposal.population_time = 2
    assert NestedSampler.proposal_population_time.__get__(sampler) == 3


def test_acceptance(sampler):
    """Test the acceptance calculation (iterations / likelihood calls)."""
    sampler.iteration = 10
    sampler.likelihood_calls = 100
    assert NestedSampler.acceptance.__get__(sampler) == 0.1


def test_current_sampling_time(sampler):
    """Test the current sampling time"""
    sampler.finalised = False
    sampler.sampling_time = datetime.timedelta(seconds=10)
    sampler.sampling_start_time = datetime.datetime.now()
    # Sleep so a measurable amount of time elapses past the stored 10 s.
    time.sleep(0.01)
    t = NestedSampler.current_sampling_time.__get__(sampler)
    assert t.total_seconds() > 10.


def test_current_sampling_time_finalised(sampler):
    """Test the current sampling time if the sampling has been finalised"""
    sampler.finalised = True
    sampler.sampling_time = 10
    assert NestedSampler.current_sampling_time.__get__(sampler) == 10


def test_last_updated(sampler):
    """Assert last training iteration is returned"""
    sampler.training_iterations = [10, 20]
    assert NestedSampler.last_updated.__get__(sampler) == 20


def test_last_updated_no_training(sampler):
    """Assert 0 is returned if the flow has not been trained."""
    sampler.training_iterations = []
    assert NestedSampler.last_updated.__get__(sampler) == 0


def test_mean_acceptance(sampler):
    """Assert the mean is returned"""
    sampler.acceptance_history = [1.0, 2.0, 3.0]
    assert NestedSampler.mean_acceptance.__get__(sampler) == 2.0
def test_mean_acceptance_empty(sampler):
    """Assert nan is returned if no points have been proposed"""
    sampler.acceptance_history = deque(maxlen=10)
    assert np.isnan(NestedSampler.mean_acceptance.__get__(sampler))
import time
import datetime
import numpy as np
import pytest
from unittest.mock import MagicMock
from nessai.nestedsampler import NestedSampler
@pytest.fixture()
def sampler(sampler):
sampler.state = MagicMock()
return sampler
def test_log_evidence(sampler):
"""Check evidence is returned"""
sampler.state.logZ = -2
assert NestedSampler.log_evidence.__get__(sampler) == -2
def test_information(sampler):
"""Check most recent information estimate is returned"""
sampler.state.info = [1, 2, 3]
assert NestedSampler.information.__get__(sampler) == 3
def test_likelihood_calls(sampler):
"""Check likelihood calls from model are returned"""
sampler.model = MagicMock()
sampler.model.likelihood_evaluations = 10
assert NestedSampler.likelihood_calls.__get__(sampler) == 10
def test_likelihood_evaluation_time(sampler):
"""Assert the time is the some of the time for individual proposals"""
sampler.model = MagicMock()
sampler.model.likelihood_evaluation_time = 1.0
assert NestedSampler.likelihood_evaluation_time.__get__(sampler) == 1.0
def test_population_time(sampler):
"""Assert the time is the some of the time for individual proposals"""
sampler._uninformed_proposal = MagicMock()
sampler._flow_proposal = MagicMock()
sampler._uninformed_proposal.population_time = 1
sampler._flow_proposal.population_time = 2
assert NestedSampler.proposal_population_time.__get__(sampler) == 3
def test_acceptance(sampler):
"""Test the acceptance calculation"""
sampler.iteration = 10
sampler.likelihood_calls = 100
assert NestedSampler.acceptance.__get__(sampler) == 0.1
def test_current_sampling_time(sampler):
"""Test the current sampling time"""
sampler.finalised = False
sampler.sampling_time = datetime.timedelta(seconds=10)
sampler.sampling_start_time = datetime.datetime.now()
time.sleep(0.01)
t = NestedSampler.current_sampling_time.__get__(sampler)
assert t.total_seconds() > 10.
def test_current_sampling_time_finalised(sampler):
"""Test the current sampling time if the sampling has been finalised"""
sampler.finalised = True
sampler.sampling_time = 10
assert NestedSampler.current_sampling_time.__get__(sampler) == 10
def test_last_updated(sampler):
"""Assert last training iteration is returned"""
sampler.training_iterations = [10, 20]
assert NestedSampler.last_updated.__get__(sampler) == 20
def test_last_updated_no_training(sampler):
"""Assert None is return if the flow has not been trained"""
sampler.training_iterations = []
assert NestedSampler.last_updated.__get__(sampler) == 0
def test_mean_acceptance(sampler):
"""Assert the mean is returned"""
sampler.acceptance_history = [1.0, 2.0, 3.0]
assert NestedSampler.mean_acceptance.__get__(sampler) == 2.0
def test_mean_acceptance_empty(sampler):
"""Assert nan is returned if no points have been proposed"""
sampler.acceptance_history = deque(maxlen=10)
assert np.isnan(NestedSampler.mean_acceptance.__get__(sampler)) | 0.904839 | 0.682174 |
import math
from argparse import ArgumentParser
from datetime import timedelta as delta
import numpy as np
import pytest
from parcels import AdvectionEE
from parcels import AdvectionRK4
from parcels import AdvectionRK45
from parcels import FieldSet
from parcels import JITParticle
from parcels import ParticleSet
from parcels import ScipyParticle
from parcels import timer
from parcels import Variable
# Map mode/method names (as used by the CLI and test parametrisation) to the
# corresponding parcels particle classes and advection kernels.
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def stommel_fieldset(xdim=200, ydim=200, grid_type='A'):
    """Simulate a periodic current along a western boundary, with significantly
    larger velocities along the western edge than the rest of the region

    The original test description can be found in: Nicoletta Fabbroni, 2009,
    Numerical Simulation of Passive tracers dispersion in the sea,
    Ph.D. dissertation, University of Bologna
    http://amsdottorato.unibo.it/1733/1/Fabbroni_Nicoletta_Tesi.pdf
    """
    a = b = 10000 * 1e3  # square flat-mesh domain, 10,000 km per side
    scalefac = 0.05  # to scale for physically meaningful velocities
    dx, dy = a / xdim, b / ydim
    # Coordinates of the test fieldset (on A-grid in deg)
    lon = np.linspace(0, a, xdim, dtype=np.float32)
    lat = np.linspace(0, b, ydim, dtype=np.float32)
    # Define arrays U (zonal), V (meridional) and P (sea surface height)
    U = np.zeros((lat.size, lon.size), dtype=np.float32)
    V = np.zeros((lat.size, lon.size), dtype=np.float32)
    P = np.zeros((lat.size, lon.size), dtype=np.float32)
    beta = 2e-11  # presumably the beta-plane vorticity gradient — TODO confirm
    r = 1/(11.6*86400)  # frictional decay rate (1 / ~11.6 days)
    es = r/(beta*a)  # non-dimensional western boundary-layer width
    for j in range(lat.size):
        for i in range(lon.size):
            # Non-dimensional coordinates in [0, 1].
            xi = lon[i] / a
            yi = lat[j] / b
            P[j, i] = (1 - math.exp(-xi / es) - xi) * math.pi * np.sin(math.pi * yi) * scalefac
            if grid_type == 'A':
                # Analytic velocities co-located with P (A-grid).
                U[j, i] = -(1 - math.exp(-xi / es) - xi) * math.pi ** 2 * np.cos(math.pi * yi) * scalefac
                V[j, i] = (math.exp(-xi / es) / es - 1) * math.pi * np.sin(math.pi * yi) * scalefac
    if grid_type == 'C':
        # C-grid: velocities from finite differences of P on staggered points.
        V[:, 1:] = (P[:, 1:] - P[:, 0:-1]) / dx * a
        U[1:, :] = -(P[1:, :] - P[0:-1, :]) / dy * b
    data = {'U': U, 'V': V, 'P': P}
    dimensions = {'lon': lon, 'lat': lat}
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
    if grid_type == 'C':
        fieldset.U.interp_method = 'cgrid_velocity'
        fieldset.V.interp_method = 'cgrid_velocity'
    return fieldset
# Parcels kernel: sample the sea-surface-height field P at the particle's
# current position and store it on the particle.  (Kept docstring-free because
# parcels translates the kernel body.)
def UpdateP(particle, fieldset, time):
    particle.p = fieldset.P[time, particle.depth, particle.lat, particle.lon]
def stommel_example(npart=1, mode='jit', verbose=False, method=AdvectionRK4, grid_type='A',
outfile="StommelParticle.nc", repeatdt=None, write_fields=True):
timer.fieldset = timer.Timer('FieldSet', parent=timer.stommel)
fieldset = stommel_fieldset(grid_type=grid_type)
if write_fields:
filename = 'stommel'
fieldset.write(filename)
timer.fieldset.stop()
# Determine particle class according to mode
timer.pset = timer.Timer('Pset', parent=timer.stommel)
timer.psetinit = timer.Timer('Pset_init', parent=timer.pset)
ParticleClass = JITParticle if mode == 'jit' else ScipyParticle
class MyParticle(ParticleClass):
p = Variable('p', dtype=np.float32, initial=0.)
p_start = Variable('p_start', dtype=np.float32, initial=fieldset.P)
pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle, repeatdt=repeatdt,
start=(10e3, 5000e3), finish=(100e3, 5000e3), time=0)
if verbose:
print("Initial particle positions:\n%s" % pset)
# Execute for 30 days, with 1hour timesteps and 12-hourly output
runtime = delta(days=600)
dt = delta(hours=1)
outputdt = delta(days=5)
print("Stommel: Advecting %d particles for %s" % (npart, runtime))
timer.psetinit.stop()
timer.psetrun = timer.Timer('Pset_run', parent=timer.pset)
pset.execute(method + pset.Kernel(UpdateP), runtime=runtime, dt=dt,
moviedt=None, output_file=pset.ParticleFile(name=outfile, outputdt=outputdt))
if verbose:
print("Final particle positions:\n%s" % pset)
timer.psetrun.stop()
timer.pset.stop()
return pset
@pytest.mark.parametrize('grid_type', ['A', 'C'])
@pytest.mark.parametrize('mode', ['jit', 'scipy'])
def test_stommel_fieldset(mode, grid_type, tmpdir):
timer.root = timer.Timer('Main')
timer.stommel = timer.Timer('Stommel', parent=timer.root)
outfile = tmpdir.join("StommelParticle")
psetRK4 = stommel_example(1, mode=mode, method=method['RK4'], grid_type=grid_type, outfile=outfile, write_fields=False)
psetRK45 = stommel_example(1, mode=mode, method=method['RK45'], grid_type=grid_type, outfile=outfile, write_fields=False)
assert np.allclose(psetRK4.lon, psetRK45.lon, rtol=1e-3)
assert np.allclose(psetRK4.lat, psetRK45.lat, rtol=1.1e-3)
err_adv = np.abs(psetRK4.p_start - psetRK4.p)
assert(err_adv <= 1.e-1).all()
err_smpl = np.array([abs(psetRK4.p[i] - psetRK4.fieldset.P[0., psetRK4.lon[i], psetRK4.lat[i], psetRK4.depth[i]]) for i in range(psetRK4.size)])
assert(err_smpl <= 1.e-1).all()
timer.stommel.stop()
timer.root.stop()
timer.root.print_tree()
if __name__ == "__main__":
timer.root = timer.Timer('Main')
timer.args = timer.Timer('Args', parent=timer.root)
p = ArgumentParser(description="""
Example of particle advection in the steady-state solution of the Stommel equation""")
p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
help='Execution mode for performing computation')
p.add_argument('-p', '--particles', type=int, default=1,
help='Number of particles to advect')
p.add_argument('-v', '--verbose', action='store_true', default=False,
help='Print particle information before and after execution')
p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
help='Numerical method used for advection')
p.add_argument('-o', '--outfile', default='StommelParticle.nc',
help='Name of output file')
p.add_argument('-r', '--repeatdt', default=None, type=int,
help='repeatdt of the ParticleSet')
args = p.parse_args()
timer.args.stop()
timer.stommel = timer.Timer('Stommel', parent=timer.root)
stommel_example(args.particles, mode=args.mode, verbose=args.verbose, method=method[args.method],
outfile=args.outfile, repeatdt=args.repeatdt)
timer.stommel.stop()
timer.root.stop()
timer.root.print_tree() | parcels/examples/example_stommel.py | import math
from argparse import ArgumentParser
from datetime import timedelta as delta
import numpy as np
import pytest
from parcels import AdvectionEE
from parcels import AdvectionRK4
from parcels import AdvectionRK45
from parcels import FieldSet
from parcels import JITParticle
from parcels import ParticleSet
from parcels import ScipyParticle
from parcels import timer
from parcels import Variable
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def stommel_fieldset(xdim=200, ydim=200, grid_type='A'):
"""Simulate a periodic current along a western boundary, with significantly
larger velocities along the western edge than the rest of the region
The original test description can be found in: <NAME>, 2009,
Numerical Simulation of Passive tracers dispersion in the sea,
Ph.D. dissertation, University of Bologna
http://amsdottorato.unibo.it/1733/1/Fabbroni_Nicoletta_Tesi.pdf
"""
a = b = 10000 * 1e3
scalefac = 0.05 # to scale for physically meaningful velocities
dx, dy = a / xdim, b / ydim
# Coordinates of the test fieldset (on A-grid in deg)
lon = np.linspace(0, a, xdim, dtype=np.float32)
lat = np.linspace(0, b, ydim, dtype=np.float32)
# Define arrays U (zonal), V (meridional) and P (sea surface height)
U = np.zeros((lat.size, lon.size), dtype=np.float32)
V = np.zeros((lat.size, lon.size), dtype=np.float32)
P = np.zeros((lat.size, lon.size), dtype=np.float32)
beta = 2e-11
r = 1/(11.6*86400)
es = r/(beta*a)
for j in range(lat.size):
for i in range(lon.size):
xi = lon[i] / a
yi = lat[j] / b
P[j, i] = (1 - math.exp(-xi / es) - xi) * math.pi * np.sin(math.pi * yi) * scalefac
if grid_type == 'A':
U[j, i] = -(1 - math.exp(-xi / es) - xi) * math.pi ** 2 * np.cos(math.pi * yi) * scalefac
V[j, i] = (math.exp(-xi / es) / es - 1) * math.pi * np.sin(math.pi * yi) * scalefac
if grid_type == 'C':
V[:, 1:] = (P[:, 1:] - P[:, 0:-1]) / dx * a
U[1:, :] = -(P[1:, :] - P[0:-1, :]) / dy * b
data = {'U': U, 'V': V, 'P': P}
dimensions = {'lon': lon, 'lat': lat}
fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
if grid_type == 'C':
fieldset.U.interp_method = 'cgrid_velocity'
fieldset.V.interp_method = 'cgrid_velocity'
return fieldset
def UpdateP(particle, fieldset, time):
particle.p = fieldset.P[time, particle.depth, particle.lat, particle.lon]
def stommel_example(npart=1, mode='jit', verbose=False, method=AdvectionRK4, grid_type='A',
outfile="StommelParticle.nc", repeatdt=None, write_fields=True):
timer.fieldset = timer.Timer('FieldSet', parent=timer.stommel)
fieldset = stommel_fieldset(grid_type=grid_type)
if write_fields:
filename = 'stommel'
fieldset.write(filename)
timer.fieldset.stop()
# Determine particle class according to mode
timer.pset = timer.Timer('Pset', parent=timer.stommel)
timer.psetinit = timer.Timer('Pset_init', parent=timer.pset)
ParticleClass = JITParticle if mode == 'jit' else ScipyParticle
class MyParticle(ParticleClass):
p = Variable('p', dtype=np.float32, initial=0.)
p_start = Variable('p_start', dtype=np.float32, initial=fieldset.P)
pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle, repeatdt=repeatdt,
start=(10e3, 5000e3), finish=(100e3, 5000e3), time=0)
if verbose:
print("Initial particle positions:\n%s" % pset)
# Execute for 30 days, with 1hour timesteps and 12-hourly output
runtime = delta(days=600)
dt = delta(hours=1)
outputdt = delta(days=5)
print("Stommel: Advecting %d particles for %s" % (npart, runtime))
timer.psetinit.stop()
timer.psetrun = timer.Timer('Pset_run', parent=timer.pset)
pset.execute(method + pset.Kernel(UpdateP), runtime=runtime, dt=dt,
moviedt=None, output_file=pset.ParticleFile(name=outfile, outputdt=outputdt))
if verbose:
print("Final particle positions:\n%s" % pset)
timer.psetrun.stop()
timer.pset.stop()
return pset
@pytest.mark.parametrize('grid_type', ['A', 'C'])
@pytest.mark.parametrize('mode', ['jit', 'scipy'])
def test_stommel_fieldset(mode, grid_type, tmpdir):
timer.root = timer.Timer('Main')
timer.stommel = timer.Timer('Stommel', parent=timer.root)
outfile = tmpdir.join("StommelParticle")
psetRK4 = stommel_example(1, mode=mode, method=method['RK4'], grid_type=grid_type, outfile=outfile, write_fields=False)
psetRK45 = stommel_example(1, mode=mode, method=method['RK45'], grid_type=grid_type, outfile=outfile, write_fields=False)
assert np.allclose(psetRK4.lon, psetRK45.lon, rtol=1e-3)
assert np.allclose(psetRK4.lat, psetRK45.lat, rtol=1.1e-3)
err_adv = np.abs(psetRK4.p_start - psetRK4.p)
assert(err_adv <= 1.e-1).all()
err_smpl = np.array([abs(psetRK4.p[i] - psetRK4.fieldset.P[0., psetRK4.lon[i], psetRK4.lat[i], psetRK4.depth[i]]) for i in range(psetRK4.size)])
assert(err_smpl <= 1.e-1).all()
timer.stommel.stop()
timer.root.stop()
timer.root.print_tree()
if __name__ == "__main__":
timer.root = timer.Timer('Main')
timer.args = timer.Timer('Args', parent=timer.root)
p = ArgumentParser(description="""
Example of particle advection in the steady-state solution of the Stommel equation""")
p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
help='Execution mode for performing computation')
p.add_argument('-p', '--particles', type=int, default=1,
help='Number of particles to advect')
p.add_argument('-v', '--verbose', action='store_true', default=False,
help='Print particle information before and after execution')
p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
help='Numerical method used for advection')
p.add_argument('-o', '--outfile', default='StommelParticle.nc',
help='Name of output file')
p.add_argument('-r', '--repeatdt', default=None, type=int,
help='repeatdt of the ParticleSet')
args = p.parse_args()
timer.args.stop()
timer.stommel = timer.Timer('Stommel', parent=timer.root)
stommel_example(args.particles, mode=args.mode, verbose=args.verbose, method=method[args.method],
outfile=args.outfile, repeatdt=args.repeatdt)
timer.stommel.stop()
timer.root.stop()
timer.root.print_tree() | 0.645455 | 0.408631 |
"tigereye task module."
import abc
import string
import argparse
from .error import InternalError, UsageError
from .util import subclasses, funcargs_eval, error_warn, args_pop
class Task(object):
__metaclass__ = abc.ABCMeta
def __new__(cls, targv):
parser = argparse.ArgumentParser(description='tigereye %s'%cls.__name__)
parser.add_argument('--import-task', metavar='task', action='append', help='import task')
parser.add_argument('--import-function', metavar='function', action='append', help='import function')
parser.add_argument('--import-module', metavar='module', action='append', help='import module')
parser.add_argument('--name', metavar='task name', help='task name')
parser.add_argument('--calc', metavar='calc', action='append', help='python code for manipulating data.')
parser.add_argument('--output', metavar='output', action='append', help='output variable.')
targv, add_opts = args_pop(targv, "--add-option", 1)
targv, discard_opts = args_pop(targv, "--discard-option", 1)
for opt in add_opts:
import pdb; pdb.set_trace()
_discard_opts = []
for opt in discard_opts:
opt = opt.strip()
if opt.startswith("--"):
opt = opt[2:]
elif opt.startswith("-"):
opt = opt[1:]
opt.replace("-", "_")
_discard_opts.append(opt)
obj = super(Task, cls).__new__(cls)
obj.parser = parser
obj.targs = None
obj._discard_options = _discard_opts
return obj
@abc.abstractmethod
def __init__(self, targv):
pass
def run(self, gvars):
assert self.targs is not None
if self.targs.import_task:
self.targs.import_task = None
if self.targs.import_function:
self.targs.import_function = None
if self.targs.import_module:
self.targs.import_module = None
for opt in self._discard_options:
setattr(self.targs, opt, None)
newgvars = dict(gvars)
if hasattr(self.targs, 'calc') and self.targs.calc:
for calc in self.targs.calc:
self.handle_calc_opt(calc, newgvars)
out = self.perform(newgvars)
if hasattr(self.targs, 'output') and self.targs.output:
for output_arg in self.targs.output:
s = output_arg.split("$")
vargs, kwargs = funcargs_eval(s[0], s[1:], newgvars)
for k, v in kwargs.items():
if k not in string.ascii_uppercase[:26]:
gvars[k] = v
else:
raise UsageError("'%s' is a reserved word."%k)
@abc.abstractmethod
def perform(self, gvars):
pass
def handle_calc_opt(self, calc, gvars):
s = calc.split("$")
vargs, kwargs = funcargs_eval(s[0], s[1:], gvars)
for k, v in kwargs.items():
if k not in string.ascii_uppercase[:26]:
gvars[k] = v
else:
error_warn("'%s' is a reserved word."%k) | tigereye/task.py | "tigereye task module."
import abc
import string
import argparse
from .error import InternalError, UsageError
from .util import subclasses, funcargs_eval, error_warn, args_pop
class Task(object):
__metaclass__ = abc.ABCMeta
def __new__(cls, targv):
parser = argparse.ArgumentParser(description='tigereye %s'%cls.__name__)
parser.add_argument('--import-task', metavar='task', action='append', help='import task')
parser.add_argument('--import-function', metavar='function', action='append', help='import function')
parser.add_argument('--import-module', metavar='module', action='append', help='import module')
parser.add_argument('--name', metavar='task name', help='task name')
parser.add_argument('--calc', metavar='calc', action='append', help='python code for manipulating data.')
parser.add_argument('--output', metavar='output', action='append', help='output variable.')
targv, add_opts = args_pop(targv, "--add-option", 1)
targv, discard_opts = args_pop(targv, "--discard-option", 1)
for opt in add_opts:
import pdb; pdb.set_trace()
_discard_opts = []
for opt in discard_opts:
opt = opt.strip()
if opt.startswith("--"):
opt = opt[2:]
elif opt.startswith("-"):
opt = opt[1:]
opt.replace("-", "_")
_discard_opts.append(opt)
obj = super(Task, cls).__new__(cls)
obj.parser = parser
obj.targs = None
obj._discard_options = _discard_opts
return obj
@abc.abstractmethod
def __init__(self, targv):
pass
def run(self, gvars):
assert self.targs is not None
if self.targs.import_task:
self.targs.import_task = None
if self.targs.import_function:
self.targs.import_function = None
if self.targs.import_module:
self.targs.import_module = None
for opt in self._discard_options:
setattr(self.targs, opt, None)
newgvars = dict(gvars)
if hasattr(self.targs, 'calc') and self.targs.calc:
for calc in self.targs.calc:
self.handle_calc_opt(calc, newgvars)
out = self.perform(newgvars)
if hasattr(self.targs, 'output') and self.targs.output:
for output_arg in self.targs.output:
s = output_arg.split("$")
vargs, kwargs = funcargs_eval(s[0], s[1:], newgvars)
for k, v in kwargs.items():
if k not in string.ascii_uppercase[:26]:
gvars[k] = v
else:
raise UsageError("'%s' is a reserved word."%k)
@abc.abstractmethod
def perform(self, gvars):
pass
def handle_calc_opt(self, calc, gvars):
s = calc.split("$")
vargs, kwargs = funcargs_eval(s[0], s[1:], gvars)
for k, v in kwargs.items():
if k not in string.ascii_uppercase[:26]:
gvars[k] = v
else:
error_warn("'%s' is a reserved word."%k) | 0.327023 | 0.10217 |
from __future__ import print_function
import json
import logging
import sys
from distutils import spawn
_ERROR_MESSAGE_EXECUTABLE_MARKER = "__EXECUTABLE_NAME__"
_ERROR_MESSAGE_DEFAULT_TEMPLATE = "Not found: {}".format(_ERROR_MESSAGE_EXECUTABLE_MARKER)
def configure_logger():
"""
Configures the logging settings to log more information than default and set the appropriate log level.
"""
logger = logging.getLogger("require_executable")
formatter = logging.Formatter(
fmt="%(levelname)-8s %(asctime)s %(name)-28s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def main():
logger = configure_logger()
logger.info("Reading json input from stdin")
query = json.loads(sys.stdin.read())
if "required_executables" not in query:
logger.error("Input json is missing required key \"required_executables\".")
sys.exit(1)
required_executables = query["required_executables"].split(",")
error_message_template = query.get("error_message", _ERROR_MESSAGE_DEFAULT_TEMPLATE)
found = {}
not_found = []
for executable in required_executables:
# Ignore empty string
if not executable.strip():
continue
maybe_executable = spawn.find_executable(executable)
if not maybe_executable:
not_found.append(executable)
else:
logger.info("{} resolved to {}".format(executable, maybe_executable))
found[executable] = maybe_executable
if len(not_found) > 0:
logger.error("Not all executables found:\n")
for executable in not_found:
print(error_message_template.replace(_ERROR_MESSAGE_EXECUTABLE_MARKER, executable), file=sys.stderr)
sys.exit(1)
# Output json to stdout so terraform can read it in
print(json.dumps(found))
if __name__ == "__main__":
main() | modules/require-executable/require_executable.py | from __future__ import print_function
import json
import logging
import sys
from distutils import spawn
_ERROR_MESSAGE_EXECUTABLE_MARKER = "__EXECUTABLE_NAME__"
_ERROR_MESSAGE_DEFAULT_TEMPLATE = "Not found: {}".format(_ERROR_MESSAGE_EXECUTABLE_MARKER)
def configure_logger():
"""
Configures the logging settings to log more information than default and set the appropriate log level.
"""
logger = logging.getLogger("require_executable")
formatter = logging.Formatter(
fmt="%(levelname)-8s %(asctime)s %(name)-28s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def main():
logger = configure_logger()
logger.info("Reading json input from stdin")
query = json.loads(sys.stdin.read())
if "required_executables" not in query:
logger.error("Input json is missing required key \"required_executables\".")
sys.exit(1)
required_executables = query["required_executables"].split(",")
error_message_template = query.get("error_message", _ERROR_MESSAGE_DEFAULT_TEMPLATE)
found = {}
not_found = []
for executable in required_executables:
# Ignore empty string
if not executable.strip():
continue
maybe_executable = spawn.find_executable(executable)
if not maybe_executable:
not_found.append(executable)
else:
logger.info("{} resolved to {}".format(executable, maybe_executable))
found[executable] = maybe_executable
if len(not_found) > 0:
logger.error("Not all executables found:\n")
for executable in not_found:
print(error_message_template.replace(_ERROR_MESSAGE_EXECUTABLE_MARKER, executable), file=sys.stderr)
sys.exit(1)
# Output json to stdout so terraform can read it in
print(json.dumps(found))
if __name__ == "__main__":
main() | 0.369315 | 0.051918 |
import csv
import logging
import StringIO
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import graph_data
class GraphCsvHandler(request_handler.RequestHandler):
"""Request handler for getting data from one series as CSV."""
def get(self):
"""Gets CSV from data store and outputs it.
Request parameters:
test_path: Full test path of one trace.
rev: End revision number; if not given, latest revision is used.
num_points: Number of Rows to get data for.
attr: Comma-separated list of attributes (columns) to return.
Outputs:
CSV file contents.
"""
test_path = self.request.get('test_path')
rev = self.request.get('rev')
num_points = int(self.request.get('num_points', 500))
attributes = self.request.get('attr', 'revision,value').split(',')
if not test_path:
self.ReportError('No test path given.', status=400)
return
logging.info('Got request to /graph_csv for test: "%s".', test_path)
test_key = utils.TestKey(test_path)
test = test_key.get()
assert(datastore_hooks.IsUnalteredQueryPermitted() or
not test.internal_only)
datastore_hooks.SetSinglePrivilegedRequest()
q = graph_data.Row.query()
q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
if rev:
q = q.filter(graph_data.Row.revision <= int(rev))
q = q.order(-graph_data.Row.revision)
points = reversed(q.fetch(limit=num_points))
rows = self._GenerateRows(points, attributes)
output = StringIO.StringIO()
csv.writer(output).writerows(rows)
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.csv' % test.test_name)
self.response.out.write(output.getvalue())
def post(self):
"""A post request is the same as a get request for this endpoint."""
self.get()
def _GenerateRows(self, points, attributes):
"""Generates CSV rows based on the attributes given.
Args:
points: A list of Row entities.
attributes: A list of properties of Row entities to get.
Returns:
A list of lists of attribute values for the given points.
"""
rows = [attributes]
for point in points:
row = []
for attr in attributes:
row.append(getattr(point, attr, ''))
rows.append(row)
return rows | dashboard/dashboard/graph_csv.py | import csv
import logging
import StringIO
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import graph_data
class GraphCsvHandler(request_handler.RequestHandler):
"""Request handler for getting data from one series as CSV."""
def get(self):
"""Gets CSV from data store and outputs it.
Request parameters:
test_path: Full test path of one trace.
rev: End revision number; if not given, latest revision is used.
num_points: Number of Rows to get data for.
attr: Comma-separated list of attributes (columns) to return.
Outputs:
CSV file contents.
"""
test_path = self.request.get('test_path')
rev = self.request.get('rev')
num_points = int(self.request.get('num_points', 500))
attributes = self.request.get('attr', 'revision,value').split(',')
if not test_path:
self.ReportError('No test path given.', status=400)
return
logging.info('Got request to /graph_csv for test: "%s".', test_path)
test_key = utils.TestKey(test_path)
test = test_key.get()
assert(datastore_hooks.IsUnalteredQueryPermitted() or
not test.internal_only)
datastore_hooks.SetSinglePrivilegedRequest()
q = graph_data.Row.query()
q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
if rev:
q = q.filter(graph_data.Row.revision <= int(rev))
q = q.order(-graph_data.Row.revision)
points = reversed(q.fetch(limit=num_points))
rows = self._GenerateRows(points, attributes)
output = StringIO.StringIO()
csv.writer(output).writerows(rows)
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.csv' % test.test_name)
self.response.out.write(output.getvalue())
def post(self):
"""A post request is the same as a get request for this endpoint."""
self.get()
def _GenerateRows(self, points, attributes):
"""Generates CSV rows based on the attributes given.
Args:
points: A list of Row entities.
attributes: A list of properties of Row entities to get.
Returns:
A list of lists of attribute values for the given points.
"""
rows = [attributes]
for point in points:
row = []
for attr in attributes:
row.append(getattr(point, attr, ''))
rows.append(row)
return rows | 0.781497 | 0.28398 |
import base64
import dataclasses
import algosdk
from algosdk.kmd import KMDClient
from algosdk.wallet import Wallet
from algosdk.v2client import algod, indexer
from algosdk import mnemonic, util
from algoapp_method_unittest import *
from asa_state_observer.asa_state_observer_asc1 import (
compile_stateful,
asa_state_observer,
asa_state_observe_closeout_or_clear,
GLOBAL_INTS,
GLOBAL_BYTES,
LOCAL_INTS,
LOCAL_BYTES,
METHOD_ASA_OPTED_IN,
METHOD_ASA_AMOUNT_EQ,
METHOD_ASA_AMOUNT_GT,
METHOD_ASA_AMOUNT_GE,
METHOD_ASA_AMOUNT_LT,
METHOD_ASA_AMOUNT_LE,
)
__author__ = "<NAME> <<EMAIL>>"
ALGOD_ADDRESS = "http://localhost:4001"
ALGOD_TOKEN = 64 * "a"
KMD_ADDRESS = "http://localhost:4002"
KMD_TOKEN = 64 * "a"
INDEXER_ADDRESS = "http://localhost:8980"
INDEXER_TOKEN = 64 * "a"
FUND_ACCOUNT_ALGOS = util.algos_to_microalgos(1000) # Algos
FLAT_FEE = 1000
algod_client = algod.AlgodClient(
algod_token=ALGOD_TOKEN, algod_address=ALGOD_ADDRESS
)
kmd_client = KMDClient(
kmd_token=KMD_TOKEN, kmd_address=KMD_ADDRESS
)
indexer_client = indexer.IndexerClient(
indexer_token=INDEXER_TOKEN, indexer_address=INDEXER_ADDRESS
)
@dataclasses.dataclass
class Account:
address: str
private_key: str
lsig: str = None
def mnemonic(self) -> str:
return mnemonic.from_private_key(self.private_key)
def is_lsig(self):
return not self.private_key and self.lsig
@classmethod
def create_account(cls):
private_key, address = algosdk.account.generate_account()
return cls(private_key=private_key, address=address)
def wait_for_confirmation(client, txid):
"""
Utility function to wait until the transaction is confirmed before
proceeding.
"""
last_round = client.status().get("last-round")
txinfo = client.pending_transaction_info(txid)
while not txinfo.get("confirmed-round", -1) > 0:
print(f"Waiting for transaction {txid} confirmation.")
last_round += 1
client.status_after_block(last_round)
txinfo = client.pending_transaction_info(txid)
print(f"Transaction {txid} confirmed in round "
f"{txinfo.get('confirmed-round')}.")
return txinfo
def get_params(client):
params = client.suggested_params()
params.flat_fee = True
params.fee = FLAT_FEE
return params
def sign(account, txn):
if account.is_lsig():
return transaction.LogicSigTransaction(txn, account.lsig)
else:
assert account.private_key
return txn.sign(account.private_key)
def sign_send_wait(account, txn):
"""Sign a transaction, submit it, and wait for its confirmation."""
signed_txn = sign(account, txn)
tx_id = signed_txn.transaction.get_txid()
transaction.write_to_file([signed_txn], "/tmp/txn.signed", overwrite=True)
algod_client.send_transactions([signed_txn])
wait_for_confirmation(algod_client, tx_id)
return algod_client.pending_transaction_info(tx_id)
def find_sandbox_faucet():
default_wallet_name = kmd_client.list_wallets()[0]["name"]
wallet = Wallet(
default_wallet_name, "", kmd_client
) # Sandbox's wallet has no password
for account_ in wallet.list_keys():
info = indexer_client.account_info(account_).get("account")
if info.get("status") == "Online" and info.get("created-at-round") == 0:
return Account(
address=account_,
private_key=wallet.export_key(account_)
)
raise KeyError("Could not find sandbox faucet")
def create_and_fund(faucet: Account) -> Account:
new_account = Account.create_account()
print(f"Funding new account: {new_account.address}.")
fund(faucet, new_account)
return new_account
def fund(faucet: Account, receiver: Account, amount=FUND_ACCOUNT_ALGOS):
params = get_params(algod_client)
txn = transaction.PaymentTxn(
faucet.address, params, receiver.address, amount
)
return sign_send_wait(faucet, txn)
def compile_program(source_code):
compile_response = algod_client.compile(source_code)
return base64.b64decode(compile_response["result"])
def create_application(creator: Account):
global_schema = transaction.StateSchema(GLOBAL_INTS, GLOBAL_BYTES)
local_schema = transaction.StateSchema(LOCAL_INTS, LOCAL_BYTES)
approval_program_teal = compile_stateful(asa_state_observer())
approval_program = compile_program(approval_program_teal)
with open('/tmp/approval.teal', 'w') as f:
f.write(approval_program_teal)
clear_program_teal = compile_stateful(
asa_state_observe_closeout_or_clear()
)
clear_program = compile_program(clear_program_teal)
with open('/tmp/clear.teal', 'w') as f:
f.write(clear_program_teal)
on_complete = transaction.OnComplete.NoOpOC.real
params = get_params(algod_client)
txn = transaction.ApplicationCreateTxn(
creator.address,
params,
on_complete,
approval_program,
clear_program,
global_schema,
local_schema,
)
transaction_response = sign_send_wait(creator, txn)
return transaction_response["application-index"]
def create_asset(
creator_account,
total: int,
unit_name: str,
asset_name: str,
decimals: int,
frozen: bool = False,
manager=None,
reserve=None,
freeze=None,
clawback=None,
disable_empty_addresses: bool = True,
):
"""Create an asset and return its ID."""
params = get_params(algod_client)
txn = transaction.AssetConfigTxn(
sender=creator_account.address,
sp=params,
total=total,
decimals=decimals,
unit_name=unit_name,
asset_name=asset_name,
manager=creator_account.address if not isinstance(manager, str) else manager,
reserve=creator_account.address if not isinstance(reserve, str) else reserve,
freeze=creator_account.address if not isinstance(freeze, str) else freeze,
clawback=creator_account.address if not isinstance(clawback, str) else clawback,
default_frozen=False if not frozen else True,
strict_empty_address_check=disable_empty_addresses,
)
ptx = sign_send_wait(creator_account, txn)
return ptx["asset-index"]
def call_asa_state_observer(
method: str,
caller: Account,
app_id: int,
target_asa_id: int,
target_account: Account,
amount: int = 0,
):
params = get_params(algod_client)
args = []
if method == METHOD_ASA_OPTED_IN:
args = [METHOD_ASA_OPTED_IN.encode()]
elif method == METHOD_ASA_AMOUNT_EQ:
args = [METHOD_ASA_AMOUNT_EQ.encode(), amount]
elif method == METHOD_ASA_AMOUNT_GT:
args = [METHOD_ASA_AMOUNT_GT.encode(), amount]
elif method == METHOD_ASA_AMOUNT_GE:
args = [METHOD_ASA_AMOUNT_GE.encode(), amount]
elif method == METHOD_ASA_AMOUNT_LT:
args = [METHOD_ASA_AMOUNT_LT.encode(), amount]
elif method == METHOD_ASA_AMOUNT_LE:
args = [METHOD_ASA_AMOUNT_LE.encode(), amount]
else:
quit(f"{method} is an invalid method call.")
test_txn = transaction.ApplicationNoOpTxn(
sender=caller.address,
sp=params,
index=app_id,
app_args=args,
foreign_assets=[target_asa_id],
accounts=[target_account.address],
)
return sign(caller, test_txn)
def test():
faucet = find_sandbox_faucet()
print(f" --- Sandbox ALGO Faucet: {faucet.address}.\n")
deployer = create_and_fund(faucet)
asa_state_observer_id = create_application(deployer)
print(f" --- ASA State Observer App ID: {asa_state_observer_id}.\n")
test_asa_id = create_asset(
creator_account=deployer,
total=1,
unit_name='TST',
asset_name='Test ASA',
decimals=0,
)
print(f" --- Test Asset ID: {test_asa_id}.\n")
test_stats = Tests()
# --/ APPROVAL TESTS SESSION
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_OPTED_IN,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_EQ,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=1
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_GT,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=0
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_GE,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=0
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_GE,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=1
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_LT,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=42
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_LE,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=42
)
)
approval_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_LE,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=1
)
)
# --/ END APPROVAL TESTS SESSION
# --/ REJECTION TESTS SESSION
rejection_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_OPTED_IN,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=42,
target_account=deployer
)
)
rejection_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_EQ,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=42
)
)
rejection_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_GT,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=1
)
)
rejection_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_GE,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=42
)
)
rejection_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_LT,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=1
)
)
rejection_unit_test(
algod_client,
test_stats,
call_asa_state_observer(
method=METHOD_ASA_AMOUNT_LE,
caller=deployer,
app_id=asa_state_observer_id,
target_asa_id=test_asa_id,
target_account=deployer,
amount=0
)
)
# --/ END REJECTION TESTS SESSION
return test_stats
if __name__ == "__main__":
print(test().__str__()) | asa_state_observer_test.py | import base64
import dataclasses
import algosdk
from algosdk.kmd import KMDClient
from algosdk.wallet import Wallet
from algosdk.v2client import algod, indexer
from algosdk import mnemonic, util
from algoapp_method_unittest import *
from asa_state_observer.asa_state_observer_asc1 import (
compile_stateful,
asa_state_observer,
asa_state_observe_closeout_or_clear,
GLOBAL_INTS,
GLOBAL_BYTES,
LOCAL_INTS,
LOCAL_BYTES,
METHOD_ASA_OPTED_IN,
METHOD_ASA_AMOUNT_EQ,
METHOD_ASA_AMOUNT_GT,
METHOD_ASA_AMOUNT_GE,
METHOD_ASA_AMOUNT_LT,
METHOD_ASA_AMOUNT_LE,
)
__author__ = "<NAME> <<EMAIL>>"
ALGOD_ADDRESS = "http://localhost:4001"
ALGOD_TOKEN = 64 * "a"
KMD_ADDRESS = "http://localhost:4002"
KMD_TOKEN = 64 * "a"
INDEXER_ADDRESS = "http://localhost:8980"
INDEXER_TOKEN = 64 * "a"
FUND_ACCOUNT_ALGOS = util.algos_to_microalgos(1000) # Algos
FLAT_FEE = 1000
algod_client = algod.AlgodClient(
algod_token=ALGOD_TOKEN, algod_address=ALGOD_ADDRESS
)
kmd_client = KMDClient(
kmd_token=KMD_TOKEN, kmd_address=KMD_ADDRESS
)
indexer_client = indexer.IndexerClient(
indexer_token=INDEXER_TOKEN, indexer_address=INDEXER_ADDRESS
)
@dataclasses.dataclass
class Account:
    """An Algorand account record: address plus its signing material.

    Either ``private_key`` (regular account) or ``lsig`` (logic-signature
    account) is expected to be populated.
    """
    address: str
    private_key: str
    lsig: str = None
    @classmethod
    def create_account(cls):
        """Generate a brand-new keypair and wrap it in an Account."""
        sk, addr = algosdk.account.generate_account()
        return cls(private_key=sk, address=addr)
    def is_lsig(self):
        """Truthy when this account signs with a logic signature."""
        return not self.private_key and self.lsig
    def mnemonic(self) -> str:
        """Return the 25-word mnemonic for this account's private key."""
        return mnemonic.from_private_key(self.private_key)
def wait_for_confirmation(client, txid):
    """Block until transaction ``txid`` is confirmed, polling round by round.

    Returns the pending-transaction info dict of the confirmed transaction.
    """
    round_to_wait = client.status().get("last-round")
    txinfo = client.pending_transaction_info(txid)
    # Keep waiting one round at a time until the node reports a positive
    # confirmation round for the transaction.
    while txinfo.get("confirmed-round", -1) <= 0:
        print(f"Waiting for transaction {txid} confirmation.")
        round_to_wait += 1
        client.status_after_block(round_to_wait)
        txinfo = client.pending_transaction_info(txid)
    print(f"Transaction {txid} confirmed in round "
          f"{txinfo.get('confirmed-round')}.")
    return txinfo
def get_params(client):
    """Fetch suggested params and pin a flat fee of FLAT_FEE microalgos."""
    suggested = client.suggested_params()
    suggested.flat_fee = True
    suggested.fee = FLAT_FEE
    return suggested
def sign(account, txn):
    """Sign ``txn`` with ``account``, via logic-sig or private key."""
    if not account.is_lsig():
        # Regular account: a private key must be present.
        assert account.private_key
        return txn.sign(account.private_key)
    return transaction.LogicSigTransaction(txn, account.lsig)
def sign_send_wait(account, txn):
    """Sign a transaction, submit it, and wait for its confirmation."""
    signed_txn = sign(account, txn)
    tx_id = signed_txn.transaction.get_txid()
    # Side effect: also dumps the signed txn to disk (handy for goal/debug).
    transaction.write_to_file([signed_txn], "/tmp/txn.signed", overwrite=True)
    algod_client.send_transactions([signed_txn])
    wait_for_confirmation(algod_client, tx_id)
    # Return the post-confirmation transaction info.
    return algod_client.pending_transaction_info(tx_id)
def find_sandbox_faucet():
    """Locate the sandbox's pre-funded faucet account via KMD + indexer.

    Raises KeyError when no online genesis account is found.
    """
    wallet_name = kmd_client.list_wallets()[0]["name"]
    # Sandbox's default wallet is not password protected.
    wallet = Wallet(wallet_name, "", kmd_client)
    for address in wallet.list_keys():
        details = indexer_client.account_info(address).get("account")
        is_genesis = details.get("created-at-round") == 0
        if details.get("status") == "Online" and is_genesis:
            return Account(
                address=address,
                private_key=wallet.export_key(address)
            )
    raise KeyError("Could not find sandbox faucet")
def create_and_fund(faucet: Account) -> Account:
    """Generate a fresh account and seed it with algos from ``faucet``."""
    account = Account.create_account()
    print(f"Funding new account: {account.address}.")
    fund(faucet, account)
    return account
def fund(faucet: Account, receiver: Account, amount=FUND_ACCOUNT_ALGOS):
    """Send ``amount`` microalgos from ``faucet`` to ``receiver``."""
    params = get_params(algod_client)
    txn = transaction.PaymentTxn(
        faucet.address, params, receiver.address, amount
    )
    return sign_send_wait(faucet, txn)
def compile_program(source_code):
    """Compile TEAL ``source_code`` on the node; return the raw bytecode."""
    response = algod_client.compile(source_code)
    return base64.b64decode(response["result"])
def create_application(creator: Account):
    """Deploy the ASA state observer app and return its application index.

    Compiles both the approval and the clear programs on the node, writes
    the TEAL sources to /tmp for inspection, and submits an
    ApplicationCreateTxn signed by ``creator``.
    """
    global_schema = transaction.StateSchema(GLOBAL_INTS, GLOBAL_BYTES)
    local_schema = transaction.StateSchema(LOCAL_INTS, LOCAL_BYTES)
    approval_program_teal = compile_stateful(asa_state_observer())
    approval_program = compile_program(approval_program_teal)
    # Keep the generated TEAL around for debugging.
    with open('/tmp/approval.teal', 'w') as f:
        f.write(approval_program_teal)
    clear_program_teal = compile_stateful(
        asa_state_observe_closeout_or_clear()
    )
    clear_program = compile_program(clear_program_teal)
    with open('/tmp/clear.teal', 'w') as f:
        f.write(clear_program_teal)
    on_complete = transaction.OnComplete.NoOpOC.real
    params = get_params(algod_client)
    txn = transaction.ApplicationCreateTxn(
        creator.address,
        params,
        on_complete,
        approval_program,
        clear_program,
        global_schema,
        local_schema,
    )
    transaction_response = sign_send_wait(creator, txn)
    return transaction_response["application-index"]
def create_asset(
    creator_account,
    total: int,
    unit_name: str,
    asset_name: str,
    decimals: int,
    frozen: bool = False,
    manager=None,
    reserve=None,
    freeze=None,
    clawback=None,
    disable_empty_addresses: bool = True,
):
    """Create an asset and return its ID.

    Any of ``manager``/``reserve``/``freeze``/``clawback`` that is not an
    explicit address string falls back to the creator's own address.
    NOTE(review): ``disable_empty_addresses`` is forwarded unchanged to
    ``strict_empty_address_check`` -- the parameter name reads inverted
    relative to what it does; confirm intent before relying on it.
    """
    params = get_params(algod_client)
    txn = transaction.AssetConfigTxn(
        sender=creator_account.address,
        sp=params,
        total=total,
        decimals=decimals,
        unit_name=unit_name,
        asset_name=asset_name,
        manager=creator_account.address if not isinstance(manager, str) else manager,
        reserve=creator_account.address if not isinstance(reserve, str) else reserve,
        freeze=creator_account.address if not isinstance(freeze, str) else freeze,
        clawback=creator_account.address if not isinstance(clawback, str) else clawback,
        default_frozen=False if not frozen else True,
        strict_empty_address_check=disable_empty_addresses,
    )
    ptx = sign_send_wait(creator_account, txn)
    return ptx["asset-index"]
def call_asa_state_observer(
    method: str,
    caller: Account,
    app_id: int,
    target_asa_id: int,
    target_account: Account,
    amount: int = 0,
):
    """Build and sign an app-call transaction for one observer method.

    Returns the signed (not yet submitted) ApplicationNoOpTxn.
    """
    params = get_params(algod_client)
    # Every amount-comparison method takes the amount as a second app
    # argument; the opt-in check takes only the method selector.
    amount_methods = (
        METHOD_ASA_AMOUNT_EQ,
        METHOD_ASA_AMOUNT_GT,
        METHOD_ASA_AMOUNT_GE,
        METHOD_ASA_AMOUNT_LT,
        METHOD_ASA_AMOUNT_LE,
    )
    if method == METHOD_ASA_OPTED_IN:
        args = [METHOD_ASA_OPTED_IN.encode()]
    elif method in amount_methods:
        args = [method.encode(), amount]
    else:
        quit(f"{method} is an invalid method call.")
    test_txn = transaction.ApplicationNoOpTxn(
        sender=caller.address,
        sp=params,
        index=app_id,
        app_args=args,
        foreign_assets=[target_asa_id],
        accounts=[target_account.address],
    )
    return sign(caller, test_txn)
def test():
    """Deploy the observer app, mint a 1-unit test ASA, and run the suite.

    Exercises every observer method with cases that must approve and cases
    that must reject, accumulating results in a ``Tests`` object, which is
    returned for reporting.
    """
    faucet = find_sandbox_faucet()
    print(f" --- Sandbox ALGO Faucet: {faucet.address}.\n")
    deployer = create_and_fund(faucet)
    asa_state_observer_id = create_application(deployer)
    print(f" --- ASA State Observer App ID: {asa_state_observer_id}.\n")
    test_asa_id = create_asset(
        creator_account=deployer,
        total=1,
        unit_name='TST',
        asset_name='Test ASA',
        decimals=0,
    )
    print(f" --- Test Asset ID: {test_asa_id}.\n")
    test_stats = Tests()
    # Each case: (runner, method, target ASA id, amount). The deployer holds
    # exactly 1 unit of the test ASA, so the expected outcome of every
    # comparison follows from that balance. This table replaces 14 nearly
    # identical call blocks.
    cases = [
        # --/ APPROVAL TESTS SESSION
        (approval_unit_test, METHOD_ASA_OPTED_IN, test_asa_id, 0),
        (approval_unit_test, METHOD_ASA_AMOUNT_EQ, test_asa_id, 1),
        (approval_unit_test, METHOD_ASA_AMOUNT_GT, test_asa_id, 0),
        (approval_unit_test, METHOD_ASA_AMOUNT_GE, test_asa_id, 0),
        (approval_unit_test, METHOD_ASA_AMOUNT_GE, test_asa_id, 1),
        (approval_unit_test, METHOD_ASA_AMOUNT_LT, test_asa_id, 42),
        (approval_unit_test, METHOD_ASA_AMOUNT_LE, test_asa_id, 42),
        (approval_unit_test, METHOD_ASA_AMOUNT_LE, test_asa_id, 1),
        # --/ REJECTION TESTS SESSION
        # ASA 42 was never opted into by the deployer.
        (rejection_unit_test, METHOD_ASA_OPTED_IN, 42, 0),
        (rejection_unit_test, METHOD_ASA_AMOUNT_EQ, test_asa_id, 42),
        (rejection_unit_test, METHOD_ASA_AMOUNT_GT, test_asa_id, 1),
        (rejection_unit_test, METHOD_ASA_AMOUNT_GE, test_asa_id, 42),
        (rejection_unit_test, METHOD_ASA_AMOUNT_LT, test_asa_id, 1),
        (rejection_unit_test, METHOD_ASA_AMOUNT_LE, test_asa_id, 0),
    ]
    for run_case, method, target_asa, amount in cases:
        run_case(
            algod_client,
            test_stats,
            call_asa_state_observer(
                method=method,
                caller=deployer,
                app_id=asa_state_observer_id,
                target_asa_id=target_asa,
                target_account=deployer,
                amount=amount,
            )
        )
    return test_stats
if __name__ == "__main__":
print(test().__str__()) | 0.533884 | 0.126192 |
import json
import socket
import time
import sys
from typing import Any, Optional
from ev3dev2simulator.config.config import get_simulation_settings, load_config
from ev3dev2simulator.connection.message.command import Command
THIS = sys.modules[__name__]
class ClientSocket:
    """
    Class responsible for the establishing and maintaining the socket connection with the simulator.
    This connection is a TCP stream.
    """
    def __init__(self):
        # Loading the default config resolves the simulator's socket port.
        load_config(None)
        port = int(get_simulation_settings()['exec_settings']['socket_port'])
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect(('localhost', port))
        # Give the simulator a moment to register the new connection.
        time.sleep(1)
    def send_command(self, command: Command, wait_for_response=False) -> Optional[object]:
        """
        Serialise and send the given Command to the simulator.
        :param command: to send.
        :param wait_for_response: set to True if you expect a result and want to wait for it blocking.
        """
        jsn = self.serialize(command)
        # NOTE(review): socket.send() may transmit only part of the buffer
        # on a TCP stream; sendall() would be the robust call -- confirm.
        self.client.send(jsn)
        if wait_for_response:
            while True:
                # NOTE(review): assumes the response arrives as a single
                # chunk of at most 32 bytes -- verify against the protocol.
                data = self.client.recv(32)
                if data:
                    return self.deserialize(data)
        return None
    @staticmethod
    def serialize(message: Any) -> bytes:
        """
        Serialize the given message so it can be sent via a stream channel.
        Messages are '#'-padded up to the configured fixed message size.
        :param message: to be serialized.
        :return: bytes representing the message.
        """
        obj_dict = message.serialize()
        jsn = json.dumps(obj_dict)
        jsn = jsn.ljust(int(get_simulation_settings()['exec_settings']['message_size']), '#')
        return str.encode(jsn)
    @staticmethod
    def deserialize(data: bytes) -> Any:
        """
        Deserialize the given data.
        :param data: to be deserialized.
        :return: any type representing value inside the data.
        """
        val = data.decode()
        obj_dict = json.loads(val)
        return obj_dict['value']
THIS.CLIENT_SOCKET = None  # module-level singleton holder
def get_client_socket() -> ClientSocket:
    """
    Functionality to make ClientSocket a singleton. Creates a client if it does not exist and returns it either way.
    """
    if not THIS.CLIENT_SOCKET:
        THIS.CLIENT_SOCKET = ClientSocket()
return THIS.CLIENT_SOCKET | ev3dev2simulator/connection/client_socket.py | import json
import socket
import time
import sys
from typing import Any, Optional
from ev3dev2simulator.config.config import get_simulation_settings, load_config
from ev3dev2simulator.connection.message.command import Command
THIS = sys.modules[__name__]
class ClientSocket:
    """
    Class responsible for establishing and maintaining the socket connection
    with the simulator. This connection is a TCP stream.
    """
    def __init__(self):
        # Loading the default config resolves the simulator's socket port.
        load_config(None)
        port = int(get_simulation_settings()['exec_settings']['socket_port'])
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect(('localhost', port))
        # Give the simulator a moment to register the new connection.
        time.sleep(1)
    def send_command(self, command: Command, wait_for_response=False) -> Optional[object]:
        """
        Serialise and send the given Command to the simulator.
        :param command: to send.
        :param wait_for_response: set to True if you expect a result and want to wait for it blocking.
        """
        jsn = self.serialize(command)
        # Fix: sendall() guarantees the whole padded message is written; a
        # bare send() may transmit only part of the buffer on a TCP stream.
        self.client.sendall(jsn)
        if wait_for_response:
            while True:
                # NOTE(review): assumes the response arrives as a single
                # chunk of at most 32 bytes -- verify against the protocol.
                data = self.client.recv(32)
                if data:
                    return self.deserialize(data)
        return None
    @staticmethod
    def serialize(message: Any) -> bytes:
        """
        Serialize the given message so it can be sent via a stream channel.
        Messages are '#'-padded up to the configured fixed message size.
        :param message: to be serialized.
        :return: bytes representing the message.
        """
        obj_dict = message.serialize()
        jsn = json.dumps(obj_dict)
        jsn = jsn.ljust(int(get_simulation_settings()['exec_settings']['message_size']), '#')
        return str.encode(jsn)
    @staticmethod
    def deserialize(data: bytes) -> Any:
        """
        Deserialize the given data.
        :param data: to be deserialized.
        :return: any type representing value inside the data.
        """
        val = data.decode()
        obj_dict = json.loads(val)
        return obj_dict['value']
# Module-level singleton holder for the simulator connection.
THIS.CLIENT_SOCKET = None
def get_client_socket() -> ClientSocket:
    """
    Functionality to make ClientSocket a singleton. Creates a client if it
    does not exist and returns it either way.
    """
    if not THIS.CLIENT_SOCKET:
        THIS.CLIENT_SOCKET = ClientSocket()
    # Repaired: the original return line had dataset-export junk fused on.
    return THIS.CLIENT_SOCKET
import numpy as np
from .harmonics import ut_E
from .utilities import Bunch
from ._time_conversion import _normalize_time
def reconstruct(t, coef,
                epoch='python',
                verbose=True,
                constit=None,
                min_SNR=2,
                min_PE=0):
    """
    Reconstruct a tidal signal.
    Parameters
    ----------
    t : array_like
        Time in days since ``epoch``.
    coef : `Bunch`
        Data structure returned by `utide.solve`.
    epoch : {string, `datetime.date`, `datetime.datetime`}, optional
        Valid strings are 'python' (default); 'matlab' if `t` is
        an array of Matlab datenums; or an arbitrary date in the
        form 'YYYY-MM-DD'.  The default corresponds to the Python
        standard library `datetime` proleptic Gregorian calendar,
        starting with 1 on January 1 of year 1.
    verbose : {True, False}, optional
        True to enable output message (default). False turns off all
        messages.
    constit : {None, array_like}, optional
        List of strings with standard letter abbreviations of
        tidal constituents, to be used in reconstruction if present
        in coef; alternative to the SNR and PE criteria.
    min_SNR : float, optional, default 2
        Include only the constituents with signal-to-noise SNR >= min_SNR,
        where SNR is based on the constituent confidence intervals in
        ``coef``.
    min_PE : float, optional, default 0
        Include only the constituents with percent energy PE >= min_PE,
        where PE is based on the amplitudes in ``coef``.
    Returns
    -------
    tide : `Bunch`
        Scalar time series is returned as `tide.h`; a vector
        series as `tide.u`, `tide.v`.  Each is an ndarray with
        ``np.nan`` as the missing value.
        Most input kwargs are included: 'epoch', 'constit',
        'min_SNR', and 'min_PE'.
        The input time array is included as 't_in', and 't_mpl';
        the former is the original input time argument, and the
        latter is the time as a matplotlib datenum.  If 'epoch'
        is 'python', these will be identical, and the names will
        point to the same array.
    """
    out = Bunch(t_in=t, epoch=epoch, constit=constit, min_SNR=min_SNR,
                min_PE=min_PE)
    t = np.atleast_1d(t)
    if t.ndim != 1:
        raise ValueError("t must be a 1-D array")
    # Convert the caller's epoch to the internal (matplotlib-style) datenum.
    t = _normalize_time(t, epoch)
    if epoch == 'python':
        out.t_mpl = out.t_in
    else:
        out.t_mpl = t
    # Mask non-finite times; goodmask maps compressed results back onto the
    # full-length output arrays (NaN elsewhere).
    t = np.ma.masked_invalid(t)
    goodmask = ~np.ma.getmaskarray(t)
    t = t.compressed()
    u, v = _reconstruct(t, goodmask, coef,
                        verbose=verbose,
                        constit=constit,
                        min_SNR=min_SNR,
                        min_PE=min_PE)
    # v is None for a scalar (1-D) fit; otherwise (u, v) is the vector pair.
    if v is not None:
        out.u, out.v = u, v
    else:
        out.h = u
    return out
def _reconstruct(t, goodmask, coef, verbose, constit, min_SNR, min_PE):
    """Core reconstruction: returns (u, v); v is None for a scalar fit.

    ``t`` holds only the finite times; ``goodmask`` maps them back into
    the full-length NaN-filled output arrays.
    """
    twodim = coef['aux']['opt']['twodim']
    # Determine constituents to include.
    if constit is not None:
        ind = [i for i, c in enumerate(coef['name']) if c in constit]
    elif (min_SNR == 0 and min_PE == 0) or coef['aux']['opt']['nodiagn']:
        ind = slice(None)
    else:
        # SNR/PE screening. The /1.96 presumably converts a 95% confidence
        # interval half-width to a standard error -- TODO confirm.
        if twodim:
            E = coef['Lsmaj']**2 + coef['Lsmin']**2
            N = (coef['Lsmaj_ci']/1.96)**2 + (coef['Lsmin_ci']/1.96)**2
        else:
            E = coef['A']**2
            N = (coef['A_ci']/1.96)**2
        SNR = E / N
        PE = 100 * E / E.sum()
        with np.errstate(invalid='ignore'):
            ind = np.logical_and(SNR >= min_SNR, PE >= min_PE)
    # Complex coefficients.
    rpd = np.pi/180
    if twodim:
        ap = 0.5 * ((coef['Lsmaj'][ind] + coef['Lsmin'][ind]) *
                    np.exp(1j*(coef['theta'][ind] - coef['g'][ind]) * rpd))
        am = 0.5 * ((coef['Lsmaj'][ind] - coef['Lsmin'][ind]) *
                    np.exp(1j*(coef['theta'][ind] + coef['g'][ind]) * rpd))
    else:
        ap = 0.5 * coef['A'][ind] * np.exp(-1j*coef['g'][ind] * rpd)
        am = np.conj(ap)
    ngflgs = [coef['aux']['opt']['nodsatlint'],
              coef['aux']['opt']['nodsatnone'],
              coef['aux']['opt']['gwchlint'],
              coef['aux']['opt']['gwchnone']]
    if verbose:
        print('prep/calcs ... ', end='')
    E = ut_E(t,
             coef['aux']['reftime'], coef['aux']['frq'][ind],
             coef['aux']['lind'][ind], coef['aux']['lat'], ngflgs,
             coef['aux']['opt']['prefilt'])
    fit = np.dot(E, ap) + np.dot(np.conj(E), am)
    # Mean (& trend).
    u = np.empty(goodmask.shape, dtype=float)
    u.fill(np.nan)
    trend = not coef['aux']['opt']['notrend']
    if twodim:
        v = u.copy()
        u[goodmask] = np.real(fit) + coef['umean']
        v[goodmask] = np.imag(fit) + coef['vmean']
        if trend:
            u[goodmask] += coef['uslope'] * (t - coef['aux']['reftime'])
            v[goodmask] += coef['vslope'] * (t - coef['aux']['reftime'])
    else:
        u[goodmask] = np.real(fit) + coef['mean']
        if trend:
            u[goodmask] += coef['slope'] * (t - coef['aux']['reftime'])
        v = None
    if verbose:
        print('done.')
return u, v | utide/_reconstruct.py | import numpy as np
from .harmonics import ut_E
from .utilities import Bunch
from ._time_conversion import _normalize_time
def reconstruct(t, coef,
                epoch='python',
                verbose=True,
                constit=None,
                min_SNR=2,
                min_PE=0):
    """
    Reconstruct a tidal signal from fitted constituents.
    Parameters
    ----------
    t : array_like
        Time in days since ``epoch``.
    coef : `Bunch`
        Data structure returned by `utide.solve`.
    epoch : {string, `datetime.date`, `datetime.datetime`}, optional
        'python' (default), 'matlab' for Matlab datenums, or an arbitrary
        'YYYY-MM-DD' date. The default matches the Python standard library
        `datetime` proleptic Gregorian calendar (day 1 = Jan 1 of year 1).
    verbose : {True, False}, optional
        True (default) to print progress messages.
    constit : {None, array_like}, optional
        Standard letter abbreviations of constituents to use when present
        in coef; overrides the SNR/PE criteria.
    min_SNR : float, optional, default 2
        Keep only constituents with signal-to-noise SNR >= min_SNR, based
        on the confidence intervals in ``coef``.
    min_PE : float, optional, default 0
        Keep only constituents with percent energy PE >= min_PE, based on
        the amplitudes in ``coef``.
    Returns
    -------
    tide : `Bunch`
        ``tide.h`` for a scalar series; ``tide.u``/``tide.v`` for a vector
        series, each an ndarray with ``np.nan`` as the missing value.
        Also carries 'epoch', 'constit', 'min_SNR', 'min_PE', plus 't_in'
        (the original time argument) and 't_mpl' (matplotlib datenums);
        with epoch 'python' the two names point to the same array.
    """
    out = Bunch(t_in=t, epoch=epoch, constit=constit, min_SNR=min_SNR,
                min_PE=min_PE)
    times = np.atleast_1d(t)
    if times.ndim != 1:
        raise ValueError("t must be a 1-D array")
    times = _normalize_time(times, epoch)
    out.t_mpl = out.t_in if epoch == 'python' else times
    # Drop non-finite times; goodmask maps results back to full length.
    times = np.ma.masked_invalid(times)
    goodmask = ~np.ma.getmaskarray(times)
    times = times.compressed()
    u, v = _reconstruct(times, goodmask, coef,
                        verbose=verbose,
                        constit=constit,
                        min_SNR=min_SNR,
                        min_PE=min_PE)
    if v is None:
        out.h = u
    else:
        out.u, out.v = u, v
    return out
def _reconstruct(t, goodmask, coef, verbose, constit, min_SNR, min_PE):
    """Core reconstruction: returns (u, v); v is None for a scalar fit.

    ``t`` holds only the finite times; ``goodmask`` maps them back into
    the full-length NaN-filled output arrays.
    (Repaired: the final return line had dataset-export junk fused on.)
    """
    twodim = coef['aux']['opt']['twodim']
    # Determine constituents to include.
    if constit is not None:
        ind = [i for i, c in enumerate(coef['name']) if c in constit]
    elif (min_SNR == 0 and min_PE == 0) or coef['aux']['opt']['nodiagn']:
        ind = slice(None)
    else:
        # SNR/PE screening. The /1.96 presumably converts a 95% confidence
        # interval half-width to a standard error -- TODO confirm.
        if twodim:
            E = coef['Lsmaj']**2 + coef['Lsmin']**2
            N = (coef['Lsmaj_ci']/1.96)**2 + (coef['Lsmin_ci']/1.96)**2
        else:
            E = coef['A']**2
            N = (coef['A_ci']/1.96)**2
        SNR = E / N
        PE = 100 * E / E.sum()
        with np.errstate(invalid='ignore'):
            ind = np.logical_and(SNR >= min_SNR, PE >= min_PE)
    # Complex coefficients.
    rpd = np.pi/180
    if twodim:
        ap = 0.5 * ((coef['Lsmaj'][ind] + coef['Lsmin'][ind]) *
                    np.exp(1j*(coef['theta'][ind] - coef['g'][ind]) * rpd))
        am = 0.5 * ((coef['Lsmaj'][ind] - coef['Lsmin'][ind]) *
                    np.exp(1j*(coef['theta'][ind] + coef['g'][ind]) * rpd))
    else:
        ap = 0.5 * coef['A'][ind] * np.exp(-1j*coef['g'][ind] * rpd)
        am = np.conj(ap)
    ngflgs = [coef['aux']['opt']['nodsatlint'],
              coef['aux']['opt']['nodsatnone'],
              coef['aux']['opt']['gwchlint'],
              coef['aux']['opt']['gwchnone']]
    if verbose:
        print('prep/calcs ... ', end='')
    E = ut_E(t,
             coef['aux']['reftime'], coef['aux']['frq'][ind],
             coef['aux']['lind'][ind], coef['aux']['lat'], ngflgs,
             coef['aux']['opt']['prefilt'])
    fit = np.dot(E, ap) + np.dot(np.conj(E), am)
    # Mean (& trend).
    u = np.empty(goodmask.shape, dtype=float)
    u.fill(np.nan)
    trend = not coef['aux']['opt']['notrend']
    if twodim:
        v = u.copy()
        u[goodmask] = np.real(fit) + coef['umean']
        v[goodmask] = np.imag(fit) + coef['vmean']
        if trend:
            u[goodmask] += coef['uslope'] * (t - coef['aux']['reftime'])
            v[goodmask] += coef['vslope'] * (t - coef['aux']['reftime'])
    else:
        u[goodmask] = np.real(fit) + coef['mean']
        if trend:
            u[goodmask] += coef['slope'] * (t - coef['aux']['reftime'])
        v = None
    if verbose:
        print('done.')
    return u, v
import os.path
import sys
import re
from httplib2 import Http
from urllib import urlencode
def main():
    """Change into the build directory, then rebuild all dist artifacts."""
    try:
        script_dir = os.path.dirname(__file__)
    except NameError:
        # Interactive session: __file__ is undefined; fall back to a
        # hard-coded build directory.
        script_dir = '/Users/andreivarabyou/Documents/git/picEdit/build/'
    if script_dir != "":
        os.chdir(script_dir)
    compile_html()
    compile_css()
    compile_js()
def compile_js():
    """Minify dist/js/picedit.js into dist/js/picedit.min.js.

    Keeps the leading /* ... */ description comment on top of the minified
    output. Uses a remote minifier service, so network access is required.
    """
    # Fix: with-statements close the files even if a later step raises.
    with open("../dist/js/picedit.js", "r") as f:
        src_js = f.read()
    # get the plugin description (leading comment block)
    title = re.findall(r'^\/\*.+?\*\/', src_js, re.DOTALL | re.IGNORECASE)
    # minify the js via the remote service
    url = 'http://javascript-minifier.com/raw'
    body = {'input': src_js}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    with open("../dist/js/picedit.min.js", "w") as f:
        f.write(title[0] + "\n" + content)
def compile_css():
    """Concatenate the source stylesheets into dist/css/styles.css and
    write a minified copy to dist/css/styles.min.css (remote service)."""
    # Fix: with-statements close the files even if a later step raises.
    with open("../src/css/font.css", "r") as f:
        font_css = f.read()
    with open("../src/css/styles.css", "r") as f:
        main_css = f.read()
    css_source = font_css + "\n" + main_css
    with open("../dist/css/styles.css", "w") as f:
        f.write(css_source)
    # minify the css via the remote service
    url = 'http://cssminifier.com/raw'
    body = {'input': css_source}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    with open("../dist/css/styles.min.css", "w") as f:
        f.write(content)
def compile_html():
    """Inline the minified picedit template markup into the source JS and
    write the pre-processed result to dist/js/picedit.js.

    Uses a remote HTML minifier service, so network access is required.
    """
    # Fix: with-statements close the files even if a later step raises.
    with open("../src/index.html", "r") as f:
        src_html = f.read()
    with open("../src/js/picedit.js", "r") as f:
        src_js = f.read()
    # extract html code between the picedit_box markers
    phtm = re.compile(r'<!-- begin_picedit_box -->.+<!-- end_picedit_box -->', re.IGNORECASE | re.DOTALL)
    pouthtm = re.findall(phtm, src_html)
    # minify the html via the remote service
    url = 'http://www.willpeavy.com/minifier/'
    body = {'html': pouthtm[0]}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    # The service returns a full page; the minified markup is in a textarea.
    outhtml = re.findall('(<textarea.+?>)(.+?)(</textarea)', content)
    # comment and uncomment source and dist code
    unc = re.compile(r'(\/\*unhide_in_prod\*\/.*?)(\/\*)(.+?)(\*\/)(.*\/\*unhide_in_prod\*\/)', re.IGNORECASE | re.DOTALL)
    src_js = unc.sub(r'\1 \3 \5', src_js)
    unc = re.compile(r'(\/\*hide_in_prod\*\/)(.+)(\/\*hide_in_prod\*\/)', re.IGNORECASE | re.DOTALL)
    src_js = unc.sub(r'\1 /* \2 */ \3', src_js)
    # apply compiled html in place of the placeholder token
    unc = re.compile('compiled_template_markup', re.DOTALL)
    src_js = unc.sub(outhtml[0][1], src_js)
    # save pre-processed js to the dist folder
    with open("../dist/js/picedit.js", "w") as f:
        f.write(src_js)
# run the main function
# (Repaired: the original final line had dataset-export junk fused onto it.)
if __name__ == "__main__":
    main()
import os.path
import sys
import re
from httplib2 import Http
from urllib import urlencode
def main():
    """Entry point: cd into the build directory, then rebuild dist files."""
    # cd to the script directory
    try: __file__
    except NameError:
        # Interactive session: __file__ undefined, use a hard-coded path.
        basepath = '/Users/andreivarabyou/Documents/git/picEdit/build/'
    else:
        basepath = os.path.dirname(__file__)
    if basepath != "":
        os.chdir(basepath)
    compile_html()
    compile_css()
    compile_js()
def compile_js():
    """Minify dist/js/picedit.js into dist/js/picedit.min.js, preserving
    the leading description comment. Requires network access."""
    file = open("../dist/js/picedit.js", "r")
    src_js = file.read()
    file.close()
    # get the plugin description (the leading /* ... */ block)
    title = re.compile(r'^\/\*.+?\*\/', re.DOTALL | re.IGNORECASE)
    title = re.findall(title, src_js)
    # minify the js
    # NOTE(review): depends on an external third-party web service.
    url = 'http://javascript-minifier.com/raw'
    body = {'input': src_js}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    file = open("../dist/js/picedit.min.js", "w")
    file.write(title[0] + "\n" + content)
    file.close()
def compile_css():
    """Concatenate source stylesheets into dist/css/styles.css and write a
    minified copy to dist/css/styles.min.css. Requires network access."""
    # concatenate styles and save destination unminified
    file = open("../src/css/font.css", "r")
    src_style1 = file.read()
    file.close()
    file = open("../src/css/styles.css", "r")
    src_style2 = file.read()
    file.close()
    file = open("../dist/css/styles.css", "w")
    css_source = src_style1 + "\n" + src_style2
    file.write(css_source)
    file.close()
    # minify the css
    # NOTE(review): depends on an external third-party web service.
    url = 'http://cssminifier.com/raw'
    body = {'input': css_source}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    file = open("../dist/css/styles.min.css", "w")
    file.write(content)
    file.close()
def compile_html():
    """Inline the minified picedit template markup into the source JS and
    write the pre-processed result to dist/js/picedit.js."""
    # loads sources from the disk
    file = open("../src/index.html", "r")
    src_html = file.read()
    file.close()
    file = open("../src/js/picedit.js", "r")
    src_js = file.read()
    file.close()
    # extract html code between the picedit_box markers
    phtm = re.compile(r'<!-- begin_picedit_box -->.+<!-- end_picedit_box -->', re.IGNORECASE | re.DOTALL)
    pouthtm = re.findall(phtm, src_html)
    # minify the html
    # NOTE(review): depends on an external third-party web service.
    url = 'http://www.willpeavy.com/minifier/'
    body = {'html': pouthtm[0]}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    # the service returns a page; the minified markup sits in a <textarea>
    outhtml = re.findall('(<textarea.+?>)(.+?)(</textarea)', content)
    #comment and uncomment source and dist code
    unc = re.compile('(\/\*unhide_in_prod\*\/.*?)(\/\*)(.+?)(\*\/)(.*\/\*unhide_in_prod\*\/)', re.IGNORECASE | re.DOTALL)
    src_js = unc.sub(r'\1 \3 \5', src_js)
    unc = re.compile('(\/\*hide_in_prod\*\/)(.+)(\/\*hide_in_prod\*\/)', re.IGNORECASE | re.DOTALL)
    src_js = unc.sub(r'\1 /* \2 */ \3', src_js)
    #apply compiled html
    unc = re.compile('compiled_template_markup', re.DOTALL)
    src_js = unc.sub(outhtml[0][1], src_js)
    #save pre-processed js to the dist folder
    file = open("../dist/js/picedit.js", "w")
    file.write(src_js)
    file.close()
# run the main function
# (Repaired: the original final line had dataset-export junk fused onto it.)
if __name__ == "__main__":
    main()
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy
import pandas as pd
import json
from .util import *
class Crop():
    def __init__(self, key):
        """Initialize PMP/economic data containers for crop group ``key``.

        ``key`` selects ``cord/crop/<key>_properties.json``; every
        top-level entry of that file becomes an instance attribute
        (e.g. ``crop_list`` is expected to come from there).
        """
        self.key = key
        # Per-parameter containers, filled later by set_pmp_parameters()
        # through the pmp_keys mapping below.
        self.tau = {}
        self.beta = {}
        self.delta = {}
        self.gamma = {}
        self.leontief = {}
        self.eta = {}
        self.baseline_inputs = {}
        self.baseline_revenue = {}
        self.econ_factors = {}
        self.pmp_keys = {}
        # Maps parameter-table names to the dicts above.
        self.pmp_keys['TAU'] = self.tau
        self.pmp_keys['BETA'] = self.beta
        self.pmp_keys['DELTA'] = self.delta
        self.pmp_keys['GAMMA'] = self.gamma
        self.pmp_keys['ETA'] = self.eta
        self.pmp_keys['LEONTIEF'] = self.leontief
        self.pmp_keys['INPUTS'] = self.baseline_inputs
        self.pmp_keys['REV'] = self.baseline_revenue
        # Maps internal crop codes to descriptive names. NOTE(review):
        # 'DRYBN', 'OTHFLD' and 'OTHTRK' all map to 'field_misc' and
        # 'SBEET' maps to 'vegetable_small' -- confirm these are intended.
        self.crop_keys = {}
        self.crop_keys['ALFAL'] = 'alfalfa'
        self.crop_keys['ALPIS'] = 'pistachio'
        self.crop_keys['CORN'] = 'corn'
        self.crop_keys['COTTN'] = 'cotton'
        self.crop_keys['CUCUR'] = 'melon'
        self.crop_keys['DRYBN'] = 'field_misc'
        self.crop_keys['FRTOM'] = 'tomato'
        self.crop_keys['GRAIN'] = 'grain'
        self.crop_keys['ONGAR'] = 'onion'
        self.crop_keys['OTHDEC'] = 'deciduous_misc'
        self.crop_keys['OTHFLD'] = 'field_misc'
        self.crop_keys['OTHTRK'] = 'field_misc'
        self.crop_keys['PASTR'] = 'pasture'
        self.crop_keys['POTATO'] = 'potatoe'
        self.crop_keys['PRTOM'] = 'tomato'
        self.crop_keys['RICE'] = 'rice'
        self.crop_keys['SAFLR'] = 'safflower'
        self.crop_keys['SBEET'] = 'vegetable_small'
        self.crop_keys['SUBTRP'] = 'subtropical_misc'
        self.crop_keys['VINE'] = 'grape'
        # Exponent used in the CES-style terms of calc_ag_profit().
        # NOTE(review): hard-coded; confirm its source/meaning.
        self.sub = 0.17
        # NOTE(review): the open() handle is never closed explicitly.
        for k,v in json.load(open('cord/crop/%s_properties.json' % key)).items():
            setattr(self,k,v)
    def set_pmp_parameters(self, all_parameters, district):
        """Load district-specific PMP parameters into this crop's dicts.

        ``all_parameters`` maps a parameter name (e.g. 'TAU', 'BETA') to a
        pandas-like table with columns 'Region', 'Crop', 'Level' and, for
        factor-level parameters, 'Input' -- assumed to support boolean
        indexing and ``.index`` (TODO confirm). Rows whose 'Region' equals
        ``district`` are written into the matching dict from
        ``self.pmp_keys``: keyed by crop, or by input factor then crop
        when an 'Input' column is present.
        """
        for parameter_name in all_parameters:
            parameter_data = all_parameters[parameter_name]
            # Boolean mask selecting this district's rows.
            district_index = parameter_data['Region'] == district
            district_crops = parameter_data['Crop'][district_index]
            district_values = parameter_data['Level'][district_index]
            if 'Input' in parameter_data:
                district_factors = parameter_data['Input'][district_index]
            parameter_dict = self.pmp_keys[parameter_name]
            for y in parameter_data.index[district_index]:
                if 'Input' in parameter_data:
                    # Nested layout: dict[factor][crop] = value.
                    if district_factors[y] in parameter_dict:
                        parameter_dict[district_factors[y]][district_crops[y]] = district_values[y]
                    else:
                        parameter_dict[district_factors[y]] = {}
                        parameter_dict[district_factors[y]][district_crops[y]] = district_values[y]
                else:
                    # Flat layout: dict[crop] = value.
                    parameter_dict[district_crops[y]] = district_values[y]
def set_econ_parameters(self, econ_parameters, district):
for parameter_name in econ_parameters:
econ_data = econ_parameters[parameter_name]
district_list = list(econ_data['DISTRICT'])
district_index = district_list.index(district)
if parameter_name == 'WCST':
self.water_source_list = []
for source in econ_data:
if source != 'DISTRICT':
self.water_source_list.append(source)
self.econ_factors[source + '_price'] = econ_data[source][district_index]
elif parameter_name == 'WSOU':
for source in econ_data:
if source != 'DISTRICT':
self.econ_factors[source] = econ_data[source][district_index]
elif parameter_name == 'LABOR' or parameter_name == 'SUPPL':
self.econ_factors[parameter_name] = {}
for crop in econ_data:
self.leontief[parameter_name][crop] = econ_data[crop][district_index]
self.econ_factors[parameter_name] = 1.0
elif parameter_name == 'PRICE' or parameter_name == 'LANDCOST':
self.econ_factors[parameter_name] = {}
for crop in econ_data:
self.econ_factors[parameter_name][crop] = econ_data[crop][district_index]
def find_pmp_acreage(self, water_source_constraint, land_constraint, x0):
    """Solve for per-crop acreage under land and water constraints.

    water_source_constraint: dict of water source name -> available quantity.
    land_constraint: total land available (used both as the per-crop upper
      bound and as the aggregate land constraint).
    x0: initial acreage guess handed to the optimizer.
    Returns the optimizer's acreage vector, ordered as self.crop_list.
    """
    # Each crop's acreage is bounded by [0, total land].
    bb = (0.0, land_constraint)
    bnds = []
    for crop in self.crop_list:
        bnds.append(bb)
    water_constraint = 0.0
    water_cost = 0.0
    self.econ_factors['WATER'] = {}
    # Aggregate total available water and its quantity-weighted total cost.
    for source in water_source_constraint:
        water_constraint += water_source_constraint[source]
        water_cost += water_source_constraint[source]*self.econ_factors[source + '_price']
    for crop in self.crop_list:
        if crop == 'ALFAL' or crop == 'PASTR':
            # NOTE(review): alfalfa/pasture water price is hard-coded at 50.0
            # -- units and intent unclear from here; confirm.
            self.econ_factors['WATER'][crop] = 50.0
        else:
            # Blended water price = total cost / total quantity.
            # NOTE(review): raises ZeroDivisionError if the district's total
            # water supply is zero -- confirm callers never pass all zeros.
            self.econ_factors['WATER'][crop] = water_cost/water_constraint
    # Inequality constraints require non-negative slack on land and water use.
    con1 = {'type': 'ineq', 'fun': self.constrain_resource, 'args' : (land_constraint, 'LAND')}
    con2 = {'type': 'ineq', 'fun': self.constrain_resource, 'args' : (water_constraint, 'WATER')}
    cons = [con1, con2]
    # Global basin-hopping search around local SLSQP solves. calc_ag_profit
    # accumulates costs and subtracts revenue, so minimizing it presumably
    # maximizes profit -- TODO confirm sign convention.
    minimizer_kwargs = {"method":'SLSQP',"bounds": bnds, "constraints":cons}
    sol = scipy.optimize.basinhopping(self.calc_ag_profit,x0,minimizer_kwargs=minimizer_kwargs)
    return sol.x
def constrain_resource(self, x, resource_constraint, resource_type):
    """Inequality-constraint function for the acreage optimization.

    Pairs each acreage in `x` with the matching crop in self.crop_list,
    weights it by that crop's per-unit requirement from the Leontief table,
    and returns the remaining slack (constraint minus total use). A
    non-negative return value means the resource constraint is satisfied.
    """
    requirements = self.leontief[resource_type]
    total_use = sum(acreage * requirements[crop]
                    for acreage, crop in zip(x, self.crop_list))
    return resource_constraint - total_use
def calc_ag_profit(self, x):
    """Objective function for the acreage optimization.

    Accumulates cost terms and subtracts revenue terms for the acreage
    vector `x` (ordered as self.crop_list), so minimizing the return value
    presumably maximizes profit -- TODO confirm sign convention.
    """
    total_revenue = 0.0
    i = 0
    for crop in self.crop_list:
        # CES-style factor aggregation; self.sub (0.17, set in __init__)
        # looks like an elasticity-of-substitution constant -- confirm.
        total_factor_beta = 0.0
        if x[i] > 0.0:
            # Land and water inputs scale with planted acreage.
            for factor in ['LAND', 'WATER']:
                total_factor_beta += self.beta[factor][crop]*((x[i]*self.leontief[factor][crop])**((self.sub-1.0)/self.sub))
            # Supplies and labor currently do NOT scale with acreage here
            # (author's own note below flags this for recalibration).
            for factor in ['SUPPL', 'LABOR']:
                total_factor_beta += self.beta[factor][crop]*((self.leontief[factor][crop])**((self.sub-1.0)/self.sub))##needs to be fixed in PMP calibration
        if total_factor_beta > 0.0:
            # Outer CES exponent; guarded to avoid 0 ** negative-power.
            total_factor_beta = total_factor_beta**(self.sub/(self.sub-1.0))
        else:
            total_factor_beta = 0.0
        # Revenue enters negatively (price * tau * CES aggregate).
        total_revenue -= self.econ_factors['PRICE'][crop]*self.tau[crop]*total_factor_beta
        # PMP exponential calibration term.
        total_revenue += self.delta[crop]*np.exp(self.gamma[crop]*x[i])
        # Water purchase cost for this crop's acreage.
        total_revenue += self.econ_factors['WATER'][crop]*x[i]*self.leontief['WATER'][crop]
        for factor in ['SUPPL', 'LABOR']:
            total_revenue += self.leontief[factor][crop]####needs to be fixed in PMP calibration
        i += 1
    return total_revenue
def make_crop_list(self):
    """Build self.crop_list from crops whose baseline land use exceeds 50
    (units presumably acres -- TODO confirm against the INPUTS table)."""
    self.crop_list = []
    for y in self.baseline_inputs['LAND']:
        if self.baseline_inputs['LAND'][y] > 50.0:
            # NOTE(review): the trailing "| ... |" text on the next line is a
            # dataset-join artifact of this dump, not part of the program.
            self.crop_list.append(y) | Stochastic_engine/cord/crop.py | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy
import pandas as pd
import json
from .util import *
class Crop():
    """PMP (Positive Mathematical Programming) agricultural-economics model
    for one crop region.

    Holds calibration coefficients (tau/beta/delta/gamma/eta), Leontief
    per-crop factor requirements, baseline inputs/revenue, and district
    economic factors, and solves a constrained acreage-allocation problem.
    """
    def __init__(self, key):
        """Initialize parameter containers and load crop properties for
        region `key` from 'cord/crop/<key>_properties.json'."""
        self.key = key
        # PMP calibration coefficient dicts, filled by set_pmp_parameters().
        self.tau = {}
        self.beta = {}
        self.delta = {}
        self.gamma = {}
        self.leontief = {}
        self.eta = {}
        self.baseline_inputs = {}
        self.baseline_revenue = {}
        self.econ_factors = {}
        # Map parameter-table names to the member dicts they populate.
        self.pmp_keys = {}
        self.pmp_keys['TAU'] = self.tau
        self.pmp_keys['BETA'] = self.beta
        self.pmp_keys['DELTA'] = self.delta
        self.pmp_keys['GAMMA'] = self.gamma
        self.pmp_keys['ETA'] = self.eta
        self.pmp_keys['LEONTIEF'] = self.leontief
        self.pmp_keys['INPUTS'] = self.baseline_inputs
        self.pmp_keys['REV'] = self.baseline_revenue
        # Map crop codes used in the parameter tables to crop names used in
        # the properties JSON (several codes share 'field_misc'/'tomato').
        self.crop_keys = {}
        self.crop_keys['ALFAL'] = 'alfalfa'
        self.crop_keys['ALPIS'] = 'pistachio'
        self.crop_keys['CORN'] = 'corn'
        self.crop_keys['COTTN'] = 'cotton'
        self.crop_keys['CUCUR'] = 'melon'
        self.crop_keys['DRYBN'] = 'field_misc'
        self.crop_keys['FRTOM'] = 'tomato'
        self.crop_keys['GRAIN'] = 'grain'
        self.crop_keys['ONGAR'] = 'onion'
        self.crop_keys['OTHDEC'] = 'deciduous_misc'
        self.crop_keys['OTHFLD'] = 'field_misc'
        self.crop_keys['OTHTRK'] = 'field_misc'
        self.crop_keys['PASTR'] = 'pasture'
        self.crop_keys['POTATO'] = 'potatoe'
        self.crop_keys['PRTOM'] = 'tomato'
        self.crop_keys['RICE'] = 'rice'
        self.crop_keys['SAFLR'] = 'safflower'
        self.crop_keys['SBEET'] = 'vegetable_small'
        self.crop_keys['SUBTRP'] = 'subtropical_misc'
        self.crop_keys['VINE'] = 'grape'
        # NOTE(review): looks like a CES elasticity-of-substitution constant
        # used in calc_ag_profit -- confirm.
        self.sub = 0.17
        # Load region-specific attributes straight from the properties JSON.
        for k,v in json.load(open('cord/crop/%s_properties.json' % key)).items():
            setattr(self,k,v)
    def set_pmp_parameters(self, all_parameters, district):
        """Load PMP calibration parameters for one district into the member
        dicts registered in self.pmp_keys. `all_parameters` maps parameter
        names to tables with 'Region'/'Crop'/'Level' (and optionally 'Input')
        columns -- presumably pandas DataFrames; TODO confirm."""
        for parameter_name in all_parameters:
            parameter_data = all_parameters[parameter_name]
            # Boolean mask selecting only this district's rows.
            district_index = parameter_data['Region'] == district
            district_crops = parameter_data['Crop'][district_index]
            district_values = parameter_data['Level'][district_index]
            if 'Input' in parameter_data:
                # Some parameters vary by input factor as well as by crop.
                district_factors = parameter_data['Input'][district_index]
            parameter_dict = self.pmp_keys[parameter_name]
            for y in parameter_data.index[district_index]:
                if 'Input' in parameter_data:
                    # Two-level layout: dict[input_factor][crop] = value.
                    if district_factors[y] in parameter_dict:
                        parameter_dict[district_factors[y]][district_crops[y]] = district_values[y]
                    else:
                        parameter_dict[district_factors[y]] = {}
                        parameter_dict[district_factors[y]][district_crops[y]] = district_values[y]
                else:
                    # One-level layout: dict[crop] = value.
                    parameter_dict[district_crops[y]] = district_values[y]
    def set_econ_parameters(self, econ_parameters, district):
        """Load district-level economic inputs (water prices/supplies, factor
        requirements, crop prices, land costs) into self.econ_factors."""
        for parameter_name in econ_parameters:
            econ_data = econ_parameters[parameter_name]
            # Row position of this district within the table.
            district_list = list(econ_data['DISTRICT'])
            district_index = district_list.index(district)
            if parameter_name == 'WCST':
                # Water cost table: one column per water source.
                self.water_source_list = []
                for source in econ_data:
                    if source != 'DISTRICT':
                        self.water_source_list.append(source)
                        self.econ_factors[source + '_price'] = econ_data[source][district_index]
            elif parameter_name == 'WSOU':
                # Water source table: quantity available per source.
                for source in econ_data:
                    if source != 'DISTRICT':
                        self.econ_factors[source] = econ_data[source][district_index]
            elif parameter_name == 'LABOR' or parameter_name == 'SUPPL':
                self.econ_factors[parameter_name] = {}
                # NOTE(review): the loop also visits the 'DISTRICT' column,
                # storing the district name under leontief[param]['DISTRICT']
                # -- looks unintentional; confirm.
                for crop in econ_data:
                    self.leontief[parameter_name][crop] = econ_data[crop][district_index]
                # NOTE(review): the dict created above is immediately replaced
                # by the scalar 1.0 -- the `= {}` appears to be dead code.
                self.econ_factors[parameter_name] = 1.0
            elif parameter_name == 'PRICE' or parameter_name == 'LANDCOST':
                # Per-crop output prices / land costs.
                self.econ_factors[parameter_name] = {}
                for crop in econ_data:
                    self.econ_factors[parameter_name][crop] = econ_data[crop][district_index]
    def find_pmp_acreage(self, water_source_constraint, land_constraint, x0):
        """Solve for per-crop acreage under land and water constraints using
        scipy basinhopping around SLSQP; returns the acreage vector ordered
        as self.crop_list."""
        # Each crop's acreage is bounded by [0, total land].
        bb = (0.0, land_constraint)
        bnds = []
        for crop in self.crop_list:
            bnds.append(bb)
        water_constraint = 0.0
        water_cost = 0.0
        self.econ_factors['WATER'] = {}
        # Aggregate total available water and its quantity-weighted cost.
        for source in water_source_constraint:
            water_constraint += water_source_constraint[source]
            water_cost += water_source_constraint[source]*self.econ_factors[source + '_price']
        for crop in self.crop_list:
            if crop == 'ALFAL' or crop == 'PASTR':
                # NOTE(review): hard-coded water price for alfalfa/pasture.
                self.econ_factors['WATER'][crop] = 50.0
            else:
                # Blended water price = total cost / total quantity.
                # NOTE(review): ZeroDivisionError if total supply is zero.
                self.econ_factors['WATER'][crop] = water_cost/water_constraint
        # Inequality constraints: non-negative slack on land and water use.
        con1 = {'type': 'ineq', 'fun': self.constrain_resource, 'args' : (land_constraint, 'LAND')}
        con2 = {'type': 'ineq', 'fun': self.constrain_resource, 'args' : (water_constraint, 'WATER')}
        cons = [con1, con2]
        minimizer_kwargs = {"method":'SLSQP',"bounds": bnds, "constraints":cons}
        sol = scipy.optimize.basinhopping(self.calc_ag_profit,x0,minimizer_kwargs=minimizer_kwargs)
        return sol.x
    def constrain_resource(self, x, resource_constraint, resource_type):
        """Return the slack (constraint minus total use) of `resource_type`
        for acreage vector `x`; non-negative means the constraint holds."""
        sum_resource = 0.0
        i = 0
        for crop in self.crop_list:
            sum_resource += x[i]*self.leontief[resource_type][crop]
            i += 1
        return resource_constraint - sum_resource
    def calc_ag_profit(self, x):
        """Optimization objective: accumulates costs and subtracts revenue
        for acreage vector `x`, so minimizing it presumably maximizes profit
        -- TODO confirm sign convention."""
        total_revenue = 0.0
        i = 0
        for crop in self.crop_list:
            # CES-style factor aggregation using self.sub as the elasticity.
            total_factor_beta = 0.0
            if x[i] > 0.0:
                # Land and water scale with acreage; supplies/labor do not
                # (author's own notes below flag this for recalibration).
                for factor in ['LAND', 'WATER']:
                    total_factor_beta += self.beta[factor][crop]*((x[i]*self.leontief[factor][crop])**((self.sub-1.0)/self.sub))
                for factor in ['SUPPL', 'LABOR']:
                    total_factor_beta += self.beta[factor][crop]*((self.leontief[factor][crop])**((self.sub-1.0)/self.sub))##needs to be fixed in PMP calibration
            if total_factor_beta > 0.0:
                total_factor_beta = total_factor_beta**(self.sub/(self.sub-1.0))
            else:
                total_factor_beta = 0.0
            # Revenue enters negatively; cost terms enter positively.
            total_revenue -= self.econ_factors['PRICE'][crop]*self.tau[crop]*total_factor_beta
            total_revenue += self.delta[crop]*np.exp(self.gamma[crop]*x[i])
            total_revenue += self.econ_factors['WATER'][crop]*x[i]*self.leontief['WATER'][crop]
            for factor in ['SUPPL', 'LABOR']:
                total_revenue += self.leontief[factor][crop]####needs to be fixed in PMP calibration
            i += 1
        return total_revenue
    def make_crop_list(self):
        """Build self.crop_list from crops with baseline land use above 50
        (units presumably acres -- TODO confirm)."""
        self.crop_list = []
        for y in self.baseline_inputs['LAND']:
            if self.baseline_inputs['LAND'][y] > 50.0:
                # NOTE(review): the trailing "| ... |" text on the next line
                # is a dataset-join artifact of this dump, not program code.
                self.crop_list.append(y) | 0.335677 | 0.165189
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['EnvironmentArgs', 'Environment']
@pulumi.input_type
class EnvironmentArgs:
    """Typed input arguments for constructing an MWAA `Environment` resource.

    NOTE(review): this follows the Pulumi provider code-generation pattern --
    each value is stored/retrieved via `pulumi.set`/`pulumi.get` under its
    snake_case key, and `@pulumi.getter(name=...)` maps it to the provider's
    camelCase property name. Hand edits here are presumably overwritten on
    the next codegen run -- confirm before modifying.
    """
    def __init__(__self__, *,
                 dag_s3_path: pulumi.Input[str],
                 execution_role_arn: pulumi.Input[str],
                 network_configuration: pulumi.Input['EnvironmentNetworkConfigurationArgs'],
                 source_bucket_arn: pulumi.Input[str],
                 airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 airflow_version: Optional[pulumi.Input[str]] = None,
                 environment_class: Optional[pulumi.Input[str]] = None,
                 kms_key: Optional[pulumi.Input[str]] = None,
                 logging_configuration: Optional[pulumi.Input['EnvironmentLoggingConfigurationArgs']] = None,
                 max_workers: Optional[pulumi.Input[int]] = None,
                 min_workers: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
                 plugins_s3_path: Optional[pulumi.Input[str]] = None,
                 requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
                 requirements_s3_path: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 webserver_access_mode: Optional[pulumi.Input[str]] = None,
                 weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Environment resource.
        :param pulumi.Input[str] dag_s3_path: The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
        :param pulumi.Input['EnvironmentNetworkConfigurationArgs'] network_configuration: Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
        :param pulumi.Input[str] source_bucket_arn: The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] airflow_configuration_options: The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
        :param pulumi.Input[str] airflow_version: Airflow version of your environment, will be set by default to the latest version that MWAA supports.
        :param pulumi.Input[str] environment_class: Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
        :param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
        :param pulumi.Input['EnvironmentLoggingConfigurationArgs'] logging_configuration: The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
        :param pulumi.Input[int] max_workers: The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
        :param pulumi.Input[int] min_workers: The minimum number of workers that you want to run in your environment. Will be `1` by default.
        :param pulumi.Input[str] name: The name of the Apache Airflow Environment
        :param pulumi.Input[str] plugins_s3_object_version: The plugins.zip file version you want to use.
        :param pulumi.Input[str] plugins_s3_path: The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] requirements_s3_object_version: The requirements.txt file version you want to use.
        :param pulumi.Input[str] requirements_s3_path: The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: An array of key:value pairs to associate with the resource.
        :param pulumi.Input[str] webserver_access_mode: Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
        :param pulumi.Input[str] weekly_maintenance_window_start: Specifies the start date for the weekly maintenance window.
        """
        # Required arguments are always stored; optional ones only when given.
        pulumi.set(__self__, "dag_s3_path", dag_s3_path)
        pulumi.set(__self__, "execution_role_arn", execution_role_arn)
        pulumi.set(__self__, "network_configuration", network_configuration)
        pulumi.set(__self__, "source_bucket_arn", source_bucket_arn)
        if airflow_configuration_options is not None:
            pulumi.set(__self__, "airflow_configuration_options", airflow_configuration_options)
        if airflow_version is not None:
            pulumi.set(__self__, "airflow_version", airflow_version)
        if environment_class is not None:
            pulumi.set(__self__, "environment_class", environment_class)
        if kms_key is not None:
            pulumi.set(__self__, "kms_key", kms_key)
        if logging_configuration is not None:
            pulumi.set(__self__, "logging_configuration", logging_configuration)
        if max_workers is not None:
            pulumi.set(__self__, "max_workers", max_workers)
        if min_workers is not None:
            pulumi.set(__self__, "min_workers", min_workers)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if plugins_s3_object_version is not None:
            pulumi.set(__self__, "plugins_s3_object_version", plugins_s3_object_version)
        if plugins_s3_path is not None:
            pulumi.set(__self__, "plugins_s3_path", plugins_s3_path)
        if requirements_s3_object_version is not None:
            pulumi.set(__self__, "requirements_s3_object_version", requirements_s3_object_version)
        if requirements_s3_path is not None:
            pulumi.set(__self__, "requirements_s3_path", requirements_s3_path)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if webserver_access_mode is not None:
            pulumi.set(__self__, "webserver_access_mode", webserver_access_mode)
        if weekly_maintenance_window_start is not None:
            pulumi.set(__self__, "weekly_maintenance_window_start", weekly_maintenance_window_start)
    @property
    @pulumi.getter(name="dagS3Path")
    def dag_s3_path(self) -> pulumi.Input[str]:
        """
        The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        """
        return pulumi.get(self, "dag_s3_path")
    @dag_s3_path.setter
    def dag_s3_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "dag_s3_path", value)
    @property
    @pulumi.getter(name="executionRoleArn")
    def execution_role_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
        """
        return pulumi.get(self, "execution_role_arn")
    @execution_role_arn.setter
    def execution_role_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "execution_role_arn", value)
    @property
    @pulumi.getter(name="networkConfiguration")
    def network_configuration(self) -> pulumi.Input['EnvironmentNetworkConfigurationArgs']:
        """
        Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
        """
        return pulumi.get(self, "network_configuration")
    @network_configuration.setter
    def network_configuration(self, value: pulumi.Input['EnvironmentNetworkConfigurationArgs']):
        pulumi.set(self, "network_configuration", value)
    @property
    @pulumi.getter(name="sourceBucketArn")
    def source_bucket_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
        """
        return pulumi.get(self, "source_bucket_arn")
    @source_bucket_arn.setter
    def source_bucket_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "source_bucket_arn", value)
    @property
    @pulumi.getter(name="airflowConfigurationOptions")
    def airflow_configuration_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
        """
        return pulumi.get(self, "airflow_configuration_options")
    @airflow_configuration_options.setter
    def airflow_configuration_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "airflow_configuration_options", value)
    @property
    @pulumi.getter(name="airflowVersion")
    def airflow_version(self) -> Optional[pulumi.Input[str]]:
        """
        Airflow version of your environment, will be set by default to the latest version that MWAA supports.
        """
        return pulumi.get(self, "airflow_version")
    @airflow_version.setter
    def airflow_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "airflow_version", value)
    @property
    @pulumi.getter(name="environmentClass")
    def environment_class(self) -> Optional[pulumi.Input[str]]:
        """
        Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
        """
        return pulumi.get(self, "environment_class")
    @environment_class.setter
    def environment_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment_class", value)
    @property
    @pulumi.getter(name="kmsKey")
    def kms_key(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
        """
        return pulumi.get(self, "kms_key")
    @kms_key.setter
    def kms_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key", value)
    @property
    @pulumi.getter(name="loggingConfiguration")
    def logging_configuration(self) -> Optional[pulumi.Input['EnvironmentLoggingConfigurationArgs']]:
        """
        The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
        """
        return pulumi.get(self, "logging_configuration")
    @logging_configuration.setter
    def logging_configuration(self, value: Optional[pulumi.Input['EnvironmentLoggingConfigurationArgs']]):
        pulumi.set(self, "logging_configuration", value)
    @property
    @pulumi.getter(name="maxWorkers")
    def max_workers(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
        """
        return pulumi.get(self, "max_workers")
    @max_workers.setter
    def max_workers(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_workers", value)
    @property
    @pulumi.getter(name="minWorkers")
    def min_workers(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of workers that you want to run in your environment. Will be `1` by default.
        """
        return pulumi.get(self, "min_workers")
    @min_workers.setter
    def min_workers(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_workers", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Apache Airflow Environment
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="pluginsS3ObjectVersion")
    def plugins_s3_object_version(self) -> Optional[pulumi.Input[str]]:
        """
        The plugins.zip file version you want to use.
        """
        return pulumi.get(self, "plugins_s3_object_version")
    @plugins_s3_object_version.setter
    def plugins_s3_object_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plugins_s3_object_version", value)
    @property
    @pulumi.getter(name="pluginsS3Path")
    def plugins_s3_path(self) -> Optional[pulumi.Input[str]]:
        """
        The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        """
        return pulumi.get(self, "plugins_s3_path")
    @plugins_s3_path.setter
    def plugins_s3_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plugins_s3_path", value)
    @property
    @pulumi.getter(name="requirementsS3ObjectVersion")
    def requirements_s3_object_version(self) -> Optional[pulumi.Input[str]]:
        """
        The requirements.txt file version you want to use.
        """
        return pulumi.get(self, "requirements_s3_object_version")
    @requirements_s3_object_version.setter
    def requirements_s3_object_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "requirements_s3_object_version", value)
    @property
    @pulumi.getter(name="requirementsS3Path")
    def requirements_s3_path(self) -> Optional[pulumi.Input[str]]:
        """
        The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        """
        return pulumi.get(self, "requirements_s3_path")
    @requirements_s3_path.setter
    def requirements_s3_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "requirements_s3_path", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An array of key:value pairs to associate with the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="webserverAccessMode")
    def webserver_access_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
        """
        return pulumi.get(self, "webserver_access_mode")
    @webserver_access_mode.setter
    def webserver_access_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "webserver_access_mode", value)
    @property
    @pulumi.getter(name="weeklyMaintenanceWindowStart")
    def weekly_maintenance_window_start(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the start date for the weekly maintenance window.
        """
        return pulumi.get(self, "weekly_maintenance_window_start")
    @weekly_maintenance_window_start.setter
    def weekly_maintenance_window_start(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "weekly_maintenance_window_start", value)
class Environment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
airflow_version: Optional[pulumi.Input[str]] = None,
dag_s3_path: Optional[pulumi.Input[str]] = None,
environment_class: Optional[pulumi.Input[str]] = None,
execution_role_arn: Optional[pulumi.Input[str]] = None,
kms_key: Optional[pulumi.Input[str]] = None,
logging_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']]] = None,
max_workers: Optional[pulumi.Input[int]] = None,
min_workers: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
network_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']]] = None,
plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
plugins_s3_path: Optional[pulumi.Input[str]] = None,
requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
requirements_s3_path: Optional[pulumi.Input[str]] = None,
source_bucket_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
webserver_access_mode: Optional[pulumi.Input[str]] = None,
weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Creates a MWAA Environment resource.
## Example Usage
A MWAA Environment requires an IAM role (`iam.Role`), two subnets in the private zone (`ec2.Subnet`) and a versioned S3 bucket (`s3.Bucket`).
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.mwaa.Environment("example",
dag_s3_path="dags/",
execution_role_arn=aws_iam_role["example"]["arn"],
network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
security_group_ids=[aws_security_group["example"]["id"]],
subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
),
source_bucket_arn=aws_s3_bucket["example"]["arn"])
```
### Example with Airflow configuration options
```python
import pulumi
import pulumi_aws as aws
example = aws.mwaa.Environment("example",
airflow_configuration_options={
"core.default_task_retries": "16",
"core.parallelism": "1",
},
dag_s3_path="dags/",
execution_role_arn=aws_iam_role["example"]["arn"],
network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
security_group_ids=[aws_security_group["example"]["id"]],
subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
),
source_bucket_arn=aws_s3_bucket["example"]["arn"])
```
### Example with logging configurations
Note that Airflow task logs are enabled by default with the `INFO` log level.
```python
import pulumi
import pulumi_aws as aws
example = aws.mwaa.Environment("example",
dag_s3_path="dags/",
execution_role_arn=aws_iam_role["example"]["arn"],
logging_configuration=aws.mwaa.EnvironmentLoggingConfigurationArgs(
dag_processing_logs=aws.mwaa.EnvironmentLoggingConfigurationDagProcessingLogsArgs(
enabled=True,
log_level="DEBUG",
),
scheduler_logs=aws.mwaa.EnvironmentLoggingConfigurationSchedulerLogsArgs(
enabled=True,
log_level="INFO",
),
task_logs=aws.mwaa.EnvironmentLoggingConfigurationTaskLogsArgs(
enabled=True,
log_level="WARNING",
),
webserver_logs=aws.mwaa.EnvironmentLoggingConfigurationWebserverLogsArgs(
enabled=True,
log_level="ERROR",
),
worker_logs=aws.mwaa.EnvironmentLoggingConfigurationWorkerLogsArgs(
enabled=True,
log_level="CRITICAL",
),
),
network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
security_group_ids=[aws_security_group["example"]["id"]],
subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
),
source_bucket_arn=aws_s3_bucket["example"]["arn"])
```
### Example with tags
```python
import pulumi
import pulumi_aws as aws
example = aws.mwaa.Environment("example",
dag_s3_path="dags/",
execution_role_arn=aws_iam_role["example"]["arn"],
network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
security_group_ids=[aws_security_group["example"]["id"]],
subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
),
source_bucket_arn=aws_s3_bucket["example"]["arn"],
tags={
"Name": "example",
"Environment": "production",
})
```
## Import
MWAA Environment can be imported using `Name` e.g.
```sh
$ pulumi import aws:mwaa/environment:Environment example MyAirflowEnvironment
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] airflow_configuration_options: The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
:param pulumi.Input[str] airflow_version: Airflow version of your environment, will be set by default to the latest version that MWAA supports.
:param pulumi.Input[str] dag_s3_path: The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
:param pulumi.Input[str] environment_class: Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
:param pulumi.Input[str] execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
:param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
:param pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']] logging_configuration: The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
:param pulumi.Input[int] max_workers: The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
:param pulumi.Input[int] min_workers: The minimum number of workers that you want to run in your environment. Will be `1` by default.
:param pulumi.Input[str] name: The name of the Apache Airflow Environment
:param pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']] network_configuration: Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
:param pulumi.Input[str] plugins_s3_object_version: The plugins.zip file version you want to use.
:param pulumi.Input[str] plugins_s3_path: The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
:param pulumi.Input[str] requirements_s3_object_version: The requirements.txt file version you want to use.
:param pulumi.Input[str] requirements_s3_path: The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
:param pulumi.Input[str] source_bucket_arn: The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: An array of key:value pairs to associate with the resource.
:param pulumi.Input[str] webserver_access_mode: Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
:param pulumi.Input[str] weekly_maintenance_window_start: Specifies the start date for the weekly maintenance window.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: EnvironmentArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Creates a MWAA Environment resource.

    A MWAA Environment requires an IAM role (`iam.Role`), two subnets in the
    private zone (`ec2.Subnet`) and a versioned S3 bucket (`s3.Bucket`).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.mwaa.Environment("example",
        dag_s3_path="dags/",
        execution_role_arn=aws_iam_role["example"]["arn"],
        network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
            security_group_ids=[aws_security_group["example"]["id"]],
            subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
        ),
        source_bucket_arn=aws_s3_bucket["example"]["arn"])
    ```

    Further examples (Airflow configuration options, logging configuration,
    tags) are shown on the keyword-argument overload of this constructor.

    ## Import

    MWAA Environment can be imported using `Name` e.g.

    ```sh
    $ pulumi import aws:mwaa/environment:Environment example MyAirflowEnvironment
    ```

    :param str resource_name: The name of the resource.
    :param EnvironmentArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch construction to ``_internal_init``.

    Accepts either a bundled ``EnvironmentArgs`` object (first overload) or
    the individual keyword arguments (second overload), normalizing both
    forms before delegating.
    """
    resource_args, opts = _utilities.get_resource_args_opts(EnvironmentArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: unpack its fields into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   airflow_version: Optional[pulumi.Input[str]] = None,
                   dag_s3_path: Optional[pulumi.Input[str]] = None,
                   environment_class: Optional[pulumi.Input[str]] = None,
                   execution_role_arn: Optional[pulumi.Input[str]] = None,
                   kms_key: Optional[pulumi.Input[str]] = None,
                   logging_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']]] = None,
                   max_workers: Optional[pulumi.Input[int]] = None,
                   min_workers: Optional[pulumi.Input[int]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   network_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']]] = None,
                   plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
                   plugins_s3_path: Optional[pulumi.Input[str]] = None,
                   requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
                   requirements_s3_path: Optional[pulumi.Input[str]] = None,
                   source_bucket_arn: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   webserver_access_mode: Optional[pulumi.Input[str]] = None,
                   weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None,
                   __props__=None,
                   __name__=None,
                   __opts__=None):
    """
    Shared implementation behind both ``__init__`` overloads: validates
    resource options, assembles the input property bag and registers the
    resource with the Pulumi engine.  See the public ``__init__`` overloads
    for the meaning of the individual arguments.
    """
    # Legacy keyword shims: ``__name__``/``__opts__`` predate
    # ``resource_name``/``opts`` and override them when supplied.
    if __name__ is not None:
        warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    # Pin the provider plugin version used to service this resource.
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No id means we are creating a new resource, so build the property
        # bag from the arguments; a caller-supplied ``__props__`` is only
        # valid when looking up an existing resource by id.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = dict()
        __props__['airflow_configuration_options'] = airflow_configuration_options
        __props__['airflow_version'] = airflow_version
        # dag_s3_path, execution_role_arn, network_configuration and
        # source_bucket_arn are required unless an existing URN is given.
        if dag_s3_path is None and not opts.urn:
            raise TypeError("Missing required property 'dag_s3_path'")
        __props__['dag_s3_path'] = dag_s3_path
        __props__['environment_class'] = environment_class
        if execution_role_arn is None and not opts.urn:
            raise TypeError("Missing required property 'execution_role_arn'")
        __props__['execution_role_arn'] = execution_role_arn
        __props__['kms_key'] = kms_key
        __props__['logging_configuration'] = logging_configuration
        __props__['max_workers'] = max_workers
        __props__['min_workers'] = min_workers
        __props__['name'] = name
        if network_configuration is None and not opts.urn:
            raise TypeError("Missing required property 'network_configuration'")
        __props__['network_configuration'] = network_configuration
        __props__['plugins_s3_object_version'] = plugins_s3_object_version
        __props__['plugins_s3_path'] = plugins_s3_path
        __props__['requirements_s3_object_version'] = requirements_s3_object_version
        __props__['requirements_s3_path'] = requirements_s3_path
        if source_bucket_arn is None and not opts.urn:
            raise TypeError("Missing required property 'source_bucket_arn'")
        __props__['source_bucket_arn'] = source_bucket_arn
        __props__['tags'] = tags
        __props__['webserver_access_mode'] = webserver_access_mode
        __props__['weekly_maintenance_window_start'] = weekly_maintenance_window_start
        # Output-only attributes start as None and are resolved by the provider.
        __props__['arn'] = None
        __props__['created_at'] = None
        __props__['last_updateds'] = None
        __props__['service_role_arn'] = None
        __props__['status'] = None
        __props__['webserver_url'] = None
    super(Environment, __self__).__init__(
        'aws:mwaa/environment:Environment',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        airflow_version: Optional[pulumi.Input[str]] = None,
        arn: Optional[pulumi.Input[str]] = None,
        created_at: Optional[pulumi.Input[str]] = None,
        dag_s3_path: Optional[pulumi.Input[str]] = None,
        environment_class: Optional[pulumi.Input[str]] = None,
        execution_role_arn: Optional[pulumi.Input[str]] = None,
        kms_key: Optional[pulumi.Input[str]] = None,
        last_updateds: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentLastUpdatedArgs']]]]] = None,
        logging_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']]] = None,
        max_workers: Optional[pulumi.Input[int]] = None,
        min_workers: Optional[pulumi.Input[int]] = None,
        name: Optional[pulumi.Input[str]] = None,
        network_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']]] = None,
        plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
        plugins_s3_path: Optional[pulumi.Input[str]] = None,
        requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
        requirements_s3_path: Optional[pulumi.Input[str]] = None,
        service_role_arn: Optional[pulumi.Input[str]] = None,
        source_bucket_arn: Optional[pulumi.Input[str]] = None,
        status: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        webserver_access_mode: Optional[pulumi.Input[str]] = None,
        webserver_url: Optional[pulumi.Input[str]] = None,
        weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None) -> 'Environment':
    """
    Get an existing Environment resource's state with the given name, id, and
    optional extra properties used to qualify the lookup.

    Every keyword argument beyond ``opts`` pre-populates the state property
    of the same name on the returned resource; see the corresponding
    ``Environment`` property getters for the meaning of each one.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Merge the provider id into the caller-supplied options.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Seed the state bag with any caller-supplied property values.
    __props__ = {
        "airflow_configuration_options": airflow_configuration_options,
        "airflow_version": airflow_version,
        "arn": arn,
        "created_at": created_at,
        "dag_s3_path": dag_s3_path,
        "environment_class": environment_class,
        "execution_role_arn": execution_role_arn,
        "kms_key": kms_key,
        "last_updateds": last_updateds,
        "logging_configuration": logging_configuration,
        "max_workers": max_workers,
        "min_workers": min_workers,
        "name": name,
        "network_configuration": network_configuration,
        "plugins_s3_object_version": plugins_s3_object_version,
        "plugins_s3_path": plugins_s3_path,
        "requirements_s3_object_version": requirements_s3_object_version,
        "requirements_s3_path": requirements_s3_path,
        "service_role_arn": service_role_arn,
        "source_bucket_arn": source_bucket_arn,
        "status": status,
        "tags": tags,
        "webserver_access_mode": webserver_access_mode,
        "webserver_url": webserver_url,
        "weekly_maintenance_window_start": weekly_maintenance_window_start,
    }
    return Environment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="airflowConfigurationOptions")
def airflow_configuration_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "airflow_configuration_options")
@property
@pulumi.getter(name="airflowVersion")
def airflow_version(self) -> pulumi.Output[str]:
    """
    Airflow version of your environment, will be set by default to the latest version that MWAA supports.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "airflow_version")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    The ARN of the MWAA Environment
    """
    # Output-only: initialized to None at creation and filled in by the provider.
    return pulumi.get(self, "arn")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
    """
    The Created At date of the MWAA Environment
    * `logging_configuration.<LOG_TYPE>.cloud_watch_log_group_arn` - Provides the ARN for the CloudWatch group where the logs will be published
    """
    # Output-only: initialized to None at creation and filled in by the provider.
    return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="dagS3Path")
def dag_s3_path(self) -> pulumi.Output[str]:
    """
    The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "dag_s3_path")
@property
@pulumi.getter(name="environmentClass")
def environment_class(self) -> pulumi.Output[str]:
    """
    Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "environment_class")
@property
@pulumi.getter(name="executionRoleArn")
def execution_role_arn(self) -> pulumi.Output[str]:
    """
    The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "execution_role_arn")
@property
@pulumi.getter(name="kmsKey")
def kms_key(self) -> pulumi.Output[Optional[str]]:
    """
    The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "kms_key")
@property
@pulumi.getter(name="lastUpdateds")
def last_updateds(self) -> pulumi.Output[Sequence['outputs.EnvironmentLastUpdated']]:
    """
    Update records (`EnvironmentLastUpdated`) reported by the provider for this environment.
    """
    # Output-only: initialized to None at creation and filled in by the provider.
    return pulumi.get(self, "last_updateds")
@property
@pulumi.getter(name="loggingConfiguration")
def logging_configuration(self) -> pulumi.Output['outputs.EnvironmentLoggingConfiguration']:
    """
    The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "logging_configuration")
@property
@pulumi.getter(name="maxWorkers")
def max_workers(self) -> pulumi.Output[int]:
    """
    The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "max_workers")
@property
@pulumi.getter(name="minWorkers")
def min_workers(self) -> pulumi.Output[int]:
    """
    The minimum number of workers that you want to run in your environment. Will be `1` by default.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "min_workers")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    The name of the Apache Airflow Environment
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkConfiguration")
def network_configuration(self) -> pulumi.Output['outputs.EnvironmentNetworkConfiguration']:
    """
    Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "network_configuration")
@property
@pulumi.getter(name="pluginsS3ObjectVersion")
def plugins_s3_object_version(self) -> pulumi.Output[str]:
    """
    The plugins.zip file version you want to use.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "plugins_s3_object_version")
@property
@pulumi.getter(name="pluginsS3Path")
def plugins_s3_path(self) -> pulumi.Output[Optional[str]]:
    """
    The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "plugins_s3_path")
@property
@pulumi.getter(name="requirementsS3ObjectVersion")
def requirements_s3_object_version(self) -> pulumi.Output[str]:
    """
    The requirements.txt file version you want to use.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "requirements_s3_object_version")
@property
@pulumi.getter(name="requirementsS3Path")
def requirements_s3_path(self) -> pulumi.Output[Optional[str]]:
    """
    The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "requirements_s3_path")
@property
@pulumi.getter(name="serviceRoleArn")
def service_role_arn(self) -> pulumi.Output[str]:
    """
    The Service Role ARN of the Amazon MWAA Environment
    """
    # Output-only: initialized to None at creation and filled in by the provider.
    return pulumi.get(self, "service_role_arn")
@property
@pulumi.getter(name="sourceBucketArn")
def source_bucket_arn(self) -> pulumi.Output[str]:
    """
    The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "source_bucket_arn")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
    """
    The status of the Amazon MWAA Environment
    """
    # Output-only: initialized to None at creation and filled in by the provider.
    return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    An array of key:value pairs to associate with the resource.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="webserverAccessMode")
def webserver_access_mode(self) -> pulumi.Output[str]:
    """
    Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "webserver_access_mode")
@property
@pulumi.getter(name="webserverUrl")
def webserver_url(self) -> pulumi.Output[str]:
    """
    The webserver URL of the MWAA Environment
    """
    # Output-only: initialized to None at creation and filled in by the provider.
    return pulumi.get(self, "webserver_url")
@property
@pulumi.getter(name="weeklyMaintenanceWindowStart")
def weekly_maintenance_window_start(self) -> pulumi.Output[str]:
    """
    Specifies the start date for the weekly maintenance window.
    """
    # Resolved from the Pulumi runtime's property store.
    return pulumi.get(self, "weekly_maintenance_window_start")
def translate_output_property(self, prop):
    """Map a camelCase provider property name to its snake_case Python name."""
    translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    # Names without a table entry pass through unchanged.
    return translated if translated else prop
def translate_input_property(self, prop):
    """Map a snake_case Python property name to its camelCase provider name.

    Names without a table entry pass through unchanged.
    """
    # NOTE(review): stray "| sdk/python/pulumi_aws/mwaa/environment.py |" text
    # appended after this expression (file-dump residue, a syntax error) was removed.
    return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['EnvironmentArgs', 'Environment']
@pulumi.input_type
class EnvironmentArgs:
def __init__(__self__, *,
             dag_s3_path: pulumi.Input[str],
             execution_role_arn: pulumi.Input[str],
             network_configuration: pulumi.Input['EnvironmentNetworkConfigurationArgs'],
             source_bucket_arn: pulumi.Input[str],
             airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             airflow_version: Optional[pulumi.Input[str]] = None,
             environment_class: Optional[pulumi.Input[str]] = None,
             kms_key: Optional[pulumi.Input[str]] = None,
             logging_configuration: Optional[pulumi.Input['EnvironmentLoggingConfigurationArgs']] = None,
             max_workers: Optional[pulumi.Input[int]] = None,
             min_workers: Optional[pulumi.Input[int]] = None,
             name: Optional[pulumi.Input[str]] = None,
             plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
             plugins_s3_path: Optional[pulumi.Input[str]] = None,
             requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
             requirements_s3_path: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             webserver_access_mode: Optional[pulumi.Input[str]] = None,
             weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None):
    """
    The set of arguments for constructing a Environment resource.

    Required arguments:

    :param pulumi.Input[str] dag_s3_path: The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
    :param pulumi.Input[str] execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
    :param pulumi.Input['EnvironmentNetworkConfigurationArgs'] network_configuration: Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
    :param pulumi.Input[str] source_bucket_arn: The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.

    The remaining keyword arguments are optional and correspond one-to-one
    with the properties of the same name on the ``Environment`` resource;
    see its property getters for their documentation.
    """
    # Required properties are always recorded.
    for required_name, required_value in (
            ("dag_s3_path", dag_s3_path),
            ("execution_role_arn", execution_role_arn),
            ("network_configuration", network_configuration),
            ("source_bucket_arn", source_bucket_arn)):
        pulumi.set(__self__, required_name, required_value)
    # Optional properties are recorded only when explicitly supplied.
    for optional_name, optional_value in (
            ("airflow_configuration_options", airflow_configuration_options),
            ("airflow_version", airflow_version),
            ("environment_class", environment_class),
            ("kms_key", kms_key),
            ("logging_configuration", logging_configuration),
            ("max_workers", max_workers),
            ("min_workers", min_workers),
            ("name", name),
            ("plugins_s3_object_version", plugins_s3_object_version),
            ("plugins_s3_path", plugins_s3_path),
            ("requirements_s3_object_version", requirements_s3_object_version),
            ("requirements_s3_path", requirements_s3_path),
            ("tags", tags),
            ("webserver_access_mode", webserver_access_mode),
            ("weekly_maintenance_window_start", weekly_maintenance_window_start)):
        if optional_value is not None:
            pulumi.set(__self__, optional_name, optional_value)
@property
@pulumi.getter(name="dagS3Path")
def dag_s3_path(self) -> pulumi.Input[str]:
    """
    The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
    """
    # Required input recorded by __init__.
    return pulumi.get(self, "dag_s3_path")

@dag_s3_path.setter
def dag_s3_path(self, value: pulumi.Input[str]):
    pulumi.set(self, "dag_s3_path", value)
@property
@pulumi.getter(name="executionRoleArn")
def execution_role_arn(self) -> pulumi.Input[str]:
    """
    The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
    """
    # Required input recorded by __init__.
    return pulumi.get(self, "execution_role_arn")

@execution_role_arn.setter
def execution_role_arn(self, value: pulumi.Input[str]):
    pulumi.set(self, "execution_role_arn", value)
@property
@pulumi.getter(name="networkConfiguration")
def network_configuration(self) -> pulumi.Input['EnvironmentNetworkConfigurationArgs']:
    """
    Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
    """
    # Required input recorded by __init__.
    return pulumi.get(self, "network_configuration")

@network_configuration.setter
def network_configuration(self, value: pulumi.Input['EnvironmentNetworkConfigurationArgs']):
    pulumi.set(self, "network_configuration", value)
    @property
    @pulumi.getter(name="sourceBucketArn")
    def source_bucket_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
        """
        # Required input: the annotation is non-Optional and _internal_init rejects None.
        return pulumi.get(self, "source_bucket_arn")
    @source_bucket_arn.setter
    def source_bucket_arn(self, value: pulumi.Input[str]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "source_bucket_arn", value)
    @property
    @pulumi.getter(name="airflowConfigurationOptions")
    def airflow_configuration_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
        """
        return pulumi.get(self, "airflow_configuration_options")
    @airflow_configuration_options.setter
    def airflow_configuration_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "airflow_configuration_options", value)
    @property
    @pulumi.getter(name="airflowVersion")
    def airflow_version(self) -> Optional[pulumi.Input[str]]:
        """
        Airflow version of your environment, will be set by default to the latest version that MWAA supports.
        """
        return pulumi.get(self, "airflow_version")
    @airflow_version.setter
    def airflow_version(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "airflow_version", value)
    @property
    @pulumi.getter(name="environmentClass")
    def environment_class(self) -> Optional[pulumi.Input[str]]:
        """
        Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
        """
        return pulumi.get(self, "environment_class")
    @environment_class.setter
    def environment_class(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "environment_class", value)
    @property
    @pulumi.getter(name="kmsKey")
    def kms_key(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
        """
        return pulumi.get(self, "kms_key")
    @kms_key.setter
    def kms_key(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "kms_key", value)
    @property
    @pulumi.getter(name="loggingConfiguration")
    def logging_configuration(self) -> Optional[pulumi.Input['EnvironmentLoggingConfigurationArgs']]:
        """
        The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
        """
        return pulumi.get(self, "logging_configuration")
    @logging_configuration.setter
    def logging_configuration(self, value: Optional[pulumi.Input['EnvironmentLoggingConfigurationArgs']]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "logging_configuration", value)
    @property
    @pulumi.getter(name="maxWorkers")
    def max_workers(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
        """
        return pulumi.get(self, "max_workers")
    @max_workers.setter
    def max_workers(self, value: Optional[pulumi.Input[int]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "max_workers", value)
    @property
    @pulumi.getter(name="minWorkers")
    def min_workers(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum number of workers that you want to run in your environment. Will be `1` by default.
        """
        return pulumi.get(self, "min_workers")
    @min_workers.setter
    def min_workers(self, value: Optional[pulumi.Input[int]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "min_workers", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Apache Airflow Environment
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="pluginsS3ObjectVersion")
    def plugins_s3_object_version(self) -> Optional[pulumi.Input[str]]:
        """
        The plugins.zip file version you want to use.
        """
        return pulumi.get(self, "plugins_s3_object_version")
    @plugins_s3_object_version.setter
    def plugins_s3_object_version(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "plugins_s3_object_version", value)
    @property
    @pulumi.getter(name="pluginsS3Path")
    def plugins_s3_path(self) -> Optional[pulumi.Input[str]]:
        """
        The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        """
        return pulumi.get(self, "plugins_s3_path")
    @plugins_s3_path.setter
    def plugins_s3_path(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "plugins_s3_path", value)
    @property
    @pulumi.getter(name="requirementsS3ObjectVersion")
    def requirements_s3_object_version(self) -> Optional[pulumi.Input[str]]:
        """
        The requirements.txt file version you want to use.
        """
        return pulumi.get(self, "requirements_s3_object_version")
    @requirements_s3_object_version.setter
    def requirements_s3_object_version(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "requirements_s3_object_version", value)
    @property
    @pulumi.getter(name="requirementsS3Path")
    def requirements_s3_path(self) -> Optional[pulumi.Input[str]]:
        """
        The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        """
        return pulumi.get(self, "requirements_s3_path")
    @requirements_s3_path.setter
    def requirements_s3_path(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "requirements_s3_path", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An array of key:value pairs to associate with the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="webserverAccessMode")
    def webserver_access_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
        """
        return pulumi.get(self, "webserver_access_mode")
    @webserver_access_mode.setter
    def webserver_access_mode(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "webserver_access_mode", value)
    @property
    @pulumi.getter(name="weeklyMaintenanceWindowStart")
    def weekly_maintenance_window_start(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the start date for the weekly maintenance window.
        """
        return pulumi.get(self, "weekly_maintenance_window_start")
    @weekly_maintenance_window_start.setter
    def weekly_maintenance_window_start(self, value: Optional[pulumi.Input[str]]):
        # Write-through to the underlying pulumi property store.
        pulumi.set(self, "weekly_maintenance_window_start", value)
class Environment(pulumi.CustomResource):
    # Typing-only overload: construct the resource from individual keyword
    # arguments. The actual work is performed by _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 airflow_version: Optional[pulumi.Input[str]] = None,
                 dag_s3_path: Optional[pulumi.Input[str]] = None,
                 environment_class: Optional[pulumi.Input[str]] = None,
                 execution_role_arn: Optional[pulumi.Input[str]] = None,
                 kms_key: Optional[pulumi.Input[str]] = None,
                 logging_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']]] = None,
                 max_workers: Optional[pulumi.Input[int]] = None,
                 min_workers: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']]] = None,
                 plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
                 plugins_s3_path: Optional[pulumi.Input[str]] = None,
                 requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
                 requirements_s3_path: Optional[pulumi.Input[str]] = None,
                 source_bucket_arn: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 webserver_access_mode: Optional[pulumi.Input[str]] = None,
                 weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Creates a MWAA Environment resource.
        ## Example Usage
        A MWAA Environment requires an IAM role (`iam.Role`), two subnets in the private zone (`ec2.Subnet`) and a versioned S3 bucket (`s3.Bucket`).
        ### Basic Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"])
        ```
        ### Example with Airflow configuration options
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            airflow_configuration_options={
                "core.default_task_retries": "16",
                "core.parallelism": "1",
            },
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"])
        ```
        ### Example with logging configurations
        Note that Airflow task logs are enabled by default with the `INFO` log level.
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            logging_configuration=aws.mwaa.EnvironmentLoggingConfigurationArgs(
                dag_processing_logs=aws.mwaa.EnvironmentLoggingConfigurationDagProcessingLogsArgs(
                    enabled=True,
                    log_level="DEBUG",
                ),
                scheduler_logs=aws.mwaa.EnvironmentLoggingConfigurationSchedulerLogsArgs(
                    enabled=True,
                    log_level="INFO",
                ),
                task_logs=aws.mwaa.EnvironmentLoggingConfigurationTaskLogsArgs(
                    enabled=True,
                    log_level="WARNING",
                ),
                webserver_logs=aws.mwaa.EnvironmentLoggingConfigurationWebserverLogsArgs(
                    enabled=True,
                    log_level="ERROR",
                ),
                worker_logs=aws.mwaa.EnvironmentLoggingConfigurationWorkerLogsArgs(
                    enabled=True,
                    log_level="CRITICAL",
                ),
            ),
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"])
        ```
        ### Example with tags
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"],
            tags={
                "Name": "example",
                "Environment": "production",
            })
        ```
        ## Import
        MWAA Environment can be imported using `Name` e.g.
        ```sh
        $ pulumi import aws:mwaa/environment:Environment example MyAirflowEnvironment
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] airflow_configuration_options: The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
        :param pulumi.Input[str] airflow_version: Airflow version of your environment, will be set by default to the latest version that MWAA supports.
        :param pulumi.Input[str] dag_s3_path: The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] environment_class: Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
        :param pulumi.Input[str] execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
        :param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
        :param pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']] logging_configuration: The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
        :param pulumi.Input[int] max_workers: The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
        :param pulumi.Input[int] min_workers: The minimum number of workers that you want to run in your environment. Will be `1` by default.
        :param pulumi.Input[str] name: The name of the Apache Airflow Environment
        :param pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']] network_configuration: Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
        :param pulumi.Input[str] plugins_s3_object_version: The plugins.zip file version you want to use.
        :param pulumi.Input[str] plugins_s3_path: The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] requirements_s3_object_version: The requirements.txt file version you want to use.
        :param pulumi.Input[str] requirements_s3_path: The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] source_bucket_arn: The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: An array of key:value pairs to associate with the resource.
        :param pulumi.Input[str] webserver_access_mode: Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
        :param pulumi.Input[str] weekly_maintenance_window_start: Specifies the start date for the weekly maintenance window.
        """
        ...
    # Typing-only overload: construct the resource from a fully-populated
    # EnvironmentArgs object. The actual work is performed by _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: EnvironmentArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a MWAA Environment resource.
        ## Example Usage
        A MWAA Environment requires an IAM role (`iam.Role`), two subnets in the private zone (`ec2.Subnet`) and a versioned S3 bucket (`s3.Bucket`).
        ### Basic Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"])
        ```
        ### Example with Airflow configuration options
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            airflow_configuration_options={
                "core.default_task_retries": "16",
                "core.parallelism": "1",
            },
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"])
        ```
        ### Example with logging configurations
        Note that Airflow task logs are enabled by default with the `INFO` log level.
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            logging_configuration=aws.mwaa.EnvironmentLoggingConfigurationArgs(
                dag_processing_logs=aws.mwaa.EnvironmentLoggingConfigurationDagProcessingLogsArgs(
                    enabled=True,
                    log_level="DEBUG",
                ),
                scheduler_logs=aws.mwaa.EnvironmentLoggingConfigurationSchedulerLogsArgs(
                    enabled=True,
                    log_level="INFO",
                ),
                task_logs=aws.mwaa.EnvironmentLoggingConfigurationTaskLogsArgs(
                    enabled=True,
                    log_level="WARNING",
                ),
                webserver_logs=aws.mwaa.EnvironmentLoggingConfigurationWebserverLogsArgs(
                    enabled=True,
                    log_level="ERROR",
                ),
                worker_logs=aws.mwaa.EnvironmentLoggingConfigurationWorkerLogsArgs(
                    enabled=True,
                    log_level="CRITICAL",
                ),
            ),
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"])
        ```
        ### Example with tags
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.mwaa.Environment("example",
            dag_s3_path="dags/",
            execution_role_arn=aws_iam_role["example"]["arn"],
            network_configuration=aws.mwaa.EnvironmentNetworkConfigurationArgs(
                security_group_ids=[aws_security_group["example"]["id"]],
                subnet_ids=[__item["id"] for __item in aws_subnet["private"]],
            ),
            source_bucket_arn=aws_s3_bucket["example"]["arn"],
            tags={
                "Name": "example",
                "Environment": "production",
            })
        ```
        ## Import
        MWAA Environment can be imported using `Name` e.g.
        ```sh
        $ pulumi import aws:mwaa/environment:Environment example MyAirflowEnvironment
        ```
        :param str resource_name: The name of the resource.
        :param EnvironmentArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """Dispatch to the matching overload and delegate to ``_internal_init``."""
        # Determine which overload was used: if the caller passed an
        # EnvironmentArgs instance, unpack its fields as keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(EnvironmentArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword-argument form: forward everything unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       airflow_version: Optional[pulumi.Input[str]] = None,
                       dag_s3_path: Optional[pulumi.Input[str]] = None,
                       environment_class: Optional[pulumi.Input[str]] = None,
                       execution_role_arn: Optional[pulumi.Input[str]] = None,
                       kms_key: Optional[pulumi.Input[str]] = None,
                       logging_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']]] = None,
                       max_workers: Optional[pulumi.Input[int]] = None,
                       min_workers: Optional[pulumi.Input[int]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       network_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']]] = None,
                       plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
                       plugins_s3_path: Optional[pulumi.Input[str]] = None,
                       requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
                       requirements_s3_path: Optional[pulumi.Input[str]] = None,
                       source_bucket_arn: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       webserver_access_mode: Optional[pulumi.Input[str]] = None,
                       weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None,
                       __props__=None,
                       __name__=None,
                       __opts__=None):
        # Validate inputs, build the property bag and register the resource
        # with the Pulumi engine via the base-class constructor.
        # Legacy __name__/__opts__ arguments are accepted but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means an existing resource is being looked up; in that
        # case __props__ is supplied by Environment.get() instead of built here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['airflow_configuration_options'] = airflow_configuration_options
            __props__['airflow_version'] = airflow_version
            # Required properties may only be omitted when adopting an
            # existing resource (opts.urn is set).
            if dag_s3_path is None and not opts.urn:
                raise TypeError("Missing required property 'dag_s3_path'")
            __props__['dag_s3_path'] = dag_s3_path
            __props__['environment_class'] = environment_class
            if execution_role_arn is None and not opts.urn:
                raise TypeError("Missing required property 'execution_role_arn'")
            __props__['execution_role_arn'] = execution_role_arn
            __props__['kms_key'] = kms_key
            __props__['logging_configuration'] = logging_configuration
            __props__['max_workers'] = max_workers
            __props__['min_workers'] = min_workers
            __props__['name'] = name
            if network_configuration is None and not opts.urn:
                raise TypeError("Missing required property 'network_configuration'")
            __props__['network_configuration'] = network_configuration
            __props__['plugins_s3_object_version'] = plugins_s3_object_version
            __props__['plugins_s3_path'] = plugins_s3_path
            __props__['requirements_s3_object_version'] = requirements_s3_object_version
            __props__['requirements_s3_path'] = requirements_s3_path
            if source_bucket_arn is None and not opts.urn:
                raise TypeError("Missing required property 'source_bucket_arn'")
            __props__['source_bucket_arn'] = source_bucket_arn
            __props__['tags'] = tags
            __props__['webserver_access_mode'] = webserver_access_mode
            __props__['weekly_maintenance_window_start'] = weekly_maintenance_window_start
            # Output-only attributes: initialized to None here and populated
            # by the provider after the resource is created.
            __props__['arn'] = None
            __props__['created_at'] = None
            __props__['last_updateds'] = None
            __props__['service_role_arn'] = None
            __props__['status'] = None
            __props__['webserver_url'] = None
        super(Environment, __self__).__init__(
            'aws:mwaa/environment:Environment',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            airflow_configuration_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            airflow_version: Optional[pulumi.Input[str]] = None,
            arn: Optional[pulumi.Input[str]] = None,
            created_at: Optional[pulumi.Input[str]] = None,
            dag_s3_path: Optional[pulumi.Input[str]] = None,
            environment_class: Optional[pulumi.Input[str]] = None,
            execution_role_arn: Optional[pulumi.Input[str]] = None,
            kms_key: Optional[pulumi.Input[str]] = None,
            last_updateds: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentLastUpdatedArgs']]]]] = None,
            logging_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']]] = None,
            max_workers: Optional[pulumi.Input[int]] = None,
            min_workers: Optional[pulumi.Input[int]] = None,
            name: Optional[pulumi.Input[str]] = None,
            network_configuration: Optional[pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']]] = None,
            plugins_s3_object_version: Optional[pulumi.Input[str]] = None,
            plugins_s3_path: Optional[pulumi.Input[str]] = None,
            requirements_s3_object_version: Optional[pulumi.Input[str]] = None,
            requirements_s3_path: Optional[pulumi.Input[str]] = None,
            service_role_arn: Optional[pulumi.Input[str]] = None,
            source_bucket_arn: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            webserver_access_mode: Optional[pulumi.Input[str]] = None,
            webserver_url: Optional[pulumi.Input[str]] = None,
            weekly_maintenance_window_start: Optional[pulumi.Input[str]] = None) -> 'Environment':
        """
        Get an existing Environment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] airflow_configuration_options: The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
        :param pulumi.Input[str] airflow_version: Airflow version of your environment, will be set by default to the latest version that MWAA supports.
        :param pulumi.Input[str] arn: The ARN of the MWAA Environment
        :param pulumi.Input[str] created_at: The Created At date of the MWAA Environment
               * `logging_configuration.<LOG_TYPE>.cloud_watch_log_group_arn` - Provides the ARN for the CloudWatch group where the logs will be published
        :param pulumi.Input[str] dag_s3_path: The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] environment_class: Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
        :param pulumi.Input[str] execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
        :param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
        :param pulumi.Input[pulumi.InputType['EnvironmentLoggingConfigurationArgs']] logging_configuration: The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
        :param pulumi.Input[int] max_workers: The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
        :param pulumi.Input[int] min_workers: The minimum number of workers that you want to run in your environment. Will be `1` by default.
        :param pulumi.Input[str] name: The name of the Apache Airflow Environment
        :param pulumi.Input[pulumi.InputType['EnvironmentNetworkConfigurationArgs']] network_configuration: Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
        :param pulumi.Input[str] plugins_s3_object_version: The plugins.zip file version you want to use.
        :param pulumi.Input[str] plugins_s3_path: The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] requirements_s3_object_version: The requirements.txt file version you want to use.
        :param pulumi.Input[str] requirements_s3_path: The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
        :param pulumi.Input[str] service_role_arn: The Service Role ARN of the Amazon MWAA Environment
        :param pulumi.Input[str] source_bucket_arn: The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
        :param pulumi.Input[str] status: The status of the Amazon MWAA Environment
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: An array of key:value pairs to associate with the resource.
        :param pulumi.Input[str] webserver_access_mode: Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
        :param pulumi.Input[str] webserver_url: The webserver URL of the MWAA Environment
        :param pulumi.Input[str] weekly_maintenance_window_start: Specifies the start date for the weekly maintenance window.
        """
        # Bind the provider id into the resource options so the constructor
        # takes the "look up existing resource" path in _internal_init.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Build the state property bag from the caller-supplied overrides.
        __props__ = dict()
        __props__["airflow_configuration_options"] = airflow_configuration_options
        __props__["airflow_version"] = airflow_version
        __props__["arn"] = arn
        __props__["created_at"] = created_at
        __props__["dag_s3_path"] = dag_s3_path
        __props__["environment_class"] = environment_class
        __props__["execution_role_arn"] = execution_role_arn
        __props__["kms_key"] = kms_key
        __props__["last_updateds"] = last_updateds
        __props__["logging_configuration"] = logging_configuration
        __props__["max_workers"] = max_workers
        __props__["min_workers"] = min_workers
        __props__["name"] = name
        __props__["network_configuration"] = network_configuration
        __props__["plugins_s3_object_version"] = plugins_s3_object_version
        __props__["plugins_s3_path"] = plugins_s3_path
        __props__["requirements_s3_object_version"] = requirements_s3_object_version
        __props__["requirements_s3_path"] = requirements_s3_path
        __props__["service_role_arn"] = service_role_arn
        __props__["source_bucket_arn"] = source_bucket_arn
        __props__["status"] = status
        __props__["tags"] = tags
        __props__["webserver_access_mode"] = webserver_access_mode
        __props__["webserver_url"] = webserver_url
        __props__["weekly_maintenance_window_start"] = weekly_maintenance_window_start
        return Environment(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="airflowConfigurationOptions")
    def airflow_configuration_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options.
        """
        # Read-only Output view of the resource property (no setter on Environment).
        return pulumi.get(self, "airflow_configuration_options")
@property
@pulumi.getter(name="airflowVersion")
def airflow_version(self) -> pulumi.Output[str]:
"""
Airflow version of your environment, will be set by default to the latest version that MWAA supports.
"""
return pulumi.get(self, "airflow_version")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the MWAA Environment
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The Created At date of the MWAA Environment
* `logging_configuration.<LOG_TYPE>.cloud_watch_log_group_arn` - Provides the ARN for the CloudWatch group where the logs will be published
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="dagS3Path")
def dag_s3_path(self) -> pulumi.Output[str]:
"""
The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
"""
return pulumi.get(self, "dag_s3_path")
@property
@pulumi.getter(name="environmentClass")
def environment_class(self) -> pulumi.Output[str]:
"""
Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes.
"""
return pulumi.get(self, "environment_class")
@property
@pulumi.getter(name="executionRoleArn")
def execution_role_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification.
"""
return pulumi.get(self, "execution_role_arn")
@property
@pulumi.getter(name="kmsKey")
def kms_key(self) -> pulumi.Output[Optional[str]]:
"""
The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information.
"""
return pulumi.get(self, "kms_key")
@property
@pulumi.getter(name="lastUpdateds")
def last_updateds(self) -> pulumi.Output[Sequence['outputs.EnvironmentLastUpdated']]:
return pulumi.get(self, "last_updateds")
@property
@pulumi.getter(name="loggingConfiguration")
def logging_configuration(self) -> pulumi.Output['outputs.EnvironmentLoggingConfiguration']:
"""
The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
"""
return pulumi.get(self, "logging_configuration")
@property
@pulumi.getter(name="maxWorkers")
def max_workers(self) -> pulumi.Output[int]:
"""
The maximum number of workers that can be automatically scaled up. Value need to be between `1` and `25`. Will be `10` by default.
"""
return pulumi.get(self, "max_workers")
@property
@pulumi.getter(name="minWorkers")
def min_workers(self) -> pulumi.Output[int]:
"""
The minimum number of workers that you want to run in your environment. Will be `1` by default.
"""
return pulumi.get(self, "min_workers")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Apache Airflow Environment
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkConfiguration")
def network_configuration(self) -> pulumi.Output['outputs.EnvironmentNetworkConfiguration']:
"""
Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details.
"""
return pulumi.get(self, "network_configuration")
@property
@pulumi.getter(name="pluginsS3ObjectVersion")
def plugins_s3_object_version(self) -> pulumi.Output[str]:
"""
The plugins.zip file version you want to use.
"""
return pulumi.get(self, "plugins_s3_object_version")
@property
@pulumi.getter(name="pluginsS3Path")
def plugins_s3_path(self) -> pulumi.Output[Optional[str]]:
"""
The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
"""
return pulumi.get(self, "plugins_s3_path")
@property
@pulumi.getter(name="requirementsS3ObjectVersion")
def requirements_s3_object_version(self) -> pulumi.Output[str]:
"""
The requirements.txt file version you want to use.
"""
return pulumi.get(self, "requirements_s3_object_version")
@property
@pulumi.getter(name="requirementsS3Path")
def requirements_s3_path(self) -> pulumi.Output[Optional[str]]:
"""
The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html).
"""
return pulumi.get(self, "requirements_s3_path")
@property
@pulumi.getter(name="serviceRoleArn")
def service_role_arn(self) -> pulumi.Output[str]:
"""
The Service Role ARN of the Amazon MWAA Environment
"""
return pulumi.get(self, "service_role_arn")
@property
@pulumi.getter(name="sourceBucketArn")
def source_bucket_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.
"""
return pulumi.get(self, "source_bucket_arn")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the Amazon MWAA Environment
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
An array of key:value pairs to associate with the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="webserverAccessMode")
def webserver_access_mode(self) -> pulumi.Output[str]:
"""
Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`.
"""
return pulumi.get(self, "webserver_access_mode")
@property
@pulumi.getter(name="webserverUrl")
def webserver_url(self) -> pulumi.Output[str]:
"""
The webserver URL of the MWAA Environment
"""
return pulumi.get(self, "webserver_url")
@property
@pulumi.getter(name="weeklyMaintenanceWindowStart")
def weekly_maintenance_window_start(self) -> pulumi.Output[str]:
"""
Specifies the start date for the weekly maintenance window.
"""
return pulumi.get(self, "weekly_maintenance_window_start")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | 0.867878 | 0.128388 |
from asyncio import CancelledError, TimeoutError, ensure_future, gather, sleep, wait
from collections import defaultdict
from datetime import datetime, timezone
import logging
from aiohttp import ClientError, ClientResponseError, FormData
import immp
try:
from telethon import TelegramClient, events, tl
from telethon.errors import BadRequestError, ChannelPrivateError
from telethon.sessions import SQLiteSession
from telethon.utils import pack_bot_file_id
except ImportError:
TelegramClient = SQLiteSession = None
log = logging.getLogger(__name__)
class _Schema:
    """
    Validation schemas for the plug configuration and Telegram Bot API payloads.
    """
    # Plug config: the bot token is required; API ID/hash (and optionally a
    # session file) enable the extra MTProto client features.
    config = immp.Schema({"token": str,
                          immp.Optional("api-id"): immp.Nullable(int),
                          immp.Optional("api-hash"): immp.Nullable(str),
                          immp.Optional("client-updates", False): bool,
                          immp.Optional("session"): immp.Nullable(str)})
    # Bot API User object (only the fields this plug reads).
    user = immp.Schema({"id": int,
                        immp.Optional("username"): immp.Nullable(str),
                        "first_name": str,
                        immp.Optional("last_name"): immp.Nullable(str)})
    # Bot API Chat object, restricted to channels.
    channel = immp.Schema({"id": int,
                           "title": str,
                           "type": "channel",
                           immp.Optional("username"): immp.Nullable(str)})
    # Bot API MessageEntity: a formatting annotation over a text range.
    entity = immp.Schema({"type": str,
                          "offset": int,
                          "length": int,
                          immp.Optional("url"): immp.Nullable(str),
                          immp.Optional("user"): immp.Nullable(user)})
    # Shared fragments for media attachments and geolocations.
    _file = {"file_id": str, immp.Optional("file_name"): immp.Nullable(str)}
    _location = {"latitude": float, "longitude": float}
    # Bot API Message object, covering text, media and service messages.
    message = immp.Schema({"message_id": int,
                           "chat": {"id": int},
                           "date": int,
                           immp.Optional("edit_date"): immp.Nullable(int),
                           immp.Optional("from"): immp.Nullable(user),
                           immp.Optional("forward_from"): immp.Nullable(user),
                           immp.Optional("forward_date"): immp.Nullable(int),
                           immp.Optional("forward_from_chat"): immp.Nullable(channel),
                           immp.Optional("forward_from_message_id"): immp.Nullable(int),
                           immp.Optional("forward_signature"): immp.Nullable(str),
                           immp.Optional("forward_sender_name"): immp.Nullable(str),
                           immp.Optional("text"): immp.Nullable(str),
                           immp.Optional("caption"): immp.Nullable(str),
                           immp.Optional("entities", list): [entity],
                           immp.Optional("caption_entities", list): [entity],
                           immp.Optional("photo", list): [_file],
                           immp.Optional("sticker"): immp.Nullable({immp.Optional("emoji"):
                                                                    immp.Nullable(str),
                                                                    "file_id": str}),
                           immp.Optional("animation"): immp.Nullable(_file),
                           immp.Optional("video"): immp.Nullable(_file),
                           immp.Optional("video_note"): immp.Nullable(_file),
                           immp.Optional("audio"): immp.Nullable(_file),
                           immp.Optional("voice"): immp.Nullable(_file),
                           immp.Optional("document"): immp.Nullable(_file),
                           immp.Optional("location"): immp.Nullable(_location),
                           immp.Optional("venue"): immp.Nullable({"location": _location,
                                                                  "title": str,
                                                                  "address": str}),
                           immp.Optional("poll"): immp.Nullable({"question": str,
                                                                 "is_closed": bool}),
                           immp.Optional("group_chat_created", False): bool,
                           immp.Optional("new_chat_members", list): [user],
                           immp.Optional("left_chat_member"): immp.Nullable(user),
                           immp.Optional("new_chat_title"): immp.Nullable(str),
                           immp.Optional("new_chat_photo", list): [_file],
                           immp.Optional("delete_chat_photo", False): bool,
                           immp.Optional("migrate_to_chat_id"): immp.Nullable(int)})
    # Circular references to embedded messages.
    message.raw.update({immp.Optional("reply_to_message"): immp.Nullable(message),
                        immp.Optional("pinned_message"): immp.Nullable(message)})
    update = immp.Schema({"update_id": int,
                          immp.Optional(immp.Any("message", "edited_message",
                                                 "channel_post", "edited_channel_post")): message})

    def api(result=None):
        # Plain helper evaluated during class construction (not a method):
        # wraps a result schema in the Bot API's ok/error response envelope.
        success = {"ok": True}
        if result:
            success["result"] = result
        return immp.Schema(immp.Any(success,
                                    {"ok": False,
                                     "description": str,
                                     "error_code": int}))

    # Response envelopes for the specific API methods this plug calls.
    me = api(user)
    file = api({"file_path": str})
    send = api(message)
    chat = api({"type": str,
                immp.Optional("title"): immp.Nullable(str)})
    updates = api([update])
class TelegramAPIConnectError(immp.PlugError):
    """
    Generic error raised whilst attempting to call the Telegram API.
    """
class TelegramAPIRequestError(immp.PlugError):
    """
    Generic error returned by the Telegram API.
    """
class _HiddenSender(Exception):
    """
    Raised when an ID matches the placeholder used for hidden senders.
    """

    # Placeholder IDs in channel and chat form.
    channel_id = -1001228946795
    chat_id = 1228946795

    @classmethod
    def check(cls, value):
        # Signal via exception so callers can fall back to other identity info.
        if value == cls.channel_id or value == cls.chat_id:
            raise cls
class TelegramUser(immp.User):
    """
    User present in Telegram.
    """

    @classmethod
    def from_bot_user(cls, telegram, json):
        """
        Convert a user :class:`dict` (attached to a message) to a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            json (dict):
                Telegram API `User <https://core.telegram.org/bots/api#user>`_ object.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        data = _Schema.user(json)
        name = data["first_name"]
        if data["last_name"]:
            name = "{} {}".format(name, data["last_name"])
        pic = ("https://t.me/i/userpic/320/{}.jpg".format(data["username"])
               if data["username"] else None)
        return cls(id_=data["id"],
                   plug=telegram,
                   username=data["username"],
                   real_name=name,
                   avatar=pic,
                   raw=data)

    @classmethod
    def from_bot_channel(cls, telegram, json):
        """
        Convert a chat :class:`dict` (attached to a message) to a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            json (dict):
                Telegram API `Chat <https://core.telegram.org/bots/api#chat>`_ object.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        data = _Schema.channel(json)
        # Reject the placeholder chat used for hidden senders.
        _HiddenSender.check(data["id"])
        return cls(id_=data["id"],
                   plug=telegram,
                   username=data["username"],
                   real_name=data["title"],
                   raw=data)

    @classmethod
    def from_proto_user(cls, telegram, user):
        """
        Convert a :class:`telethon.tl.types.User` into a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            user (telethon.tl.types.User):
                Telegram user retrieved from the MTProto API.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        name = user.first_name
        if user.last_name:
            name = "{} {}".format(name, user.last_name)
        pic = None
        if user.username and user.photo:
            pic = "https://t.me/i/userpic/320/{}.jpg".format(user.username)
        return cls(id_=user.id,
                   plug=telegram,
                   username=user.username,
                   real_name=name,
                   avatar=pic,
                   raw=user)

    @classmethod
    def from_proto_channel(cls, telegram, user, author=None):
        """
        Convert a :class:`telethon.tl.types.Channel` into a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            user (telethon.tl.types.Channel):
                Telegram channel retrieved from the MTProto API.
            author (str):
                Optional post author, for channel messages.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        # Reject the placeholder channel used for hidden senders.
        _HiddenSender.check(user.id)
        return cls(id_=user.id,
                   plug=telegram,
                   username=user.username,
                   real_name=author or user.title,
                   raw=user)

    @classmethod
    def from_entity(cls, telegram, entity):
        """
        Convert a client entity row into a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            entity ((str, str, str) tuple):
                ID, username and real name of a cached Telegram user.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        id_, username, name = entity
        pic = ("https://t.me/i/userpic/320/{}.jpg".format(username)
               if username else None)
        return cls(id_=id_,
                   plug=telegram,
                   username=username,
                   real_name=name,
                   avatar=pic,
                   raw=entity)

    @property
    def link(self):
        # Prefer the public username URL, falling back to an in-app user link.
        if self.username:
            return "https://t.me/{}".format(self.username)
        return "tg://user?id={}".format(self.id)

    @link.setter
    def link(self, value):
        # Links are derived from the username/ID, so assignments are ignored.
        pass
class TelegramSegment(immp.Segment):
    """
    Plug-friendly representation of Telegram message formatting.
    """

    @classmethod
    def to_html(cls, telegram, segment):
        """
        Convert a :class:`.Segment` into HTML suitable for Telegram's automatic parsing.

        Args:
            telegram (.TelegramPlug):
                Related plug instance to cross-reference users.
            segment (.Segment)
                Message segment created by another plug.

        Returns:
            str:
                HTML-formatted string.
        """
        text = (segment.text.replace("&", "&amp;")
                            .replace("<", "&lt;")
                            .replace(">", "&gt;"))
        mention = segment.mention
        if mention and mention.plug.network_id == telegram.network_id:
            if mention.username:
                # Telegram will parse this automatically.
                text = "@{}".format(mention.username)
            else:
                # Make a link that looks like a mention.
                text = ("<a href=\"tg://user?id={}\">{}</a>"
                        .format(mention.id, mention.real_name))
        elif segment.link:
            text = "<a href=\"{}\">{}</a>".format(segment.link, text)
        # Wrap in formatting tags, innermost first (code ends up innermost,
        # strike outermost -- same nesting order as applying them in sequence).
        for flag, tag in ((segment.code, "code"), (segment.pre, "pre"),
                          (segment.bold, "b"), (segment.italic, "i"),
                          (segment.underline, "u"), (segment.strike, "s")):
            if flag:
                text = "<{0}>{1}</{0}>".format(tag, text)
        return text
class TelegramRichText(immp.RichText):
    """
    Wrapper for Telegram-specific parsing of formatting.
    """

    @classmethod
    def _from_changes(cls, text, changes):
        # Build segments from a mapping of offset -> {format key: value}; a
        # False/None value at an offset switches that formatting off again.
        segments = []
        points = list(sorted(changes.keys()))
        formatting = {}
        # Iterate through text in change start/end pairs.
        for start, end in zip([0] + points, points + [len(text)]):
            formatting.update(changes[start])
            if start == end:
                # Zero-length segment at the start or end, ignore it.
                continue
            part = text[start:end]
            if isinstance(part, bytes):
                # Bot API offsets are UTF-16-based, so the text arrives here as
                # UTF-16-LE bytes -- decode each slice back to str.
                part = part.decode("utf-16-le")
            segments.append(immp.Segment(part, **formatting))
        return cls(segments)

    @classmethod
    async def from_bot_entities(cls, telegram, text, entities):
        """
        Convert a string annotated by Telegram's entities to :class:`.RichText`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the text.
            text (str):
                Plain text without formatting.
            entities (dict list):
                List of Telegram API `MessageEntity
                <https://core.telegram.org/bots/api#messageentity>`_ objects.

        Returns:
            .TelegramRichText:
                Parsed rich text container.
        """
        if not text:
            return None
        elif not entities:
            return immp.RichText([immp.Segment(text)])
        # Telegram entities assume the text is UTF-16.
        encoded = text.encode("utf-16-le")
        changes = defaultdict(dict)
        for json in entities:
            entity = _Schema.entity(json)
            # Offsets/lengths count UTF-16 code units; scale by 2 for bytes.
            start = entity["offset"] * 2
            end = start + (entity["length"] * 2)
            if entity["type"] in ("bold", "italic", "underline", "code", "pre"):
                key = entity["type"]
                value = True
            elif entity["type"] == "strikethrough":
                key = "strike"
                value = True
            elif entity["type"] == "url":
                key = "link"
                value = encoded[start:end].decode("utf-16-le")
            elif entity["type"] == "email":
                key = "link"
                value = "mailto:{}".format(encoded[start:end].decode("utf-16-le"))
            elif entity["type"] == "text_link":
                key = "link"
                value = entity["url"]
            elif entity["type"] == "mention":
                key = "mention"
                # start + 2 skips the leading "@" (one UTF-16 unit = 2 bytes).
                username = encoded[start + 2:end].decode("utf-16-le")
                value = await telegram.user_from_username(username)
            elif entity["type"] == "text_mention":
                key = "mention"
                value = TelegramUser.from_bot_user(telegram, entity["user"])
            else:
                # Unsupported entity type, leave the text unformatted.
                continue
            # Boolean flags are cleared with False; valued keys (link, mention)
            # are cleared with None at the end of their range.
            clear = False if value is True else None
            changes[start][key] = value
            changes[end][key] = clear
        return cls._from_changes(encoded, changes)

    @classmethod
    async def from_proto_entities(cls, telegram, text, entities):
        """
        Convert a string annotated by Telegram's entities to :class:`.RichText`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the text.
            text (str):
                Plain text without formatting.
            entities (telethon.types.TypeMessageEntity list):
                List of Telegram entity objects.

        Returns:
            .TelegramRichText:
                Parsed rich text container.
        """
        if not text:
            return None
        elif not entities:
            return immp.RichText([immp.Segment(text)])
        # NOTE(review): entity offsets are applied directly to the str here
        # (unlike the *2 scaling in from_bot_entities) -- assumes offsets line
        # up with Python string indexes; confirm behaviour for non-BMP text.
        changes = defaultdict(dict)
        for entity in entities:
            value = True
            if isinstance(entity, tl.types.MessageEntityBold):
                key = "bold"
            elif isinstance(entity, tl.types.MessageEntityItalic):
                key = "italic"
            elif isinstance(entity, tl.types.MessageEntityUnderline):
                key = "underline"
            elif isinstance(entity, tl.types.MessageEntityStrike):
                key = "strike"
            elif isinstance(entity, tl.types.MessageEntityCode):
                key = "code"
            elif isinstance(entity, tl.types.MessageEntityPre):
                key = "pre"
            elif isinstance(entity, tl.types.MessageEntityUrl):
                key = "link"
                value = text[entity.offset:entity.offset + entity.length]
            elif isinstance(entity, tl.types.MessageEntityTextUrl):
                key = "link"
                value = entity.url
            elif isinstance(entity, tl.types.MessageEntityEmail):
                key = "link"
                value = "mailto:{}".format(text[entity.offset:entity.offset + entity.length])
            elif isinstance(entity, tl.types.MessageEntityMention):
                key = "mention"
                # offset + 1 skips the leading "@".
                username = text[entity.offset + 1:entity.offset + entity.length]
                value = await telegram.user_from_username(username)
            elif isinstance(entity, tl.types.MessageEntityMentionName):
                key = "mention"
                value = await telegram.user_from_id(entity.user_id)
            else:
                # Unsupported entity type, leave the text unformatted.
                continue
            # Boolean flags are cleared with False; valued keys with None.
            clear = False if value is True else None
            changes[entity.offset][key] = value
            changes[entity.offset + entity.length][key] = clear
        return cls._from_changes(text, changes)
class TelegramFile(immp.File):
    """
    File attachment originating from Telegram.
    """

    @classmethod
    async def from_id(cls, telegram, id_, type_=immp.File.Type.unknown, name=None):
        """
        Generate a file using the bot API URL for a Telegram file.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the file.
            id_ (str):
                File ID as provided in the bot API, or constructed from a raw MTProto file.
            type_ (.File.Type):
                Corresponding file type.
            name (str):
                Original filename, if available for the file format.

        Returns:
            .TelegramFile:
                Parsed file object.
        """
        # Resolve the file ID to a server-side path, then build a download URL.
        meta = await telegram._api("getFile", _Schema.file, params={"file_id": id_})
        url = ("https://api.telegram.org/file/bot{}/{}"
               .format(telegram.config["token"], meta["file_path"]))
        return immp.File(name, type_, url)
class TelegramMessage(immp.Message):
    """
    Message originating from Telegram.
    """

    # Media keys carrying a single file attachment; at most one is set per message.
    _file_types = ("animation", "video", "video_note", "audio", "voice", "document")

    @classmethod
    async def from_bot_message(cls, telegram, json):
        """
        Convert an API message :class:`dict` to a :class:`.Message`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            json (dict):
                Telegram API `message <https://core.telegram.org/bots/api#message>`_ object.

        Returns:
            .TelegramMessage:
                Parsed message object.
        """
        message = _Schema.message(json)
        # Message IDs are just a sequence, only unique to their channel and not the whole network.
        # Pair with the chat ID for a network-unique value.
        id_ = "{}:{}".format(message["chat"]["id"], message["message_id"])
        # Use the edit timestamp as the revision marker where present.
        revision = message["edit_date"] or message["date"]
        at = datetime.fromtimestamp(message["date"], timezone.utc)
        channel = immp.Channel(telegram, message["chat"]["id"])
        edited = bool(message["edit_date"])
        text = await TelegramRichText.from_bot_entities(telegram, message["text"],
                                                        message["entities"])
        user = None
        action = False
        reply_to = None
        joined = None
        left = None
        title = None
        attachments = []
        if message["from"]:
            user = TelegramUser.from_bot_user(telegram, message["from"])
        if message["reply_to_message"]:
            reply_to = await cls.from_bot_message(telegram, message["reply_to_message"])
        # At most one of these fields will be set.
        if message["group_chat_created"]:
            action = True
            text = immp.RichText([immp.Segment("created the group "),
                                  immp.Segment(message["chat"]["title"], bold=True)])
        elif message["new_chat_members"]:
            joined = [(TelegramUser.from_bot_user(telegram, member))
                      for member in message["new_chat_members"]]
            action = True
            if joined == [user]:
                # Self-join: the user followed an invite link.
                text = "joined group via invite link"
            else:
                text = immp.RichText()
                for join in joined:
                    text.append(immp.Segment(", " if text else "invited "),
                                immp.Segment(join.real_name, bold=True, link=join.link))
        elif message["left_chat_member"]:
            left = [TelegramUser.from_bot_user(telegram, message["left_chat_member"])]
            action = True
            if left == [user]:
                text = "left group"
            else:
                part = left[0]
                text = immp.RichText([immp.Segment("removed "),
                                      immp.Segment(part.real_name, bold=True, link=part.link)])
        elif message["new_chat_title"]:
            title = message["new_chat_title"]
            action = True
            text = immp.RichText([immp.Segment("changed group name to "),
                                  immp.Segment(title, bold=True)])
        elif message["new_chat_photo"]:
            action = True
            text = "changed group photo"
            # Pick the largest of the offered resolutions.
            # NOTE(review): "height" is not declared in _Schema._file -- assumes
            # immp.Schema passes extra keys through; confirm.
            photo = max(message["new_chat_photo"], key=lambda photo: photo["height"])
            attachments.append(await TelegramFile.from_id(telegram, photo["file_id"],
                                                          immp.File.Type.image))
        elif message["delete_chat_photo"]:
            action = True
            text = "removed group photo"
        elif message["pinned_message"]:
            action = True
            text = "pinned a message"
            # Surface the pinned message itself as an attachment.
            attachments.append(await cls.from_bot_message(telegram, message["pinned_message"]))
        elif message["photo"]:
            # This is a list of resolutions, find the original sized one to return.
            photo = max(message["photo"], key=lambda photo: photo["height"])
            attachments.append(await TelegramFile.from_id(telegram, photo["file_id"],
                                                          immp.File.Type.image))
            if message["caption"]:
                text = await TelegramRichText.from_bot_entities(telegram, message["caption"],
                                                                message["caption_entities"])
        elif message["sticker"]:
            attachments.append(await TelegramFile.from_id(telegram, message["sticker"]["file_id"],
                                                          immp.File.Type.image))
            # All real stickers should have an emoji, but webp images uploaded as photos are
            # incorrectly categorised as stickers in the API response.
            if not text and message["sticker"]["emoji"]:
                action = True
                text = "sent {} sticker".format(message["sticker"]["emoji"])
        elif any(message[key] for key in cls._file_types):
            # Find the (single) media key that's set; the any() guard above
            # guarantees the loop binds key/obj before breaking.
            for key in cls._file_types:
                if message[key]:
                    obj = message[key]
                    break
            # Derive a file type from the media key where possible.
            if key == "animation":
                type_ = immp.File.Type.image
            elif key in ("video", "video_note"):
                type_ = immp.File.Type.video
            else:
                type_ = immp.File.Type.unknown
            attachments.append(await TelegramFile.from_id(telegram, obj["file_id"], type_,
                                                          obj["file_name"]))
        elif message["venue"]:
            attachments.append(immp.Location(latitude=message["venue"]["location"]["latitude"],
                                             longitude=message["venue"]["location"]["longitude"],
                                             name=message["venue"]["title"],
                                             address=message["venue"]["address"]))
        elif message["location"]:
            attachments.append(immp.Location(latitude=message["location"]["latitude"],
                                             longitude=message["location"]["longitude"]))
        elif message["poll"]:
            action = True
            prefix = "closed the" if message["poll"]["is_closed"] else "opened a"
            text = immp.RichText([immp.Segment("{} poll: ".format(prefix)),
                                  immp.Segment(message["poll"]["question"], bold=True)])
        elif not text:
            # No support for this message type.
            raise NotImplementedError
        common = dict(id_=id_,
                      revision=revision,
                      at=at,
                      channel=channel,
                      user=user,
                      raw=message)
        if message["forward_date"]:
            # Event is a message containing another message. Forwarded messages have no ID, so we
            # use a Message instead of a SentMessage here, unless they come from a channel.
            forward_id = forward_channel = forward_user = None
            if message["forward_from_chat"]:
                forward_channel = immp.Channel(telegram, message["forward_from_chat"]["id"])
                try:
                    forward_user = TelegramUser.from_bot_channel(telegram,
                                                                 message["forward_from_chat"])
                except _HiddenSender:
                    # Sender hid their account; fall back to the post signature.
                    if message["forward_signature"]:
                        forward_user = immp.User(real_name=message["forward_signature"])
                if message["forward_from_message_id"]:
                    forward_id = "{}:{}".format(message["forward_from_chat"]["id"],
                                                message["forward_from_message_id"])
            elif message["forward_from"]:
                forward_user = TelegramUser.from_bot_user(telegram, message["forward_from"])
            elif message["forward_sender_name"]:
                forward_user = immp.User(real_name=message["forward_sender_name"])
            forward_common = dict(text=text,
                                  user=forward_user,
                                  edited=edited,
                                  action=action,
                                  reply_to=reply_to,
                                  joined=joined,
                                  left=left,
                                  title=title,
                                  attachments=attachments,
                                  raw=message)
            if forward_id:
                forward = immp.SentMessage(id_=forward_id,
                                           channel=forward_channel,
                                           **forward_common)
            else:
                forward = immp.Message(**forward_common)
            # Embed the inner message as an attachment.
            return immp.SentMessage(attachments=[forward], **common)
        else:
            return immp.SentMessage(text=text,
                                    edited=edited,
                                    action=action,
                                    reply_to=reply_to,
                                    joined=joined,
                                    left=left,
                                    title=title,
                                    attachments=attachments,
                                    **common)

    @classmethod
    async def from_bot_update(cls, telegram, update):
        """
        Convert an API update :class:`dict` to a :class:`.Message`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            update (dict):
                Telegram API `update <https://core.telegram.org/bots/api#update>`_ object.

        Returns:
            .TelegramMessage:
                Parsed message object.
        """
        # An update carries at most one of message/channel_post, possibly edited.
        for key in ("message", "channel_post"):
            if update.get(key):
                return await cls.from_bot_message(telegram, update[key])
            elif update.get("edited_{}".format(key)):
                return await cls.from_bot_message(telegram, update["edited_{}".format(key)])

    @classmethod
    async def from_proto_message(cls, telegram, message):
        """
        Convert a Telegram message event to a :class:`.Message`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            message (telethon.tl.custom.Message):
                Received message from an event or get request.

        Returns:
            .TelegramMessage:
                Parsed message object.
        """
        # As above: pair the chat ID with the message ID for uniqueness.
        id_ = "{}:{}".format(message.chat_id, message.id)
        channel = immp.Channel(telegram, message.chat_id)
        edited = bool(message.edit_date)
        # Prefer the edit timestamp as the revision, then the send time.
        if edited:
            revision = int(message.edit_date.timestamp())
        elif message.date:
            revision = int(message.date.timestamp())
        else:
            revision = None
        text = await TelegramRichText.from_proto_entities(telegram, message.message,
                                                          message.entities)
        sender = await message.get_sender()
        # Channel posts are sent by the channel itself rather than a user.
        if isinstance(sender, tl.types.Channel):
            user = TelegramUser.from_proto_channel(telegram, sender)
        else:
            user = TelegramUser.from_proto_user(telegram, sender)
        action = False
        reply_to = None
        joined = []
        left = []
        title = None
        attachments = []
        if message.reply_to_msg_id:
            receipt = immp.Receipt(message.reply_to_msg_id, channel)
            reply_to = await telegram.resolve_message(receipt)
        if message.photo:
            try:
                # pack_bot_file_id() converts an MTProto file reference into a
                # bot-API-compatible file ID for download via the bot API.
                attach = await TelegramFile.from_id(telegram, pack_bot_file_id(message.photo),
                                                    immp.File.Type.image)
            except TelegramAPIRequestError as e:
                # Best-effort: deliver the message without the attachment.
                log.warning("Unable to fetch attachment", exc_info=e)
            else:
                attachments.append(attach)
        elif message.document:
            type_ = immp.File.Type.unknown
            name = None
            for attr in message.document.attributes:
                if isinstance(attr, tl.types.DocumentAttributeSticker):
                    type_ = immp.File.Type.image
                    if attr.alt and not text:
                        text = "sent {} sticker".format(attr.alt)
                        action = True
                elif isinstance(attr, tl.types.DocumentAttributeAnimated):
                    type_ = immp.File.Type.image
                elif isinstance(attr, tl.types.DocumentAttributeVideo):
                    type_ = immp.File.Type.video
                # A filename attribute can coexist with the type attributes above.
                if isinstance(attr, tl.types.DocumentAttributeFilename):
                    name = attr.file_name
            try:
                attach = await TelegramFile.from_id(telegram, pack_bot_file_id(message.document),
                                                    type_, name)
            except TelegramAPIRequestError as e:
                # Best-effort: deliver the message without the attachment.
                log.warning("Unable to fetch attachment", exc_info=e)
            else:
                attachments.append(attach)
        elif message.poll:
            action = True
            prefix = "closed the" if message.poll.poll.closed else "opened a"
            text = immp.RichText([immp.Segment("{} poll: ".format(prefix)),
                                  immp.Segment(message.poll.poll.question, bold=True)])
        if message.action:
            # Service message (created/joined/left/renamed/pinned etc.).
            action = True
            if isinstance(message.action, tl.types.MessageActionChatCreate):
                text = immp.RichText([immp.Segment("created the group "),
                                      immp.Segment(message.action.title, bold=True)])
            elif isinstance(message.action, tl.types.MessageActionChatJoinedByLink):
                joined = [user]
                text = "joined group via invite link"
            elif isinstance(message.action, tl.types.MessageActionChatAddUser):
                joined = await gather(*(telegram.user_from_id(id_) for id_ in message.action.users))
                if joined == [user]:
                    text = "joined group"
                else:
                    text = immp.RichText()
                    for join in joined:
                        text.append(immp.Segment(", " if text else "invited "),
                                    immp.Segment(join.real_name, link=join.link))
            elif isinstance(message.action, tl.types.MessageActionChatDeleteUser):
                left = [await telegram.user_from_id(message.action.user_id)]
                if left == [user]:
                    text = "left group"
                else:
                    part = left[0]
                    text = immp.RichText([immp.Segment("removed "),
                                          immp.Segment(part.real_name, bold=True, link=part.link)])
            elif isinstance(message.action, tl.types.MessageActionChatEditTitle):
                title = message.action.title
                text = immp.RichText([immp.Segment("changed group name to "),
                                      immp.Segment(title, bold=True)])
            elif isinstance(message.action, tl.types.MessageActionChatEditPhoto):
                text = "changed group photo"
            elif isinstance(message.action, tl.types.MessageActionChatDeletePhoto):
                text = "removed group photo"
            elif isinstance(message.action, tl.types.MessageActionPinMessage):
                # The pinned message arrived as the reply target; surface it as
                # an attachment instead of a reply.
                attachments.append(reply_to)
                reply_to = None
                text = "pinned message"
            else:
                raise NotImplementedError
        if not text and not attachments:
            # No support for this message type.
            raise NotImplementedError
        common = dict(id_=id_,
                      revision=revision,
                      at=message.date,
                      channel=channel,
                      user=user,
                      raw=message)
        if message.forward:
            # Event is a message containing another message. Forwarded messages have no ID, so we
            # use a Message instead of a SentMessage here, unless they come from a channel.
            forward_id = forward_channel = forward_user = None
            if message.forward.channel_id and message.forward.channel_post:
                forward_channel = immp.Channel(telegram, message.forward.chat_id)
                forward_id = "{}:{}".format(message.forward.chat_id,
                                            message.forward.channel_post)
                chat = await message.forward.get_chat()
                try:
                    forward_user = TelegramUser.from_proto_channel(telegram, chat,
                                                                   message.forward.post_author)
                except _HiddenSender:
                    # Sender hid their account; fall back to the post author name.
                    if message.forward.post_author:
                        forward_user = immp.User(real_name=message.forward.post_author)
            elif message.forward.sender_id:
                forward_user = TelegramUser.from_proto_user(telegram,
                                                            await message.forward.get_sender())
            elif message.forward.from_name:
                forward_user = immp.User(real_name=message.forward.from_name)
            forward_common = dict(text=text,
                                  user=forward_user,
                                  edited=edited,
                                  action=action,
                                  reply_to=reply_to,
                                  joined=joined,
                                  left=left,
                                  title=title,
                                  attachments=attachments,
                                  raw=message)
            if forward_id:
                forward = immp.SentMessage(id_=forward_id,
                                           channel=forward_channel,
                                           **forward_common)
            else:
                forward = immp.Message(**forward_common)
            # Embed the inner message as an attachment.
            return immp.SentMessage(attachments=[forward], **common)
        else:
            return immp.SentMessage(text=text,
                                    edited=edited,
                                    action=action,
                                    reply_to=reply_to,
                                    joined=joined,
                                    left=left,
                                    title=title,
                                    attachments=attachments,
                                    **common)
if SQLiteSession:
    class Session(SQLiteSession):
        """
        Telethon session subclass adding bulk lookups over the cached entity table.
        """

        def _execute_multi(self, statement, *values):
            # Like SQLiteSession._execute, but returns every matching row.
            cur = self._cursor()
            try:
                return cur.execute(statement, values).fetchall()
            finally:
                cur.close()

        def get_user_entities(self):
            # Positive IDs correspond to users.
            return self._execute_multi("SELECT id, username, name FROM entities WHERE id > 0")

        def get_chat_entities(self):
            # Negative IDs correspond to chats and channels.
            return self._execute_multi("SELECT id, username, name FROM entities WHERE id < 0")

        def get_entity(self, id_):
            return self._execute("SELECT id, username, name FROM entities WHERE id = ?", id_)

        def get_entity_username(self, username):
            return self._execute("SELECT id, username, name FROM entities WHERE username = ?",
                                 username)
class TelegramPlug(immp.HTTPOpenable, immp.Plug):
    """
    Plug for a `Telegram <https://telegram.org>`_ bot.
    """

    schema = _Schema.config

    # Fixed display name for this network type.
    network_name = "Telegram"
@property
def network_id(self):
return "telegram:{}".format(self._bot_user["id"]) if self._bot_user else None
    def __init__(self, name, config, host):
        """Validate the config combinations and initialise connection/cache state (no I/O)."""
        super().__init__(name, config, host)
        if bool(self.config["api-id"]) != bool(self.config["api-hash"]):
            raise immp.ConfigError("Both of API ID and hash must be given")
        if self.config["client-updates"] and not self.config["api-id"]:
            raise immp.ConfigError("Client updates require API ID and hash")
        if self.config["session"] and not self.config["api-id"]:
            raise immp.ConfigError("Session file requires API ID and hash")
        # Connection objects that need to be closed on disconnect.
        self._bot_user = self._receive = self._client = None
        self._closing = False
        # Temporary tracking of migrated chats for the current session.
        self._migrations = {}
        # Caching of user/username lookups to avoid flooding.
        self._users = {}
        self._usernames = {}
        # Blacklist of channels we have an entity for but can't access. Indexed at startup, with
        # chats removed if we receive a message from that channel.
        self._blacklist = set()
        self._blacklist_task = None
        # Update ID from which to retrieve the next batch. Should be one higher than the max seen.
        self._offset = 0
        # Private chats and non-super groups have a shared incremental message ID. Cache the
        # highest we've seen, so that we can attempt to fetch past messages with this as a base.
        self._last_id = None
    async def _api(self, endpoint, schema=_Schema.api(), quiet=False, **kwargs):
        """Call a bot API *endpoint* and return the validated ``result`` field.

        Raises:
            TelegramAPIConnectError: on network failure, timeout or a non-JSON response.
            TelegramAPIRequestError: when the API answers ``ok: false``.
        """
        # NB: the default schema is evaluated once at definition time; that's fine here
        # as schemas are only ever read from, never mutated.
        url = "https://api.telegram.org/bot{}/{}".format(self.config["token"], endpoint)
        if not quiet:
            log.debug("Making API request to %r", endpoint)
        try:
            async with self.session.post(url, **kwargs) as resp:
                try:
                    json = await resp.json()
                    data = schema(json)
                except ClientResponseError as e:
                    raise TelegramAPIConnectError("Bad response with code: {}"
                                                  .format(resp.status)) from e
        except ClientError as e:
            raise TelegramAPIConnectError("Request failed") from e
        except TimeoutError as e:
            raise TelegramAPIConnectError("Request timed out") from e
        if not data["ok"]:
            raise TelegramAPIRequestError(data["error_code"], data["description"])
        return data["result"]
    async def start(self):
        """Connect to Telegram: fetch the bot identity, then either attach client event
        handlers (API ID/hash with client-updates) or start bot-API long-polling."""
        await super().start()
        self._closing = False
        self._bot_user = await self._api("getMe", _Schema.me)
        if self.config["api-id"] and self.config["api-hash"]:
            if not TelegramClient:
                raise immp.ConfigError("API ID/hash specified but Telethon is not installed")
            log.debug("Starting client")
            self._client = TelegramClient(Session(self.config["session"]),
                                          self.config["api-id"], self.config["api-hash"])
        if self._client and self.config["client-updates"]:
            log.debug("Adding client event handlers")
            self._client.add_event_handler(self._handle_raw)
            for event in (events.NewMessage, events.MessageEdited, events.ChatAction):
                self._client.add_event_handler(self._handle, event)
        else:
            log.debug("Starting update long-poll")
            self._receive = ensure_future(self._poll())
        if self._client:
            await self._client.start(bot_token=self.config["token"])
            # Find the most recently received message, and therefore the current value of the
            # shared ID sequence. Fetch the current state, then subtract one from pts to make it
            # replay the last message, which should appear in new_messages and other_updates.
            state = await self._client(tl.functions.updates.GetStateRequest())
            diff = await self._client(tl.functions.updates.GetDifferenceRequest(
                state.pts - 1, datetime.utcnow(), state.qts))
            if isinstance(diff, tl.types.updates.DifferenceEmpty):
                # Unclear if this will happen with the given parameters.
                pass
            elif diff.new_messages:
                self._last_id = diff.new_messages[-1].id
            # The hidden-sender placeholder is never accessible; blacklist it up front.
            self._blacklist = {_HiddenSender.channel_id}
            self._blacklist_task = ensure_future(wait([self._blacklist_users(),
                                                       self._blacklist_chats()]))
    async def stop(self):
        """Tear down polling/client connections and reset all per-run state."""
        await super().stop()
        self._closing = True
        if self._receive:
            log.debug("Stopping update long-poll")
            self._receive.cancel()
            self._receive = None
        if self._client:
            log.debug("Closing client")
            await self._client.disconnect()
            self._client = None
        self._bot_user = None
        if self._blacklist:
            self._blacklist.clear()
        if self._blacklist_task:
            self._blacklist_task.cancel()
            self._blacklist_task = None
        self._offset = 0
        self._last_id = None
        if self._migrations:
            # Migrations are only tracked in memory, so named channels pointing at
            # the old chat ID must be updated in the config by hand.
            log.warning("Chat migrations require a config update before next run")
    async def user_from_id(self, id_):
        """Look up a user by ID via the session cache, our own cache, or MTProto.

        Returns None without client auth, or if the user is unknown/inaccessible.
        """
        if not self._client:
            log.debug("Client auth required to look up users")
            return None
        entity = self._client.session.get_entity(id_)
        if entity:
            return TelegramUser.from_entity(self, entity)
        elif id_ in self._users:
            return self._users[id_]
        try:
            data = await self._client(tl.functions.users.GetFullUserRequest(int(id_)))
        except BadRequestError:
            # Unknown or inaccessible user.
            return None
        user = TelegramUser.from_proto_user(self, data.user)
        # Cache the lookup to avoid flooding the API.
        self._users[id_] = user
        return user
async def user_from_username(self, username):
if not self._client:
log.debug("Client auth required to look up users")
return None
entity = self._client.session.get_entity_username(username)
if entity:
return TelegramUser.from_entity(self, entity)
elif username in self._usernames:
return self._usernames[username]
try:
data = await self._client(tl.functions.contacts.ResolveUsernameRequest(username))
except BadRequestError:
return None
if not data.users:
return None
user = TelegramUser.from_proto_user(self, data.users[0])
self._usernames[username] = user
return user
async def user_is_system(self, user):
return user.id == str(self._bot_user["id"])
async def _blacklist_users(self):
# For each user in the entity table, check the bot API for a corresponding chat, and
# blacklist those who haven't started a conversation with us yet.
log.debug("Finding users to blacklist")
count = 0
for user in self._client.session.get_user_entities():
try:
await self._api("getChat", quiet=True, params={"chat_id": user[0]})
except TelegramAPIRequestError:
count += 1
self._blacklist.add(user[0])
log.debug("Blacklisted %d users", count)
    async def _blacklist_chats(self):
        """Blacklist cached group/channel chats the bot can no longer access."""
        # The entity cache is polluted with channels we've seen outside of participation (e.g.
        # mentions and forwards). Narrow down the list by excluding chats we can't access.
        log.debug("Finding chats to blacklist")
        count = 0
        lookup = []
        for chat in self._client.session.get_chat_entities():
            if chat[0] in self._blacklist:
                continue
            if str(chat[0]).startswith("-100"):
                # Supergroup/channel: probe accessibility one at a time.
                try:
                    await self._client(tl.functions.channels.GetChannelsRequest([abs(chat[0])]))
                except ChannelPrivateError:
                    count += 1
                    self._blacklist.add(chat[0])
            else:
                # Basic group: batch these into a single lookup below.
                lookup.append(abs(chat[0]))
        if lookup:
            chats = await self._client(tl.functions.messages.GetChatsRequest(lookup))
            gone = [-chat.id for chat in chats.chats if isinstance(chat, tl.types.ChatForbidden)]
            if gone:
                count += len(gone)
                self._blacklist.update(gone)
        log.debug("Blacklisted %d chats", count)
async def public_channels(self):
if not self._client:
log.debug("Client auth required to look up channels")
return None
# Use the session cache to find all "seen" chats -- not guaranteed to be a complete list.
# Filter out chats we're no longer a member of, or otherwise can't access.
ids = set(chat[0] for chat in self._client.session.get_chat_entities())
return [immp.Channel(self, chat) for chat in ids - self._blacklist]
async def private_channels(self):
if not self._client:
log.debug("Client auth required to look up channels")
return None
# Private channels just use user IDs, so return all users we know about, filtered to those
# we also have a valid chat for.
ids = set(chat[0] for chat in self._client.session.get_user_entities())
return [immp.Channel(self, chat) for chat in ids - self._blacklist]
async def channel_for_user(self, user):
if not isinstance(user, TelegramUser):
return None
try:
await self._api("getChat", params={"chat_id": user.id})
except TelegramAPIRequestError as e:
log.warning("Failed to retrieve user %s channel", user.id, exc_info=e)
# Can't create private channels, users must initiate conversations with bots.
return None
else:
return immp.Channel(self, user.id)
async def channel_is_private(self, channel):
return int(channel.source) > 0
async def channel_title(self, channel):
if await channel.is_private():
return None
if self._client:
entity = self._client.session.get_entity(channel.source)
if entity:
return entity[2]
try:
data = await self._api("getChat", _Schema.chat, params={"chat_id": channel.source})
except TelegramAPIRequestError as e:
log.warning("Failed to retrieve channel %s title", channel.source, exc_info=e)
return None
else:
return data["title"]
async def channel_rename(self, channel, title):
await self._api("setChatTitle", params={"chat_id": channel.source, "title": title})
    async def channel_members(self, channel):
        """List members of a chat, or None when lookup isn't possible (client auth only)."""
        if not self._client:
            log.debug("Client auth required to list channel members")
            return None
        # Private channels should just contain the bot and the corresponding user.
        if await channel.is_private():
            if channel.source == str(self._bot_user["id"]):
                return [TelegramUser.from_bot_user(self, self._bot_user)]
            elif int(channel.source) > 0:
                entity = self._client.session.get_entity(channel.source)
                if entity:
                    return [TelegramUser.from_bot_user(self, self._bot_user),
                            await self.user_from_id(channel.source)]
        # Channel and supergroup chat IDs have a bot-API-only prefix to distinguish them.
        if channel.source.startswith("-100"):
            chat = int(channel.source[4:])
            users = []
            try:
                # Page through participants 1000 at a time until an empty batch.
                while True:
                    data = await self._client(tl.functions.channels.GetParticipantsRequest(
                        chat, tl.types.ChannelParticipantsRecent(), len(users), 1000, 0))
                    if data.users:
                        users += [TelegramUser.from_proto_user(self, user) for user in data.users]
                    else:
                        break
            except BadRequestError:
                return None
            else:
                return users
        else:
            # Basic group: a single full-chat request returns all members.
            chat = abs(int(channel.source))
            try:
                data = await self._client(tl.functions.messages.GetFullChatRequest(chat))
            except BadRequestError:
                return None
            else:
                return [TelegramUser.from_proto_user(self, user) for user in data.users]
async def channel_remove(self, channel, user):
if user.id == self._bot_user["id"]:
await self._api("leaveChat", params={"chat_id": channel.source})
else:
await self._api("kickChatMember", params={"chat_id": channel.source,
"user_id": user.id})
    async def channel_history(self, channel, before=None):
        """Fetch up to one batch of messages sent before *before* (client auth only)."""
        if not self._client:
            log.debug("Client auth required to retrieve messages")
            return []
        # Telegram channels (including supergroups) have their own message ID sequence starting
        # from 1. Each user has a shared ID sequence used for non-super groups and private chats.
        private_seq = channel.source.startswith("-100")
        if not before:
            if private_seq:
                request = tl.functions.channels.GetFullChannelRequest(int(channel.source))
                chat = await self._client(request)
                before = immp.Receipt("{}:{}".format(channel.source, chat.full_chat.pts), channel)
            elif self._last_id:
                before = immp.Receipt("{}:{}".format(channel.source, self._last_id + 1), channel)
            else:
                log.debug("Before message required to retrieve messages")
                return []
        chat, message = (int(field) for field in before.id.split(":", 1))
        # For a channel-private sequence, we can just retrieve the last batch of messages. For the
        # shared sequence, we can't lookup for a specific chat, so we instead fetch a larger batch
        # (maxes out at 200) and filter to messages from the target chat.
        limit = 50 if private_seq else 200
        ids = list(range(max(message - limit, 1), message))
        history = filter(None, await self._client.get_messages(entity=chat, ids=ids))
        tasks = (TelegramMessage.from_proto_message(self, message) for message in history)
        results = await gather(*tasks, return_exceptions=True)
        messages = []
        for result in results:
            if isinstance(result, NotImplementedError):
                # Message with no usable content -- drop it from the history.
                continue
            elif isinstance(result, Exception):
                raise result
            else:
                messages.append(result)
        return messages
async def get_message(self, receipt):
if not self._client:
log.debug("Client auth required to retrieve messages")
return None
message = await self._client.get_messages(int(receipt.channel.source), ids=int(receipt.id))
if not message:
log.debug("Failed to find message %d in %d", receipt.id, receipt.channel.source)
return None
try:
return await TelegramMessage.from_proto_message(self, message)
except NotImplementedError:
return None
async def _form_data(self, base, field, attach):
data = FormData(base)
if attach.source:
data.add_field(field, attach.source)
else:
img_resp = await attach.get_content(self.session)
data.add_field(field, img_resp.content, filename=attach.title or field)
return data
    async def _upload_attachment(self, chat, msg, attach):
        """Upload one file to *chat* as its own message; returns the sent message or None."""
        # Upload a file to Telegram in its own message.
        # Prefer a source URL if available, else fall back to re-uploading the file.
        base = {"chat_id": str(chat)}
        if msg.user:
            # Caption the upload with an action line attributing the original sender.
            if attach.type == immp.File.Type.image:
                what = "an image"
            elif attach.type == immp.File.Type.video:
                what = "a video"
            else:
                what = "a file"
            rich = immp.Message(text=immp.RichText([immp.Segment("sent {}".format(what))]),
                                user=msg.user, action=True).render()
            text = "".join(TelegramSegment.to_html(self, segment) for segment in rich)
            base["caption"] = text
            base["parse_mode"] = "HTML"
        if attach.type == immp.File.Type.image:
            data = await self._form_data(base, "photo", attach)
            try:
                return await self._api("sendPhoto", _Schema.send, data=data)
            except (TelegramAPIConnectError, TelegramAPIRequestError):
                log.debug("Failed to upload image, falling back to document upload")
        elif attach.type == immp.File.Type.video:
            data = await self._form_data(base, "video", attach)
            try:
                return await self._api("sendVideo", _Schema.send, data=data)
            except (TelegramAPIConnectError, TelegramAPIRequestError):
                log.debug("Failed to upload video, falling back to document upload")
        # Generic document upload, also the fallback for failed image/video sends.
        data = await self._form_data(base, "document", attach)
        try:
            return await self._api("sendDocument", _Schema.send, data=data)
        except TelegramAPIConnectError as e:
            log.warning("Failed to upload file", exc_info=e)
            return None
    def _requests(self, chat, msg):
        """Build the (unawaited) API request coroutines needed to send *msg* to *chat*."""
        requests = []
        if msg.text or msg.reply_to:
            quote = False
            reply_to = ""
            if isinstance(msg.reply_to, immp.Receipt):
                # Reply natively to the given parent message.
                reply_to = int(msg.reply_to.id.split(":")[1])
            elif isinstance(msg.reply_to, immp.Message):
                # Non-native parent: render it as a quote inside our own text.
                quote = True
            rich = msg.render(edit=msg.edited, quote_reply=quote)
            text = "".join(TelegramSegment.to_html(self, segment) for segment in rich)
            no_link_preview = "true" if msg.user and msg.user.link else "false"
            requests.append(self._api("sendMessage", _Schema.send,
                                      params={"chat_id": chat,
                                              "text": text,
                                              "parse_mode": "HTML",
                                              # Prevent linked user names generating previews.
                                              "disable_web_page_preview": no_link_preview,
                                              "reply_to_message_id": reply_to}))
        for attach in msg.attachments:
            if isinstance(attach, immp.File):
                requests.append(self._upload_attachment(chat, msg, attach))
            elif isinstance(attach, immp.Location):
                requests.append(self._api("sendLocation", _Schema.send,
                                          params={"chat_id": chat,
                                                  "latitude": str(attach.latitude),
                                                  "longitude": str(attach.longitude)}))
                if msg.user:
                    # Locations can't carry a caption, so attribute them separately.
                    caption = immp.Message(user=msg.user, text="sent a location", action=True)
                    text = "".join(TelegramSegment.to_html(self, segment)
                                   for segment in caption.render())
                    requests.append(self._api("sendMessage", _Schema.send,
                                              params={"chat_id": chat,
                                                      "text": text,
                                                      "parse_mode": "HTML"}))
        return requests
    async def put(self, channel, msg):
        """Send *msg* and its attachments to *channel*; returns the IDs of sent messages."""
        chat = channel.source
        # Follow any chain of in-memory migrations to the current chat ID.
        while chat in self._migrations:
            log.debug("Following chat migration: %r -> %r", chat, self._migrations[chat])
            chat = self._migrations[chat]
        requests = []
        for attach in msg.attachments:
            # Generate requests for attached messages first.
            if isinstance(attach, immp.Receipt):
                # Forward the messages natively using the given chat/ID.
                forward_chat, forward_id = map(int, attach.id.split(":"))
                requests.append(self._api("forwardMessage", _Schema.send,
                                          params={"chat_id": chat,
                                                  "from_chat_id": forward_chat,
                                                  "message_id": forward_id}))
            elif isinstance(attach, immp.Message):
                requests += self._requests(chat, attach)
        own_requests = self._requests(chat, msg)
        if requests and not own_requests and msg.user:
            # Forwarding a message but no content to show who forwarded it.
            info = immp.Message(user=msg.user, action=True, text="forwarded a message")
            own_requests = self._requests(chat, info)
        requests += own_requests
        ids = []
        for request in requests:
            result = await request
            if not result:
                continue
            sent = await TelegramMessage.from_bot_message(self, result)
            ids.append(sent.id)
            # Echo our own message through the receive path for other consumers.
            self._post_recv(sent)
        return ids
async def delete(self, sent):
chat, message = sent.id.split(":", 1)
await self._api("deleteMessage", params={"chat_id": chat, "message_id": message})
def _migrate(self, old, new):
log.warning("Chat has migrated: %r -> %r", old, new)
self._migrations[old] = new
for name, channel in self.host.channels.items():
if channel.plug is self and channel.source == old:
log.debug("Updating named channel %r in place", name)
channel.source = new
def _post_recv(self, sent):
self.queue(sent)
chat, seq = (int(part) for part in sent.id.split(":", 1))
if self._blacklist:
self._blacklist.discard(chat)
if not str(chat).startswith("-100"):
self._last_id = seq
    async def _poll(self):
        """Long-poll getUpdates in a loop, dispatching messages until stopped."""
        while not self._closing:
            params = {"offset": self._offset,
                      "timeout": 240}
            fetch = ensure_future(self._api("getUpdates", _Schema.updates, params=params))
            try:
                result = await fetch
            except CancelledError:
                log.debug("Cancelling polling")
                return
            except (TelegramAPIConnectError, TelegramAPIRequestError) as e:
                # Transient failure -- back off briefly and retry the poll.
                log.debug("Unexpected response or timeout: %r", e)
                log.debug("Reconnecting in 3 seconds")
                await sleep(3)
                continue
            except Exception as e:
                log.exception("Uncaught exception during long-poll: %r", e)
                raise
            for update in result:
                log.debug("Received an update")
                if "message" in update and update["message"]["migrate_to_chat_id"]:
                    old = update["message"]["chat"]["id"]
                    new = update["message"]["migrate_to_chat_id"]
                    self._migrate(old, new)
                if any(key in update or "edited_{}".format(key) in update
                       for key in ("message", "channel_post")):
                    try:
                        sent = await TelegramMessage.from_bot_update(self, update)
                    except NotImplementedError:
                        log.debug("Skipping message with no usable parts")
                    except CancelledError:
                        log.debug("Cancel request for plug %r getter", self.name)
                        return
                    else:
                        self._post_recv(sent)
                else:
                    log.debug("Ignoring update with unknown keys: %s", ", ".join(update.keys()))
                # Advance the offset past every processed update, even skipped ones.
                self._offset = max(update["update_id"] + 1, self._offset)
async def _handle_raw(self, event):
log.debug("Received a %s event", event.__class__.__qualname__)
if isinstance(event, tl.types.UpdateNewMessage):
if isinstance(event.message.action, tl.types.MessageActionChatMigrateTo):
old = event.message.chat_id
new = int("-100{}".format(event.message.action.channel_id))
self._migrate(old, new)
    async def _handle(self, event):
        """Convert a Telethon message or chat-action event to an immp message and queue it."""
        if isinstance(event, events.ChatAction.Event):
            # Chat actions (joins, renames etc.) wrap their message differently.
            message = event.action_message
        else:
            message = event.message
        try:
            sent = await TelegramMessage.from_proto_message(self, message)
        except NotImplementedError:
            log.debug("Skipping message with no usable parts")
        else:
            self._post_recv(sent)

from asyncio import CancelledError, TimeoutError, ensure_future, gather, sleep, wait
from collections import defaultdict
from datetime import datetime, timezone
import logging
from aiohttp import ClientError, ClientResponseError, FormData
import immp
try:
from telethon import TelegramClient, events, tl
from telethon.errors import BadRequestError, ChannelPrivateError
from telethon.sessions import SQLiteSession
from telethon.utils import pack_bot_file_id
except ImportError:
TelegramClient = SQLiteSession = None
log = logging.getLogger(__name__)
class _Schema:
    """Validation schemas for plug configuration and bot API payloads."""

    config = immp.Schema({"token": str,
                          immp.Optional("api-id"): immp.Nullable(int),
                          immp.Optional("api-hash"): immp.Nullable(str),
                          immp.Optional("client-updates", False): bool,
                          immp.Optional("session"): immp.Nullable(str)})

    user = immp.Schema({"id": int,
                        immp.Optional("username"): immp.Nullable(str),
                        "first_name": str,
                        immp.Optional("last_name"): immp.Nullable(str)})

    channel = immp.Schema({"id": int,
                           "title": str,
                           "type": "channel",
                           immp.Optional("username"): immp.Nullable(str)})

    entity = immp.Schema({"type": str,
                          "offset": int,
                          "length": int,
                          immp.Optional("url"): immp.Nullable(str),
                          immp.Optional("user"): immp.Nullable(user)})

    _file = {"file_id": str, immp.Optional("file_name"): immp.Nullable(str)}

    _location = {"latitude": float, "longitude": float}

    message = immp.Schema({"message_id": int,
                           "chat": {"id": int},
                           "date": int,
                           immp.Optional("edit_date"): immp.Nullable(int),
                           immp.Optional("from"): immp.Nullable(user),
                           immp.Optional("forward_from"): immp.Nullable(user),
                           immp.Optional("forward_date"): immp.Nullable(int),
                           immp.Optional("forward_from_chat"): immp.Nullable(channel),
                           immp.Optional("forward_from_message_id"): immp.Nullable(int),
                           immp.Optional("forward_signature"): immp.Nullable(str),
                           immp.Optional("forward_sender_name"): immp.Nullable(str),
                           immp.Optional("text"): immp.Nullable(str),
                           immp.Optional("caption"): immp.Nullable(str),
                           immp.Optional("entities", list): [entity],
                           immp.Optional("caption_entities", list): [entity],
                           immp.Optional("photo", list): [_file],
                           immp.Optional("sticker"): immp.Nullable({immp.Optional("emoji"):
                                                                    immp.Nullable(str),
                                                                    "file_id": str}),
                           immp.Optional("animation"): immp.Nullable(_file),
                           immp.Optional("video"): immp.Nullable(_file),
                           immp.Optional("video_note"): immp.Nullable(_file),
                           immp.Optional("audio"): immp.Nullable(_file),
                           immp.Optional("voice"): immp.Nullable(_file),
                           immp.Optional("document"): immp.Nullable(_file),
                           immp.Optional("location"): immp.Nullable(_location),
                           immp.Optional("venue"): immp.Nullable({"location": _location,
                                                                  "title": str,
                                                                  "address": str}),
                           immp.Optional("poll"): immp.Nullable({"question": str,
                                                                 "is_closed": bool}),
                           immp.Optional("group_chat_created", False): bool,
                           immp.Optional("new_chat_members", list): [user],
                           immp.Optional("left_chat_member"): immp.Nullable(user),
                           immp.Optional("new_chat_title"): immp.Nullable(str),
                           immp.Optional("new_chat_photo", list): [_file],
                           immp.Optional("delete_chat_photo", False): bool,
                           immp.Optional("migrate_to_chat_id"): immp.Nullable(int)})

    # Circular references to embedded messages.
    message.raw.update({immp.Optional("reply_to_message"): immp.Nullable(message),
                        immp.Optional("pinned_message"): immp.Nullable(message)})

    update = immp.Schema({"update_id": int,
                          immp.Optional(immp.Any("message", "edited_message",
                                                 "channel_post", "edited_channel_post")): message})

    # Wrap a result schema in the API's ok/error envelope. Called unbound via
    # the class (e.g. _Schema.api(...)), so no decorator is needed.
    def api(result=None):
        success = {"ok": True}
        if result:
            success["result"] = result
        return immp.Schema(immp.Any(success,
                                    {"ok": False,
                                     "description": str,
                                     "error_code": int}))

    me = api(user)

    file = api({"file_path": str})

    send = api(message)

    chat = api({"type": str,
                immp.Optional("title"): immp.Nullable(str)})

    updates = api([update])
class TelegramAPIConnectError(immp.PlugError):
    """
    Generic error whilst attempting to call the Telegram API.

    Raised on network failures, timeouts, or unparseable responses.
    """
class TelegramAPIRequestError(immp.PlugError):
    """
    Generic error from the Telegram API.

    Raised when the API responds with ``ok: false``; carries the error code and
    description as its arguments.
    """
class _HiddenSender(Exception):
    """
    Raised during user parsing when the sender resolves to Telegram's placeholder
    account (NOTE(review): presumably the service account backing "hidden sender"
    forwards -- the IDs below are hard-coded and undocumented; confirm upstream).
    """

    # Same account in bot-API channel form (-100 prefix) and bare MTProto form.
    channel_id = -1001228946795
    chat_id = 1228946795

    @classmethod
    def check(cls, value):
        # Abort parsing with this exception if the ID is the placeholder account.
        if value in (cls.channel_id, cls.chat_id):
            raise cls
class TelegramUser(immp.User):
    """
    User present in Telegram.
    """

    @classmethod
    def from_bot_user(cls, telegram, json):
        """
        Convert a user :class:`dict` (attached to a message) to a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            json (dict):
                Telegram API `User <https://core.telegram.org/bots/api#user>`_ object.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        user = _Schema.user(json)
        real_name = user["first_name"]
        if user["last_name"]:
            real_name = "{} {}".format(real_name, user["last_name"])
        avatar = None
        if user["username"]:
            # Public avatar URL pattern for users with a username.
            avatar = "https://t.me/i/userpic/320/{}.jpg".format(user["username"])
        return cls(id_=user["id"],
                   plug=telegram,
                   username=user["username"],
                   real_name=real_name,
                   avatar=avatar,
                   raw=user)

    @classmethod
    def from_bot_channel(cls, telegram, json):
        """
        Convert a chat :class:`dict` (attached to a message) to a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            json (dict):
                Telegram API `Chat <https://core.telegram.org/bots/api#chat>`_ object.

        Returns:
            .TelegramUser:
                Parsed user object.

        Raises:
            _HiddenSender: if the chat is the hidden-sender placeholder account.
        """
        chat = _Schema.channel(json)
        _HiddenSender.check(chat["id"])
        return cls(id_=chat["id"],
                   plug=telegram,
                   username=chat["username"],
                   real_name=chat["title"],
                   raw=chat)

    @classmethod
    def from_proto_user(cls, telegram, user):
        """
        Convert a :class:`telethon.tl.types.User` into a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            user (telethon.tl.types.User):
                Telegram user retrieved from the MTProto API.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        real_name = user.first_name
        if user.last_name:
            real_name = "{} {}".format(real_name, user.last_name)
        avatar = None
        # Only derive an avatar URL when the account actually has a photo set.
        if user.username and user.photo:
            avatar = "https://t.me/i/userpic/320/{}.jpg".format(user.username)
        return cls(id_=user.id,
                   plug=telegram,
                   username=user.username,
                   real_name=real_name,
                   avatar=avatar,
                   raw=user)

    @classmethod
    def from_proto_channel(cls, telegram, user, author=None):
        """
        Convert a :class:`telethon.tl.types.Channel` into a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            user (telethon.tl.types.Channel):
                Telegram channel retrieved from the MTProto API.
            author (str):
                Optional post author, for channel messages.

        Returns:
            .TelegramUser:
                Parsed user object.

        Raises:
            _HiddenSender: if the channel is the hidden-sender placeholder account.
        """
        _HiddenSender.check(user.id)
        return cls(id_=user.id,
                   plug=telegram,
                   username=user.username,
                   real_name=author or user.title,
                   raw=user)

    @classmethod
    def from_entity(cls, telegram, entity):
        """
        Convert a client entity row into a :class:`.User`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the user.
            entity ((str, str, str) tuple):
                ID, username and real name of a cached Telegram user.

        Returns:
            .TelegramUser:
                Parsed user object.
        """
        id_, username, name = entity
        avatar = None
        if username:
            avatar = "https://t.me/i/userpic/320/{}.jpg".format(username)
        return cls(id_=id_,
                   plug=telegram,
                   username=username,
                   real_name=name,
                   avatar=avatar,
                   raw=entity)

    @property
    def link(self):
        # Prefer a public t.me link; fall back to an internal mention URI.
        if self.username:
            return "https://t.me/{}".format(self.username)
        else:
            return "tg://user?id={}".format(self.id)

    @link.setter
    def link(self, value):
        # Always derived from username/ID, so writes are ignored -- NOTE(review):
        # presumably the base immp.User assigns this during construction; confirm.
        pass
class TelegramSegment(immp.Segment):
    """
    Plug-friendly representation of Telegram message formatting.
    """

    @classmethod
    def to_html(cls, telegram, segment):
        """
        Convert a :class:`.Segment` into HTML suitable for Telegram's automatic parsing.

        Args:
            telegram (.TelegramPlug):
                Related plug instance to cross-reference users.
            segment (.Segment):
                Message segment created by another plug.

        Returns:
            str:
                HTML-formatted string.
        """
        # Escape HTML metacharacters first -- messages are sent with
        # parse_mode=HTML, so bare &, < and > in user text would otherwise be
        # interpreted as markup. (The entity targets had been corrupted into
        # no-op identity replace() calls; restored here.)
        text = (segment.text.replace("&", "&amp;")
                            .replace("<", "&lt;")
                            .replace(">", "&gt;"))
        if segment.mention and segment.mention.plug.network_id == telegram.network_id:
            if segment.mention.username:
                # Telegram will parse this automatically.
                text = "@{}".format(segment.mention.username)
            else:
                # Make a link that looks like a mention.
                text = ("<a href=\"tg://user?id={}\">{}</a>"
                        .format(segment.mention.id, segment.mention.real_name))
        elif segment.link:
            text = "<a href=\"{}\">{}</a>".format(segment.link, text)
        if segment.code:
            text = "<code>{}</code>".format(text)
        if segment.pre:
            text = "<pre>{}</pre>".format(text)
        if segment.bold:
            text = "<b>{}</b>".format(text)
        if segment.italic:
            text = "<i>{}</i>".format(text)
        if segment.underline:
            text = "<u>{}</u>".format(text)
        if segment.strike:
            text = "<s>{}</s>".format(text)
        return text
class TelegramRichText(immp.RichText):
    """
    Wrapper for Telegram-specific parsing of formatting.
    """

    @classmethod
    def _from_changes(cls, text, changes):
        # Build segments from a {position: {format: value}} mapping of style changes.
        # *text* may be str (MTProto offsets) or UTF-16-LE bytes (bot API offsets).
        segments = []
        points = list(sorted(changes.keys()))
        formatting = {}
        # Iterate through text in change start/end pairs.
        for start, end in zip([0] + points, points + [len(text)]):
            formatting.update(changes[start])
            if start == end:
                # Zero-length segment at the start or end, ignore it.
                continue
            part = text[start:end]
            if isinstance(part, bytes):
                part = part.decode("utf-16-le")
            segments.append(immp.Segment(part, **formatting))
        return cls(segments)

    @classmethod
    async def from_bot_entities(cls, telegram, text, entities):
        """
        Convert a string annotated by Telegram's entities to :class:`.RichText`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the text.
            text (str):
                Plain text without formatting.
            entities (dict list):
                List of Telegram API `MessageEntity
                <https://core.telegram.org/bots/api#messageentity>`_ objects.

        Returns:
            .TelegramRichText:
                Parsed rich text container.
        """
        if not text:
            return None
        elif not entities:
            return immp.RichText([immp.Segment(text)])
        # Telegram entities assume the text is UTF-16.
        encoded = text.encode("utf-16-le")
        changes = defaultdict(dict)
        for json in entities:
            entity = _Schema.entity(json)
            # Offsets/lengths are UTF-16 code units; double them for byte offsets.
            start = entity["offset"] * 2
            end = start + (entity["length"] * 2)
            if entity["type"] in ("bold", "italic", "underline", "code", "pre"):
                key = entity["type"]
                value = True
            elif entity["type"] == "strikethrough":
                key = "strike"
                value = True
            elif entity["type"] == "url":
                key = "link"
                value = encoded[start:end].decode("utf-16-le")
            elif entity["type"] == "email":
                key = "link"
                value = "mailto:{}".format(encoded[start:end].decode("utf-16-le"))
            elif entity["type"] == "text_link":
                key = "link"
                value = entity["url"]
            elif entity["type"] == "mention":
                key = "mention"
                # Skip the leading "@" (one UTF-16 code unit = 2 bytes).
                username = encoded[start + 2:end].decode("utf-16-le")
                value = await telegram.user_from_username(username)
            elif entity["type"] == "text_mention":
                key = "mention"
                value = TelegramUser.from_bot_user(telegram, entity["user"])
            else:
                continue
            # Boolean styles switch off at the end; valued styles reset to None.
            clear = False if value is True else None
            changes[start][key] = value
            changes[end][key] = clear
        return cls._from_changes(encoded, changes)

    @classmethod
    async def from_proto_entities(cls, telegram, text, entities):
        """
        Convert a string annotated by Telegram's entities to :class:`.RichText`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the text.
            text (str):
                Plain text without formatting.
            entities (telethon.types.TypeMessageEntity list):
                List of Telegram entity objects.

        Returns:
            .TelegramRichText:
                Parsed rich text container.
        """
        if not text:
            return None
        elif not entities:
            return immp.RichText([immp.Segment(text)])
        changes = defaultdict(dict)
        for entity in entities:
            value = True
            if isinstance(entity, tl.types.MessageEntityBold):
                key = "bold"
            elif isinstance(entity, tl.types.MessageEntityItalic):
                key = "italic"
            elif isinstance(entity, tl.types.MessageEntityUnderline):
                key = "underline"
            elif isinstance(entity, tl.types.MessageEntityStrike):
                key = "strike"
            elif isinstance(entity, tl.types.MessageEntityCode):
                key = "code"
            elif isinstance(entity, tl.types.MessageEntityPre):
                key = "pre"
            elif isinstance(entity, tl.types.MessageEntityUrl):
                key = "link"
                value = text[entity.offset:entity.offset + entity.length]
            elif isinstance(entity, tl.types.MessageEntityTextUrl):
                key = "link"
                value = entity.url
            elif isinstance(entity, tl.types.MessageEntityEmail):
                key = "link"
                value = "mailto:{}".format(text[entity.offset:entity.offset + entity.length])
            elif isinstance(entity, tl.types.MessageEntityMention):
                key = "mention"
                # Skip the leading "@" of the username.
                username = text[entity.offset + 1:entity.offset + entity.length]
                value = await telegram.user_from_username(username)
            elif isinstance(entity, tl.types.MessageEntityMentionName):
                key = "mention"
                value = await telegram.user_from_id(entity.user_id)
            else:
                continue
            clear = False if value is True else None
            changes[entity.offset][key] = value
            changes[entity.offset + entity.length][key] = clear
        return cls._from_changes(text, changes)
class TelegramFile(immp.File):
    """
    File attachment originating from Telegram.
    """

    @classmethod
    async def from_id(cls, telegram, id_, type_=immp.File.Type.unknown, name=None):
        """
        Generate a file using the bot API URL for a Telegram file.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the file.
            id_ (str):
                File ID as provided in the bot API, or constructed from a raw MTProto file.
            type_ (.File.Type):
                Corresponding file type.
            name (str):
                Original filename, if available for the file format.

        Returns:
            .TelegramFile:
                Parsed file object.
        """
        # Resolve the file ID to a server-side path via the bot API.
        file = await telegram._api("getFile", _Schema.file, params={"file_id": id_})
        # The download URL embeds the bot token -- treat it as sensitive.
        url = ("https://api.telegram.org/file/bot{}/{}"
               .format(telegram.config["token"], file["file_path"]))
        # NOTE(review): returns a plain immp.File rather than cls(...) -- confirm
        # whether callers ever rely on receiving this subclass.
        return immp.File(name, type_, url)
class TelegramMessage(immp.Message):
    """
    Message originating from Telegram.
    """

    # Bot API message keys that each carry a single file-style attachment.
    _file_types = ("animation", "video", "video_note", "audio", "voice", "document")

    @classmethod
    async def from_bot_message(cls, telegram, json):
        """
        Convert an API message :class:`dict` to a :class:`.Message`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            json (dict):
                Telegram API `message <https://core.telegram.org/bots/api#message>`_ object.

        Returns:
            .TelegramMessage:
                Parsed message object.

        Raises:
            NotImplementedError: If the message has no usable text or attachments.
        """
        message = _Schema.message(json)
        # Message IDs are just a sequence, only unique to their channel and not the whole network.
        # Pair with the chat ID for a network-unique value.
        id_ = "{}:{}".format(message["chat"]["id"], message["message_id"])
        # Edits bump the revision; otherwise the original send time stands in.
        revision = message["edit_date"] or message["date"]
        at = datetime.fromtimestamp(message["date"], timezone.utc)
        channel = immp.Channel(telegram, message["chat"]["id"])
        edited = bool(message["edit_date"])
        text = await TelegramRichText.from_bot_entities(telegram, message["text"],
                                                        message["entities"])
        user = None
        action = False
        reply_to = None
        joined = None
        left = None
        title = None
        attachments = []
        if message["from"]:
            user = TelegramUser.from_bot_user(telegram, message["from"])
        if message["reply_to_message"]:
            # Recursively parse the quoted parent message.
            reply_to = await cls.from_bot_message(telegram, message["reply_to_message"])
        # At most one of these fields will be set.
        if message["group_chat_created"]:
            action = True
            text = immp.RichText([immp.Segment("created the group "),
                                  immp.Segment(message["chat"]["title"], bold=True)])
        elif message["new_chat_members"]:
            joined = [(TelegramUser.from_bot_user(telegram, member))
                      for member in message["new_chat_members"]]
            action = True
            if joined == [user]:
                # Self-join: the user followed an invite link rather than being added.
                text = "joined group via invite link"
            else:
                text = immp.RichText()
                for join in joined:
                    # First iteration gets the "invited " prefix, later ones a separator.
                    text.append(immp.Segment(", " if text else "invited "),
                                immp.Segment(join.real_name, bold=True, link=join.link))
        elif message["left_chat_member"]:
            left = [TelegramUser.from_bot_user(telegram, message["left_chat_member"])]
            action = True
            if left == [user]:
                text = "left group"
            else:
                part = left[0]
                text = immp.RichText([immp.Segment("removed "),
                                      immp.Segment(part.real_name, bold=True, link=part.link)])
        elif message["new_chat_title"]:
            title = message["new_chat_title"]
            action = True
            text = immp.RichText([immp.Segment("changed group name to "),
                                  immp.Segment(title, bold=True)])
        elif message["new_chat_photo"]:
            action = True
            text = "changed group photo"
            # The API offers multiple resolutions; take the largest.
            photo = max(message["new_chat_photo"], key=lambda photo: photo["height"])
            attachments.append(await TelegramFile.from_id(telegram, photo["file_id"],
                                                          immp.File.Type.image))
        elif message["delete_chat_photo"]:
            action = True
            text = "removed group photo"
        elif message["pinned_message"]:
            action = True
            text = "pinned a message"
            # The pinned message becomes an attachment of the action message.
            attachments.append(await cls.from_bot_message(telegram, message["pinned_message"]))
        elif message["photo"]:
            # This is a list of resolutions, find the original sized one to return.
            photo = max(message["photo"], key=lambda photo: photo["height"])
            attachments.append(await TelegramFile.from_id(telegram, photo["file_id"],
                                                          immp.File.Type.image))
            if message["caption"]:
                # Captions replace the (empty) message text, with their own entities.
                text = await TelegramRichText.from_bot_entities(telegram, message["caption"],
                                                                message["caption_entities"])
        elif message["sticker"]:
            attachments.append(await TelegramFile.from_id(telegram, message["sticker"]["file_id"],
                                                          immp.File.Type.image))
            # All real stickers should have an emoji, but webp images uploaded as photos are
            # incorrectly categorised as stickers in the API response.
            if not text and message["sticker"]["emoji"]:
                action = True
                text = "sent {} sticker".format(message["sticker"]["emoji"])
        elif any(message[key] for key in cls._file_types):
            # Find the first populated file field; `key` keeps its value after break,
            # which the type mapping below relies on.
            for key in cls._file_types:
                if message[key]:
                    obj = message[key]
                    break
            if key == "animation":
                type_ = immp.File.Type.image
            elif key in ("video", "video_note"):
                type_ = immp.File.Type.video
            else:
                type_ = immp.File.Type.unknown
            attachments.append(await TelegramFile.from_id(telegram, obj["file_id"], type_,
                                                          obj["file_name"]))
        elif message["venue"]:
            attachments.append(immp.Location(latitude=message["venue"]["location"]["latitude"],
                                             longitude=message["venue"]["location"]["longitude"],
                                             name=message["venue"]["title"],
                                             address=message["venue"]["address"]))
        elif message["location"]:
            attachments.append(immp.Location(latitude=message["location"]["latitude"],
                                             longitude=message["location"]["longitude"]))
        elif message["poll"]:
            action = True
            prefix = "closed the" if message["poll"]["is_closed"] else "opened a"
            text = immp.RichText([immp.Segment("{} poll: ".format(prefix)),
                                  immp.Segment(message["poll"]["question"], bold=True)])
        elif not text:
            # No support for this message type.
            raise NotImplementedError
        common = dict(id_=id_,
                      revision=revision,
                      at=at,
                      channel=channel,
                      user=user,
                      raw=message)
        if message["forward_date"]:
            # Event is a message containing another message. Forwarded messages have no ID, so we
            # use a Message instead of a SentMessage here, unless they come from a channel.
            forward_id = forward_channel = forward_user = None
            if message["forward_from_chat"]:
                forward_channel = immp.Channel(telegram, message["forward_from_chat"]["id"])
                try:
                    forward_user = TelegramUser.from_bot_channel(telegram,
                                                                 message["forward_from_chat"])
                except _HiddenSender:
                    # Channel author hidden; fall back to the post signature if set.
                    if message["forward_signature"]:
                        forward_user = immp.User(real_name=message["forward_signature"])
                if message["forward_from_message_id"]:
                    forward_id = "{}:{}".format(message["forward_from_chat"]["id"],
                                                message["forward_from_message_id"])
            elif message["forward_from"]:
                forward_user = TelegramUser.from_bot_user(telegram, message["forward_from"])
            elif message["forward_sender_name"]:
                forward_user = immp.User(real_name=message["forward_sender_name"])
            forward_common = dict(text=text,
                                  user=forward_user,
                                  edited=edited,
                                  action=action,
                                  reply_to=reply_to,
                                  joined=joined,
                                  left=left,
                                  title=title,
                                  attachments=attachments,
                                  raw=message)
            if forward_id:
                forward = immp.SentMessage(id_=forward_id,
                                           channel=forward_channel,
                                           **forward_common)
            else:
                forward = immp.Message(**forward_common)
            # Embed the inner message as an attachment.
            return immp.SentMessage(attachments=[forward], **common)
        else:
            return immp.SentMessage(text=text,
                                    edited=edited,
                                    action=action,
                                    reply_to=reply_to,
                                    joined=joined,
                                    left=left,
                                    title=title,
                                    attachments=attachments,
                                    **common)

    @classmethod
    async def from_bot_update(cls, telegram, update):
        """
        Convert an API update :class:`dict` to a :class:`.Message`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            update (dict):
                Telegram API `update <https://core.telegram.org/bots/api#update>`_ object.

        Returns:
            .TelegramMessage:
                Parsed message object, or ``None`` if the update carries no message.
        """
        # New and edited messages/channel posts all share the same parser.
        for key in ("message", "channel_post"):
            if update.get(key):
                return await cls.from_bot_message(telegram, update[key])
            elif update.get("edited_{}".format(key)):
                return await cls.from_bot_message(telegram, update["edited_{}".format(key)])

    @classmethod
    async def from_proto_message(cls, telegram, message):
        """
        Convert a Telegram message event to a :class:`.Message`.

        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            message (telethon.tl.custom.Message):
                Received message from an event or get request.

        Returns:
            .TelegramMessage:
                Parsed message object.

        Raises:
            NotImplementedError: If the message has no usable text or attachments.
        """
        id_ = "{}:{}".format(message.chat_id, message.id)
        channel = immp.Channel(telegram, message.chat_id)
        edited = bool(message.edit_date)
        # Use the most recent timestamp available as the revision marker.
        if edited:
            revision = int(message.edit_date.timestamp())
        elif message.date:
            revision = int(message.date.timestamp())
        else:
            revision = None
        text = await TelegramRichText.from_proto_entities(telegram, message.message,
                                                          message.entities)
        sender = await message.get_sender()
        if isinstance(sender, tl.types.Channel):
            user = TelegramUser.from_proto_channel(telegram, sender)
        else:
            user = TelegramUser.from_proto_user(telegram, sender)
        action = False
        reply_to = None
        joined = []
        left = []
        title = None
        attachments = []
        if message.reply_to_msg_id:
            receipt = immp.Receipt(message.reply_to_msg_id, channel)
            reply_to = await telegram.resolve_message(receipt)
        if message.photo:
            try:
                attach = await TelegramFile.from_id(telegram, pack_bot_file_id(message.photo),
                                                    immp.File.Type.image)
            except TelegramAPIRequestError as e:
                # Keep the message even if the attachment can't be resolved.
                log.warning("Unable to fetch attachment", exc_info=e)
            else:
                attachments.append(attach)
        elif message.document:
            type_ = immp.File.Type.unknown
            name = None
            for attr in message.document.attributes:
                if isinstance(attr, tl.types.DocumentAttributeSticker):
                    type_ = immp.File.Type.image
                    if attr.alt and not text:
                        text = "sent {} sticker".format(attr.alt)
                        action = True
                elif isinstance(attr, tl.types.DocumentAttributeAnimated):
                    type_ = immp.File.Type.image
                elif isinstance(attr, tl.types.DocumentAttributeVideo):
                    type_ = immp.File.Type.video
                # Deliberately not elif: a filename attribute can accompany any of the above.
                if isinstance(attr, tl.types.DocumentAttributeFilename):
                    name = attr.file_name
            try:
                attach = await TelegramFile.from_id(telegram, pack_bot_file_id(message.document),
                                                    type_, name)
            except TelegramAPIRequestError as e:
                log.warning("Unable to fetch attachment", exc_info=e)
            else:
                attachments.append(attach)
        elif message.poll:
            action = True
            prefix = "closed the" if message.poll.poll.closed else "opened a"
            text = immp.RichText([immp.Segment("{} poll: ".format(prefix)),
                                  immp.Segment(message.poll.poll.question, bold=True)])
        if message.action:
            action = True
            if isinstance(message.action, tl.types.MessageActionChatCreate):
                text = immp.RichText([immp.Segment("created the group "),
                                      immp.Segment(message.action.title, bold=True)])
            elif isinstance(message.action, tl.types.MessageActionChatJoinedByLink):
                joined = [user]
                text = "joined group via invite link"
            elif isinstance(message.action, tl.types.MessageActionChatAddUser):
                # Resolve all added users concurrently.
                joined = await gather(*(telegram.user_from_id(id_) for id_ in message.action.users))
                if joined == [user]:
                    text = "joined group"
                else:
                    text = immp.RichText()
                    for join in joined:
                        text.append(immp.Segment(", " if text else "invited "),
                                    immp.Segment(join.real_name, link=join.link))
            elif isinstance(message.action, tl.types.MessageActionChatDeleteUser):
                left = [await telegram.user_from_id(message.action.user_id)]
                if left == [user]:
                    text = "left group"
                else:
                    part = left[0]
                    text = immp.RichText([immp.Segment("removed "),
                                          immp.Segment(part.real_name, bold=True, link=part.link)])
            elif isinstance(message.action, tl.types.MessageActionChatEditTitle):
                title = message.action.title
                text = immp.RichText([immp.Segment("changed group name to "),
                                      immp.Segment(title, bold=True)])
            elif isinstance(message.action, tl.types.MessageActionChatEditPhoto):
                text = "changed group photo"
            elif isinstance(message.action, tl.types.MessageActionChatDeletePhoto):
                text = "removed group photo"
            elif isinstance(message.action, tl.types.MessageActionPinMessage):
                # The resolved reply target is really the pinned message -- reclassify it.
                attachments.append(reply_to)
                reply_to = None
                text = "pinned message"
            else:
                raise NotImplementedError
        if not text and not attachments:
            # No support for this message type.
            raise NotImplementedError
        common = dict(id_=id_,
                      revision=revision,
                      at=message.date,
                      channel=channel,
                      user=user,
                      raw=message)
        if message.forward:
            # Event is a message containing another message. Forwarded messages have no ID, so we
            # use a Message instead of a SentMessage here, unless they come from a channel.
            forward_id = forward_channel = forward_user = None
            if message.forward.channel_id and message.forward.channel_post:
                forward_channel = immp.Channel(telegram, message.forward.chat_id)
                forward_id = "{}:{}".format(message.forward.chat_id,
                                            message.forward.channel_post)
                chat = await message.forward.get_chat()
                try:
                    forward_user = TelegramUser.from_proto_channel(telegram, chat,
                                                                   message.forward.post_author)
                except _HiddenSender:
                    if message.forward.post_author:
                        forward_user = immp.User(real_name=message.forward.post_author)
            elif message.forward.sender_id:
                forward_user = TelegramUser.from_proto_user(telegram,
                                                            await message.forward.get_sender())
            elif message.forward.from_name:
                forward_user = immp.User(real_name=message.forward.from_name)
            forward_common = dict(text=text,
                                  user=forward_user,
                                  edited=edited,
                                  action=action,
                                  reply_to=reply_to,
                                  joined=joined,
                                  left=left,
                                  title=title,
                                  attachments=attachments,
                                  raw=message)
            if forward_id:
                forward = immp.SentMessage(id_=forward_id,
                                           channel=forward_channel,
                                           **forward_common)
            else:
                forward = immp.Message(**forward_common)
            # Embed the inner message as an attachment.
            return immp.SentMessage(attachments=[forward], **common)
        else:
            return immp.SentMessage(text=text,
                                    edited=edited,
                                    action=action,
                                    reply_to=reply_to,
                                    joined=joined,
                                    left=left,
                                    title=title,
                                    attachments=attachments,
                                    **common)
if SQLiteSession:
    class Session(SQLiteSession):
        """
        Telethon SQLite session subclass adding read-only lookups against the
        cached entity table (positive IDs are users, negative IDs are chats).
        """

        def _execute_multi(self, query, *params):
            # Counterpart to SQLiteSession._execute that returns all rows, not one.
            cur = self._cursor()
            try:
                return cur.execute(query, params).fetchall()
            finally:
                cur.close()

        def get_user_entities(self):
            """Return all cached (id, username, name) rows for users."""
            return self._execute_multi("SELECT id, username, name FROM entities WHERE id > 0")

        def get_chat_entities(self):
            """Return all cached (id, username, name) rows for groups/channels."""
            return self._execute_multi("SELECT id, username, name FROM entities WHERE id < 0")

        def get_entity(self, id_):
            """Return the cached (id, username, name) row for a single ID, if any."""
            return self._execute("SELECT id, username, name FROM entities WHERE id = ?", id_)

        def get_entity_username(self, username):
            """Return the cached (id, username, name) row for a username, if any."""
            return self._execute("SELECT id, username, name FROM entities WHERE username = ?",
                                 username)
class TelegramPlug(immp.HTTPOpenable, immp.Plug):
    """
    Plug for a `Telegram <https://telegram.org>`_ bot.
    """

    schema = _Schema.config

    network_name = "Telegram"

    @property
    def network_id(self):
        # Unknown until getMe succeeds in start().
        return "telegram:{}".format(self._bot_user["id"]) if self._bot_user else None

    def __init__(self, name, config, host):
        super().__init__(name, config, host)
        # API ID and hash are only useful together (they authenticate the MTProto client).
        if bool(self.config["api-id"]) != bool(self.config["api-hash"]):
            raise immp.ConfigError("Both of API ID and hash must be given")
        if self.config["client-updates"] and not self.config["api-id"]:
            raise immp.ConfigError("Client updates require API ID and hash")
        if self.config["session"] and not self.config["api-id"]:
            raise immp.ConfigError("Session file requires API ID and hash")
        # Connection objects that need to be closed on disconnect.
        self._bot_user = self._receive = self._client = None
        self._closing = False
        # Temporary tracking of migrated chats for the current session.
        self._migrations = {}
        # Caching of user/username lookups to avoid flooding.
        self._users = {}
        self._usernames = {}
        # Blacklist of channels we have an entity for but can't access. Indexed at startup, with
        # chats removed if we receive a message from that channel.
        self._blacklist = set()
        self._blacklist_task = None
        # Update ID from which to retrieve the next batch. Should be one higher than the max seen.
        self._offset = 0
        # Private chats and non-super groups have a shared incremental message ID. Cache the
        # highest we've seen, so that we can attempt to fetch past messages with this as a base.
        self._last_id = None

    async def _api(self, endpoint, schema=_Schema.api(), quiet=False, **kwargs):
        """
        Make a bot API request and validate the response with the given schema.

        Raises:
            TelegramAPIConnectError: On transport failures, bad responses or timeouts.
            TelegramAPIRequestError: When the API reports ``ok: false``.
        """
        # NOTE(review): the schema default is evaluated once at import time --
        # presumably _Schema.api() returns a reusable validator; confirm it is stateless.
        url = "https://api.telegram.org/bot{}/{}".format(self.config["token"], endpoint)
        if not quiet:
            log.debug("Making API request to %r", endpoint)
        try:
            async with self.session.post(url, **kwargs) as resp:
                try:
                    json = await resp.json()
                    data = schema(json)
                except ClientResponseError as e:
                    raise TelegramAPIConnectError("Bad response with code: {}"
                                                  .format(resp.status)) from e
        except ClientError as e:
            raise TelegramAPIConnectError("Request failed") from e
        except TimeoutError as e:
            raise TelegramAPIConnectError("Request timed out") from e
        if not data["ok"]:
            raise TelegramAPIRequestError(data["error_code"], data["description"])
        return data["result"]

    async def start(self):
        await super().start()
        self._closing = False
        # getMe validates the token and identifies our own bot user.
        self._bot_user = await self._api("getMe", _Schema.me)
        if self.config["api-id"] and self.config["api-hash"]:
            if not TelegramClient:
                raise immp.ConfigError("API ID/hash specified but Telethon is not installed")
            log.debug("Starting client")
            self._client = TelegramClient(Session(self.config["session"]),
                                          self.config["api-id"], self.config["api-hash"])
        if self._client and self.config["client-updates"]:
            log.debug("Adding client event handlers")
            self._client.add_event_handler(self._handle_raw)
            for event in (events.NewMessage, events.MessageEdited, events.ChatAction):
                self._client.add_event_handler(self._handle, event)
        else:
            # No MTProto updates configured; fall back to bot API long-polling.
            log.debug("Starting update long-poll")
            self._receive = ensure_future(self._poll())
        if self._client:
            await self._client.start(bot_token=self.config["token"])
            # Find the most recently received message, and therefore the current value of the shared
            # ID sequence. Fetch the current state, then subtract one from pts to make it replay
            # the last message, which should appear in new_messages and other_updates.
            state = await self._client(tl.functions.updates.GetStateRequest())
            # NOTE(review): datetime.utcnow() is naive -- confirm Telethon accepts that here.
            diff = await self._client(tl.functions.updates.GetDifferenceRequest(
                state.pts - 1, datetime.utcnow(), state.qts))
            if isinstance(diff, tl.types.updates.DifferenceEmpty):
                # Unclear if this will happen with the given parameters.
                pass
            elif diff.new_messages:
                self._last_id = diff.new_messages[-1].id
            self._blacklist = {_HiddenSender.channel_id}
            # Index inaccessible users/chats in the background.
            self._blacklist_task = ensure_future(wait([self._blacklist_users(),
                                                       self._blacklist_chats()]))

    async def stop(self):
        await super().stop()
        self._closing = True
        if self._receive:
            log.debug("Stopping update long-poll")
            self._receive.cancel()
            self._receive = None
        if self._client:
            log.debug("Closing client")
            await self._client.disconnect()
            self._client = None
        self._bot_user = None
        if self._blacklist:
            self._blacklist.clear()
        if self._blacklist_task:
            self._blacklist_task.cancel()
            self._blacklist_task = None
        self._offset = 0
        self._last_id = None
        if self._migrations:
            # Migrations only live in memory; named channels must be updated in config.
            log.warning("Chat migrations require a config update before next run")

    async def user_from_id(self, id_):
        """
        Resolve a user by numeric ID: session cache first, then memo cache, then the API.
        """
        if not self._client:
            log.debug("Client auth required to look up users")
            return None
        entity = self._client.session.get_entity(id_)
        if entity:
            return TelegramUser.from_entity(self, entity)
        elif id_ in self._users:
            return self._users[id_]
        try:
            data = await self._client(tl.functions.users.GetFullUserRequest(int(id_)))
        except BadRequestError:
            return None
        user = TelegramUser.from_proto_user(self, data.user)
        self._users[id_] = user
        return user

    async def user_from_username(self, username):
        """
        Resolve a user by username: session cache first, then memo cache, then the API.
        """
        if not self._client:
            log.debug("Client auth required to look up users")
            return None
        entity = self._client.session.get_entity_username(username)
        if entity:
            return TelegramUser.from_entity(self, entity)
        elif username in self._usernames:
            return self._usernames[username]
        try:
            data = await self._client(tl.functions.contacts.ResolveUsernameRequest(username))
        except BadRequestError:
            return None
        if not data.users:
            return None
        user = TelegramUser.from_proto_user(self, data.users[0])
        self._usernames[username] = user
        return user

    async def user_is_system(self, user):
        # True when the given user is this bot itself; user.id is a string here.
        return user.id == str(self._bot_user["id"])

    async def _blacklist_users(self):
        # For each user in the entity table, check the bot API for a corresponding chat, and
        # blacklist those who haven't started a conversation with us yet.
        log.debug("Finding users to blacklist")
        count = 0
        for user in self._client.session.get_user_entities():
            try:
                await self._api("getChat", quiet=True, params={"chat_id": user[0]})
            except TelegramAPIRequestError:
                count += 1
                self._blacklist.add(user[0])
        log.debug("Blacklisted %d users", count)

    async def _blacklist_chats(self):
        # The entity cache is polluted with channels we've seen outside of participation (e.g.
        # mentions and forwards). Narrow down the list by excluding chats we can't access.
        log.debug("Finding chats to blacklist")
        count = 0
        lookup = []
        for chat in self._client.session.get_chat_entities():
            if chat[0] in self._blacklist:
                continue
            if str(chat[0]).startswith("-100"):
                # Supergroup/channel: must be checked individually.
                try:
                    await self._client(tl.functions.channels.GetChannelsRequest([abs(chat[0])]))
                except ChannelPrivateError:
                    count += 1
                    self._blacklist.add(chat[0])
            else:
                # Basic groups can be checked in one batch request below.
                lookup.append(abs(chat[0]))
        if lookup:
            chats = await self._client(tl.functions.messages.GetChatsRequest(lookup))
            gone = [-chat.id for chat in chats.chats if isinstance(chat, tl.types.ChatForbidden)]
            if gone:
                count += len(gone)
                self._blacklist.update(gone)
        log.debug("Blacklisted %d chats", count)

    async def public_channels(self):
        if not self._client:
            log.debug("Client auth required to look up channels")
            return None
        # Use the session cache to find all "seen" chats -- not guaranteed to be a complete list.
        # Filter out chats we're no longer a member of, or otherwise can't access.
        ids = set(chat[0] for chat in self._client.session.get_chat_entities())
        return [immp.Channel(self, chat) for chat in ids - self._blacklist]

    async def private_channels(self):
        if not self._client:
            log.debug("Client auth required to look up channels")
            return None
        # Private channels just use user IDs, so return all users we know about, filtered to those
        # we also have a valid chat for.
        ids = set(chat[0] for chat in self._client.session.get_user_entities())
        return [immp.Channel(self, chat) for chat in ids - self._blacklist]

    async def channel_for_user(self, user):
        if not isinstance(user, TelegramUser):
            return None
        try:
            await self._api("getChat", params={"chat_id": user.id})
        except TelegramAPIRequestError as e:
            log.warning("Failed to retrieve user %s channel", user.id, exc_info=e)
            # Can't create private channels, users must initiate conversations with bots.
            return None
        else:
            return immp.Channel(self, user.id)

    async def channel_is_private(self, channel):
        # User (private) chats have positive IDs; groups/channels are negative.
        return int(channel.source) > 0

    async def channel_title(self, channel):
        if await channel.is_private():
            return None
        # Prefer the locally cached entity name over an API round-trip.
        if self._client:
            entity = self._client.session.get_entity(channel.source)
            if entity:
                return entity[2]
        try:
            data = await self._api("getChat", _Schema.chat, params={"chat_id": channel.source})
        except TelegramAPIRequestError as e:
            log.warning("Failed to retrieve channel %s title", channel.source, exc_info=e)
            return None
        else:
            return data["title"]

    async def channel_rename(self, channel, title):
        await self._api("setChatTitle", params={"chat_id": channel.source, "title": title})

    async def channel_members(self, channel):
        if not self._client:
            log.debug("Client auth required to list channel members")
            return None
        # Private channels should just contain the bot and the corresponding user.
        if await channel.is_private():
            if channel.source == str(self._bot_user["id"]):
                return [TelegramUser.from_bot_user(self, self._bot_user)]
            elif int(channel.source) > 0:
                entity = self._client.session.get_entity(channel.source)
                if entity:
                    return [TelegramUser.from_bot_user(self, self._bot_user),
                            await self.user_from_id(channel.source)]
        # Channel and supergroup chat IDs have a bot-API-only prefix to distinguish them.
        if channel.source.startswith("-100"):
            chat = int(channel.source[4:])
            users = []
            try:
                # Page through participants 1000 at a time until exhausted.
                while True:
                    data = await self._client(tl.functions.channels.GetParticipantsRequest(
                        chat, tl.types.ChannelParticipantsRecent(), len(users), 1000, 0))
                    if data.users:
                        users += [TelegramUser.from_proto_user(self, user) for user in data.users]
                    else:
                        break
            except BadRequestError:
                return None
            else:
                return users
        else:
            chat = abs(int(channel.source))
            try:
                data = await self._client(tl.functions.messages.GetFullChatRequest(chat))
            except BadRequestError:
                return None
            else:
                return [TelegramUser.from_proto_user(self, user) for user in data.users]

    async def channel_remove(self, channel, user):
        # NOTE(review): user.id appears to be a str elsewhere (see user_is_system),
        # whereas _bot_user["id"] is presumably an int -- confirm this comparison can
        # ever succeed, otherwise the leaveChat path is unreachable.
        if user.id == self._bot_user["id"]:
            await self._api("leaveChat", params={"chat_id": channel.source})
        else:
            await self._api("kickChatMember", params={"chat_id": channel.source,
                                                      "user_id": user.id})

    async def channel_history(self, channel, before=None):
        if not self._client:
            log.debug("Client auth required to retrieve messages")
            return []
        # Telegram channels (including supergroups) have their own message ID sequence starting from
        # 1. Each user has a shared ID sequence used for non-super groups and private chats.
        # "private_seq" means the channel has its own private sequence (i.e. "-100" prefix).
        private_seq = channel.source.startswith("-100")
        if not before:
            if private_seq:
                # pts doubles as the latest message ID for a channel-local sequence.
                request = tl.functions.channels.GetFullChannelRequest(int(channel.source))
                chat = await self._client(request)
                before = immp.Receipt("{}:{}".format(channel.source, chat.full_chat.pts), channel)
            elif self._last_id:
                before = immp.Receipt("{}:{}".format(channel.source, self._last_id + 1), channel)
            else:
                log.debug("Before message required to retrieve messages")
                return []
        chat, message = (int(field) for field in before.id.split(":", 1))
        # For a channel-private sequence, we can just retrieve the last batch of messages. For the
        # shared sequence, we can't lookup for a specific chat, so we instead fetch a larger batch
        # (maxes out at 200) and filter to messages from the target chat.
        limit = 50 if private_seq else 200
        ids = list(range(max(message - limit, 1), message))
        history = filter(None, await self._client.get_messages(entity=chat, ids=ids))
        tasks = (TelegramMessage.from_proto_message(self, message) for message in history)
        results = await gather(*tasks, return_exceptions=True)
        messages = []
        for result in results:
            if isinstance(result, NotImplementedError):
                # Unsupported message types are silently skipped from history.
                continue
            elif isinstance(result, Exception):
                raise result
            else:
                messages.append(result)
        return messages

    async def get_message(self, receipt):
        if not self._client:
            log.debug("Client auth required to retrieve messages")
            return None
        message = await self._client.get_messages(int(receipt.channel.source), ids=int(receipt.id))
        if not message:
            # NOTE(review): %d with receipt.id/source -- these look like strings
            # elsewhere; confirm this log call can't raise at format time.
            log.debug("Failed to find message %d in %d", receipt.id, receipt.channel.source)
            return None
        try:
            return await TelegramMessage.from_proto_message(self, message)
        except NotImplementedError:
            return None

    async def _form_data(self, base, field, attach):
        # Build multipart form data for an upload: pass the source URL through if
        # available, otherwise re-upload the fetched content.
        data = FormData(base)
        if attach.source:
            data.add_field(field, attach.source)
        else:
            img_resp = await attach.get_content(self.session)
            data.add_field(field, img_resp.content, filename=attach.title or field)
        return data

    async def _upload_attachment(self, chat, msg, attach):
        # Upload a file to Telegram in its own message.
        # Prefer a source URL if available, else fall back to re-uploading the file.
        base = {"chat_id": str(chat)}
        if msg.user:
            # Caption the upload with an action line naming the original sender.
            if attach.type == immp.File.Type.image:
                what = "an image"
            elif attach.type == immp.File.Type.video:
                what = "a video"
            else:
                what = "a file"
            rich = immp.Message(text=immp.RichText([immp.Segment("sent {}".format(what))]),
                                user=msg.user, action=True).render()
            text = "".join(TelegramSegment.to_html(self, segment) for segment in rich)
            base["caption"] = text
            base["parse_mode"] = "HTML"
        if attach.type == immp.File.Type.image:
            data = await self._form_data(base, "photo", attach)
            try:
                return await self._api("sendPhoto", _Schema.send, data=data)
            except (TelegramAPIConnectError, TelegramAPIRequestError):
                log.debug("Failed to upload image, falling back to document upload")
        elif attach.type == immp.File.Type.video:
            data = await self._form_data(base, "video", attach)
            try:
                return await self._api("sendVideo", _Schema.send, data=data)
            except (TelegramAPIConnectError, TelegramAPIRequestError):
                log.debug("Failed to upload video, falling back to document upload")
        # Generic document upload, also the fallback for failed photo/video sends.
        data = await self._form_data(base, "document", attach)
        try:
            return await self._api("sendDocument", _Schema.send, data=data)
        except TelegramAPIConnectError as e:
            log.warning("Failed to upload file", exc_info=e)
            return None

    def _requests(self, chat, msg):
        # Build (but don't await) the list of API coroutines needed to send a message.
        requests = []
        if msg.text or msg.reply_to:
            quote = False
            reply_to = ""
            if isinstance(msg.reply_to, immp.Receipt):
                # Reply natively to the given parent message.
                reply_to = int(msg.reply_to.id.split(":")[1])
            elif isinstance(msg.reply_to, immp.Message):
                # Not a known Telegram message -- render it as a quoted block instead.
                quote = True
            rich = msg.render(edit=msg.edited, quote_reply=quote)
            text = "".join(TelegramSegment.to_html(self, segment) for segment in rich)
            no_link_preview = "true" if msg.user and msg.user.link else "false"
            requests.append(self._api("sendMessage", _Schema.send,
                                      params={"chat_id": chat,
                                              "text": text,
                                              "parse_mode": "HTML",
                                              # Prevent linked user names generating previews.
                                              "disable_web_page_preview": no_link_preview,
                                              "reply_to_message_id": reply_to}))
        for attach in msg.attachments:
            if isinstance(attach, immp.File):
                requests.append(self._upload_attachment(chat, msg, attach))
            elif isinstance(attach, immp.Location):
                requests.append(self._api("sendLocation", _Schema.send,
                                          params={"chat_id": chat,
                                                  "latitude": str(attach.latitude),
                                                  "longitude": str(attach.longitude)}))
                if msg.user:
                    # Locations can't carry a caption; follow up with an action message.
                    caption = immp.Message(user=msg.user, text="sent a location", action=True)
                    text = "".join(TelegramSegment.to_html(self, segment)
                                   for segment in caption.render())
                    requests.append(self._api("sendMessage", _Schema.send,
                                              params={"chat_id": chat,
                                                      "text": text,
                                                      "parse_mode": "HTML"}))
        return requests

    async def put(self, channel, msg):
        chat = channel.source
        # Follow any chain of known migrations to the current chat ID.
        while chat in self._migrations:
            log.debug("Following chat migration: %r -> %r", chat, self._migrations[chat])
            chat = self._migrations[chat]
        requests = []
        for attach in msg.attachments:
            # Generate requests for attached messages first.
            if isinstance(attach, immp.Receipt):
                # Forward the messages natively using the given chat/ID.
                forward_chat, forward_id = map(int, attach.id.split(":"))
                requests.append(self._api("forwardMessage", _Schema.send,
                                          params={"chat_id": chat,
                                                  "from_chat_id": forward_chat,
                                                  "message_id": forward_id}))
            elif isinstance(attach, immp.Message):
                requests += self._requests(chat, attach)
        own_requests = self._requests(chat, msg)
        if requests and not own_requests and msg.user:
            # Forwarding a message but no content to show who forwarded it.
            info = immp.Message(user=msg.user, action=True, text="forwarded a message")
            own_requests = self._requests(chat, info)
        requests += own_requests
        ids = []
        for request in requests:
            result = await request
            if not result:
                continue
            sent = await TelegramMessage.from_bot_message(self, result)
            ids.append(sent.id)
            # Echo our own sends back into the receive pipeline.
            self._post_recv(sent)
        return ids

    async def delete(self, sent):
        chat, message = sent.id.split(":", 1)
        await self._api("deleteMessage", params={"chat_id": chat, "message_id": message})

    def _migrate(self, old, new):
        # Record a group-to-supergroup migration and patch named channels in memory.
        log.warning("Chat has migrated: %r -> %r", old, new)
        self._migrations[old] = new
        for name, channel in self.host.channels.items():
            if channel.plug is self and channel.source == old:
                log.debug("Updating named channel %r in place", name)
                channel.source = new

    def _post_recv(self, sent):
        # Queue a parsed message and update per-chat bookkeeping.
        self.queue(sent)
        chat, seq = (int(part) for part in sent.id.split(":", 1))
        if self._blacklist:
            # Receiving from a chat proves we can access it.
            self._blacklist.discard(chat)
        if not str(chat).startswith("-100"):
            # Non-supergroup chats share one ID sequence; track the latest seen.
            self._last_id = seq

    async def _poll(self):
        # Bot API long-poll loop; runs until stop() sets _closing or we're cancelled.
        while not self._closing:
            params = {"offset": self._offset,
                      "timeout": 240}
            fetch = ensure_future(self._api("getUpdates", _Schema.updates, params=params))
            try:
                result = await fetch
            except CancelledError:
                log.debug("Cancelling polling")
                return
            except (TelegramAPIConnectError, TelegramAPIRequestError) as e:
                log.debug("Unexpected response or timeout: %r", e)
                log.debug("Reconnecting in 3 seconds")
                await sleep(3)
                continue
            except Exception as e:
                log.exception("Uncaught exception during long-poll: %r", e)
                raise
            for update in result:
                log.debug("Received an update")
                if "message" in update and update["message"]["migrate_to_chat_id"]:
                    old = update["message"]["chat"]["id"]
                    new = update["message"]["migrate_to_chat_id"]
                    self._migrate(old, new)
                if any(key in update or "edited_{}".format(key) in update
                       for key in ("message", "channel_post")):
                    try:
                        sent = await TelegramMessage.from_bot_update(self, update)
                    except NotImplementedError:
                        log.debug("Skipping message with no usable parts")
                    except CancelledError:
                        log.debug("Cancel request for plug %r getter", self.name)
                        return
                    else:
                        self._post_recv(sent)
                else:
                    log.debug("Ignoring update with unknown keys: %s", ", ".join(update.keys()))
                # Acknowledge this update so the next poll starts past it.
                self._offset = max(update["update_id"] + 1, self._offset)

    async def _handle_raw(self, event):
        # Raw MTProto events: only used to spot chat migrations.
        log.debug("Received a %s event", event.__class__.__qualname__)
        if isinstance(event, tl.types.UpdateNewMessage):
            if isinstance(event.message.action, tl.types.MessageActionChatMigrateTo):
                old = event.message.chat_id
                new = int("-100{}".format(event.message.action.channel_id))
                self._migrate(old, new)

    async def _handle(self, event):
        # Telethon high-level events: new/edited messages and chat actions.
        if isinstance(event, events.ChatAction.Event):
            message = event.action_message
        else:
            message = event.message
        try:
            sent = await TelegramMessage.from_proto_message(self, message)
        except NotImplementedError:
            log.debug("Skipping message with no usable parts")
        else:
            self._post_recv(sent)
import unittest
from . import EventBusBridgeStarter, CountDownLatch
from vertx import EventBus
class EventBusClientTLSTests(unittest.TestCase):
    """
    Tests against EventBus Bridge with TLS enabled.

    Both tests run the same echo round trip; they differ only in the
    bridge's ``clientAuth`` setting and the client-side ssl_options, so the
    shared logic lives in :meth:`_run_echo_roundtrip`.
    """

    # Test CA material reused as server cert/key and client trust anchor.
    _CA_CERT = "test/systemtesting/ca.crt"
    _CA_KEY = "test/systemtesting/ca.key"

    def _run_echo_roundtrip(self, server_conf, ssl_options):
        """Start a bridge with *server_conf*, connect a TLS client using
        *ssl_options*, and verify an echo round trip completes.

        NOTE(review): the latch result is not asserted (preserved from the
        original behavior) -- a timed-out handler would not fail the test.
        """
        latch = CountDownLatch()
        starter = EventBusBridgeStarter(debug=True, conf=server_conf)
        try:
            starter.start()
            starter.wait_started()
            ebus = EventBus(options={"debug": "True", "ssl_options": ssl_options})
            ebus.connect()

            def handler(message):
                self.assertEqual(message['body']['hello'], 'world')
                ebus.close()
                print("Passed!")
                latch.count_down()

            ebus.register_handler("echo-back", handler)
            ebus.send("echo", reply_address="echo-back", body={'hello': 'world'})
            latch.awaits(10)
        finally:
            starter.stop()

    def test_tls_client_auth_off(self):
        """
        Tests when tls.CLIENT_AUTH is disabled.
        """
        conf = {"server-options":
                {"ssl": "true", "clientAuth": "NONE",
                 "pemKeyCertOptions": {"keyPath": self._CA_KEY,
                                       "certPath": self._CA_CERT}}}
        self._run_echo_roundtrip(conf, {"ca_file": self._CA_CERT})

    def test_tls_client_auth_on(self):
        """
        Tests when tls.CLIENT_AUTH is enabled (client presents a cert).
        """
        conf = {"server-options":
                {"ssl": "true", "clientAuth": "REQUIRED",
                 "pemKeyCertOptions": {"keyPath": self._CA_KEY,
                                       "certPath": self._CA_CERT},
                 "pemTrustOptions": {"certPaths": [self._CA_CERT]}}}
        self._run_echo_roundtrip(conf,
                                 {"ca_file": self._CA_CERT,
                                  "cert_file": self._CA_CERT,
                                  "key_file": self._CA_KEY})
if __name__ == "__main__":
unittest.main() | python/test/systemtesting/test_eventbus_tls.py | import unittest
from . import EventBusBridgeStarter, CountDownLatch
from vertx import EventBus
class EventBusClientTLSTests(unittest.TestCase):
"""
Tests against EventBus Bridge with TLS enabled.
"""
def __init__(self, *args, **kwargs):
super(EventBusClientTLSTests, self).__init__(*args, **kwargs)
def test_tls_client_auth_off(self):
"""
Tests when tls.CLIENT_AUTH is disabled.
"""
latch = CountDownLatch()
starter = EventBusBridgeStarter(debug=True, conf={"server-options":
{"ssl": "true", "clientAuth": "NONE", "pemKeyCertOptions":
{"keyPath": "test/systemtesting/ca.key",
"certPath": "test/systemtesting/ca.crt"}
}
})
try:
starter.start()
starter.wait_started()
ebus = EventBus(options={"debug": "True", "ssl_options": {"ca_file": "test/systemtesting/ca.crt"}})
ebus.connect()
def handler(message):
self.assertEqual(message['body']['hello'], 'world')
ebus.close()
print("Passed!")
latch.count_down()
ebus.register_handler("echo-back", handler)
ebus.send("echo", reply_address="echo-back", body={'hello': 'world'})
latch.awaits(10)
finally:
starter.stop()
def test_tls_client_auth_on(self):
"""
Tests when tls.CLIENT_AUTH is enabled.
"""
latch = CountDownLatch()
starter = EventBusBridgeStarter(debug=True, conf={"server-options":
{"ssl": "true", "clientAuth": "REQUIRED",
"pemKeyCertOptions":
{"keyPath": "test/systemtesting/ca.key",
"certPath": "test/systemtesting/ca.crt"},
"pemTrustOptions": {"certPaths": ["test/systemtesting/ca.crt"]}
}
})
try:
starter.start()
starter.wait_started()
ebus = EventBus(options={"debug": "True", "ssl_options": {"ca_file": "test/systemtesting/ca.crt",
"cert_file": "test/systemtesting/ca.crt",
"key_file": "test/systemtesting/ca.key"}})
ebus.connect()
def handler(message):
self.assertEqual(message['body']['hello'], 'world')
ebus.close()
print("Passed!")
latch.count_down()
ebus.register_handler("echo-back", handler)
ebus.send("echo", reply_address="echo-back", body={'hello': 'world'})
latch.awaits(10)
finally:
starter.stop()
if __name__ == "__main__":
unittest.main() | 0.413359 | 0.288118 |
import os
import unittest
import shutil
from collections import defaultdict
from jumpdir.directories import Directories
from tests.tools import create_dtree
simple_dtree = {
'first': {
'example': None,
},
'second': {
'toot toot': None,
'bootboot': {
'jam_recipes': None,
},
'example': None,
},
'.hidden': None,
}
class MainTest(unittest.TestCase):
    """Integration tests for jumpdir.directories.Directories over a mock tree."""

    @staticmethod
    def create_mock_cache(path_to_cache_file):
        """Write an empty JSON object so Directories sees a valid cache."""
        with open(path_to_cache_file, 'w') as f:
            f.write("{}")

    @classmethod
    def setUpClass(cls):
        # Build a disposable directory tree under the CWD and index it once
        # for all tests; torn down in tearDownClass.
        cls.base_path = os.getcwd()
        os.mkdir('mock_dtree')
        cls.test_path = os.path.join(cls.base_path, 'mock_dtree')
        create_dtree(simple_dtree, cls.test_path)
        cls.mock_cache = os.path.join(cls.test_path, '.mockcache')
        cls.create_mock_cache(cls.mock_cache)
        cls.directories = Directories(cls.test_path, cls.mock_cache)
        cls.directories.map_directories()
        try:
            # Python2: assertCountEqual was named assertItemsEqual.
            cls.assertCountEqual = cls.assertItemsEqual
        except AttributeError:
            # Python3 already provides assertCountEqual.
            pass

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.test_path)

    def test_Directories_initialises_with_base_dir_attribute_of_type_str(self):
        self.assertEqual(type(self.directories.base_dir), str)

    def test_Directories_initialises_with_correct_base_dir_attribute(self):
        self.assertEqual(self.directories.base_dir, self.test_path)

    def test_Directories_initialises_with_dirs_attribute_of_type_default_dict(self):
        self.assertEqual(type(self.directories.dirs), defaultdict)

    def test_Directories_is_iterable(self):
        self.assertCountEqual(iter(self.directories), self.directories.dirs.keys())

    def test_dict_builder_method_returns_dict_of_correct_length(self):
        self.assertEqual(len(self.directories.dirs), 6)

    def test_dict_builder_stores_directories_with_matching_names_under_same_key(self):
        self.assertEqual(len(self.directories.dirs['example']), 2)

    def test_dict_builder_stores_paths(self):
        self.assertIn(
            os.path.join(self.test_path, 'first', 'example'),
            self.directories.dirs['example'])

    def test_dict_builder_ignores_directories_beginning_with_a_period(self):
        self.assertNotIn('.hidden', self.directories.dirs.keys())

    def test_Directories_instances_are_subscriptable(self):
        self.assertEqual(self.directories['example'], self.directories.dirs['example'])

    def test_shallowest_path_to_returns_shortest_path_from_list(self):
        shallowest = self.directories.shallowest_path_to('example')
        # Fix: compare by path length -- the previous lexicographic
        # comparison (`shallowest > path`) did not test "shortest" at all.
        is_shallowest = all(len(shallowest) <= len(path)
                            for path in self.directories['example'])
        self.assertTrue(is_shallowest)
if __name__ == '__main__':
unittest.main() | tests/directories_test.py | import os
import unittest
import shutil
from collections import defaultdict
from jumpdir.directories import Directories
from tests.tools import create_dtree
simple_dtree = {
'first': {
'example': None,
},
'second': {
'toot toot': None,
'bootboot': {
'jam_recipes': None,
},
'example': None,
},
'.hidden': None,
}
class MainTest(unittest.TestCase):
@staticmethod
def create_mock_cache(path_to_cache_file):
with open(path_to_cache_file, 'w') as f:
f.write("{}")
@classmethod
def setUpClass(cls):
cls.base_path = os.getcwd()
os.mkdir('mock_dtree')
cls.test_path = os.path.join(cls.base_path, 'mock_dtree')
create_dtree(simple_dtree, cls.test_path)
cls.mock_cache = os.path.join(cls.test_path, '.mockcache')
cls.create_mock_cache(cls.mock_cache)
cls.directories = Directories(cls.test_path, cls.mock_cache)
cls.directories.map_directories()
try:
# Python2
cls.assertCountEqual = cls.assertItemsEqual
except AttributeError:
# Python3
pass
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.test_path)
def test_Directories_initialises_with_base_dir_attribute_of_type_str(self):
self.assertEqual(type(self.directories.base_dir), str)
def test_Directories_initialises_with_correct_base_dir_attribute(self):
self.assertEqual(self.directories.base_dir, self.test_path)
def test_Directories_initialises_with_dirs_attribute_of_type_default_dict(self):
self.assertEqual(type(self.directories.dirs), defaultdict)
def test_Directories_is_iterable(self):
self.assertCountEqual(iter(self.directories), self.directories.dirs.keys())
def test_dict_builder_method_returns_dict_of_correct_length(self):
self.assertEqual(len(self.directories.dirs), 6)
def test_dict_builder_stores_directories_with_matching_names_under_same_key(self):
self.assertEqual(len(self.directories.dirs['example']), 2)
def test_dict_builder_stores_paths(self):
self.assertIn(
os.path.join(self.test_path, 'first', 'example'),
self.directories.dirs['example'])
def test_dict_builder_ignores_directories_beginning_with_a_period(self):
self.assertNotIn('.hidden', self.directories.dirs.keys())
def test_Directories_instances_are_subscriptable(self):
self.assertEqual(self.directories['example'], self.directories.dirs['example'])
def test_shallowest_path_to_returns_shortest_path_from_list(self):
shallowest = self.directories.shallowest_path_to('example')
is_shallowest = True
for path in self.directories['example']:
if (shallowest > path):
is_shallowest = False
self.assertTrue(is_shallowest)
if __name__ == '__main__':
unittest.main() | 0.42656 | 0.40116 |
"""A simple Q-learning agent trained to play Catch."""
from absl import app
from absl import flags
from bsuite.experiments.catch import catch
import haiku as hk
from haiku import nets
import jax
from jax.experimental import optix
import jax.numpy as jnp
import rlax
FLAGS = flags.FLAGS
flags.DEFINE_integer("train_episodes", 500, "Number of train episodes.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon", 0.01, "eps-greedy exploration probability.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.01, "Optimizer learning rate.")
flags.DEFINE_integer("seed", 1234, "Random seed.")
def build_network(num_actions: int) -> hk.Transformed:
  """Build a haiku-transformed MLP Q-network with `num_actions` outputs."""

  def q(obs):
    # Flatten the observation, then score every action with a 2-layer MLP.
    flat = jnp.reshape(obs, (-1,))
    mlp = nets.MLP([FLAGS.hidden_units, num_actions])
    return mlp(flat)

  return hk.transform(q)
def main_loop(unused_arg):
  """Train an epsilon-greedy Q-learning agent on bsuite Catch, then
  evaluate it with a greedy policy and print the average return."""
  env = catch.Catch(seed=FLAGS.seed)
  rng = hk.PRNGSequence(jax.random.PRNGKey(FLAGS.seed))

  # Build and initialize Q-network.
  num_actions = env.action_spec().num_values
  network = build_network(num_actions)
  sample_input = env.observation_spec().generate_value()
  net_params = network.init(next(rng), sample_input)

  # Build and initialize optimizer.
  optimizer = optix.adam(FLAGS.learning_rate)
  opt_state = optimizer.init(net_params)

  @jax.jit
  def policy(net_params, key, obs):
    """Sample action from epsilon-greedy policy; also return all Q-values."""
    q = network.apply(net_params, obs)
    a = rlax.epsilon_greedy(epsilon=FLAGS.epsilon).sample(key, q)
    return q, a

  @jax.jit
  def eval_policy(net_params, key, obs):
    """Sample action from greedy policy."""
    q = network.apply(net_params, obs)
    return rlax.greedy().sample(key, q)

  @jax.jit
  def update(net_params, opt_state, obs_tm1, a_tm1, r_t, discount_t, q_t):
    """Update network weights wrt Q-learning TD loss."""
    def q_learning_loss(net_params, obs_tm1, a_tm1, r_t, discount_t, q_t):
      q_tm1 = network.apply(net_params, obs_tm1)
      td_error = rlax.q_learning(q_tm1, a_tm1, r_t, discount_t, q_t)
      return rlax.l2_loss(td_error)

    # One optimizer step on the gradient of the TD loss.
    dloss_dtheta = jax.grad(q_learning_loss)(net_params, obs_tm1, a_tm1, r_t,
                                             discount_t, q_t)
    updates, opt_state = optimizer.update(dloss_dtheta, opt_state)
    net_params = optix.apply_updates(net_params, updates)
    return net_params, opt_state

  print(f"Training agent for {FLAGS.train_episodes} episodes...")
  for _ in range(FLAGS.train_episodes):
    timestep = env.reset()
    obs_tm1 = timestep.observation
    _, a_tm1 = policy(net_params, next(rng), obs_tm1)
    while not timestep.last():
      new_timestep = env.step(int(a_tm1))
      obs_t = new_timestep.observation
      # Sample action from agent policy.
      q_t, a_t = policy(net_params, next(rng), obs_t)
      # Update Q-values; new_timestep.discount is 0 on terminal steps, which
      # zeroes the bootstrap term automatically.
      r_t = new_timestep.reward
      discount_t = FLAGS.discount_factor * new_timestep.discount
      net_params, opt_state = update(net_params, opt_state, obs_tm1, a_tm1, r_t,
                                     discount_t, q_t)
      timestep = new_timestep
      obs_tm1 = obs_t
      a_tm1 = a_t

  print(f"Evaluating agent for {FLAGS.eval_episodes} episodes...")
  returns = 0.
  for _ in range(FLAGS.eval_episodes):
    timestep = env.reset()
    obs = timestep.observation
    while not timestep.last():
      action = eval_policy(net_params, next(rng), obs)
      timestep = env.step(int(action))
      obs = timestep.observation
    # Catch pays out only on the final step (+1 catch / -1 miss).
    returns += timestep.reward
  avg_returns = returns / FLAGS.eval_episodes
  print(f"Done! Average returns: {avg_returns} (range [-1.0, 1.0])")
if __name__ == "__main__":
app.run(main_loop) | examples/catch.py | """A simple Q-learning agent trained to play Catch."""
from absl import app
from absl import flags
from bsuite.experiments.catch import catch
import haiku as hk
from haiku import nets
import jax
from jax.experimental import optix
import jax.numpy as jnp
import rlax
FLAGS = flags.FLAGS
flags.DEFINE_integer("train_episodes", 500, "Number of train episodes.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon", 0.01, "eps-greedy exploration probability.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.01, "Optimizer learning rate.")
flags.DEFINE_integer("seed", 1234, "Random seed.")
def build_network(num_actions: int) -> hk.Transformed:
def q(obs):
flatten = lambda x: jnp.reshape(x, (-1,))
network = hk.Sequential(
[flatten, nets.MLP([FLAGS.hidden_units, num_actions])])
return network(obs)
return hk.transform(q)
def main_loop(unused_arg):
env = catch.Catch(seed=FLAGS.seed)
rng = hk.PRNGSequence(jax.random.PRNGKey(FLAGS.seed))
# Build and initialize Q-network.
num_actions = env.action_spec().num_values
network = build_network(num_actions)
sample_input = env.observation_spec().generate_value()
net_params = network.init(next(rng), sample_input)
# Build and initialize optimizer.
optimizer = optix.adam(FLAGS.learning_rate)
opt_state = optimizer.init(net_params)
@jax.jit
def policy(net_params, key, obs):
"""Sample action from epsilon-greedy policy."""
q = network.apply(net_params, obs)
a = rlax.epsilon_greedy(epsilon=FLAGS.epsilon).sample(key, q)
return q, a
@jax.jit
def eval_policy(net_params, key, obs):
"""Sample action from greedy policy."""
q = network.apply(net_params, obs)
return rlax.greedy().sample(key, q)
@jax.jit
def update(net_params, opt_state, obs_tm1, a_tm1, r_t, discount_t, q_t):
"""Update network weights wrt Q-learning loss."""
def q_learning_loss(net_params, obs_tm1, a_tm1, r_t, discount_t, q_t):
q_tm1 = network.apply(net_params, obs_tm1)
td_error = rlax.q_learning(q_tm1, a_tm1, r_t, discount_t, q_t)
return rlax.l2_loss(td_error)
dloss_dtheta = jax.grad(q_learning_loss)(net_params, obs_tm1, a_tm1, r_t,
discount_t, q_t)
updates, opt_state = optimizer.update(dloss_dtheta, opt_state)
net_params = optix.apply_updates(net_params, updates)
return net_params, opt_state
print(f"Training agent for {FLAGS.train_episodes} episodes...")
for _ in range(FLAGS.train_episodes):
timestep = env.reset()
obs_tm1 = timestep.observation
_, a_tm1 = policy(net_params, next(rng), obs_tm1)
while not timestep.last():
new_timestep = env.step(int(a_tm1))
obs_t = new_timestep.observation
# Sample action from agent policy.
q_t, a_t = policy(net_params, next(rng), obs_t)
# Update Q-values.
r_t = new_timestep.reward
discount_t = FLAGS.discount_factor * new_timestep.discount
net_params, opt_state = update(net_params, opt_state, obs_tm1, a_tm1, r_t,
discount_t, q_t)
timestep = new_timestep
obs_tm1 = obs_t
a_tm1 = a_t
print(f"Evaluating agent for {FLAGS.eval_episodes} episodes...")
returns = 0.
for _ in range(FLAGS.eval_episodes):
timestep = env.reset()
obs = timestep.observation
while not timestep.last():
action = eval_policy(net_params, next(rng), obs)
timestep = env.step(int(action))
obs = timestep.observation
returns += timestep.reward
avg_returns = returns / FLAGS.eval_episodes
print(f"Done! Average returns: {avg_returns} (range [-1.0, 1.0])")
if __name__ == "__main__":
app.run(main_loop) | 0.872429 | 0.481088 |
import copy
import hashlib
import time
from json import JSONDecodeError
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.clients.deluge import OutputDeluge
from flexget.plugins.clients.transmission import PluginTransmission
from flexget.utils import json
from loguru import logger
from requests import RequestException
from .ptsites.executor import Executor
from .ptsites.utils.net_utils import NetUtils
def update_header_cookie(entry, headers, task):
    """Sync the task's request session headers/cookies with an entry.

    Entries carrying site-specific ``headers``/``cookie`` values override
    the session state; otherwise the session is reset (headers back to the
    saved *headers* defaults, cookies cleared).
    """
    session = task.requests
    custom_headers = entry.get('headers')
    if custom_headers:
        session.headers.update(custom_headers)
    else:
        session.headers.clear()
        session.headers = headers
    cookie_str = entry.get('cookie')
    if cookie_str:
        session.cookies.update(NetUtils.cookie_str_to_dict(cookie_str))
    else:
        session.cookies.clear()
def get_qbittorrent_mod_seeding(client_torrent):
    """Return True and attach reseed info if the torrent is actively seeding.

    A qBittorrent torrent counts as seeding when its state contains "up"
    (e.g. "uploading", "stalledUP") but not "pause". Non-seeding torrents
    yield None so callers can use the result as a filter.
    """
    state = client_torrent['qbittorrent_state'].lower()
    if 'up' not in state or 'pause' in state:
        return None
    client_torrent['reseed'] = {
        'path': client_torrent['qbittorrent_save_path'],
        'autoTMM': client_torrent['qbittorrent_auto_tmm'],
        'category': client_torrent['qbittorrent_category'],
    }
    return True
def to_qbittorrent_mod(entry, client_torrent):
    """Copy reseed placement info onto an entry bound for qbittorrent_mod.

    The torrent is added paused so the data can be verified before seeding.
    """
    reseed = client_torrent['reseed']
    entry['savepath'] = reseed.get('path')
    entry['autoTMM'] = reseed.get('autoTMM')
    entry['category'] = reseed.get('category')
    entry['paused'] = 'true'
def get_transmission_seeding(client_torrent):
    """Return the torrent with reseed info attached when it is seeding.

    Transmission statuses containing "seed" (e.g. "seeding", "seed_wait")
    qualify; anything else yields None for use as a truthiness filter.
    """
    status = client_torrent['transmission_status'].lower()
    if 'seed' not in status:
        return None
    client_torrent['reseed'] = {
        'path': client_torrent['transmission_downloadDir'],
    }
    return client_torrent
def to_transmission(entry, client_torrent):
    """Copy reseed placement info onto an entry bound for transmission."""
    reseed = client_torrent['reseed']
    entry['path'] = reseed.get('path')
    # Add paused so the reseed can be checked before it starts.
    entry['add_paused'] = 'Yes'
def transmission_on_task_download(self, task, config):
    """Replacement for PluginTransmission.on_task_download.

    Mirrors the upstream hook, but pushes each entry's per-site
    headers/cookie into the task session (update_header_cookie) before the
    torrent file is fetched, so private-tracker downloads authenticate.
    """
    config = self.prepare_config(config)
    if not config['enabled']:
        return
    # Only fetch ourselves when the user has not configured the download
    # plugin explicitly; otherwise that plugin handles the temp files.
    if 'download' not in task.config:
        download = plugin.get('download', self)
        # Remember the session defaults so entries without custom headers
        # reset back to them.
        headers = copy.deepcopy(task.requests.headers)
        for entry in task.accepted:
            if entry.get('transmission_id'):
                # Already present in the client; nothing to download.
                continue
            if config['action'] != 'add' and entry.get('torrent_info_hash'):
                continue
            update_header_cookie(entry, headers, task)
            download.get_temp_file(task, entry, handle_magnets=True, fail_html=True)


# Monkey-patch the flexget transmission plugin so reseed entries carry
# their site cookies when the .torrent file is downloaded.
PluginTransmission.on_task_download = transmission_on_task_download
def get_deluge_seeding(client_torrent):
    """Return the torrent with reseed info attached when deluge reports it
    seeding; None otherwise (for use as a truthiness filter)."""
    state = client_torrent['deluge_state'].lower()
    if 'seeding' not in state:
        return None
    client_torrent['reseed'] = {
        'path': client_torrent['deluge_save_path'],
        'move_completed_path': client_torrent['deluge_move_completed_path'],
    }
    return client_torrent
def to_deluge(entry, client_torrent):
    """Copy reseed placement info onto an entry bound for deluge."""
    reseed = client_torrent['reseed']
    entry['path'] = reseed.get('path')
    entry['move_completed_path'] = reseed.get('move_completed_path')
    # Add paused so the reseed can be checked before it starts.
    entry['add_paused'] = 'Yes'
def deluge_on_task_download(self, task, config):
    """Replacement for OutputDeluge.on_task_download.

    Mirrors the upstream hook, but injects each entry's per-site
    headers/cookie into the task session (update_header_cookie) before
    fetching the torrent file.
    """
    config = self.prepare_config(config)
    if not config['enabled']:
        return
    # Only fetch ourselves when the download plugin is not configured.
    if 'download' not in task.config:
        download = plugin.get('download', self)
        # Session defaults to restore for entries without custom headers.
        headers = copy.deepcopy(task.requests.headers)
        for entry in task.accepted:
            if entry.get('deluge_id'):
                # Already present in the client.
                continue
            if config['action'] != 'add' and entry.get('torrent_info_hash'):
                continue
            update_header_cookie(entry, headers, task)
            download.get_temp_file(task, entry, handle_magnets=True)


# Monkey-patch the flexget deluge plugin so reseed entries carry their site
# cookies when the .torrent file is downloaded.
OutputDeluge.on_task_download = deluge_on_task_download
# Dispatch table: "from_<client>" keys map to seeding-detector functions run
# against torrents read from that client; bare client names map to converters
# that fill an output Entry with that client's placement fields.
client_map = {
    'from_qbittorrent_mod': get_qbittorrent_mod_seeding,
    'qbittorrent_mod': to_qbittorrent_mod,
    'from_transmission': get_transmission_seeding,
    'transmission': to_transmission,
    'from_deluge': get_deluge_seeding,
    'deluge': to_deluge,
}
class PluginIYUUAutoReseed:
    """Flexget input plugin: discover cross-seedable torrents via the IYUU API.

    Torrents currently seeding in the source client are matched against
    IYUU's info-hash database; every match on a tracker with a configured
    passkey becomes an Entry pre-filled with the original save path, so the
    existing data can be reseeded in place.
    """

    # Config schema. `from` embeds the source client plugin's own schema;
    # `passkeys` properties are generated from the supported-site list.
    schema = {
        'type': 'object',
        'properties': {
            'from': {
                'anyOf': [
                    {'$ref': '/schema/plugins?name=from_qbittorrent_mod'},
                    {'$ref': '/schema/plugins?name=from_transmission'},
                    {'$ref': '/schema/plugins?name=from_deluge'},
                ]
            },
            'to': {'type': 'string', 'enum': list(filter(lambda x: not x.startswith('from'), client_map.keys()))},
            'iyuu': {'type': 'string'},
            'user-agent': {'type': 'string'},
            'show_detail': {'type': 'boolean'},
            'limit': {'type': 'integer'},
            'passkeys': {
                'type': 'object',
                'properties': Executor.build_reseed_schema()
            }
        },
        'additionalProperties': False
    }

    def prepare_config(self, config):
        """Fill defaults for optional config keys and return the config."""
        config.setdefault('iyuu', '')
        config.setdefault('version', '1.10.9')
        config.setdefault('limit', 999)
        config.setdefault('show_detail', False)
        config.setdefault('passkeys', {})
        return config

    def on_task_input(self, task, config):
        """Produce reseed entries: read the source client's torrents, query
        IYUU for matches, and emit one Entry per accepted match.

        Raises plugin.PluginError on any IYUU API/network failure.
        """
        config = self.prepare_config(config)
        passkeys = config.get('passkeys')
        limit = config.get('limit')
        show_detail = config.get('show_detail')
        to = config.get('to')
        result = []
        from_client_method = None
        to_client_method = None
        # Drive the configured source client plugin manually via its
        # 'start' and 'input' phase handlers to obtain its torrent list.
        for from_name, client_config in config['from'].items():
            from_client = plugin.get_plugin_by_name(from_name)
            start_method = from_client.phase_handlers['start']
            input_method = from_client.phase_handlers['input']
            if not to:
                # Default output client = source client ("from_x" -> "x").
                to = from_name[5:]
            start_method(task, client_config)
            result = input_method(task, client_config)
            from_client_method = client_map[from_name]
            to_client_method = client_map[to]
        torrent_dict, torrents_hashes = self.get_torrents_data(result, config, from_client_method)
        if not torrent_dict:
            # Nothing seeding -> nothing to reseed.
            return []
        try:
            data = {
                'sign': config['iyuu'],
                'version': config['version']
            }
            sites_response = task.requests.get('http://api.iyuu.cn/index.php?s=App.Api.Sites', timeout=60,
                                               params=data).json()
            if sites_response.get('ret') != 200:
                raise plugin.PluginError(
                    'http://api.iyuu.cn/index.php?s=App.Api.Sites: {}'.format(sites_response)
                )
            sites_json = self.modify_sites(sites_response['data']['sites'])
            reseed_response = task.requests.post('http://api.iyuu.cn/index.php?s=App.Api.Infohash',
                                                 json=torrents_hashes,
                                                 timeout=60).json()
            if reseed_response.get('ret') != 200:
                raise plugin.PluginError(
                    'http://api.iyuu.cn/index.php?s=App.Api.Infohash Error: {}'.format(reseed_response)
                )
            reseed_json = reseed_response['data']
        except (RequestException, JSONDecodeError) as e:
            raise plugin.PluginError(
                'Error when trying to send request to iyuu: {}'.format(e)
            )
        entries = []
        site_limit = {}
        if sites_json and reseed_json:
            for info_hash, seeds_data in reseed_json.items():
                client_torrent = torrent_dict[info_hash]
                for torrent in seeds_data['torrent']:
                    site = sites_json.get(str(torrent['sid']))
                    if not site:
                        # Site id unknown to IYUU's site list.
                        continue
                    if torrent['info_hash'] in torrent_dict.keys():
                        # This exact torrent is already seeding in the client.
                        continue
                    site_name = self._get_site_name(site['base_url'])
                    passkey = passkeys.get(site_name)
                    if not passkey:
                        if show_detail:
                            logger.info(
                                'no passkey, skip site: {}, title: {}'.format(site_name, client_torrent['title']))
                        continue
                    # Enforce the per-site cap on new reseed entries.
                    if not site_limit.get(site_name):
                        site_limit[site_name] = 1
                    else:
                        if site_limit[site_name] >= limit:
                            logger.info(
                                'site_limit:{} >= limit: {}, skip site: {}, title: {}'.format(
                                    site_limit[site_name],
                                    limit,
                                    site_name,
                                    client_torrent['title'])
                            )
                            continue
                        site_limit[site_name] = site_limit[site_name] + 1
                    torrent_id = str(torrent['torrent_id'])
                    entry = Entry(
                        title=client_torrent['title'],
                        torrent_info_hash=torrent['info_hash']
                    )
                    # Fill client-specific placement fields (save path etc.).
                    to_client_method(entry, client_torrent)
                    entry['class_name'] = site_name
                    Executor.build_reseed(entry, config, site, passkey, torrent_id)
                    if show_detail:
                        logger.info(
                            f"accept site: {site_name}, title: {client_torrent['title']}, url: {entry.get('url', None)}")
                    # Only entries with a resolvable download URL are emitted.
                    if entry.get('url'):
                        entries.append(entry)
        return entries

    def get_torrents_data(self, result, config, from_client_method):
        """Filter seeding torrents and build the signed IYUU query payload.

        Returns (torrent_dict, torrents_hashes): a lower-cased info-hash ->
        client-torrent map, and the request body whose `hash` list is
        sorted and SHA1-digested as the API requires.
        """
        torrent_dict = {}
        torrents_hashes = {}
        hashes = []
        for client_torrent in result:
            # from_client_method returns a truthy value only for torrents
            # that are actually seeding (and attaches their 'reseed' info).
            if from_client_method(client_torrent):
                torrent_info_hash = client_torrent['torrent_info_hash'].lower()
                torrent_dict[torrent_info_hash] = client_torrent
                hashes.append(torrent_info_hash)
        # The hash list must be sorted before signing for a stable digest.
        list.sort(hashes)
        hashes_json = json.dumps(hashes, separators=(',', ':'))
        sha1 = hashlib.sha1(hashes_json.encode("utf-8")).hexdigest()
        torrents_hashes['hash'] = hashes_json
        torrents_hashes['sha1'] = sha1
        torrents_hashes['sign'] = config['iyuu']
        torrents_hashes['timestamp'] = int(time.time())
        torrents_hashes['version'] = config['version']
        return torrent_dict, torrents_hashes

    def modify_sites(self, sites_json):
        """Normalize IYUU's site list and index it by stringified site id."""
        sites_dict = {}
        for site in sites_json:
            # IYUU download pages use bare '{}' placeholders; our templates
            # expect a named '{torrent_id}' placeholder.
            site['download_page'] = site['download_page'].replace('{}', '{torrent_id}')
            if site['base_url'] == 'pt.upxin.net':
                # Site moved domains; IYUU still reports the old one.
                site['base_url'] = 'pt.hdupt.com'
            sites_dict[str(site['id'])] = site
        return sites_dict

    def _get_site_name(self, base_url):
        """Extract the registrable site label from a bare domain string."""
        domain = base_url.split('.')
        site_name = domain[-2]
        if site_name == 'edu':
            # e.g. 'x.edu.cn' -> use the label before 'edu'.
            site_name = domain[-3]
        return site_name
@event('plugin.register')
def register_plugin():
    """Register this plugin with flexget under the `iyuu_auto_reseed` name."""
    plugin.register(PluginIYUUAutoReseed, 'iyuu_auto_reseed', api_ver=2)
import hashlib
import time
from json import JSONDecodeError
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.clients.deluge import OutputDeluge
from flexget.plugins.clients.transmission import PluginTransmission
from flexget.utils import json
from loguru import logger
from requests import RequestException
from .ptsites.executor import Executor
from .ptsites.utils.net_utils import NetUtils
def update_header_cookie(entry, headers, task):
if entry.get('headers'):
task.requests.headers.update(entry['headers'])
else:
task.requests.headers.clear()
task.requests.headers = headers
if entry.get('cookie'):
task.requests.cookies.update(NetUtils.cookie_str_to_dict(entry['cookie']))
else:
task.requests.cookies.clear()
def get_qbittorrent_mod_seeding(client_torrent):
if 'up' in client_torrent['qbittorrent_state'].lower() and 'pause' not in client_torrent[
'qbittorrent_state'].lower():
client_torrent['reseed'] = {
'path': client_torrent['qbittorrent_save_path'],
'autoTMM': client_torrent['qbittorrent_auto_tmm'],
'category': client_torrent['qbittorrent_category']
}
return True
def to_qbittorrent_mod(entry, client_torrent):
entry['savepath'] = client_torrent['reseed'].get('path')
entry['autoTMM'] = client_torrent['reseed'].get('autoTMM')
entry['category'] = client_torrent['reseed'].get('category')
entry['paused'] = 'true'
def get_transmission_seeding(client_torrent):
if 'seed' in client_torrent['transmission_status'].lower():
client_torrent['reseed'] = {
'path': client_torrent['transmission_downloadDir']
}
return client_torrent
def to_transmission(entry, client_torrent):
entry['path'] = client_torrent['reseed'].get('path')
entry['add_paused'] = 'Yes'
def transmission_on_task_download(self, task, config):
config = self.prepare_config(config)
if not config['enabled']:
return
if 'download' not in task.config:
download = plugin.get('download', self)
headers = copy.deepcopy(task.requests.headers)
for entry in task.accepted:
if entry.get('transmission_id'):
continue
if config['action'] != 'add' and entry.get('torrent_info_hash'):
continue
update_header_cookie(entry, headers, task)
download.get_temp_file(task, entry, handle_magnets=True, fail_html=True)
PluginTransmission.on_task_download = transmission_on_task_download
def get_deluge_seeding(client_torrent):
if 'seeding' in client_torrent['deluge_state'].lower():
client_torrent['reseed'] = {
'path': client_torrent['deluge_save_path'],
'move_completed_path': client_torrent['deluge_move_completed_path'],
}
return client_torrent
def to_deluge(entry, client_torrent):
entry['path'] = client_torrent['reseed'].get('path')
entry['move_completed_path'] = client_torrent['reseed'].get('move_completed_path')
entry['add_paused'] = 'Yes'
def deluge_on_task_download(self, task, config):
config = self.prepare_config(config)
if not config['enabled']:
return
if 'download' not in task.config:
download = plugin.get('download', self)
headers = copy.deepcopy(task.requests.headers)
for entry in task.accepted:
if entry.get('deluge_id'):
continue
if config['action'] != 'add' and entry.get('torrent_info_hash'):
continue
update_header_cookie(entry, headers, task)
download.get_temp_file(task, entry, handle_magnets=True)
OutputDeluge.on_task_download = deluge_on_task_download
client_map = {
'from_qbittorrent_mod': get_qbittorrent_mod_seeding,
'qbittorrent_mod': to_qbittorrent_mod,
'from_transmission': get_transmission_seeding,
'transmission': to_transmission,
'from_deluge': get_deluge_seeding,
'deluge': to_deluge,
}
class PluginIYUUAutoReseed:
schema = {
'type': 'object',
'properties': {
'from': {
'anyOf': [
{'$ref': '/schema/plugins?name=from_qbittorrent_mod'},
{'$ref': '/schema/plugins?name=from_transmission'},
{'$ref': '/schema/plugins?name=from_deluge'},
]
},
'to': {'type': 'string', 'enum': list(filter(lambda x: not x.startswith('from'), client_map.keys()))},
'iyuu': {'type': 'string'},
'user-agent': {'type': 'string'},
'show_detail': {'type': 'boolean'},
'limit': {'type': 'integer'},
'passkeys': {
'type': 'object',
'properties': Executor.build_reseed_schema()
}
},
'additionalProperties': False
}
def prepare_config(self, config):
config.setdefault('iyuu', '')
config.setdefault('version', '1.10.9')
config.setdefault('limit', 999)
config.setdefault('show_detail', False)
config.setdefault('passkeys', {})
return config
def on_task_input(self, task, config):
config = self.prepare_config(config)
passkeys = config.get('passkeys')
limit = config.get('limit')
show_detail = config.get('show_detail')
to = config.get('to')
result = []
from_client_method = None
to_client_method = None
for from_name, client_config in config['from'].items():
from_client = plugin.get_plugin_by_name(from_name)
start_method = from_client.phase_handlers['start']
input_method = from_client.phase_handlers['input']
if not to:
to = from_name[5:]
start_method(task, client_config)
result = input_method(task, client_config)
from_client_method = client_map[from_name]
to_client_method = client_map[to]
torrent_dict, torrents_hashes = self.get_torrents_data(result, config, from_client_method)
if not torrent_dict:
return []
try:
data = {
'sign': config['iyuu'],
'version': config['version']
}
sites_response = task.requests.get('http://api.iyuu.cn/index.php?s=App.Api.Sites', timeout=60,
params=data).json()
if sites_response.get('ret') != 200:
raise plugin.PluginError(
'http://api.iyuu.cn/index.php?s=App.Api.Sites: {}'.format(sites_response)
)
sites_json = self.modify_sites(sites_response['data']['sites'])
reseed_response = task.requests.post('http://api.iyuu.cn/index.php?s=App.Api.Infohash',
json=torrents_hashes,
timeout=60).json()
if reseed_response.get('ret') != 200:
raise plugin.PluginError(
'http://api.iyuu.cn/index.php?s=App.Api.Infohash Error: {}'.format(reseed_response)
)
reseed_json = reseed_response['data']
except (RequestException, JSONDecodeError) as e:
raise plugin.PluginError(
'Error when trying to send request to iyuu: {}'.format(e)
)
entries = []
site_limit = {}
if sites_json and reseed_json:
for info_hash, seeds_data in reseed_json.items():
client_torrent = torrent_dict[info_hash]
for torrent in seeds_data['torrent']:
site = sites_json.get(str(torrent['sid']))
if not site:
continue
if torrent['info_hash'] in torrent_dict.keys():
continue
site_name = self._get_site_name(site['base_url'])
passkey = passkeys.get(site_name)
if not passkey:
if show_detail:
logger.info(
'no passkey, skip site: {}, title: {}'.format(site_name, client_torrent['title']))
continue
if not site_limit.get(site_name):
site_limit[site_name] = 1
else:
if site_limit[site_name] >= limit:
logger.info(
'site_limit:{} >= limit: {}, skip site: {}, title: {}'.format(
site_limit[site_name],
limit,
site_name,
client_torrent['title'])
)
continue
site_limit[site_name] = site_limit[site_name] + 1
torrent_id = str(torrent['torrent_id'])
entry = Entry(
title=client_torrent['title'],
torrent_info_hash=torrent['info_hash']
)
to_client_method(entry, client_torrent)
entry['class_name'] = site_name
Executor.build_reseed(entry, config, site, passkey, torrent_id)
if show_detail:
logger.info(
f"accept site: {site_name}, title: {client_torrent['title']}, url: {entry.get('url', None)}")
if entry.get('url'):
entries.append(entry)
return entries
def get_torrents_data(self, result, config, from_client_method):
torrent_dict = {}
torrents_hashes = {}
hashes = []
for client_torrent in result:
if from_client_method(client_torrent):
torrent_info_hash = client_torrent['torrent_info_hash'].lower()
torrent_dict[torrent_info_hash] = client_torrent
hashes.append(torrent_info_hash)
list.sort(hashes)
hashes_json = json.dumps(hashes, separators=(',', ':'))
sha1 = hashlib.sha1(hashes_json.encode("utf-8")).hexdigest()
torrents_hashes['hash'] = hashes_json
torrents_hashes['sha1'] = sha1
torrents_hashes['sign'] = config['iyuu']
torrents_hashes['timestamp'] = int(time.time())
torrents_hashes['version'] = config['version']
return torrent_dict, torrents_hashes
def modify_sites(self, sites_json):
sites_dict = {}
for site in sites_json:
site['download_page'] = site['download_page'].replace('{}', '{torrent_id}')
if site['base_url'] == 'pt.upxin.net':
site['base_url'] = 'pt.hdupt.com'
sites_dict[str(site['id'])] = site
return sites_dict
def _get_site_name(self, base_url):
domain = base_url.split('.')
site_name = domain[-2]
if site_name == 'edu':
site_name = domain[-3]
return site_name
@event('plugin.register')
def register_plugin():
    """Register the IYUU auto-reseed plugin with FlexGet."""
    # NOTE: stray dataset-extraction residue ("| 0.22919 | 0.095602 |") removed
    # from the end of this statement; it was a syntax error.
    plugin.register(PluginIYUUAutoReseed, 'iyuu_auto_reseed', api_ver=2)
from datetime import datetime
from http import HTTPStatus
from operator import itemgetter
from typing import List, Optional, Tuple, Union, cast
import faiss
import torch
from fastapi import FastAPI, Request
from pydantic import BaseSettings
from semantic_search import __version__
from semantic_search.common.util import (
add_to_faiss_index,
encode_with_transformer,
setup_faiss_index,
setup_model_and_tokenizer,
normalize_documents,
)
from semantic_search.schemas import Model, Search, TopMatch
# FastAPI application instance; the route handlers below attach to it.
app = FastAPI(
    title="Scientific Semantic Search",
    description="A simple semantic search engine for scientific papers.",
    version=__version__,
)
class Settings(BaseSettings):
    """Store global settings for the web-service. Pass these as environment variables at server
    startup. E.g.
    `CUDA_DEVICE=0 MAX_LENGTH=384 uvicorn semantic_search.main:app`
    """
    # HuggingFace model identifier (or local path) loaded at startup.
    pretrained_model_name_or_path: str = "johngiorgi/declutr-sci-base"
    # Number of texts embedded per forward pass in encode().
    batch_size: int = 64
    # NOTE(review): not referenced anywhere in this module — presumably consumed
    # by the tokenizer/model setup elsewhere; confirm before removing.
    max_length: Optional[int] = None
    # Forwarded to encode_with_transformer; presumably selects mean-pooling of
    # token embeddings — confirm against its implementation.
    mean_pool: bool = True
    # CUDA device index; -1 presumably selects CPU (confirm in setup_model_and_tokenizer).
    cuda_device: int = -1
# Module-level singletons: environment-driven configuration, and the model
# container whose tokenizer/model/index fields are populated in app_startup().
settings = Settings()
model = Model()
def encode(text: Union[str, List[str]]) -> torch.Tensor:
    """Embed one or more texts, returning embeddings in the original input order.

    A single string is treated as a one-element batch. Inputs are sorted by
    length before batching so similarly-sized texts share a batch (minimizing
    padding); the embeddings are un-sorted before being returned.
    """
    if isinstance(text, str):
        text = [text]
    # Sort the inputs by length, maintaining the original indices so we can un-sort
    # before returning the embeddings. Because this sorting happens before
    # tokenization, character length is only a proxy of the true token lengths.
    # In the future, it would be better to sort by length *after* tokenization,
    # which would lead to an even larger speedup.
    # FIX: the original sorted lexicographically (key=itemgetter(1)) despite the
    # stated intent of sorting by length; sort on len() instead.
    # https://stackoverflow.com/questions/8372399/zip-with-list-output-instead-of-tuple
    sorted_indices, text = cast(
        Tuple[Tuple[int], List[str]],
        zip(*sorted(enumerate(text), key=lambda pair: len(pair[1]))),
    )  # tell mypy explicitly the types of items in the unpacked tuple
    unsorted_indices, _ = zip(*sorted(enumerate(sorted_indices), key=itemgetter(1)))
    batched_embeddings: List[torch.Tensor] = []
    for i in range(0, len(text), settings.batch_size):
        embedding = encode_with_transformer(
            list(text[i : i + settings.batch_size]),
            tokenizer=model.tokenizer,
            model=model.model,
            mean_pool=settings.mean_pool,
        )
        batched_embeddings.append(embedding)
    embeddings = torch.cat(batched_embeddings)
    # Un-sort the embedded text so that it is returned in the same order it was received.
    index_tensor = torch.as_tensor(
        unsorted_indices, dtype=torch.long, device=embeddings.device
    )
    return torch.index_select(embeddings, dim=0, index=index_tensor)
@app.on_event("startup")
def app_startup():
model.tokenizer, model.model = setup_model_and_tokenizer(
settings.pretrained_model_name_or_path, cuda_device=settings.cuda_device
)
embedding_dim = model.model.config.hidden_size
model.index = setup_faiss_index(embedding_dim)
@app.get("/", tags=["General"])
def index(request: Request):
"""Health check."""
response = {
"message": HTTPStatus.OK.phrase,
"method": request.method,
"status-code": HTTPStatus.OK,
"timestamp": datetime.now().isoformat(),
"url": request.url._url,
}
return response
@app.post("/search", tags=["Search"], response_model=List[TopMatch])
async def search(search: Search):
"""Returns the `top_k` most similar documents to `query` from the provided list of `documents`
and the index.
"""
ids = [int(doc.uid) for doc in search.documents]
texts = [document.text for document in search.documents]
# Only add items to the index if they do not already exist.
# See: https://github.com/facebookresearch/faiss/issues/859
# To do this, we first determine which of the incoming ids do not exist in the index
indexed_ids = set(faiss.vector_to_array(model.index.id_map).tolist())
if search.query.text is None and search.query.uid not in indexed_ids:
search.query.text = normalize_documents([search.query.uid])
for i, (id_, text) in enumerate(zip(ids, texts)):
if text is None and id_ not in indexed_ids:
texts[i] = normalize_documents([str(id_)])
# We then embed the corresponding text and update the index
to_embed = [(id_, text) for id_, text in zip(ids, texts) if id_ not in indexed_ids]
if to_embed:
ids, texts = zip(*to_embed) # type: ignore
embeddings = encode(texts).cpu().numpy() # type: ignore
add_to_faiss_index(ids, embeddings, model.index)
# Can't search for more items than exist in the index
top_k = min(model.index.ntotal, search.top_k)
# Embed the query and perform the search
query_embedding = encode(search.query.text).cpu().numpy() # type: ignore
top_k_scores, top_k_indicies = model.index.search(query_embedding, top_k)
top_k_indicies = top_k_indicies.reshape(-1).tolist()
top_k_scores = top_k_scores.reshape(-1).tolist()
if int(search.query.uid) in top_k_indicies:
index = top_k_indicies.index(int(search.query.uid))
del top_k_indicies[index], top_k_scores[index]
response = [TopMatch(uid=uid, score=score) for uid, score in zip(top_k_indicies, top_k_scores)]
return response | semantic_search/main.py | from datetime import datetime
from http import HTTPStatus
from operator import itemgetter
from typing import List, Optional, Tuple, Union, cast
import faiss
import torch
from fastapi import FastAPI, Request
from pydantic import BaseSettings
from semantic_search import __version__
from semantic_search.common.util import (
add_to_faiss_index,
encode_with_transformer,
setup_faiss_index,
setup_model_and_tokenizer,
normalize_documents,
)
from semantic_search.schemas import Model, Search, TopMatch
app = FastAPI(
title="Scientific Semantic Search",
description="A simple semantic search engine for scientific papers.",
version=__version__,
)
class Settings(BaseSettings):
"""Store global settings for the web-service. Pass these as environment variables at server
startup. E.g.
`CUDA_DEVICE=0 MAX_LENGTH=384 uvicorn semantic_search.main:app`
"""
pretrained_model_name_or_path: str = "johngiorgi/declutr-sci-base"
batch_size: int = 64
max_length: Optional[int] = None
mean_pool: bool = True
cuda_device: int = -1
settings = Settings()
model = Model()
def encode(text: Union[str, List[str]]) -> torch.Tensor:
if isinstance(text, str):
text = [text]
# Sort the inputs by length, maintaining the original indices so we can un-sort
# before returning the embeddings. This speeds up embedding by minimizing the
# amount of computation performed on pads. Because this sorting happens before
# tokenization, it is only a proxy of the true lengths of the inputs to the model.
# In the future, it would be better to sort by length *after* tokenization which
# would lead to an even larger speedup.
# https://stackoverflow.com/questions/8372399/zip-with-list-output-instead-of-tuple
sorted_indices, text = cast(
Tuple[Tuple[int], List[str]], zip(*sorted(enumerate(text), key=itemgetter(1)))
) # tell mypy explicitly the types of items in the unpacked tuple
unsorted_indices, _ = zip(*sorted(enumerate(sorted_indices), key=itemgetter(1)))
embeddings: torch.Tensor = []
for i in range(0, len(text), settings.batch_size):
embedding = encode_with_transformer(
list(text[i : i + settings.batch_size]),
tokenizer=model.tokenizer,
model=model.model,
mean_pool=settings.mean_pool,
)
embeddings.append(embedding)
embeddings = torch.cat(embeddings)
# Unsort the embedded text so that it is returned in the same order it was recieved.
unsorted_indices = torch.as_tensor(unsorted_indices, dtype=torch.long, device=embeddings.device)
embeddings = torch.index_select(embeddings, dim=0, index=unsorted_indices)
return embeddings
@app.on_event("startup")
def app_startup():
model.tokenizer, model.model = setup_model_and_tokenizer(
settings.pretrained_model_name_or_path, cuda_device=settings.cuda_device
)
embedding_dim = model.model.config.hidden_size
model.index = setup_faiss_index(embedding_dim)
@app.get("/", tags=["General"])
def index(request: Request):
"""Health check."""
response = {
"message": HTTPStatus.OK.phrase,
"method": request.method,
"status-code": HTTPStatus.OK,
"timestamp": datetime.now().isoformat(),
"url": request.url._url,
}
return response
@app.post("/search", tags=["Search"], response_model=List[TopMatch])
async def search(search: Search):
"""Returns the `top_k` most similar documents to `query` from the provided list of `documents`
and the index.
"""
ids = [int(doc.uid) for doc in search.documents]
texts = [document.text for document in search.documents]
# Only add items to the index if they do not already exist.
# See: https://github.com/facebookresearch/faiss/issues/859
# To do this, we first determine which of the incoming ids do not exist in the index
indexed_ids = set(faiss.vector_to_array(model.index.id_map).tolist())
if search.query.text is None and search.query.uid not in indexed_ids:
search.query.text = normalize_documents([search.query.uid])
for i, (id_, text) in enumerate(zip(ids, texts)):
if text is None and id_ not in indexed_ids:
texts[i] = normalize_documents([str(id_)])
# We then embed the corresponding text and update the index
to_embed = [(id_, text) for id_, text in zip(ids, texts) if id_ not in indexed_ids]
if to_embed:
ids, texts = zip(*to_embed) # type: ignore
embeddings = encode(texts).cpu().numpy() # type: ignore
add_to_faiss_index(ids, embeddings, model.index)
# Can't search for more items than exist in the index
top_k = min(model.index.ntotal, search.top_k)
# Embed the query and perform the search
query_embedding = encode(search.query.text).cpu().numpy() # type: ignore
top_k_scores, top_k_indicies = model.index.search(query_embedding, top_k)
top_k_indicies = top_k_indicies.reshape(-1).tolist()
top_k_scores = top_k_scores.reshape(-1).tolist()
if int(search.query.uid) in top_k_indicies:
index = top_k_indicies.index(int(search.query.uid))
del top_k_indicies[index], top_k_scores[index]
response = [TopMatch(uid=uid, score=score) for uid, score in zip(top_k_indicies, top_k_scores)]
    return response
from dataclasses import dataclass
from sqlalchemy import select, insert, update, delete
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
from .dto import TodoOutDTO, CreatingTodoDTO, UpdatingTodoDTO
from .models import Todo
@dataclass
class TodoNotFound(Exception):
    """Raised when no Todo row exists for the requested id."""
    # id of the todo that could not be found
    todo_id: int
class TodosServiceDependency:
    # NOTE(review): empty marker class — presumably used as a dependency-injection
    # token for TodosService; confirm against the DI container configuration.
    pass
class TodosService:
    """CRUD operations over Todo rows using an async SQLAlchemy session.

    Every write method commits immediately after executing its statement.
    Methods that target a specific id raise TodoNotFound when no row matches.
    """
    def __init__(self, alchemy_session: AsyncSession):
        # The session is provided (and owned) by the caller.
        self._session = alchemy_session
    async def get_all(self) -> list[TodoOutDTO]:
        """Return every todo, mapped to outbound DTOs."""
        todos = await self._session.scalars(select(Todo))
        return [_map_model(t) for t in todos]
    async def get_by_id(self, todo_id: int) -> TodoOutDTO:
        """Return the todo with *todo_id*, or raise TodoNotFound."""
        result = await self._session.get(Todo, todo_id)
        _ensure_that_result_is_not_null(result, todo_id)
        return _map_model(result)
    async def create(self, todo: CreatingTodoDTO) -> TodoOutDTO:
        """Insert a new todo and return the persisted row as a DTO."""
        result = await self._session.execute(
            insert(Todo).values(todo.dict()).returning(Todo)
        )
        await self._session.commit()
        return _map_model(result.one())
    async def update(self, todo_id: int, todo: UpdatingTodoDTO) -> TodoOutDTO:
        """Apply the explicitly-set fields of *todo* to the row with *todo_id*.

        Raises TodoNotFound if no row matched.
        """
        result = await self._session.execute(
            update(Todo)
            .values(todo.dict(exclude_unset=True))
            .where(Todo.id == todo_id)
            .returning(Todo)
        )
        await self._session.commit()
        return _map_model(_get_one(result, todo_id))
    async def delete(self, todo_id: int) -> TodoOutDTO:
        """Delete the row with *todo_id* and return its former contents as a DTO.

        Raises TodoNotFound if no row matched.
        """
        result = await self._session.execute(
            delete(Todo).where(Todo.id == todo_id).returning(Todo)
        )
        await self._session.commit()
        return _map_model(_get_one(result, todo_id))
def _map_model(todo: Todo) -> TodoOutDTO:
    """Convert a Todo ORM row into its outbound DTO."""
    return TodoOutDTO(
        id=todo.id,
        text=todo.text,
        is_completed=todo.is_completed,
    )
def _ensure_that_result_is_not_null(result, todo_id: int) -> None:
    """Raise TodoNotFound when *result* is None; otherwise do nothing."""
    if result is not None:
        return
    raise TodoNotFound(todo_id=todo_id)
def _get_one(result, todo_id: int):
    """Return the single row from *result*, translating NoResultFound into TodoNotFound.

    Raises:
        TodoNotFound: if the statement matched no row.
    """
    try:
        return result.one()
    except NoResultFound as exc:
        # Chain the original exception so tracebacks keep the SQLAlchemy context.
        # (Also removed dataset-extraction residue glued to this line.)
        raise TodoNotFound(todo_id=todo_id) from exc
from sqlalchemy import select, insert, update, delete
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
from .dto import TodoOutDTO, CreatingTodoDTO, UpdatingTodoDTO
from .models import Todo
@dataclass
class TodoNotFound(Exception):
todo_id: int
class TodosServiceDependency:
pass
class TodosService:
def __init__(self, alchemy_session: AsyncSession):
self._session = alchemy_session
async def get_all(self) -> list[TodoOutDTO]:
todos = await self._session.scalars(select(Todo))
return [_map_model(t) for t in todos]
async def get_by_id(self, todo_id: int) -> TodoOutDTO:
result = await self._session.get(Todo, todo_id)
_ensure_that_result_is_not_null(result, todo_id)
return _map_model(result)
async def create(self, todo: CreatingTodoDTO) -> TodoOutDTO:
result = await self._session.execute(
insert(Todo).values(todo.dict()).returning(Todo)
)
await self._session.commit()
return _map_model(result.one())
async def update(self, todo_id: int, todo: UpdatingTodoDTO) -> TodoOutDTO:
result = await self._session.execute(
update(Todo)
.values(todo.dict(exclude_unset=True))
.where(Todo.id == todo_id)
.returning(Todo)
)
await self._session.commit()
return _map_model(_get_one(result, todo_id))
async def delete(self, todo_id: int) -> TodoOutDTO:
result = await self._session.execute(
delete(Todo).where(Todo.id == todo_id).returning(Todo)
)
await self._session.commit()
return _map_model(_get_one(result, todo_id))
def _map_model(todo: Todo) -> TodoOutDTO:
return TodoOutDTO(id=todo.id, text=todo.text, is_completed=todo.is_completed)
def _ensure_that_result_is_not_null(result, todo_id: int) -> None:
if result is None:
raise TodoNotFound(todo_id=todo_id)
def _get_one(result, todo_id: int):
try:
return result.one()
except NoResultFound:
        raise TodoNotFound(todo_id=todo_id)
import os
import sys
import json
import torch
import logging
from tqdm import tqdm
from . import loader_utils
from ..constant import BOS_WORD, EOS_WORD
logger = logging.getLogger()
# -------------------------------------------------------------------------------------------
# preprocess label
# ------------------------------------------------------------------------------------------
def get_span_label(start_end_pos, max_doc_word):
    """Build start/end span labels for a (possibly truncated) document.

    Keyphrase positions are flattened, ranked, and overlap-filtered; spans that
    end beyond *max_doc_word* are discarded. Returns a dict with ``s_label``
    (per-word start indicator list), ``e_label`` (end positions), and
    ``overlap_flag`` (True when overlapping spans were dropped). Both label
    fields are None when no consistent labels remain.
    """
    ranked = loader_utils.flat_rank_pos(start_end_pos)
    kept = loader_utils.loose_filter_overlap(ranked)
    overlap_flag = len(kept) != len(ranked)
    s_label = [0] * max_doc_word
    e_label = []
    for start, end in kept:
        if start <= end < max_doc_word:
            s_label[start] = 1
            e_label.append(end)
    if e_label and sum(s_label) == len(e_label):
        return {"s_label": s_label, "e_label": e_label, "overlap_flag": overlap_flag}
    return {"s_label": None, "e_label": None, "overlap_flag": overlap_flag}
def bert2span_preprocessor(
    examples,
    tokenizer,
    max_token,
    pretrain_model,
    mode,
    max_phrase_words,
    stem_flag=False,
):
    """Tokenize raw examples into bert2span features, attaching span labels in train mode.

    Training examples whose keyphrase spans cannot be represented after
    truncation/overlap filtering are dropped. Returns the list of converted
    examples. (``max_phrase_words`` and ``stem_flag`` are unused here —
    presumably kept for signature parity with other preprocessors; confirm.)
    """
    logger.info(
        "start preparing (%s) features for bert2span (%s) ..." % (mode, pretrain_model)
    )
    overlap_num = 0
    new_examples = []
    for idx, ex in enumerate(tqdm(examples)):
        # tokenize
        tokenize_output = loader_utils.tokenize_for_bert(
            doc_words=ex["doc_words"], tokenizer=tokenizer
        )
        # Map the sub-token cutoff `max_token` back to a word-level cutoff;
        # when the document already fits, no word truncation is needed.
        if len(tokenize_output["tokens"]) < max_token:
            max_word = max_token
        else:
            max_word = tokenize_output["tok_to_orig_index"][max_token - 1] + 1
        new_ex = {}
        new_ex["url"] = ex["url"]
        new_ex["tokens"] = tokenize_output["tokens"][:max_token]
        new_ex["valid_mask"] = tokenize_output["valid_mask"][:max_token]
        new_ex["doc_words"] = ex["doc_words"][:max_word]
        # Each word must map to exactly one "valid" sub-token position.
        assert len(new_ex["tokens"]) == len(new_ex["valid_mask"])
        assert sum(new_ex["valid_mask"]) == len(new_ex["doc_words"])
        if mode == "train":
            parameter = {
                "start_end_pos": ex["start_end_pos"],
                "max_doc_word": len(new_ex["doc_words"]),
            }
            label_dict = get_span_label(**parameter)
            # Drop examples with no usable span labels after truncation/filtering.
            if not label_dict["s_label"]:
                continue
            new_ex["s_label"] = label_dict["s_label"]
            new_ex["e_label"] = label_dict["e_label"]
            assert sum(new_ex["valid_mask"]) == len(new_ex["s_label"])
            if label_dict["overlap_flag"]:
                overlap_num += 1
        new_examples.append(new_ex)
    logger.info(
        "Delete Overlap Keyphrase : %d (overlap / total = %.2f"
        % (overlap_num, float(overlap_num / len(examples) * 100))
        + "%)"
    )
    return new_examples
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# batch batchfy
def bert2span_converter(index, ex, tokenizer, mode, max_phrase_words):
    """ convert each batch data to tensor ; add [CLS] [SEP] tokens ;"""
    padded_tokens = [BOS_WORD] + ex["tokens"] + [EOS_WORD]
    padded_valid = [0] + ex["valid_mask"] + [0]
    src_tensor = torch.LongTensor(tokenizer.convert_tokens_to_ids(padded_tokens))
    valid_mask = torch.LongTensor(padded_valid)
    # Number of word-level positions (the added markers are masked out).
    orig_doc_len = sum(padded_valid)
    if mode != "train":
        return index, src_tensor, valid_mask, orig_doc_len
    return (
        index,
        src_tensor,
        valid_mask,
        orig_doc_len,
        torch.LongTensor(ex["s_label"]),
        torch.LongTensor(ex["e_label"]),
    )
def batchify_bert2span_features_for_train(batch):
    """Collate train/eval examples into zero-padded batch tensors.

    Args:
        batch: list of tuples produced by ``bert2span_converter`` in train mode:
            (index, src_tensor, valid_mask, orig_doc_len, s_label, e_label).

    Returns:
        (input_ids, input_mask, valid_ids, valid_output, active_mask,
        s_label, e_label, end_mask, ids) — tensors padded to the batch maximum
        along each dimension; ``ids`` is the list of example indices.
    """
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    doc_word_lens = [ex[3] for ex in batch]
    s_label_list = [ex[4] for ex in batch]
    e_label_list = [ex[5] for ex in batch]
    # NOTE(review): hidden size hard-coded to BERT-base — confirm if other
    # encoders are ever used.
    bert_output_dim = 768
    max_word_len = max(doc_word_lens)  # word-level
    # ---------------------------------------------------------------
    # [1][2] sub-token ids and attention mask, padded to the longest doc
    doc_max_length = max(d.size(0) for d in docs)
    input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
    input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
    for i, d in enumerate(docs):
        input_ids[i, : d.size(0)].copy_(d)
        input_mask[i, : d.size(0)].fill_(1)
    # ---------------------------------------------------------------
    # [3] valid mask tensor (marks the first sub-token of each word)
    valid_max_length = max(v.size(0) for v in valid_mask)
    valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
    for i, v in enumerate(valid_mask):
        valid_ids[i, : v.size(0)].copy_(v)
    # ---------------------------------------------------------------
    # [4] start labels and [5] word-level active mask
    s_label = torch.LongTensor(len(s_label_list), max_word_len).zero_()
    active_mask = torch.LongTensor(len(s_label_list), max_word_len).zero_()
    for i, s in enumerate(s_label_list):
        s_label[i, : s.size(0)].copy_(s)
        active_mask[i, : s.size(0)].fill_(1)
    # ---------------------------------------------------------------
    # [6] end labels and [7] their mask
    e_label_max_length = max(e.size(0) for e in e_label_list)
    e_label = torch.LongTensor(len(e_label_list), e_label_max_length).zero_()
    end_mask = torch.LongTensor(len(e_label_list), e_label_max_length).zero_()
    for i, e in enumerate(e_label_list):
        if e.size(0) <= 0:
            continue
        e_label[i, : e.size(0)].copy_(e)
        end_mask[i, : e.size(0)].fill_(1)
    # -------------------------------------------------------------------
    # [8] pre-allocated word-level output buffer the model scatters into
    valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
    return (
        input_ids,
        input_mask,
        valid_ids,
        valid_output,
        active_mask,
        s_label,
        e_label,
        end_mask,
        ids,
    )
def batchify_bert2span_features_for_test(batch):
    """Collate test (Dev & Public_Valid) examples into zero-padded batch tensors.

    Args:
        batch: list of tuples produced by ``bert2span_converter`` outside train
            mode: (index, src_tensor, valid_mask, orig_doc_len).

    Returns:
        (input_ids, input_mask, valid_ids, valid_output, active_mask,
        doc_word_lens, ids).
    """
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    doc_word_lens = [ex[3] for ex in batch]
    # NOTE(review): hidden size hard-coded to BERT-base — confirm if other
    # encoders are ever used.
    bert_output_dim = 768
    max_word_len = max(doc_word_lens)  # word-level
    # ---------------------------------------------------------------
    # [1][2] sub-token ids and attention mask, padded to the longest doc
    doc_max_length = max(d.size(0) for d in docs)
    input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
    input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
    for i, d in enumerate(docs):
        input_ids[i, : d.size(0)].copy_(d)
        input_mask[i, : d.size(0)].fill_(1)
    # ---------------------------------------------------------------
    # [3] valid mask tensor (marks the first sub-token of each word)
    valid_max_length = max(v.size(0) for v in valid_mask)
    valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
    for i, v in enumerate(valid_mask):
        valid_ids[i, : v.size(0)].copy_(v)
    # ---------------------------------------------------------------
    # [4] word-level active mask derived from per-example word counts
    active_mask = torch.LongTensor(len(doc_word_lens), max_word_len).zero_()
    for i, l in enumerate(doc_word_lens):
        active_mask[i, :l].fill_(1)
    # -------------------------------------------------------------------
    # [5] pre-allocated word-level output buffer the model scatters into
    valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
    return (
        input_ids,
        input_mask,
        valid_ids,
        valid_output,
        active_mask,
        doc_word_lens,
        ids,
    )
import sys
import json
import torch
import logging
from tqdm import tqdm
from . import loader_utils
from ..constant import BOS_WORD, EOS_WORD
logger = logging.getLogger()
# -------------------------------------------------------------------------------------------
# preprocess label
# ------------------------------------------------------------------------------------------
def get_span_label(start_end_pos, max_doc_word):
# flatten, rank, filter overlap for answer positions
sorted_positions = loader_utils.flat_rank_pos(start_end_pos)
filter_positions = loader_utils.loose_filter_overlap(sorted_positions)
if len(filter_positions) != len(sorted_positions):
overlap_flag = True
else:
overlap_flag = False
s_label = [0] * max_doc_word
e_label = []
for s, e in filter_positions:
if (s <= e) and (e < max_doc_word):
s_label[s] = 1
e_label.append(e)
else:
continue
if (len(e_label) > 0) and (sum(s_label) == len(e_label)):
return {"s_label": s_label, "e_label": e_label, "overlap_flag": overlap_flag}
else:
return {"s_label": None, "e_label": None, "overlap_flag": overlap_flag}
def bert2span_preprocessor(
examples,
tokenizer,
max_token,
pretrain_model,
mode,
max_phrase_words,
stem_flag=False,
):
logger.info(
"start preparing (%s) features for bert2span (%s) ..." % (mode, pretrain_model)
)
overlap_num = 0
new_examples = []
for idx, ex in enumerate(tqdm(examples)):
# tokenize
tokenize_output = loader_utils.tokenize_for_bert(
doc_words=ex["doc_words"], tokenizer=tokenizer
)
if len(tokenize_output["tokens"]) < max_token:
max_word = max_token
else:
max_word = tokenize_output["tok_to_orig_index"][max_token - 1] + 1
new_ex = {}
new_ex["url"] = ex["url"]
new_ex["tokens"] = tokenize_output["tokens"][:max_token]
new_ex["valid_mask"] = tokenize_output["valid_mask"][:max_token]
new_ex["doc_words"] = ex["doc_words"][:max_word]
assert len(new_ex["tokens"]) == len(new_ex["valid_mask"])
assert sum(new_ex["valid_mask"]) == len(new_ex["doc_words"])
if mode == "train":
parameter = {
"start_end_pos": ex["start_end_pos"],
"max_doc_word": len(new_ex["doc_words"]),
}
label_dict = get_span_label(**parameter)
if not label_dict["s_label"]:
continue
new_ex["s_label"] = label_dict["s_label"]
new_ex["e_label"] = label_dict["e_label"]
assert sum(new_ex["valid_mask"]) == len(new_ex["s_label"])
if label_dict["overlap_flag"]:
overlap_num += 1
new_examples.append(new_ex)
logger.info(
"Delete Overlap Keyphrase : %d (overlap / total = %.2f"
% (overlap_num, float(overlap_num / len(examples) * 100))
+ "%)"
)
return new_examples
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# batch batchfy
def bert2span_converter(index, ex, tokenizer, mode, max_phrase_words):
""" convert each batch data to tensor ; add [CLS] [SEP] tokens ;"""
src_tokens = [BOS_WORD] + ex["tokens"] + [EOS_WORD]
valid_ids = [0] + ex["valid_mask"] + [0]
src_tensor = torch.LongTensor(tokenizer.convert_tokens_to_ids(src_tokens))
valid_mask = torch.LongTensor(valid_ids)
orig_doc_len = sum(valid_ids)
if mode == "train":
s_label_tensor = torch.LongTensor(ex["s_label"])
e_label_tensor = torch.LongTensor(ex["e_label"])
return (
index,
src_tensor,
valid_mask,
orig_doc_len,
s_label_tensor,
e_label_tensor,
)
else:
return index, src_tensor, valid_mask, orig_doc_len
def batchify_bert2span_features_for_train(batch):
""" train dataloader & eval dataloader ."""
ids = [ex[0] for ex in batch]
docs = [ex[1] for ex in batch]
valid_mask = [ex[2] for ex in batch]
doc_word_lens = [ex[3] for ex in batch]
s_label_list = [ex[4] for ex in batch]
e_label_list = [ex[5] for ex in batch]
bert_output_dim = 768
max_word_len = max([word_len for word_len in doc_word_lens]) # word-level
# ---------------------------------------------------------------
# [1][2]src tokens tensor
doc_max_length = max([d.size(0) for d in docs])
input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
for i, d in enumerate(docs):
input_ids[i, : d.size(0)].copy_(d)
input_mask[i, : d.size(0)].fill_(1)
# ---------------------------------------------------------------
# [3] valid mask tensor
valid_max_length = max([v.size(0) for v in valid_mask])
valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
for i, v in enumerate(valid_mask):
valid_ids[i, : v.size(0)].copy_(v)
# ---------------------------------------------------------------
# [4] start label [5] active_mask
s_label = torch.LongTensor(len(s_label_list), max_word_len).zero_()
active_mask = torch.LongTensor(len(s_label_list), max_word_len).zero_()
for i, s in enumerate(s_label_list):
s_label[i, : s.size(0)].copy_(s)
active_mask[i, : s.size(0)].fill_(1)
# ---------------------------------------------------------------
# [6] end label [7] end_mask
e_label_max_length = max([e.size(0) for e in e_label_list])
e_label = torch.LongTensor(len(e_label_list), e_label_max_length).zero_()
end_mask = torch.LongTensor(len(e_label_list), e_label_max_length).zero_()
for i, e in enumerate(e_label_list):
if e.size(0) <= 0:
continue
e_label[i, : e.size(0)].copy_(e)
end_mask[i, : e.size(0)].fill_(1)
# -------------------------------------------------------------------
# [8] Empty Tensor : word-level max_len
valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
return (
input_ids,
input_mask,
valid_ids,
valid_output,
active_mask,
s_label,
e_label,
end_mask,
ids,
)
def batchify_bert2span_features_for_test(batch):
""" test dataloader for Dev & Public_Valid."""
ids = [ex[0] for ex in batch]
docs = [ex[1] for ex in batch]
valid_mask = [ex[2] for ex in batch]
doc_word_lens = [ex[3] for ex in batch]
bert_output_dim = 768
max_word_len = max([word_len for word_len in doc_word_lens]) # word-level
# ---------------------------------------------------------------
# [1][2]src tokens tensor
doc_max_length = max([d.size(0) for d in docs])
input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
for i, d in enumerate(docs):
input_ids[i, : d.size(0)].copy_(d)
input_mask[i, : d.size(0)].fill_(1)
# ---------------------------------------------------------------
# [3] valid mask tensor
valid_max_length = max([v.size(0) for v in valid_mask])
valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
for i, v in enumerate(valid_mask):
valid_ids[i, : v.size(0)].copy_(v)
# ---------------------------------------------------------------
# [4] valid length tensor
active_mask = torch.LongTensor(len(doc_word_lens), max_word_len).zero_()
for i, l in enumerate(doc_word_lens):
active_mask[i, :l].fill_(1)
# -------------------------------------------------------------------
# [5] Empty Tensor : word-level max_len
valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
return (
input_ids,
input_mask,
valid_ids,
valid_output,
active_mask,
doc_word_lens,
ids,
    )
import torch
from torch import nn
import torch.nn.functional as F
from models.attention import ChannelAttention, SpatialAttention
class BasicConvBlock(nn.Module):
    """3x3 convolution followed by ReLU; *stride* > 1 down-samples spatially."""

    def __init__(self, in_chans, out_chans, stride=1):
        super().__init__()
        conv = nn.Conv2d(in_chans, out_chans, kernel_size=3, stride=stride, padding=1)
        self.layers = nn.Sequential(conv, nn.ReLU())

    def forward(self, tensor):
        return self.layers(tensor)
class ResBlock(nn.Module):
    """Residual block: conv-ReLU-conv, optional channel/spatial attention, scaled skip add.

    The attention modules (ChannelAttention / SpatialAttention) are project code;
    their exact behavior is defined in models.attention.
    """
    def __init__(self, num_chans, kernel_size=3, res_scale=1.,
                 use_ca=True, reduction=16, use_gap=True, use_gmp=True,
                 use_sa=True, sa_kernel_size=7, sa_dilation=1, use_cap=True, use_cmp=True):
        super().__init__()
        # Odd kernel + kernel_size // 2 padding keeps spatial dimensions unchanged.
        assert kernel_size % 2, 'Kernel size is expected to be an odd number.'
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels=num_chans, out_channels=num_chans, kernel_size=kernel_size, padding=kernel_size // 2),
            nn.ReLU(),
            nn.Conv2d(in_channels=num_chans, out_channels=num_chans, kernel_size=kernel_size, padding=kernel_size // 2),
        )
        self.ca = ChannelAttention(num_chans=num_chans, reduction=reduction, use_gap=use_gap, use_gmp=use_gmp)
        self.sa = SpatialAttention(kernel_size=sa_kernel_size, dilation=sa_dilation, use_cap=use_cap, use_cmp=use_cmp)
        # res_scale damps the residual branch before the skip connection.
        self.res_scale = res_scale
        self.use_ca = use_ca
        self.use_sa = use_sa
    def forward(self, tensor):  # The addition of the residual is also a non-linearity.
        output = self.res_scale * self.layer(tensor)
        if self.use_ca:
            output = self.ca(output)
        if self.use_sa:
            output = self.sa(output)
        return tensor + output
class ResizeConv(nn.Module):
    """Nearest-neighbour upsampling by *scale_factor*, then 3x3 conv + ReLU."""

    def __init__(self, in_chans, out_chans, scale_factor=2):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(in_channels=in_chans, out_channels=out_chans, kernel_size=3, padding=1),
            nn.ReLU()
        )
        self.scale_factor = scale_factor

    def forward(self, tensor):
        upsampled = F.interpolate(tensor, scale_factor=self.scale_factor, mode='nearest')
        return self.layers(upsampled)
class UNet(nn.Module):
def __init__(self, in_chans, out_chans, chans, num_pool_layers, num_depth_blocks,
res_scale=0.1, use_residual=True, use_ca=True, reduction=16, use_gap=True, use_gmp=True,
use_sa=True, sa_kernel_size=7, sa_dilation=1, use_cap=True, use_cmp=True):
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.num_depth_blocks = num_depth_blocks # This must be a positive integer.
self.use_residual = use_residual
kwargs = dict(use_ca=use_ca, reduction=reduction, use_gap=use_gap, use_gmp=use_gmp, use_sa=use_sa,
sa_kernel_size=sa_kernel_size, sa_dilation=sa_dilation, use_cap=use_cap, use_cmp=use_cmp)
# First block should have no reduction in feature map size.
conv = BasicConvBlock(in_chans=in_chans, out_chans=chans, stride=1)
res = ResBlock(num_chans=chans, kernel_size=3, res_scale=res_scale, **kwargs)
self.down_reshape_layers = nn.ModuleList([conv])
self.down_res_blocks = nn.ModuleList([res])
ch = chans
for _ in range(num_pool_layers - 1):
conv = BasicConvBlock(in_chans=ch, out_chans=ch * 2, stride=2)
res = ResBlock(num_chans=ch * 2, res_scale=res_scale, **kwargs)
self.down_reshape_layers.append(conv)
self.down_res_blocks.append(res)
ch *= 2
# Size reduction happens at the beginning of a block, hence the need for stride here.
self.mid_conv = BasicConvBlock(in_chans=ch, out_chans=ch, stride=2)
self.mid_res_blocks = nn.ModuleList()
for _ in range(num_depth_blocks):
self.mid_res_blocks.append(ResBlock(num_chans=ch, res_scale=res_scale, **kwargs))
self.upscale_layers = nn.ModuleList()
self.up_reshape_layers = nn.ModuleList()
self.up_res_blocks = nn.ModuleList()
for _ in range(num_pool_layers - 1):
deconv = ResizeConv(in_chans=ch, out_chans=ch, scale_factor=2)
conv = BasicConvBlock(in_chans=ch * 2, out_chans=ch // 2, stride=1)
res = ResBlock(num_chans=ch // 2, res_scale=res_scale, **kwargs)
self.upscale_layers.append(deconv)
self.up_reshape_layers.append(conv)
self.up_res_blocks.append(res)
ch //= 2
else: # Last block of up-sampling.
deconv = ResizeConv(in_chans=ch, out_chans=ch, scale_factor=2)
conv = BasicConvBlock(in_chans=ch * 2, out_chans=ch, stride=1)
res = ResBlock(num_chans=ch, res_scale=res_scale, **kwargs)
self.upscale_layers.append(deconv)
self.up_reshape_layers.append(conv)
self.up_res_blocks.append(res)
assert chans == ch, 'Channel indexing error!'
self.final_layers = nn.Conv2d(in_channels=ch, out_channels=out_chans, kernel_size=1)
assert len(self.down_reshape_layers) == len(self.down_res_blocks) == len(self.upscale_layers) \
== len(self.up_reshape_layers) == len(self.up_res_blocks) == self.num_pool_layers, 'Layer number error!'
def forward(self, tensor):
stack = list()
output = tensor
# Down-Sampling
for idx in range(self.num_pool_layers):
output = self.down_reshape_layers[idx](output)
output = self.down_res_blocks[idx](output)
stack.append(output)
# Middle blocks
middle = self.mid_conv(output)
output = middle
for layer in self.mid_res_blocks:
output = layer(output) # Residual layers in the middle.
else: # Residual of the portion before residual blocks began, same as EDSR.
output = output + middle
# Up-Sampling.
for idx in range(self.num_pool_layers):
output = self.upscale_layers[idx](output)
output = torch.cat([output, stack.pop()], dim=1)
output = self.up_reshape_layers[idx](output)
output = self.up_res_blocks[idx](output)
output = self.final_layers(output)
return (tensor + output) if self.use_residual else output | models/new_edsr_unet.py | import torch
from torch import nn
import torch.nn.functional as F
from models.attention import ChannelAttention, SpatialAttention
class BasicConvBlock(nn.Module):
def __init__(self, in_chans, out_chans, stride=1):
super().__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_chans, out_chans, kernel_size=3, stride=stride, padding=1),
nn.ReLU(),
)
def forward(self, tensor):
return self.layers(tensor)
class ResBlock(nn.Module):
def __init__(self, num_chans, kernel_size=3, res_scale=1.,
use_ca=True, reduction=16, use_gap=True, use_gmp=True,
use_sa=True, sa_kernel_size=7, sa_dilation=1, use_cap=True, use_cmp=True):
super().__init__()
assert kernel_size % 2, 'Kernel size is expected to be an odd number.'
self.layer = nn.Sequential(
nn.Conv2d(in_channels=num_chans, out_channels=num_chans, kernel_size=kernel_size, padding=kernel_size // 2),
nn.ReLU(),
nn.Conv2d(in_channels=num_chans, out_channels=num_chans, kernel_size=kernel_size, padding=kernel_size // 2),
)
self.ca = ChannelAttention(num_chans=num_chans, reduction=reduction, use_gap=use_gap, use_gmp=use_gmp)
self.sa = SpatialAttention(kernel_size=sa_kernel_size, dilation=sa_dilation, use_cap=use_cap, use_cmp=use_cmp)
self.res_scale = res_scale
self.use_ca = use_ca
self.use_sa = use_sa
def forward(self, tensor): # The addition of the residual is also a non-linearity.
output = self.res_scale * self.layer(tensor)
if self.use_ca:
output = self.ca(output)
if self.use_sa:
output = self.sa(output)
return tensor + output
class ResizeConv(nn.Module):
def __init__(self, in_chans, out_chans, scale_factor=2):
super().__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_channels=in_chans, out_channels=out_chans, kernel_size=3, padding=1),
nn.ReLU()
)
self.scale_factor = scale_factor
def forward(self, tensor):
output = F.interpolate(tensor, scale_factor=self.scale_factor, mode='nearest')
return self.layers(output)
class UNet(nn.Module):
def __init__(self, in_chans, out_chans, chans, num_pool_layers, num_depth_blocks,
res_scale=0.1, use_residual=True, use_ca=True, reduction=16, use_gap=True, use_gmp=True,
use_sa=True, sa_kernel_size=7, sa_dilation=1, use_cap=True, use_cmp=True):
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.num_depth_blocks = num_depth_blocks # This must be a positive integer.
self.use_residual = use_residual
kwargs = dict(use_ca=use_ca, reduction=reduction, use_gap=use_gap, use_gmp=use_gmp, use_sa=use_sa,
sa_kernel_size=sa_kernel_size, sa_dilation=sa_dilation, use_cap=use_cap, use_cmp=use_cmp)
# First block should have no reduction in feature map size.
conv = BasicConvBlock(in_chans=in_chans, out_chans=chans, stride=1)
res = ResBlock(num_chans=chans, kernel_size=3, res_scale=res_scale, **kwargs)
self.down_reshape_layers = nn.ModuleList([conv])
self.down_res_blocks = nn.ModuleList([res])
ch = chans
for _ in range(num_pool_layers - 1):
conv = BasicConvBlock(in_chans=ch, out_chans=ch * 2, stride=2)
res = ResBlock(num_chans=ch * 2, res_scale=res_scale, **kwargs)
self.down_reshape_layers.append(conv)
self.down_res_blocks.append(res)
ch *= 2
# Size reduction happens at the beginning of a block, hence the need for stride here.
self.mid_conv = BasicConvBlock(in_chans=ch, out_chans=ch, stride=2)
self.mid_res_blocks = nn.ModuleList()
for _ in range(num_depth_blocks):
self.mid_res_blocks.append(ResBlock(num_chans=ch, res_scale=res_scale, **kwargs))
self.upscale_layers = nn.ModuleList()
self.up_reshape_layers = nn.ModuleList()
self.up_res_blocks = nn.ModuleList()
for _ in range(num_pool_layers - 1):
deconv = ResizeConv(in_chans=ch, out_chans=ch, scale_factor=2)
conv = BasicConvBlock(in_chans=ch * 2, out_chans=ch // 2, stride=1)
res = ResBlock(num_chans=ch // 2, res_scale=res_scale, **kwargs)
self.upscale_layers.append(deconv)
self.up_reshape_layers.append(conv)
self.up_res_blocks.append(res)
ch //= 2
else: # Last block of up-sampling.
deconv = ResizeConv(in_chans=ch, out_chans=ch, scale_factor=2)
conv = BasicConvBlock(in_chans=ch * 2, out_chans=ch, stride=1)
res = ResBlock(num_chans=ch, res_scale=res_scale, **kwargs)
self.upscale_layers.append(deconv)
self.up_reshape_layers.append(conv)
self.up_res_blocks.append(res)
assert chans == ch, 'Channel indexing error!'
self.final_layers = nn.Conv2d(in_channels=ch, out_channels=out_chans, kernel_size=1)
assert len(self.down_reshape_layers) == len(self.down_res_blocks) == len(self.upscale_layers) \
== len(self.up_reshape_layers) == len(self.up_res_blocks) == self.num_pool_layers, 'Layer number error!'
def forward(self, tensor):
stack = list()
output = tensor
# Down-Sampling
for idx in range(self.num_pool_layers):
output = self.down_reshape_layers[idx](output)
output = self.down_res_blocks[idx](output)
stack.append(output)
# Middle blocks
middle = self.mid_conv(output)
output = middle
for layer in self.mid_res_blocks:
output = layer(output) # Residual layers in the middle.
else: # Residual of the portion before residual blocks began, same as EDSR.
output = output + middle
# Up-Sampling.
for idx in range(self.num_pool_layers):
output = self.upscale_layers[idx](output)
output = torch.cat([output, stack.pop()], dim=1)
output = self.up_reshape_layers[idx](output)
output = self.up_res_blocks[idx](output)
output = self.final_layers(output)
return (tensor + output) if self.use_residual else output | 0.950238 | 0.435721 |
from __future__ import absolute_import
import abc
import contextlib
from galaxy.tool_util.verify import interactor
from planemo.galaxy.activity import execute
from planemo.galaxy.config import external_galaxy_config
from planemo.galaxy.serve import serve_daemon
from planemo.runnable import RunnableType
from .interface import BaseEngine
class GalaxyEngine(BaseEngine):
"""An :class:`Engine` implementation backed by a managed Galaxy.
More information on Galaxy can be found at http://galaxyproject.org/.
"""
__metaclass__ = abc.ABCMeta
handled_runnable_types = [
RunnableType.cwl_tool,
RunnableType.cwl_workflow,
RunnableType.galaxy_workflow,
RunnableType.galaxy_tool,
RunnableType.galaxy_datamanager,
]
def _run(self, runnable, job_path):
"""Run CWL job in Galaxy."""
self._ctx.vlog("Serving artifact [%s] with Galaxy." % (runnable,))
with self.ensure_runnables_served([runnable]) as config:
self._ctx.vlog("Running job path [%s]" % job_path)
run_response = execute(self._ctx, config, runnable, job_path, **self._kwds)
return run_response
@abc.abstractmethod
def ensure_runnables_served(self, runnables):
"""Use a context manager and describe Galaxy instance with runnables being served."""
def _run_test_case(self, test_case):
if hasattr(test_case, "job_path"):
# Simple file-based job path.
return super(GalaxyEngine, self)._run_test_case(test_case)
else:
with self.ensure_runnables_served([test_case.runnable]) as config:
galaxy_interactor_kwds = {
"galaxy_url": config.galaxy_url,
"master_api_key": config.master_api_key,
"api_key": config.user_api_key,
"keep_outputs_dir": "", # TODO: this...
}
tool_id = test_case.tool_id
test_index = test_case.test_index
tool_version = test_case.tool_version
galaxy_interactor = interactor.GalaxyInteractorApi(**galaxy_interactor_kwds)
test_results = []
def _register_job_data(job_data):
test_results.append({
'id': tool_id + "-" + str(test_index),
'has_data': True,
'data': job_data,
})
verbose = self._ctx.verbose
try:
if verbose:
# TODO: this is pretty hacky, it'd be better to send a stream
# and capture the output information somehow.
interactor.VERBOSE_GALAXY_ERRORS = True
interactor.verify_tool(
tool_id,
galaxy_interactor,
test_index=test_index,
tool_version=tool_version,
register_job_data=_register_job_data,
quiet=not verbose,
)
except Exception:
pass
return test_results[0]
class LocalManagedGalaxyEngine(GalaxyEngine):
"""An :class:`Engine` implementation backed by a managed Galaxy.
More information on Galaxy can be found at http://galaxyproject.org/.
"""
@contextlib.contextmanager
def ensure_runnables_served(self, runnables):
# TODO: define an interface for this - not everything in config would make sense for a
# pre-existing Galaxy interface.
with serve_daemon(self._ctx, runnables, **self._serve_kwds()) as config:
yield config
def _serve_kwds(self):
return self._kwds.copy()
class DockerizedManagedGalaxyEngine(LocalManagedGalaxyEngine):
"""An :class:`Engine` implementation backed by Galaxy running in Docker.
More information on Galaxy can be found at http://galaxyproject.org/.
"""
def _serve_kwds(self):
serve_kwds = self._kwds.copy()
serve_kwds["dockerize"] = True
return serve_kwds
class ExternalGalaxyEngine(GalaxyEngine):
"""An :class:`Engine` implementation backed by an external Galaxy instance.
"""
@contextlib.contextmanager
def ensure_runnables_served(self, runnables):
# TODO: ensure tools are available
with external_galaxy_config(self._ctx, runnables, **self._kwds) as config:
config.install_workflows()
yield config
__all__ = (
"DockerizedManagedGalaxyEngine",
"ExternalGalaxyEngine",
"LocalManagedGalaxyEngine",
) | planemo/engine/galaxy.py | from __future__ import absolute_import
import abc
import contextlib
from galaxy.tool_util.verify import interactor
from planemo.galaxy.activity import execute
from planemo.galaxy.config import external_galaxy_config
from planemo.galaxy.serve import serve_daemon
from planemo.runnable import RunnableType
from .interface import BaseEngine
class GalaxyEngine(BaseEngine):
"""An :class:`Engine` implementation backed by a managed Galaxy.
More information on Galaxy can be found at http://galaxyproject.org/.
"""
__metaclass__ = abc.ABCMeta
handled_runnable_types = [
RunnableType.cwl_tool,
RunnableType.cwl_workflow,
RunnableType.galaxy_workflow,
RunnableType.galaxy_tool,
RunnableType.galaxy_datamanager,
]
def _run(self, runnable, job_path):
"""Run CWL job in Galaxy."""
self._ctx.vlog("Serving artifact [%s] with Galaxy." % (runnable,))
with self.ensure_runnables_served([runnable]) as config:
self._ctx.vlog("Running job path [%s]" % job_path)
run_response = execute(self._ctx, config, runnable, job_path, **self._kwds)
return run_response
@abc.abstractmethod
def ensure_runnables_served(self, runnables):
"""Use a context manager and describe Galaxy instance with runnables being served."""
def _run_test_case(self, test_case):
if hasattr(test_case, "job_path"):
# Simple file-based job path.
return super(GalaxyEngine, self)._run_test_case(test_case)
else:
with self.ensure_runnables_served([test_case.runnable]) as config:
galaxy_interactor_kwds = {
"galaxy_url": config.galaxy_url,
"master_api_key": config.master_api_key,
"api_key": config.user_api_key,
"keep_outputs_dir": "", # TODO: this...
}
tool_id = test_case.tool_id
test_index = test_case.test_index
tool_version = test_case.tool_version
galaxy_interactor = interactor.GalaxyInteractorApi(**galaxy_interactor_kwds)
test_results = []
def _register_job_data(job_data):
test_results.append({
'id': tool_id + "-" + str(test_index),
'has_data': True,
'data': job_data,
})
verbose = self._ctx.verbose
try:
if verbose:
# TODO: this is pretty hacky, it'd be better to send a stream
# and capture the output information somehow.
interactor.VERBOSE_GALAXY_ERRORS = True
interactor.verify_tool(
tool_id,
galaxy_interactor,
test_index=test_index,
tool_version=tool_version,
register_job_data=_register_job_data,
quiet=not verbose,
)
except Exception:
pass
return test_results[0]
class LocalManagedGalaxyEngine(GalaxyEngine):
"""An :class:`Engine` implementation backed by a managed Galaxy.
More information on Galaxy can be found at http://galaxyproject.org/.
"""
@contextlib.contextmanager
def ensure_runnables_served(self, runnables):
# TODO: define an interface for this - not everything in config would make sense for a
# pre-existing Galaxy interface.
with serve_daemon(self._ctx, runnables, **self._serve_kwds()) as config:
yield config
def _serve_kwds(self):
return self._kwds.copy()
class DockerizedManagedGalaxyEngine(LocalManagedGalaxyEngine):
"""An :class:`Engine` implementation backed by Galaxy running in Docker.
More information on Galaxy can be found at http://galaxyproject.org/.
"""
def _serve_kwds(self):
serve_kwds = self._kwds.copy()
serve_kwds["dockerize"] = True
return serve_kwds
class ExternalGalaxyEngine(GalaxyEngine):
"""An :class:`Engine` implementation backed by an external Galaxy instance.
"""
@contextlib.contextmanager
def ensure_runnables_served(self, runnables):
# TODO: ensure tools are available
with external_galaxy_config(self._ctx, runnables, **self._kwds) as config:
config.install_workflows()
yield config
__all__ = (
"DockerizedManagedGalaxyEngine",
"ExternalGalaxyEngine",
"LocalManagedGalaxyEngine",
) | 0.619011 | 0.193471 |
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MAC, TEMP_CELSIUS, CONF_DEVICES, ATTR_TEMPERATURE)
from homeassistant.util.temperature import convert
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['bluepy_devices==0.2.0']
_LOGGER = logging.getLogger(__name__)
ATTR_MODE = 'mode'
ATTR_MODE_READABLE = 'mode_readable'
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_MAC): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES):
vol.Schema({cv.string: DEVICE_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the eQ-3 BLE thermostats."""
devices = []
for name, device_cfg in config[CONF_DEVICES].items():
mac = device_cfg[CONF_MAC]
devices.append(EQ3BTSmartThermostat(mac, name))
add_devices(devices)
# pylint: disable=import-error
class EQ3BTSmartThermostat(ClimateDevice):
"""Representation of a eQ-3 Bluetooth Smart thermostat."""
def __init__(self, _mac, _name):
"""Initialize the thermostat."""
from bluepy_devices.devices import eq3btsmart
self._name = _name
self._thermostat = eq3btsmart.EQ3BTSmartThermostat(_mac)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Can not report temperature, so return target_temperature."""
return self.target_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._thermostat.target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._thermostat.target_temperature = temperature
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_MODE: self._thermostat.mode,
ATTR_MODE_READABLE: self._thermostat.mode_readable,
}
@property
def min_temp(self):
"""Return the minimum temperature."""
return convert(self._thermostat.min_temp, TEMP_CELSIUS,
self.unit_of_measurement)
@property
def max_temp(self):
"""Return the maximum temperature."""
return convert(self._thermostat.max_temp, TEMP_CELSIUS,
self.unit_of_measurement)
def update(self):
"""Update the data from the thermostat."""
self._thermostat.update() | homeassistant/components/climate/eq3btsmart.py | import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MAC, TEMP_CELSIUS, CONF_DEVICES, ATTR_TEMPERATURE)
from homeassistant.util.temperature import convert
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['bluepy_devices==0.2.0']
_LOGGER = logging.getLogger(__name__)
ATTR_MODE = 'mode'
ATTR_MODE_READABLE = 'mode_readable'
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_MAC): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES):
vol.Schema({cv.string: DEVICE_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the eQ-3 BLE thermostats."""
devices = []
for name, device_cfg in config[CONF_DEVICES].items():
mac = device_cfg[CONF_MAC]
devices.append(EQ3BTSmartThermostat(mac, name))
add_devices(devices)
# pylint: disable=import-error
class EQ3BTSmartThermostat(ClimateDevice):
"""Representation of a eQ-3 Bluetooth Smart thermostat."""
def __init__(self, _mac, _name):
"""Initialize the thermostat."""
from bluepy_devices.devices import eq3btsmart
self._name = _name
self._thermostat = eq3btsmart.EQ3BTSmartThermostat(_mac)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Can not report temperature, so return target_temperature."""
return self.target_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._thermostat.target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._thermostat.target_temperature = temperature
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_MODE: self._thermostat.mode,
ATTR_MODE_READABLE: self._thermostat.mode_readable,
}
@property
def min_temp(self):
"""Return the minimum temperature."""
return convert(self._thermostat.min_temp, TEMP_CELSIUS,
self.unit_of_measurement)
@property
def max_temp(self):
"""Return the maximum temperature."""
return convert(self._thermostat.max_temp, TEMP_CELSIUS,
self.unit_of_measurement)
def update(self):
"""Update the data from the thermostat."""
self._thermostat.update() | 0.779657 | 0.192122 |
import random
from protocolbuffers import SimObjectAttributes_pb2 as protocols
from interactions import ParticipantType
from interactions.utils.interaction_elements import XevtTriggeredElement
from interactions.utils.loot_basic_op import BaseLootOperation, BaseTargetedLootOperation
from objects.client_object_mixin import ClientObjectMixin
from objects.components import Component, types, componentmethod, componentmethod_with_fallback
from objects.hovertip import TooltipFieldsComplete
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.tunable import HasTunableFactory, Tunable, TunableReference, OptionalTunable, TunableList, TunableTuple, AutoFactoryInit, TunableEnumEntry
from singletons import DEFAULT
import profanity, services, sims4.callback_utils, sims4.log
# Module-level logger shared by NameComponent and the name-related loot ops below.
logger = sims4.log.Logger('NameComponent', default_owner='shipark')
# Priority passed to update_tooltip_field() for custom name/description writes.
NAME_COMPONENT_TOOLTIP_PRIORITY = 1
class NameComponent(Component, HasTunableFactory, component_name=types.NAME_COMPONENT, persistence_key=protocols.PersistenceMaster.PersistableData.NameComponent):
    """Component storing a user-editable custom name and/or description.

    Responsibilities visible in this class:
    - persist/restore the custom text via the PersistableNameComponent protocol
    - push the text into the owner's tooltip fields
    - profanity-filter text on gallery-downloaded objects
    - notify registered name-changed callbacks
    - provide the rename affordance (tuned override or DEFAULT_AFFORDANCE)
    """
    DEFAULT_AFFORDANCE = TunableReference((services.affordance_manager()), description='The affordance generated by all NameComponents.')
    FACTORY_TUNABLES = {'allow_name':Tunable(description='\n If set, the user is allowed to give a custom name to this\n object.\n ',
      tunable_type=bool,
      default=True),
     'allow_description':Tunable(description='\n If set, the user is allowed to give a custom description to this\n object.\n ',
      tunable_type=bool,
      default=False),
     'affordance':OptionalTunable(tunable=TunableReference(description='\n The affordance provided by this Name component. Use it if you want\n to provide a custom affordance instead of the default one, which\n will not be used if this is set.\n ',
      manager=(services.affordance_manager())),
      disabled_name='use_default'),
     'templates':TunableList(description='\n The list of the template content for this component.\n ',
      tunable=TunableTuple(template_name=TunableLocalizedStringFactory(description='\n The template name for the component.\n ',
      allow_none=True),
      template_description=TunableLocalizedStringFactory(description='\n The template description for the component.\n ',
      allow_none=True)))}

    def __init__(self, *args, allow_name=None, allow_description=None, affordance=None, templates=(), **kwargs):
        # BUG FIX: the 'templates' default was a mutable list ([]), a classic
        # shared-default pitfall.  It is only stored and iterated (never
        # mutated), so the immutable () default is behaviorally identical.
        super().__init__(*args, **kwargs)
        self.allow_name = allow_name
        self.allow_description = allow_description
        self._affordance = affordance
        self._templates = templates
        # Lazily chosen template content; see _set_template_content().
        self._template_name = None
        self._template_description = None
        # CallableList of name-changed callbacks; created on first registration.
        self._on_name_changed = None

    def get_template_name_and_description(self):
        """Return (template_name, template_description), honoring owner overrides.

        The owner may override either piece with the DEFAULT sentinel meaning
        'no override'; otherwise the randomly chosen tuned template is used.
        """
        if self._template_name is None or self._template_description is None:
            self._set_template_content()
        template_name, template_description = self.owner.get_template_content_overrides()
        template_name = template_name if template_name is not DEFAULT else self._template_name
        template_description = template_description if template_description is not DEFAULT else self._template_description
        return (
         template_name, template_description)

    def _set_template_content(self):
        # Pick one tuned template at random; the choice sticks for this
        # component instance once made.
        if self._templates:
            selected_template = random.choice(self._templates)
            self._template_name = selected_template.template_name
            self._template_description = selected_template.template_description

    def save(self, persistence_master_message):
        """Append a PersistableNameComponent entry if any custom text is set."""
        if self.owner.custom_name is None and self.owner.custom_description is None:
            return
        persistable_data = protocols.PersistenceMaster.PersistableData()
        persistable_data.type = protocols.PersistenceMaster.PersistableData.NameComponent
        name_component_data = persistable_data.Extensions[protocols.PersistableNameComponent.persistable_data]
        if self.owner.custom_name is not None:
            name_component_data.name = self.owner.custom_name
        if self.owner.custom_description is not None:
            name_component_data.description = self.owner.custom_description
        persistence_master_message.data.extend([persistable_data])

    def load(self, persistable_data):
        """Restore custom name/description and refresh the owner's tooltip."""
        name_component_data = persistable_data.Extensions[protocols.PersistableNameComponent.persistable_data]
        if name_component_data.HasField('name'):
            self.owner.custom_name = self._get_filtered_text(name_component_data.name)
            self.owner.update_tooltip_field((TooltipFieldsComplete.custom_name), (self.owner.custom_name), priority=NAME_COMPONENT_TOOLTIP_PRIORITY)
        if name_component_data.HasField('description'):
            self.owner.custom_description = self._get_filtered_text(name_component_data.description)
            self.owner.update_tooltip_field((TooltipFieldsComplete.custom_description), (self.owner.custom_description), priority=NAME_COMPONENT_TOOLTIP_PRIORITY)
        self.owner.update_object_tooltip()

    def _get_filtered_text(self, text):
        # Gallery objects carry text authored by other players, so it must
        # pass the profanity filter before local display.
        if self.owner.is_from_gallery:
            _, filtered_text = profanity.check(text)
            return filtered_text
        return text

    @componentmethod_with_fallback(lambda : False)
    def has_custom_name(self):
        """Return True if the owner has a non-empty custom name."""
        return bool(self.owner.custom_name)

    @componentmethod_with_fallback(lambda : False)
    def has_custom_description(self):
        """Return True if the owner has a non-empty custom description."""
        return bool(self.owner.custom_description)

    @componentmethod
    def set_custom_name(self, name, actor_sim_id=None):
        """Set (or clear, when name is falsy) the custom name.

        Returns True on success, False when naming is disallowed by tuning.
        """
        if not self.allow_name:
            return False
        self.owner.custom_name = name if name else None
        if isinstance(self.owner, ClientObjectMixin):
            self.owner.update_tooltip_field((TooltipFieldsComplete.custom_name), name, priority=NAME_COMPONENT_TOOLTIP_PRIORITY, should_update=True)
        if actor_sim_id is not None:
            # Keep object-relationship display names in sync with the rename.
            if services.relationship_service().get_mapped_tag_set_of_id(self.owner.definition.id):
                services.relationship_service().update_object_type_name(name, actor_sim_id, self.owner.definition.id, self.owner)
        self._call_name_changed_callback()
        return True

    @componentmethod
    def remove_custom_name(self):
        """Clear the custom name; warn if tuning disallows naming."""
        if not self.set_custom_name(''):
            logger.warn('Failed to reset Custom Name on {}. Please check Allow Name on Name Component.', self.owner)

    @componentmethod
    def set_custom_description(self, description):
        """Set (or clear, when falsy) the custom description.

        Returns True on success, False when descriptions are disallowed.
        """
        if not self.allow_description:
            return False
        self.owner.custom_description = description if description else None
        self._call_name_changed_callback()
        if isinstance(self.owner, ClientObjectMixin):
            self.owner.update_tooltip_field((TooltipFieldsComplete.custom_description), description, priority=NAME_COMPONENT_TOOLTIP_PRIORITY, should_update=True)
        return True

    @componentmethod
    def remove_custom_description(self):
        """Clear the custom description; warn if tuning disallows it."""
        if not self.set_custom_description(''):
            logger.warn('Failed to reset Custom Description on {}. Please check Allow Description on Name Component.', self.owner)

    @componentmethod_with_fallback(lambda *_, **__: None)
    def add_name_changed_callback(self, callback):
        """Register a callback invoked whenever name/description changes."""
        if self._on_name_changed is None:
            self._on_name_changed = sims4.callback_utils.CallableList()
        self._on_name_changed.append(callback)

    @componentmethod_with_fallback(lambda *_, **__: None)
    def remove_name_changed_callback(self, callback):
        """Unregister a name-changed callback; drop the list once empty."""
        if self._on_name_changed is not None:
            if callback in self._on_name_changed:
                self._on_name_changed.remove(callback)
            if not self._on_name_changed:
                self._on_name_changed = None

    def _call_name_changed_callback(self):
        if self._on_name_changed is not None:
            self._on_name_changed()

    def component_super_affordances_gen(self, **kwargs):
        # Expose the tuned rename affordance, or the global default.
        yield self._affordance or self.DEFAULT_AFFORDANCE

    def component_interactable_gen(self):
        yield self

    def populate_localization_token(self, token):
        """Copy custom name/description onto a localization token."""
        if self.owner.custom_name is not None:
            token.custom_name = self.owner.custom_name
        if self.owner.custom_description is not None:
            token.custom_description = self.owner.custom_description
class NameTransfer(XevtTriggeredElement, HasTunableFactory, AutoFactoryInit):
    """Interaction element that copies the custom name (and optionally the
    custom description) from one participant's NameComponent to another's.
    """
    FACTORY_TUNABLES = {'description':'Transfer name between two participants at the beginning/end of an interaction or on XEvent.',
     'participant_sending_name':TunableEnumEntry(description='\n The participant who has the name that is being transferred.\n ',
      tunable_type=ParticipantType,
      default=ParticipantType.Actor),
     'participant_receiving_name':TunableEnumEntry(description='\n The participant who is receiving the name being transferred.\n ',
      tunable_type=ParticipantType,
      default=ParticipantType.Object),
     'transfer_description':Tunable(description='\n If checked, the description will also be transferred along with the name.\n ',
      tunable_type=bool,
      default=True)}

    def _do_behavior(self):
        """Perform the transfer when this element triggers."""
        sender = self.interaction.get_participant(self.participant_sending_name)
        receiver = self.interaction.get_participant(self.participant_receiving_name)
        if sender is None or receiver is None:
            logger.error('Cannot transfer name between None participants. Sender: {}, Receiver: {}, Interaction: {}'.format(sender, receiver, self.interaction), owner='rmccord')
            return
        sender_name_component = sender.name_component
        receiver_name_component = receiver.name_component
        if sender_name_component is None:
            # BUG FIX (robustness): only the receiver was checked before, so a
            # sender without a name component raised AttributeError below.
            logger.error('Sender of Name Transfer does not have a Name Component. Sender: {}, Interaction: {}'.format(sender, self.interaction), owner='rmccord')
            return
        if receiver_name_component is None:
            # BUG FIX: the old message formatted (sender, receiver, interaction)
            # into only two {} placeholders, logging the sender as the receiver
            # (extra positional args to str.format are silently ignored).
            logger.error('Receiver of Name Transfer does not have a Name Component. Receiver: {}, Interaction: {}'.format(receiver, self.interaction), owner='rmccord')
            return
        if sender_name_component.has_custom_name():
            receiver_name_component.set_custom_name(sender.custom_name)
        if self.transfer_description:
            if sender_name_component.has_custom_description():
                receiver_name_component.set_custom_description(sender.custom_description)
class NameResetLootOp(BaseLootOperation):
    """Loot operation that clears the custom name and/or description stored on
    the subject's NameComponent.
    """
    FACTORY_TUNABLES = {'reset_name':Tunable(description='\n If checked, it will reset the custom name of the name component.\n ',
      tunable_type=bool,
      default=True),
     'reset_description':Tunable(description='\n If checked, it will reset the custom description of the name \n component.\n ',
      tunable_type=bool,
      default=False)}

    def __init__(self, reset_name, reset_description, **kwargs):
        super().__init__(**kwargs)
        self.reset_name = reset_name
        self.reset_description = reset_description

    def _apply_to_subject_and_target(self, subject, target, resolver):
        """Clear the tuned fields on the subject; target is unused here."""
        if subject is None:
            logger.error('Invalid subject specified for this loot operation. Please fix {} in tuning.', self)
            return
        if subject.name_component is None:
            # BUG FIX: the message previously said 'object relationship
            # component' (copy/paste from another loot op); the missing
            # component checked here is the name component.
            logger.error('Subject {} has no name component. Please fix {} in tuning.', subject, self)
            return
        if self.reset_name:
            subject.remove_custom_name()
        if self.reset_description:
            subject.remove_custom_description()
class TransferNameLootOp(BaseTargetedLootOperation):
    """Loot op that copies the custom name/description from the subject
    participant's NameComponent to the target's, optionally clearing the
    copied fields on the subject afterwards.
    """
    FACTORY_TUNABLES = {'transfer_name':Tunable(description='\n If checked, it will transfer the custom name of the name component\n from the subject to the target.\n ',
      tunable_type=bool,
      default=True),
     'transfer_description':Tunable(description='\n If checked, it will transfer the custom description of the name \n component from the subject to the target.\n ',
      tunable_type=bool,
      default=True),
     'clear_subject_name':Tunable(description="\n If False, the subject's name will not be cleared. If True, then\n the subject's name will be cleared. This will only happen if\n transfer name is set to True. \n ",
      tunable_type=bool,
      default=False),
     'clear_subject_description':Tunable(description="\n If False, the subject's description will not be cleared. If True, then\n the subject's description will be cleared. This will only happen if\n transfer description is set to True.\n ",
      tunable_type=bool,
      default=False)}

    def __init__(self, *args, transfer_name, transfer_description, clear_subject_name, clear_subject_description, **kwargs):
        super().__init__(*args, **kwargs)
        self._transfer_name = transfer_name
        self._transfer_description = transfer_description
        self._clear_subject_name = clear_subject_name
        self._clear_subject_description = clear_subject_description

    def _apply_to_subject_and_target(self, subject, target, resolver):
        """Validate both participants' name components, then transfer."""
        if subject is None:
            logger.error("The Transfer Name Loot tuned on: '{}' has a subject participant of None value.", self)
            return
        subject_name_component = subject.get_component(types.NAME_COMPONENT)
        if subject_name_component is None:
            logger.error("The Transfer Name Loot tuned on:'{}' has a subject with no name component.", self)
            return
        if target is None:
            logger.error("The Transfer Name Loot tuned on: '{}' has a target participant of None value.", self)
            return
        target_name_component = target.get_component(types.NAME_COMPONENT)
        if target_name_component is None:
            logger.error("The Transfer Name Loot tuned on: '{}' has a target with no name component", self)
            return
        if self._transfer_name:
            target_name_component.remove_custom_name()
            if subject_name_component.has_custom_name():
                target_name_component.set_custom_name(subject.custom_name)
                if self._clear_subject_name:
                    subject_name_component.remove_custom_name()
        # BUG FIX: this branch was 'elif', which made description transfer
        # dead code whenever transfer_name ran (both flags default to True).
        # The tunable descriptions present the two flags as independent.
        if self._transfer_description:
            target_name_component.remove_custom_description()
            if subject_name_component.has_custom_description():
                target_name_component.set_custom_description(subject.custom_description)
                if self._clear_subject_description:
                    subject_name_component.remove_custom_description()
class SetNameFromObjectRelationship(BaseTargetedLootOperation):
    """Loot operation that renames the target object after the object
    relationship held by the object's owning Sim, if such a relationship
    with a name exists.
    """

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Both components are required: ownership identifies which Sim's
        # relationship to look up; the name component receives the result.
        ownable_component = target.get_component(types.OWNABLE_COMPONENT)
        name_component = target.get_component(types.NAME_COMPONENT)
        if ownable_component is not None and name_component is not None:
            sim_owner_id = ownable_component.get_sim_owner_id()
            obj_def_id = target.definition.id
            relationship_service = services.relationship_service()
            # Object relationships are keyed by a tag set mapped from the
            # object's definition id; no mapping means no rename.
            obj_tag_set = relationship_service.get_mapped_tag_set_of_id(obj_def_id)
            if obj_tag_set is not None:
                obj_relationship = relationship_service.get_object_relationship(sim_owner_id, obj_tag_set)
                # Only rename when the relationship exists and actually carries a name.
                if obj_relationship is not None and obj_relationship.get_object_rel_name() is not None:
                    name_component.set_custom_name(obj_relationship.get_object_rel_name())
        else:
            logger.error('Target {} needs to have both ownable and name components. Please fix {} in tuning.', target, self)
return | Scripts/simulation/objects/components/name_component.py | import random
from protocolbuffers import SimObjectAttributes_pb2 as protocols
from interactions import ParticipantType
from interactions.utils.interaction_elements import XevtTriggeredElement
from interactions.utils.loot_basic_op import BaseLootOperation, BaseTargetedLootOperation
from objects.client_object_mixin import ClientObjectMixin
from objects.components import Component, types, componentmethod, componentmethod_with_fallback
from objects.hovertip import TooltipFieldsComplete
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.tunable import HasTunableFactory, Tunable, TunableReference, OptionalTunable, TunableList, TunableTuple, AutoFactoryInit, TunableEnumEntry
from singletons import DEFAULT
import profanity, services, sims4.callback_utils, sims4.log
logger = sims4.log.Logger('NameComponent', default_owner='shipark')
NAME_COMPONENT_TOOLTIP_PRIORITY = 1
class NameComponent(Component, HasTunableFactory, component_name=types.NAME_COMPONENT, persistence_key=protocols.PersistenceMaster.PersistableData.NameComponent):
    """Component that lets an object carry a user-supplied custom name and/or
    description, persists them, surfaces them in tooltips, and exposes the
    rename affordance.
    """
    # Affordance offered by every NameComponent unless 'affordance' overrides it.
    DEFAULT_AFFORDANCE = TunableReference((services.affordance_manager()), description='The affordance generated by all NameComponents.')
    FACTORY_TUNABLES = {
        'allow_name': Tunable(
            description='\n If set, the user is allowed to give a custom name to this\n object.\n ',
            tunable_type=bool,
            default=True),
        'allow_description': Tunable(
            description='\n If set, the user is allowed to give a custom description to this\n object.\n ',
            tunable_type=bool,
            default=False),
        'affordance': OptionalTunable(
            tunable=TunableReference(
                description='\n The affordance provided by this Name component. Use it if you want\n to provide a custom affordance instead of the default one, which\n will not be used if this is set.\n ',
                manager=(services.affordance_manager())),
            disabled_name='use_default'),
        'templates': TunableList(
            description='\n The list of the template content for this component.\n ',
            tunable=TunableTuple(
                template_name=TunableLocalizedStringFactory(
                    description='\n The template name for the component.\n ',
                    allow_none=True),
                template_description=TunableLocalizedStringFactory(
                    description='\n The template description for the component.\n ',
                    allow_none=True)))}

    def __init__(self, *args, allow_name=None, allow_description=None, affordance=None, templates=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.allow_name = allow_name
        self.allow_description = allow_description
        self._affordance = affordance
        # BUGFIX: the default used to be a shared mutable list ([]); use None
        # as a sentinel so instances never share the default object.
        self._templates = [] if templates is None else templates
        # Lazily-selected template content (see _set_template_content).
        self._template_name = None
        self._template_description = None
        # CallableList of callbacks fired on name/description change; created lazily.
        self._on_name_changed = None

    def get_template_name_and_description(self):
        """Return (name, description) template content, honoring owner overrides."""
        if self._template_name is None or self._template_description is None:
            self._set_template_content()
        template_name, template_description = self.owner.get_template_content_overrides()
        # DEFAULT is a sentinel meaning "no override"; fall back to our template.
        template_name = template_name if template_name is not DEFAULT else self._template_name
        template_description = template_description if template_description is not DEFAULT else self._template_description
        return (
         template_name, template_description)

    def _set_template_content(self):
        # Pick one template at random; selection is sticky for this instance.
        if self._templates:
            selected_template = random.choice(self._templates)
            self._template_name = selected_template.template_name
            self._template_description = selected_template.template_description

    def save(self, persistence_master_message):
        """Append this component's custom name/description to the persistence message."""
        # Nothing to persist when both fields are unset.
        if self.owner.custom_name is None and self.owner.custom_description is None:
            return
        persistable_data = protocols.PersistenceMaster.PersistableData()
        persistable_data.type = protocols.PersistenceMaster.PersistableData.NameComponent
        name_component_data = persistable_data.Extensions[protocols.PersistableNameComponent.persistable_data]
        if self.owner.custom_name is not None:
            name_component_data.name = self.owner.custom_name
        if self.owner.custom_description is not None:
            name_component_data.description = self.owner.custom_description
        persistence_master_message.data.extend([persistable_data])

    def load(self, persistable_data):
        """Restore custom name/description from persisted data and refresh tooltips."""
        name_component_data = persistable_data.Extensions[protocols.PersistableNameComponent.persistable_data]
        if name_component_data.HasField('name'):
            self.owner.custom_name = self._get_filtered_text(name_component_data.name)
            self.owner.update_tooltip_field(TooltipFieldsComplete.custom_name, self.owner.custom_name, priority=NAME_COMPONENT_TOOLTIP_PRIORITY)
        if name_component_data.HasField('description'):
            self.owner.custom_description = self._get_filtered_text(name_component_data.description)
            self.owner.update_tooltip_field(TooltipFieldsComplete.custom_description, self.owner.custom_description, priority=NAME_COMPONENT_TOOLTIP_PRIORITY)
        self.owner.update_object_tooltip()

    def _get_filtered_text(self, text):
        # Gallery content is user-generated by strangers, so run it through
        # the profanity filter; locally-created text is trusted as-is.
        if self.owner.is_from_gallery:
            _, filtered_text = profanity.check(text)
            return filtered_text
        return text

    @componentmethod_with_fallback(lambda: False)
    def has_custom_name(self):
        """Return True if the owner currently has a non-empty custom name."""
        return bool(self.owner.custom_name)

    @componentmethod_with_fallback(lambda: False)
    def has_custom_description(self):
        """Return True if the owner currently has a non-empty custom description."""
        return bool(self.owner.custom_description)

    @componentmethod
    def set_custom_name(self, name, actor_sim_id=None):
        """Set (or clear, when name is falsy) the custom name.

        Returns True on success, False when naming is disallowed by tuning.
        """
        if not self.allow_name:
            return False
        self.owner.custom_name = name if name else None
        if isinstance(self.owner, ClientObjectMixin):
            self.owner.update_tooltip_field(TooltipFieldsComplete.custom_name, name, priority=NAME_COMPONENT_TOOLTIP_PRIORITY, should_update=True)
        if actor_sim_id is not None:
            # Keep the actor Sim's object relationship name in sync, if one exists.
            if services.relationship_service().get_mapped_tag_set_of_id(self.owner.definition.id):
                services.relationship_service().update_object_type_name(name, actor_sim_id, self.owner.definition.id, self.owner)
        self._call_name_changed_callback()
        return True

    @componentmethod
    def remove_custom_name(self):
        """Clear the custom name, logging when tuning disallows it."""
        if not self.set_custom_name(''):
            logger.warn('Failed to reset Custom Name on {}. Please check Allow Name on Name Component.', self.owner)

    @componentmethod
    def set_custom_description(self, description):
        """Set (or clear, when description is falsy) the custom description.

        Returns True on success, False when descriptions are disallowed by tuning.
        """
        if not self.allow_description:
            return False
        self.owner.custom_description = description if description else None
        self._call_name_changed_callback()
        if isinstance(self.owner, ClientObjectMixin):
            self.owner.update_tooltip_field(TooltipFieldsComplete.custom_description, description, priority=NAME_COMPONENT_TOOLTIP_PRIORITY, should_update=True)
        return True

    @componentmethod
    def remove_custom_description(self):
        """Clear the custom description, logging when tuning disallows it."""
        if not self.set_custom_description(''):
            logger.warn('Failed to reset Custom Description on {}. Please check Allow Description on Name Component.', self.owner)

    @componentmethod_with_fallback(lambda *_, **__: None)
    def add_name_changed_callback(self, callback):
        """Register a callback fired whenever the name or description changes."""
        if self._on_name_changed is None:
            self._on_name_changed = sims4.callback_utils.CallableList()
        self._on_name_changed.append(callback)

    @componentmethod_with_fallback(lambda *_, **__: None)
    def remove_name_changed_callback(self, callback):
        """Unregister a previously added name-changed callback."""
        if self._on_name_changed is not None:
            if callback in self._on_name_changed:
                self._on_name_changed.remove(callback)
            # Drop the CallableList entirely once it is empty.
            if not self._on_name_changed:
                self._on_name_changed = None

    def _call_name_changed_callback(self):
        if self._on_name_changed is not None:
            self._on_name_changed()

    def component_super_affordances_gen(self, **kwargs):
        # Custom affordance wins over the shared default when tuned.
        yield self._affordance or self.DEFAULT_AFFORDANCE

    def component_interactable_gen(self):
        yield self

    def populate_localization_token(self, token):
        """Copy custom name/description into a localization token."""
        if self.owner.custom_name is not None:
            token.custom_name = self.owner.custom_name
        if self.owner.custom_description is not None:
            token.custom_description = self.owner.custom_description
class NameTransfer(XevtTriggeredElement, HasTunableFactory, AutoFactoryInit):
    """Interaction element that copies the custom name (and optionally the
    description) from one participant's name component to another's at the
    tuned interaction timing.
    """
    FACTORY_TUNABLES = {
        'description': 'Transfer name between two participants at the beginning/end of an interaction or on XEvent.',
        'participant_sending_name': TunableEnumEntry(
            description='\n The participant who has the name that is being transferred.\n ',
            tunable_type=ParticipantType,
            default=ParticipantType.Actor),
        'participant_receiving_name': TunableEnumEntry(
            description='\n The participant who is receiving the name being transferred.\n ',
            tunable_type=ParticipantType,
            default=ParticipantType.Object),
        'transfer_description': Tunable(
            description='\n If checked, the description will also be transferred along with the name.\n ',
            tunable_type=bool,
            default=True)}

    def _do_behavior(self):
        sender = self.interaction.get_participant(self.participant_sending_name)
        receiver = self.interaction.get_participant(self.participant_receiving_name)
        if sender is None or receiver is None:
            logger.error(('Cannot transfer name between None participants. Sender: {}, Receiver: {}, Interaction: {}'.format(sender, receiver, self.interaction)), owner='rmccord')
            return
        sender_name_component = sender.name_component
        receiver_name_component = receiver.name_component
        if receiver_name_component is None:
            # BUGFIX: this message was formatted with (sender, receiver,
            # interaction) against two placeholders, so the 'Receiver' slot
            # printed the sender and 'Interaction' printed the receiver.
            logger.error(('Receiver of Name Transfer does not have a Name Component. Receiver: {}, Interaction: {}'.format(receiver, self.interaction)), owner='rmccord')
            return
        if sender_name_component is None:
            # Robustness: previously raised AttributeError below when the
            # sender had no name component.
            logger.error(('Sender of Name Transfer does not have a Name Component. Sender: {}, Interaction: {}'.format(sender, self.interaction)), owner='rmccord')
            return
        if sender_name_component.has_custom_name():
            receiver_name_component.set_custom_name(sender.custom_name)
        if self.transfer_description:
            if sender_name_component.has_custom_description():
                receiver_name_component.set_custom_description(sender.custom_description)
class NameResetLootOp(BaseLootOperation):
    """Loot operation that clears the custom name and/or custom description
    stored on the subject's name component.
    """
    FACTORY_TUNABLES = {
        'reset_name': Tunable(
            description='\n If checked, it will reset the custom name of the name component.\n ',
            tunable_type=bool,
            default=True),
        'reset_description': Tunable(
            description='\n If checked, it will reset the custom description of the name \n component.\n ',
            tunable_type=bool,
            default=False)}

    def __init__(self, reset_name, reset_description, **kwargs):
        """
        reset_name: if True, clear the subject's custom name.
        reset_description: if True, clear the subject's custom description.
        """
        super().__init__(**kwargs)
        self.reset_name = reset_name
        self.reset_description = reset_description

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Both guard failures are tuning errors, not expected runtime states.
        if subject is None:
            logger.error('Invalid subject specified for this loot operation. Please fix {} in tuning.', self)
            return
        if subject.name_component is None:
            # BUGFIX: message previously said "object relationship component"
            # (copy-paste from another loot op); this op requires a name component.
            logger.error('Subject {} has no name component. Please fix {} in tuning.', subject, self)
            return
        if self.reset_name:
            subject.remove_custom_name()
        if self.reset_description:
            subject.remove_custom_description()
class TransferNameLootOp(BaseTargetedLootOperation):
    """Loot operation that copies the custom name and/or description from the
    subject's name component onto the target's, optionally clearing the
    subject's values afterwards.
    """
    FACTORY_TUNABLES = {
        'transfer_name': Tunable(
            description='\n If checked, it will transfer the custom name of the name component\n from the subject to the target.\n ',
            tunable_type=bool,
            default=True),
        'transfer_description': Tunable(
            description='\n If checked, it will transfer the custom description of the name \n component from the subject to the target.\n ',
            tunable_type=bool,
            default=True),
        'clear_subject_name': Tunable(
            description="\n If False, the subject's name will not be cleared. If True, then\n the subject's name will be cleared. This will only happen if\n transfer name is set to True. \n ",
            tunable_type=bool,
            default=False),
        'clear_subject_description': Tunable(
            description="\n If False, the subject's description will not be cleared. If True, then\n the subject's description will be cleared. This will only happen if\n transfer description is set to True.\n ",
            tunable_type=bool,
            default=False)}

    def __init__(self, *args, transfer_name, transfer_description, clear_subject_name, clear_subject_description, **kwargs):
        super().__init__(*args, **kwargs)
        self._transfer_name = transfer_name
        self._transfer_description = transfer_description
        self._clear_subject_name = clear_subject_name
        self._clear_subject_description = clear_subject_description

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # All four guards are tuning errors: both participants must exist and
        # carry a name component.
        if subject is None:
            logger.error("The Transfer Name Loot tuned on: '{}' has a subject participant of None value.", self)
            return
        subject_name_component = subject.get_component(types.NAME_COMPONENT)
        if subject_name_component is None:
            logger.error("The Transfer Name Loot tuned on:'{}' has a subject with no name component.", self)
            return
        if target is None:
            logger.error("The Transfer Name Loot tuned on: '{}' has a target participant of None value.", self)
            return
        target_name_component = target.get_component(types.NAME_COMPONENT)
        if target_name_component is None:
            logger.error("The Transfer Name Loot tuned on: '{}' has a target with no name component", self)
            return
        if self._transfer_name:
            # The target's old name is cleared even if the subject has none:
            # a transfer replaces, it does not merge.
            target_name_component.remove_custom_name()
            if subject_name_component.has_custom_name():
                target_name_component.set_custom_name(subject.custom_name)
                if self._clear_subject_name:
                    subject_name_component.remove_custom_name()
        # BUGFIX: this was `elif`, which made name and description transfer
        # mutually exclusive even though they are tuned independently (both
        # default to True) — the description was never transferred whenever
        # the name was.
        if self._transfer_description:
            target_name_component.remove_custom_description()
            if subject_name_component.has_custom_description():
                target_name_component.set_custom_description(subject.custom_description)
                if self._clear_subject_description:
                    subject_name_component.remove_custom_description()
class SetNameFromObjectRelationship(BaseTargetedLootOperation):
    """Loot operation that renames the target object after the object
    relationship held by the object's owning Sim, if such a relationship
    with a name exists.
    """

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Both components are required: ownership identifies which Sim's
        # relationship to look up; the name component receives the result.
        ownable_component = target.get_component(types.OWNABLE_COMPONENT)
        name_component = target.get_component(types.NAME_COMPONENT)
        if ownable_component is not None and name_component is not None:
            sim_owner_id = ownable_component.get_sim_owner_id()
            obj_def_id = target.definition.id
            relationship_service = services.relationship_service()
            # Object relationships are keyed by a tag set mapped from the
            # object's definition id; no mapping means no rename.
            obj_tag_set = relationship_service.get_mapped_tag_set_of_id(obj_def_id)
            if obj_tag_set is not None:
                obj_relationship = relationship_service.get_object_relationship(sim_owner_id, obj_tag_set)
                # Only rename when the relationship exists and actually carries a name.
                if obj_relationship is not None and obj_relationship.get_object_rel_name() is not None:
                    name_component.set_custom_name(obj_relationship.get_object_rel_name())
        else:
            logger.error('Target {} needs to have both ownable and name components. Please fix {} in tuning.', target, self)
return | 0.570092 | 0.130175 |
import copy
import os
import warnings
import yaml
import topside as top
from topside.pdl import exceptions, utils
# imports is a dict of {package name: path to file}, used to locate files to load
# on requested import.
# TODO: make importing more efficent by having a YAML file storing the self.importable_files dict
# and updating whenever a Package is instantiated
# TODO: Make the importing code more unit testable
class Package:
    """Package represents a collection of files that make a coherent plumbing system."""

    def __init__(self, files, import_paths=None):
        """
        Initialize a Package from one or more Files.

        A Package should have all the components of a complete plumbing engine system; from
        here no additional information will make it into the PlumbingEngine. Once instantiated,
        a Package's PDL is cleaned and ready to use.

        Parameters
        ----------
        files: iterable
            files is an iterable (usually a list) of one or more Files whose contents should go
            into the Package.
        import_paths: iterable, optional
            directories scanned for importable PDL files; defaults to utils.default_paths.

        Raises
        ------
        exceptions.BadInputError
            if files is empty or an import cannot be resolved.
        """
        # BUGFIX: materialize once into a local list. Previously a generator
        # input was consumed by the len(list(files)) check (leaving the later
        # loops with nothing), and imported Files were appended directly onto
        # the caller's list, mutating it.
        files = list(files)
        if len(files) < 1:
            raise exceptions.BadInputError("cannot instantiate a Package with no Files")
        self.import_paths = copy.deepcopy(import_paths)
        if import_paths is None:
            self.import_paths = utils.default_paths
        self.importable_files = dict()
        # Collect every candidate file under every import path.
        candidate_paths = []
        for import_path in self.import_paths:
            try:
                filenames = os.listdir(import_path)
                candidate_paths.extend(os.path.join(import_path, fname) for fname in filenames)
            except FileNotFoundError:
                # BUGFIX: the accumulator used to be reset here, silently
                # discarding files already found under earlier, valid paths.
                warnings.warn(f"import directory {import_path} could not be found")
        # Map each importable package name to the set of paths providing it.
        for path in candidate_paths:
            try:
                # BUGFIX: the file handle was opened without ever being closed.
                with open(path, 'r') as f:
                    name = yaml.safe_load(f)['name']
            except (KeyError, TypeError):
                # KeyError: no top-level 'name'; TypeError: file did not parse
                # to a mapping at all.
                warnings.warn(path + " does not describe a pdl file")
                continue
            if name in self.importable_files:
                self.importable_files[name].add(path)
            else:
                self.importable_files[name] = {path}
        self.imports = []
        # dicts of {namespace: [entries]}, where entry is a PDL object. Organized like this to
        # reduce dict nesting; since this is a one time process it should be easy to keep
        # them synced.
        self.typedefs = {}
        self.component_dict = {}
        self.graph_dict = {}
        for file in files:
            # TODO(wendi): unused import detection
            self.imports.extend(copy.deepcopy(file.imports))
        # Resolve each distinct import into additional Files (local list only).
        for imp in set(self.imports):
            if imp not in self.importable_files:
                raise exceptions.BadInputError(f"invalid import: {imp}")
            for path in self.importable_files[imp]:
                files.append(top.File(path))
        # consolidate entry information from files
        for file in files:
            name = file.namespace
            if name not in self.typedefs:
                self.typedefs[name] = {}
                self.component_dict[name] = []
                self.graph_dict[name] = []
            self.typedefs[name].update(copy.deepcopy(file.typedefs))
            self.component_dict[name].extend(copy.deepcopy(file.components))
            self.graph_dict[name].extend(copy.deepcopy(file.graphs))
        self.clean()

    def clean(self):
        """Change user-friendly PDL shortcuts into the verbose PDL standard."""
        # preprocess typedefs: instantiate every component that invokes one
        for namespace in self.typedefs:
            for idx, component in enumerate(self.component_dict[namespace]):
                if 'type' in component:
                    self.component_dict[namespace][idx] = self.fill_typedef(namespace, component)
        # clean PDL shortcuts
        for namespace, entries in self.component_dict.items():
            for idx, component in enumerate(entries):
                # deal with single state shortcuts
                if 'states' not in component:
                    self.component_dict[namespace][idx] = unpack_single_state(component)
                # unpack single teq direction shortcuts
                self.component_dict[namespace][idx] = unpack_teq(component)
        self.rename()
        default_states = self.get_default_states()
        for namespace, entries in self.graph_dict.items():
            for entry in entries:
                self.fill_blank_states(entry, default_states)

    def rename(self):
        """Prepend any conflicting component names with namespace to disambiguate."""
        # every component name seen so far
        names = set()
        # names that occurred more than once (need namespace prepending)
        repeats = set()
        for entries in self.component_dict.values():
            for entry in entries:
                name = entry['name']
                if name in names:
                    repeats.add(name)
                else:
                    names.add(name)
        for namespace, entries in self.component_dict.items():
            for idx, entry in enumerate(entries):
                if entry['name'] in repeats:
                    self.component_dict[namespace][idx]['name'] = namespace + '.' + entry['name']

    def fill_typedef(self, namespace, component):
        """Fill in typedef template for components invoking a typedef."""
        name = component['type']
        component_name = component['name']
        if name.count('.') > 1:
            raise NotImplementedError(f"nested imports (in {name}) not supported yet")
        # handle imported components
        if '.' in name:
            # NOTE: we might eventually want to consider how well this will play with nested imports
            fields = name.split('.')
            namespace = fields[0]
            name = fields[-1]
        if name not in self.typedefs[namespace]:
            raise exceptions.BadInputError(f"invalid component type: {name}")
        # default arguments using syntax (parameter=default_value). BUGFIX:
        # split only on the first '=' so default values containing '=' survive.
        for param in self.typedefs[namespace][name]['params']:
            if '=' in param:
                param_name, default_value = param.split('=', 1)
                if param_name not in component['params']:
                    component['params'][param_name] = default_value
        params = component['params']
        # Substitution is plain text replacement on the YAML dump.
        # NOTE(review): a parameter name occurring as a substring elsewhere in
        # the template is also replaced — pre-existing limitation, unchanged.
        body = yaml.dump(self.typedefs[namespace][name])
        for var, value in params.items():
            body = body.replace(var, str(value))
        ret = yaml.safe_load(body)
        ret.pop('params')
        ret['name'] = component_name
        return ret

    def fill_blank_states(self, graph, default_states):
        """Fill in states field with default states if left blank."""
        if 'states' not in graph:
            graph['states'] = {}
        # set of components in this graph
        components = set()
        for node in graph['nodes'].values():
            for component in node['components']:
                components.add(component[0])
        for component in components:
            if component in graph['states']:
                continue
            if component not in default_states:
                raise exceptions.BadInputError(
                    f"missing component {component}: either a nonexistent or a "
                    "multi-state component")
            graph['states'][component] = default_states[component]

    def get_default_states(self):
        """Return a dict of {component_name: default state name} for one-state components"""
        # dict of {component:(namespace, index)} used to locate the component in self.component_dict
        places = {}
        for namespace in self.component_dict:
            for idx, component in enumerate(self.component_dict[namespace]):
                places[component['name']] = (namespace, idx)
        default_states = {}
        for component in self.components():
            namespace, idx = places[component['name']]
            component_states = self.component_dict[namespace][idx]['states']
            # Only a component with exactly one state has an unambiguous default.
            if len(component_states) == 1:
                default_states[component["name"]] = list(component_states.keys())[0]
        return default_states

    def components(self):
        """Return list of all component objects"""
        return [component
                for component_list in self.component_dict.values()
                for component in component_list]

    def graphs(self):
        """Return a list of all graph objects"""
        return [graph
                for graph_list in self.graph_dict.values()
                for graph in graph_list]
def unpack_teq(component):
    """Expand the single-value teq shorthand into the explicit
    {'fwd': teq, 'back': teq} form, in place, and return the component.
    """
    for state_edges in component['states'].values():
        for edge_name, teq_value in state_edges.items():
            # Entries that are already dicts are in verbose form; skip them.
            if not isinstance(teq_value, dict):
                state_edges[edge_name] = {'fwd': teq_value, 'back': teq_value}
    return component
def unpack_single_state(component):
    """Replace single-state shortcut with verbose states entry."""
    # The component is modified in place; 'ret' is an alias, not a copy.
    ret = component
    # Build the implicit single state (named 'default') from each edge's teq
    # value, then strip the shorthand 'teq' keys out of the edge specs.
    states = {'default': {}}
    for edge, specs in component['edges'].items():
        states['default'][edge] = specs['teq']
        ret['edges'][edge].pop('teq')
    ret['states'] = states
return ret | topside/pdl/package.py | import copy
import os
import warnings
import yaml
import topside as top
from topside.pdl import exceptions, utils
# imports is a dict of {package name: path to file}, used to locate files to load
# on requested import.
# TODO: make importing more efficent by having a YAML file storing the self.importable_files dict
# and updating whenever a Package is instantiated
# TODO: Make the importing code more unit testable
class Package:
    """Package represents a collection of files that make a coherent plumbing system."""

    def __init__(self, files, import_paths=None):
        """
        Initialize a Package from one or more Files.

        A Package should have all the components of a complete plumbing engine system; from
        here no additional information will make it into the PlumbingEngine. Once instantiated,
        a Package's PDL is cleaned and ready to use.

        Parameters
        ----------
        files: iterable
            files is an iterable (usually a list) of one or more Files whose contents should go
            into the Package.
        import_paths: iterable, optional
            directories scanned for importable PDL files; defaults to utils.default_paths.

        Raises
        ------
        exceptions.BadInputError
            if files is empty or an import cannot be resolved.
        """
        # BUGFIX: materialize once into a local list. Previously a generator
        # input was consumed by the len(list(files)) check (leaving the later
        # loops with nothing), and imported Files were appended directly onto
        # the caller's list, mutating it.
        files = list(files)
        if len(files) < 1:
            raise exceptions.BadInputError("cannot instantiate a Package with no Files")
        self.import_paths = copy.deepcopy(import_paths)
        if import_paths is None:
            self.import_paths = utils.default_paths
        self.importable_files = dict()
        # Collect every candidate file under every import path.
        candidate_paths = []
        for import_path in self.import_paths:
            try:
                filenames = os.listdir(import_path)
                candidate_paths.extend(os.path.join(import_path, fname) for fname in filenames)
            except FileNotFoundError:
                # BUGFIX: the accumulator used to be reset here, silently
                # discarding files already found under earlier, valid paths.
                warnings.warn(f"import directory {import_path} could not be found")
        # Map each importable package name to the set of paths providing it.
        for path in candidate_paths:
            try:
                # BUGFIX: the file handle was opened without ever being closed.
                with open(path, 'r') as f:
                    name = yaml.safe_load(f)['name']
            except (KeyError, TypeError):
                # KeyError: no top-level 'name'; TypeError: file did not parse
                # to a mapping at all.
                warnings.warn(path + " does not describe a pdl file")
                continue
            if name in self.importable_files:
                self.importable_files[name].add(path)
            else:
                self.importable_files[name] = {path}
        self.imports = []
        # dicts of {namespace: [entries]}, where entry is a PDL object. Organized like this to
        # reduce dict nesting; since this is a one time process it should be easy to keep
        # them synced.
        self.typedefs = {}
        self.component_dict = {}
        self.graph_dict = {}
        for file in files:
            # TODO(wendi): unused import detection
            self.imports.extend(copy.deepcopy(file.imports))
        # Resolve each distinct import into additional Files (local list only).
        for imp in set(self.imports):
            if imp not in self.importable_files:
                raise exceptions.BadInputError(f"invalid import: {imp}")
            for path in self.importable_files[imp]:
                files.append(top.File(path))
        # consolidate entry information from files
        for file in files:
            name = file.namespace
            if name not in self.typedefs:
                self.typedefs[name] = {}
                self.component_dict[name] = []
                self.graph_dict[name] = []
            self.typedefs[name].update(copy.deepcopy(file.typedefs))
            self.component_dict[name].extend(copy.deepcopy(file.components))
            self.graph_dict[name].extend(copy.deepcopy(file.graphs))
        self.clean()

    def clean(self):
        """Change user-friendly PDL shortcuts into the verbose PDL standard."""
        # preprocess typedefs: instantiate every component that invokes one
        for namespace in self.typedefs:
            for idx, component in enumerate(self.component_dict[namespace]):
                if 'type' in component:
                    self.component_dict[namespace][idx] = self.fill_typedef(namespace, component)
        # clean PDL shortcuts
        for namespace, entries in self.component_dict.items():
            for idx, component in enumerate(entries):
                # deal with single state shortcuts
                if 'states' not in component:
                    self.component_dict[namespace][idx] = unpack_single_state(component)
                # unpack single teq direction shortcuts
                self.component_dict[namespace][idx] = unpack_teq(component)
        self.rename()
        default_states = self.get_default_states()
        for namespace, entries in self.graph_dict.items():
            for entry in entries:
                self.fill_blank_states(entry, default_states)

    def rename(self):
        """Prepend any conflicting component names with namespace to disambiguate."""
        # every component name seen so far
        names = set()
        # names that occurred more than once (need namespace prepending)
        repeats = set()
        for entries in self.component_dict.values():
            for entry in entries:
                name = entry['name']
                if name in names:
                    repeats.add(name)
                else:
                    names.add(name)
        for namespace, entries in self.component_dict.items():
            for idx, entry in enumerate(entries):
                if entry['name'] in repeats:
                    self.component_dict[namespace][idx]['name'] = namespace + '.' + entry['name']

    def fill_typedef(self, namespace, component):
        """Fill in typedef template for components invoking a typedef."""
        name = component['type']
        component_name = component['name']
        if name.count('.') > 1:
            raise NotImplementedError(f"nested imports (in {name}) not supported yet")
        # handle imported components
        if '.' in name:
            # NOTE: we might eventually want to consider how well this will play with nested imports
            fields = name.split('.')
            namespace = fields[0]
            name = fields[-1]
        if name not in self.typedefs[namespace]:
            raise exceptions.BadInputError(f"invalid component type: {name}")
        # default arguments using syntax (parameter=default_value). BUGFIX:
        # split only on the first '=' so default values containing '=' survive.
        for param in self.typedefs[namespace][name]['params']:
            if '=' in param:
                param_name, default_value = param.split('=', 1)
                if param_name not in component['params']:
                    component['params'][param_name] = default_value
        params = component['params']
        # Substitution is plain text replacement on the YAML dump.
        # NOTE(review): a parameter name occurring as a substring elsewhere in
        # the template is also replaced — pre-existing limitation, unchanged.
        body = yaml.dump(self.typedefs[namespace][name])
        for var, value in params.items():
            body = body.replace(var, str(value))
        ret = yaml.safe_load(body)
        ret.pop('params')
        ret['name'] = component_name
        return ret

    def fill_blank_states(self, graph, default_states):
        """Fill in states field with default states if left blank."""
        if 'states' not in graph:
            graph['states'] = {}
        # set of components in this graph
        components = set()
        for node in graph['nodes'].values():
            for component in node['components']:
                components.add(component[0])
        for component in components:
            if component in graph['states']:
                continue
            if component not in default_states:
                raise exceptions.BadInputError(
                    f"missing component {component}: either a nonexistent or a "
                    "multi-state component")
            graph['states'][component] = default_states[component]

    def get_default_states(self):
        """Return a dict of {component_name: default state name} for one-state components"""
        # dict of {component:(namespace, index)} used to locate the component in self.component_dict
        places = {}
        for namespace in self.component_dict:
            for idx, component in enumerate(self.component_dict[namespace]):
                places[component['name']] = (namespace, idx)
        default_states = {}
        for component in self.components():
            namespace, idx = places[component['name']]
            component_states = self.component_dict[namespace][idx]['states']
            # Only a component with exactly one state has an unambiguous default.
            if len(component_states) == 1:
                default_states[component["name"]] = list(component_states.keys())[0]
        return default_states

    def components(self):
        """Return list of all component objects"""
        return [component
                for component_list in self.component_dict.values()
                for component in component_list]

    def graphs(self):
        """Return a list of all graph objects"""
        return [graph
                for graph_list in self.graph_dict.values()
                for graph in graph_list]
def unpack_teq(component):
    """Expand the single-value teq shorthand into the explicit
    {'fwd': teq, 'back': teq} form, in place, and return the component.
    """
    for state_edges in component['states'].values():
        for edge_name, teq_value in state_edges.items():
            # Entries that are already dicts are in verbose form; skip them.
            if not isinstance(teq_value, dict):
                state_edges[edge_name] = {'fwd': teq_value, 'back': teq_value}
    return component
def unpack_single_state(component):
    """Replace single-state shortcut with verbose states entry."""
    # The component is modified in place; 'ret' is an alias, not a copy.
    ret = component
    # Build the implicit single state (named 'default') from each edge's teq
    # value, then strip the shorthand 'teq' keys out of the edge specs.
    states = {'default': {}}
    for edge, specs in component['edges'].items():
        states['default'][edge] = specs['teq']
        ret['edges'][edge].pop('teq')
    ret['states'] = states
return ret | 0.39946 | 0.291825 |
import graphene
from lingvodoc.schema.gql_holders import (
LingvodocObjectType,
CompositeIdHolder,
CreatedAt,
MarkedForDeletion,
TypeHolder,
client_id_check,
acl_check_by_id,
ResponseError,
fetch_object,
LingvodocID,
TranslationHolder
)
from lingvodoc.models import (
TranslationGist as dbTranslationGist,
Client,
User as dbUser,
BaseGroup as dbBaseGroup,
Group as dbGroup,
ObjectTOC as dbObjectTOC,
DBSession,
TranslationAtom as dbTranslationAtom,
)
from lingvodoc.utils.creation import add_user_to_group
from lingvodoc.utils.verification import check_client_id
from lingvodoc.schema.gql_translationatom import TranslationAtom
class TranslationGist(LingvodocObjectType):
    """
    GraphQL type backed by the TranslationGist model: a group of translation
    atoms sharing one composite (client_id, object_id) key.

    #created_at | timestamp without time zone | NOT NULL
    #object_id | bigint | NOT NULL
    #client_id | bigint | NOT NULL
    #marked_for_deletion | boolean | NOT NULL
    #type | text |
    {"variables": {}, "query": "query QUERYNAME { translationgist(id:[578, 6]){id created_at}}" }
    """
    # SQLAlchemy model this GraphQL type wraps.
    dbType = dbTranslationGist
    # All translation atoms (per-locale strings) belonging to this gist.
    translationatoms = graphene.List(TranslationAtom)

    class Meta:
        interfaces = (CompositeIdHolder,
                      CreatedAt,
                      MarkedForDeletion,
                      TypeHolder,
                      TranslationHolder
                      )

    @fetch_object("translationatoms")  # TODO: fix that
    def resolve_translationatoms(self, info):
        # Wrap each DB atom in its GraphQL type and attach the already-loaded
        # DB object so downstream resolvers do not re-query it.
        result = list()
        for dbatom in self.dbObject.translationatom:
            atom = TranslationAtom(id=[dbatom.client_id, dbatom.object_id])
            atom.dbObject = dbatom
            result.append(atom)
        return result
class CreateTranslationGist(graphene.Mutation):
"""
example:
mutation {
create_translationgist(id: [949,22], type: "some type") {
translationgist {
id
type
}
triumph
}
}
(this example works)
returns:
{
"data": {
"create_translationgist": {
"translationgist": {
"id": [
1197,
206
],
"type": "some type"
},
"triumph": true
}
}
}
"""
class Arguments:
id = LingvodocID()
type = graphene.String(required=True)
translationgist = graphene.Field(TranslationGist)
triumph = graphene.Boolean()
@staticmethod
@client_id_check()
def mutate(root, info, **args):
type = args.get('type')
id = args.get('id')
client_id = id[0] if id else info.context["client_id"]
object_id = id[1] if id else None
client = DBSession.query(Client).filter_by(id=client_id).first()
user = DBSession.query(dbUser).filter_by(id=client.user_id).first()
dbtranslationgist = dbTranslationGist(client_id=client_id, object_id=object_id, type=type)
DBSession.add(dbtranslationgist)
DBSession.flush()
basegroups = list()
basegroups.append(DBSession.query(dbBaseGroup).filter_by(name="Can delete translationgist").first())
if not object_id:
groups = []
for base in basegroups:
group = dbGroup(subject_client_id=dbtranslationgist.client_id, subject_object_id=dbtranslationgist.object_id,
parent=base)
groups += [group]
for group in groups:
add_user_to_group(user, group)
translationgist = TranslationGist(id=[dbtranslationgist.client_id, dbtranslationgist.object_id])
translationgist.dbObject = dbtranslationgist
return CreateTranslationGist(translationgist=translationgist, triumph=True)
class DeleteTranslationGist(graphene.Mutation):
"""
example:
mutation {
delete_translationgist(id: [949,22]) {
translationgist {
id
}
triumph
}
}
now returns:
{
"delete_translationgist": {
"translationgist": {
"id": [
949,
22
]
},
"triumph": true
}
}
"""
class Arguments:
id = LingvodocID(required=True)
translationgist = graphene.Field(TranslationGist)
triumph = graphene.Boolean()
@staticmethod
@acl_check_by_id('delete', 'translations')
def mutate(root, info, **args):
id = args.get('id')
client_id, object_id= id
dbtranslationgist = DBSession.query(dbTranslationGist).filter_by(client_id=client_id, object_id=object_id).first()
if not dbtranslationgist or dbtranslationgist.marked_for_deletion:
raise ResponseError(message="No such translationgist in the system")
dbtranslationgist.marked_for_deletion = True
objecttoc = DBSession.query(dbObjectTOC).filter_by(client_id=dbtranslationgist.client_id,
object_id=dbtranslationgist.object_id).one()
objecttoc.marked_for_deletion = True
for translationatom in dbtranslationgist.translationatom:
translationatom.marked_for_deletion = True
objecttoc = DBSession.query(dbObjectTOC).filter_by(client_id=translationatom.client_id,
object_id=translationatom.object_id).one()
objecttoc.marked_for_deletion = True
translationgist = TranslationGist(id=[dbtranslationgist.client_id, dbtranslationgist.object_id])
translationgist.dbObject = dbtranslationgist
return DeleteTranslationGist(translationgist=translationgist, triumph=True) | lingvodoc/schema/gql_translationgist.py | import graphene
from lingvodoc.schema.gql_holders import (
LingvodocObjectType,
CompositeIdHolder,
CreatedAt,
MarkedForDeletion,
TypeHolder,
client_id_check,
acl_check_by_id,
ResponseError,
fetch_object,
LingvodocID,
TranslationHolder
)
from lingvodoc.models import (
TranslationGist as dbTranslationGist,
Client,
User as dbUser,
BaseGroup as dbBaseGroup,
Group as dbGroup,
ObjectTOC as dbObjectTOC,
DBSession,
TranslationAtom as dbTranslationAtom,
)
from lingvodoc.utils.creation import add_user_to_group
from lingvodoc.utils.verification import check_client_id
from lingvodoc.schema.gql_translationatom import TranslationAtom
class TranslationGist(LingvodocObjectType):
"""
#created_at | timestamp without time zone | NOT NULL
#object_id | bigint | NOT NULL
#client_id | bigint | NOT NULL
#marked_for_deletion | boolean | NOT NULL
#type | text |
{"variables": {}, "query": "query QUERYNAME { translationgist(id:[578, 6]){id created_at}}" }
"""
dbType = dbTranslationGist
translationatoms = graphene.List(TranslationAtom)
class Meta:
interfaces = (CompositeIdHolder,
CreatedAt,
MarkedForDeletion,
TypeHolder,
TranslationHolder
)
@fetch_object("translationatoms") # TODO: fix that
def resolve_translationatoms(self, info):
result = list()
for dbatom in self.dbObject.translationatom:
atom = TranslationAtom(id=[dbatom.client_id, dbatom.object_id])
atom.dbObject = dbatom
result.append(atom)
return result
class CreateTranslationGist(graphene.Mutation):
"""
example:
mutation {
create_translationgist(id: [949,22], type: "some type") {
translationgist {
id
type
}
triumph
}
}
(this example works)
returns:
{
"data": {
"create_translationgist": {
"translationgist": {
"id": [
1197,
206
],
"type": "some type"
},
"triumph": true
}
}
}
"""
class Arguments:
id = LingvodocID()
type = graphene.String(required=True)
translationgist = graphene.Field(TranslationGist)
triumph = graphene.Boolean()
@staticmethod
@client_id_check()
def mutate(root, info, **args):
type = args.get('type')
id = args.get('id')
client_id = id[0] if id else info.context["client_id"]
object_id = id[1] if id else None
client = DBSession.query(Client).filter_by(id=client_id).first()
user = DBSession.query(dbUser).filter_by(id=client.user_id).first()
dbtranslationgist = dbTranslationGist(client_id=client_id, object_id=object_id, type=type)
DBSession.add(dbtranslationgist)
DBSession.flush()
basegroups = list()
basegroups.append(DBSession.query(dbBaseGroup).filter_by(name="Can delete translationgist").first())
if not object_id:
groups = []
for base in basegroups:
group = dbGroup(subject_client_id=dbtranslationgist.client_id, subject_object_id=dbtranslationgist.object_id,
parent=base)
groups += [group]
for group in groups:
add_user_to_group(user, group)
translationgist = TranslationGist(id=[dbtranslationgist.client_id, dbtranslationgist.object_id])
translationgist.dbObject = dbtranslationgist
return CreateTranslationGist(translationgist=translationgist, triumph=True)
class DeleteTranslationGist(graphene.Mutation):
"""
example:
mutation {
delete_translationgist(id: [949,22]) {
translationgist {
id
}
triumph
}
}
now returns:
{
"delete_translationgist": {
"translationgist": {
"id": [
949,
22
]
},
"triumph": true
}
}
"""
class Arguments:
id = LingvodocID(required=True)
translationgist = graphene.Field(TranslationGist)
triumph = graphene.Boolean()
@staticmethod
@acl_check_by_id('delete', 'translations')
def mutate(root, info, **args):
id = args.get('id')
client_id, object_id= id
dbtranslationgist = DBSession.query(dbTranslationGist).filter_by(client_id=client_id, object_id=object_id).first()
if not dbtranslationgist or dbtranslationgist.marked_for_deletion:
raise ResponseError(message="No such translationgist in the system")
dbtranslationgist.marked_for_deletion = True
objecttoc = DBSession.query(dbObjectTOC).filter_by(client_id=dbtranslationgist.client_id,
object_id=dbtranslationgist.object_id).one()
objecttoc.marked_for_deletion = True
for translationatom in dbtranslationgist.translationatom:
translationatom.marked_for_deletion = True
objecttoc = DBSession.query(dbObjectTOC).filter_by(client_id=translationatom.client_id,
object_id=translationatom.object_id).one()
objecttoc.marked_for_deletion = True
translationgist = TranslationGist(id=[dbtranslationgist.client_id, dbtranslationgist.object_id])
translationgist.dbObject = dbtranslationgist
return DeleteTranslationGist(translationgist=translationgist, triumph=True) | 0.282196 | 0.128552 |
from copy import deepcopy
from typing import List
from operator_generator_strategies.base_generator_strategy import BaseGeneratorStrategy
from operators.filter_operator import FilterOperator
from operators.map_operator import MapOperator
from utils.contracts import Operator, Schema, FieldAssignmentExpression, FieldAccessExpression, ConstantExpression, \
ArithmeticOperators, ArithmeticExpression, LogicalExpression, LogicalOperators
from utils.utils import random_list_element, random_int_between, random_field_name
class FilterSubstituteMapExpressionGeneratorStrategy(BaseGeneratorStrategy):
def __init__(self):
self._mapFieldName = None
self._contValue = None
self._mapAssignmentFieldName = None
self._arithOp = None
self._logicalOp = None
self._schema = None
def generate(self, schema: Schema) -> List[Operator]:
"""
This method is responsible for generating two map operators for simulating complex arithmetic expressions
Example: map(y = 30).filter(x > 30) vs map(y = 30).filter(x > y)
:param schema: schema to be used for generating the operators
:return: the list of operators
"""
if len(schema.get_numerical_fields()) == 1:
print("Skipping FilterSubstituteMapExpressionGeneratorStrategy as only 1 field is present in the schema")
return []
if not self._mapFieldName:
self.__initializeFiltersWithSubstitutedMapExpression(schema)
if not self.validation(schema):
self.update_columns(schema)
baseMap = MapOperator(FieldAssignmentExpression(FieldAccessExpression(self._mapFieldName),
ConstantExpression(str(self._contValue))), schema)
arithExpression1 = ArithmeticExpression(FieldAccessExpression(self._mapAssignmentFieldName),
FieldAccessExpression(self._mapFieldName), self._arithOp)
arithExpression2 = ArithmeticExpression(FieldAccessExpression(self._mapAssignmentFieldName),
ConstantExpression(str(self._contValue)), self._arithOp)
followUpFilter1 = FilterOperator(
LogicalExpression(FieldAccessExpression(self._mapAssignmentFieldName), arithExpression1, self._logicalOp),
schema)
followUpFilter2 = FilterOperator(
LogicalExpression(FieldAccessExpression(self._mapAssignmentFieldName), arithExpression2, self._logicalOp),
schema)
_, followUpMap = random_list_element([followUpFilter1, followUpFilter2])
return [baseMap, followUpMap]
def __initializeFiltersWithSubstitutedMapExpression(self, schema: Schema):
schemaCopy = deepcopy(schema)
numFields = schemaCopy.get_numerical_fields()
self._mapFieldName = random_field_name(numFields)
numFields.remove(self._mapFieldName)
self._contValue = random_int_between(1, 10)
self._mapAssignmentFieldName = random_field_name(numFields)
_, self._arithOp = random_list_element(list(ArithmeticOperators))
_, self._logicalOp = random_list_element(
[LogicalOperators.lt, LogicalOperators.gt, LogicalOperators.lte, LogicalOperators.gte])
self._schema = schema
def validation(self, schema: Schema) -> bool:
if self._mapFieldName not in schema.get_numerical_fields():
return False
return True
def update_columns(self, schema: Schema):
for key, value in schema.get_field_name_mapping().items():
if value == self._schema.get_field_name_mapping()[self._mapFieldName]:
self._mapFieldName = key
break
for key, value in schema.get_field_name_mapping().items():
if value == self._schema.get_field_name_mapping()[self._mapAssignmentFieldName]:
self._mapAssignmentFieldName = key
break
self._schema = schema | operator_generator_strategies/equivalent_operator_strategies/filter_substitute_map_expression_startegy.py | from copy import deepcopy
from typing import List
from operator_generator_strategies.base_generator_strategy import BaseGeneratorStrategy
from operators.filter_operator import FilterOperator
from operators.map_operator import MapOperator
from utils.contracts import Operator, Schema, FieldAssignmentExpression, FieldAccessExpression, ConstantExpression, \
ArithmeticOperators, ArithmeticExpression, LogicalExpression, LogicalOperators
from utils.utils import random_list_element, random_int_between, random_field_name
class FilterSubstituteMapExpressionGeneratorStrategy(BaseGeneratorStrategy):
def __init__(self):
self._mapFieldName = None
self._contValue = None
self._mapAssignmentFieldName = None
self._arithOp = None
self._logicalOp = None
self._schema = None
def generate(self, schema: Schema) -> List[Operator]:
"""
This method is responsible for generating two map operators for simulating complex arithmetic expressions
Example: map(y = 30).filter(x > 30) vs map(y = 30).filter(x > y)
:param schema: schema to be used for generating the operators
:return: the list of operators
"""
if len(schema.get_numerical_fields()) == 1:
print("Skipping FilterSubstituteMapExpressionGeneratorStrategy as only 1 field is present in the schema")
return []
if not self._mapFieldName:
self.__initializeFiltersWithSubstitutedMapExpression(schema)
if not self.validation(schema):
self.update_columns(schema)
baseMap = MapOperator(FieldAssignmentExpression(FieldAccessExpression(self._mapFieldName),
ConstantExpression(str(self._contValue))), schema)
arithExpression1 = ArithmeticExpression(FieldAccessExpression(self._mapAssignmentFieldName),
FieldAccessExpression(self._mapFieldName), self._arithOp)
arithExpression2 = ArithmeticExpression(FieldAccessExpression(self._mapAssignmentFieldName),
ConstantExpression(str(self._contValue)), self._arithOp)
followUpFilter1 = FilterOperator(
LogicalExpression(FieldAccessExpression(self._mapAssignmentFieldName), arithExpression1, self._logicalOp),
schema)
followUpFilter2 = FilterOperator(
LogicalExpression(FieldAccessExpression(self._mapAssignmentFieldName), arithExpression2, self._logicalOp),
schema)
_, followUpMap = random_list_element([followUpFilter1, followUpFilter2])
return [baseMap, followUpMap]
def __initializeFiltersWithSubstitutedMapExpression(self, schema: Schema):
schemaCopy = deepcopy(schema)
numFields = schemaCopy.get_numerical_fields()
self._mapFieldName = random_field_name(numFields)
numFields.remove(self._mapFieldName)
self._contValue = random_int_between(1, 10)
self._mapAssignmentFieldName = random_field_name(numFields)
_, self._arithOp = random_list_element(list(ArithmeticOperators))
_, self._logicalOp = random_list_element(
[LogicalOperators.lt, LogicalOperators.gt, LogicalOperators.lte, LogicalOperators.gte])
self._schema = schema
def validation(self, schema: Schema) -> bool:
if self._mapFieldName not in schema.get_numerical_fields():
return False
return True
def update_columns(self, schema: Schema):
for key, value in schema.get_field_name_mapping().items():
if value == self._schema.get_field_name_mapping()[self._mapFieldName]:
self._mapFieldName = key
break
for key, value in schema.get_field_name_mapping().items():
if value == self._schema.get_field_name_mapping()[self._mapAssignmentFieldName]:
self._mapAssignmentFieldName = key
break
self._schema = schema | 0.841988 | 0.392511 |
import os
import nuke
from avalon.nuke import lib as anlib
from pype.api import resources
def set_context_favorites(favorites={}):
""" Addig favorite folders to nuke's browser
Argumets:
favorites (dict): couples of {name:path}
"""
icon_path = resources.get_resource("icons", "folder-favorite3.png")
for name, path in favorites.items():
nuke.addFavoriteDir(
name,
path,
nuke.IMAGE | nuke.SCRIPT | nuke.GEO,
icon=icon_path)
def get_node_outputs(node):
'''
Return a dictionary of the nodes and pipes that are connected to node
'''
dep_dict = {}
dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)
for d in dependencies:
dep_dict[d] = []
for i in range(d.inputs()):
if d.input(i) == node:
dep_dict[d].append(i)
return dep_dict
def is_node_gizmo(node):
'''
return True if node is gizmo
'''
return 'gizmo_file' in node.knobs()
def gizmo_is_nuke_default(gizmo):
'''Check if gizmo is in default install path'''
plug_dir = os.path.join(os.path.dirname(
nuke.env['ExecutablePath']), 'plugins')
return gizmo.filename().startswith(plug_dir)
def bake_gizmos_recursively(in_group=nuke.Root()):
"""Converting a gizmo to group
Argumets:
is_group (nuke.Node)[optonal]: group node or all nodes
"""
# preserve selection after all is done
with anlib.maintained_selection():
# jump to the group
with in_group:
for node in nuke.allNodes():
if is_node_gizmo(node) and not gizmo_is_nuke_default(node):
with node:
outputs = get_node_outputs(node)
group = node.makeGroup()
# Reconnect inputs and outputs if any
if outputs:
for n, pipes in outputs.items():
for i in pipes:
n.setInput(i, group)
for i in range(node.inputs()):
group.setInput(i, node.input(i))
# set node position and name
group.setXYpos(node.xpos(), node.ypos())
name = node.name()
nuke.delete(node)
group.setName(name)
node = group
if node.Class() == "Group":
bake_gizmos_recursively(node) | pype/hosts/nuke/utils.py | import os
import nuke
from avalon.nuke import lib as anlib
from pype.api import resources
def set_context_favorites(favorites={}):
""" Addig favorite folders to nuke's browser
Argumets:
favorites (dict): couples of {name:path}
"""
icon_path = resources.get_resource("icons", "folder-favorite3.png")
for name, path in favorites.items():
nuke.addFavoriteDir(
name,
path,
nuke.IMAGE | nuke.SCRIPT | nuke.GEO,
icon=icon_path)
def get_node_outputs(node):
'''
Return a dictionary of the nodes and pipes that are connected to node
'''
dep_dict = {}
dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)
for d in dependencies:
dep_dict[d] = []
for i in range(d.inputs()):
if d.input(i) == node:
dep_dict[d].append(i)
return dep_dict
def is_node_gizmo(node):
'''
return True if node is gizmo
'''
return 'gizmo_file' in node.knobs()
def gizmo_is_nuke_default(gizmo):
'''Check if gizmo is in default install path'''
plug_dir = os.path.join(os.path.dirname(
nuke.env['ExecutablePath']), 'plugins')
return gizmo.filename().startswith(plug_dir)
def bake_gizmos_recursively(in_group=nuke.Root()):
"""Converting a gizmo to group
Argumets:
is_group (nuke.Node)[optonal]: group node or all nodes
"""
# preserve selection after all is done
with anlib.maintained_selection():
# jump to the group
with in_group:
for node in nuke.allNodes():
if is_node_gizmo(node) and not gizmo_is_nuke_default(node):
with node:
outputs = get_node_outputs(node)
group = node.makeGroup()
# Reconnect inputs and outputs if any
if outputs:
for n, pipes in outputs.items():
for i in pipes:
n.setInput(i, group)
for i in range(node.inputs()):
group.setInput(i, node.input(i))
# set node position and name
group.setXYpos(node.xpos(), node.ypos())
name = node.name()
nuke.delete(node)
group.setName(name)
node = group
if node.Class() == "Group":
bake_gizmos_recursively(node) | 0.388386 | 0.259227 |
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ReservationList(ListResource):

    def __init__(self, version, workspace_sid, task_sid):
        """
        Initialize the ReservationList.

        :param Version version: Version that contains the resource
        :param workspace_sid: The workspace_sid
        :param task_sid: The task_sid

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
        """
        super(ReservationList, self).__init__(version)

        # Path values shared by every request issued from this list resource.
        self._solution = {'workspace_sid': workspace_sid, 'task_sid': task_sid}
        self._uri = '/Workspaces/{workspace_sid}/Tasks/{task_sid}/Reservations'.format(**self._solution)

    def stream(self, reservation_status=values.unset, limit=None, page_size=None):
        """
        Stream ReservationInstance records from the API as a generator.

        Records are loaded lazily, page by page, until ``limit`` results have
        been yielded or the listing is exhausted, so memory use stays bounded.

        :param ReservationInstance.Status reservation_status: The reservation_status
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        first_page = self.page(
            reservation_status=reservation_status,
            page_size=limits['page_size'],
        )
        return self._version.stream(first_page, limits['limit'], limits['page_limit'])

    def list(self, reservation_status=values.unset, limit=None, page_size=None):
        """
        List ReservationInstance records from the API as a list.

        Unlike stream(), this is eager: it loads up to ``limit`` records into
        memory before returning.

        :param ReservationInstance.Status reservation_status: The reservation_status
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
        """
        records = self.stream(
            reservation_status=reservation_status,
            limit=limit,
            page_size=page_size,
        )
        return list(records)

    def page(self, reservation_status=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """
        Retrieve a single page of ReservationInstance records from the API.
        The request is executed immediately.

        :param ReservationInstance.Status reservation_status: The reservation_status
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
        """
        query = values.of({
            'ReservationStatus': reservation_status,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page('GET', self._uri, params=query)
        return ReservationPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of ReservationInstance records from the API.
        The request is executed immediately.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
        """
        response = self._version.domain.twilio.request('GET', target_url)
        return ReservationPage(self._version, response, self._solution)

    def get(self, sid):
        """
        Construct a ReservationContext for the reservation identified by ``sid``.

        :param sid: The sid

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
        """
        return ReservationContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            task_sid=self._solution['task_sid'],
            sid=sid,
        )

    def __call__(self, sid):
        """
        Construct a ReservationContext (alias for :meth:`get`).

        :param sid: The sid

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
        """
        return self.get(sid)

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Taskrouter.V1.ReservationList>'
class ReservationPage(Page):

    def __init__(self, version, response, solution):
        """
        Initialize the ReservationPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path values ('workspace_sid', 'task_sid') carried over
                         from the list resource that fetched this page

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
        """
        super(ReservationPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of ReservationInstance from a raw API record.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        """
        solution = self._solution
        return ReservationInstance(
            self._version,
            payload,
            workspace_sid=solution['workspace_sid'],
            task_sid=solution['task_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Taskrouter.V1.ReservationPage>'
class ReservationContext(InstanceContext):

    def __init__(self, version, workspace_sid, task_sid, sid):
        """
        Initialize the ReservationContext.

        :param Version version: Version that contains the resource
        :param workspace_sid: The workspace_sid
        :param task_sid: The task_sid
        :param sid: The sid

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
        """
        super(ReservationContext, self).__init__(version)

        # Path values that identify this specific reservation.
        self._solution = {'workspace_sid': workspace_sid, 'task_sid': task_sid, 'sid': sid}
        self._uri = ('/Workspaces/{workspace_sid}/Tasks/{task_sid}'
                     '/Reservations/{sid}').format(**self._solution)

    def fetch(self):
        """
        Fetch a ReservationInstance.

        :returns: Fetched ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        """
        payload = self._version.fetch('GET', self._uri, params=values.of({}))

        solution = self._solution
        return ReservationInstance(
            self._version,
            payload,
            workspace_sid=solution['workspace_sid'],
            task_sid=solution['task_sid'],
            sid=solution['sid'],
        )

    def update(self, reservation_status=values.unset,
               worker_activity_sid=values.unset, instruction=values.unset,
               dequeue_post_work_activity_sid=values.unset,
               dequeue_from=values.unset, dequeue_record=values.unset,
               dequeue_timeout=values.unset, dequeue_to=values.unset,
               dequeue_status_callback_url=values.unset, call_from=values.unset,
               call_record=values.unset, call_timeout=values.unset,
               call_to=values.unset, call_url=values.unset,
               call_status_callback_url=values.unset, call_accept=values.unset,
               redirect_call_sid=values.unset, redirect_accept=values.unset,
               redirect_url=values.unset):
        """
        Update the ReservationInstance.  Unset parameters are omitted from the
        request body.

        :param ReservationInstance.Status reservation_status: The reservation_status
        :param unicode worker_activity_sid: The worker_activity_sid
        :param unicode instruction: The instruction
        :param unicode dequeue_post_work_activity_sid: The dequeue_post_work_activity_sid
        :param unicode dequeue_from: The dequeue_from
        :param unicode dequeue_record: The dequeue_record
        :param unicode dequeue_timeout: The dequeue_timeout
        :param unicode dequeue_to: The dequeue_to
        :param unicode dequeue_status_callback_url: The dequeue_status_callback_url
        :param unicode call_from: The call_from
        :param unicode call_record: The call_record
        :param unicode call_timeout: The call_timeout
        :param unicode call_to: The call_to
        :param unicode call_url: The call_url
        :param unicode call_status_callback_url: The call_status_callback_url
        :param bool call_accept: The call_accept
        :param unicode redirect_call_sid: The redirect_call_sid
        :param bool redirect_accept: The redirect_accept
        :param unicode redirect_url: The redirect_url

        :returns: Updated ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        """
        data = values.of({
            'ReservationStatus': reservation_status,
            'WorkerActivitySid': worker_activity_sid,
            'Instruction': instruction,
            'DequeuePostWorkActivitySid': dequeue_post_work_activity_sid,
            'DequeueFrom': dequeue_from,
            'DequeueRecord': dequeue_record,
            'DequeueTimeout': dequeue_timeout,
            'DequeueTo': dequeue_to,
            'DequeueStatusCallbackUrl': dequeue_status_callback_url,
            'CallFrom': call_from,
            'CallRecord': call_record,
            'CallTimeout': call_timeout,
            'CallTo': call_to,
            'CallUrl': call_url,
            'CallStatusCallbackUrl': call_status_callback_url,
            'CallAccept': call_accept,
            'RedirectCallSid': redirect_call_sid,
            'RedirectAccept': redirect_accept,
            'RedirectUrl': redirect_url,
        })

        payload = self._version.update('POST', self._uri, data=data)

        solution = self._solution
        return ReservationInstance(
            self._version,
            payload,
            workspace_sid=solution['workspace_sid'],
            task_sid=solution['task_sid'],
            sid=solution['sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Taskrouter.V1.ReservationContext {}>'.format(details)
class ReservationInstance(InstanceResource):
    class Status(object):
        """Possible ``reservation_status`` values for a reservation."""
        PENDING = "pending"
        ACCEPTED = "accepted"
        REJECTED = "rejected"
        TIMEOUT = "timeout"
        CANCELED = "canceled"
        RESCINDED = "rescinded"
    def __init__(self, version, payload, workspace_sid, task_sid, sid=None):
        """
        Initialize the ReservationInstance.

        :param Version version: Version that contains the resource
        :param dict payload: API response payload this instance is marshaled from
        :param workspace_sid: The workspace_sid
        :param task_sid: The task_sid
        :param sid: Reservation sid; when None it is taken from the payload

        :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
        """
        super(ReservationInstance, self).__init__(version)

        # Marshaled Properties (dates are parsed from ISO 8601 strings)
        self._properties = {
            'account_sid': payload['account_sid'],
            'date_created': deserialize.iso8601_datetime(payload['date_created']),
            'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
            'reservation_status': payload['reservation_status'],
            'sid': payload['sid'],
            'task_sid': payload['task_sid'],
            'worker_name': payload['worker_name'],
            'worker_sid': payload['worker_sid'],
            'workspace_sid': payload['workspace_sid'],
            'url': payload['url'],
            'links': payload['links'],
        }

        # Context is built lazily by the _proxy property on first use
        self._context = None
        self._solution = {
            'workspace_sid': workspace_sid,
            'task_sid': task_sid,
            'sid': sid or self._properties['sid'],
        }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ReservationContext for this ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
if self._context is None:
self._context = ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
    @property
    def account_sid(self):
        """
        :returns: The account_sid marshaled from the API payload
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def date_created(self):
        """
        :returns: The date_created (parsed from the ISO 8601 payload value)
        :rtype: datetime
        """
        return self._properties['date_created']
    @property
    def date_updated(self):
        """
        :returns: The date_updated (parsed from the ISO 8601 payload value)
        :rtype: datetime
        """
        return self._properties['date_updated']
    @property
    def reservation_status(self):
        """
        :returns: The reservation_status marshaled from the API payload
        :rtype: ReservationInstance.Status
        """
        return self._properties['reservation_status']
    @property
    def sid(self):
        """
        :returns: The sid marshaled from the API payload
        :rtype: unicode
        """
        return self._properties['sid']
    @property
    def task_sid(self):
        """
        :returns: The task_sid marshaled from the API payload
        :rtype: unicode
        """
        return self._properties['task_sid']
    @property
    def worker_name(self):
        """
        :returns: The worker_name marshaled from the API payload
        :rtype: unicode
        """
        return self._properties['worker_name']
    @property
    def worker_sid(self):
        """
        :returns: The worker_sid marshaled from the API payload
        :rtype: unicode
        """
        return self._properties['worker_sid']
    @property
    def workspace_sid(self):
        """
        :returns: The workspace_sid marshaled from the API payload
        :rtype: unicode
        """
        return self._properties['workspace_sid']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a ReservationInstance
:returns: Fetched ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return self._proxy.fetch()
def update(self, reservation_status=values.unset,
worker_activity_sid=values.unset, instruction=values.unset,
dequeue_post_work_activity_sid=values.unset,
dequeue_from=values.unset, dequeue_record=values.unset,
dequeue_timeout=values.unset, dequeue_to=values.unset,
dequeue_status_callback_url=values.unset, call_from=values.unset,
call_record=values.unset, call_timeout=values.unset,
call_to=values.unset, call_url=values.unset,
call_status_callback_url=values.unset, call_accept=values.unset,
redirect_call_sid=values.unset, redirect_accept=values.unset,
redirect_url=values.unset):
"""
Update the ReservationInstance
:param ReservationInstance.Status reservation_status: The reservation_status
:param unicode worker_activity_sid: The worker_activity_sid
:param unicode instruction: The instruction
:param unicode dequeue_post_work_activity_sid: The dequeue_post_work_activity_sid
:param unicode dequeue_from: The dequeue_from
:param unicode dequeue_record: The dequeue_record
:param unicode dequeue_timeout: The dequeue_timeout
:param unicode dequeue_to: The dequeue_to
:param unicode dequeue_status_callback_url: The dequeue_status_callback_url
:param unicode call_from: The call_from
:param unicode call_record: The call_record
:param unicode call_timeout: The call_timeout
:param unicode call_to: The call_to
:param unicode call_url: The call_url
:param unicode call_status_callback_url: The call_status_callback_url
:param bool call_accept: The call_accept
:param unicode redirect_call_sid: The redirect_call_sid
:param bool redirect_accept: The redirect_accept
:param unicode redirect_url: The redirect_url
:returns: Updated ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return self._proxy.update(
reservation_status=reservation_status,
worker_activity_sid=worker_activity_sid,
instruction=instruction,
dequeue_post_work_activity_sid=dequeue_post_work_activity_sid,
dequeue_from=dequeue_from,
dequeue_record=dequeue_record,
dequeue_timeout=dequeue_timeout,
dequeue_to=dequeue_to,
dequeue_status_callback_url=dequeue_status_callback_url,
call_from=call_from,
call_record=call_record,
call_timeout=call_timeout,
call_to=call_to,
call_url=call_url,
call_status_callback_url=call_status_callback_url,
call_accept=call_accept,
redirect_call_sid=redirect_call_sid,
redirect_accept=redirect_accept,
redirect_url=redirect_url,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.ReservationInstance {}>'.format(context) | lib/python3.5/site-packages/twilio/rest/taskrouter/v1/workspace/task/reservation.py | from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ReservationList(ListResource):
def __init__(self, version, workspace_sid, task_sid):
"""
Initialize the ReservationList
:param Version version: Version that contains the resource
:param workspace_sid: The workspace_sid
:param task_sid: The task_sid
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
"""
super(ReservationList, self).__init__(version)
# Path Solution
self._solution = {
'workspace_sid': workspace_sid,
'task_sid': task_sid,
}
self._uri = '/Workspaces/{workspace_sid}/Tasks/{task_sid}/Reservations'.format(**self._solution)
def stream(self, reservation_status=values.unset, limit=None, page_size=None):
"""
Streams ReservationInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param ReservationInstance.Status reservation_status: The reservation_status
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
reservation_status=reservation_status,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, reservation_status=values.unset, limit=None, page_size=None):
"""
Lists ReservationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param ReservationInstance.Status reservation_status: The reservation_status
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
"""
return list(self.stream(
reservation_status=reservation_status,
limit=limit,
page_size=page_size,
))
def page(self, reservation_status=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of ReservationInstance records from the API.
Request is executed immediately
:param ReservationInstance.Status reservation_status: The reservation_status
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
params = values.of({
'ReservationStatus': reservation_status,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ReservationPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ReservationInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ReservationPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ReservationContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ReservationContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.ReservationList>'
class ReservationPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ReservationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param workspace_sid: The workspace_sid
:param task_sid: The task_sid
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
super(ReservationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ReservationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.ReservationPage>'
class ReservationContext(InstanceContext):
def __init__(self, version, workspace_sid, task_sid, sid):
"""
Initialize the ReservationContext
:param Version version: Version that contains the resource
:param workspace_sid: The workspace_sid
:param task_sid: The task_sid
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
super(ReservationContext, self).__init__(version)
# Path Solution
self._solution = {
'workspace_sid': workspace_sid,
'task_sid': task_sid,
'sid': sid,
}
self._uri = '/Workspaces/{workspace_sid}/Tasks/{task_sid}/Reservations/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a ReservationInstance
:returns: Fetched ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, reservation_status=values.unset,
worker_activity_sid=values.unset, instruction=values.unset,
dequeue_post_work_activity_sid=values.unset,
dequeue_from=values.unset, dequeue_record=values.unset,
dequeue_timeout=values.unset, dequeue_to=values.unset,
dequeue_status_callback_url=values.unset, call_from=values.unset,
call_record=values.unset, call_timeout=values.unset,
call_to=values.unset, call_url=values.unset,
call_status_callback_url=values.unset, call_accept=values.unset,
redirect_call_sid=values.unset, redirect_accept=values.unset,
redirect_url=values.unset):
"""
Update the ReservationInstance
:param ReservationInstance.Status reservation_status: The reservation_status
:param unicode worker_activity_sid: The worker_activity_sid
:param unicode instruction: The instruction
:param unicode dequeue_post_work_activity_sid: The dequeue_post_work_activity_sid
:param unicode dequeue_from: The dequeue_from
:param unicode dequeue_record: The dequeue_record
:param unicode dequeue_timeout: The dequeue_timeout
:param unicode dequeue_to: The dequeue_to
:param unicode dequeue_status_callback_url: The dequeue_status_callback_url
:param unicode call_from: The call_from
:param unicode call_record: The call_record
:param unicode call_timeout: The call_timeout
:param unicode call_to: The call_to
:param unicode call_url: The call_url
:param unicode call_status_callback_url: The call_status_callback_url
:param bool call_accept: The call_accept
:param unicode redirect_call_sid: The redirect_call_sid
:param bool redirect_accept: The redirect_accept
:param unicode redirect_url: The redirect_url
:returns: Updated ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
data = values.of({
'ReservationStatus': reservation_status,
'WorkerActivitySid': worker_activity_sid,
'Instruction': instruction,
'DequeuePostWorkActivitySid': dequeue_post_work_activity_sid,
'DequeueFrom': dequeue_from,
'DequeueRecord': dequeue_record,
'DequeueTimeout': dequeue_timeout,
'DequeueTo': dequeue_to,
'DequeueStatusCallbackUrl': dequeue_status_callback_url,
'CallFrom': call_from,
'CallRecord': call_record,
'CallTimeout': call_timeout,
'CallTo': call_to,
'CallUrl': call_url,
'CallStatusCallbackUrl': call_status_callback_url,
'CallAccept': call_accept,
'RedirectCallSid': redirect_call_sid,
'RedirectAccept': redirect_accept,
'RedirectUrl': redirect_url,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.ReservationContext {}>'.format(context)
class ReservationInstance(InstanceResource):
class Status(object):
PENDING = "pending"
ACCEPTED = "accepted"
REJECTED = "rejected"
TIMEOUT = "timeout"
CANCELED = "canceled"
RESCINDED = "rescinded"
def __init__(self, version, payload, workspace_sid, task_sid, sid=None):
"""
Initialize the ReservationInstance
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
super(ReservationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'reservation_status': payload['reservation_status'],
'sid': payload['sid'],
'task_sid': payload['task_sid'],
'worker_name': payload['worker_name'],
'worker_sid': payload['worker_sid'],
'workspace_sid': payload['workspace_sid'],
'url': payload['url'],
'links': payload['links'],
}
# Context
self._context = None
self._solution = {
'workspace_sid': workspace_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ReservationContext for this ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
if self._context is None:
self._context = ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def reservation_status(self):
"""
:returns: The reservation_status
:rtype: ReservationInstance.Status
"""
return self._properties['reservation_status']
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def task_sid(self):
"""
:returns: The task_sid
:rtype: unicode
"""
return self._properties['task_sid']
@property
def worker_name(self):
"""
:returns: The worker_name
:rtype: unicode
"""
return self._properties['worker_name']
@property
def worker_sid(self):
"""
:returns: The worker_sid
:rtype: unicode
"""
return self._properties['worker_sid']
@property
def workspace_sid(self):
"""
:returns: The workspace_sid
:rtype: unicode
"""
return self._properties['workspace_sid']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a ReservationInstance
:returns: Fetched ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return self._proxy.fetch()
def update(self, reservation_status=values.unset,
worker_activity_sid=values.unset, instruction=values.unset,
dequeue_post_work_activity_sid=values.unset,
dequeue_from=values.unset, dequeue_record=values.unset,
dequeue_timeout=values.unset, dequeue_to=values.unset,
dequeue_status_callback_url=values.unset, call_from=values.unset,
call_record=values.unset, call_timeout=values.unset,
call_to=values.unset, call_url=values.unset,
call_status_callback_url=values.unset, call_accept=values.unset,
redirect_call_sid=values.unset, redirect_accept=values.unset,
redirect_url=values.unset):
"""
Update the ReservationInstance
:param ReservationInstance.Status reservation_status: The reservation_status
:param unicode worker_activity_sid: The worker_activity_sid
:param unicode instruction: The instruction
:param unicode dequeue_post_work_activity_sid: The dequeue_post_work_activity_sid
:param unicode dequeue_from: The dequeue_from
:param unicode dequeue_record: The dequeue_record
:param unicode dequeue_timeout: The dequeue_timeout
:param unicode dequeue_to: The dequeue_to
:param unicode dequeue_status_callback_url: The dequeue_status_callback_url
:param unicode call_from: The call_from
:param unicode call_record: The call_record
:param unicode call_timeout: The call_timeout
:param unicode call_to: The call_to
:param unicode call_url: The call_url
:param unicode call_status_callback_url: The call_status_callback_url
:param bool call_accept: The call_accept
:param unicode redirect_call_sid: The redirect_call_sid
:param bool redirect_accept: The redirect_accept
:param unicode redirect_url: The redirect_url
:returns: Updated ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return self._proxy.update(
reservation_status=reservation_status,
worker_activity_sid=worker_activity_sid,
instruction=instruction,
dequeue_post_work_activity_sid=dequeue_post_work_activity_sid,
dequeue_from=dequeue_from,
dequeue_record=dequeue_record,
dequeue_timeout=dequeue_timeout,
dequeue_to=dequeue_to,
dequeue_status_callback_url=dequeue_status_callback_url,
call_from=call_from,
call_record=call_record,
call_timeout=call_timeout,
call_to=call_to,
call_url=call_url,
call_status_callback_url=call_status_callback_url,
call_accept=call_accept,
redirect_call_sid=redirect_call_sid,
redirect_accept=redirect_accept,
redirect_url=redirect_url,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.ReservationInstance {}>'.format(context) | 0.880026 | 0.154535 |
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import scheduler_pb2 as scheduler__pb2
class ServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.schedule_job = channel.unary_unary(
'/scheduler.Service/schedule_job',
request_serializer=scheduler__pb2.Payload.SerializeToString,
response_deserializer=scheduler__pb2.Job.FromString,
)
class ServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def schedule_job(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'schedule_job': grpc.unary_unary_rpc_method_handler(
servicer.schedule_job,
request_deserializer=scheduler__pb2.Payload.FromString,
response_serializer=scheduler__pb2.Job.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'scheduler.Service', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Service(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def schedule_job(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Service/schedule_job',
scheduler__pb2.Payload.SerializeToString,
scheduler__pb2.Job.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | tracardi/process_engine/action/v1/pro/scheduler/proto/stubs/scheduler_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
import scheduler_pb2 as scheduler__pb2
class ServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.schedule_job = channel.unary_unary(
'/scheduler.Service/schedule_job',
request_serializer=scheduler__pb2.Payload.SerializeToString,
response_deserializer=scheduler__pb2.Job.FromString,
)
class ServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def schedule_job(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'schedule_job': grpc.unary_unary_rpc_method_handler(
servicer.schedule_job,
request_deserializer=scheduler__pb2.Payload.FromString,
response_serializer=scheduler__pb2.Job.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'scheduler.Service', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Service(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def schedule_job(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Service/schedule_job',
scheduler__pb2.Payload.SerializeToString,
scheduler__pb2.Job.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | 0.690663 | 0.178311 |
import asyncio
import concurrent.futures
import time
import uuid
import distributed.comm
from distributed import default_client, get_worker
from distributed.comm.addressing import (
parse_address,
parse_host_port,
unparse_address,
)
from . import utils
_default_comms = None
def default_comms(client=None):
    """Return the module-wide comms instance, creating it on first call.

    Subsequent calls return the cached instance unchanged; the ``client``
    argument is only consulted when the instance is first created.
    """
    global _default_comms
    if _default_comms is not None:
        return _default_comms
    _default_comms = CommsContext(client=client)
    return _default_comms
def worker_state(sessionId=None):
    """Return this worker's explicit-comms state.

    With a ``sessionId``, returns (creating on first access) that session's
    state dict; with no ``sessionId``, returns the mapping of all sessions.
    """
    worker = get_worker()
    try:
        all_sessions = worker._explicit_comm_state
    except AttributeError:
        all_sessions = worker._explicit_comm_state = {}
    if sessionId is None:
        return all_sessions
    if sessionId not in all_sessions:
        all_sessions[sessionId] = {
            "ts": time.time(),
            "eps": {},
            "loop": worker.loop.asyncio_loop,
            "worker": worker,
        }
    return all_sessions[sessionId]
def _run_coroutine_on_worker(sessionId, coroutine, args):
    """Run `coroutine(session_state, *args)` on this worker's event loop
    and block until it finishes, returning its result.

    Called as a regular (synchronous) Dask task on a worker; the coroutine
    is scheduled onto the worker's asyncio loop, which runs in another
    thread, via `run_coroutine_threadsafe`.
    """
    session_state = worker_state(sessionId)
    def _run():
        # Hand the coroutine to the worker's running loop and wait for it.
        future = asyncio.run_coroutine_threadsafe(
            coroutine(session_state, *args), session_state["loop"]
        )
        return future.result()
    # NOTE(review): _run() blocks regardless, so the single-worker executor
    # presumably exists to keep the wait off the calling task's thread
    # context — confirm whether a direct _run() call would suffice.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(_run).result()
async def _create_listeners(session_state, nworkers, rank):
    """Start a comm listener on this worker and return its listen address.

    Records `nworkers` and `rank` in the session state (each must not have
    been set before — a session is initialized exactly once). Inbound peers
    introduce themselves by sending their rank as the first message, and
    the handler files the endpoint under that rank.
    """
    assert session_state["loop"] is asyncio.get_event_loop()
    assert "nworkers" not in session_state
    session_state["nworkers"] = nworkers
    assert "rank" not in session_state
    session_state["rank"] = rank
    async def server_handler(ep):
        # First message on a new connection is the connecting peer's rank.
        peer_rank = await ep.read()
        session_state["eps"][peer_rank] = ep
    # We listen on the same protocol and address as the worker address
    # (host only — the listener picks its own port).
    protocol, address = parse_address(session_state["worker"].address)
    address = parse_host_port(address)[0]
    address = unparse_address(protocol, address)
    session_state["lf"] = distributed.comm.listen(address, server_handler)
    await session_state["lf"].start()
    return session_state["lf"].listen_address
async def _create_endpoints(session_state, peers):
    """Connect this worker directly to every peer of greater rank.

    Peers of lower rank connect to us through our listener instead; the
    coroutine returns once endpoints to all other workers exist.
    """
    assert session_state["loop"] is asyncio.get_event_loop()
    myrank = session_state["rank"]
    addresses = list(peers)
    # Outgoing connections go only to higher ranks; we introduce
    # ourselves by sending our own rank as the first message.
    for peer_rank in range(myrank + 1, len(addresses)):
        ep = await distributed.comm.connect(addresses[peer_rank])
        await ep.write(myrank)
        session_state["eps"][peer_rank] = ep
    # Poll until the inbound connections from lower ranks have arrived too.
    while len(session_state["eps"]) < session_state["nworkers"] - 1:
        await asyncio.sleep(0.1)
async def _stop_ucp_listeners(session_state):
assert len(session_state["eps"]) == session_state["nworkers"] - 1
assert session_state["loop"] is asyncio.get_event_loop()
session_state["lf"].stop()
del session_state["lf"]
class CommsContext:
    """Communication handler for explicit communication.

    On construction, every worker of the Dask cluster is assigned a rank,
    starts a listener, and opens direct endpoints to all other workers.
    The context can then run coroutines on the workers (`submit`/`run`)
    and drive whole-dataframe operations (`dataframe_operation`).
    """
    def __init__(self, client=None):
        # Fall back to the current default Dask client when none is given.
        self.client = client if client is not None else default_client()
        # Random id distinguishing this session's per-worker state.
        self.sessionId = uuid.uuid4().bytes
        # Get address of all workers (not Nanny addresses).
        # client.run() keys its result dict by worker address; the
        # lambda's return value (42) is irrelevant.
        self.worker_addresses = list(self.client.run(lambda: 42).keys())
        # Make all workers listen and get all listen addresses
        self.worker_direct_addresses = []
        for rank, address in enumerate(self.worker_addresses):
            self.worker_direct_addresses.append(
                self.submit(
                    address,
                    _create_listeners,
                    len(self.worker_addresses),
                    rank,
                    wait=True,
                )
            )
        # Each worker creates an endpoint to all workers with greater rank
        self.run(_create_endpoints, self.worker_direct_addresses)
        # At this point all workers should have a rank and endpoints to
        # all other workers thus we can now stop the listening.
        self.run(_stop_ucp_listeners)
    def submit(self, worker, coroutine, *args, wait=False):
        """Run a coroutine on a single worker
        Parameters
        ----------
        worker: str
            Worker to run the `coroutine`
        coroutine: coroutine
            The function to run on the worker
        *args:
            Arguments for `coroutine`
        wait: boolean, optional
            If True, waits for the coroutine to finish before returning.
        Returns
        -------
        ret: object or Future
            If wait=True, the result of `coroutine`
            If wait=False, Future that can be waited on later.
        """
        # workers=[worker] pins the task to that worker; pure=False forces
        # execution even if an identical-looking task was run before.
        ret = self.client.submit(
            _run_coroutine_on_worker,
            self.sessionId,
            coroutine,
            args,
            workers=[worker],
            pure=False,
        )
        return ret.result() if wait else ret
    def run(self, coroutine, *args, workers=None):
        """Run a coroutine on workers
        Parameters
        ----------
        coroutine: coroutine
            The function to run on each worker
        *args:
            Arguments for `coroutine`
        workers: list, optional
            List of workers. Default is all workers
        Returns
        -------
        ret: list
            List of the output from each worker
        """
        if workers is None:
            workers = self.worker_addresses
        # Fan out one pinned, impure task per worker, then gather.
        ret = []
        for worker in workers:
            ret.append(
                self.client.submit(
                    _run_coroutine_on_worker,
                    self.sessionId,
                    coroutine,
                    args,
                    workers=[worker],
                    pure=False,
                )
            )
        return self.client.gather(ret)
    def dataframe_operation(self, coroutine, df_list, extra_args=tuple()):
        """Submit an operation on a list of Dask dataframe
        Parameters
        ----------
        coroutine: coroutine
            The function to run on each worker
        df_list: list of Dask.dataframe.Dataframe
            Input dataframes
        extra_args: tuple
            Extra function input
        Returns
        -------
        dataframe: Dask.dataframe.Dataframe
            The resulting dataframe
        """
        # Map each input dataframe to {worker_address: [partitions]}.
        df_parts_list = []
        for df in df_list:
            df_parts_list.append(utils.extract_ddf_partitions(df))
        # Let's create a dict for each dataframe that specifies the
        # number of partitions each worker has
        world = set()
        dfs_nparts = []
        for df_parts in df_parts_list:
            nparts = {}
            for rank, worker in enumerate(self.worker_addresses):
                npart = len(df_parts.get(worker, []))
                if npart > 0:
                    nparts[rank] = npart
                    # `world` is the set of ranks holding any partition.
                    world.add(rank)
            dfs_nparts.append(nparts)
        # Submit `coroutine` on each worker given the df_parts that
        # belong to the specific worker as input
        ret = []
        for rank, worker in enumerate(self.worker_addresses):
            if rank in world:
                dfs = []
                for df_parts in df_parts_list:
                    dfs.append(df_parts.get(worker, []))
                ret.append(
                    self.submit(worker, coroutine, world, dfs_nparts, dfs, *extra_args)
                )
        return utils.dataframes_to_dask_dataframe(ret)
import concurrent.futures
import time
import uuid
import distributed.comm
from distributed import default_client, get_worker
from distributed.comm.addressing import (
parse_address,
parse_host_port,
unparse_address,
)
from . import utils
_default_comms = None
def default_comms(client=None):
    """Return the module-wide comms instance, creating it on first call.

    Subsequent calls return the cached instance unchanged; the ``client``
    argument is only consulted when the instance is first created.
    """
    global _default_comms
    if _default_comms is not None:
        return _default_comms
    _default_comms = CommsContext(client=client)
    return _default_comms
def worker_state(sessionId=None):
    """Return this worker's explicit-comms state.

    With a ``sessionId``, returns (creating on first access) that session's
    state dict; with no ``sessionId``, returns the mapping of all sessions.
    """
    worker = get_worker()
    try:
        all_sessions = worker._explicit_comm_state
    except AttributeError:
        all_sessions = worker._explicit_comm_state = {}
    if sessionId is None:
        return all_sessions
    if sessionId not in all_sessions:
        all_sessions[sessionId] = {
            "ts": time.time(),
            "eps": {},
            "loop": worker.loop.asyncio_loop,
            "worker": worker,
        }
    return all_sessions[sessionId]
def _run_coroutine_on_worker(sessionId, coroutine, args):
    """Run `coroutine(session_state, *args)` on this worker's event loop
    and block until it finishes, returning its result.

    Called as a regular (synchronous) Dask task on a worker; the coroutine
    is scheduled onto the worker's asyncio loop, which runs in another
    thread, via `run_coroutine_threadsafe`.
    """
    session_state = worker_state(sessionId)
    def _run():
        # Hand the coroutine to the worker's running loop and wait for it.
        future = asyncio.run_coroutine_threadsafe(
            coroutine(session_state, *args), session_state["loop"]
        )
        return future.result()
    # NOTE(review): _run() blocks regardless, so the single-worker executor
    # presumably exists to keep the wait off the calling task's thread
    # context — confirm whether a direct _run() call would suffice.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(_run).result()
async def _create_listeners(session_state, nworkers, rank):
    """Start a comm listener on this worker and return its listen address.

    Records `nworkers` and `rank` in the session state (each must not have
    been set before — a session is initialized exactly once). Inbound peers
    introduce themselves by sending their rank as the first message, and
    the handler files the endpoint under that rank.
    """
    assert session_state["loop"] is asyncio.get_event_loop()
    assert "nworkers" not in session_state
    session_state["nworkers"] = nworkers
    assert "rank" not in session_state
    session_state["rank"] = rank
    async def server_handler(ep):
        # First message on a new connection is the connecting peer's rank.
        peer_rank = await ep.read()
        session_state["eps"][peer_rank] = ep
    # We listen on the same protocol and address as the worker address
    # (host only — the listener picks its own port).
    protocol, address = parse_address(session_state["worker"].address)
    address = parse_host_port(address)[0]
    address = unparse_address(protocol, address)
    session_state["lf"] = distributed.comm.listen(address, server_handler)
    await session_state["lf"].start()
    return session_state["lf"].listen_address
async def _create_endpoints(session_state, peers):
    """Connect this worker directly to every peer of greater rank.

    Peers of lower rank connect to us through our listener instead; the
    coroutine returns once endpoints to all other workers exist.
    """
    assert session_state["loop"] is asyncio.get_event_loop()
    myrank = session_state["rank"]
    addresses = list(peers)
    # Outgoing connections go only to higher ranks; we introduce
    # ourselves by sending our own rank as the first message.
    for peer_rank in range(myrank + 1, len(addresses)):
        ep = await distributed.comm.connect(addresses[peer_rank])
        await ep.write(myrank)
        session_state["eps"][peer_rank] = ep
    # Poll until the inbound connections from lower ranks have arrived too.
    while len(session_state["eps"]) < session_state["nworkers"] - 1:
        await asyncio.sleep(0.1)
async def _stop_ucp_listeners(session_state):
assert len(session_state["eps"]) == session_state["nworkers"] - 1
assert session_state["loop"] is asyncio.get_event_loop()
session_state["lf"].stop()
del session_state["lf"]
class CommsContext:
"""Communication handler for explicit communication"""
def __init__(self, client=None):
self.client = client if client is not None else default_client()
self.sessionId = uuid.uuid4().bytes
# Get address of all workers (not Nanny addresses)
self.worker_addresses = list(self.client.run(lambda: 42).keys())
# Make all workers listen and get all listen addresses
self.worker_direct_addresses = []
for rank, address in enumerate(self.worker_addresses):
self.worker_direct_addresses.append(
self.submit(
address,
_create_listeners,
len(self.worker_addresses),
rank,
wait=True,
)
)
# Each worker creates an endpoint to all workers with greater rank
self.run(_create_endpoints, self.worker_direct_addresses)
# At this point all workers should have a rank and endpoints to
# all other workers thus we can now stop the listening.
self.run(_stop_ucp_listeners)
def submit(self, worker, coroutine, *args, wait=False):
"""Run a coroutine on a single worker
Parameters
----------
worker: str
Worker to run the `coroutine`
coroutine: coroutine
The function to run on the worker
*args:
Arguments for `coroutine`
wait: boolean, optional
If True, waits for the coroutine to finished before returning.
Returns
-------
ret: object or Future
If wait=True, the result of `coroutine`
If wait=False, Future that can be waited on later.
"""
ret = self.client.submit(
_run_coroutine_on_worker,
self.sessionId,
coroutine,
args,
workers=[worker],
pure=False,
)
return ret.result() if wait else ret
def run(self, coroutine, *args, workers=None):
"""Run a coroutine on workers
Parameters
----------
coroutine: coroutine
The function to run on each worker
*args:
Arguments for `coroutine`
workers: list, optional
List of workers. Default is all workers
Returns
-------
ret: list
List of the output from each worker
"""
if workers is None:
workers = self.worker_addresses
ret = []
for worker in workers:
ret.append(
self.client.submit(
_run_coroutine_on_worker,
self.sessionId,
coroutine,
args,
workers=[worker],
pure=False,
)
)
return self.client.gather(ret)
def dataframe_operation(self, coroutine, df_list, extra_args=tuple()):
"""Submit an operation on a list of Dask dataframe
Parameters
----------
coroutine: coroutine
The function to run on each worker
df_list: list of Dask.dataframe.Dataframe
Input dataframes
extra_args: tuple
Extra function input
Returns
-------
dataframe: Dask.dataframe.Dataframe
The resulting dataframe
"""
df_parts_list = []
for df in df_list:
df_parts_list.append(utils.extract_ddf_partitions(df))
# Let's create a dict for each dataframe that specifices the
# number of partitions each worker has
world = set()
dfs_nparts = []
for df_parts in df_parts_list:
nparts = {}
for rank, worker in enumerate(self.worker_addresses):
npart = len(df_parts.get(worker, []))
if npart > 0:
nparts[rank] = npart
world.add(rank)
dfs_nparts.append(nparts)
# Submit `coroutine` on each worker given the df_parts that
# belong the specific worker as input
ret = []
for rank, worker in enumerate(self.worker_addresses):
if rank in world:
dfs = []
for df_parts in df_parts_list:
dfs.append(df_parts.get(worker, []))
ret.append(
self.submit(worker, coroutine, world, dfs_nparts, dfs, *extra_args)
)
return utils.dataframes_to_dask_dataframe(ret) | 0.768473 | 0.207195 |
import binascii
import struct
from datetime import datetime
import re
from mi.core.exceptions import SampleException
# newline.
NEWLINE = '\n\r'
# default timeout.
TIMEOUT = 15
# offset to accurately set instrument clock, in seconds
CLOCK_SYNC_OFFSET = 2.0
# maximum acceptable time difference when verifying clock sync, in seconds
CLOCK_SYNC_MAX_DIFF = 2
# sample collection is ~60 seconds, add padding
SAMPLE_TIMEOUT = 70
# set up the 'structure' lengths (in bytes) and sync/id/size constants
CHECK_SUM_SEED = 0xb58c
HW_CONFIG_LEN = 48
HW_CONFIG_SYNC_BYTES = '\xa5\x05\x18\x00'
HARDWARE_CONFIG_DATA_PATTERN = r'(%s)(.{44})(\x06\x06)' % HW_CONFIG_SYNC_BYTES
HARDWARE_CONFIG_DATA_REGEX = re.compile(HARDWARE_CONFIG_DATA_PATTERN, re.DOTALL)
HEAD_CONFIG_LEN = 224
HEAD_CONFIG_SYNC_BYTES = '\xa5\x04\x70\x00'
HEAD_CONFIG_DATA_PATTERN = r'(%s)(.{220})(\x06\x06)' % HEAD_CONFIG_SYNC_BYTES
HEAD_CONFIG_DATA_REGEX = re.compile(HEAD_CONFIG_DATA_PATTERN, re.DOTALL)
USER_CONFIG_LEN = 512
USER_CONFIG_SYNC_BYTES = '\xa5\x00\x00\x01'
USER_CONFIG_DATA_PATTERN = r'(%s)(.{508})(\x06\x06)' % USER_CONFIG_SYNC_BYTES
USER_CONFIG_DATA_REGEX = re.compile(USER_CONFIG_DATA_PATTERN, re.DOTALL)
# min, sec, day, hour, year, month
CLOCK_DATA_PATTERN = r'([\x00-\x60])([\x00-\x60])([\x01-\x31])([\x00-\x24])([\x00-\x99])([\x01-\x12])\x06\x06'
CLOCK_DATA_REGEX = re.compile(CLOCK_DATA_PATTERN, re.DOTALL)
# Special combined regex to give battery voltage a "unique sync byte" to search for (non-unique regex workaround)
ID_BATTERY_DATA_PATTERN = r'(?:AQD|VEC) ?[0-9]{4,5} {0,6}\x06\x06([\x00-\xFF][\x13-\x46])\x06\x06'
ID_BATTERY_DATA_REGEX = re.compile(ID_BATTERY_DATA_PATTERN, re.DOTALL)
# [\x00, \x01, \x02, \x04, and \x05]
MODE_DATA_PATTERN = r'([\x00-\x02,\x04,\x05]\x00)(\x06\x06)'
MODE_DATA_REGEX = re.compile(MODE_DATA_PATTERN, re.DOTALL)
# ~5000mV (0x1388) minimum to ~18000mv (0x4650) maximum
BATTERY_DATA_PATTERN = r'([\x00-\xFF][\x13-\x46])\x06\x06'
BATTERY_DATA_REGEX = re.compile(BATTERY_DATA_PATTERN, re.DOTALL)
# ["VEC 8181", "AQD 8493 "]
ID_DATA_PATTERN = r'((?:AQD|VEC) ?[0-9]{4,5}) {0,6}\x06\x06'
ID_DATA_REGEX = re.compile(ID_DATA_PATTERN, re.DOTALL)
NORTEK_COMMON_REGEXES = [USER_CONFIG_DATA_REGEX,
HARDWARE_CONFIG_DATA_REGEX,
HEAD_CONFIG_DATA_REGEX,
ID_BATTERY_DATA_REGEX,
CLOCK_DATA_REGEX]
INTERVAL_TIME_REGEX = r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9])"
def convert_word_to_int(word):
"""
Converts a word into an integer field
"""
try:
return struct.unpack('<H', word)[0]
except struct.error:
raise SampleException("Invalid number of bytes in word input! Found %s with input %s" % (word, len(word)))
def convert_word_to_bit_field(word):
"""
Convert little-endian short to a bit field
@param input_bytes
@retval an list of 1 or 0 in order
"""
try:
short = struct.unpack('<H', word)[0]
return [int(x) for x in format(short, '016b')]
except struct.error:
raise SampleException("Invalid number of bytes in word input! Found %s with input %s" % (word, len(word)))
def convert_bcd_bytes_to_ints(input_bytes):
"""
Convert block of 6 BCD-encoded bytes into a date/time structure for the instrument family
@param input_bytes 6 bytes
@retval An array of 6 ints corresponding to the date/time structure
@raise SampleException If the date/time cannot be found
"""
if len(input_bytes) != 6:
raise SampleException("Invalid number of bytes in input! Found %s" % len(input_bytes))
return [int(binascii.hexlify(c)) for c in input_bytes]
def convert_time(response):
"""
Converts the timestamp in BCD to a datetime object
"""
minutes, seconds, day, hour, year, month = convert_bcd_bytes_to_ints(response)
return datetime(year + 2000, month, day, hour, minutes, seconds) | mi/instrument/nortek/common.py | import binascii
import struct
from datetime import datetime
import re
from mi.core.exceptions import SampleException
# newline.
NEWLINE = '\n\r'
# default timeout.
TIMEOUT = 15
# offset to accurately set instrument clock, in seconds
CLOCK_SYNC_OFFSET = 2.0
# maximum acceptable time difference when verifying clock sync, in seconds
CLOCK_SYNC_MAX_DIFF = 2
# sample collection is ~60 seconds, add padding
SAMPLE_TIMEOUT = 70
# set up the 'structure' lengths (in bytes) and sync/id/size constants
CHECK_SUM_SEED = 0xb58c
HW_CONFIG_LEN = 48
HW_CONFIG_SYNC_BYTES = '\xa5\x05\x18\x00'
HARDWARE_CONFIG_DATA_PATTERN = r'(%s)(.{44})(\x06\x06)' % HW_CONFIG_SYNC_BYTES
HARDWARE_CONFIG_DATA_REGEX = re.compile(HARDWARE_CONFIG_DATA_PATTERN, re.DOTALL)
HEAD_CONFIG_LEN = 224
HEAD_CONFIG_SYNC_BYTES = '\xa5\x04\x70\x00'
HEAD_CONFIG_DATA_PATTERN = r'(%s)(.{220})(\x06\x06)' % HEAD_CONFIG_SYNC_BYTES
HEAD_CONFIG_DATA_REGEX = re.compile(HEAD_CONFIG_DATA_PATTERN, re.DOTALL)
USER_CONFIG_LEN = 512
USER_CONFIG_SYNC_BYTES = '\xa5\x00\x00\x01'
USER_CONFIG_DATA_PATTERN = r'(%s)(.{508})(\x06\x06)' % USER_CONFIG_SYNC_BYTES
USER_CONFIG_DATA_REGEX = re.compile(USER_CONFIG_DATA_PATTERN, re.DOTALL)
# min, sec, day, hour, year, month
CLOCK_DATA_PATTERN = r'([\x00-\x60])([\x00-\x60])([\x01-\x31])([\x00-\x24])([\x00-\x99])([\x01-\x12])\x06\x06'
CLOCK_DATA_REGEX = re.compile(CLOCK_DATA_PATTERN, re.DOTALL)
# Special combined regex to give battery voltage a "unique sync byte" to search for (non-unique regex workaround)
ID_BATTERY_DATA_PATTERN = r'(?:AQD|VEC) ?[0-9]{4,5} {0,6}\x06\x06([\x00-\xFF][\x13-\x46])\x06\x06'
ID_BATTERY_DATA_REGEX = re.compile(ID_BATTERY_DATA_PATTERN, re.DOTALL)
# [\x00, \x01, \x02, \x04, and \x05]
MODE_DATA_PATTERN = r'([\x00-\x02,\x04,\x05]\x00)(\x06\x06)'
MODE_DATA_REGEX = re.compile(MODE_DATA_PATTERN, re.DOTALL)
# ~5000mV (0x1388) minimum to ~18000mv (0x4650) maximum
BATTERY_DATA_PATTERN = r'([\x00-\xFF][\x13-\x46])\x06\x06'
BATTERY_DATA_REGEX = re.compile(BATTERY_DATA_PATTERN, re.DOTALL)
# ["VEC 8181", "AQD 8493 "]
ID_DATA_PATTERN = r'((?:AQD|VEC) ?[0-9]{4,5}) {0,6}\x06\x06'
ID_DATA_REGEX = re.compile(ID_DATA_PATTERN, re.DOTALL)
NORTEK_COMMON_REGEXES = [USER_CONFIG_DATA_REGEX,
HARDWARE_CONFIG_DATA_REGEX,
HEAD_CONFIG_DATA_REGEX,
ID_BATTERY_DATA_REGEX,
CLOCK_DATA_REGEX]
INTERVAL_TIME_REGEX = r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9])"
def convert_word_to_int(word):
"""
Converts a word into an integer field
"""
try:
return struct.unpack('<H', word)[0]
except struct.error:
raise SampleException("Invalid number of bytes in word input! Found %s with input %s" % (word, len(word)))
def convert_word_to_bit_field(word):
"""
Convert little-endian short to a bit field
@param input_bytes
@retval an list of 1 or 0 in order
"""
try:
short = struct.unpack('<H', word)[0]
return [int(x) for x in format(short, '016b')]
except struct.error:
raise SampleException("Invalid number of bytes in word input! Found %s with input %s" % (word, len(word)))
def convert_bcd_bytes_to_ints(input_bytes):
"""
Convert block of 6 BCD-encoded bytes into a date/time structure for the instrument family
@param input_bytes 6 bytes
@retval An array of 6 ints corresponding to the date/time structure
@raise SampleException If the date/time cannot be found
"""
if len(input_bytes) != 6:
raise SampleException("Invalid number of bytes in input! Found %s" % len(input_bytes))
return [int(binascii.hexlify(c)) for c in input_bytes]
def convert_time(response):
"""
Converts the timestamp in BCD to a datetime object
"""
minutes, seconds, day, hour, year, month = convert_bcd_bytes_to_ints(response)
return datetime(year + 2000, month, day, hour, minutes, seconds) | 0.569853 | 0.285565 |
import keras
from keras.preprocessing import image
import json
import dlv
import numpy as np
import os
import inspect
class Model:
def __init__(self, k_model: keras.Model):
"""
:param k_model: keras Model
"""
self._k_model = k_model
# Set layers
self._layers = []
self._layerNameToIdx = {}
self.setLayers()
# A Dictionary that has
# Key : Input data filepath
# Value : dlv.type.Model that has Feature Maps of layer
self._indata_FeatureMap_Dict = {}
self._indata_preparedIndata_Dict = {}
# Set to be fetched Tensors
self._fetchedTensors = []
self._fetchedTensorNameToIdxMap = {}
self.setFetchedTensor()
def setLayers(self):
"""
Set dlv.Layer List
Each Layer has layer idx && layer class name && layer name
- layer idx : idx of layer in this dlv.Model
- layer class name : Class name of layer, Kinds of class name is listed in "dlv.layer.py"
- layer name : layer name which is unique in this Model
"""
k_layers = [layer for layer in self._k_model.layers]
k_layer_classs = [layer['class_name'] for layer in self._k_model.get_config()['layers']]
k_layer_names = [layer['name'] for layer in self._k_model.get_config()['layers']]
for layerIdx, k_layer, k_layer_class, k_layer_name \
in \
zip(range(len(k_layers)), k_layers, k_layer_classs, k_layer_names):
self._layers += [dlv.Layer(layerIdx, k_layer, k_layer_class, k_layer_name)]
self._layerNameToIdx[k_layer_name] = self._layers[len(self._layers)-1]
def setFetchedTensor(self):
"""
Set To be fetched Layer
self._fetchedLayers's outputs are calculated at prediction, in function "getFeatures~()"
self._fetchedLayerIdx is dict, that has map between pos of self._fetchedLayer and pos of self._layer
"""
counter = 0
for idx, layer in enumerate(self._layers):
if (layer._layerType == 'Activation' or layer._layerType == 'Dense'):
self._fetchedTensors += [self._k_model.get_layer(layer._layerName).output]
self._fetchedTensorNameToIdxMap[layer._layerName] = counter
counter += 1
def addInputData(self, imagePath: str):
"""
Add inputData to self._indata_FeatureMap_Dict
:param imagePath:
"""
# TODO
self._indata_FeatureMap_Dict[imagePath] = 0
self._indata_preparedIndata_Dict[imagePath] = self.prepareImage(imagePath)
def getLayerNames(self):
"""
:return: List of layers's "name" in this model
"""
jmodel = json.loads(self._k_model.to_json())
config = jmodel["config"]
cof = config['layers']
data = [layer['name'] for layer in config["layers"]]
return data
def prepareImage(self, imgPath: str):
"""
Convert img at imgpath to ndarray
:param imgPath:
:return:
"""
curDirPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +'/../'
img_path = curDirPath + imgPath
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# TODO
# x = preprocess_input(x)
return x
def getFeaturesFromLayerAboutImage(self, layerName: str, imgPath: str):
model = keras.models.Model(inputs=self._k_model.input, outputs=self._k_model.get_layer(layerName).output)
x = self.prepareImage(imgPath)
return model.predict(x)
def getFeaturesFromLayer(self, layerName: str):
return self.getFeaturesFromLayerAboutImage(layerName, 'dog.jpg')
def getFeaturesFromFetchedList(self):
"""
:return:
"""
dimsCompressedPreparedImg = \
[preparedImg[0,:,:,:] for preparedImg in self._indata_preparedIndata_Dict.values()]
preparedXList = np.stack(dimsCompressedPreparedImg, axis=0)
model = keras.models.Model(inputs=self._k_model.input, outputs=self._fetchedTensors)
results = model.predict(preparedXList)
each_results = []
for idx in range(len(self._indata_FeatureMap_Dict)):
each_result = []
for layerResult in results:
each_result += [layerResult[idx]]
each_results += [each_result]
for imgPath, each_result in zip(self._indata_FeatureMap_Dict.keys(),each_results):
self._indata_FeatureMap_Dict[imgPath] = FeatureMapModel(self, each_result)
class FeatureMapModel:
def __init__(self, model: Model, featureMapList):
self._model = model
self._layers = []
self._featureMapList = featureMapList | dlv/model.py | import keras
from keras.preprocessing import image
import json
import dlv
import numpy as np
import os
import inspect
class Model:
def __init__(self, k_model: keras.Model):
"""
:param k_model: keras Model
"""
self._k_model = k_model
# Set layers
self._layers = []
self._layerNameToIdx = {}
self.setLayers()
# A Dictionary that has
# Key : Input data filepath
# Value : dlv.type.Model that has Feature Maps of layer
self._indata_FeatureMap_Dict = {}
self._indata_preparedIndata_Dict = {}
# Set to be fetched Tensors
self._fetchedTensors = []
self._fetchedTensorNameToIdxMap = {}
self.setFetchedTensor()
def setLayers(self):
"""
Set dlv.Layer List
Each Layer has layer idx && layer class name && layer name
- layer idx : idx of layer in this dlv.Model
- layer class name : Class name of layer, Kinds of class name is listed in "dlv.layer.py"
- layer name : layer name which is unique in this Model
"""
k_layers = [layer for layer in self._k_model.layers]
k_layer_classs = [layer['class_name'] for layer in self._k_model.get_config()['layers']]
k_layer_names = [layer['name'] for layer in self._k_model.get_config()['layers']]
for layerIdx, k_layer, k_layer_class, k_layer_name \
in \
zip(range(len(k_layers)), k_layers, k_layer_classs, k_layer_names):
self._layers += [dlv.Layer(layerIdx, k_layer, k_layer_class, k_layer_name)]
self._layerNameToIdx[k_layer_name] = self._layers[len(self._layers)-1]
def setFetchedTensor(self):
"""
Set To be fetched Layer
self._fetchedLayers's outputs are calculated at prediction, in function "getFeatures~()"
self._fetchedLayerIdx is dict, that has map between pos of self._fetchedLayer and pos of self._layer
"""
counter = 0
for idx, layer in enumerate(self._layers):
if (layer._layerType == 'Activation' or layer._layerType == 'Dense'):
self._fetchedTensors += [self._k_model.get_layer(layer._layerName).output]
self._fetchedTensorNameToIdxMap[layer._layerName] = counter
counter += 1
def addInputData(self, imagePath: str):
"""
Add inputData to self._indata_FeatureMap_Dict
:param imagePath:
"""
# TODO
self._indata_FeatureMap_Dict[imagePath] = 0
self._indata_preparedIndata_Dict[imagePath] = self.prepareImage(imagePath)
def getLayerNames(self):
"""
:return: List of layers's "name" in this model
"""
jmodel = json.loads(self._k_model.to_json())
config = jmodel["config"]
cof = config['layers']
data = [layer['name'] for layer in config["layers"]]
return data
def prepareImage(self, imgPath: str):
"""
Convert img at imgpath to ndarray
:param imgPath:
:return:
"""
curDirPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +'/../'
img_path = curDirPath + imgPath
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# TODO
# x = preprocess_input(x)
return x
def getFeaturesFromLayerAboutImage(self, layerName: str, imgPath: str):
model = keras.models.Model(inputs=self._k_model.input, outputs=self._k_model.get_layer(layerName).output)
x = self.prepareImage(imgPath)
return model.predict(x)
def getFeaturesFromLayer(self, layerName: str):
return self.getFeaturesFromLayerAboutImage(layerName, 'dog.jpg')
def getFeaturesFromFetchedList(self):
"""
:return:
"""
dimsCompressedPreparedImg = \
[preparedImg[0,:,:,:] for preparedImg in self._indata_preparedIndata_Dict.values()]
preparedXList = np.stack(dimsCompressedPreparedImg, axis=0)
model = keras.models.Model(inputs=self._k_model.input, outputs=self._fetchedTensors)
results = model.predict(preparedXList)
each_results = []
for idx in range(len(self._indata_FeatureMap_Dict)):
each_result = []
for layerResult in results:
each_result += [layerResult[idx]]
each_results += [each_result]
for imgPath, each_result in zip(self._indata_FeatureMap_Dict.keys(),each_results):
self._indata_FeatureMap_Dict[imgPath] = FeatureMapModel(self, each_result)
class FeatureMapModel:
def __init__(self, model: Model, featureMapList):
self._model = model
self._layers = []
self._featureMapList = featureMapList | 0.194444 | 0.384623 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Trunk']
class Trunk(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
sub_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a networking V2 trunk resource within OpenStack.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
network1 = openstack.networking.Network("network1", admin_state_up=True)
subnet1 = openstack.networking.Subnet("subnet1",
cidr="192.168.1.0/24",
enable_dhcp=True,
ip_version=4,
network_id=network1.id,
no_gateway=True)
parent_port1 = openstack.networking.Port("parentPort1",
admin_state_up=True,
network_id=network1.id,
opts=pulumi.ResourceOptions(depends_on=["openstack_networking_subnet_v2.subnet_1"]))
subport1 = openstack.networking.Port("subport1",
admin_state_up=True,
network_id=network1.id,
opts=pulumi.ResourceOptions(depends_on=["openstack_networking_subnet_v2.subnet_1"]))
trunk1 = openstack.networking.Trunk("trunk1",
admin_state_up=True,
port_id=parent_port1.id,
sub_ports=[openstack.networking.TrunkSubPortArgs(
port_id=subport1.id,
segmentation_id=1,
segmentation_type="vlan",
)])
instance1 = openstack.compute.Instance("instance1",
networks=[openstack.compute.InstanceNetworkArgs(
port=trunk1.port_id,
)],
security_groups=["default"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the trunk
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing trunk.
:param pulumi.Input[str] description: Human-readable description of the trunk. Changing this
updates the name of the existing trunk.
:param pulumi.Input[str] name: A unique name for the trunk. Changing this
updates the `name` of an existing trunk.
:param pulumi.Input[str] port_id: The ID of the port to be made a subport of the trunk.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a trunk. If omitted, the
`region` argument of the provider is used. Changing this creates a new
trunk.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]] sub_ports: The set of ports that will be made subports of the trunk.
The structure of each subport is described below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the port.
:param pulumi.Input[str] tenant_id: The owner of the Trunk. Required if admin wants
to create a trunk on behalf of another tenant. Changing this creates a new trunk.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['admin_state_up'] = admin_state_up
__props__['description'] = description
__props__['name'] = name
if port_id is None and not opts.urn:
raise TypeError("Missing required property 'port_id'")
__props__['port_id'] = port_id
__props__['region'] = region
__props__['sub_ports'] = sub_ports
__props__['tags'] = tags
__props__['tenant_id'] = tenant_id
__props__['all_tags'] = None
super(Trunk, __self__).__init__(
'openstack:networking/trunk:Trunk',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
sub_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None) -> 'Trunk':
"""
Get an existing Trunk resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the trunk
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing trunk.
:param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the trunk, which have been
explicitly and implicitly added.
:param pulumi.Input[str] description: Human-readable description of the trunk. Changing this
updates the name of the existing trunk.
:param pulumi.Input[str] name: A unique name for the trunk. Changing this
updates the `name` of an existing trunk.
:param pulumi.Input[str] port_id: The ID of the port to be made a subport of the trunk.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a trunk. If omitted, the
`region` argument of the provider is used. Changing this creates a new
trunk.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]] sub_ports: The set of ports that will be made subports of the trunk.
The structure of each subport is described below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the port.
:param pulumi.Input[str] tenant_id: The owner of the Trunk. Required if admin wants
to create a trunk on behalf of another tenant. Changing this creates a new trunk.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["admin_state_up"] = admin_state_up
__props__["all_tags"] = all_tags
__props__["description"] = description
__props__["name"] = name
__props__["port_id"] = port_id
__props__["region"] = region
__props__["sub_ports"] = sub_ports
__props__["tags"] = tags
__props__["tenant_id"] = tenant_id
return Trunk(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="adminStateUp")
def admin_state_up(self) -> pulumi.Output[Optional[bool]]:
"""
Administrative up/down status for the trunk
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing trunk.
"""
return pulumi.get(self, "admin_state_up")
@property
@pulumi.getter(name="allTags")
def all_tags(self) -> pulumi.Output[Sequence[str]]:
"""
The collection of tags assigned on the trunk, which have been
explicitly and implicitly added.
"""
return pulumi.get(self, "all_tags")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Human-readable description of the trunk. Changing this
updates the name of the existing trunk.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A unique name for the trunk. Changing this
updates the `name` of an existing trunk.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="portId")
def port_id(self) -> pulumi.Output[str]:
"""
The ID of the port to be made a subport of the trunk.
"""
return pulumi.get(self, "port_id")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a trunk. If omitted, the
`region` argument of the provider is used. Changing this creates a new
trunk.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="subPorts")
def sub_ports(self) -> pulumi.Output[Optional[Sequence['outputs.TrunkSubPort']]]:
"""
The set of ports that will be made subports of the trunk.
The structure of each subport is described below.
"""
return pulumi.get(self, "sub_ports")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of string tags for the port.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
The owner of the Trunk. Required if admin wants
to create a trunk on behalf of another tenant. Changing this creates a new trunk.
"""
return pulumi.get(self, "tenant_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | sdk/python/pulumi_openstack/networking/trunk.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Trunk']
class Trunk(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
sub_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a networking V2 trunk resource within OpenStack.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
network1 = openstack.networking.Network("network1", admin_state_up=True)
subnet1 = openstack.networking.Subnet("subnet1",
cidr="192.168.1.0/24",
enable_dhcp=True,
ip_version=4,
network_id=network1.id,
no_gateway=True)
parent_port1 = openstack.networking.Port("parentPort1",
admin_state_up=True,
network_id=network1.id,
opts=pulumi.ResourceOptions(depends_on=["openstack_networking_subnet_v2.subnet_1"]))
subport1 = openstack.networking.Port("subport1",
admin_state_up=True,
network_id=network1.id,
opts=pulumi.ResourceOptions(depends_on=["openstack_networking_subnet_v2.subnet_1"]))
trunk1 = openstack.networking.Trunk("trunk1",
admin_state_up=True,
port_id=parent_port1.id,
sub_ports=[openstack.networking.TrunkSubPortArgs(
port_id=subport1.id,
segmentation_id=1,
segmentation_type="vlan",
)])
instance1 = openstack.compute.Instance("instance1",
networks=[openstack.compute.InstanceNetworkArgs(
port=trunk1.port_id,
)],
security_groups=["default"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the trunk
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing trunk.
:param pulumi.Input[str] description: Human-readable description of the trunk. Changing this
updates the name of the existing trunk.
:param pulumi.Input[str] name: A unique name for the trunk. Changing this
updates the `name` of an existing trunk.
:param pulumi.Input[str] port_id: The ID of the port to be made a subport of the trunk.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a trunk. If omitted, the
`region` argument of the provider is used. Changing this creates a new
trunk.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]] sub_ports: The set of ports that will be made subports of the trunk.
The structure of each subport is described below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the port.
:param pulumi.Input[str] tenant_id: The owner of the Trunk. Required if admin wants
to create a trunk on behalf of another tenant. Changing this creates a new trunk.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['admin_state_up'] = admin_state_up
__props__['description'] = description
__props__['name'] = name
if port_id is None and not opts.urn:
raise TypeError("Missing required property 'port_id'")
__props__['port_id'] = port_id
__props__['region'] = region
__props__['sub_ports'] = sub_ports
__props__['tags'] = tags
__props__['tenant_id'] = tenant_id
__props__['all_tags'] = None
super(Trunk, __self__).__init__(
'openstack:networking/trunk:Trunk',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
sub_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None) -> 'Trunk':
"""
Get an existing Trunk resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the trunk
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing trunk.
:param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the trunk, which have been
explicitly and implicitly added.
:param pulumi.Input[str] description: Human-readable description of the trunk. Changing this
updates the name of the existing trunk.
:param pulumi.Input[str] name: A unique name for the trunk. Changing this
updates the `name` of an existing trunk.
:param pulumi.Input[str] port_id: The ID of the port to be made a subport of the trunk.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a trunk. If omitted, the
`region` argument of the provider is used. Changing this creates a new
trunk.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrunkSubPortArgs']]]] sub_ports: The set of ports that will be made subports of the trunk.
The structure of each subport is described below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the port.
:param pulumi.Input[str] tenant_id: The owner of the Trunk. Required if admin wants
to create a trunk on behalf of another tenant. Changing this creates a new trunk.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["admin_state_up"] = admin_state_up
__props__["all_tags"] = all_tags
__props__["description"] = description
__props__["name"] = name
__props__["port_id"] = port_id
__props__["region"] = region
__props__["sub_ports"] = sub_ports
__props__["tags"] = tags
__props__["tenant_id"] = tenant_id
return Trunk(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="adminStateUp")
    def admin_state_up(self) -> pulumi.Output[Optional[bool]]:
        """
        Administrative up/down status for the trunk
        (must be "true" or "false" if provided). Changing this updates the
        `admin_state_up` of an existing trunk.
        """
        # Resolved from the provider-side camelCase attribute "adminStateUp".
        return pulumi.get(self, "admin_state_up")
    @property
    @pulumi.getter(name="allTags")
    def all_tags(self) -> pulumi.Output[Sequence[str]]:
        """
        The collection of tags assigned on the trunk, which have been
        explicitly and implicitly added.
        """
        # Output-only property (never set directly; see __init__ where it is
        # initialised to None and filled in by the provider).
        return pulumi.get(self, "all_tags")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Human-readable description of the trunk. Changing this
        updates the name of the existing trunk.
        """
        # Optional: unresolved when the trunk was created without a description.
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        A unique name for the trunk. Changing this
        updates the `name` of an existing trunk.
        """
        # Always resolved: the provider assigns a name even if none was given.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="portId")
    def port_id(self) -> pulumi.Output[str]:
        """
        The ID of the port to be made a subport of the trunk.
        """
        # Required at creation time (see the check in __init__), hence non-Optional.
        return pulumi.get(self, "port_id")
    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The region in which to obtain the V2 networking client.
        A networking client is needed to create a trunk. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        trunk.
        """
        # Resolved to the effective region even when it was inherited from
        # the provider configuration.
        return pulumi.get(self, "region")
    @property
    @pulumi.getter(name="subPorts")
    def sub_ports(self) -> pulumi.Output[Optional[Sequence['outputs.TrunkSubPort']]]:
        """
        The set of ports that will be made subports of the trunk.
        The structure of each subport is described below.
        """
        # Resolved from the provider-side camelCase attribute "subPorts".
        return pulumi.get(self, "sub_ports")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A set of string tags for the port.
        """
        # Only the explicitly-set tags; see `all_tags` for implicit ones too.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Output[str]:
        """
        The owner of the Trunk. Required if admin wants
        to create a trunk on behalf of another tenant. Changing this creates a new trunk.
        """
        # Resolved from the provider-side camelCase attribute "tenantId".
        return pulumi.get(self, "tenant_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Return the camelCase provider name for a snake_case input property."""
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | 0.839603 | 0.199035 |
import json
import os
import uuid
import frappe
from frappe.utils import get_files_path
from pdf_text_overlay import pdf_writer
@frappe.whitelist()
def get_filled_pdf():
    """
    Receive template_id, data and returns a pdf file.

    Post the data to be filled on the pdf form.
    The value of the key(name) in configuration data must be
    present as the key in the post data.
    Call fill_pdf_form() method to fill the pdf form template.

    :returns: "Success" after attaching the generated PDF to the response,
              or an error dict (with HTTP 400) when filling fails.
    """
    try:
        file_name = fill_pdf_form(
            json.loads(frappe.local.form_dict.data)
        )
    except KeyError as e:
        frappe.local.response['http_status_code'] = 400
        # str(e) instead of e.message: BaseException.message was removed in
        # Python 3 (deprecated since Python 2.6).
        return {"error": "Key not found ", "key": str(e)}
    except ValueError as e:
        frappe.local.response['http_status_code'] = 400
        return {"error": str(e)}
    except IOError as e:
        frappe.local.response['http_status_code'] = 400
        return {"error": str(e)}
    # Read contents of file
    frappe.local.response.filename = "response.pdf"
    try:
        with open(file_name, "rb") as fileobj:
            filedata = fileobj.read()
    finally:
        # Delete the temporary file even if reading it fails, so failed
        # requests do not leak uuid-named pdf files on disk.
        delete_file(file_name)
    # Set response type and response file content
    frappe.local.response.filecontent = filedata
    frappe.local.response.type = "download"
    return "Success"
def fill_pdf_form(post_data):
    """
    Receive template_id and post_data and returns a file_name.

    New pdf file is created to store the response of pdf_text_overlay library.
    Call pdf_text_overlay library to fill the pdf form.

    :param post_data: the data which is used to fill the pdf; must contain
        the key 'template_id' identifying a record in the Templates doctype.
    :returns: name of the newly written pdf file (uuid4-based, in the cwd).
    :raises ValueError: when no Templates record matches the template_id.
    :raises IOError: when the font or template file cannot be opened.
    """
    # Get template, configuration and font from doctype using template id
    data = frappe.get_all(
        'Templates',
        filters={"templateId": post_data['template_id']},
        fields=['template', 'configuration', 'font']
    )
    try:
        # Tuple-unpacking asserts exactly one matching template row.
        data, = data
    except ValueError:
        raise ValueError(
            'Could not find data for template ID: {}'.format(
                post_data['template_id'])
        )
    font_path = frappe.get_doc('Fonts', data['font'])
    template_path = data["template"]
    configuration = data["configuration"]

    def _open_rb(path):
        # open() replaces the Python-2-only file() builtin; a missing file is
        # re-raised with the module's historical error message.
        try:
            return open(path, "rb")
        except IOError as io:
            raise IOError('File not found: {}'.format(io.filename))

    # Create a unique file name using uuid
    file_name = str(uuid.uuid4()) + '.pdf'
    # Context managers guarantee the handles are closed (the original code
    # leaked the font and template file objects).
    with _open_rb(get_file_path(font_path.font_name)) as font, \
            _open_rb(get_file_path(template_path)) as pdf_template:
        # Fill the Pdf using pdf_text_overlay library
        pdf_file_object = pdf_writer(
            pdf_template, json.loads(configuration),
            post_data, font
        )
        # Save the pdf_file_object while the template stream is still open:
        # the writer may read page content lazily from the source file.
        with open(file_name, "wb") as output_stream:
            pdf_file_object.write(output_stream)
    return file_name
def get_file_path(file_path):
    """Resolve a frappe file URL to a filesystem path.

    :param file_path: a "/files/..." or "/private/files/..." URL, or a bare
        file name (treated as public, i.e. "/files/<name>").
    :returns: the resolved path from frappe.utils.get_files_path.
    """
    if "/" not in file_path:
        file_path = "/files/" + file_path
    if file_path.startswith("/private/files/"):
        file_path = get_files_path(
            *file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
    elif file_path.startswith("/files/"):
        file_path = get_files_path(
            *file_path.split("/files/", 1)[1].split("/"))
    else:
        # Bug fix: .format() used to be applied to the *return value* of
        # frappe.throw() (which raises first), so the {0} placeholder was
        # never filled in the shown message.
        frappe.throw(
            "There is some problem with the file url: {0}".format(file_path))
    return file_path
def delete_file(file_name):
    """
    Delete a file if exists.

    :param file_name: path of the file to remove; silently a no-op when the
        file is already gone.
    """
    # exists() guard keeps repeated calls (or races with cleanup) harmless.
    if os.path.exists(file_name):
os.remove(file_name) | pdfrender/api/pdfrender.py | import json
import os
import uuid
import frappe
from frappe.utils import get_files_path
from pdf_text_overlay import pdf_writer
@frappe.whitelist()
def get_filled_pdf():
    """
    Receive template_id, data and returns a pdf file.

    Post the data to be filled on the pdf form.
    The value of the key(name) in configuration data must be
    present as the key in the post data.
    Call fill_pdf_form() method to fill the pdf form template.

    :returns: "Success" after attaching the generated PDF to the response,
              or an error dict (with HTTP 400) when filling fails.
    """
    try:
        file_name = fill_pdf_form(
            json.loads(frappe.local.form_dict.data)
        )
    except KeyError as e:
        frappe.local.response['http_status_code'] = 400
        # str(e) instead of e.message: BaseException.message was removed in
        # Python 3 (deprecated since Python 2.6).
        return {"error": "Key not found ", "key": str(e)}
    except ValueError as e:
        frappe.local.response['http_status_code'] = 400
        return {"error": str(e)}
    except IOError as e:
        frappe.local.response['http_status_code'] = 400
        return {"error": str(e)}
    # Read contents of file
    frappe.local.response.filename = "response.pdf"
    try:
        with open(file_name, "rb") as fileobj:
            filedata = fileobj.read()
    finally:
        # Delete the temporary file even if reading it fails, so failed
        # requests do not leak uuid-named pdf files on disk.
        delete_file(file_name)
    # Set response type and response file content
    frappe.local.response.filecontent = filedata
    frappe.local.response.type = "download"
    return "Success"
def fill_pdf_form(post_data):
    """
    Receive template_id and post_data and returns a file_name.

    New pdf file is created to store the response of pdf_text_overlay library.
    Call pdf_text_overlay library to fill the pdf form.

    :param post_data: the data which is used to fill the pdf; must contain
        the key 'template_id' identifying a record in the Templates doctype.
    :returns: name of the newly written pdf file (uuid4-based, in the cwd).
    :raises ValueError: when no Templates record matches the template_id.
    :raises IOError: when the font or template file cannot be opened.
    """
    # Get template, configuration and font from doctype using template id
    data = frappe.get_all(
        'Templates',
        filters={"templateId": post_data['template_id']},
        fields=['template', 'configuration', 'font']
    )
    try:
        # Tuple-unpacking asserts exactly one matching template row.
        data, = data
    except ValueError:
        raise ValueError(
            'Could not find data for template ID: {}'.format(
                post_data['template_id'])
        )
    font_path = frappe.get_doc('Fonts', data['font'])
    template_path = data["template"]
    configuration = data["configuration"]

    def _open_rb(path):
        # open() replaces the Python-2-only file() builtin; a missing file is
        # re-raised with the module's historical error message.
        try:
            return open(path, "rb")
        except IOError as io:
            raise IOError('File not found: {}'.format(io.filename))

    # Create a unique file name using uuid
    file_name = str(uuid.uuid4()) + '.pdf'
    # Context managers guarantee the handles are closed (the original code
    # leaked the font and template file objects).
    with _open_rb(get_file_path(font_path.font_name)) as font, \
            _open_rb(get_file_path(template_path)) as pdf_template:
        # Fill the Pdf using pdf_text_overlay library
        pdf_file_object = pdf_writer(
            pdf_template, json.loads(configuration),
            post_data, font
        )
        # Save the pdf_file_object while the template stream is still open:
        # the writer may read page content lazily from the source file.
        with open(file_name, "wb") as output_stream:
            pdf_file_object.write(output_stream)
    return file_name
def get_file_path(file_path):
    """Resolve a frappe file URL to a filesystem path.

    :param file_path: a "/files/..." or "/private/files/..." URL, or a bare
        file name (treated as public, i.e. "/files/<name>").
    :returns: the resolved path from frappe.utils.get_files_path.
    """
    if "/" not in file_path:
        file_path = "/files/" + file_path
    if file_path.startswith("/private/files/"):
        file_path = get_files_path(
            *file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
    elif file_path.startswith("/files/"):
        file_path = get_files_path(
            *file_path.split("/files/", 1)[1].split("/"))
    else:
        # Bug fix: .format() used to be applied to the *return value* of
        # frappe.throw() (which raises first), so the {0} placeholder was
        # never filled in the shown message.
        frappe.throw(
            "There is some problem with the file url: {0}".format(file_path))
    return file_path
def delete_file(file_name):
    """
    Delete a file if exists.

    :param file_name: path of the file to remove; silently a no-op when the
        file is already gone.
    """
    # exists() guard keeps repeated calls (or races with cleanup) harmless.
    if os.path.exists(file_name):
os.remove(file_name) | 0.449876 | 0.22093 |
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from datetime import datetime
from dateutil.tz import tzlocal
import pytz
import re
import numpy as np
import json
import pandas as pd
import datajoint as dj
import warnings
from pipeline import (lab, experiment, ephys, psth, tracking, virus)
import pynwb
from pynwb import NWBFile, NWBHDF5IO
warnings.filterwarnings('ignore', module='pynwb')
# ============================== SET CONSTANTS ==========================================
# Default location for exported .nwb files, relative to the working directory.
default_nwb_output_dir = os.path.join('data', 'NWB 2.0')
# Midnight placeholder used wherever only a date is known.
zero_zero_time = datetime.strptime('00:00:00', '%H:%M:%S').time()  # no precise time available
# Acquisition-side hardware filter description, recorded per electrode.
hardware_filter = 'Bandpass filtered 300-6K Hz'
# Sampling rate of the raw extracellular voltage traces (Hz).
ecephys_fs = 19531.25
institution = 'Janelia Research Campus'
# Publication/description metadata per project, keyed by the project_name
# stored in experiment.ProjectSession.
session_description_mapper = {
    'li2015': dict(
        related_publications='doi:10.1038/nature14178',
        experiment_description='Extracellular electrophysiology recordings with optogenetic perturbations performed on anterior lateral region of the mouse cortex during object location discrimination task',
        keywords=['motor planning', 'preparatory activity', 'whiskers',
                  'optogenetic perturbations', 'extracellular electrophysiology']),
    'lidaie2016': dict(
        related_publications='doi:10.1038/nature17643',
        experiment_description='Extracellular electrophysiology recordings with optogenetic perturbations performed on anterior lateral region of the mouse cortex during object location discrimination task',
        keywords=['motor planning', 'premotor cortex', 'whiskers',
                  'optogenetic perturbations', 'extracellular electrophysiology'])}
def export_to_nwb(session_key, nwb_output_dir=default_nwb_output_dir, save=False, overwrite=False):
    """
    Export one experimental session from the DataJoint pipeline to NWB 2.0.

    :param session_key: DataJoint restriction identifying a single experiment.Session.
    :param nwb_output_dir: directory the .nwb file is written to (created on demand).
    :param save: when True, write the file to disk; otherwise only build it in memory.
    :param overwrite: when False, skip writing if the target file already exists.
    :return: the constructed pynwb.NWBFile (returned whether or not it was saved).
    """
    this_session = (experiment.Session & session_key).fetch1()
    print(f'Exporting to NWB 2.0 for session: {this_session}...')
    # ===============================================================================
    # ============================== META INFORMATION ===============================
    # ===============================================================================
    sess_desc = session_description_mapper[(experiment.ProjectSession & session_key).fetch1('project_name')]
    # -- NWB file - a NWB2.0 file for each session
    # Identifier encodes animal id, session date and session number,
    # e.g. "ANM123456_2015-01-01_1". Session start uses the midnight
    # placeholder because no precise start time is stored in the pipeline.
    nwbfile = NWBFile(identifier='_'.join(
        ['ANM' + str(this_session['subject_id']),
         this_session['session_date'].strftime('%Y-%m-%d'),
         str(this_session['session'])]),
        session_description='',
        session_start_time=datetime.combine(this_session['session_date'], zero_zero_time),
        file_create_date=datetime.now(tzlocal()),
        experimenter=this_session['username'],
        institution=institution,
        experiment_description=sess_desc['experiment_description'],
        related_publications=sess_desc['related_publications'],
        keywords=sess_desc['keywords'])
    # -- subject
    subj = (lab.Subject & session_key).aggr(lab.Subject.Strain, ..., strains='GROUP_CONCAT(animal_strain)').fetch1()
    nwbfile.subject = pynwb.file.Subject(
        subject_id=str(this_session['subject_id']),
        description=f'source: {subj["animal_source"]}; strains: {subj["strains"]}',
        genotype=' x '.join((lab.Subject.GeneModification
                             & subj).fetch('gene_modification')),
        sex=subj['sex'],
        species=subj['species'],
        date_of_birth=datetime.combine(subj['date_of_birth'], zero_zero_time) if subj['date_of_birth'] else None)
    # -- virus
    # Virus injections serialized as a JSON string; subject-level keys are
    # dropped since they are already captured in nwbfile.subject.
    nwbfile.virus = json.dumps([{k: str(v) for k, v in virus_injection.items() if k not in subj}
                                for virus_injection in virus.VirusInjection * virus.Virus & session_key])
    # ===============================================================================
    # ======================== EXTRACELLULAR & CLUSTERING ===========================
    # ===============================================================================
    """
    In the event of multiple probe recording (i.e. multiple probe insertions), the clustering results
    (and the associated units) are associated with the corresponding probe.
    Each probe insertion is associated with one ElectrodeConfiguration (which may define multiple electrode groups)
    """
    dj_insert_location = ephys.ProbeInsertion.InsertionLocation.aggr(
        ephys.ProbeInsertion.RecordableBrainRegion.proj(brain_region='CONCAT(hemisphere, " ", brain_area)'), ...,
        brain_regions='GROUP_CONCAT(brain_region)')
    for probe_insertion in ephys.ProbeInsertion & session_key:
        electrode_config = (lab.ElectrodeConfig & probe_insertion).fetch1()
        electrode_groups = {}
        for electrode_group in lab.ElectrodeConfig.ElectrodeGroup & electrode_config:
            # One NWB ElectrodeGroup per pipeline electrode group; the insertion
            # location (with aggregated brain regions) is stored as JSON.
            electrode_groups[electrode_group['electrode_group']] = nwbfile.create_electrode_group(
                name=electrode_config['electrode_config_name'] + '_g' + str(electrode_group['electrode_group']),
                description='N/A',
                device=nwbfile.create_device(name=electrode_config['probe']),
                location=json.dumps({k: str(v) for k, v in (dj_insert_location & session_key).fetch1().items()
                                     if k not in dj_insert_location.primary_key}))
        for chn in (lab.ElectrodeConfig.Electrode * lab.Probe.Electrode & electrode_config).fetch(as_dict=True):
            # NOTE(review): falsy coordinates (including a literal 0) are mapped
            # to NaN here — confirm 0 is never a valid probe coordinate.
            nwbfile.add_electrode(id=chn['electrode'],
                                  group=electrode_groups[chn['electrode_group']],
                                  filtering=hardware_filter,
                                  imp=-1.,
                                  x=chn['x_coord'] if chn['x_coord'] else np.nan,
                                  y=chn['y_coord'] if chn['y_coord'] else np.nan,
                                  z=chn['z_coord'] if chn['z_coord'] else np.nan,
                                  location=electrode_groups[chn['electrode_group']].location)
        # --- unit spike times ---
        # NOTE(review): add_unit_column raises on duplicate column names, so this
        # assumes a single probe insertion per session — confirm for multi-probe data.
        nwbfile.add_unit_column(name='sampling_rate', description='Sampling rate of the raw voltage traces (Hz)')
        nwbfile.add_unit_column(name='quality', description='unit quality from clustering')
        nwbfile.add_unit_column(name='posx', description='estimated x position of the unit relative to probe (0,0) (um)')
        nwbfile.add_unit_column(name='posy', description='estimated y position of the unit relative to probe (0,0) (um)')
        nwbfile.add_unit_column(name='cell_type', description='cell type (e.g. fast spiking or pyramidal)')
        for unit_key in (ephys.Unit * ephys.UnitCellType & probe_insertion).fetch('KEY'):
            unit = (ephys.Unit * ephys.UnitCellType & probe_insertion & unit_key).fetch1()
            # build observation intervals: note the early trials where spikes were not recorded
            first_spike, last_spike = unit['spike_times'][0], unit['spike_times'][-1]
            # Expand the interval to the enclosing trial boundaries when available.
            obs_start = (experiment.SessionTrial & unit_key & f'start_time < {first_spike}').fetch(
                'start_time', order_by='start_time DESC', limit=1)
            obs_stop = (experiment.SessionTrial & unit_key & f'stop_time > {last_spike}').fetch(
                'stop_time', order_by='stop_time', limit=1)
            obs_intervals = [[float(obs_start[0]) if obs_start.size > 0 else first_spike,
                              float(obs_stop[0]) if obs_stop.size > 0 else last_spike]]
            # make an electrode table region (which electrode(s) is this unit coming from)
            nwbfile.add_unit(id=unit['unit'],
                             electrodes=np.where(np.array(nwbfile.electrodes.id.data) == unit['electrode'])[0],
                             electrode_group=electrode_groups[unit['electrode_group']],
                             obs_intervals=obs_intervals,
                             sampling_rate=ecephys_fs,
                             quality=unit['unit_quality'],
                             posx=unit['unit_posx'],
                             posy=unit['unit_posy'],
                             cell_type=unit['cell_type'],
                             spike_times=unit['spike_times'],
                             waveform_mean=np.mean(unit['waveform'], axis=0),
                             waveform_sd=np.std(unit['waveform'], axis=0))
    # ===============================================================================
    # ============================= BEHAVIOR TRACKING ===============================
    # ===============================================================================
    if tracking.LickTrace * experiment.SessionTrial & session_key:
        # re-concatenating trialized tracking traces
        lick_traces, time_vecs, trial_starts = (tracking.LickTrace * experiment.SessionTrial & session_key).fetch(
            'lick_trace', 'lick_trace_timestamps', 'start_time')
        behav_acq = pynwb.behavior.BehavioralTimeSeries(name='BehavioralTimeSeries')
        nwbfile.add_acquisition(behav_acq)
        # Per-trial timestamps are shifted by each trial's start time so the
        # concatenated trace lives on a single session-wide time axis.
        behav_acq.create_timeseries(name='lick_trace', unit='a.u.', conversion=1.0,
                                    data=np.hstack(lick_traces),
                                    description="Time-series of the animal's tongue movement when licking",
                                    timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
    # ===============================================================================
    # ============================= PHOTO-STIMULATION ===============================
    # ===============================================================================
    stim_sites = {}
    for photostim in experiment.Photostim * experiment.PhotostimBrainRegion * lab.PhotostimDevice & session_key:
        # Re-use an already-registered device for repeated photostim entries.
        stim_device = (nwbfile.get_device(photostim['photostim_device'])
                       if photostim['photostim_device'] in nwbfile.devices
                       else nwbfile.create_device(name=photostim['photostim_device']))
        stim_site = pynwb.ogen.OptogeneticStimulusSite(
            name=photostim['stim_laterality'] + ' ' + photostim['stim_brain_area'],
            device=stim_device,
            excitation_lambda=float(photostim['excitation_wavelength']),
            location=json.dumps([{k: v for k, v in stim_locs.items()
                                  if k not in experiment.Photostim.primary_key}
                                 for stim_locs in (experiment.Photostim.PhotostimLocation.proj(..., '-brain_area')
                                                   & photostim).fetch(as_dict=True)], default=str),
            description='')
        nwbfile.add_ogen_site(stim_site)
        stim_sites[photostim['photo_stim']] = stim_site
    # re-concatenating trialized photostim traces
    dj_photostim = (experiment.PhotostimTrace * experiment.SessionTrial * experiment.PhotostimEvent
                    * experiment.Photostim & session_key)
    for photo_stim, stim_site in stim_sites.items():
        if dj_photostim & {'photo_stim': photo_stim}:
            aom_input_trace, laser_power, time_vecs, trial_starts = (
                dj_photostim & {'photo_stim': photo_stim}).fetch(
                'aom_input_trace', 'laser_power', 'photostim_timestamps', 'start_time')
            # conversion=1e-3: stored traces are presumably in mV/mW —
            # TODO confirm units against the acquisition pipeline.
            aom_series = pynwb.ogen.OptogeneticSeries(
                name=stim_site.name + '_aom_input_trace',
                site=stim_site, resolution=0.0, conversion=1e-3,
                data=np.hstack(aom_input_trace),
                timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
            laser_series = pynwb.ogen.OptogeneticSeries(
                name=stim_site.name + '_laser_power',
                site=stim_site, resolution=0.0, conversion=1e-3,
                data=np.hstack(laser_power),
                timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
            nwbfile.add_stimulus(aom_series)
            nwbfile.add_stimulus(laser_series)
    # ===============================================================================
    # =============================== BEHAVIOR TRIALS ===============================
    # ===============================================================================
    # =============== TrialSet ====================
    # NWB 'trial' (of type dynamic table) by default comes with three mandatory attributes: 'start_time' and 'stop_time'
    # Other trial-related information needs to be added in to the trial-table as additional columns (with column name
    # and column description)
    dj_trial = experiment.SessionTrial * experiment.BehaviorTrial
    skip_adding_columns = experiment.Session.primary_key + ['trial_uid', 'trial']
    if experiment.SessionTrial & session_key:
        # Get trial descriptors from TrialSet.Trial and TrialStimInfo
        # Column descriptions are scraped from the DataJoint table heading text.
        trial_columns = [{'name': tag,
                          'description': re.sub('\s+:|\s+', ' ', re.search(
                              f'(?<={tag})(.*)', str(dj_trial.heading)).group()).strip()}
                         for tag in dj_trial.heading.names
                         if tag not in skip_adding_columns + ['start_time', 'stop_time']]
        # Add new table columns to nwb trial-table for trial-label
        for c in trial_columns:
            nwbfile.add_trial_column(**c)
        # Add entry to the trial-table
        for trial in (dj_trial & session_key).fetch(as_dict=True):
            trial['start_time'] = float(trial['start_time'])
            trial['stop_time'] = float(trial['stop_time']) if trial['stop_time'] else np.nan
            trial['id'] = trial['trial']  # rename 'trial_id' to 'id'
            [trial.pop(k) for k in skip_adding_columns]
            nwbfile.add_trial(**trial)
    # ===============================================================================
    # =============================== BEHAVIOR TRIAL EVENTS ==========================
    # ===============================================================================
    behav_event = pynwb.behavior.BehavioralEvents(name='BehavioralEvents')
    nwbfile.add_acquisition(behav_event)
    for trial_event_type in (experiment.TrialEventType & experiment.TrialEvent & session_key).fetch('trial_event_type'):
        event_times, trial_starts = (experiment.TrialEvent * experiment.SessionTrial
                                     & session_key & {'trial_event_type': trial_event_type}).fetch(
            'trial_event_time', 'start_time')
        if len(event_times) > 0:
            # Shift per-trial event times onto the session-wide time axis.
            event_times = np.hstack(event_times.astype(float) + trial_starts.astype(float))
            behav_event.create_timeseries(name=trial_event_type, unit='a.u.', conversion=1.0,
                                          data=np.full_like(event_times, 1),
                                          timestamps=event_times)
    photostim_event_time, trial_starts, photo_stim, power, duration = (
        experiment.PhotostimEvent * experiment.SessionTrial & session_key).fetch(
        'photostim_event_time', 'start_time', 'photo_stim', 'power', 'duration')
    if len(photostim_event_time) > 0:
        # Start events carry the stim power as data; stop events carry zeros and
        # are offset by each event's duration.
        # NOTE(review): control_description is given the stim_sites dict of
        # OptogeneticStimulusSite objects — confirm pynwb accepts this mapping.
        behav_event.create_timeseries(name='photostim_start_time', unit='a.u.', conversion=1.0,
                                      data=power,
                                      timestamps=photostim_event_time.astype(float) + trial_starts.astype(float),
                                      control=photo_stim.astype('uint8'), control_description=stim_sites)
        behav_event.create_timeseries(name='photostim_stop_time', unit='a.u.', conversion=1.0,
                                      data=np.full_like(photostim_event_time, 0),
                                      timestamps=photostim_event_time.astype(float) + duration.astype(float) + trial_starts.astype(float),
                                      control=photo_stim.astype('uint8'), control_description=stim_sites)
    # =============== Write NWB 2.0 file ===============
    if save:
        save_file_name = ''.join([nwbfile.identifier, '.nwb'])
        if not os.path.exists(nwb_output_dir):
            os.makedirs(nwb_output_dir)
        # Honour overwrite=False: leave a previously exported file untouched.
        if not overwrite and os.path.exists(os.path.join(nwb_output_dir, save_file_name)):
            return nwbfile
        with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name), mode='w') as io:
            io.write(nwbfile)
        print(f'Write NWB 2.0 file: {save_file_name}')
    return nwbfile
# ============================== EXPORT ALL ==========================================
if __name__ == '__main__':
    # Optional first CLI argument overrides the output directory.
    if len(sys.argv) > 1:
        nwb_outdir = sys.argv[1]
    else:
        nwb_outdir = default_nwb_output_dir
    # Export every session currently present in the pipeline.
    for skey in experiment.Session.fetch('KEY'):
export_to_nwb(skey, nwb_output_dir=nwb_outdir, save=True) | pipeline/export/datajoint_to_nwb.py | import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from datetime import datetime
from dateutil.tz import tzlocal
import pytz
import re
import numpy as np
import json
import pandas as pd
import datajoint as dj
import warnings
from pipeline import (lab, experiment, ephys, psth, tracking, virus)
import pynwb
from pynwb import NWBFile, NWBHDF5IO
warnings.filterwarnings('ignore', module='pynwb')
# ============================== SET CONSTANTS ==========================================
# Default location for exported .nwb files, relative to the working directory.
default_nwb_output_dir = os.path.join('data', 'NWB 2.0')
# Midnight placeholder used wherever only a date is known.
zero_zero_time = datetime.strptime('00:00:00', '%H:%M:%S').time()  # no precise time available
# Acquisition-side hardware filter description, recorded per electrode.
hardware_filter = 'Bandpass filtered 300-6K Hz'
# Sampling rate of the raw extracellular voltage traces (Hz).
ecephys_fs = 19531.25
institution = 'Janelia Research Campus'
# Publication/description metadata per project, keyed by the project_name
# stored in experiment.ProjectSession.
session_description_mapper = {
    'li2015': dict(
        related_publications='doi:10.1038/nature14178',
        experiment_description='Extracellular electrophysiology recordings with optogenetic perturbations performed on anterior lateral region of the mouse cortex during object location discrimination task',
        keywords=['motor planning', 'preparatory activity', 'whiskers',
                  'optogenetic perturbations', 'extracellular electrophysiology']),
    'lidaie2016': dict(
        related_publications='doi:10.1038/nature17643',
        experiment_description='Extracellular electrophysiology recordings with optogenetic perturbations performed on anterior lateral region of the mouse cortex during object location discrimination task',
        keywords=['motor planning', 'premotor cortex', 'whiskers',
                  'optogenetic perturbations', 'extracellular electrophysiology'])}
def export_to_nwb(session_key, nwb_output_dir=default_nwb_output_dir, save=False, overwrite=False):
    """Build an NWB 2.0 file for one experiment.Session and optionally save it.

    Args:
        session_key: DataJoint key restricting experiment.Session to one session.
        nwb_output_dir: directory the ``<identifier>.nwb`` file goes into.
        save: when True, write the file to disk.
        overwrite: when False an existing file of the same name is kept and the
            in-memory object is returned without writing.

    Returns:
        The populated :class:`pynwb.NWBFile`.
    """
    this_session = (experiment.Session & session_key).fetch1()
    print(f'Exporting to NWB 2.0 for session: {this_session}...')
    # ===============================================================================
    # ============================== META INFORMATION ===============================
    # ===============================================================================
    sess_desc = session_description_mapper[(experiment.ProjectSession & session_key).fetch1('project_name')]
    # -- NWB file - a NWB2.0 file for each session
    nwbfile = NWBFile(identifier='_'.join(
        ['ANM' + str(this_session['subject_id']),
         this_session['session_date'].strftime('%Y-%m-%d'),
         str(this_session['session'])]),
        session_description='',
        # only the session date is known; time-of-day is a 00:00:00 placeholder
        session_start_time=datetime.combine(this_session['session_date'], zero_zero_time),
        file_create_date=datetime.now(tzlocal()),
        experimenter=this_session['username'],
        institution=institution,
        experiment_description=sess_desc['experiment_description'],
        related_publications=sess_desc['related_publications'],
        keywords=sess_desc['keywords'])
    # -- subject
    subj = (lab.Subject & session_key).aggr(lab.Subject.Strain, ..., strains='GROUP_CONCAT(animal_strain)').fetch1()
    nwbfile.subject = pynwb.file.Subject(
        subject_id=str(this_session['subject_id']),
        description=f'source: {subj["animal_source"]}; strains: {subj["strains"]}',
        genotype=' x '.join((lab.Subject.GeneModification
                             & subj).fetch('gene_modification')),
        sex=subj['sex'],
        species=subj['species'],
        date_of_birth=datetime.combine(subj['date_of_birth'], zero_zero_time) if subj['date_of_birth'] else None)
    # -- virus
    # Serialized as a JSON string because NWBFile.virus is free text.
    nwbfile.virus = json.dumps([{k: str(v) for k, v in virus_injection.items() if k not in subj}
                                for virus_injection in virus.VirusInjection * virus.Virus & session_key])
    # ===============================================================================
    # ======================== EXTRACELLULAR & CLUSTERING ===========================
    # ===============================================================================
    """
    In the event of multiple probe recording (i.e. multiple probe insertions), the clustering results
    (and the associated units) are associated with the corresponding probe.
    Each probe insertion is associated with one ElectrodeConfiguration (which may define multiple electrode groups)
    """
    dj_insert_location = ephys.ProbeInsertion.InsertionLocation.aggr(
        ephys.ProbeInsertion.RecordableBrainRegion.proj(brain_region='CONCAT(hemisphere, " ", brain_area)'), ...,
        brain_regions='GROUP_CONCAT(brain_region)')
    for probe_insertion in ephys.ProbeInsertion & session_key:
        electrode_config = (lab.ElectrodeConfig & probe_insertion).fetch1()
        electrode_groups = {}
        for electrode_group in lab.ElectrodeConfig.ElectrodeGroup & electrode_config:
            electrode_groups[electrode_group['electrode_group']] = nwbfile.create_electrode_group(
                name=electrode_config['electrode_config_name'] + '_g' + str(electrode_group['electrode_group']),
                description='N/A',
                device=nwbfile.create_device(name=electrode_config['probe']),
                # NOTE(review): fetch1() on the session-restricted insertion
                # location implies one probe insertion per session — confirm.
                location=json.dumps({k: str(v) for k, v in (dj_insert_location & session_key).fetch1().items()
                                     if k not in dj_insert_location.primary_key}))
        for chn in (lab.ElectrodeConfig.Electrode * lab.Probe.Electrode & electrode_config).fetch(as_dict=True):
            # NOTE(review): truthiness check maps a legitimate coordinate of 0
            # to NaN; `is not None` is likely intended — confirm upstream data.
            nwbfile.add_electrode(id=chn['electrode'],
                                  group=electrode_groups[chn['electrode_group']],
                                  filtering=hardware_filter,
                                  imp=-1.,
                                  x=chn['x_coord'] if chn['x_coord'] else np.nan,
                                  y=chn['y_coord'] if chn['y_coord'] else np.nan,
                                  z=chn['z_coord'] if chn['z_coord'] else np.nan,
                                  location=electrode_groups[chn['electrode_group']].location)
        # --- unit spike times ---
        nwbfile.add_unit_column(name='sampling_rate', description='Sampling rate of the raw voltage traces (Hz)')
        nwbfile.add_unit_column(name='quality', description='unit quality from clustering')
        nwbfile.add_unit_column(name='posx', description='estimated x position of the unit relative to probe (0,0) (um)')
        nwbfile.add_unit_column(name='posy', description='estimated y position of the unit relative to probe (0,0) (um)')
        nwbfile.add_unit_column(name='cell_type', description='cell type (e.g. fast spiking or pyramidal)')
        for unit_key in (ephys.Unit * ephys.UnitCellType & probe_insertion).fetch('KEY'):
            unit = (ephys.Unit * ephys.UnitCellType & probe_insertion & unit_key).fetch1()
            # build observation intervals: note the early trials where spikes were not recorded
            first_spike, last_spike = unit['spike_times'][0], unit['spike_times'][-1]
            obs_start = (experiment.SessionTrial & unit_key & f'start_time < {first_spike}').fetch(
                'start_time', order_by='start_time DESC', limit=1)
            obs_stop = (experiment.SessionTrial & unit_key & f'stop_time > {last_spike}').fetch(
                'stop_time', order_by='stop_time', limit=1)
            obs_intervals = [[float(obs_start[0]) if obs_start.size > 0 else first_spike,
                              float(obs_stop[0]) if obs_stop.size > 0 else last_spike]]
            # make an electrode table region (which electrode(s) is this unit coming from)
            nwbfile.add_unit(id=unit['unit'],
                             electrodes=np.where(np.array(nwbfile.electrodes.id.data) == unit['electrode'])[0],
                             electrode_group=electrode_groups[unit['electrode_group']],
                             obs_intervals=obs_intervals,
                             sampling_rate=ecephys_fs,
                             quality=unit['unit_quality'],
                             posx=unit['unit_posx'],
                             posy=unit['unit_posy'],
                             cell_type=unit['cell_type'],
                             spike_times=unit['spike_times'],
                             waveform_mean=np.mean(unit['waveform'], axis=0),
                             waveform_sd=np.std(unit['waveform'], axis=0))
    # ===============================================================================
    # ============================= BEHAVIOR TRACKING ===============================
    # ===============================================================================
    if tracking.LickTrace * experiment.SessionTrial & session_key:
        # re-concatenating trialized tracking traces
        lick_traces, time_vecs, trial_starts = (tracking.LickTrace * experiment.SessionTrial & session_key).fetch(
            'lick_trace', 'lick_trace_timestamps', 'start_time')
        behav_acq = pynwb.behavior.BehavioralTimeSeries(name='BehavioralTimeSeries')
        nwbfile.add_acquisition(behav_acq)
        # NOTE(review): `time_vecs + trial_starts` relies on numpy object-array
        # arithmetic to offset each per-trial timestamp vector by its trial
        # start — verify this holds for the numpy version in use.
        behav_acq.create_timeseries(name='lick_trace', unit='a.u.', conversion=1.0,
                                    data=np.hstack(lick_traces),
                                    description="Time-series of the animal's tongue movement when licking",
                                    timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
    # ===============================================================================
    # ============================= PHOTO-STIMULATION ===============================
    # ===============================================================================
    stim_sites = {}
    for photostim in experiment.Photostim * experiment.PhotostimBrainRegion * lab.PhotostimDevice & session_key:
        # Reuse the device if an earlier photostim entry already created it.
        stim_device = (nwbfile.get_device(photostim['photostim_device'])
                       if photostim['photostim_device'] in nwbfile.devices
                       else nwbfile.create_device(name=photostim['photostim_device']))
        stim_site = pynwb.ogen.OptogeneticStimulusSite(
            name=photostim['stim_laterality'] + ' ' + photostim['stim_brain_area'],
            device=stim_device,
            excitation_lambda=float(photostim['excitation_wavelength']),
            location=json.dumps([{k: v for k, v in stim_locs.items()
                                  if k not in experiment.Photostim.primary_key}
                                 for stim_locs in (experiment.Photostim.PhotostimLocation.proj(..., '-brain_area')
                                                   & photostim).fetch(as_dict=True)], default=str),
            description='')
        nwbfile.add_ogen_site(stim_site)
        stim_sites[photostim['photo_stim']] = stim_site
    # re-concatenating trialized photostim traces
    dj_photostim = (experiment.PhotostimTrace * experiment.SessionTrial * experiment.PhotostimEvent
                    * experiment.Photostim & session_key)
    for photo_stim, stim_site in stim_sites.items():
        if dj_photostim & {'photo_stim': photo_stim}:
            aom_input_trace, laser_power, time_vecs, trial_starts = (
                dj_photostim & {'photo_stim': photo_stim}).fetch(
                'aom_input_trace', 'laser_power', 'photostim_timestamps', 'start_time')
            aom_series = pynwb.ogen.OptogeneticSeries(
                name=stim_site.name + '_aom_input_trace',
                site=stim_site, resolution=0.0, conversion=1e-3,
                data=np.hstack(aom_input_trace),
                timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
            laser_series = pynwb.ogen.OptogeneticSeries(
                name=stim_site.name + '_laser_power',
                site=stim_site, resolution=0.0, conversion=1e-3,
                data=np.hstack(laser_power),
                timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
            nwbfile.add_stimulus(aom_series)
            nwbfile.add_stimulus(laser_series)
    # ===============================================================================
    # =============================== BEHAVIOR TRIALS ===============================
    # ===============================================================================
    # =============== TrialSet ====================
    # NWB 'trial' (of type dynamic table) by default comes with three mandatory attributes: 'start_time' and 'stop_time'
    # Other trial-related information needs to be added in to the trial-table as additional columns (with column name
    # and column description)
    dj_trial = experiment.SessionTrial * experiment.BehaviorTrial
    skip_adding_columns = experiment.Session.primary_key + ['trial_uid', 'trial']
    if experiment.SessionTrial & session_key:
        # Get trial descriptors from TrialSet.Trial and TrialStimInfo
        # Column descriptions are scraped out of the DataJoint heading text.
        trial_columns = [{'name': tag,
                          'description': re.sub('\s+:|\s+', ' ', re.search(
                              f'(?<={tag})(.*)', str(dj_trial.heading)).group()).strip()}
                         for tag in dj_trial.heading.names
                         if tag not in skip_adding_columns + ['start_time', 'stop_time']]
        # Add new table columns to nwb trial-table for trial-label
        for c in trial_columns:
            nwbfile.add_trial_column(**c)
        # Add entry to the trial-table
        for trial in (dj_trial & session_key).fetch(as_dict=True):
            trial['start_time'] = float(trial['start_time'])
            # NOTE(review): a legitimate stop_time of 0.0 would be coerced to
            # NaN by this truthiness check — confirm intended.
            trial['stop_time'] = float(trial['stop_time']) if trial['stop_time'] else np.nan
            trial['id'] = trial['trial']  # rename 'trial_id' to 'id'
            [trial.pop(k) for k in skip_adding_columns]
            nwbfile.add_trial(**trial)
    # ===============================================================================
    # =============================== BEHAVIOR TRIAL EVENTS ==========================
    # ===============================================================================
    behav_event = pynwb.behavior.BehavioralEvents(name='BehavioralEvents')
    nwbfile.add_acquisition(behav_event)
    for trial_event_type in (experiment.TrialEventType & experiment.TrialEvent & session_key).fetch('trial_event_type'):
        event_times, trial_starts = (experiment.TrialEvent * experiment.SessionTrial
                                     & session_key & {'trial_event_type': trial_event_type}).fetch(
            'trial_event_time', 'start_time')
        if len(event_times) > 0:
            # Event times are stored per-trial; shift to session time.
            event_times = np.hstack(event_times.astype(float) + trial_starts.astype(float))
            behav_event.create_timeseries(name=trial_event_type, unit='a.u.', conversion=1.0,
                                          data=np.full_like(event_times, 1),
                                          timestamps=event_times)
    photostim_event_time, trial_starts, photo_stim, power, duration = (
        experiment.PhotostimEvent * experiment.SessionTrial & session_key).fetch(
        'photostim_event_time', 'start_time', 'photo_stim', 'power', 'duration')
    if len(photostim_event_time) > 0:
        # NOTE(review): control_description is given the stim_sites dict of
        # OptogeneticStimulusSite objects; pynwb documents this field as
        # text — verify it round-trips through HDF5 as intended.
        behav_event.create_timeseries(name='photostim_start_time', unit='a.u.', conversion=1.0,
                                      data=power,
                                      timestamps=photostim_event_time.astype(float) + trial_starts.astype(float),
                                      control=photo_stim.astype('uint8'), control_description=stim_sites)
        behav_event.create_timeseries(name='photostim_stop_time', unit='a.u.', conversion=1.0,
                                      data=np.full_like(photostim_event_time, 0),
                                      timestamps=photostim_event_time.astype(float) + duration.astype(float) + trial_starts.astype(float),
                                      control=photo_stim.astype('uint8'), control_description=stim_sites)
    # =============== Write NWB 2.0 file ===============
    if save:
        save_file_name = ''.join([nwbfile.identifier, '.nwb'])
        if not os.path.exists(nwb_output_dir):
            os.makedirs(nwb_output_dir)
        # Keep the existing file unless overwrite was requested.
        if not overwrite and os.path.exists(os.path.join(nwb_output_dir, save_file_name)):
            return nwbfile
        with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name), mode='w') as io:
            io.write(nwbfile)
            print(f'Write NWB 2.0 file: {save_file_name}')
    return nwbfile
# ============================== EXPORT ALL ==========================================
if __name__ == '__main__':
if len(sys.argv) > 1:
nwb_outdir = sys.argv[1]
else:
nwb_outdir = default_nwb_output_dir
for skey in experiment.Session.fetch('KEY'):
export_to_nwb(skey, nwb_output_dir=nwb_outdir, save=True) | 0.382372 | 0.215557 |
from optimizer import optimizer_SGD, AdaGrad, NormGrad, SGD
import numpy as np
from functions import sigmoid, sigmoid_back, clip_grads
class Loss:
    """Sum-of-squared-errors loss: L = 0.5 * sum((out - t)**2).

    forward() caches dL/dout so backward() can return it without arguments.
    """

    def __init__(self):
        # Last computed loss value and its gradient w.r.t. the output.
        self.Loss = None
        self.dout = None

    def forward(self, out, t):
        """Compute and return the loss for prediction `out` against target `t`."""
        diff = out - t
        self.dout = diff
        self.Loss = 0.5 * np.sum(diff ** 2)
        return self.Loss

    def backward(self):
        """Return dL/dout cached by the most recent forward() call."""
        return self.dout
class RNNneuron:
    """Single fully-connected recurrent layer with a sigmoid activation.

    Owns its parameters, gradient buffers and optimizer; backward() both
    computes the gradients for one time step and immediately applies the
    parameter update (with optional gradient clipping).

    Fixes vs. the previous revision:
    * clip_grads() used to be called on the *stale* gradient buffers (before
      the new dW/dWh/db were stored) and its result was assigned to a typo'd
      attribute `self.drads`, so clipping never affected the update.  It is
      now applied to the freshly stored gradients right before the optimizer
      step and stored back into `self.grads`.
    * backward() now accepts h_prev=None (first time step), mirroring
      forward(); previously it crashed on `h_prev.T`.
    """

    def __init__(self, W, Wh, b):
        # Input weight, recurrent weight and bias, in the order the optimizer expects.
        self.params = [W, Wh, b]
        # Gradient buffers matching self.params element-for-element.
        self.grads = [np.zeros_like(W), np.zeros_like(Wh), np.zeros_like(b)]
        # Containers for handing intermediate values outside the class.
        self.F_container = np.empty(0)
        self.B_container = np.empty(0)
        # Gradient flowing back from the next time step (None at the last step).
        self.dh_prev = None
        # Learning rate and optimizer (SGD by default).
        self.lr = 0.01
        self.optimizer = SGD(self.lr)
        # Flag reported by clip_grads() when clipping fired.
        self.clipper = 0
        # Gradient-clipping threshold (default 0.02).
        self.NormGrad = 0.02

    def forward(self, x, h_prev):
        """One forward step; `h_prev` is the previous hidden state (None at t=0).

        Returns (output, F_container) where F_container caches everything
        backward() needs.
        """
        W, Wh, b = self.params
        if h_prev is None:
            # First time step: no recurrent contribution.
            y = np.dot(x, W) + b
        else:
            y = np.dot(h_prev, Wh) + np.dot(x, W) + b
        z = sigmoid(y)
        self.h_prev = z
        self.F_container = [W, Wh, b, x, y, z]
        return z, self.F_container

    def backward(self, dz, h_prev):
        """One backward step; clips gradients and updates the parameters.

        Returns (dx, container) where container = [dy, db, dW, dWh, dx].
        """
        W, Wh, b, x, y, z = self.F_container
        # Merge the gradient arriving from the future time step, if any.
        if self.dh_prev is not None:
            dz = self.dh_prev + dz
        # Backprop through the sigmoid output.
        dy = sigmoid_back(z, dz)
        db = dy
        dW = np.dot(x.T, dy)
        dx = np.dot(dy, W.T)
        if h_prev is None:
            # First time step: no recurrent path to differentiate.
            dWh = np.zeros_like(Wh)
            dh_prev = None
        else:
            dWh = np.dot(h_prev.T, dy)
            dh_prev = np.dot(dy, Wh.T)
        self.dh_prev = dh_prev
        # Store the new gradients, clip them, then update the parameters.
        self.grads[0][...] = dW
        self.grads[1][...] = dWh
        self.grads[2][...] = db
        # NOTE(review): assumes clip_grads returns (grads, fired_flag) — this
        # matches how the previous revision unpacked its result.
        self.grads, self.clipper = clip_grads(self.grads, self.NormGrad)
        self.params = self.optimizer.update(self.params, self.grads)
        self.container = [dy, db, dW, dWh, dx]
        return dx, self.container

    def setlr(self, lr, model=0):
        """Set the learning rate and select the optimizer (0=SGD, 1=AdaGrad, 2=NormGrad)."""
        self.lr = lr
        if model == 0:
            self.optimizer = SGD(self.lr)
        elif model == 1:
            self.optimizer = AdaGrad(self.lr)
        elif model == 2:
            self.optimizer = NormGrad(self.lr)

    def viewlr(self):
        """Return the optimizer's current learning rate."""
        return self.optimizer.viewlr()

    def change_lr(self, New_lr):
        """Change the optimizer's learning rate in place."""
        self.optimizer.change_lr(New_lr)

    def reset(self):
        """Clear recurrent state between sequences."""
        self.h_prev = None
        self.dh_prev = None

    def clipper_Chech(self):
        # Name kept for backward compatibility ("Check" is misspelled upstream).
        return self.clipper

    def change_NormGrad(self, NormGrad):
        """Change the gradient-clipping threshold."""
        self.NormGrad = NormGrad
class BPneuron:
    """Single fully-connected feed-forward layer with a sigmoid activation.

    Owns its parameters, gradient buffers and optimizer; backward() both
    computes gradients and immediately applies the parameter update.
    """
    def __init__(self, W, b):
        # Store the weight and bias received as arguments in self.params.
        self.params = [W, b]
        # Gradient buffers sent to the optimizer before each update
        # (must correspond element-for-element to self.params).
        self.grads = [np.zeros_like(W), np.zeros_like(b)]
        # Container for handing intermediate values outside the class.
        self.container = np.empty(0)
        # Learning rate.
        self.lr = 0.01
        # Default optimizer is AdaGrad here (RNNneuron defaults to SGD).
        self.optimizer = AdaGrad(self.lr)
    def forward(self, x):
        """One forward pass; returns (output, cached intermediates)."""
        # Unpack the weight and bias stored at initialization.
        W, b = self.params
        # y is the neuron's pre-activation value.
        y = np.dot(x, W)+b
        # z is the output.
        z = sigmoid(y)
        self.container = [W, b, x, y, z]
        return z, self.container
    def backward(self, dz):
        """One backward pass; also updates the parameters via the optimizer.

        Returns (dx, container) where container = [dy, db, dW, dx].
        """
        W, b, x, y, z = self.container
        # Backprop through the sigmoid output.
        dy = sigmoid_back(z, dz)
        db = dy
        dW = np.dot(x.T, dy)
        dx = np.dot(dy, W.T)
        # Store the gradients used for the update in self.grads.
        self.grads[0][...] = dW
        self.grads[1][...] = db
        # Update self.params via the optimizer.
        self.params = self.optimizer.update(self.params, self.grads)
        # Store all results in self.container for the caller.
        self.container = [dy, db, dW, dx]
        return dx, self.container
    def setlr(self, lr, model=0):
        """Set the learning rate and select the optimizer (0=SGD, 1=AdaGrad, 2=NormGrad)."""
        self.lr = lr
        if model == 0:
            self.optimizer = SGD(self.lr)
        elif model == 1:
            self.optimizer = AdaGrad(self.lr)
        elif model == 2:
            self.optimizer = NormGrad(self.lr)
    def viewlr(self):
        """Return the optimizer's current learning rate."""
        return self.optimizer.viewlr()
def change_lr(self, New_lr):
self.optimizer.change_lr(New_lr) | firerate_vs_time/rnnclass.py | from optimizer import optimizer_SGD, AdaGrad, NormGrad, SGD
import numpy as np
from functions import sigmoid, sigmoid_back, clip_grads
class Loss:
def __init__(self):
self.Loss = None
self.dout = None
def forward(self, out, t):
self.Loss = 1/2 * np.sum((out - t)**2)
self.dout = out - t
return self.Loss
def backward(self):
return self.dout
class RNNneuron:
def __init__(self, W, Wh, b):
# 引数として受けた重みとバイアスをself.aramsに格納
self.params = [W, Wh, b]
# 更新前に勾配をまとめてオプティマイザーに送るための入れ物(中身はparamsに対応している必要あり)
self.grads = [np.zeros_like(W), np.zeros_like(Wh), np.zeros_like(b)]
# クラス外へ中身を持っていくための入れ物
self.F_container = np.empty(0)
self.B_container = np.empty(0)
# RNN層の中身の入れ物
self.dh_prev = None
# 学習率の格納
self.lr = 0.01
# オプティマイザーの定義(初期値SGD)
self.optimizer = SGD(self.lr)
# クリッピングの実行フラグ
self.clipper = 0
# 勾配クリッピングのしきい値(初期値0.02)
self.NormGrad = 0.02
def forward(self, x, h_prev):
# クラスの初期化時に格納した重みとバイアスの取り出し
W, Wh, b = self.params
# yはニューロン内部の値
#f = open("E:\研究一時ファイル\BP\TEST_1120\Fh.txt", mode="a")
if h_prev is None:
y = np.dot(x, W) + b
else:
y = np.dot(h_prev, Wh) + np.dot(x, W) + b
#w = "\nWh:" + str(Wh) + "\nh_prev:" + str(h_prev) + "\n:" + str(y)
# f.write(w)
# Zが出力
z = sigmoid(y)
self.h_prev = z
self.F_container = [W, Wh, b, x, y, z]
return z, self.F_container
def backward(self, dz, h_prev):
#f = open("E:\研究一時ファイル\BP\TEST_1120\Wh.txt", mode="a")
W, Wh, b, x, y, z = self.F_container
dh_prev = self.dh_prev
# 過去時刻からの勾配の合算
if dh_prev is None:
dz = dz
else:
dz = dh_prev + dz
# 出力部の逆伝搬(シグモイド版)
dy = sigmoid_back(z, dz)
db = dy
dW = np.dot(x.T, dy)
dx = np.dot(dy, W.T)
dWh = np.dot(h_prev.T, dy)
dh_prev = np.dot(dy, Wh.T)
#w = "\ndWh:" + str(dWh) + "\nh_prev:" + str(h_prev) + "\ndy:" + str(dy)
# f.write(w)
# 勾配クリッピングの実行
self.drads, self.clipper = clip_grads(self.grads, self.NormGrad)
self.dh_prev = dh_prev
# self.gradsに更新に行かう勾配を格納
self.grads[0][...] = dW
self.grads[1][...] = dWh
self.grads[2][...] = db
# オプティマイザーによりself.paramsの値を更新
# self.params = optimizer_SGD(self.lr, self.params, self.grads)
self.params = self.optimizer.update(self.params, self.grads)
# すべての結果をself.containerに格納
self.container = [dy, db, dW, dWh, dx]
# f.close
return dx, self.container
def setlr(self, lr, model=0):
self.lr = lr
if model == 0:
self.optimizer = SGD(self.lr)
elif model == 1:
self.optimizer = AdaGrad(self.lr)
elif model == 2:
self.optimizer = NormGrad(self.lr)
def viewlr(self):
return self.optimizer.viewlr()
def change_lr(self, New_lr):
self.optimizer.change_lr(New_lr)
def reset(self):
self.h_prev = None
self.dh_prev = None
def clipper_Chech(self):
return self.clipper
def change_NormGrad(self, NormGrad):
# 勾配クリッピングのしきい値の変更
self.NormGrad = NormGrad
class BPneuron:
def __init__(self, W, b):
# 引数として受けた重みとバイアスをself.aramsに格納
self.params = [W, b]
# 更新前に勾配をまとめてオプティマイザーに送るための入れ物(中身はparamsに対応している必要あり)
self.grads = [np.zeros_like(W), np.zeros_like(b)]
# クラス外へ中身を持っていくための入れ物
self.container = np.empty(0)
# 学習率の格納
self.lr = 0.01
self.optimizer = AdaGrad(self.lr)
def forward(self, x):
# クラスの初期化時に格納した重みとバイアスの取り出し
W, b = self.params
# yはニューロン内部の値
y = np.dot(x, W)+b
# Zが出力
z = sigmoid(y)
self.container = [W, b, x, y, z]
return z, self.container
def backward(self, dz):
W, b, x, y, z = self.container
# 出力部の逆伝搬(シグモイド版)
dy = sigmoid_back(z, dz)
db = dy
dW = np.dot(x.T, dy)
dx = np.dot(dy, W.T)
# self.gradsに更新に行かう勾配を格納
self.grads[0][...] = dW
self.grads[1][...] = db
# オプティマイザーによりself.paramsの値を更新
# self.params = optimizer_SGD(self.lr, self.params, self.grads)
self.params = self.optimizer.update(self.params, self.grads)
# すべての結果をself.containerに格納
self.container = [dy, db, dW, dx]
return dx, self.container
def setlr(self, lr, model=0):
self.lr = lr
if model == 0:
self.optimizer = SGD(self.lr)
elif model == 1:
self.optimizer = AdaGrad(self.lr)
elif model == 2:
self.optimizer = NormGrad(self.lr)
def viewlr(self):
return self.optimizer.viewlr()
def change_lr(self, New_lr):
self.optimizer.change_lr(New_lr) | 0.434221 | 0.231647 |
import itertools
import six
import math
class PolylineCodec(object):
    def _pcitr(self, iterable):
        """Yield consecutive (previous, current) pairs from `iterable`.

        NOTE(review): `iterable` is iterated twice (once by zip, once by
        islice), so this assumes a re-iterable sequence such as a list, not
        a one-shot iterator — encode() passes a list, which is fine.
        """
        return six.moves.zip(iterable, itertools.islice(iterable, 1, None))
def _py2_round(self, x):
# The polyline algorithm uses Python 2's way of rounding
return int(math.copysign(math.floor(math.fabs(x) + 0.5), x))
    def _write(self, output, curr_value, prev_value, factor):
        """Append the encoding of the delta (curr - prev) * factor to `output`.

        `output` is any object exposing write() for one-character strings
        (a StringIO in encode()).
        """
        curr_value = self._py2_round(curr_value * factor)
        prev_value = self._py2_round(prev_value * factor)
        coord = curr_value - prev_value
        # Zig-zag step: shift left one bit and invert negatives so the sign
        # ends up in the low bit.
        coord <<= 1
        coord = coord if coord >= 0 else ~coord
        # Emit 5 bits per character, least-significant chunk first; 0x20
        # flags "more chunks follow" and +63 shifts into printable ASCII.
        while coord >= 0x20:
            output.write(six.unichr((0x20 | (coord & 0x1f)) + 63))
            coord >>= 5
        output.write(six.unichr(coord + 63))
def _trans(self, value, index):
byte, result, shift = None, 0, 0
while byte is None or byte >= 0x20:
byte = ord(value[index]) - 63
index += 1
result |= (byte & 0x1f) << shift
shift += 5
comp = result & 1
return ~(result >> 1) if comp else (result >> 1), index
def decode(self, expression, precision=5, geojson=False):
coordinates, index, lat, lng, length, factor = [], 0, 0, 0, len(expression), float(10 ** precision)
while index < length:
lat_change, index = self._trans(expression, index)
lng_change, index = self._trans(expression, index)
lat += lat_change
lng += lng_change
coordinates.append((lat / factor, lng / factor))
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
return coordinates
    def encode(self, coordinates, precision=5, geojson=False):
        """Encode a sequence of coordinate pairs as a polyline string.

        `coordinates` holds (lat, lng) pairs, or (lng, lat) pairs when
        geojson=True.  `precision` is the number of decimal digits to
        preserve (default 5).  Assumes at least one coordinate pair.
        """
        if geojson is True:
            coordinates = [t[::-1] for t in coordinates]
        output, factor = six.StringIO(), int(10 ** precision)
        # The first point is delta-encoded against the origin (0, 0)...
        self._write(output, coordinates[0][0], 0, factor)
        self._write(output, coordinates[0][1], 0, factor)
        # ...and every later point against its predecessor.
        for prev, curr in self._pcitr(coordinates):
            self._write(output, curr[0], prev[0], factor)
            self._write(output, curr[1], prev[1], factor)
return output.getvalue() | venv/lib/python3.7/site-packages/polyline/codec.py | import itertools
import six
import math
class PolylineCodec(object):
def _pcitr(self, iterable):
return six.moves.zip(iterable, itertools.islice(iterable, 1, None))
def _py2_round(self, x):
# The polyline algorithm uses Python 2's way of rounding
return int(math.copysign(math.floor(math.fabs(x) + 0.5), x))
def _write(self, output, curr_value, prev_value, factor):
curr_value = self._py2_round(curr_value * factor)
prev_value = self._py2_round(prev_value * factor)
coord = curr_value - prev_value
coord <<= 1
coord = coord if coord >= 0 else ~coord
while coord >= 0x20:
output.write(six.unichr((0x20 | (coord & 0x1f)) + 63))
coord >>= 5
output.write(six.unichr(coord + 63))
def _trans(self, value, index):
byte, result, shift = None, 0, 0
while byte is None or byte >= 0x20:
byte = ord(value[index]) - 63
index += 1
result |= (byte & 0x1f) << shift
shift += 5
comp = result & 1
return ~(result >> 1) if comp else (result >> 1), index
def decode(self, expression, precision=5, geojson=False):
coordinates, index, lat, lng, length, factor = [], 0, 0, 0, len(expression), float(10 ** precision)
while index < length:
lat_change, index = self._trans(expression, index)
lng_change, index = self._trans(expression, index)
lat += lat_change
lng += lng_change
coordinates.append((lat / factor, lng / factor))
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
return coordinates
def encode(self, coordinates, precision=5, geojson=False):
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
output, factor = six.StringIO(), int(10 ** precision)
self._write(output, coordinates[0][0], 0, factor)
self._write(output, coordinates[0][1], 0, factor)
for prev, curr in self._pcitr(coordinates):
self._write(output, curr[0], prev[0], factor)
self._write(output, curr[1], prev[1], factor)
return output.getvalue() | 0.519765 | 0.402979 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
Trainer,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly crop `wav` to at most `max_length` seconds.

    Returns `wav` unchanged when it is already short enough; otherwise a
    contiguous slice of exactly ``round(sample_rate * max_length)`` samples
    starting at a uniformly random offset.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    # randint is inclusive on both ends, so the largest valid offset is
    # len(wav) - sample_length.  The previous `- 1` meant the final window
    # (and hence the last sample) could never be selected.
    random_offset = randint(0, len(wav) - sample_length)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments for dataset split selection and audio preprocessing."""

    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to "
            "'validation'"
        },
    )
    # Training clips longer than this are randomly cropped (see random_subsample).
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


# NOTE(review): the line below was a stray fragment (an argparse-style call
# with no parser object in scope) that raised NameError at import time; the
# model name is supplied through the ModelArguments/HfArgumentParser
# machinery instead, so it is disabled here.
# add_argument("--model_name_or_path", type=str, default="facebook/wav2vec2-base", required=True)
@dataclass
class ModelArguments:
    """Model freezing / feature-extractor options.

    Includes a deprecation shim mapping the legacy
    ``--freeze_feature_extractor`` flag onto ``--freeze_feature_encoder``.
    """

    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )

    def __post_init__(self):
        # Reject the contradictory combination outright (legacy flag set,
        # but the encoder explicitly left unfrozen).
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
        # Otherwise warn about the deprecation whenever the encoder is frozen
        # without the (truthy) legacy flag.
        if self.freeze_feature_encoder and not self.freeze_feature_extractor:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
def main():
    """Fine-tune a pretrained model for audio classification.

    NOTE(review): this copy of the script is truncated — `model_args`,
    `data_args`, `training_args` and `last_checkpoint` are used below but
    never defined here.  The upstream script builds them with
    HfArgumentParser((ModelArguments, DataTrainingArguments,
    TrainingArguments)) plus get_last_checkpoint(); that section must be
    restored before this function can run.
    """
    # Load the requested train/eval splits.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config, split=data_args.train_split_name
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config, split=data_args.eval_split_name
    )
    # Fail early with an actionable message if the configured columns are absent.
    if data_args.audio_column not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column {data_args.audio_column} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    if data_args.label_column not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column {data_args.label_column} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        # Random cropping keeps training clips at most max_length_seconds long.
        output_batch = {"input_values": []}
        for audio in batch[data_args.audio_column]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            output_batch["input_values"].append(wav)
        output_batch["labels"] = [label for label in batch[data_args.label_column]]
        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        # Evaluation uses the full, uncropped waveform.
        output_batch = {"input_values": []}
        for audio in batch[data_args.audio_column]:
            wav = audio["array"]
            output_batch["input_values"].append(wav)
        output_batch["labels"] = [label for label in batch[data_args.label_column]]
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column].names
    label2id, id2label = dict(), dict()
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = datasets.load_metric("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            # NOTE(review): `last_checkpoint` comes from the stripped
            # get_last_checkpoint() section — undefined in this copy.
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | std/huggingface/audio.py |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
Trainer,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly crop *wav* to at most ``max_length`` seconds.

    Args:
        wav: 1-D waveform array.
        max_length: Maximum clip length in seconds.
        sample_rate: Samples per second, used to convert seconds to samples.

    Returns:
        ``wav`` unchanged (same object) when it already fits, otherwise a
        random contiguous slice of ``sample_length`` samples.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    # randint is inclusive on both ends, so the upper bound must be
    # len(wav) - sample_length (not -1) for the final window to be reachable.
    random_offset = randint(0, len(wav) - sample_length)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments selecting which dataset splits to use and how long clips may be.

    The stray module-level ``add_argument(...)`` call that previously followed
    this class referenced an undefined name and crashed the module at import
    time; it has been removed (model selection belongs to ModelArguments /
    HfArgumentParser, not argparse).
    """

    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to "
            "'validation'"
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Model-side switches for the audio-classification fine-tuning script."""

    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )

    def __post_init__(self):
        # `--freeze_feature_extractor` is the deprecated spelling of
        # `--freeze_feature_encoder`; reconcile the two flags here.
        extractor = self.freeze_feature_extractor
        encoder = self.freeze_feature_encoder
        if extractor and not encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
        if encoder and not extractor:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
def main():
    """Fine-tune and evaluate an audio-classification model with HF Trainer.

    NOTE(review): `data_args`, `model_args`, `training_args` and
    `last_checkpoint` are never defined in this file -- presumably they were
    produced by an HfArgumentParser / get_last_checkpoint preamble that was
    stripped out. Confirm before running; as written this raises NameError.
    """
    # Load the train/eval splits requested via the data arguments.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config, split=data_args.train_split_name
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config, split=data_args.eval_split_name
    )
    # Fail fast when the configured column names are missing from the dataset.
    if data_args.audio_column not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column {data_args.audio_column} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    if data_args.label_column not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column {data_args.label_column} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        # Training clips are randomly cropped to at most max_length_seconds.
        output_batch = {"input_values": []}
        for audio in batch[data_args.audio_column]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            output_batch["input_values"].append(wav)
        output_batch["labels"] = [label for label in batch[data_args.label_column]]
        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        # Evaluation uses the full, un-cropped waveform.
        output_batch = {"input_values": []}
        for audio in batch[data_args.audio_column]:
            wav = audio["array"]
            output_batch["input_values"].append(wav)
        output_batch["labels"] = [label for label in batch[data_args.label_column]]
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column].names
    label2id, id2label = dict(), dict()
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = datasets.load_metric("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        # Explicit --resume_from_checkpoint wins over an auto-detected one.
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 0.820146 | 0.373504 |
import sqlite3
class Connection():
    """Wrapper around a sqlite3 connection holding scraped search results.

    Usable as a context manager: commits on clean exit, rolls back when the
    block raised an exception, and always closes the connection afterwards.
    """

    def __init__(self, db_path='results.db'):
        # db_path is parameterized (default keeps the historical file name,
        # so existing callers are unaffected); tests may pass ':memory:'.
        self.connection = sqlite3.connect(db_path)
        self.cursor = self.connection.cursor()
        self.enable_foreign_keys()

    def close(self):
        """Close the underlying sqlite3 connection."""
        self.connection.close()

    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()

    def rollback(self):
        """Roll back the current transaction."""
        self.connection.rollback()

    def __enter__(self):
        # BUG FIX: this used to call self.__init__() again, opening a second
        # sqlite connection and leaking the one made by the constructor.
        # The connection opened in __init__ is simply reused.
        return self

    def __exit__(self, etype, evalue, etb):
        # etb is None on a clean exit; any exception triggers a rollback.
        if etb is None:
            self.commit()
        else:
            self.rollback()
        self.close()

    def enable_foreign_keys(self):
        """Turn on foreign-key enforcement (off by default in sqlite)."""
        enabled, = self.cursor.execute(
            "PRAGMA foreign_keys;"
        ).fetchone()
        if not enabled:
            self.cursor.execute("PRAGMA foreign_keys = ON;")

    def insert_results(self, query, results):
        """Persist one search *query* and its *results* payload.

        *results* is expected to carry 'res_count' and 'res', where each
        entry of 'res' has 'full_text', 'when', a 'user' dict with
        'handle'/'name', and optional 'mentions'/'hashtags' lists.
        NOTE: each result dict is mutated (query_id/author_id are added) so
        it can be bound as a named-parameter dict below.
        """
        self.cursor.execute("""
            INSERT OR IGNORE INTO queries(query, result_count)
            VALUES (?, ?)
            """, (query, results["res_count"]))
        res = results["res"]
        # Insert every author/mentioned user once, up front.
        users = unique_users(res)
        self.cursor.executemany("""
            INSERT OR IGNORE INTO users(handle, name)
            VALUES (?, ?)
            """, iter(users))
        for result in res:
            self.cursor.execute("""
                SELECT ROWID FROM queries WHERE query = ?
                """, (query,))
            result["query_id"], = self.cursor.fetchone()
            self.cursor.execute(
                "SELECT ROWID FROM users WHERE handle = :handle",
                result["user"])
            result["author_id"], = self.cursor.fetchone()
            self.cursor.execute("""
                INSERT INTO results(full_text, date, query, author)
                VALUES (:full_text, :when, :query_id, :author_id)
                """, result)
            result_id = self.cursor.lastrowid
            if "mentions" in result:
                for mention in result["mentions"]:
                    self.cursor.execute(
                        "SELECT ROWID FROM users WHERE handle = :handle",
                        mention)
                    author_id, = self.cursor.fetchone()
                    self.cursor.execute(
                        "INSERT INTO mentions(result, user) VALUES (?, ?)",
                        (result_id, author_id))
            if "hashtags" in result:
                for hashtag in result["hashtags"]:
                    # Reuse an existing hashtag row when possible, otherwise
                    # create it, then link it to this result.
                    self.cursor.execute(
                        "SELECT id FROM hashtags WHERE text = :text",
                        hashtag)
                    existing_ht = self.cursor.fetchone()
                    if existing_ht:
                        self.cursor.execute("""
                            INSERT INTO hashtag_to_result(
                                result, hashtag
                            ) VALUES (?, ?)
                            """, (result_id, existing_ht[0]))
                    else:
                        self.cursor.execute(
                            "INSERT INTO hashtags(text) VALUES (:text)",
                            hashtag)
                        self.cursor.execute("""
                            INSERT INTO hashtag_to_result(
                                result, hashtag
                            ) VALUES (?, ?)
                            """, (result_id, self.cursor.lastrowid))

    def get_query(self, query):
        """Rebuild the stored results dict for *query*, or None when unknown."""
        self.cursor.execute(
            "SELECT id, result_count FROM queries WHERE query = ?",
            (query,))
        query = self.cursor.fetchone()
        if not query:
            return None
        query_id, res_count = query
        json_dict = {
            "res_count": res_count,
            "res_faces": {},
            "res": [],
        }
        self.cursor.execute(
            "SELECT * FROM results WHERE query = ?",
            (query_id,))
        # fetchall() materializes the rows, so the cursor can be reused for
        # the nested lookups below.
        for result in self.cursor.fetchall():
            result_dict = {}
            self.cursor.execute(
                "SELECT * FROM hashtag_to_result WHERE result = ?",
                (result[0],))
            hashtags = self.cursor.fetchall()
            if hashtags:
                hashtag_list = []
                for hashtag in hashtags:
                    self.cursor.execute(
                        "SELECT * FROM hashtags WHERE id = ?",
                        (hashtag[1],))
                    _, ht = self.cursor.fetchone()
                    hashtag_list.append({"text": ht})
                result_dict["hashtags"] = hashtag_list
            result_dict["full_text"] = result[1]
            self.cursor.execute(
                "SELECT * FROM users WHERE id = ?",
                (result[4],))
            _, handle, name = self.cursor.fetchone()
            self.cursor.execute(
                "SELECT * FROM mentions WHERE result = ?",
                (result[0],))
            mentions = self.cursor.fetchall()
            if mentions:
                mentions_list = []
                for mention in mentions:
                    self.cursor.execute(
                        "SELECT * FROM users WHERE id = ?",
                        (mention[1],))
                    user = self.cursor.fetchone()
                    mentions_list.append({
                        "name": user[2],
                        "handle": user[1],
                    })
                result_dict["mentions"] = mentions_list
            result_dict["user"] = {
                "name": name,
                "handle": handle,
            }
            result_dict["when"] = result[2]
            json_dict["res"].append(result_dict)
        return json_dict

    def get_relations(self, user_handle):
        """Fetch author<->mention relations touching *user_handle*.

        NOTE(review): `author_mentions` is not created by init_tables --
        presumably a view defined elsewhere; confirm.
        """
        self.cursor.execute("""
            SELECT * FROM author_mentions WHERE (author = :handle
            OR mention = :handle) AND author != mention
            """, {'handle': user_handle})
        return self.cursor.fetchall()

    def get_relations_from_list(self, user_handles):
        """Fetch author<->mention relations touching any handle in the list.

        BUG FIX: the query was assembled with list.extend(str), which splats
        the string into characters and only worked by accident through the
        final ''.join; the caller's list was also mutated by doubling it.
        The same placeholders are now bound twice (authors and mentions)
        without side effects on the argument.
        """
        placeholders = ','.join(['?'] * len(user_handles))
        sql = (
            "SELECT * FROM author_mentions WHERE (author != mention) "
            f"AND (author IN ({placeholders}) "
            f"OR mention IN ({placeholders}))"
        )
        self.cursor.execute(sql, list(user_handles) * 2)
        return self.cursor.fetchall()
def unique_users(res):
    """Collect the distinct (handle, name) pairs appearing in *res*.

    Both the author of each result and any mentioned users are included;
    the returned list's order is unspecified (set-based de-duplication).
    """
    seen = set()
    for entry in res:
        author = entry["user"]
        seen.add((author["handle"], author["name"]))
        for mention in entry.get("mentions", ()):
            seen.add((mention["handle"], mention["name"]))
    return list(seen)
def init_tables(cursor):
    """Create the result-store schema (idempotent via IF NOT EXISTS).

    BUG FIX: index names are database-wide in SQLite, so reusing
    `result_index` for both mentions and hashtag_to_result meant the second
    CREATE INDEX IF NOT EXISTS was silently skipped and
    hashtag_to_result(result) was never indexed; that index now has a
    unique name (htr_result_index).
    NOTE(review): get_relations queries an `author_mentions` relation that
    is not created here -- presumably a view defined elsewhere; confirm.
    """
    cursor.executescript("""
    CREATE TABLE IF NOT EXISTS queries (
        id INTEGER PRIMARY KEY,
        query TEXT,
        result_count INTEGER,
        UNIQUE (query)
    );
    CREATE TABLE IF NOT EXISTS users (
        id INTEGER PRIMARY KEY,
        handle TEXT,
        name TEXT,
        UNIQUE (handle)
    );
    CREATE TABLE IF NOT EXISTS results (
        id INTEGER PRIMARY KEY,
        full_text TEXT,
        date TEXT,
        query INTEGER REFERENCES queries,
        author INTEGER REFERENCES users
    );
    CREATE INDEX IF NOT EXISTS query_index ON results(query);
    CREATE INDEX IF NOT EXISTS author_index ON results(author);
    CREATE TABLE IF NOT EXISTS mentions (
        result INTEGER REFERENCES results,
        user INTEGER REFERENCES users,
        PRIMARY KEY(result, user)
    );
    CREATE INDEX IF NOT EXISTS result_index ON mentions(result);
    CREATE INDEX IF NOT EXISTS user_index ON mentions(user);
    CREATE TABLE IF NOT EXISTS hashtags (
        id INTEGER PRIMARY KEY,
        text TEXT UNIQUE
    );
    CREATE TABLE IF NOT EXISTS hashtag_to_result (
        result INTEGER REFERENCES results,
        hashtag INTEGER REFERENCES hashtags,
        PRIMARY KEY(result, hashtag)
    );
    CREATE INDEX IF NOT EXISTS htr_result_index ON hashtag_to_result(result);
    CREATE INDEX IF NOT EXISTS hashtag_index ON hashtag_to_result(hashtag);
    """)
if __name__ == "__main__":
with Connection() as connection:
init_tables(connection.cursor) | database.py | import sqlite3
class Connection():
def __init__(self):
self.connection = sqlite3.connect('results.db')
self.cursor = self.connection.cursor()
self.enable_foreign_keys()
def close(self):
self.connection.close()
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def __enter__(self):
self.__init__()
return self
def __exit__(self, etype, evalue, etb):
if etb is None:
self.commit()
else:
self.rollback()
self.close()
def enable_foreign_keys(self):
enabled, = self.cursor.execute(
"PRAGMA foreign_keys;"
).fetchone()
if not enabled:
self.cursor.execute("PRAGMA foreign_keys = ON;")
def insert_results(self, query, results):
self.cursor.execute("""
INSERT OR IGNORE INTO queries(query, result_count)
VALUES (?, ?)
""", (query, results["res_count"]))
res = results["res"]
users = unique_users(res)
self.cursor.executemany("""
INSERT OR IGNORE INTO users(handle, name)
VALUES (?, ?)
""", iter(users))
for result in res:
self.cursor.execute("""
SELECT ROWID FROM queries WHERE query = ?
""", (query,))
result["query_id"], = self.cursor.fetchone()
self.cursor.execute(
"SELECT ROWID FROM users WHERE handle = :handle",
result["user"])
result["author_id"], = self.cursor.fetchone()
self.cursor.execute("""
INSERT INTO results(full_text, date, query, author)
VALUES (:full_text, :when, :query_id, :author_id)
""", result)
result_id = self.cursor.lastrowid
if "mentions" in result:
for mention in result["mentions"]:
self.cursor.execute(
"SELECT ROWID FROM users WHERE handle = :handle",
mention)
author_id, = self.cursor.fetchone()
self.cursor.execute(
"INSERT INTO mentions(result, user) VALUES (?, ?)",
(result_id, author_id))
if "hashtags" in result:
for hashtag in result["hashtags"]:
self.cursor.execute(
"SELECT id FROM hashtags WHERE text = :text",
hashtag)
existing_ht = self.cursor.fetchone()
if existing_ht:
self.cursor.execute("""
INSERT INTO hashtag_to_result(
result, hashtag
) VALUES (?, ?)
""", (result_id, existing_ht[0]))
else:
self.cursor.execute(
"INSERT INTO hashtags(text) VALUES (:text)",
hashtag)
self.cursor.execute("""
INSERT INTO hashtag_to_result(
result, hashtag
) VALUES (?, ?)
""", (result_id, self.cursor.lastrowid))
def get_query(self, query):
self.cursor.execute(
"SELECT id, result_count FROM queries WHERE query = ?",
(query,))
query = self.cursor.fetchone()
if not query:
return None
query_id, res_count = query
json_dict = {
"res_count": res_count,
"res_faces": {},
"res": [],
}
self.cursor.execute(
"SELECT * FROM results WHERE query = ?",
(query_id,))
for result in self.cursor.fetchall():
result_dict = {}
self.cursor.execute(
"SELECT * FROM hashtag_to_result WHERE result = ?",
(result[0],))
hashtags = self.cursor.fetchall()
if hashtags:
hashtag_list = []
for hashtag in hashtags:
self.cursor.execute(
"SELECT * FROM hashtags WHERE id = ?",
(hashtag[1],))
_, ht = self.cursor.fetchone()
hashtag_list.append({"text": ht})
result_dict["hashtags"] = hashtag_list
result_dict["full_text"] = result[1]
self.cursor.execute(
"SELECT * FROM users WHERE id = ?",
(result[4],))
_, handle, name = self.cursor.fetchone()
self.cursor.execute(
"SELECT * FROM mentions WHERE result = ?",
(result[0],))
mentions = self.cursor.fetchall()
if mentions:
mentions_list = []
for mention in mentions:
self.cursor.execute(
"SELECT * FROM users WHERE id = ?",
(mention[1],))
user = self.cursor.fetchone()
mentions_list.append({
"name": user[2],
"handle": user[1],
})
result_dict["mentions"] = mentions_list
result_dict["user"] = {
"name": name,
"handle": handle,
}
result_dict["when"] = result[2]
json_dict["res"].append(result_dict)
return json_dict
def get_relations(self, user_handle):
self.cursor.execute("""
SELECT * FROM author_mentions WHERE (author = :handle
OR mention = :handle) AND author != mention
""", {'handle': user_handle})
return self.cursor.fetchall()
def get_relations_from_list(self, user_handles):
exe_str = [
"SELECT * FROM author_mentions WHERE (author != mention) "
]
exe_str.extend(
f"AND (author IN ({','.join(['?'] * len(user_handles))}) ")
exe_str.extend(
f"OR mention IN ({','.join(['?'] * len(user_handles))}))")
user_handles.extend(user_handles)
self.cursor.execute(''.join(exe_str), user_handles)
return self.cursor.fetchall()
def unique_users(res):
users = []
for result in res:
users.append((result["user"]["handle"], result["user"]["name"]))
if "mentions" in result:
for mention in result["mentions"]:
users.append((mention["handle"], mention["name"]))
return list(set(users))
def init_tables(cursor):
cursor.executescript("""
CREATE TABLE IF NOT EXISTS queries (
id INTEGER PRIMARY KEY,
query TEXT,
result_count INTEGER,
UNIQUE (query)
);
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY,
handle TEXT,
name TEXT,
UNIQUE (handle)
);
CREATE TABLE IF NOT EXISTS results (
id INTEGER PRIMARY KEY,
full_text TEXT,
date TEXT,
query INTEGER REFERENCES queries,
author INTEGER REFERENCES users
);
CREATE INDEX IF NOT EXISTS query_index ON results(query);
CREATE INDEX IF NOT EXISTS author_index ON results(author);
CREATE TABLE IF NOT EXISTS mentions (
result INTEGER REFERENCES results,
user INTEGER REFERENCES users,
PRIMARY KEY(result, user)
);
CREATE INDEX IF NOT EXISTS result_index ON mentions(result);
CREATE INDEX IF NOT EXISTS user_index ON mentions(user);
CREATE TABLE IF NOT EXISTS hashtags (
id INTEGER PRIMARY KEY,
text TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS hashtag_to_result (
result INTEGER REFERENCES results,
hashtag INTEGER REFERENCES hashtags,
PRIMARY KEY(result, hashtag)
);
CREATE INDEX IF NOT EXISTS result_index ON hashtag_to_result(result);
CREATE INDEX IF NOT EXISTS hashtag_index ON hashtag_to_result(hashtag);
""")
if __name__ == "__main__":
with Connection() as connection:
init_tables(connection.cursor) | 0.196865 | 0.093885 |
def time_stretch(audio, factor, sample_rate=44100):
    """Time-stretch *audio* by *factor* via pyrubberband.

    The import is local so pyrubberband stays an optional dependency.
    NOTE(review): pyrubberband's rate convention (factor > 1 shortens the
    clip) is assumed here -- confirm against its documentation.
    """
    import pyrubberband as pyrb
    return pyrb.time_stretch(audio, sample_rate, factor)
def load_audio(filepath):
    """Load *filepath* as a mono waveform using essentia's MonoLoader.

    The import is local so essentia stays an optional dependency.
    """
    # returns loaded mono audio.
    from essentia.standard import MonoLoader
    return MonoLoader(filename=filepath)()
def save_audio(audio, filename, file_format='wav', bit_rate=320):
    """Write *audio* to *filename* with essentia's MonoWriter.

    bit_rate applies to compressed formats (e.g. mp3); presumably ignored
    for wav -- TODO confirm in the essentia docs.
    """
    from essentia.standard import MonoWriter
    MonoWriter(filename=filename, bitrate=bit_rate, format=file_format)(audio)
def does_annotations_folder_exist(folder_name='pycrossfade_annotations'):
    """Tell whether the annotations directory *folder_name* is present."""
    from os.path import isdir
    folder_is_there = isdir(folder_name)
    return folder_is_there
def create_annotations_folder(folder_name='pycrossfade_annotations'):
    """Create *folder_name* when missing.

    Returns True when a new directory was made, False when it already
    existed.
    """
    from os import mkdir
    if does_annotations_folder_exist(folder_name):
        return False
    mkdir(folder_name)
    return True
def path_to_annotation_file(annt_folder_name, file_name, file_format='txt'):
    """Compose the path '<annt_folder_name>/<file_name>.<file_format>'."""
    from os.path import join
    return join(annt_folder_name, f"{file_name}.{file_format}")
def linear_fade_volume(audio, start_volume=0.0, end_volume=1.0):
    """Scale *audio* with a linear power ramp between the two volumes.

    The square root converts the linear power profile into an amplitude
    envelope. When both endpoints match, *audio* is returned untouched
    (same object).
    """
    import numpy as np
    if start_volume == end_volume:
        return audio
    envelope = np.sqrt(np.linspace(start_volume, end_volume, audio.size))
    return audio * envelope
def linear_fade_filter(audio, filter_type, start_volume=0.0, end_volume=1.0):
    """Fade a shelving filter's gain linearly across *audio*.

    *filter_type* is 'low_shelf' or 'high_shelf'; anything else raises.
    The clip is processed in NUM_STEPS chunks, each filtered with a biquad
    whose shelf gain interpolates from start_volume to end_volume
    (0.0 -> -26 dB shelf, 1.0 -> 0, i.e. no attenuation).  Returns a new
    array; *audio* itself is returned untouched when the endpoints match.
    (Cleanup: removed the unused MID_CENTER constant, the dead `else 1`
    branch made unreachable by the early return, and dataset residue that
    had been fused onto the final return line.)
    """
    from yodel.filter import Biquad
    import numpy as np
    from scipy.signal import lfilter
    if start_volume == end_volume:
        return audio
    SAMPLE_RATE = 44100
    LOW_CUTOFF = 70
    HIGH_CUTOFF = 13000
    Q = 1.0 / np.sqrt(2)
    NUM_STEPS = 20  # equal endpoints already returned above
    bquad_filter = Biquad()
    length = audio.size  # Assumes mono audio
    profile = np.linspace(start_volume, end_volume, NUM_STEPS)
    output_audio = np.zeros(audio.shape)
    for i in range(NUM_STEPS):
        start_idx = int((i / float(NUM_STEPS)) * length)
        end_idx = int(((i + 1) / float(NUM_STEPS)) * length)
        if filter_type == 'low_shelf':
            bquad_filter.low_shelf(SAMPLE_RATE, LOW_CUTOFF, Q, -int(26 * (1.0 - profile[i])))
        elif filter_type == 'high_shelf':
            bquad_filter.high_shelf(SAMPLE_RATE, HIGH_CUTOFF, Q, -int(26 * (1.0 - profile[i])))
        else:
            raise Exception('Unknown filter type: ' + filter_type)
        # Biquad.process() was too slow; feed the coefficients through
        # scipy's C implementation of the filter instead.
        b = bquad_filter._b_coeffs
        a = bquad_filter._a_coeffs
        # yodel normalizes the coefficients but never resets a[0] to 1.0
        # after the division, so fix it up before handing them to lfilter.
        a[0] = 1.0
        output_audio[start_idx: end_idx] = lfilter(b, a, audio[start_idx: end_idx]).astype('float32')
    return output_audio
import pyrubberband as pyrb
return pyrb.time_stretch(audio, sample_rate, factor)
def load_audio(filepath):
# returns loaded mono audio.
from essentia.standard import MonoLoader
return MonoLoader(filename=filepath)()
def save_audio(audio, filename, file_format='wav', bit_rate=320):
from essentia.standard import MonoWriter
MonoWriter(filename=filename, bitrate=bit_rate, format=file_format)(audio)
def does_annotations_folder_exist(folder_name='pycrossfade_annotations'):
from os.path import isdir
return isdir(folder_name)
def create_annotations_folder(folder_name='pycrossfade_annotations'):
from os import mkdir
if not does_annotations_folder_exist(folder_name):
mkdir(folder_name)
return True
return False
def path_to_annotation_file(annt_folder_name, file_name, file_format='txt'):
from os.path import join
return join(annt_folder_name, file_name + '.' + file_format)
def linear_fade_volume(audio, start_volume=0.0, end_volume=1.0):
import numpy as np
if start_volume == end_volume:
return audio
length = audio.size
profile = np.sqrt(np.linspace(start_volume, end_volume, length))
return audio * profile
def linear_fade_filter(audio, filter_type, start_volume=0.0, end_volume=1.0):
from yodel.filter import Biquad
import numpy as np
from scipy.signal import lfilter
if start_volume == end_volume:
return audio
SAMPLE_RATE = 44100
LOW_CUTOFF = 70
MID_CENTER = 1000
HIGH_CUTOFF = 13000
Q = 1.0 / np.sqrt(2)
NUM_STEPS = 20 if start_volume != end_volume else 1
bquad_filter = Biquad()
length = audio.size # Assumes mono audio
profile = np.linspace(start_volume, end_volume, NUM_STEPS)
output_audio = np.zeros(audio.shape)
for i in range(NUM_STEPS):
start_idx = int((i / float(NUM_STEPS)) * length)
end_idx = int(((i + 1) / float(NUM_STEPS)) * length)
if filter_type == 'low_shelf':
bquad_filter.low_shelf(SAMPLE_RATE, LOW_CUTOFF, Q, -int(26 * (1.0 - profile[i])))
elif filter_type == 'high_shelf':
bquad_filter.high_shelf(SAMPLE_RATE, HIGH_CUTOFF, Q, -int(26 * (1.0 - profile[i])))
else:
raise Exception('Unknown filter type: ' + filter_type)
# ~ bquad_filter.process(audio[start_idx : end_idx], output_audio[start_idx : end_idx]) # This was too slow, code beneath is faster!
b = bquad_filter._b_coeffs
a = bquad_filter._a_coeffs
a[
0] = 1.0 # Normalizing the coefficients is already done in the yodel object, but a[0] is never reset to 1.0 after division!
output_audio[start_idx: end_idx] = lfilter(b, a, audio[start_idx: end_idx]).astype('float32')
return output_audio | 0.524882 | 0.299323 |
# -*- coding:utf-8 -*-
"""Compute CodeBLEU for a hypothesis file against one or more reference
files: a weighted combination of plain n-gram match (BLEU), keyword-
weighted n-gram match, AST/syntax match and data-flow match."""
import argparse
import bleu
import weighted_ngram_match
import syntax_match
import dataflow_match

parser = argparse.ArgumentParser()
parser.add_argument('--refs', type=str, nargs='+', required=True,
                    help='reference files')
parser.add_argument('--hyp', type=str, required=True,
                    help='hypothesis file')
parser.add_argument('--lang', type=str, required=True,
                    choices=['java','js','c_sharp','php','go','python','ruby', 'c'],
                    help='programming language')
parser.add_argument('--params', type=str, default='0.25,0.25,0.25,0.25',
                    help='alpha, beta and gamma')
args = parser.parse_args()

lang = args.lang
# component weights: ngram, weighted ngram, syntax, dataflow
alpha, beta, gamma, theta = [float(x) for x in args.params.split(',')]

# preprocess inputs: one list of stripped lines per reference file
pre_references = [[x.strip() for x in open(file, 'r', encoding='utf-8').readlines()]
                  for file in args.refs]
hypothesis = [x.strip() for x in open(args.hyp, 'r', encoding='utf-8').readlines()]

# every reference file must align line-for-line with the hypothesis file
for i in range(len(pre_references)):
    assert len(hypothesis) == len(pre_references[i])

# regroup so that references[i] holds all reference variants of hypothesis[i]
references = []
for i in range(len(hypothesis)):
    ref_for_instance = []
    for j in range(len(pre_references)):
        ref_for_instance.append(pre_references[j][i])
    references.append(ref_for_instance)
# BUG FIX: there is exactly one (multi-reference) entry per hypothesis; the
# original asserted len(pre_references) * len(hypothesis), which only held
# when a single reference file was given.
assert len(references) == len(hypothesis)

# calculate ngram match (BLEU)
tokenized_hyps = [x.split() for x in hypothesis]
tokenized_refs = [[x.split() for x in reference] for reference in references]
ngram_match_score = bleu.corpus_bleu(tokenized_refs, tokenized_hyps)

# calculate weighted ngram match: language keywords weigh 1, other tokens 0.2
keywords = [x.strip() for x in open('keywords/'+args.lang+'.txt', 'r', encoding='utf-8').readlines()]
def make_weights(reference_tokens, key_word_list):
    """Map each reference token to its weight (1 for keywords, 0.2 otherwise)."""
    return {token: 1 if token in key_word_list else 0.2
            for token in reference_tokens}
tokenized_refs_with_weights = [[[reference_tokens, make_weights(reference_tokens, keywords)]
                                for reference_tokens in reference] for reference in tokenized_refs]
weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights, tokenized_hyps)

# calculate syntax match
syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, args.lang)

# calculate dataflow match
dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, args.lang)

print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'.
      format(ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))

code_bleu_score = alpha*ngram_match_score\
    + beta*weighted_ngram_match_score\
    + gamma*syntax_match_score\
    + theta*dataflow_match_score

print('CodeBLEU score: ', code_bleu_score)
# -*- coding:utf-8 -*-
import argparse
import bleu
import weighted_ngram_match
import syntax_match
import dataflow_match
parser = argparse.ArgumentParser()
parser.add_argument('--refs', type=str, nargs='+', required=True,
help='reference files')
parser.add_argument('--hyp', type=str, required=True,
help='hypothesis file')
parser.add_argument('--lang', type=str, required=True,
choices=['java','js','c_sharp','php','go','python','ruby', 'c'],
help='programming language')
parser.add_argument('--params', type=str, default='0.25,0.25,0.25,0.25',
help='alpha, beta and gamma')
args = parser.parse_args()
lang = args.lang
alpha,beta,gamma,theta = [float(x) for x in args.params.split(',')]
# preprocess inputs
pre_references = [[x.strip() for x in open(file, 'r', encoding='utf-8').readlines()] \
for file in args.refs]
hypothesis = [x.strip() for x in open(args.hyp, 'r', encoding='utf-8').readlines()]
for i in range(len(pre_references)):
assert len(hypothesis) == len(pre_references[i])
references = []
for i in range(len(hypothesis)):
ref_for_instance = []
for j in range(len(pre_references)):
ref_for_instance.append(pre_references[j][i])
references.append(ref_for_instance)
assert len(references) == len(pre_references)*len(hypothesis)
# calculate ngram match (BLEU)
tokenized_hyps = [x.split() for x in hypothesis]
tokenized_refs = [[x.split() for x in reference] for reference in references]
ngram_match_score = bleu.corpus_bleu(tokenized_refs,tokenized_hyps)
# calculate weighted ngram match
keywords = [x.strip() for x in open('keywords/'+args.lang+'.txt', 'r', encoding='utf-8').readlines()]
def make_weights(reference_tokens, key_word_list):
return {token:1 if token in key_word_list else 0.2 \
for token in reference_tokens}
tokenized_refs_with_weights = [[[reference_tokens, make_weights(reference_tokens, keywords)]\
for reference_tokens in reference] for reference in tokenized_refs]
weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights,tokenized_hyps)
# calculate syntax match
syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, args.lang)
# calculate dataflow match
dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, args.lang)
print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'.\
format(ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))
code_bleu_score = alpha*ngram_match_score\
+ beta*weighted_ngram_match_score\
+ gamma*syntax_match_score\
+ theta*dataflow_match_score
print('CodeBLEU score: ', code_bleu_score) | 0.268558 | 0.20834 |
import tensorflow as tf
import tensorblock as tb
class recipe_init:
    """Mixin holding state initialization and default settings for a recipe.

    NOTE(review): attributes such as self.folder, self.sess and
    self.collection are provided by sibling mixins of the recipe class --
    confirm against the rest of the tensorblock package.
    (Cleanup: dataset residue fused onto the last line was removed.)
    """

    ####### Initialize Variables
    def initVariables( self ):
        """Reset all bookkeeping containers to an empty state."""
        self.labels = {}
        self.cnt = 0
        self.curr_input = None
        self.blocks , self.order = [] , []
        self.layers , self.extras = [] , []
        self.inputs , self.variables = [] , []
        self.weights , self.biases , self.dropouts = [] , [] , []
        self.operations = []
        self.summaries , self.writers = [] , []
        self.savers , self.plotters = [] , []

    ####### Initialize Defaults
    def initDefaults( self ):
        """Populate the default parameter dictionaries for every entity kind."""
        self.defs_block = {
            'src' : None , 'dst' : None , 'type' : None ,
            'mod_inputs' : True , 'mod_variables' : True , 'mod_layers' : True ,
            'no_ops' : False ,
        }
        self.defs_input = {
            'name' : None , 'shape' : None , 'tensor' : None ,
            'out_sides' : None , 'out_channels' : None ,
            'copy' : None , 'share' : None , 'first_none' : True ,
            'dtype' : tf.float32 ,
        }
        self.defs_variable = {
            'name' : None , 'shape' : None , 'tensor' : None ,
            'out_sides' : None , 'out_channels' : None ,
            'first_none' : False ,
            'type' : tb.vars.truncated_normal ,
            'copy' : None , 'share' : None ,
            'mean' : 0.0 , 'stddev' : 0.1 ,
            'value' : 0.0 , 'min' : 0.0 , 'max' : 1.0 ,
            'trainable' : True , 'seed' : None ,
        }
        self.defs_operation = {
            'name' : None , 'function' : None ,
            'input' : None , 'extra' : None , 'src' : None , 'dst' : None ,
            'learning_rate' : 1e-4 ,
        }
        self.defs_train = {
            'train_data' : None , 'train_labels' : None , 'train_seqlen' : None , 'train_length' : None ,
            'test_data' : None , 'test_labels' : None , 'test_seqlen' : None , 'test_length' : None ,
            'size_batch' : 100 , 'num_epochs' : 10 ,
            'optimizer' : None ,
            'summary' : None , 'writer' : None ,
            'saver' : None , 'save_freq' : 10 ,
            'eval_function' : None , 'eval_freq' : 1 ,
            'plot_function' : None , 'plot_freq' : 1 ,
        }
        self.defs_plotter = {
            'name' : None , 'function' : None ,
            'dir' : 'figures' , 'shape' : [ 2 , 5 ] ,
        }
        self.defs_layer = {
            'input' : None , 'type' : None , 'name' : None ,
            'copy' : None , 'share' : None , 'label' : None ,
            'weight_type' : tb.vars.truncated_normal ,
            'weight_name' : None , 'weight_copy' : None , 'weight_share' : None ,
            'weight_mean' : 0.0 , 'weight_stddev' : 0.1 ,
            'weight_value' : 0.0 , 'weight_min' : 0.0 , 'weight_max' : 1.0 ,
            'weight_trainable' : True , 'weight_seed' : None ,
            'bias_type' : tb.vars.truncated_normal ,
            'bias_name' : None , 'bias_copy' : None , 'bias_share' : None ,
            'bias_mean' : 0.0 , 'bias_stddev' : 0.1 ,
            'bias_value' : 0.0 , 'bias_min' : 0.0 , 'bias_max' : 1.0 ,
            'bias_trainable' : True , 'bias_seed' : None ,
            'dropout_name' : None , 'dropout' : 0.0 ,
            'dropout_copy' : None , 'dropout_share' : None ,
            'in_sides' : None , 'out_sides' : None ,
            'in_channels' : None , 'out_channels' : None ,
            'pooling' : 1 , 'pooling_ksize' : None ,
            'pooling_strides' : None , 'pooling_padding' : None ,
            'cell_type' : 'LSTM' , 'num_cells' : None ,
            'in_dropout' : 0.0 , 'in_dropout_name' : None ,
            'out_dropout' : 0.0 , 'out_dropout_name' : None ,
            'seqlen' : None ,
            'strides' : 1 , 'ksize' : 3 , 'padding' : 'SAME' ,
            'activation' : tb.activs.relu , 'activation_pars' : None
        }

    ####### Set Input Defaults
    def setInputDefaults( self , **args ):
        """Override entries of the input defaults dictionary."""
        self.defs_input = { **self.defs_input , **args }

    ####### Set Layer Defaults
    def setLayerDefaults( self , **args ):
        """Override entries of the layer defaults dictionary."""
        self.defs_layer = { **self.defs_layer , **args }

    ####### Set Operation Defaults
    def setOperationDefaults( self , **args ):
        """Override entries of the operation defaults dictionary."""
        self.defs_operation = { **self.defs_operation , **args }

    ####### Set Variable Defaults
    def setVariableDefaults( self , **args ):
        """Override entries of the variable defaults dictionary."""
        self.defs_variable = { **self.defs_variable , **args }

    ####### Initialize
    def initialize( self , vars = None ):
        """Run the TF initializer over the variable scope (default: self.folder).

        NOTE: the parameter name shadows the builtin `vars`; kept for
        backward compatibility with keyword callers.
        """
        if vars is None : vars = self.folder
        collection = self.collection( vars )
        self.sess.run( tf.variables_initializer( collection ) )
import tensorblock as tb
class recipe_init:
    """Initialization mixin for a recipe: bookkeeping state plus the default
    option tables used when adding blocks, inputs, variables, operations,
    training runs, plotters and layers.

    NOTE(review): relies on `tf` (tensorflow) and `tb` (tensorblock) being
    imported at module level, and on `self.folder` / `self.collection` /
    `self.sess` provided by a sibling mixin — confirm against the full class.
    """

    ####### Initialize Variables
    def initVariables( self ):
        """Reset every bookkeeping container to its empty initial state."""
        self.labels = {}
        self.cnt = 0
        self.curr_input = None
        self.blocks , self.order = [] , []
        self.layers , self.extras = [] , []
        self.inputs , self.variables = [] , []
        self.weights , self.biases , self.dropouts = [] , [] , []
        self.operations = []
        self.summaries , self.writers = [] , []
        self.savers , self.plotters = [] , []

    ####### Initialize Defaults
    def initDefaults( self ):
        """(Re)create the default option dictionaries. Fresh dicts are built
        on every call so instances never share mutable defaults."""
        self.defs_block = {
            'src' : None , 'dst' : None , 'type' : None ,
            'mod_inputs' : True , 'mod_variables' : True , 'mod_layers' : True ,
            'no_ops' : False ,
        }
        self.defs_input = {
            'name' : None , 'shape' : None , 'tensor' : None ,
            'out_sides' : None , 'out_channels' : None ,
            'copy' : None , 'share' : None , 'first_none' : True ,
            'dtype' : tf.float32 ,
        }
        self.defs_variable = {
            'name' : None , 'shape' : None , 'tensor' : None ,
            'out_sides' : None , 'out_channels' : None ,
            'first_none' : False ,
            'type' : tb.vars.truncated_normal ,
            'copy' : None , 'share' : None ,
            'mean' : 0.0 , 'stddev' : 0.1 ,
            'value' : 0.0 , 'min' : 0.0 , 'max' : 1.0 ,
            'trainable' : True , 'seed' : None ,
        }
        self.defs_operation = {
            'name' : None , 'function' : None ,
            'input' : None , 'extra' : None , 'src' : None , 'dst' : None ,
            'learning_rate' : 1e-4 ,
        }
        self.defs_train = {
            'train_data' : None , 'train_labels' : None , 'train_seqlen' : None , 'train_length' : None ,
            'test_data' : None , 'test_labels' : None , 'test_seqlen' : None , 'test_length' : None ,
            'size_batch' : 100 , 'num_epochs' : 10 ,
            'optimizer' : None ,
            'summary' : None , 'writer' : None ,
            'saver' : None , 'save_freq' : 10 ,
            'eval_function' : None , 'eval_freq' : 1 ,
            'plot_function' : None , 'plot_freq' : 1 ,
        }
        self.defs_plotter = {
            'name' : None , 'function' : None ,
            'dir' : 'figures' , 'shape' : [ 2 , 5 ] ,
        }
        self.defs_layer = {
            'input' : None , 'type' : None , 'name' : None ,
            'copy' : None , 'share' : None , 'label' : None ,
            'weight_type' : tb.vars.truncated_normal ,
            'weight_name' : None , 'weight_copy' : None , 'weight_share' : None ,
            'weight_mean' : 0.0 , 'weight_stddev' : 0.1 ,
            'weight_value' : 0.0 , 'weight_min' : 0.0 , 'weight_max' : 1.0 ,
            'weight_trainable' : True , 'weight_seed' : None ,
            'bias_type' : tb.vars.truncated_normal ,
            'bias_name' : None , 'bias_copy' : None , 'bias_share' : None ,
            'bias_mean' : 0.0 , 'bias_stddev' : 0.1 ,
            'bias_value' : 0.0 , 'bias_min' : 0.0 , 'bias_max' : 1.0 ,
            'bias_trainable' : True , 'bias_seed' : None ,
            'dropout_name' : None , 'dropout' : 0.0 ,
            'dropout_copy' : None , 'dropout_share' : None ,
            'in_sides' : None , 'out_sides' : None ,
            'in_channels' : None , 'out_channels' : None ,
            'pooling' : 1 , 'pooling_ksize' : None ,
            'pooling_strides' : None , 'pooling_padding' : None ,
            'cell_type' : 'LSTM' , 'num_cells' : None ,
            'in_dropout' : 0.0 , 'in_dropout_name' : None ,
            'out_dropout' : 0.0 , 'out_dropout_name' : None ,
            'seqlen' : None ,
            'strides' : 1 , 'ksize' : 3 , 'padding' : 'SAME' ,
            'activation' : tb.activs.relu , 'activation_pars' : None
        }

    ####### Set Input Defaults
    def setInputDefaults( self , **args ):
        """Override entries of the input defaults table."""
        self.defs_input = { **self.defs_input , **args }

    ####### Set Layer Defaults
    def setLayerDefaults( self , **args ):
        """Override entries of the layer defaults table."""
        self.defs_layer = { **self.defs_layer , **args }

    ####### Set Operation Defaults
    def setOperationDefaults( self , **args ):
        """Override entries of the operation defaults table."""
        self.defs_operation = { **self.defs_operation , **args }

    ####### Set Variable Defaults
    def setVariableDefaults( self , **args ):
        """Override entries of the variable defaults table."""
        self.defs_variable = { **self.defs_variable , **args }

    ####### Initialize
    def initialize( self , vars = None ):
        """Run the TF initializer over the variable collection for *vars*
        (defaults to this recipe's folder). `vars` kept as the parameter
        name for backward compatibility with keyword callers."""
        if vars is None : vars = self.folder
        collection = self.collection( vars )
        self.sess.run( tf.variables_initializer( collection ) )
import panscore
def load(filename: str) -> "panscore.Score":
    """Load a UTAU .ust sequence file and return a panscore.Score.

    The file is read as raw bytes because the encoding is not known up
    front: UST files are traditionally Shift-JIS unless they declare
    "Charset=UTF-8" in the header.
    """
    with open(filename, 'rb') as f:
        raw = f.read()
    # Detect the declared charset; default to Shift-JIS.
    if b"Charset=UTF-8" in raw:
        encoding = "utf-8"
    else:
        encoding = "shift-JIS"
    # Split into "[...]"-headed sections.
    blocks = []
    block = []
    for line in raw.split(b"\n"):
        line = line.strip(b"\r")
        # Decode line by line: individual lines occasionally use a
        # different encoding than the rest of the file.
        try:
            linestr = str(line, encoding=encoding)
        except UnicodeDecodeError:
            for candidate in ["gbk", "utf-8", "shift-JIS"]:
                try:
                    linestr = str(line, encoding=candidate)
                    break
                except UnicodeDecodeError:
                    pass
            else:
                linestr = ""
        if linestr.startswith("["):
            blocks.append(block)
            block = []
        block.append(linestr)
    # Keep the trailing section too; without this the final note was lost
    # whenever the file had no [#TRACKEND] terminator. For well-formed
    # files the extra [#TRACKEND] block parses to a zero-length rest and
    # is therefore harmless.
    blocks.append(block)
    # blocks[0] is pre-header residue, blocks[1] the [#VERSION] section,
    # blocks[2] the [#SETTING] section; notes start at blocks[3].
    # TODO: parse header properties (e.g. Tempo) from blocks[2].
    notes = []
    time = 0
    for block in blocks[3:]:
        length = 0
        notenum = 60
        lyric = "R"
        for line in block:
            if "=" in line:
                # Split only once: property values may themselves contain '='.
                key, value = line.split("=", 1)
                if key == "Length":
                    length = int(value)
                elif key == "NoteNum":
                    notenum = int(value)
                elif key == "Lyric":
                    lyric = value.strip(" \n")
        # "R"/"r" lyrics are rests: they advance time but emit no note.
        if not (lyric in {"R", "r"}):
            notes.append(panscore.Note(start=time,
                                       length=length,
                                       notenum=notenum,
                                       lyric=lyric))
        time += length
    return panscore.Score(track=[panscore.Track(note=notes)])
def save(score: "panscore.Score", filename: str, track: int = 0):
    """Write one track of a panscore.Score as a UTAU .ust file.

    Gaps between consecutive notes are filled with rest notes (lyric "R").
    Builds the output with a list + join instead of repeated string
    concatenation (the original was quadratic for long tracks).
    """
    parts = ['[#VERSION]\nUST Version1.2\nCharset=UTF-8\n[#SETTING]\n']
    noteindex = 0  # running note-section number
    time = 0       # end position of the previously written note

    def dumpnote(length: int, notenum: int, lyric: str) -> str:
        # One "[#NNNN]" section; the index is zero-padded to 4 digits.
        return "[#{:0>4}]\nLength={}\nNoteNum={}\nLyric={}\n".format(
            noteindex, length, notenum, lyric)

    tr = score.track[track]
    for note in tr.note:
        if note.start > time:
            # Fill the gap before this note with a rest.
            parts.append(dumpnote(note.start - time, 60, "R"))
            noteindex += 1
        parts.append(dumpnote(note.length, note.notenum, note.lyric))
        noteindex += 1
        time = note.start + note.length
    parts.append("[#TRACKEND]\n")
    with open(filename, "w", encoding="utf8") as f:
        f.write("".join(parts))
def load(filename:str)->panscore.Score:
#打开文件,返回panscore.Score对象
#由于编码不确定,先用二进制打开文件
with open(filename,'rb') as f:
file=f.read()
#读取编码
if(b"Charset=UTF-8" in file):
encoding="utf-8"
else:
encoding="shift-JIS"
#分块
blocks=[]
block=[]
for line in file.split(b"\n"):
line=line.strip(b"\r")
#逐行解码
try:
linestr=str(line,encoding=encoding)
except UnicodeDecodeError:
#如果某行编码与其他行不同,则尝试用各种编码解析
for i in ["gbk","utf-8","shift-JIS"]:
try:
linestr=str(line,encoding=i)
break
except UnicodeDecodeError:
pass
else:
linestr=""
if(linestr.startswith("[")):
blocks.append(block)
block=[]
block.append(linestr)
#读文件头
"""
fileproperties={}
for line in blocks[2]:
if("=" in line):
[key,value]=line.split("=")
if(value!=""):
fileproperties[key]=ustvaluetyper(key,value)
tempo=fileproperties.pop("Tempo",120.0)
"""
#读音符
notes=[]
time=0
for block in blocks[3:]:
noteproperties={}
length=0
notenum=60
lyric="R"
for line in block:
if("=" in line):
[key,value]=line.split("=")
if(key=="Length"):
length=int(value)
elif(key=="NoteNum"):
notenum=int(value)
elif(key=="Lyric"):
lyric=value.strip(" \n")
if(not (lyric in {"R","r"})):
notes.append(panscore.Note(start=time,
length=length,
notenum=notenum,
lyric=lyric))
time+=length
return panscore.Score(track=[panscore.Track(note=notes)])
#TODO
pass
def save(score:panscore.Score,filename:str,track:int=0):
#将panscore.Score对象保存为文件
s='[#VERSION]\nUST Version1.2\nCharset=UTF-8\n[#SETTING]\n'
noteindex=0#音符序号
time=0
def dumpnote(length:int,notenum:int,lyric:int)->str:
return "[#{:0>4}]\nLength={}\nNoteNum={}\nLyric={}\n".format(noteindex,length,notenum,lyric)
tr=score.track[track]
for note in tr.note:
if(note.start>time):
s+=dumpnote(note.start-time,60,"R")#休止符
noteindex+=1
s+=dumpnote(note.length,note.notenum,note.lyric)
noteindex+=1
time=note.start+note.length
s+="[#TRACKEND]\n"
with open(filename,"w",encoding="utf8") as file:
file.write(s) | 0.030072 | 0.111 |
from cargo.expressions import *
__all__ = ('NetworkingLogic',)
class NetworkingLogic(BaseLogic):
    """PostgreSQL network-address (inet/cidr) operator and function reference.

    In the original file this text was a bare string expression placed
    *after* ``__slots__`` — a no-op statement, invisible as ``__doc__``.
    It is promoted to the class docstring here.

    Operators::

        <    is less than                 inet '192.168.1.5' < inet '192.168.1.6'
        <=   is less than or equal        inet '192.168.1.5' <= inet '192.168.1.5'
        =    equals                       inet '192.168.1.5' = inet '192.168.1.5'
        >=   is greater or equal          inet '192.168.1.5' >= inet '192.168.1.5'
        >    is greater than              inet '192.168.1.5' > inet '192.168.1.4'
        <>   is not equal                 inet '192.168.1.5' <> inet '192.168.1.4'
        <<   is contained by              inet '192.168.1.5' << inet '192.168.1/24'
        <<=  is contained by or equals    inet '192.168.1/24' <<= inet '192.168.1/24'
        >>   contains                     inet '192.168.1/24' >> inet '192.168.1.5'
        >>=  contains or equals           inet '192.168.1/24' >>= inet '192.168.1/24'
        &&   contains or is contained by  inet '192.168.1/24' && inet '192.168.1.80/28'
        ~    bitwise NOT                  ~ inet '192.168.1.6'
        &    bitwise AND                  inet '192.168.1.6' & inet '0.0.0.255'
        |    bitwise OR                   inet '192.168.1.6' | inet '0.0.0.255'
        +    addition                     inet '192.168.1.6' + 25
        -    subtraction                  inet '192.168.1.43' - 36
        -    subtraction                  inet '192.168.1.43' - inet '192.168.1.19'

    Functions::

        abbrev(inet) text        abbreviated display format as text
                                 abbrev(inet '10.1.0.0/16') -> 10.1.0.0/16
        abbrev(cidr) text        abbreviated display format as text
                                 abbrev(cidr '10.1.0.0/16') -> 10.1/16
        broadcast(inet) inet     broadcast address for network
                                 broadcast('192.168.1.5/24') -> 192.168.1.255/24
        family(inet) int         extract family of address; 4 for IPv4, 6 for IPv6
                                 family('::1') -> 6
        host(inet) text          extract IP address as text
                                 host('192.168.1.5/24') -> 192.168.1.5
        hostmask(inet) inet      construct host mask for network
                                 hostmask('192.168.23.20/30') -> 0.0.0.3
        masklen(inet) int        extract netmask length
                                 masklen('192.168.1.5/24') -> 24
        netmask(inet) inet       construct netmask for network
                                 netmask('192.168.1.5/24') -> 255.255.255.0
        network(inet) cidr       extract network part of address
                                 network('192.168.1.5/24') -> 192.168.1.0/24
        set_masklen(inet, int) inet   set netmask length for inet value
                                 set_masklen('192.168.1.5/24', 16) -> 192.168.1.5/16
        set_masklen(cidr, int) cidr   set netmask length for cidr value
                                 set_masklen('192.168.1.0/24'::cidr, 16) -> 192.168.0.0/16
        text(inet) text          extract IP address and netmask length as text
                                 text(inet '192.168.1.5') -> 192.168.1.5/32
        inet_same_family(inet, inet) boolean   are the addresses from the same family?
                                 inet_same_family('192.168.1.5/24', '::1') -> false
        inet_merge(inet, inet) cidr   the smallest network which includes both
                                 inet_merge('192.168.1.5/24', '192.168.2.5/24') -> 192.168.0.0/22
    """
    __slots__ = tuple()
__all__ = ('NetworkingLogic',)
class NetworkingLogic(BaseLogic):
__slots__ = tuple()
'''
< is less than inet '192.168.1.5' < inet '192.168.1.6'
<= is less than or equal inet '192.168.1.5' <= inet '192.168.1.5'
= equals inet '192.168.1.5' = inet '192.168.1.5'
>= is greater or equal inet '192.168.1.5' >= inet '192.168.1.5'
> is greater than inet '192.168.1.5' > inet '192.168.1.4'
<> is not equal inet '192.168.1.5' <> inet '192.168.1.4'
<< is contained by inet '192.168.1.5' << inet '192.168.1/24'
<<= is contained by or equals inet
'192.168.1/24' <<= inet '192.168.1/24'
>> contains inet '192.168.1/24' >> inet '192.168.1.5'
>>= contains or equals inet '192.168.1/24' >>= inet '192.168.1/24'
&& contains or is contained by inet
'192.168.1/24' && inet '192.168.1.80/28'
~ bitwise NOT ~ inet '192.168.1.6'
& bitwise AND inet '192.168.1.6' & inet '0.0.0.255'
| bitwise OR inet '192.168.1.6' | inet '0.0.0.255'
+ addition inet '192.168.1.6' + 25
- subtraction inet '192.168.1.43' - 36
- subtraction inet '192.168.1.43' - inet '192.168.1.19'
abbrev(inet) text abbreviated display format as text
abbrev(inet '10.1.0.0/16') 10.1.0.0/16
abbrev(cidr) text abbreviated display format as text
abbrev(cidr '10.1.0.0/16') 10.1/16
broadcast(inet) inet broadcast address for network
broadcast('192.168.1.5/24') 192.168.1.255/24
family(inet) int extract family of address; 4 for IPv4, 6 for IPv6
family('::1') 6
host(inet) text extract IP address as text host('192.168.1.5/24')
192.168.1.5
hostmask(inet) inet construct host mask for network
hostmask('192.168.23.20/30') 0.0.0.3
masklen(inet) int extract netmask length masklen('192.168.1.5/24') 24
netmask(inet) inet construct netmask for network
netmask('192.168.1.5/24') 255.255.255.0
network(inet) cidr extract network part of address
network('192.168.1.5/24') 192.168.1.0/24
set_masklen(inet, int) inet set netmask length for inet value
set_masklen('192.168.1.5/24', 16) 192.168.1.5/16
set_masklen(cidr, int) cidr set netmask length for cidr value
set_masklen('192.168.1.0/24'::cidr, 16) 192.168.0.0/16
text(inet) text extract IP address and netmask length as text
text(inet '192.168.1.5') 192.168.1.5/32
inet_same_family(inet, inet) boolean are the addresses from the same
family? inet_same_family('192.168.1.5/24', '::1') false
inet_merge(inet, inet) cidr the smallest network which includes both of
the given networks inet_merge('192.168.1.5/24', '192.168.2.5/24')
192.168.0.0/22
''' | 0.361616 | 0.095602 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner as qr
class FeedingQueueRunner(qr.QueueRunner):
  """A queue runner that allows the feeding of values such as numpy arrays."""

  def __init__(self, queue=None, enqueue_ops=None, close_op=None,
               cancel_op=None, feed_fns=None,
               queue_closed_exception_types=None):
    """Initialize the queue runner.

    For further documentation, see `queue_runner.py`. Note that
    `FeedingQueueRunner` does not support construction from protobuffer nor
    serialization to protobuffer.

    Args:
      queue: A `Queue`.
      enqueue_ops: List of enqueue ops to run in threads later.
      close_op: Op to close the queue. Pending enqueue ops are preserved.
      cancel_op: Op to close the queue and cancel pending enqueue ops.
      feed_fns: a list of functions that return a dictionary mapping fed
        `Tensor`s to values. Must be the same length as `enqueue_ops`.
      queue_closed_exception_types: Optional tuple of Exception types that
        indicate that the queue has been closed when raised during an enqueue
        operation. Defaults to
        `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`.

    Raises:
      ValueError: `feed_fns` is not `None` and has different length than
        `enqueue_ops`.
    """
    if queue_closed_exception_types is None:
      queue_closed_exception_types = (
          errors.OutOfRangeError, errors.CancelledError)
    super(FeedingQueueRunner, self).__init__(
        queue, enqueue_ops, close_op,
        cancel_op, queue_closed_exception_types=queue_closed_exception_types)
    if feed_fns is None:
      # One (absent) feed function per enqueue op.
      self._feed_fns = [None for _ in enqueue_ops]
    else:
      if len(feed_fns) != len(enqueue_ops):
        raise ValueError(
            "If feed_fns is not None, it must have the same length as "
            "enqueue_ops.")
      self._feed_fns = feed_fns

  # pylint: disable=broad-except
  def _run(self, sess, enqueue_op, feed_fn, coord=None):
    """Execute the enqueue op in a loop, close the queue in case of error.

    Args:
      sess: A `Session`.
      enqueue_op: The `Operation` to run.
      feed_fn: the feed function to pass to `sess.run`.
      coord: Optional `Coordinator` object for reporting errors and checking
        for stop conditions.
    """
    # TODO(jamieas): Reduce code duplication with `QueueRunner`.
    if coord:
      coord.register_thread(threading.current_thread())
    decremented = False
    try:
      while True:
        if coord and coord.should_stop():
          break
        try:
          feed_dict = None if feed_fn is None else feed_fn()
          sess.run(enqueue_op, feed_dict=feed_dict)
        except (errors.OutOfRangeError, errors.CancelledError):
          # This exception indicates that a queue was closed.
          with self._lock:
            self._runs_per_session[sess] -= 1
            decremented = True
            if self._runs_per_session[sess] == 0:
              try:
                sess.run(self._close_op)
              except Exception as e:
                # Intentionally ignore errors from close_op.
                logging.vlog(1, "Ignored exception: %s", str(e))
            return
    except Exception as e:
      # This catches all other exceptions.
      if coord:
        coord.request_stop(e)
      else:
        logging.error("Exception in QueueRunner: %s", str(e))
        with self._lock:
          self._exceptions_raised.append(e)
        raise
    finally:
      # Make sure we account for all terminations: normal or errors.
      if not decremented:
        with self._lock:
          self._runs_per_session[sess] -= 1

  def create_threads(self, sess, coord=None, daemon=False, start=False):
    """Create threads to run the enqueue ops for the given session.

    This method requires a session in which the graph was launched. It creates
    a list of threads, optionally starting them. There is one thread for each
    op passed in `enqueue_ops`.

    The `coord` argument is an optional coordinator, that the threads will use
    to terminate together and report exceptions. If a coordinator is given,
    this method starts an additional thread to close the queue when the
    coordinator requests a stop.

    If previously created threads for the given session are still running, no
    new threads will be created.

    Args:
      sess: A `Session`.
      coord: Optional `Coordinator` object for reporting errors and checking
        stop conditions.
      daemon: Boolean. If `True` make the threads daemon threads.
      start: Boolean. If `True` starts the threads. If `False` the
        caller must call the `start()` method of the returned threads.

    Returns:
      A list of threads.
    """
    with self._lock:
      try:
        if self._runs_per_session[sess] > 0:
          # Already started: no new threads to return.
          return []
      except KeyError:
        # We haven't seen this session yet.
        pass
      self._runs_per_session[sess] = len(self._enqueue_ops)
      self._exceptions_raised = []

    ret_threads = [threading.Thread(target=self._run,
                                    args=(sess, op, feed_fn, coord))
                   for op, feed_fn in zip(self._enqueue_ops, self._feed_fns)]
    if coord:
      # Extra thread closes the queue when the coordinator stops.
      ret_threads.append(threading.Thread(target=self._close_on_stop,
                                          args=(sess, self._cancel_op, coord)))
    for t in ret_threads:
      if daemon:
        t.daemon = True
      if start:
        t.start()
    return ret_threads

  def _init_from_proto(self, queue_runner_def):
    raise NotImplementedError(
        "{} does not support initialization from proto.".format(type(
            self).__name__))

  def to_proto(self):
    raise NotImplementedError(
        "{} does not support serialization to proto.".format(type(
            self).__name__))
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner as qr
class FeedingQueueRunner(qr.QueueRunner):
"""A queue runner that allows the feeding of values such as numpy arrays."""
def __init__(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, feed_fns=None,
queue_closed_exception_types=None):
"""Initialize the queue runner.
For further documentation, see `queue_runner.py`. Note that
`FeedingQueueRunner` does not support construction from protobuffer nor
serialization to protobuffer.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
feed_fns: a list of functions that return a dictionary mapping fed
`Tensor`s to values. Must be the same length as `enqueue_ops`.
queue_closed_exception_types: Optional tuple of Exception types that
indicate that the queue has been closed when raised during an enqueue
operation. Defaults to
`(tf.errors.OutOfRangeError, tf.errors.CancelledError)`.
Raises:
ValueError: `feed_fns` is not `None` and has different length than
`enqueue_ops`.
"""
if queue_closed_exception_types is None:
queue_closed_exception_types = (
errors.OutOfRangeError, errors.CancelledError)
super(FeedingQueueRunner, self).__init__(
queue, enqueue_ops, close_op,
cancel_op, queue_closed_exception_types=queue_closed_exception_types)
if feed_fns is None:
self._feed_fns = [None for _ in enqueue_ops]
else:
if len(feed_fns) != len(enqueue_ops):
raise ValueError(
"If feed_fns is not None, it must have the same length as "
"enqueue_ops.")
self._feed_fns = feed_fns
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, feed_fn, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A `Session`.
enqueue_op: The `Operation` to run.
feed_fn: the feed function to pass to `sess.run`.
coord: Optional `Coordinator` object for reporting errors and checking
for stop conditions.
"""
# TODO(jamieas): Reduce code duplication with `QueueRunner`.
if coord:
coord.register_thread(threading.current_thread())
decremented = False
try:
while True:
if coord and coord.should_stop():
break
try:
feed_dict = None if feed_fn is None else feed_fn()
sess.run(enqueue_op, feed_dict=feed_dict)
except (errors.OutOfRangeError, errors.CancelledError):
# This exception indicates that a queue was closed.
with self._lock:
self._runs_per_session[sess] -= 1
decremented = True
if self._runs_per_session[sess] == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs_per_session[sess] -= 1
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops for the given session.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator, that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
If previously created threads for the given session are still running, no
new threads will be created.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
"""
with self._lock:
try:
if self._runs_per_session[sess] > 0:
# Already started: no new threads to return.
return []
except KeyError:
# We haven't seen this session yet.
pass
self._runs_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = [threading.Thread(target=self._run,
args=(sess, op, feed_fn, coord))
for op, feed_fn in zip(self._enqueue_ops, self._feed_fns)]
if coord:
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord)))
for t in ret_threads:
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def _init_from_proto(self, queue_runner_def):
raise NotImplementedError(
"{} does not support initialization from proto.".format(type(
self).__name__))
def to_proto(self):
raise NotImplementedError(
"{} does not support serialization to proto.".format(type(
self).__name__)) | 0.850949 | 0.218107 |
import json
from io import StringIO
# Graph-element type tags returned by AGObj.gtype and its overrides.
TP_NONE = 0    # base / unknown element
TP_VERTEX = 1  # Vertex
TP_EDGE = 2    # Edge
TP_PATH = 3    # Path
class Graph():
    """A query result: an ordered list of result rows plus a vertex cache
    keyed by vertex id."""

    def __init__(self, stmt=None) -> None:
        self.statement = stmt   # the Cypher statement that produced this result
        self.rows = []          # result rows, in arrival order
        self.vertices = {}      # vertex-id -> Vertex cache

    def __iter__(self):
        return iter(self.rows)

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, index):
        return self.rows[index]

    def size(self):
        """Number of rows (same as len(self))."""
        return len(self.rows)

    def append(self, agObj):
        """Add one result row."""
        self.rows.append(agObj)

    def getVertices(self):
        """Return the vertex-id -> Vertex cache."""
        return self.vertices

    def getVertex(self, id):
        """Return the cached vertex for *id*, or None when absent."""
        return self.vertices.get(id)
class AGObj:
    """Base class for graph result elements (Vertex, Edge, Path)."""

    @property
    def gtype(self):
        # Type tag; subclasses override with TP_VERTEX / TP_EDGE / TP_PATH.
        return TP_NONE
class Path(AGObj):
    """An ordered sequence of graph elements (vertices/edges) forming a path.

    Fixes two defects of the original: a shared class-level mutable default
    (``entities = []``) and ``Path()`` leaving ``self.entities = None``,
    which broke append()/len()/rendering.
    """

    def __init__(self, entities=None) -> None:
        # Fresh list per instance; never share a class-level default.
        self.entities = entities if entities is not None else []

    @property
    def gtype(self):
        return TP_PATH

    def __iter__(self):
        return self.entities.__iter__()

    def __len__(self):
        return self.entities.__len__()

    def __getitem__(self, index):
        return self.entities[index]

    def size(self):
        """Number of elements in the path (same as len(self))."""
        return self.entities.__len__()

    def append(self, agObj: AGObj):
        """Append one vertex/edge to the path."""
        self.entities.append(agObj)

    def __str__(self) -> str:
        return self.toString()

    def __repr__(self) -> str:
        return self.toString()

    def toString(self) -> str:
        """Render as "[elem,elem,...]::PATH" via each element's _toString."""
        buf = StringIO()
        buf.write("[")
        count = len(self.entities)  # renamed from `max` (shadowed builtin)
        idx = 0
        while idx < count:
            if idx > 0:
                buf.write(",")
            self.entities[idx]._toString(buf)
            idx += 1
        buf.write("]::PATH")
        return buf.getvalue()

    def toJson(self) -> str:
        """Render as a JSON object with gtype "path" and its elements."""
        buf = StringIO()
        buf.write("{\"gtype\": \"path\", \"elements\": [")
        count = len(self.entities)
        idx = 0
        while idx < count:
            if idx > 0:
                buf.write(",")
            self.entities[idx]._toJson(buf)
            idx += 1
        buf.write("]}")
        return buf.getvalue()
class Vertex(AGObj):
    """A graph vertex: id, label and a property dictionary."""

    def __init__(self, id=None, label=None, properties=None) -> None:
        # `id` kept as the parameter name for keyword-caller compatibility,
        # although it shadows the builtin.
        self.id = id
        self.label = label
        self.properties = properties

    @property
    def gtype(self):
        return TP_VERTEX

    def __setitem__(self, name, value):
        self.properties[name] = value

    def __getitem__(self, name):
        # Missing properties read as None rather than raising KeyError.
        if name not in self.properties:
            return None
        return self.properties[name]

    def __str__(self) -> str:
        return self.toString()

    def __repr__(self) -> str:
        return self.toString()

    def toString(self) -> str:
        """Plain-text rendering (delegates to module-level nodeToString)."""
        return nodeToString(self)

    def _toString(self, buf):
        _nodeToString(self, buf)

    def toJson(self) -> str:
        """JSON rendering (delegates to module-level nodeToJson)."""
        return nodeToJson(self)

    def _toJson(self, buf):
        _nodeToJson(self, buf)
class Edge(AGObj):
    """A graph edge: id, label, endpoint vertex ids and a property dict."""

    def __init__(self, id=None, label=None, properties=None) -> None:
        self.id = id
        self.label = label
        # Endpoint vertex ids; presumably filled in by the parser after
        # construction — they are not constructor arguments.
        self.start_id = None
        self.end_id = None
        self.properties = properties

    @property
    def gtype(self):
        return TP_EDGE

    def __setitem__(self, name, value):
        self.properties[name] = value

    def __getitem__(self, name):
        # Missing properties read as None rather than raising KeyError.
        if name not in self.properties:
            return None
        return self.properties[name]

    def __str__(self) -> str:
        return self.toString()

    def __repr__(self) -> str:
        return self.toString()

    def extraStrFormat(node, buf):
        # Appends start_id/end_id to the plain-text rendering; used as a
        # plain callable (Edge.extraStrFormat), not a bound method.
        if node.start_id is not None:
            buf.write(", start_id:")
            buf.write(str(node.start_id))
        if node.end_id is not None:
            buf.write(", end_id:")
            buf.write(str(node.end_id))

    def toString(self) -> str:
        return nodeToString(self, Edge.extraStrFormat)

    def _toString(self, buf):
        _nodeToString(self, buf, Edge.extraStrFormat)

    def extraJsonFormat(node, buf):
        # Appends start_id/end_id to the JSON rendering.
        if node.start_id is not None:
            buf.write(", \"start_id\": \"")
            buf.write(str(node.start_id))
            buf.write("\"")
        if node.end_id is not None:
            buf.write(", \"end_id\": \"")
            buf.write(str(node.end_id))
            buf.write("\"")

    def toJson(self) -> str:
        return nodeToJson(self, Edge.extraJsonFormat)

    def _toJson(self, buf):
        _nodeToJson(self, buf, Edge.extraJsonFormat)
def nodeToString(node, extraFormatter=None):
    """Render *node* to its plain-text form and return the string."""
    out = StringIO()
    _nodeToString(node, out, extraFormatter=extraFormatter)
    return out.getvalue()
def _nodeToString(node, buf, extraFormatter=None):
    """Write the plain-text rendering of *node* into *buf*.

    Format: ``{label:..., id:..., properties:{k: v, }}::VERTEX`` (or
    ``::EDGE``). The trailing ", " after each property is part of the
    historical output format and is preserved.
    """
    buf.write("{")
    if node.label is not None:
        buf.write("label:")
        buf.write(node.label)
    if node.id is not None:
        buf.write(", id:")
        buf.write(str(node.id))
    if node.properties is not None:
        buf.write(", properties:{")
        for key, val in node.properties.items():
            buf.write(key)
            buf.write(": ")
            buf.write(str(val))
            buf.write(", ")
        buf.write("}")
    if extraFormatter is not None:
        extraFormatter(node, buf)
    if node.gtype == TP_VERTEX:
        buf.write("}::VERTEX")
    if node.gtype == TP_EDGE:
        buf.write("}::EDGE")
def nodeToJson(node, extraFormatter=None):
    """Render *node* to its JSON form and return the string."""
    out = StringIO()
    _nodeToJson(node, out, extraFormatter=extraFormatter)
    return out.getvalue()
def _nodeToJson(node, buf, extraFormatter=None):
    """Write the JSON rendering of *node* into *buf*.

    Fixes two defects that made the original output invalid JSON:
    a trailing comma after the last property (``"k": "v", }``) and a
    double comma after "gtype" whenever ``label`` was None. Field order
    and value quoting are otherwise unchanged (property values are
    str()-ed and quoted; ``id`` is written unquoted, as before).
    """
    buf.write("{\"gtype\": ")
    if node.gtype == TP_VERTEX:
        buf.write("\"vertex\"")
    if node.gtype == TP_EDGE:
        buf.write("\"edge\"")
    if node.label is not None:
        buf.write(", \"label\":\"")
        buf.write(node.label)
        buf.write("\"")
    if node.id is not None:
        buf.write(", \"id\":")
        buf.write(str(node.id))
    if extraFormatter is not None:
        extraFormatter(node, buf)
    if node.properties is not None:
        buf.write(", \"properties\":{")
        first = True
        for key, val in node.properties.items():
            if not first:
                buf.write(", ")
            buf.write("\"")
            buf.write(key)
            buf.write("\": \"")
            buf.write(str(val))
            buf.write("\"")
            first = False
        buf.write("}")
    buf.write("}")
from io import StringIO
TP_NONE = 0
TP_VERTEX = 1
TP_EDGE = 2
TP_PATH = 3
class Graph():
def __init__(self, stmt=None) -> None:
self.statement = stmt
self.rows = list()
self.vertices = dict()
def __iter__(self):
return self.rows.__iter__()
def __len__(self):
return self.rows.__len__()
def __getitem__(self,index):
return self.rows[index]
def size(self):
return self.rows.__len__()
def append(self, agObj):
self.rows.append(agObj)
def getVertices(self):
return self.vertices
def getVertex(self, id):
if id in self.vertices:
return self.vertices[id]
else:
return None
class AGObj:
@property
def gtype(self):
return TP_NONE
class Path(AGObj):
entities = []
def __init__(self, entities=None) -> None:
self.entities = entities
@property
def gtype(self):
return TP_PATH
def __iter__(self):
return self.entities.__iter__()
def __len__(self):
return self.entities.__len__()
def __getitem__(self,index):
return self.entities[index]
def size(self):
return self.entities.__len__()
def append(self, agObj:AGObj ):
self.entities.append(agObj)
def __str__(self) -> str:
return self.toString()
def __repr__(self) -> str:
return self.toString()
def toString(self) -> str:
buf = StringIO()
buf.write("[")
max = len(self.entities)
idx = 0
while idx < max:
if idx > 0:
buf.write(",")
self.entities[idx]._toString(buf)
idx += 1
buf.write("]::PATH")
return buf.getvalue()
def toJson(self) -> str:
buf = StringIO()
buf.write("{\"gtype\": \"path\", \"elements\": [")
max = len(self.entities)
idx = 0
while idx < max:
if idx > 0:
buf.write(",")
self.entities[idx]._toJson(buf)
idx += 1
buf.write("]}")
return buf.getvalue()
class Vertex(AGObj):
def __init__(self, id=None, label=None, properties=None) -> None:
self.id = id
self.label = label
self.properties = properties
@property
def gtype(self):
return TP_VERTEX
def __setitem__(self,name, value):
self.properties[name]=value
def __getitem__(self,name):
if name in self.properties:
return self.properties[name]
else:
return None
def __str__(self) -> str:
return self.toString()
def __repr__(self) -> str:
return self.toString()
def toString(self) -> str:
return nodeToString(self)
def _toString(self, buf):
_nodeToString(self, buf)
def toJson(self) -> str:
return nodeToJson(self)
def _toJson(self, buf):
_nodeToJson(self, buf)
class Edge(AGObj):
def __init__(self, id=None, label=None, properties=None) -> None:
self.id = id
self.label = label
self.start_id = None
self.end_id = None
self.properties = properties
@property
def gtype(self):
return TP_EDGE
def __setitem__(self,name, value):
self.properties[name]=value
def __getitem__(self,name):
if name in self.properties:
return self.properties[name]
else:
return None
def __str__(self) -> str:
return self.toString()
def __repr__(self) -> str:
return self.toString()
def extraStrFormat(node, buf):
if node.start_id != None:
buf.write(", start_id:")
buf.write(str(node.start_id))
if node.end_id != None:
buf.write(", end_id:")
buf.write(str(node.end_id))
def toString(self) -> str:
return nodeToString(self, Edge.extraStrFormat)
def _toString(self, buf):
_nodeToString(self, buf, Edge.extraStrFormat)
def extraJsonFormat(node, buf):
if node.start_id != None:
buf.write(", \"start_id\": \"")
buf.write(str(node.start_id))
buf.write("\"")
if node.end_id != None:
buf.write(", \"end_id\": \"")
buf.write(str(node.end_id))
buf.write("\"")
def toJson(self) -> str:
return nodeToJson(self, Edge.extraJsonFormat)
def _toJson(self, buf):
_nodeToJson(self, buf, Edge.extraJsonFormat)
def nodeToString(node, extraFormatter=None):
    """Render *node* to a human-readable string (see _nodeToString)."""
    out = StringIO()
    _nodeToString(node, out, extraFormatter=extraFormatter)
    return out.getvalue()
def _nodeToString(node, buf, extraFormatter=None):
    """Write the text form of *node* into *buf*.

    Only fields that are present (not None) are emitted; the rendering is
    closed with a ``}::VERTEX`` / ``}::EDGE`` marker chosen by the node's
    graph type. *extraFormatter* lets Edge inject its endpoint fields.
    """
    buf.write("{")
    if node.label is not None:
        buf.write("label:")
        buf.write(node.label)
    if node.id is not None:
        buf.write(", id:")
        buf.write(str(node.id))
    if node.properties is not None:
        buf.write(", properties:{")
        for key, value in node.properties.items():
            # Every entry, including the last, is followed by ", " — this
            # matches the established output format.
            buf.write(key)
            buf.write(": ")
            buf.write(str(value))
            buf.write(", ")
        buf.write("}")
    if extraFormatter is not None:
        extraFormatter(node, buf)
    if node.gtype == TP_VERTEX:
        buf.write("}::VERTEX")
    if node.gtype == TP_EDGE:
        buf.write("}::EDGE")
def nodeToJson(node, extraFormatter=None):
    """Render *node* to its JSON-style string form (see _nodeToJson)."""
    out = StringIO()
    _nodeToJson(node, out, extraFormatter=extraFormatter)
    return out.getvalue()
def _nodeToJson(node, buf, extraFormatter=None):
buf.write("{\"gtype\": ")
if node.gtype == TP_VERTEX:
buf.write("\"vertex\", ")
if node.gtype == TP_EDGE:
buf.write("\"edge\", ")
if node.label != None:
buf.write("\"label\":\"")
buf.write(node.label)
buf.write("\"")
if node.id != None:
buf.write(", \"id\":")
buf.write(str(node.id))
if extraFormatter != None:
extraFormatter(node, buf)
if node.properties != None:
buf.write(", \"properties\":{")
for k,v in node.properties.items():
buf.write("\"")
buf.write(k)
buf.write("\": \"")
buf.write(str(v))
buf.write("\", ")
buf.write("}")
buf.write("}") | 0.391522 | 0.08882 |
import datetime
import typing
from typing_extensions import TypeGuard
from .. import spec
from .. import exceptions
from . import timelength_units
def detect_timelength_representation(
    timelength: spec.Timelength,
) -> spec.TimelengthRepresentation:
    """return str name of Timelength representation

    Probes the candidate representations in a fixed order and returns the
    name of the first match.  Order matters: the string forms overlap, so
    earlier probes take precedence over later ones.

    :raises exceptions.RepresentationDetectionException: when no known
        representation matches *timelength*.
    """
    if is_timelength_seconds(timelength):
        return 'TimelengthSeconds'
    elif is_timelength_seconds_precise(timelength):
        return 'TimelengthSecondsPrecise'
    elif is_timelength_label(timelength):
        return 'TimelengthLabel'
    elif is_timelength_clock(timelength):
        return 'TimelengthClock'
    elif is_timelength_phrase(timelength):
        return 'TimelengthPhrase'
    elif is_timelength_clock_phrase(timelength):
        return 'TimelengthClockPhrase'
    elif is_timelength_timedelta(timelength):
        return 'TimelengthTimedelta'
    else:
        raise exceptions.RepresentationDetectionException(
            'could not determine Timelength representation: ' + str(timelength)
        )
def is_timelength(timelength: typing.Any) -> TypeGuard[spec.Timelength]:
    """return bool of whether input is Timelength"""
    # Any value whose representation can be detected is a valid Timelength.
    try:
        detect_timelength_representation(timelength)
    except exceptions.RepresentationDetectionException:
        return False
    else:
        return True
def is_timelength_seconds(
    timelength: typing.Any,
) -> TypeGuard[spec.TimelengthSeconds]:
    """return bool of whether input is TimelengthSeconds"""
    # NOTE(review): bool is a subclass of int, so True/False also pass this
    # check (and are detected first) — confirm whether bools should count.
    return isinstance(timelength, int)
def is_timelength_seconds_precise(
    timelength: typing.Any,
) -> TypeGuard[spec.TimelengthSecondsPrecise]:
    """return bool of whether input is TimelengthSecondsPrecise"""
    # Precise timelengths are plain floats (fractional seconds).
    return isinstance(timelength, float)
def is_timelength_label(
    timelength: typing.Any,
) -> TypeGuard[spec.TimelengthLabel]:
    """return bool of whether input is TimelengthLabel

    A label is an integer amount followed by a single unit letter,
    e.g. ``'10s'`` or ``'3h'``.
    """
    if not isinstance(timelength, str) or len(timelength) < 2:
        return False
    try:
        int(timelength[:-1])
    except ValueError:
        return False
    # The suffix must be a unit *letter*: isalpha() (rather than the
    # previous isalnum()) keeps all-digit strings such as '123' from being
    # misclassified as labels with a digit "unit".
    return timelength[-1].isalpha()
def is_timelength_clock(
    timelength: typing.Any,
) -> TypeGuard[spec.TimelengthClock]:
    """return bool of whether input is TimelengthClock"""
    if not isinstance(timelength, str):
        return False
    # Clock form is colon-separated: every field but the last must parse as
    # an integer; the final field may carry a fractional part.
    *leading, last = timelength.split(':')
    try:
        for field in leading:
            int(field)
        float(last)
    except ValueError:
        return False
    return True
def is_timelength_phrase(
    timelength: typing.Any,
) -> TypeGuard[spec.TimelengthPhrase]:
    """return bool of whether input is TimelengthPhrase"""
    if not isinstance(timelength, str):
        return False
    # A phrase is ", "-separated "<amount> <unit>" pairs where each unit is
    # a known unit name; any parse failure means it is not a phrase.
    known_units = timelength_units.get_unit_labels()
    try:
        for segment in timelength.split(', '):
            amount, unit = segment.split(' ')
            float(amount)
            if unit not in known_units:
                return False
    except Exception:
        return False
    return True
def is_timelength_clock_phrase(
    timelength: typing.Any,
) -> TypeGuard[spec.TimelengthClockPhrase]:
    """return bool of whether input is TimelengthClockPhrase"""
    if not isinstance(timelength, str):
        return False
    segments = timelength.split(', ')
    # An optional trailing clock component is validated on its own; the
    # remaining segments must then form a plain phrase.
    if ':' in segments[-1]:
        if not is_timelength_clock(segments[-1]):
            return False
        segments = segments[:-1]
    return is_timelength_phrase(', '.join(segments))
def is_timelength_timedelta(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthTimedelta]:
"""return bool of whether input is TimelengthTimedelta"""
return isinstance(timelength, datetime.timedelta) | tooltime/timelength_utils/timelength_identify.py | import datetime
import typing
from typing_extensions import TypeGuard
from .. import spec
from .. import exceptions
from . import timelength_units
def detect_timelength_representation(
timelength: spec.Timelength,
) -> spec.TimelengthRepresentation:
"""return str name of Timelength representation"""
if is_timelength_seconds(timelength):
return 'TimelengthSeconds'
elif is_timelength_seconds_precise(timelength):
return 'TimelengthSecondsPrecise'
elif is_timelength_label(timelength):
return 'TimelengthLabel'
elif is_timelength_clock(timelength):
return 'TimelengthClock'
elif is_timelength_phrase(timelength):
return 'TimelengthPhrase'
elif is_timelength_clock_phrase(timelength):
return 'TimelengthClockPhrase'
elif is_timelength_timedelta(timelength):
return 'TimelengthTimedelta'
else:
raise exceptions.RepresentationDetectionException(
'could not determine Timelength representation: ' + str(timelength)
)
def is_timelength(timelength: typing.Any) -> TypeGuard[spec.Timelength]:
"""return bool of whether input is Timelength"""
try:
detect_timelength_representation(timelength)
return True
except exceptions.RepresentationDetectionException:
return False
def is_timelength_seconds(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthSeconds]:
"""return bool of whether input is TimelengthSeconds"""
return isinstance(timelength, int)
def is_timelength_seconds_precise(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthSecondsPrecise]:
"""return bool of whether input is TimelengthSecondsPrecise"""
return isinstance(timelength, float)
def is_timelength_label(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthLabel]:
"""return bool of whether input is TimelengthLabel"""
if not isinstance(timelength, str) or len(timelength) < 2:
return False
try:
int(timelength[:-1])
letter = timelength[-1]
return letter.isalnum()
except ValueError:
return False
def is_timelength_clock(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthClock]:
"""return bool of whether input is TimelengthClock"""
if not isinstance(timelength, str):
return False
numbers = timelength.split(':')
try:
for number in numbers[:-1]:
int(number)
float(numbers[-1])
return True
except ValueError:
return False
def is_timelength_phrase(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthPhrase]:
"""return bool of whether input is TimelengthPhrase"""
if not isinstance(timelength, str):
return False
unit_names_to_labels = timelength_units.get_unit_labels()
pieces = timelength.split(', ')
try:
for piece in pieces:
amount, unit_name = piece.split(' ')
float(amount)
if unit_name not in unit_names_to_labels:
return False
return True
except Exception:
return False
def is_timelength_clock_phrase(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthClockPhrase]:
"""return bool of whether input is TimelengthClockPhrase"""
if not isinstance(timelength, str):
return False
pieces = timelength.split(', ')
if ':' in pieces[-1]:
clock = pieces[-1]
if not is_timelength_clock(clock):
return False
phrase = ', '.join(pieces[:-1])
else:
phrase = ', '.join(pieces)
return is_timelength_phrase(phrase)
def is_timelength_timedelta(
timelength: typing.Any,
) -> TypeGuard[spec.TimelengthTimedelta]:
"""return bool of whether input is TimelengthTimedelta"""
return isinstance(timelength, datetime.timedelta) | 0.625667 | 0.373476 |
import logging
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import Basic, Identify, LevelControl, OnOff
from zigpy.zcl.clusters.lighting import Color
from zigpy.zdo.types import NodeDescriptor
from .. import LUMI, BasicCluster, PowerConfigurationCluster, XiaomiCustomDevice
from ...const import (
ARGS,
BUTTON_1,
BUTTON_2,
BUTTON_3,
BUTTON_4,
BUTTON_5,
BUTTON_6,
COMMAND,
COMMAND_MOVE,
COMMAND_MOVE_COLOR_TEMP,
COMMAND_OFF,
COMMAND_ON,
COMMAND_STEP,
COMMAND_STEP_COLOR_TEMP,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINT_ID,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODELS_INFO,
NODE_DESCRIPTOR,
OUTPUT_CLUSTERS,
PROFILE_ID,
SHORT_PRESS,
)
# Composite trigger/action names for dual-gang Opple-style remotes.
BOTH_BUTTONS = "both_buttons"
BOTH_DOUBLE = "both_double"
BOTH_HOLD = "both_long press"
BOTH_SINGLE = "both_single"
# Reporting endpoint -> physical button it represents.
ENDPOINT_MAP = {1: "left", 2: "right", 3: "both"}
LEFT_DOUBLE = "left_double"
LEFT_HOLD = "left_long press"
LEFT_SINGLE = "left_single"
# Reported press-type value -> human-readable press name.
PRESS_TYPES = {0: "long press", 1: "single", 2: "double"}
RIGHT_DOUBLE = "right_double"
RIGHT_HOLD = "right_long press"
RIGHT_SINGLE = "right_single"
STATUS_TYPE_ATTR = 0x0055  # decimal = 85
# Xiaomi manufacturer-specific cluster / device-type identifiers.
XIAOMI_CLUSTER_ID = 0xFFFF
XIAOMI_DEVICE_TYPE = 0x5F01
XIAOMI_DEVICE_TYPE2 = 0x5F02
XIAOMI_DEVICE_TYPE3 = 0x5F03
_LOGGER = logging.getLogger(__name__)
class RemoteB286OPCN01(XiaomiCustomDevice):
    """Aqara Opple 2 button remote device."""

    # Zigbee signature used to match this quirk to the reporting device.
    signature = {
        # <SimpleDescriptor endpoint=1 profile=260 device_type=261
        # device_version=1
        # input_clusters=[0, 3, 1]
        # output_clusters=[3, 6, 8, 768]>
        MODELS_INFO: [(LUMI, "lumi.remote.b286opcn01")],
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Identify.cluster_id,
                    PowerConfigurationCluster.cluster_id,
                ],
                OUTPUT_CLUSTERS: [
                    Identify.cluster_id,
                    OnOff.cluster_id,
                    LevelControl.cluster_id,
                    Color.cluster_id,
                ],
            },
            # <SimpleDescriptor endpoint=2 profile=260 device_type=259
            # device_version=1
            # input_clusters=[3]
            # output_clusters=[6, 3]>
            2: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
                INPUT_CLUSTERS: [Identify.cluster_id],
                OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
            },
            # Endpoints 3-6 are present in the signature but expose no clusters.
            3: {},
            4: {},
            5: {},
            6: {},
        },
    }

    # Replacement applied once the signature matches: swaps in the Xiaomi
    # specific Basic / PowerConfiguration cluster implementations and
    # overrides the node descriptor.
    replacement = {
        # NOTE(review): hand-built positional NodeDescriptor — confirm the
        # field values against zigpy.zdo.types.NodeDescriptor.
        NODE_DESCRIPTOR: NodeDescriptor(
            0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
        ),
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
                INPUT_CLUSTERS: [
                    BasicCluster,
                    Identify.cluster_id,
                    PowerConfigurationCluster,
                ],
                OUTPUT_CLUSTERS: [
                    Identify.cluster_id,
                    OnOff.cluster_id,
                    LevelControl.cluster_id,
                    Color.cluster_id,
                ],
            },
            2: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
                INPUT_CLUSTERS: [Identify.cluster_id],
                OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
            },
            3: {},
            4: {},
            5: {},
            6: {},
        },
    }

    # Maps (press type, button) to the Zigbee command the remote emits,
    # exposed as device automation triggers.
    device_automation_triggers = {
        (DOUBLE_PRESS, BUTTON_1): {
            COMMAND: COMMAND_STEP,
            ENDPOINT_ID: 1,
            ARGS: [1, 85, 7],
        },
        (SHORT_PRESS, BUTTON_1): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 1},
        (LONG_PRESS, BUTTON_1): {
            COMMAND: COMMAND_STEP_COLOR_TEMP,
            ENDPOINT_ID: 1,
            ARGS: [1, 69, 7, 0, 0],
        },
        (DOUBLE_PRESS, BUTTON_2): {
            COMMAND: COMMAND_STEP,
            ENDPOINT_ID: 1,
            ARGS: [0, 85, 7],
        },
        (SHORT_PRESS, BUTTON_2): {COMMAND: COMMAND_ON, ENDPOINT_ID: 1},
        (LONG_PRESS, BUTTON_2): {
            COMMAND: COMMAND_STEP_COLOR_TEMP,
            ENDPOINT_ID: 1,
            ARGS: [3, 69, 7, 0, 0],
        },
    }
class RemoteB486OPCN01(XiaomiCustomDevice):
    """Aqara Opple 4 button remote device."""

    # Zigbee signature used to match this quirk to the reporting device.
    signature = {
        # <SimpleDescriptor endpoint=1 profile=260 device_type=261
        # device_version=1
        # input_clusters=[0, 3, 1]
        # output_clusters=[3, 6, 8, 768]>
        MODELS_INFO: [(LUMI, "lumi.remote.b486opcn01")],
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Identify.cluster_id,
                    PowerConfigurationCluster.cluster_id,
                ],
                OUTPUT_CLUSTERS: [
                    Identify.cluster_id,
                    OnOff.cluster_id,
                    LevelControl.cluster_id,
                    Color.cluster_id,
                ],
            },
            # <SimpleDescriptor endpoint=2 profile=260 device_type=259
            # device_version=1
            # input_clusters=[3]
            # output_clusters=[6, 3]>
            2: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
                INPUT_CLUSTERS: [Identify.cluster_id],
                OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
            },
            # Endpoints 3-6 are present in the signature but expose no clusters.
            3: {},
            4: {},
            5: {},
            6: {},
        },
    }

    # Replacement applied once the signature matches: swaps in the Xiaomi
    # specific Basic / PowerConfiguration cluster implementations and
    # overrides the node descriptor.
    replacement = {
        # NOTE(review): hand-built positional NodeDescriptor — confirm the
        # field values against zigpy.zdo.types.NodeDescriptor.
        NODE_DESCRIPTOR: NodeDescriptor(
            0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
        ),
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
                INPUT_CLUSTERS: [
                    BasicCluster,
                    Identify.cluster_id,
                    PowerConfigurationCluster,
                ],
                OUTPUT_CLUSTERS: [
                    Identify.cluster_id,
                    OnOff.cluster_id,
                    LevelControl.cluster_id,
                    Color.cluster_id,
                ],
            },
            2: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
                INPUT_CLUSTERS: [Identify.cluster_id],
                OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
            },
            3: {},
            4: {},
            5: {},
            6: {},
        },
    }

    # Maps (press type, button) to the Zigbee command the remote emits,
    # exposed as device automation triggers.
    device_automation_triggers = {
        (SHORT_PRESS, BUTTON_1): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 1},
        (SHORT_PRESS, BUTTON_2): {COMMAND: COMMAND_ON, ENDPOINT_ID: 1},
        (SHORT_PRESS, BUTTON_3): {
            COMMAND: COMMAND_STEP,
            ENDPOINT_ID: 1,
            ARGS: [1, 85, 7],
        },
        (DOUBLE_PRESS, BUTTON_3): {
            COMMAND: COMMAND_STEP_COLOR_TEMP,
            ENDPOINT_ID: 1,
            ARGS: [1, 69, 7, 0, 0],
        },
        (SHORT_PRESS, BUTTON_4): {
            COMMAND: COMMAND_STEP,
            ENDPOINT_ID: 1,
            ARGS: [0, 85, 7],
        },
        (DOUBLE_PRESS, BUTTON_4): {
            COMMAND: COMMAND_STEP_COLOR_TEMP,
            ENDPOINT_ID: 1,
            ARGS: [3, 69, 7, 0, 0],
        },
    }
class RemoteB686OPCN01(XiaomiCustomDevice):
"""Aqara Opple 6 button remote device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=261
# device_version=1
# input_clusters=[0, 3, 1]
# output_clusters=[3, 6, 8, 768]>
MODELS_INFO: [(LUMI, "lumi.remote.b686opcn01")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
PowerConfigurationCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=259
# device_version=1
# input_clusters=[3]
# output_clusters=[6, 3]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
replacement = {
NODE_DESCRIPTOR: NodeDescriptor(
0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
),
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
BasicCluster,
Identify.cluster_id,
PowerConfigurationCluster,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
device_automation_triggers = {
(SHORT_PRESS, BUTTON_1): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 1},
(SHORT_PRESS, BUTTON_2): {COMMAND: COMMAND_ON, ENDPOINT_ID: 1},
(SHORT_PRESS, BUTTON_3): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [1, 85, 7],
},
(LONG_PRESS, BUTTON_3): {COMMAND: COMMAND_MOVE, ENDPOINT_ID: 1, ARGS: [1, 15]},
(SHORT_PRESS, BUTTON_4): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [0, 85, 7],
},
(LONG_PRESS, BUTTON_4): {COMMAND: COMMAND_MOVE, ENDPOINT_ID: 1, ARGS: [0, 15]},
(SHORT_PRESS, BUTTON_5): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [1, 69, 7, 0, 0],
},
(LONG_PRESS, BUTTON_5): {
COMMAND: COMMAND_MOVE_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [1, 15, 0, 0],
},
(SHORT_PRESS, BUTTON_6): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [3, 69, 7, 0, 0],
},
(LONG_PRESS, BUTTON_6): {
COMMAND: COMMAND_MOVE_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [3, 15, 0, 0],
},
} | zhaquirks/xiaomi/aqara/opple_remote.py | import logging
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import Basic, Identify, LevelControl, OnOff
from zigpy.zcl.clusters.lighting import Color
from zigpy.zdo.types import NodeDescriptor
from .. import LUMI, BasicCluster, PowerConfigurationCluster, XiaomiCustomDevice
from ...const import (
ARGS,
BUTTON_1,
BUTTON_2,
BUTTON_3,
BUTTON_4,
BUTTON_5,
BUTTON_6,
COMMAND,
COMMAND_MOVE,
COMMAND_MOVE_COLOR_TEMP,
COMMAND_OFF,
COMMAND_ON,
COMMAND_STEP,
COMMAND_STEP_COLOR_TEMP,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINT_ID,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODELS_INFO,
NODE_DESCRIPTOR,
OUTPUT_CLUSTERS,
PROFILE_ID,
SHORT_PRESS,
)
BOTH_BUTTONS = "both_buttons"
BOTH_DOUBLE = "both_double"
BOTH_HOLD = "both_long press"
BOTH_SINGLE = "both_single"
ENDPOINT_MAP = {1: "left", 2: "right", 3: "both"}
LEFT_DOUBLE = "left_double"
LEFT_HOLD = "left_long press"
LEFT_SINGLE = "left_single"
PRESS_TYPES = {0: "long press", 1: "single", 2: "double"}
RIGHT_DOUBLE = "right_double"
RIGHT_HOLD = "right_long press"
RIGHT_SINGLE = "right_single"
STATUS_TYPE_ATTR = 0x0055 # decimal = 85
XIAOMI_CLUSTER_ID = 0xFFFF
XIAOMI_DEVICE_TYPE = 0x5F01
XIAOMI_DEVICE_TYPE2 = 0x5F02
XIAOMI_DEVICE_TYPE3 = 0x5F03
_LOGGER = logging.getLogger(__name__)
class RemoteB286OPCN01(XiaomiCustomDevice):
"""Aqara Opple 2 button remote device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=261
# device_version=1
# input_clusters=[0, 3, 1]
# output_clusters=[3, 6, 8, 768]>
MODELS_INFO: [(LUMI, "lumi.remote.b286opcn01")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
PowerConfigurationCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=259
# device_version=1
# input_clusters=[3]
# output_clusters=[6, 3]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
replacement = {
NODE_DESCRIPTOR: NodeDescriptor(
0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
),
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
BasicCluster,
Identify.cluster_id,
PowerConfigurationCluster,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
device_automation_triggers = {
(DOUBLE_PRESS, BUTTON_1): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [1, 85, 7],
},
(SHORT_PRESS, BUTTON_1): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 1},
(LONG_PRESS, BUTTON_1): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [1, 69, 7, 0, 0],
},
(DOUBLE_PRESS, BUTTON_2): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [0, 85, 7],
},
(SHORT_PRESS, BUTTON_2): {COMMAND: COMMAND_ON, ENDPOINT_ID: 1},
(LONG_PRESS, BUTTON_2): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [3, 69, 7, 0, 0],
},
}
class RemoteB486OPCN01(XiaomiCustomDevice):
"""Aqara Opple 4 button remote device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=261
# device_version=1
# input_clusters=[0, 3, 1]
# output_clusters=[3, 6, 8, 768]>
MODELS_INFO: [(LUMI, "lumi.remote.b486opcn01")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
PowerConfigurationCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=259
# device_version=1
# input_clusters=[3]
# output_clusters=[6, 3]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
replacement = {
NODE_DESCRIPTOR: NodeDescriptor(
0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
),
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
BasicCluster,
Identify.cluster_id,
PowerConfigurationCluster,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
device_automation_triggers = {
(SHORT_PRESS, BUTTON_1): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 1},
(SHORT_PRESS, BUTTON_2): {COMMAND: COMMAND_ON, ENDPOINT_ID: 1},
(SHORT_PRESS, BUTTON_3): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [1, 85, 7],
},
(DOUBLE_PRESS, BUTTON_3): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [1, 69, 7, 0, 0],
},
(SHORT_PRESS, BUTTON_4): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [0, 85, 7],
},
(DOUBLE_PRESS, BUTTON_4): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [3, 69, 7, 0, 0],
},
}
class RemoteB686OPCN01(XiaomiCustomDevice):
"""Aqara Opple 6 button remote device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=261
# device_version=1
# input_clusters=[0, 3, 1]
# output_clusters=[3, 6, 8, 768]>
MODELS_INFO: [(LUMI, "lumi.remote.b686opcn01")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
PowerConfigurationCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=259
# device_version=1
# input_clusters=[3]
# output_clusters=[6, 3]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
replacement = {
NODE_DESCRIPTOR: NodeDescriptor(
0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
),
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
BasicCluster,
Identify.cluster_id,
PowerConfigurationCluster,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, OnOff.cluster_id],
},
3: {},
4: {},
5: {},
6: {},
},
}
device_automation_triggers = {
(SHORT_PRESS, BUTTON_1): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 1},
(SHORT_PRESS, BUTTON_2): {COMMAND: COMMAND_ON, ENDPOINT_ID: 1},
(SHORT_PRESS, BUTTON_3): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [1, 85, 7],
},
(LONG_PRESS, BUTTON_3): {COMMAND: COMMAND_MOVE, ENDPOINT_ID: 1, ARGS: [1, 15]},
(SHORT_PRESS, BUTTON_4): {
COMMAND: COMMAND_STEP,
ENDPOINT_ID: 1,
ARGS: [0, 85, 7],
},
(LONG_PRESS, BUTTON_4): {COMMAND: COMMAND_MOVE, ENDPOINT_ID: 1, ARGS: [0, 15]},
(SHORT_PRESS, BUTTON_5): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [1, 69, 7, 0, 0],
},
(LONG_PRESS, BUTTON_5): {
COMMAND: COMMAND_MOVE_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [1, 15, 0, 0],
},
(SHORT_PRESS, BUTTON_6): {
COMMAND: COMMAND_STEP_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [3, 69, 7, 0, 0],
},
(LONG_PRESS, BUTTON_6): {
COMMAND: COMMAND_MOVE_COLOR_TEMP,
ENDPOINT_ID: 1,
ARGS: [3, 15, 0, 0],
},
} | 0.502441 | 0.153581 |
from asyncio.futures import Future
import ctypes
from ctypes import CFUNCTYPE, c_char_p, c_size_t, c_void_p, c_uint32, c_uint16, py_object
from dataclasses import dataclass
import inspect
import logging
import sys
from typing import Type, Union, List, Any

import chip.exceptions
import chip.interaction_model
import chip.native
import chip.tlv

from .ClusterObjects import ClusterAttributeDescriptor
@dataclass
class AttributePath:
    """Addresses one attribute (or a wildcard) as endpoint/cluster/attribute ids."""

    # A None field acts as a wildcard for reads.
    EndpointId: typing.Optional[int] = None
    ClusterId: typing.Optional[int] = None
    AttributeId: typing.Optional[int] = None

    def __init__(self, EndpointId: int = None, Cluster=None, Attribute=None, ClusterId=None, AttributeId=None):
        """Build a path from a Cluster class, an Attribute class, or raw ids.

        ``Cluster`` and ``Attribute`` are mutually exclusive with the raw id
        arguments.
        NOTE(review): ``raise Warning(...)`` raises the Warning class as an
        exception instead of emitting a warning — confirm that is intended.
        """
        self.EndpointId = EndpointId
        if Cluster is not None:
            # Wildcard read for a specific cluster
            if (Attribute is not None) or (ClusterId is not None) or (AttributeId is not None):
                raise Warning(
                    "Attribute, ClusterId and AttributeId is ignored when Cluster is specified")
            # AttributeId is left at the class-level None default (wildcard).
            self.ClusterId = Cluster.id
            return
        if Attribute is not None:
            if (ClusterId is not None) or (AttributeId is not None):
                raise Warning(
                    "ClusterId and AttributeId is ignored when Attribute is specified")
            self.ClusterId = Attribute.cluster_id
            self.AttributeId = Attribute.attribute_id
            return
        self.ClusterId = ClusterId
        self.AttributeId = AttributeId

    def __str__(self) -> str:
        # Canonical "endpoint/cluster/attribute" key, also used to index
        # _AttributeIndex.
        return f"{self.EndpointId}/{self.ClusterId}/{self.AttributeId}"
@dataclass
class AttributeStatus:
    # Path the status applies to and its interaction-model status (kept as a
    # raw int when the code is not a known Status member).
    Path: AttributePath
    Status: Union[chip.interaction_model.Status, int]

# A write result is just a per-path status.
AttributeWriteResult = AttributeStatus

@dataclass
class AttributeDescriptorWithEndpoint:
    EndpointId: int
    Attribute: ClusterAttributeDescriptor

@dataclass
class AttributeWriteRequest(AttributeDescriptorWithEndpoint):
    # Python value to encode to TLV and write to the attribute.
    Data: Any

AttributeReadRequest = AttributeDescriptorWithEndpoint

@dataclass
class AttributeReadResult(AttributeStatus):
    # Decoded attribute value; defaults to None.
    Data: Any = None

# Maps str(AttributePath(ClusterId=..., AttributeId=...)) to the attribute
# descriptor class; populated once by _BuildAttributeIndex().
_AttributeIndex = {}
def _BuildAttributeIndex():
    ''' Build internal attribute index for locating the corresponding cluster object by path in the future.
    We do this because this operation will take a long time when there are lots of attributes, it takes about 300ms for a single query.
    This is acceptable during init, but unacceptable when the server returns lots of attributes at the same time.
    '''
    for clusterName, obj in inspect.getmembers(sys.modules['chip.clusters.Objects']):
        if ('chip.clusters.Objects' in str(obj)) and inspect.isclass(obj):
            for objName, subclass in inspect.getmembers(obj):
                if inspect.isclass(subclass) and (('Attribute') in str(subclass)):
                    for attributeName, attribute in inspect.getmembers(subclass):
                        if inspect.isclass(attribute):
                            for name, field in inspect.getmembers(attribute):
                                if ('__dataclass_fields__' in name):
                                    # 'attribute' is already the descriptor class that
                                    # the previous eval() of the dotted module path
                                    # re-resolved; index it directly instead of
                                    # round-tripping through eval.
                                    path = AttributePath(
                                        ClusterId=field['cluster_id'].default,
                                        AttributeId=field['attribute_id'].default)
                                    _AttributeIndex[str(path)] = attribute
class AsyncReadTransaction:
    """Accumulates attribute read results from the native read client and
    resolves *future* with the collected list when the read completes.

    The public ``handle*`` entry points may be invoked from any thread;
    they re-dispatch onto the asyncio event loop via call_soon_threadsafe.
    """

    def __init__(self, future: Future, eventLoop):
        self._event_loop = eventLoop
        self._future = future
        self._res = []

    def _handleAttributeData(self, path: AttributePath, status: int, data: bytes):
        try:
            # Prefer the symbolic IM status; keep the raw int for codes not
            # (yet) present in the Status enum.
            try:
                imStatus = chip.interaction_model.Status(status)
            except ValueError:
                imStatus = status
            attributeType = _AttributeIndex.get(str(AttributePath(
                ClusterId=path.ClusterId, AttributeId=path.AttributeId)), None)
            if attributeType is None:
                # Unknown attribute: surface the generically decoded TLV.
                attributeValue = chip.tlv.TLVReader(data).get().get("Any", {})
            else:
                attributeValue = attributeType.FromTLV(data)
            self._res.append(AttributeReadResult(
                Path=path, Status=imStatus, Data=attributeValue))
        except Exception as ex:
            # Decoding problems must not kill the event-loop callback.
            logging.exception(ex)

    def handleAttributeData(self, path: AttributePath, status: int, data: bytes):
        self._event_loop.call_soon_threadsafe(
            self._handleAttributeData, path, status, data)

    def _handleError(self, chipError: int):
        self._future.set_exception(
            chip.exceptions.ChipStackError(chipError))

    def handleError(self, chipError: int):
        self._event_loop.call_soon_threadsafe(
            self._handleError, chipError
        )

    def _handleDone(self):
        # The future may already hold an exception set by _handleError.
        if not self._future.done():
            self._future.set_result(self._res)

    def handleDone(self):
        self._event_loop.call_soon_threadsafe(self._handleDone)
class AsyncWriteTransaction:
    """Accumulates write responses from the native write client and
    resolves *future* with the collected list when the interaction ends.

    The public ``handle*`` entry points may be invoked from any thread;
    they re-dispatch onto the asyncio event loop via call_soon_threadsafe.
    """

    def __init__(self, future: Future, eventLoop):
        self._event_loop = eventLoop
        self._future = future
        self._res = []

    def _handleResponse(self, path: AttributePath, status: int):
        # Prefer the symbolic IM status; keep the raw int for codes not
        # (yet) present in the Status enum.
        try:
            imStatus = chip.interaction_model.Status(status)
        except ValueError:
            imStatus = status
        self._res.append(AttributeWriteResult(Path=path, Status=imStatus))

    def handleResponse(self, path: AttributePath, status: int):
        self._event_loop.call_soon_threadsafe(
            self._handleResponse, path, status)

    def _handleError(self, chipError: int):
        self._future.set_exception(
            chip.exceptions.ChipStackError(chipError))

    def handleError(self, chipError: int):
        self._event_loop.call_soon_threadsafe(
            self._handleError, chipError
        )

    def _handleDone(self):
        # The future may already hold an exception set by _handleError.
        if not self._future.done():
            self._future.set_result(self._res)

    def handleDone(self):
        self._event_loop.call_soon_threadsafe(self._handleDone)
# ctypes prototypes for the read-client callbacks:
# (closure, endpoint, cluster, attribute, status, data, length).
_OnReadAttributeDataCallbackFunct = CFUNCTYPE(
    None, py_object, c_uint16, c_uint32, c_uint32, c_uint16, c_char_p, c_size_t)
_OnReadErrorCallbackFunct = CFUNCTYPE(
    None, py_object, c_uint32)
_OnReadDoneCallbackFunct = CFUNCTYPE(
    None, py_object)
@_OnReadAttributeDataCallbackFunct
def _OnReadAttributeDataCallback(closure, endpoint: int, cluster: int, attribute: int, status, data, len):
    # NOTE(review): parameter 'len' shadows the builtin; harmless here since
    # the builtin is not needed in this body.
    # Copy the native buffer before it goes out of scope.
    dataBytes = ctypes.string_at(data, len)
    closure.handleAttributeData(AttributePath(
        EndpointId=endpoint, ClusterId=cluster, AttributeId=attribute), status, dataBytes[:])
@_OnReadErrorCallbackFunct
def _OnReadErrorCallback(closure, chiperror: int):
    closure.handleError(chiperror)
@_OnReadDoneCallbackFunct
def _OnReadDoneCallback(closure):
    closure.handleDone()
    # Balances the Py_IncRef taken in ReadAttributes().
    ctypes.pythonapi.Py_DecRef(ctypes.py_object(closure))
# ctypes prototypes for the write-client callbacks:
# (closure, endpoint, cluster, attribute, status).
_OnWriteResponseCallbackFunct = CFUNCTYPE(
    None, py_object, c_uint16, c_uint32, c_uint32, c_uint16)
_OnWriteErrorCallbackFunct = CFUNCTYPE(
    None, py_object, c_uint32)
_OnWriteDoneCallbackFunct = CFUNCTYPE(
    None, py_object)
@_OnWriteResponseCallbackFunct
def _OnWriteResponseCallback(closure, endpoint: int, cluster: int, attribute: int, status):
    closure.handleResponse(AttributePath(
        EndpointId=endpoint, ClusterId=cluster, AttributeId=attribute), status)
@_OnWriteErrorCallbackFunct
def _OnWriteErrorCallback(closure, chiperror: int):
    closure.handleError(chiperror)
@_OnWriteDoneCallbackFunct
def _OnWriteDoneCallback(closure):
    closure.handleDone()
    # Balances the Py_IncRef taken in WriteAttributes().
    ctypes.pythonapi.Py_DecRef(ctypes.py_object(closure))
def WriteAttributes(future: Future, eventLoop, device, attributes: List[AttributeWriteRequest]) -> int:
    """Issue an attribute write for every request in *attributes*.

    Each request is packed as (serialized AttributePathIB, TLV payload,
    payload length) and the flattened argument list is handed to the native
    write client.  Returns the native error code (0 on success); *future*
    is resolved later through the AsyncWriteTransaction callbacks.
    """
    handle = chip.native.GetLibraryHandle()
    transaction = AsyncWriteTransaction(future, eventLoop)
    writeargs = []
    for attr in attributes:
        # Start from a zero-filled path struct, then set the concrete ids.
        path = chip.interaction_model.AttributePathIBstruct.parse(
            b'\x00' * chip.interaction_model.AttributePathIBstruct.sizeof())
        path.EndpointId = attr.EndpointId
        path.ClusterId = attr.Attribute.cluster_id
        path.AttributeId = attr.Attribute.attribute_id
        path = chip.interaction_model.AttributePathIBstruct.build(path)
        tlv = attr.Attribute.ToTLV(None, attr.Data)
        writeargs.append(ctypes.c_char_p(path))
        writeargs.append(ctypes.c_char_p(bytes(tlv)))
        writeargs.append(ctypes.c_int(len(tlv)))
    # Keep the transaction alive while native code holds a pointer to it;
    # the matching Py_DecRef happens in _OnWriteDoneCallback, or below on
    # immediate failure.
    ctypes.pythonapi.Py_IncRef(ctypes.py_object(transaction))
    res = handle.pychip_WriteClient_WriteAttributes(
        ctypes.py_object(transaction), device, ctypes.c_size_t(len(attributes)), *writeargs)
    if res != 0:
        ctypes.pythonapi.Py_DecRef(ctypes.py_object(transaction))
    return res
def ReadAttributes(future: Future, eventLoop, device, attributes: List[AttributePath]) -> int:
    """Issue an attribute read for every path in *attributes*.

    Returns the native error code (0 on success); *future* is resolved
    later through the AsyncReadTransaction callbacks.
    """
    handle = chip.native.GetLibraryHandle()
    transaction = AsyncReadTransaction(future, eventLoop)
    readargs = []
    for attr in attributes:
        # Start from an all-0xff struct so unset fields stay at the marker
        # value — presumably interpreted as wildcards by the native side;
        # confirm against AttributePathIBstruct's conventions.
        path = chip.interaction_model.AttributePathIBstruct.parse(
            b'\xff' * chip.interaction_model.AttributePathIBstruct.sizeof())
        if attr.EndpointId is not None:
            path.EndpointId = attr.EndpointId
        if attr.ClusterId is not None:
            path.ClusterId = attr.ClusterId
        if attr.AttributeId is not None:
            path.AttributeId = attr.AttributeId
        path = chip.interaction_model.AttributePathIBstruct.build(path)
        readargs.append(ctypes.c_char_p(path))
    # Keep the transaction alive while native code holds a pointer to it;
    # the matching Py_DecRef happens in _OnReadDoneCallback, or below on
    # immediate failure.
    ctypes.pythonapi.Py_IncRef(ctypes.py_object(transaction))
    res = handle.pychip_ReadClient_ReadAttributes(
        ctypes.py_object(transaction), device, ctypes.c_size_t(len(attributes)), *readargs)
    if res != 0:
        ctypes.pythonapi.Py_DecRef(ctypes.py_object(transaction))
    return res
def Init():
    """One-time initialization of the native read/write client bindings.

    Sets native return types and registers the ctypes callback trampolines
    with the library. Idempotent: skipped when already initialized.
    """
    handle = chip.native.GetLibraryHandle()
    # Uses one of the type decorators as an indicator for everything being
    # initialized.
    if not handle.pychip_WriteClient_InitCallbacks.argtypes:
        setter = chip.native.NativeLibraryHandleMethodArguments(handle)
        handle.pychip_WriteClient_WriteAttributes.restype = c_uint32
        setter.Set('pychip_WriteClient_InitCallbacks', None, [
            _OnWriteResponseCallbackFunct, _OnWriteErrorCallbackFunct, _OnWriteDoneCallbackFunct])
        handle.pychip_ReadClient_ReadAttributes.restype = c_uint32
        setter.Set('pychip_ReadClient_InitCallbacks', None, [
            _OnReadAttributeDataCallbackFunct, _OnReadErrorCallbackFunct, _OnReadDoneCallbackFunct])
        handle.pychip_WriteClient_InitCallbacks(
            _OnWriteResponseCallback, _OnWriteErrorCallback, _OnWriteDoneCallback)
        handle.pychip_ReadClient_InitCallbacks(
            _OnReadAttributeDataCallback, _OnReadErrorCallback, _OnReadDoneCallback)
    _BuildAttributeIndex()
from asyncio.futures import Future
import ctypes
from dataclasses import dataclass
from typing import Type, Union, List, Any
from ctypes import CFUNCTYPE, c_char_p, c_size_t, c_void_p, c_uint32, c_uint16, py_object
from .ClusterObjects import ClusterAttributeDescriptor
import chip.exceptions
import chip.interaction_model
import chip.tlv
import inspect
import sys
import logging
@dataclass
class AttributePath:
    """An endpoint/cluster/attribute triple; None fields act as wildcards.

    The custom __init__ also accepts a Cluster object (wildcard over its
    attributes) or an Attribute descriptor (fills both cluster and attribute
    ids) as convenience shorthands.
    """
    EndpointId: int = None
    ClusterId: int = None
    AttributeId: int = None

    def __init__(self, EndpointId: int = None, Cluster=None, Attribute=None, ClusterId=None, AttributeId=None):
        self.EndpointId = EndpointId
        if Cluster is not None:
            # Wildcard read for a specific cluster
            if (Attribute is not None) or (ClusterId is not None) or (AttributeId is not None):
                # NOTE(review): raising the Warning *class* as an exception is
                # unusual, and the message says the args are "ignored" while
                # the code actually rejects them — confirm intended behavior.
                raise Warning(
                    "Attribute, ClusterId and AttributeId is ignored when Cluster is specified")
            self.ClusterId = Cluster.id
            return
        if Attribute is not None:
            if (ClusterId is not None) or (AttributeId is not None):
                raise Warning(
                    "ClusterId and AttributeId is ignored when Attribute is specified")
            self.ClusterId = Attribute.cluster_id
            self.AttributeId = Attribute.attribute_id
            return
        self.ClusterId = ClusterId
        self.AttributeId = AttributeId

    def __str__(self) -> str:
        # This string is the key format used by the _AttributeIndex lookup.
        return f"{self.EndpointId}/{self.ClusterId}/{self.AttributeId}"
@dataclass
class AttributeStatus:
Path: AttributePath
Status: Union[chip.interaction_model.Status, int]
AttributeWriteResult = AttributeStatus
@dataclass
class AttributeDescriptorWithEndpoint:
EndpointId: int
Attribute: ClusterAttributeDescriptor
@dataclass
class AttributeWriteRequest(AttributeDescriptorWithEndpoint):
Data: Any
AttributeReadRequest = AttributeDescriptorWithEndpoint
@dataclass
class AttributeReadResult(AttributeStatus):
Data: Any = None
_AttributeIndex = {}
def _BuildAttributeIndex():
    '''Build internal attribute index for locating the corresponding cluster object by path in the future.
    We do this because this operation will take a long time when there are lots of attributes, it takes about 300ms for a single query.
    This is acceptable during init, but unacceptable when the server returns lots of attributes at the same time.
    '''
    for clusterName, obj in inspect.getmembers(sys.modules['chip.clusters.Objects']):
        if ('chip.clusters.Objects' in str(obj)) and inspect.isclass(obj):
            for objName, subclass in inspect.getmembers(obj):
                if inspect.isclass(subclass) and (('Attribute') in str(subclass)):
                    for attributeName, attribute in inspect.getmembers(subclass):
                        if inspect.isclass(attribute):
                            for name, field in inspect.getmembers(attribute):
                                if ('__dataclass_fields__' in name):
                                    # `attribute` already *is* the descriptor class
                                    # chip.clusters.Objects.<Cluster>.Attributes.<Name>,
                                    # so reference it directly instead of eval()-ing
                                    # its dotted path (slower and fragile).
                                    _AttributeIndex[str(AttributePath(
                                        ClusterId=field['cluster_id'].default,
                                        AttributeId=field['attribute_id'].default))] = attribute
class AsyncReadTransaction:
    """Accumulates attribute-read results and resolves `future` when the read
    interaction completes. Public handle* methods are safe to call from the
    SDK thread: they marshal onto the asyncio event loop."""

    def __init__(self, future: Future, eventLoop):
        self._event_loop = eventLoop
        self._future = future
        self._res = []

    def _handleAttributeData(self, path: AttributePath, status: int, data: bytes):
        try:
            imStatus = status
            try:
                imStatus = chip.interaction_model.Status(status)
            except ValueError:
                # Status code not in the IM Status enum: keep the raw integer.
                pass
            attributeType = _AttributeIndex.get(str(AttributePath(
                ClusterId=path.ClusterId, AttributeId=path.AttributeId)), None)
            if attributeType is None:
                # Unknown attribute: decode generically via the TLV reader.
                attributeValue = chip.tlv.TLVReader(data).get().get("Any", {})
            else:
                attributeValue = attributeType.FromTLV(data)
            self._res.append(AttributeReadResult(
                Path=path, Status=imStatus, Data=attributeValue))
        except Exception as ex:
            # Never let a decode failure propagate into the event loop.
            logging.exception(ex)

    def handleAttributeData(self, path: AttributePath, status: int, data: bytes):
        self._event_loop.call_soon_threadsafe(
            self._handleAttributeData, path, status, data)

    def _handleError(self, chipError: int):
        self._future.set_exception(
            chip.exceptions.ChipStackError(chipError))

    def handleError(self, chipError: int):
        self._event_loop.call_soon_threadsafe(
            self._handleError, chipError
        )

    def _handleDone(self):
        # The future may already hold an exception from _handleError.
        if not self._future.done():
            self._future.set_result(self._res)

    def handleDone(self):
        self._event_loop.call_soon_threadsafe(self._handleDone)
class AsyncWriteTransaction:
    """Accumulates per-attribute write results and resolves `future` when the
    write interaction completes. Public handle* methods are safe to call from
    the SDK thread: they marshal onto the asyncio event loop."""

    def __init__(self, future: Future, eventLoop):
        self._event_loop = eventLoop
        self._future = future
        self._res = []

    def _handleResponse(self, path: AttributePath, status: int):
        try:
            imStatus = chip.interaction_model.Status(status)
            self._res.append(AttributeWriteResult(Path=path, Status=imStatus))
        except ValueError:
            # Status code not in the IM Status enum: record the raw integer.
            self._res.append(AttributeWriteResult(Path=path, Status=status))

    def handleResponse(self, path: AttributePath, status: int):
        self._event_loop.call_soon_threadsafe(
            self._handleResponse, path, status)

    def _handleError(self, chipError: int):
        self._future.set_exception(
            chip.exceptions.ChipStackError(chipError))

    def handleError(self, chipError: int):
        self._event_loop.call_soon_threadsafe(
            self._handleError, chipError
        )

    def _handleDone(self):
        # The future may already hold an exception from _handleError.
        if not self._future.done():
            self._future.set_result(self._res)

    def handleDone(self):
        self._event_loop.call_soon_threadsafe(self._handleDone)
_OnReadAttributeDataCallbackFunct = CFUNCTYPE(
None, py_object, c_uint16, c_uint32, c_uint32, c_uint16, c_char_p, c_size_t)
_OnReadErrorCallbackFunct = CFUNCTYPE(
None, py_object, c_uint32)
_OnReadDoneCallbackFunct = CFUNCTYPE(
None, py_object)
@_OnReadAttributeDataCallbackFunct
def _OnReadAttributeDataCallback(closure, endpoint: int, cluster: int, attribute: int, status, data, length):
    # `length` (renamed from `len`) avoids shadowing the builtin. Copy the C
    # buffer out immediately: it is only valid for the duration of this call.
    dataBytes = ctypes.string_at(data, length)
    closure.handleAttributeData(AttributePath(
        EndpointId=endpoint, ClusterId=cluster, AttributeId=attribute), status, dataBytes[:])
@_OnReadErrorCallbackFunct
def _OnReadErrorCallback(closure, chiperror: int):
closure.handleError(chiperror)
@_OnReadDoneCallbackFunct
def _OnReadDoneCallback(closure):
closure.handleDone()
ctypes.pythonapi.Py_DecRef(ctypes.py_object(closure))
_OnWriteResponseCallbackFunct = CFUNCTYPE(
None, py_object, c_uint16, c_uint32, c_uint32, c_uint16)
_OnWriteErrorCallbackFunct = CFUNCTYPE(
None, py_object, c_uint32)
_OnWriteDoneCallbackFunct = CFUNCTYPE(
None, py_object)
@_OnWriteResponseCallbackFunct
def _OnWriteResponseCallback(closure, endpoint: int, cluster: int, attribute: int, status):
closure.handleResponse(AttributePath(
EndpointId=endpoint, ClusterId=cluster, AttributeId=attribute), status)
@_OnWriteErrorCallbackFunct
def _OnWriteErrorCallback(closure, chiperror: int):
closure.handleError(chiperror)
@_OnWriteDoneCallbackFunct
def _OnWriteDoneCallback(closure):
closure.handleDone()
ctypes.pythonapi.Py_DecRef(ctypes.py_object(closure))
def WriteAttributes(future: Future, eventLoop, device, attributes: List[AttributeWriteRequest]) -> int:
    """Issue a write interaction for `attributes` on `device`.

    The per-attribute results (a list of AttributeWriteResult) are delivered
    asynchronously through `future`; the returned int is the immediate CHIP
    error code from the native call (0 on success).
    """
    handle = chip.native.GetLibraryHandle()
    transaction = AsyncWriteTransaction(future, eventLoop)
    writeargs = []
    for attr in attributes:
        # Build a zero-initialized native AttributePathIB and fill in the
        # concrete endpoint/cluster/attribute ids for this write.
        path = chip.interaction_model.AttributePathIBstruct.parse(
            b'\x00' * chip.interaction_model.AttributePathIBstruct.sizeof())
        path.EndpointId = attr.EndpointId
        path.ClusterId = attr.Attribute.cluster_id
        path.AttributeId = attr.Attribute.attribute_id
        path = chip.interaction_model.AttributePathIBstruct.build(path)
        tlv = attr.Attribute.ToTLV(None, attr.Data)
        # Each attribute contributes three varargs: path, TLV payload, length.
        writeargs.append(ctypes.c_char_p(path))
        writeargs.append(ctypes.c_char_p(bytes(tlv)))
        writeargs.append(ctypes.c_int(len(tlv)))
    # Keep `transaction` alive while the native layer holds a raw pointer to
    # it; the matching Py_DecRef happens in the done callback, or below if the
    # native call fails synchronously.
    ctypes.pythonapi.Py_IncRef(ctypes.py_object(transaction))
    res = handle.pychip_WriteClient_WriteAttributes(
        ctypes.py_object(transaction), device, ctypes.c_size_t(len(attributes)), *writeargs)
    if res != 0:
        # Failure: no callback will fire, so release the extra reference now.
        ctypes.pythonapi.Py_DecRef(ctypes.py_object(transaction))
    return res
def ReadAttributes(future: Future, eventLoop, device, attributes: List[AttributePath]) -> int:
    """Issue a read interaction for `attributes` on `device`.

    Results (a list of AttributeReadResult) are delivered through `future`;
    the returned int is the immediate CHIP error code (0 on success).
    """
    handle = chip.native.GetLibraryHandle()
    transaction = AsyncReadTransaction(future, eventLoop)
    readargs = []
    for attr in attributes:
        # Initialize the native path with all-0xFF bytes (wildcard); only the
        # fields explicitly present on the AttributePath are overwritten.
        path = chip.interaction_model.AttributePathIBstruct.parse(
            b'\xff' * chip.interaction_model.AttributePathIBstruct.sizeof())
        if attr.EndpointId is not None:
            path.EndpointId = attr.EndpointId
        if attr.ClusterId is not None:
            path.ClusterId = attr.ClusterId
        if attr.AttributeId is not None:
            path.AttributeId = attr.AttributeId
        path = chip.interaction_model.AttributePathIBstruct.build(path)
        readargs.append(ctypes.c_char_p(path))
    # Keep `transaction` alive while the native layer holds a raw pointer;
    # released by the done callback, or below on synchronous failure.
    ctypes.pythonapi.Py_IncRef(ctypes.py_object(transaction))
    res = handle.pychip_ReadClient_ReadAttributes(
        ctypes.py_object(transaction), device, ctypes.c_size_t(len(attributes)), *readargs)
    if res != 0:
        ctypes.pythonapi.Py_DecRef(ctypes.py_object(transaction))
    return res
def Init():
    """One-time initialization of the native read/write client bindings.

    Sets native return types and registers the ctypes callback trampolines
    with the library. Idempotent: skipped when already initialized.
    """
    handle = chip.native.GetLibraryHandle()
    # Uses one of the type decorators as an indicator for everything being
    # initialized.
    if not handle.pychip_WriteClient_InitCallbacks.argtypes:
        setter = chip.native.NativeLibraryHandleMethodArguments(handle)
        handle.pychip_WriteClient_WriteAttributes.restype = c_uint32
        setter.Set('pychip_WriteClient_InitCallbacks', None, [
            _OnWriteResponseCallbackFunct, _OnWriteErrorCallbackFunct, _OnWriteDoneCallbackFunct])
        handle.pychip_ReadClient_ReadAttributes.restype = c_uint32
        setter.Set('pychip_ReadClient_InitCallbacks', None, [
            _OnReadAttributeDataCallbackFunct, _OnReadErrorCallbackFunct, _OnReadDoneCallbackFunct])
        handle.pychip_WriteClient_InitCallbacks(
            _OnWriteResponseCallback, _OnWriteErrorCallback, _OnWriteDoneCallback)
        handle.pychip_ReadClient_InitCallbacks(
            _OnReadAttributeDataCallback, _OnReadErrorCallback, _OnReadDoneCallback)
    _BuildAttributeIndex()
from numpy import array, zeros
from typing import Iterable
from random import randint
# Les Types
TabEntiers = Iterable[int]
Texte = (str, 15)
TabTexte = Iterable[str]
class Calcul:
    """One attempt at reaching the target: the trace of steps and the final value."""
    etapes: TabTexte = None  # textual trace of each operation ("a op b = r")
    valeur: int = 0  # result of the most recent operation performed
# Les Constantes
SIGNES: TabTexte = array(["+", "*", "-", "//"])
def copier_tab(src: TabEntiers, dest: TabEntiers, nb: int):
    """Copy the first `nb` elements of `src` into `dest`, in place."""
    for position, valeur in enumerate(src[:nb]):
        dest[position] = valeur
def extraire(tab: TabEntiers, i: int, pos_fin: int) -> int:
    """Remove and return tab[i]: the element at pos_fin is moved into slot i
    and slot pos_fin is zeroed, shrinking the live part of the array by one."""
    valeur = tab[i]
    tab[i], tab[pos_fin] = tab[pos_fin], 0
    return valeur
# Test
# tab: TabEntiers = [1, 2, 3, 4, 5, 6, 7]
# v: int = extraire(tab, 2, 6)
# print(v, tab)
def verifier(op: str, a: int, b: int) -> bool:
    """Return True when applying `op` to (a, b) is a useful, valid operation."""
    if op == SIGNES[3]:
        # Integer division: divisor must not be 1 and division must be exact.
        return b != 1 and a % b == 0
    if op == SIGNES[1]:
        # Multiplication: reject trivial multiplications by 1.
        return a != 1 and b != 1
    if op == SIGNES[2]:
        # Subtraction: result must stay strictly positive.
        return a - b > 0
    # Addition (and any other operator) is always acceptable.
    return True
# Test
# a: int = 1
# b: int = 2
# print(verifier("+", a, b), verifier("-", a, b), verifier("*", a, b), verifier("//", a, b))
# a: int = 5
# b: int = 4
# print(verifier("+", a, b), verifier("-", a, b), verifier("*", a, b), verifier("//", a, b))
def calculer(op: str, a: int, b: int) -> int:
    """Apply `op` to (a, b). Addition is always performed; other operators
    only when verifier() accepts them. Returns None for a rejected operation."""
    if op == SIGNES[0]:
        return a + b
    if not verifier(op, a, b):
        return None
    if op == SIGNES[1]:
        return a * b
    if op == SIGNES[2]:
        return a - b
    if op == SIGNES[3]:
        return a // b
# Test
# print(calculer("+", 4, 5))
# print(calculer("//", 984, 24))
def choisir_operateur(a: int, b: int) -> str:
    """Draw random operators until one is applicable to (a, b), and return it."""
    while True:
        signe = SIGNES[randint(0, 3)]
        if verifier(signe, a, b):
            return signe
# Test
# print(choisir_operateur(4, 4))
# print(choisir_operateur(4, 4))
# print(choisir_operateur(4, 4))
def essayer_calcul(tab_num: TabEntiers, but: int) -> Calcul:
    # Tries random operations on randomly chosen numbers until only one remains.
    # If the target is reached, returns early with the chain of steps so far.
    calcul = Calcul()
    calcul.etapes = zeros(5, Texte)
    # Work on a copy of the array of numbers
    copie_num: TabEntiers = zeros(6, int)
    copier_tab(tab_num, copie_num, 6)
    j: int = 0
    for nb in range(5, 0, -1):
        # Pick an index between 0 and nb (inclusive) for the first operand
        i = randint(0, nb)
        a: int = extraire(copie_num, i, nb)
        # Pick an index between 0 and nb - 1 (inclusive) for the second operand
        i: int = randint(0, nb - 1)
        b: int = extraire(copie_num, i, nb - 1)
        # Choose an operation applicable to a and b
        signe = choisir_operateur(a, b)
        # Perform the operation
        resultat = calculer(signe, a, b)
        # Store the result at the end of the live part of the array
        copie_num[nb - 1] = resultat
        # Record the detail of the operation in the attempt's step trace
        calcul.etapes[j] = str(a) + " " + signe + " " + str(b) + " = " + str(resultat)
        j += 1
        # Update the attempt's current result
        calcul.valeur = resultat
        # Stop early if the target is reached
        if resultat == but:
            return calcul
    return calcul
# Test
# c: Calcul = essayer_calcul([12, 23, 34, 45, 56, 65], 77)
# print(c.valeur, " trouvé par ", c.etapes)
# c = essayer_calcul([12, 23, 34, 45, 56, 65], 77)
# print(c.valeur, " trouvé par ", c.etapes)
def lancer_essais(but: int, tab_num: TabEntiers):
    """Run up to 100000 random attempts and report the one closest to `but`.

    Bug fix: the original tracked the closest *value* (val_proche) but then
    printed the value and step trace of the *last* attempt; we now keep the
    whole best Calcul and report that one.
    """
    essais_max: int = 100000
    meilleur = None  # best attempt seen so far
    nb_essais: int = 0
    print("À partir des opérations sur les nombres", tab_num, "\nLe but est d'obtenir", but)
    for i in range(essais_max):
        calc = essayer_calcul(tab_num, but)
        nb_essais = i + 1
        # Keep this attempt if it lands strictly closer to the target.
        if meilleur is None or abs(but - calc.valeur) < abs(but - meilleur.valeur):
            meilleur = calc
        if meilleur.valeur == but:
            break
    print("Fin de recherche, valeur la plus proche trouvée :", meilleur.valeur)
    print("Nb d'essais effectués :", nb_essais)
    print("Détail des opérations :")
    for e in meilleur.etapes:
        print(e)
lancer_essais(668, [1, 10, 4, 5, 3, 50])
lancer_essais(789, [1, 2, 4, 8, 10, 25])
from numpy import array, zeros
from typing import Iterable
from random import randint
# Les Types
TabEntiers = Iterable[int]
Texte = (str, 15)
TabTexte = Iterable[str]
class Calcul:
etapes: TabTexte = None
valeur: int = 0
# Les Constantes
SIGNES: TabTexte = array(["+", "*", "-", "//"])
def copier_tab(src: TabEntiers, dest: TabEntiers, nb: int):
    """Copy the first `nb` elements of `src` into `dest`, in place."""
    for position, valeur in enumerate(src[:nb]):
        dest[position] = valeur
def extraire(tab: TabEntiers, i: int, pos_fin: int) -> int:
    """Remove and return tab[i]: the element at pos_fin is moved into slot i
    and slot pos_fin is zeroed, shrinking the live part of the array by one."""
    valeur = tab[i]
    tab[i], tab[pos_fin] = tab[pos_fin], 0
    return valeur
# Test
# tab: TabEntiers = [1, 2, 3, 4, 5, 6, 7]
# v: int = extraire(tab, 2, 6)
# print(v, tab)
def verifier(op: str, a: int, b: int) -> bool:
    """Return True when applying `op` to (a, b) is a useful, valid operation."""
    if op == SIGNES[3]:
        # Integer division: divisor must not be 1 and division must be exact.
        return b != 1 and a % b == 0
    if op == SIGNES[1]:
        # Multiplication: reject trivial multiplications by 1.
        return a != 1 and b != 1
    if op == SIGNES[2]:
        # Subtraction: result must stay strictly positive.
        return a - b > 0
    # Addition (and any other operator) is always acceptable.
    return True
# Test
# a: int = 1
# b: int = 2
# print(verifier("+", a, b), verifier("-", a, b), verifier("*", a, b), verifier("//", a, b))
# a: int = 5
# b: int = 4
# print(verifier("+", a, b), verifier("-", a, b), verifier("*", a, b), verifier("//", a, b))
def calculer(op: str, a: int, b: int) -> int:
    """Apply `op` to (a, b). Addition is always performed; other operators
    only when verifier() accepts them. Returns None for a rejected operation."""
    if op == SIGNES[0]:
        return a + b
    if not verifier(op, a, b):
        return None
    if op == SIGNES[1]:
        return a * b
    if op == SIGNES[2]:
        return a - b
    if op == SIGNES[3]:
        return a // b
# Test
# print(calculer("+", 4, 5))
# print(calculer("//", 984, 24))
def choisir_operateur(a: int, b: int) -> str:
rand_x: int = randint(0, 3)
while not verifier(SIGNES[rand_x], a, b):
rand_x = randint(0, 3)
return SIGNES[rand_x]
# Test
# print(choisir_operateur(4, 4))
# print(choisir_operateur(4, 4))
# print(choisir_operateur(4, 4))
def essayer_calcul(tab_num: TabEntiers, but: int) -> Calcul:
# Tente des opérations au hasard, sur les nombres pris au hasard, jusqu'à ce qu'il n'en reste qu'un.
# S'il arrive au but, retourne la chaîne menant au calcul.
calcul = Calcul()
calcul.etapes = zeros(5, Texte)
# Travailler sur une copie du tableau des nombres
copie_num: TabEntiers = zeros(6, int)
copier_tab(tab_num, copie_num, 6)
j: int = 0
for nb in range(5, 0, -1):
# Prendre une valeur entre 0 et nb
i = randint(0, nb)
a: int = extraire(copie_num, i, nb)
# Prendre une valeur entre 0 et nb - 1
i: int = randint(0, nb - 1)
b: int = extraire(copie_num, i, nb - 1)
# Choisir une opération applicable sur a et b
signe = choisir_operateur(a, b)
# Lancer le calcul de l'opération
resultat = calculer(signe, a, b)
# Classer le résultat en fin de tableau
copie_num[nb - 1] = resultat
# Noter le détail de l'opération dans le tableau du calcul
calcul.etapes[j] = str(a) + " " + signe + " " + str(b) + " = " + str(resultat)
j += 1
# Mettre à jour le résultat actuel dans le calcul
calcul.valeur = resultat
# Sortir si le but est trouvé
if resultat == but:
return calcul
return calcul
# Test
# c: Calcul = essayer_calcul([12, 23, 34, 45, 56, 65], 77)
# print(c.valeur, " trouvé par ", c.etapes)
# c = essayer_calcul([12, 23, 34, 45, 56, 65], 77)
# print(c.valeur, " trouvé par ", c.etapes)
def lancer_essais(but: int, tab_num: TabEntiers):
    """Run up to 100000 random attempts and report the one closest to `but`.

    Bug fix: the original tracked the closest *value* (val_proche) but then
    printed the value and step trace of the *last* attempt; we now keep the
    whole best Calcul and report that one.
    """
    essais_max: int = 100000
    meilleur = None  # best attempt seen so far
    nb_essais: int = 0
    print("À partir des opérations sur les nombres", tab_num, "\nLe but est d'obtenir", but)
    for i in range(essais_max):
        calc = essayer_calcul(tab_num, but)
        nb_essais = i + 1
        # Keep this attempt if it lands strictly closer to the target.
        if meilleur is None or abs(but - calc.valeur) < abs(but - meilleur.valeur):
            meilleur = calc
        if meilleur.valeur == but:
            break
    print("Fin de recherche, valeur la plus proche trouvée :", meilleur.valeur)
    print("Nb d'essais effectués :", nb_essais)
    print("Détail des opérations :")
    for e in meilleur.etapes:
        print(e)
lancer_essais(668, [1, 10, 4, 5, 3, 50])
lancer_essais(789, [1, 2, 4, 8, 10, 25])
import collections
import os
import random
import xml.etree.ElementTree as ET
import numpy as np
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base, common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from six.moves import range
from dmc_remastered import DMCR_VARY, SUITE_DIR, register
from dmc_remastered.rng import dmcr_random
from .generate_visuals import get_assets
_DEFAULT_TIME_LIMIT = 20 # (seconds)
_CONTROL_TIMESTEP = 0.02 # (seconds)
# For TURN tasks, the 'tip' geom needs to enter a spherical target of sizes:
_EASY_TARGET_SIZE = 0.07
_HARD_TARGET_SIZE = 0.03
# Initial spin velocity for the Stop task.
_INITIAL_SPIN_VELOCITY = 100
# Spinning slower than this value (radian/second) is considered stopped.
_STOP_VELOCITY = 1e-6
# Spinning faster than this value (radian/second) is considered spinning.
_SPIN_VELOCITY = 15.0
def get_model(visual_seed, vary=("camera", "light")):
    """Load the finger model XML, optionally randomizing camera and light.

    Args:
        visual_seed: seed for the visual randomization; 0 keeps the default
            (unmodified) visuals.
        vary: collection of aspect names to randomize ("camera", "light").
            The default is a tuple rather than a list to avoid the shared
            mutable-default-argument pitfall.

    Returns:
        The (possibly modified) model XML as a UTF-8 encoded byte string.
    """
    with open(os.path.join(SUITE_DIR, "assets", "finger.xml"), "r") as f:
        xml = ET.fromstring(f.read())
    if visual_seed != 0:
        with dmcr_random(visual_seed):
            # Draw every perturbation up front so the RNG stream does not
            # depend on which aspects are present in `vary`.
            camera_x = random.uniform(-0.02, 0.02)
            camera_y = random.uniform(-1.01, -0.99)
            camera_z = random.uniform(0.7, 1.0)
            light_pos_x = random.uniform(-0.8, 0.8)
            light_pos_y = random.uniform(-0.8, 0.8)
            light_pos_z = random.uniform(1.0, 3.0)
            light_dif = random.uniform(0.5, 1.1)
            light_dif_del_r = random.uniform(-0.02, 0.02)
            light_dif_del_g = random.uniform(-0.02, 0.02)
            light_dif_del_b = random.uniform(-0.02, 0.02)
            light_spec = random.uniform(0.0, 1.2)
            light_spec_del_r = random.uniform(-0.02, 0.02)
            light_spec_del_g = random.uniform(-0.02, 0.02)
            light_spec_del_b = random.uniform(-0.02, 0.02)
            # NOTE(review): xml[5][2] / xml[5][0] assumed to be the camera and
            # light elements of finger.xml — confirm against the asset file.
            if "camera" in vary:
                xml[5][2].attrib["pos"] = f"{camera_x} {camera_y} {camera_z}"
            if "light" in vary:
                xml[5][0].attrib["pos"] = f"{light_pos_x} {light_pos_y} {light_pos_z}"
                xml[5][0].attrib[
                    "diffuse"
                ] = f"{light_dif+light_dif_del_r} {light_dif+light_dif_del_g} {light_dif+light_dif_del_b}"
                xml[5][0].attrib[
                    "specular"
                ] = f"{light_spec+light_spec_del_r} {light_spec+light_spec_del_g} {light_spec+light_spec_del_b}"
    return ET.tostring(xml, encoding="utf8", method="xml")
@register("finger", "spin")
def spin(
    time_limit=_DEFAULT_TIME_LIMIT, dynamics_seed=None, visual_seed=None, vary=DMCR_VARY
):
    """Returns a finger Spin environment with remastered (seeded) visuals."""
    model = get_model(visual_seed, vary)
    assets, _ = get_assets(visual_seed, vary)
    physics = Physics.from_xml_string(model, assets)
    task = Spin(random=dynamics_seed)
    return control.Environment(
        physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
    )
@register("finger", "turn_easy")
def turn_easy(
    time_limit=_DEFAULT_TIME_LIMIT, dynamics_seed=None, visual_seed=None, vary=DMCR_VARY
):
    """Returns a finger Turn environment with a large (easy) target.

    Bug fix: removed `environment_kwargs = environment_kwargs or {}`, which
    referenced an undefined name (NameError on every call) and whose result
    was never used.
    """
    model = get_model(visual_seed, vary)
    assets, _ = get_assets(visual_seed, vary)
    physics = Physics.from_xml_string(model, assets)
    task = Turn(target_radius=_EASY_TARGET_SIZE, random=dynamics_seed)
    return control.Environment(
        physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
    )
@register("finger", "turn_hard")
def turn_hard(
    time_limit=_DEFAULT_TIME_LIMIT, dynamics_seed=None, visual_seed=None, vary=DMCR_VARY
):
    """Returns a finger Turn environment with a small (hard) target.

    Bug fix: removed `environment_kwargs = environment_kwargs or {}`, which
    referenced an undefined name (NameError on every call) and whose result
    was never used.
    """
    model = get_model(visual_seed, vary)
    assets, _ = get_assets(visual_seed, vary)
    physics = Physics.from_xml_string(model, assets)
    task = Turn(target_radius=_HARD_TARGET_SIZE, random=dynamics_seed)
    return control.Environment(
        physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
    )
class Physics(mujoco.Physics):
    """Physics simulation with additional features for the Finger domain.

    All observation helpers read named sensors defined in finger.xml; the
    planar (x, z) components are selected with the index pair [0, 2].
    """

    def touch(self):
        """Returns logarithmically scaled signals from the two touch sensors."""
        return np.log1p(self.named.data.sensordata[["touchtop", "touchbottom"]])

    def hinge_velocity(self):
        """Returns the velocity of the hinge joint."""
        return self.named.data.sensordata["hinge_velocity"]

    def tip_position(self):
        """Returns the (x,z) position of the tip relative to the hinge."""
        return (
            self.named.data.sensordata["tip"][[0, 2]]
            - self.named.data.sensordata["spinner"][[0, 2]]
        )

    def bounded_position(self):
        """Returns the positions, with the hinge angle replaced by tip position."""
        return np.hstack(
            (self.named.data.sensordata[["proximal", "distal"]], self.tip_position())
        )

    def velocity(self):
        """Returns the velocities (extracted from sensordata)."""
        return self.named.data.sensordata[
            ["proximal_velocity", "distal_velocity", "hinge_velocity"]
        ]

    def target_position(self):
        """Returns the (x,z) position of the target relative to the hinge."""
        return (
            self.named.data.sensordata["target"][[0, 2]]
            - self.named.data.sensordata["spinner"][[0, 2]]
        )

    def to_target(self):
        """Returns the vector from the tip to the target."""
        return self.target_position() - self.tip_position()

    def dist_to_target(self):
        """Returns the signed distance to the target surface, negative is inside."""
        return (
            np.linalg.norm(self.to_target()) - self.named.model.site_size["target", 0]
        )
class Spin(base.Task):
    """A Finger `Task` to spin the stopped body."""

    def __init__(self, random=None):
        """Initializes a new `Spin` instance.

        Args:
          random: Optional, either a `numpy.random.RandomState` instance, an
            integer seed for creating a new `RandomState`, or None to select a seed
            automatically (default).
        """
        super(Spin, self).__init__(random=random)

    def initialize_episode(self, physics):
        """Hides target/tip markers and starts from a random joint state."""
        # The target and tip sites are irrelevant for spinning; make them invisible.
        physics.named.model.site_rgba["target", 3] = 0
        physics.named.model.site_rgba["tip", 3] = 0
        physics.named.model.dof_damping["hinge"] = 0.03
        _set_random_joint_angles(physics, self.random)
        super(Spin, self).initialize_episode(physics)

    def get_observation(self, physics):
        """Returns state and touch sensors, and target info."""
        obs = collections.OrderedDict()
        obs["position"] = physics.bounded_position()
        obs["velocity"] = physics.velocity()
        obs["touch"] = physics.touch()
        return obs

    def get_reward(self, physics):
        """Returns a sparse reward."""
        # 1.0 while the hinge velocity is at or below -_SPIN_VELOCITY
        # (i.e. spinning fast enough in the negative direction), else 0.0.
        return float(physics.hinge_velocity() <= -_SPIN_VELOCITY)
class Turn(base.Task):
    """A Finger `Task` to turn the body to a target angle."""

    def __init__(self, target_radius, random=None):
        """Initializes a new `Turn` instance.

        Args:
          target_radius: Radius of the target site, which specifies the goal angle.
          random: Optional, either a `numpy.random.RandomState` instance, an
            integer seed for creating a new `RandomState`, or None to select a seed
            automatically (default).
        """
        self._target_radius = target_radius
        super(Turn, self).__init__(random=random)

    def initialize_episode(self, physics):
        """Places the target at a random angle on the spinner's circle."""
        target_angle = self.random.uniform(-np.pi, np.pi)
        hinge_x, hinge_z = physics.named.data.xanchor["hinge", ["x", "z"]]
        # Place the target on the circle traced by the spinner cap.
        radius = physics.named.model.geom_size["cap1"].sum()
        target_x = hinge_x + radius * np.sin(target_angle)
        target_z = hinge_z + radius * np.cos(target_angle)
        physics.named.model.site_pos["target", ["x", "z"]] = target_x, target_z
        physics.named.model.site_size["target", 0] = self._target_radius
        _set_random_joint_angles(physics, self.random)
        super(Turn, self).initialize_episode(physics)

    def get_observation(self, physics):
        """Returns state, touch sensors, and target info."""
        obs = collections.OrderedDict()
        obs["position"] = physics.bounded_position()
        obs["velocity"] = physics.velocity()
        obs["touch"] = physics.touch()
        obs["target_position"] = physics.target_position()
        obs["dist_to_target"] = physics.dist_to_target()
        return obs

    def get_reward(self, physics):
        # Sparse reward: 1.0 once the tip is inside the target sphere.
        return float(physics.dist_to_target() <= 0)
def _set_random_joint_angles(physics, random, max_attempts=1000):
    """Sets the joints to a random collision-free state."""
    for _ in range(max_attempts):
        randomizers.randomize_limited_and_rotational_joints(physics, random)
        # Check for collisions.
        physics.after_reset()
        if physics.data.ncon == 0:
            break
    else:
        # for/else: only reached when every attempt produced a contact.
        raise RuntimeError(
            "Could not find a collision-free state "
            "after {} attempts".format(max_attempts)
        )
import collections
import os
import random
import xml.etree.ElementTree as ET
import numpy as np
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base, common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from six.moves import range
from dmc_remastered import DMCR_VARY, SUITE_DIR, register
from dmc_remastered.rng import dmcr_random
from .generate_visuals import get_assets
_DEFAULT_TIME_LIMIT = 20 # (seconds)
_CONTROL_TIMESTEP = 0.02 # (seconds)
# For TURN tasks, the 'tip' geom needs to enter a spherical target of sizes:
_EASY_TARGET_SIZE = 0.07
_HARD_TARGET_SIZE = 0.03
# Initial spin velocity for the Stop task.
_INITIAL_SPIN_VELOCITY = 100
# Spinning slower than this value (radian/second) is considered stopped.
_STOP_VELOCITY = 1e-6
# Spinning faster than this value (radian/second) is considered spinning.
_SPIN_VELOCITY = 15.0
def get_model(visual_seed, vary=("camera", "light")):
    """Load the finger model XML, optionally randomizing camera and light.

    Args:
        visual_seed: seed for the visual randomization; 0 keeps the default
            (unmodified) visuals.
        vary: collection of aspect names to randomize ("camera", "light").
            The default is a tuple rather than a list to avoid the shared
            mutable-default-argument pitfall.

    Returns:
        The (possibly modified) model XML as a UTF-8 encoded byte string.
    """
    with open(os.path.join(SUITE_DIR, "assets", "finger.xml"), "r") as f:
        xml = ET.fromstring(f.read())
    if visual_seed != 0:
        with dmcr_random(visual_seed):
            # Draw every perturbation up front so the RNG stream does not
            # depend on which aspects are present in `vary`.
            camera_x = random.uniform(-0.02, 0.02)
            camera_y = random.uniform(-1.01, -0.99)
            camera_z = random.uniform(0.7, 1.0)
            light_pos_x = random.uniform(-0.8, 0.8)
            light_pos_y = random.uniform(-0.8, 0.8)
            light_pos_z = random.uniform(1.0, 3.0)
            light_dif = random.uniform(0.5, 1.1)
            light_dif_del_r = random.uniform(-0.02, 0.02)
            light_dif_del_g = random.uniform(-0.02, 0.02)
            light_dif_del_b = random.uniform(-0.02, 0.02)
            light_spec = random.uniform(0.0, 1.2)
            light_spec_del_r = random.uniform(-0.02, 0.02)
            light_spec_del_g = random.uniform(-0.02, 0.02)
            light_spec_del_b = random.uniform(-0.02, 0.02)
            # NOTE(review): xml[5][2] / xml[5][0] assumed to be the camera and
            # light elements of finger.xml — confirm against the asset file.
            if "camera" in vary:
                xml[5][2].attrib["pos"] = f"{camera_x} {camera_y} {camera_z}"
            if "light" in vary:
                xml[5][0].attrib["pos"] = f"{light_pos_x} {light_pos_y} {light_pos_z}"
                xml[5][0].attrib[
                    "diffuse"
                ] = f"{light_dif+light_dif_del_r} {light_dif+light_dif_del_g} {light_dif+light_dif_del_b}"
                xml[5][0].attrib[
                    "specular"
                ] = f"{light_spec+light_spec_del_r} {light_spec+light_spec_del_g} {light_spec+light_spec_del_b}"
    return ET.tostring(xml, encoding="utf8", method="xml")
@register("finger", "spin")
def spin(
time_limit=_DEFAULT_TIME_LIMIT, dynamics_seed=None, visual_seed=None, vary=DMCR_VARY
):
model = get_model(visual_seed, vary)
assets, _ = get_assets(visual_seed, vary)
physics = Physics.from_xml_string(model, assets)
task = Spin(random=dynamics_seed)
return control.Environment(
physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
)
@register("finger", "turn_easy")
def turn_easy(
    time_limit=_DEFAULT_TIME_LIMIT, dynamics_seed=None, visual_seed=None, vary=DMCR_VARY
):
    """Returns a finger Turn environment with a large (easy) target.

    Bug fix: removed `environment_kwargs = environment_kwargs or {}`, which
    referenced an undefined name (NameError on every call) and whose result
    was never used.
    """
    model = get_model(visual_seed, vary)
    assets, _ = get_assets(visual_seed, vary)
    physics = Physics.from_xml_string(model, assets)
    task = Turn(target_radius=_EASY_TARGET_SIZE, random=dynamics_seed)
    return control.Environment(
        physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
    )
@register("finger", "turn_hard")
def turn_hard(
    time_limit=_DEFAULT_TIME_LIMIT, dynamics_seed=None, visual_seed=None, vary=DMCR_VARY
):
    """Returns a finger Turn environment with a small (hard) target.

    Bug fix: removed `environment_kwargs = environment_kwargs or {}`, which
    referenced an undefined name (NameError on every call) and whose result
    was never used.
    """
    model = get_model(visual_seed, vary)
    assets, _ = get_assets(visual_seed, vary)
    physics = Physics.from_xml_string(model, assets)
    task = Turn(target_radius=_HARD_TARGET_SIZE, random=dynamics_seed)
    return control.Environment(
        physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
    )
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Finger domain."""
def touch(self):
"""Returns logarithmically scaled signals from the two touch sensors."""
return np.log1p(self.named.data.sensordata[["touchtop", "touchbottom"]])
def hinge_velocity(self):
"""Returns the velocity of the hinge joint."""
return self.named.data.sensordata["hinge_velocity"]
def tip_position(self):
"""Returns the (x,z) position of the tip relative to the hinge."""
return (
self.named.data.sensordata["tip"][[0, 2]]
- self.named.data.sensordata["spinner"][[0, 2]]
)
def bounded_position(self):
"""Returns the positions, with the hinge angle replaced by tip position."""
return np.hstack(
(self.named.data.sensordata[["proximal", "distal"]], self.tip_position())
)
def velocity(self):
"""Returns the velocities (extracted from sensordata)."""
return self.named.data.sensordata[
["proximal_velocity", "distal_velocity", "hinge_velocity"]
]
def target_position(self):
"""Returns the (x,z) position of the target relative to the hinge."""
return (
self.named.data.sensordata["target"][[0, 2]]
- self.named.data.sensordata["spinner"][[0, 2]]
)
def to_target(self):
"""Returns the vector from the tip to the target."""
return self.target_position() - self.tip_position()
def dist_to_target(self):
"""Returns the signed distance to the target surface, negative is inside."""
return (
np.linalg.norm(self.to_target()) - self.named.model.site_size["target", 0]
)
class Spin(base.Task):
"""A Finger `Task` to spin the stopped body."""
def __init__(self, random=None):
"""Initializes a new `Spin` instance.
Args:
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
super(Spin, self).__init__(random=random)
def initialize_episode(self, physics):
physics.named.model.site_rgba["target", 3] = 0
physics.named.model.site_rgba["tip", 3] = 0
physics.named.model.dof_damping["hinge"] = 0.03
_set_random_joint_angles(physics, self.random)
super(Spin, self).initialize_episode(physics)
def get_observation(self, physics):
"""Returns state and touch sensors, and target info."""
obs = collections.OrderedDict()
obs["position"] = physics.bounded_position()
obs["velocity"] = physics.velocity()
obs["touch"] = physics.touch()
return obs
def get_reward(self, physics):
"""Returns a sparse reward."""
return float(physics.hinge_velocity() <= -_SPIN_VELOCITY)
class Turn(base.Task):
"""A Finger `Task` to turn the body to a target angle."""
def __init__(self, target_radius, random=None):
"""Initializes a new `Turn` instance.
Args:
target_radius: Radius of the target site, which specifies the goal angle.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._target_radius = target_radius
super(Turn, self).__init__(random=random)
def initialize_episode(self, physics):
target_angle = self.random.uniform(-np.pi, np.pi)
hinge_x, hinge_z = physics.named.data.xanchor["hinge", ["x", "z"]]
radius = physics.named.model.geom_size["cap1"].sum()
target_x = hinge_x + radius * np.sin(target_angle)
target_z = hinge_z + radius * np.cos(target_angle)
physics.named.model.site_pos["target", ["x", "z"]] = target_x, target_z
physics.named.model.site_size["target", 0] = self._target_radius
_set_random_joint_angles(physics, self.random)
super(Turn, self).initialize_episode(physics)
def get_observation(self, physics):
"""Returns state, touch sensors, and target info."""
obs = collections.OrderedDict()
obs["position"] = physics.bounded_position()
obs["velocity"] = physics.velocity()
obs["touch"] = physics.touch()
obs["target_position"] = physics.target_position()
obs["dist_to_target"] = physics.dist_to_target()
return obs
def get_reward(self, physics):
return float(physics.dist_to_target() <= 0)
def _set_random_joint_angles(physics, random, max_attempts=1000):
    """Sets the joints to a random collision-free state."""
    for _ in range(max_attempts):
        randomizers.randomize_limited_and_rotational_joints(physics, random)
        # Check for collisions.
        physics.after_reset()
        if physics.data.ncon == 0:
            break
    else:
        # for/else: only reached when every attempt produced a contact.
        raise RuntimeError(
            "Could not find a collision-free state "
            "after {} attempts".format(max_attempts)
        )